# Checking port 63673
# Found port 63673
Name: primary
Data directory: /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/t_027_stream_regress_primary_data/pgdata
Backup directory: /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/t_027_stream_regress_primary_data/backup
Archive directory: /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/t_027_stream_regress_primary_data/archives
Connection string: port=63673 host=/tmp/oMDFrQQhD0
Log file: /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/log/027_stream_regress_primary.log
[11:41:40.806](0.042s) # initializing database system by copying initdb template
# Running: cp -RPp /tmp/cirrus-ci-build/build-32/tmp_install/initdb-template /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/t_027_stream_regress_primary_data/pgdata
# Running: /tmp/cirrus-ci-build/build-32/src/test/regress/pg_regress --config-auth /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/t_027_stream_regress_primary_data/pgdata
### Starting node "primary"
# Running: pg_ctl -w -D /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/t_027_stream_regress_primary_data/pgdata -l /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/log/027_stream_regress_primary.log -o --cluster-name=primary start
waiting for server to start.... done
server started
# Postmaster PID for node "primary" is 66943
(standby_1,)
[11:41:41.172](0.366s) ok 1 - physical slot created on primary
# Taking pg_basebackup my_backup from node "primary"
# Running: pg_basebackup -D /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/t_027_stream_regress_primary_data/backup/my_backup -h /tmp/oMDFrQQhD0 -p 63673 --checkpoint fast --no-sync
# Backup finished
# Checking port 63674
# Found port 63674
Name: standby_1
Data directory: /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/t_027_stream_regress_standby_1_data/pgdata
Backup directory: /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/t_027_stream_regress_standby_1_data/backup
Archive directory: /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/t_027_stream_regress_standby_1_data/archives
Connection string: port=63674 host=/tmp/oMDFrQQhD0
Log file: /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/log/027_stream_regress_standby_1.log
# Initializing node "standby_1" from backup "my_backup" of node "primary"
### Enabling streaming replication for node "standby_1"
### Starting node "standby_1"
# Running: pg_ctl -w -D /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/t_027_stream_regress_standby_1_data/pgdata -l /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/log/027_stream_regress_standby_1.log -o --cluster-name=standby_1 start
waiting for server to start.... done
server started
# Postmaster PID for node "standby_1" is 67034
# using postmaster on /tmp/oMDFrQQhD0, port 63673
ok 1 - test_setup 531 ms
# parallel group (20 tests): text oid txid name pg_lsn varchar char float4 money int2 uuid regproc boolean int4 int8 float8 bit enum rangetypes numeric
ok 2 + boolean 230 ms
ok 3 + char 186 ms
ok 4 + name 145 ms
ok 5 + varchar 152 ms
ok 6 + text 114 ms
ok 7 + int2 205 ms
ok 8 + int4 236 ms
ok 9 + int8 283 ms
ok 10 + oid 114 ms
ok 11 + float4 194 ms
ok 12 + float8 297 ms
ok 13 + bit 332 ms
ok 14 + numeric 1396 ms
ok 15 + txid 135 ms
ok 16 + uuid 224 ms
ok 17 + enum 348 ms
ok 18 + money 199 ms
ok 19 + rangetypes 1166 ms
ok 20 + pg_lsn 149 ms
ok 21 + regproc 224 ms
# parallel group (20 tests): lseg md5 time path line circle macaddr timetz macaddr8 point numerology date inet interval timestamp strings timestamptz box polygon multirangetypes
ok 22 + strings 603 ms
ok 23 + md5 126 ms
ok 24 + numerology 209 ms
ok 25 + point 196 ms
ok 26 + lseg 104 ms
ok 27 + line 131 ms
ok 28 + box 1460 ms
ok 29 + path 130 ms
ok 30 + polygon 1469 ms
ok 31 + circle 136 ms
ok 32 + date 281 ms
ok 33 + time 125 ms
ok 34 + timetz 145 ms
ok 35 + timestamp 532 ms
ok 36 + timestamptz 838 ms
ok 37 + interval 406 ms
ok 38 + inet 313 ms
ok 39 + macaddr 139 ms
ok 40 + macaddr8 158 ms
ok 41 + multirangetypes 1550 ms
# parallel group (12 tests): comments misc_sanity unicode xid expressions tstypes horology mvcc geometry type_sanity regex opr_sanity
ok 42 + geometry 520 ms
ok 43 + horology 434 ms
ok 44 + tstypes 248 ms
ok 45 + regex 1003 ms
ok 46 + type_sanity 564 ms
ok 47 + opr_sanity 1083 ms
ok 48 + misc_sanity 79 ms
ok 49 + comments 66 ms
ok 50 + expressions 237 ms
ok 51 + unicode 125 ms
ok 52 + xid 208 ms
ok 53 + mvcc 440 ms
# parallel group (5 tests): copydml copyselect copy insert_conflict insert
ok 54 + copy 382 ms
ok 55 + copyselect 116 ms
ok 56 + copydml 97 ms
ok 57 + insert 756 ms
ok 58 + insert_conflict 392 ms
# parallel group (7 tests): create_function_c create_operator create_misc create_type create_schema create_procedure create_table
ok 59 + create_function_c 25 ms
ok 60 + create_misc 122 ms
ok 61 + create_operator 113 ms
ok 62 + create_procedure 179 ms
ok 63 + create_table 1478 ms
ok 64 + create_type 131 ms
ok 65 + create_schema 135 ms
# parallel group (5 tests): index_including index_including_gist create_view create_index_spgist create_index
ok 66 + create_index 3625 ms
ok 67 + create_index_spgist 2403 ms
ok 68 + create_view 1733 ms
ok 69 + index_including 554 ms
ok 70 + index_including_gist 1130 ms
# parallel group (16 tests): create_cast errors roleattributes hash_func create_aggregate drop_if_exists select create_am create_function_sql typed_table infinite_recurse constraints vacuum updatable_views inherit triggers
ok 71 + create_aggregate 231 ms
ok 72 + create_function_sql 548 ms
ok 73 + create_cast 135 ms
ok 74 + constraints 1150 ms
ok 75 + triggers 6524 ms
ok 76 + select 403 ms
ok 77 + inherit 3770 ms
ok 78 + typed_table 592 ms
ok 79 + vacuum 1689 ms
ok 80 + drop_if_exists 304 ms
ok 81 + updatable_views 3429 ms
ok 82 + roleattributes 203 ms
ok 83 + create_am 541 ms
ok 84 + hash_func 227 ms
ok 85 + errors 145 ms
ok 86 + infinite_recurse 793 ms
ok 87 - sanity_check 291 ms
# parallel group (20 tests): select_distinct_on select_having delete select_implicit random case namespace select_into prepared_xacts portals transactions select_distinct union arrays subselect update hash_index join aggregates btree_index
ok 88 + select_into 315 ms
ok 89 + select_distinct 683 ms
ok 90 + select_distinct_on 113 ms
ok 91 + select_implicit 179 ms
ok 92 + select_having 131 ms
ok 93 + subselect 1201 ms
ok 94 + union 949 ms
ok 95 + case 225 ms
ok 96 + join 3293 ms
ok 97 + aggregates 3602 ms
ok 98 + transactions 679 ms
ok 99 + random 208 ms
ok 100 + portals 657 ms
ok 101 + arrays 1043 ms
ok 102 + btree_index 5656 ms
ok 103 + hash_index 2672 ms
ok 104 + update 1778 ms
ok 105 + delete 134 ms
ok 106 + namespace 274 ms
ok 107 + prepared_xacts 373 ms
# parallel group (20 tests): init_privs drop_operator security_label tablesample password lock object_address collate replica_identity groupingsets identity matview spgist generated rowsecurity gin gist brin join_hash privileges
ok 108 + brin 4660 ms
ok 109 + gin 3281 ms
ok 110 + gist 3505 ms
ok 111 + spgist 2746 ms
ok 112 + privileges 5322 ms
ok 113 + init_privs 79 ms
ok 114 + security_label 269 ms
ok 115 + collate 778 ms
ok 116 + matview 1773 ms
ok 117 + lock 382 ms
ok 118 + replica_identity 994 ms
ok 119 + rowsecurity 2939 ms
ok 120 + object_address 571 ms
ok 121 + tablesample 364 ms
ok 122 + groupingsets 1398 ms
ok 123 + drop_operator 118 ms
ok 124 + password 376 ms
ok 125 + identity 1653 ms
ok 126 + generated 2755 ms
ok 127 + join_hash 4678 ms
# parallel group (2 tests): brin_bloom brin_multi
ok 128 + brin_bloom 185 ms
ok 129 + brin_multi 1165 ms
# parallel group (17 tests): collate.icu.utf8 async dbsize tidrangescan tsrf alter_operator tid tidscan create_role sysviews misc_functions misc incremental_sort alter_generic without_overlaps create_table_like merge
ok 130 + create_table_like 955 ms
ok 131 + alter_generic 621 ms
ok 132 + alter_operator 209 ms
ok 133 + misc 541 ms
ok 134 + async 68 ms
ok 135 + dbsize 83 ms
ok 136 + merge 1064 ms
ok 137 + misc_functions 524 ms
ok 138 + sysviews 436 ms
ok 139 + tsrf 184 ms
ok 140 + tid 212 ms
ok 141 + tidscan 232 ms
ok 142 + tidrangescan 169 ms
ok 143 + collate.icu.utf8 44 ms
ok 144 + incremental_sort 580 ms
ok 145 + create_role 294 ms
ok 146 + without_overlaps 749 ms
# parallel group (7 tests): collate.linux.utf8 collate.windows.win1252 amutils psql_crosstab rules psql stats_ext
ok 147 + rules 1242 ms
ok 148 + psql 1376 ms
ok 149 + psql_crosstab 68 ms
ok 150 + amutils 66 ms
ok 151 + stats_ext 2975 ms
ok 152 + collate.linux.utf8 41 ms
ok 153 + collate.windows.win1252 42 ms
ok 154 - select_parallel 2065 ms
ok 155 - write_parallel 241 ms
ok 156 - vacuum_parallel 235 ms
# parallel group (2 tests): subscription publication
ok 157 + publication 1104 ms
ok 158 + subscription 101 ms
# parallel group (17 tests): combocid portals_p2 advisory_lock xmlmap functional_deps tsdicts equivclass guc dependency select_views indirect_toast window cluster bitmapops tsearch foreign_data foreign_key
ok 159 + select_views 660 ms
ok 160 + portals_p2 193 ms
ok 161 + foreign_key 3187 ms
ok 162 + cluster 1190 ms
ok 163 + dependency 411 ms
ok 164 + guc 353 ms
ok 165 + bitmapops 1244 ms
ok 166 + combocid 184 ms
ok 167 + tsearch 1324 ms
ok 168 + tsdicts 315 ms
ok 169 + foreign_data 2066 ms
ok 170 + window 1086 ms
ok 171 + xmlmap 273 ms
ok 172 + functional_deps 284 ms
ok 173 + advisory_lock 194 ms
ok 174 + indirect_toast 982 ms
ok 175 + equivclass 315 ms
# parallel group (7 tests): jsonpath_encoding json_encoding jsonpath sqljson json jsonb_jsonpath jsonb
ok 176 + json 329 ms
ok 177 + jsonb 663 ms
ok 178 + json_encoding 73 ms
ok 179 + jsonpath 121 ms
ok 180 + jsonpath_encoding 64 ms
ok 181 + jsonb_jsonpath 340 ms
ok 182 + sqljson 177 ms
# parallel group (18 tests): prepare returning plancache limit conversion temp sequence copy2 rowtypes truncate polymorphism with largeobject rangefuncs domain xml plpgsql alter_table
ok 183 + plancache 482 ms
ok 184 + limit 508 ms
ok 185 + plpgsql 3361 ms
ok 186 + copy2 879 ms
ok 187 + temp 801 ms
ok 188 + domain 1314 ms
ok 189 + rangefuncs 1273 ms
ok 190 + prepare 239 ms
ok 191 + conversion 634 ms
ok 192 + truncate 1085 ms
ok 193 + alter_table 4656 ms
ok 194 + sequence 829 ms
ok 195 + polymorphism 1096 ms
ok 196 + rowtypes 1024 ms
ok 197 + returning 309 ms
ok 198 + largeobject 1178 ms
ok 199 + with 1110 ms
ok 200 + xml 2031 ms
# parallel group (14 tests): predicate hash_part reloptions explain partition_info memoize eager_aggregate compression stats partition_aggregate partition_prune indexing tuplesort partition_join
ok 201 + partition_join 3108 ms
ok 202 + partition_prune 2875 ms
ok 203 + reloptions 255 ms
ok 204 + hash_part 151 ms
ok 205 + indexing 2889 ms
ok 206 + partition_aggregate 2520 ms
ok 207 + partition_info 391 ms
ok 208 + tuplesort 2948 ms
ok 209 + explain 334 ms
ok 210 + compression 784 ms
ok 211 + memoize 681 ms
ok 212 + stats 1252 ms
ok 213 + predicate 117 ms
not ok 214 + eager_aggregate 738 ms
# parallel group (2 tests): oidjoins event_trigger
ok 215 + oidjoins 628 ms
ok 216 + event_trigger 713 ms
ok 217 - event_trigger_login 66 ms
ok 218 - fast_default 372 ms
ok 219 - tablespace 871 ms
1..219
# 1 of 219 tests failed.
# The differences that caused some tests to fail can be viewed in the file "/tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/regression.diffs".
# A copy of the test summary that you see above is saved in the file "/tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/regression.out".
=== dumping /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/regression.diffs ===
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/eager_aggregate.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/eager_aggregate.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/eager_aggregate.out 2024-03-05 11:34:40.681578566 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/eager_aggregate.out 2024-03-05 11:42:30.029179633 +0000
@@ -20,27 +20,24 @@
 -- and finalize the aggregation.
 EXPLAIN (VERBOSE, COSTS OFF)
 SELECT t1.a, avg(t2.c) FROM eager_agg_t1 t1 JOIN eager_agg_t2 t2 ON t1.b = t2.b GROUP BY t1.a ORDER BY t1.a;
-                            QUERY PLAN
-------------------------------------------------------------------
- Finalize GroupAggregate
+                         QUERY PLAN
+------------------------------------------------------------
+ GroupAggregate
    Output: t1.a, avg(t2.c)
    Group Key: t1.a
    ->  Sort
-         Output: t1.a, (PARTIAL avg(t2.c))
+         Output: t1.a, t2.c
          Sort Key: t1.a
          ->  Hash Join
-               Output: t1.a, (PARTIAL avg(t2.c))
-               Hash Cond: (t1.b = t2.b)
-               ->  Seq Scan on public.eager_agg_t1 t1
-                     Output: t1.a, t1.b, t1.c
+               Output: t1.a, t2.c
+               Hash Cond: (t2.b = t1.b)
+               ->  Seq Scan on public.eager_agg_t2 t2
+                     Output: t2.a, t2.b, t2.c
                ->  Hash
-                     Output: t2.b, (PARTIAL avg(t2.c))
-                     ->  Partial HashAggregate
-                           Output: t2.b, PARTIAL avg(t2.c)
-                           Group Key: t2.b
-                           ->  Seq Scan on public.eager_agg_t2 t2
-                                 Output: t2.a, t2.b, t2.c
-(18 rows)
+                     Output: t1.a, t1.b
+                     ->  Seq Scan on public.eager_agg_t1 t1
+                           Output: t1.a, t1.b
+(15 rows)

 SELECT t1.a, avg(t2.c) FROM eager_agg_t1 t1 JOIN eager_agg_t2 t2 ON t1.b = t2.b GROUP BY t1.a ORDER BY t1.a;
  a | avg
@@ -60,30 +57,24 @@
 SET enable_hashagg TO off;
 EXPLAIN (VERBOSE, COSTS OFF)
 SELECT t1.a, avg(t2.c) FROM eager_agg_t1 t1 JOIN eager_agg_t2 t2 ON t1.b = t2.b GROUP BY t1.a ORDER BY t1.a;
-                                QUERY PLAN
-------------------------------------------------------------------------
- Finalize GroupAggregate
+                         QUERY PLAN
+------------------------------------------------------------
+ GroupAggregate
    Output: t1.a, avg(t2.c)
    Group Key: t1.a
    ->  Sort
-         Output: t1.a, (PARTIAL avg(t2.c))
+         Output: t1.a, t2.c
          Sort Key: t1.a
          ->  Hash Join
-               Output: t1.a, (PARTIAL avg(t2.c))
-               Hash Cond: (t1.b = t2.b)
-               ->  Seq Scan on public.eager_agg_t1 t1
-                     Output: t1.a, t1.b, t1.c
+               Output: t1.a, t2.c
+               Hash Cond: (t2.b = t1.b)
+               ->  Seq Scan on public.eager_agg_t2 t2
+                     Output: t2.a, t2.b, t2.c
                ->  Hash
-                     Output: t2.b, (PARTIAL avg(t2.c))
-                     ->  Partial GroupAggregate
-                           Output: t2.b, PARTIAL avg(t2.c)
-                           Group Key: t2.b
-                           ->  Sort
-                                 Output: t2.c, t2.b
-                                 Sort Key: t2.b
-                                 ->  Seq Scan on public.eager_agg_t2 t2
-                                       Output: t2.c, t2.b
-(21 rows)
+                     Output: t1.a, t1.b
+                     ->  Seq Scan on public.eager_agg_t1 t1
+                           Output: t1.a, t1.b
+(15 rows)

 SELECT t1.a, avg(t2.c) FROM eager_agg_t1 t1 JOIN eager_agg_t2 t2 ON t1.b = t2.b GROUP BY t1.a ORDER BY t1.a;
  a | avg
@@ -107,34 +98,31 @@
 -- and finalize the aggregation.
 EXPLAIN (VERBOSE, COSTS OFF)
 SELECT t1.a, avg(t2.c + t3.c) FROM eager_agg_t1 t1 JOIN eager_agg_t2 t2 ON t1.b = t2.b JOIN eager_agg_t3 t3 ON t2.a = t3.a GROUP BY t1.a ORDER BY t1.a;
-                                  QUERY PLAN
-------------------------------------------------------------------------------
- Finalize GroupAggregate
+                            QUERY PLAN
+------------------------------------------------------------------
+ GroupAggregate
    Output: t1.a, avg((t2.c + t3.c))
    Group Key: t1.a
    ->  Sort
-         Output: t1.a, (PARTIAL avg((t2.c + t3.c)))
+         Output: t1.a, t2.c, t3.c
          Sort Key: t1.a
          ->  Hash Join
-               Output: t1.a, (PARTIAL avg((t2.c + t3.c)))
-               Hash Cond: (t1.b = t2.b)
-               ->  Seq Scan on public.eager_agg_t1 t1
-                     Output: t1.a, t1.b, t1.c
+               Output: t1.a, t2.c, t3.c
+               Hash Cond: (t2.b = t1.b)
+               ->  Hash Join
+                     Output: t2.c, t2.b, t3.c
+                     Hash Cond: (t3.a = t2.a)
+                     ->  Seq Scan on public.eager_agg_t3 t3
+                           Output: t3.a, t3.b, t3.c
+                     ->  Hash
+                           Output: t2.c, t2.b, t2.a
+                           ->  Seq Scan on public.eager_agg_t2 t2
+                                 Output: t2.c, t2.b, t2.a
                ->  Hash
-                     Output: t2.b, (PARTIAL avg((t2.c + t3.c)))
-                     ->  Partial HashAggregate
-                           Output: t2.b, PARTIAL avg((t2.c + t3.c))
-                           Group Key: t2.b
-                           ->  Hash Join
-                                 Output: t2.c, t3.c, t2.b
-                                 Hash Cond: (t3.a = t2.a)
-                                 ->  Seq Scan on public.eager_agg_t3 t3
-                                       Output: t3.a, t3.b, t3.c
-                                 ->  Hash
-                                       Output: t2.c, t2.b, t2.a
-                                       ->  Seq Scan on public.eager_agg_t2 t2
-                                             Output: t2.c, t2.b, t2.a
-(25 rows)
+                     Output: t1.a, t1.b
+                     ->  Seq Scan on public.eager_agg_t1 t1
+                           Output: t1.a, t1.b
+(22 rows)

 SELECT t1.a, avg(t2.c + t3.c) FROM eager_agg_t1 t1 JOIN eager_agg_t2 t2 ON t1.b = t2.b JOIN eager_agg_t3 t3 ON t2.a = t3.a GROUP BY t1.a ORDER BY t1.a;
  a | avg
@@ -154,37 +142,31 @@
 SET enable_hashagg TO off;
 EXPLAIN (VERBOSE, COSTS OFF)
 SELECT t1.a, avg(t2.c + t3.c) FROM eager_agg_t1 t1 JOIN eager_agg_t2 t2 ON t1.b = t2.b JOIN eager_agg_t3 t3 ON t2.a = t3.a GROUP BY t1.a ORDER BY t1.a;
-                                     QUERY PLAN
-------------------------------------------------------------------------------------
- Finalize GroupAggregate
+                            QUERY PLAN
+------------------------------------------------------------------
+ GroupAggregate
    Output: t1.a, avg((t2.c + t3.c))
    Group Key: t1.a
    ->  Sort
-         Output: t1.a, (PARTIAL avg((t2.c + t3.c)))
+         Output: t1.a, t2.c, t3.c
          Sort Key: t1.a
          ->  Hash Join
-               Output: t1.a, (PARTIAL avg((t2.c + t3.c)))
-               Hash Cond: (t1.b = t2.b)
-               ->  Seq Scan on public.eager_agg_t1 t1
-                     Output: t1.a, t1.b, t1.c
+               Output: t1.a, t2.c, t3.c
+               Hash Cond: (t2.b = t1.b)
+               ->  Hash Join
+                     Output: t2.c, t2.b, t3.c
+                     Hash Cond: (t3.a = t2.a)
+                     ->  Seq Scan on public.eager_agg_t3 t3
+                           Output: t3.a, t3.b, t3.c
+                     ->  Hash
+                           Output: t2.c, t2.b, t2.a
+                           ->  Seq Scan on public.eager_agg_t2 t2
+                                 Output: t2.c, t2.b, t2.a
                ->  Hash
-                     Output: t2.b, (PARTIAL avg((t2.c + t3.c)))
-                     ->  Partial GroupAggregate
-                           Output: t2.b, PARTIAL avg((t2.c + t3.c))
-                           Group Key: t2.b
-                           ->  Sort
-                                 Output: t2.c, t3.c, t2.b
-                                 Sort Key: t2.b
-                                 ->  Hash Join
-                                       Output: t2.c, t3.c, t2.b
-                                       Hash Cond: (t3.a = t2.a)
-                                       ->  Seq Scan on public.eager_agg_t3 t3
-                                             Output: t3.a, t3.b, t3.c
-                                       ->  Hash
-                                             Output: t2.c, t2.b, t2.a
-                                             ->  Seq Scan on public.eager_agg_t2 t2
-                                                   Output: t2.c, t2.b, t2.a
-(28 rows)
+                     Output: t1.a, t1.b
+                     ->  Seq Scan on public.eager_agg_t1 t1
+                           Output: t1.a, t1.b
+(22 rows)

 SELECT t1.a, avg(t2.c + t3.c) FROM eager_agg_t1 t1 JOIN eager_agg_t2 t2 ON t1.b = t2.b JOIN eager_agg_t3 t3 ON t2.a = t3.a GROUP BY t1.a ORDER BY t1.a;
  a | avg
@@ -207,27 +189,24 @@
 -- Ensure aggregation can be pushed down to the non-nullable side
 EXPLAIN (VERBOSE, COSTS OFF)
 SELECT t1.a, avg(t2.c) FROM eager_agg_t1 t1 RIGHT JOIN eager_agg_t2 t2 ON t1.b = t2.b GROUP BY t1.a ORDER BY t1.a;
-                            QUERY PLAN
-------------------------------------------------------------------
- Finalize GroupAggregate
+                         QUERY PLAN
+------------------------------------------------------------
+ GroupAggregate
    Output: t1.a, avg(t2.c)
    Group Key: t1.a
    ->  Sort
-         Output: t1.a, (PARTIAL avg(t2.c))
+         Output: t1.a, t2.c
          Sort Key: t1.a
-         ->  Hash Right Join
-               Output: t1.a, (PARTIAL avg(t2.c))
-               Hash Cond: (t1.b = t2.b)
-               ->  Seq Scan on public.eager_agg_t1 t1
-                     Output: t1.a, t1.b, t1.c
+         ->  Hash Left Join
+               Output: t1.a, t2.c
+               Hash Cond: (t2.b = t1.b)
+               ->  Seq Scan on public.eager_agg_t2 t2
+                     Output: t2.a, t2.b, t2.c
                ->  Hash
-                     Output: t2.b, (PARTIAL avg(t2.c))
-                     ->  Partial HashAggregate
-                           Output: t2.b, PARTIAL avg(t2.c)
-                           Group Key: t2.b
-                           ->  Seq Scan on public.eager_agg_t2 t2
-                                 Output: t2.a, t2.b, t2.c
-(18 rows)
+                     Output: t1.a, t1.b
+                     ->  Seq Scan on public.eager_agg_t1 t1
+                           Output: t1.a, t1.b
+(15 rows)

 SELECT t1.a, avg(t2.c) FROM eager_agg_t1 t1 RIGHT JOIN eager_agg_t2 t2 ON t1.b = t2.b GROUP BY t1.a ORDER BY t1.a;
  a | avg
=== EOF ===
[11:42:34.608](53.436s) not ok 2 - regression tests pass
[11:42:34.608](0.000s) # Failed test 'regression tests pass'
# at /tmp/cirrus-ci-build/src/test/recovery/t/027_stream_regress.pl line 95.
[11:42:34.608](0.000s) # got: '256'
# expected: '0'
5 1 1 1 1 2 1 1 9 5 5 3 4 3 4 4 1 1006 32 1 1 1 6 104 2 1 1 4001 2 41 5 17 9 1 33 34 46 1 1 1 1 -1 1 1 -1 -32768 32767 -2 1 1
Waiting for replication conn standby_1's replay_lsn to pass 0/13E36CE0 on primary
done
# Running: pg_dumpall -f /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/primary.dump --no-sync -p 63673 --no-unlogged-table-data
[11:42:37.872](3.264s) ok 3 - dump primary server
# Running: pg_dumpall -f /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/standby.dump --no-sync -p 63674
[11:42:41.016](3.144s) ok 4 - dump standby server
# Running: diff /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/primary.dump /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/standby.dump
[11:42:41.095](0.079s) ok 5 - compare primary and standby dumps
[11:42:41.349](0.254s) ok 6 - check contents of pg_stat_statements on regression database
### Stopping node "standby_1" using mode fast
# Running: pg_ctl -D /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/t_027_stream_regress_standby_1_data/pgdata -m fast stop
waiting for server to shut down.... done
server stopped
# No postmaster PID for node "standby_1"
### Stopping node "primary" using mode fast
# Running: pg_ctl -D /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/t_027_stream_regress_primary_data/pgdata -m fast stop
waiting for server to shut down.... done
server stopped
# No postmaster PID for node "primary"
[11:42:41.568](0.219s) 1..6
[11:42:41.570](0.001s) # Looks like you failed 1 test of 6.