# Checking port 62599
# Found port 62599
Name: primary
Data directory: /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_primary_data/pgdata
Backup directory: /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_primary_data/backup
Archive directory: /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_primary_data/archives
Connection string: port=62599 host=/var/folders/n8/pp_chp6x3jlfy1n94r6hdjv40000gn/T/860vQ2yvue
Log file: /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/log/035_standby_logical_decoding_primary.log
# Checking port 62600
# Found port 62600
Name: standby
Data directory: /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_standby_data/pgdata
Backup directory: /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_standby_data/backup
Archive directory: /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_standby_data/archives
Connection string: port=62600 host=/var/folders/n8/pp_chp6x3jlfy1n94r6hdjv40000gn/T/860vQ2yvue
Log file: /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/log/035_standby_logical_decoding_standby.log
# Checking port 62601
# Found port 62601
Name: cascading_standby
Data directory: /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_cascading_standby_data/pgdata
Backup directory: /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_cascading_standby_data/backup
Archive directory: /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_cascading_standby_data/archives
Connection string: port=62601 host=/var/folders/n8/pp_chp6x3jlfy1n94r6hdjv40000gn/T/860vQ2yvue
Log file: /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/log/035_standby_logical_decoding_cascading_standby.log
# Checking port 62602
# Found port 62602
Name: subscriber
Data directory: /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_subscriber_data/pgdata
Backup directory: /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_subscriber_data/backup
Archive directory: /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_subscriber_data/archives
Connection string: port=62602 host=/var/folders/n8/pp_chp6x3jlfy1n94r6hdjv40000gn/T/860vQ2yvue
Log file: /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/log/035_standby_logical_decoding_subscriber.log
[18:04:44.674](0.046s) # initializing database system by copying initdb template
# Running: cp -RPp /Users/admin/pgsql/build/tmp_install/initdb-template /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_primary_data/pgdata
# Running: /Users/admin/pgsql/build/src/test/regress/pg_regress --config-auth /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_primary_data/pgdata
### Enabling WAL archiving for node "primary"
Name: primary
Version: 17devel
Data directory: /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_primary_data/pgdata
Backup directory: /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_primary_data/backup
Archive directory: /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_primary_data/archives
Connection string: port=62599 host=/var/folders/n8/pp_chp6x3jlfy1n94r6hdjv40000gn/T/860vQ2yvue
Log file: /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/log/035_standby_logical_decoding_primary.log
### Starting node "primary"
# Running: pg_ctl -w -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_primary_data/pgdata -l /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/log/035_standby_logical_decoding_primary.log -o --cluster-name=primary start
waiting for server to start.... done
server started
# Postmaster PID for node "primary" is 19053
[18:04:45.611](0.936s) ok 1 - Physical slot reports conflicting as NULL
# Taking pg_basebackup b1 from node "primary"
# Running: pg_basebackup -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_primary_data/backup/b1 -h /var/folders/n8/pp_chp6x3jlfy1n94r6hdjv40000gn/T/860vQ2yvue -p 62599 --checkpoint fast --no-sync
# Backup finished
# Initializing node "standby" from backup "b1" of node "primary"
### Enabling streaming replication for node "standby"
### Enabling WAL restore for node "standby"
### Starting node "standby"
# Running: pg_ctl -w -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_standby_data/pgdata -l /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/log/035_standby_logical_decoding_standby.log -o --cluster-name=standby start
waiting for server to start.... done
server started
# Postmaster PID for node "standby" is 19289
Waiting for replication conn standby's replay_lsn to pass 0/3019DF8 on primary
done
[18:04:46.840](1.229s) # initializing database system by copying initdb template
# Running: cp -RPp /Users/admin/pgsql/build/tmp_install/initdb-template /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_subscriber_data/pgdata
# Running: /Users/admin/pgsql/build/src/test/regress/pg_regress --config-auth /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_subscriber_data/pgdata
### Starting node "subscriber"
# Running: pg_ctl -w -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_subscriber_data/pgdata -l /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/log/035_standby_logical_decoding_subscriber.log -o --cluster-name=subscriber start
waiting for server to start.... done
server started
# Postmaster PID for node "subscriber" is 19423
[18:04:47.755](0.916s) ok 2 - behaves_ok_inactiveslot on standby created
[18:04:47.919](0.164s) ok 3 - behaves_ok_activeslot on standby created
Waiting for replication conn standby's replay_lsn to pass 0/3026818 on primary
done
[18:04:47.988](0.069s) ok 4 - Decoding produced 14 rows (2 BEGIN/COMMIT and 10 rows)
Waiting for replication conn standby's replay_lsn to pass 0/3026948 on primary
done
[18:04:48.083](0.095s) ok 5 - got expected output from SQL decoding session
Waiting for replication conn standby's replay_lsn to pass 0/30274F8 on primary
done
[18:04:48.218](0.135s) ok 6 - got same expected output from pg_recvlogical decoding session
[18:04:48.293](0.075s) ok 7 - pg_recvlogical acknowledged changes
Waiting for replication conn standby's replay_lsn to pass 0/345ADA0 on primary
done
[18:04:48.587](0.294s) ok 8 - replaying logical slot from another database fails
Waiting for replication conn standby's replay_lsn to pass 0/34825B0 on primary
done
Waiting for all subscriptions in "subscriber" to synchronize data
Waiting for replication conn tap_sub's replay_lsn to pass 0/34825E8 on standby
done
done
Waiting for replication conn standby's replay_lsn to pass 0/3482B78 on primary
done
Waiting for replication conn tap_sub's replay_lsn to pass 0/3482B78 on standby
done
[18:04:49.105](0.518s) ok 9 - check replicated inserts after subscription on standby
#### Begin standard error
psql:<stdin>:1: NOTICE: dropped replication slot "tap_sub" on publisher
#### End standard error
### Stopping node "subscriber" using mode fast
# Running: pg_ctl -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_subscriber_data/pgdata -m fast stop
waiting for server to shut down.... done
server stopped
# No postmaster PID for node "subscriber"
[18:04:49.527](0.421s) ok 10 - vacuum_full_inactiveslot on standby created
[18:04:49.729](0.202s) ok 11 - vacuum_full_activeslot on standby created
### Reloading node "standby"
# Running: pg_ctl -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_standby_data/pgdata reload
server signaled
#### Begin standard error
psql:<stdin>:1: INFO: vacuuming "pg_catalog.pg_class"
psql:<stdin>:1: INFO: "pg_catalog.pg_class": found 5 removable, 420 nonremovable row versions in 14 pages
DETAIL: 0 dead row versions cannot be removed yet.
CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s.
#### End standard error
Waiting for replication conn standby's replay_lsn to pass 0/34B2CF8 on primary
done
[18:04:50.586](0.857s) ok 12 - inactiveslot slot invalidation is logged with vacuum FULL on pg_class
[18:04:50.586](0.000s) ok 13 - activeslot slot invalidation is logged with vacuum FULL on pg_class
[18:04:50.616](0.031s) ok 14 - confl_active_logicalslot updated
[18:04:50.641](0.025s) ok 15 - vacuum_full_activeslot reason for conflict is rows_removed
[18:04:50.666](0.024s) ok 16 - vacuum_full_inactiveslot reason for conflict is rows_removed
[18:04:50.689](0.023s) ok 17 - replication slot stats not removed after invalidation
[18:04:50.705](0.016s) ok 18 - pg_recvlogical exited non-zero
[18:04:50.705](0.000s) ok 19 - slot has been invalidated
### Reloading node "standby"
# Running: pg_ctl -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_standby_data/pgdata reload
server signaled
### Restarting node "standby"
# Running: pg_ctl -w -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_standby_data/pgdata -l /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/log/035_standby_logical_decoding_standby.log restart
waiting for server to shut down.... done
server stopped
waiting for server to start.... done
server started
# Postmaster PID for node "standby" is 20128
[18:04:51.189](0.484s) ok 20 - vacuum_full_activeslot reason for conflict is rows_removed
[18:04:51.210](0.021s) ok 21 - vacuum_full_inactiveslot reason for conflict is rows_removed
Waiting for replication conn standby's replay_lsn to pass 0/40000F8 on primary
done
[18:04:51.882](0.672s) ok 22 - invalidated logical slots do not lead to retaining WAL
[18:04:52.157](0.275s) ok 23 - row_removal_inactiveslot on standby created
[18:04:52.362](0.205s) ok 24 - row_removal_activeslot on standby created
### Reloading node "standby"
# Running: pg_ctl -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_standby_data/pgdata reload
server signaled
#### Begin standard error
psql:<stdin>:1: INFO: vacuuming "testdb.pg_catalog.pg_class"
psql:<stdin>:1: INFO: finished vacuuming "testdb.pg_catalog.pg_class": index scans: 1
pages: 0 removed, 11 remain, 11 scanned (100.00% of total)
tuples: 4 removed, 419 remain, 0 are dead but not yet removable
removable cutoff: 755, which was 0 XIDs old when operation ended
frozen: 0 pages from table (0.00% of total) had 0 tuples frozen
index scan needed: 3 pages from table (27.27% of total) had 5 dead item identifiers removed
index "pg_class_oid_index": pages: 4 in total, 0 newly deleted, 0 currently deleted, 0 reusable
index "pg_class_relname_nsp_index": pages: 5 in total, 0 newly deleted, 0 currently deleted, 0 reusable
index "pg_class_tblspc_relfilenode_index": pages: 2 in total, 0 newly deleted, 0 currently deleted, 0 reusable
avg read rate: 84.688 MB/s, avg write rate: 158.791 MB/s
buffer usage: 78 hits, 8 misses, 15 dirtied
WAL usage: 28 records, 15 full page images, 122919 bytes
system usage: CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s
#### End standard error
Waiting for replication conn standby's replay_lsn to pass 0/403E6D0 on primary
done
[18:04:52.771](0.409s) ok 25 - inactiveslot slot invalidation is logged with vacuum on pg_class
[18:04:52.771](0.000s) ok 26 - activeslot slot invalidation is logged with vacuum on pg_class
[18:04:52.790](0.019s) ok 27 - confl_active_logicalslot updated
[18:04:52.848](0.057s) ok 28 - row_removal_activeslot reason for conflict is rows_removed
[18:04:52.897](0.049s) ok 29 - row_removal_inactiveslot reason for conflict is rows_removed
[18:04:52.945](0.048s) ok 30 - pg_recvlogical exited non-zero
[18:04:52.945](0.000s) ok 31 - slot has been invalidated
[18:04:53.168](0.223s) ok 32 - shared_row_removal_inactiveslot on standby created
[18:04:53.360](0.192s) ok 33 - shared_row_removal_activeslot on standby created
### Reloading node "standby"
# Running: pg_ctl -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_standby_data/pgdata reload
server signaled
#### Begin standard error
psql:<stdin>:1: INFO: vacuuming "testdb.pg_catalog.pg_authid"
psql:<stdin>:1: INFO: finished vacuuming "testdb.pg_catalog.pg_authid": index scans: 1
pages: 0 removed, 1 remain, 1 scanned (100.00% of total)
tuples: 1 removed, 16 remain, 0 are dead but not yet removable
removable cutoff: 758, which was 0 XIDs old when operation ended
new relfrozenxid: 758, which is 28 XIDs ahead of previous value
frozen: 0 pages from table (0.00% of total) had 0 tuples frozen
index scan needed: 1 pages from table (100.00% of total) had 1 dead item identifiers removed
index "pg_authid_rolname_index": pages: 2 in total, 0 newly deleted, 0 currently deleted, 0 reusable
index "pg_authid_oid_index": pages: 2 in total, 0 newly deleted, 0 currently deleted, 0 reusable
avg read rate: 102.796 MB/s, avg write rate: 51.398 MB/s
buffer usage: 36 hits, 2 misses, 1 dirtied
WAL usage: 6 records, 1 full page images, 8693 bytes
system usage: CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s
psql:<stdin>:1: INFO: vacuuming "testdb.pg_toast.pg_toast_1260"
psql:<stdin>:1: INFO: finished vacuuming "testdb.pg_toast.pg_toast_1260": index scans: 0
pages: 0 removed, 0 remain, 0 scanned (100.00% of total)
tuples: 0 removed, 0 remain, 0 are dead but not yet removable
removable cutoff: 758, which was 0 XIDs old when operation ended
new relfrozenxid: 758, which is 28 XIDs ahead of previous value
frozen: 0 pages from table (100.00% of total) had 0 tuples frozen
index scan not needed: 0 pages from table (100.00% of total) had 0 dead item identifiers removed
avg read rate: 42.459 MB/s, avg write rate: 42.459 MB/s
buffer usage: 24 hits, 1 misses, 1 dirtied
WAL usage: 1 records, 0 full page images, 188 bytes
system usage: CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s
#### End standard error
Waiting for replication conn standby's replay_lsn to pass 0/4041A38 on primary
done
[18:04:53.561](0.201s) ok 34 - inactiveslot slot invalidation is logged with vacuum on pg_authid
[18:04:53.562](0.000s) ok 35 - activeslot slot invalidation is logged with vacuum on pg_authid
[18:04:53.578](0.016s) ok 36 - confl_active_logicalslot updated
[18:04:53.619](0.041s) ok 37 - shared_row_removal_activeslot reason for conflict is rows_removed
[18:04:53.638](0.018s) ok 38 - shared_row_removal_inactiveslot reason for conflict is rows_removed
[18:04:53.658](0.021s) ok 39 - pg_recvlogical exited non-zero
[18:04:53.658](0.000s) ok 40 - slot has been invalidated
[18:04:53.762](0.104s) ok 41 - no_conflict_inactiveslot on standby created
[18:04:53.976](0.213s) ok 42 - no_conflict_activeslot on standby created
### Reloading node "standby"
# Running: pg_ctl -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_standby_data/pgdata reload
server signaled
#### Begin standard error
psql:<stdin>:1: INFO: vacuuming "testdb.public.conflict_test"
psql:<stdin>:1: INFO: finished vacuuming "testdb.public.conflict_test": index scans: 0
pages: 0 removed, 1 remain, 1 scanned (100.00% of total)
tuples: 4 removed, 4 remain, 0 are dead but not yet removable
removable cutoff: 762, which was 0 XIDs old when operation ended
new relfrozenxid: 761, which is 2 XIDs ahead of previous value
frozen: 0 pages from table (0.00% of total) had 0 tuples frozen
index scan not needed: 0 pages from table (0.00% of total) had 0 dead item identifiers removed
avg read rate: 56.071 MB/s, avg write rate: 74.761 MB/s
buffer usage: 19 hits, 3 misses, 4 dirtied
WAL usage: 6 records, 4 full page images, 33237 bytes
system usage: CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s
psql:<stdin>:1: INFO: vacuuming "testdb.pg_toast.pg_toast_16418"
psql:<stdin>:1: INFO: finished vacuuming "testdb.pg_toast.pg_toast_16418": index scans: 0
pages: 0 removed, 0 remain, 0 scanned (100.00% of total)
tuples: 0 removed, 0 remain, 0 are dead but not yet removable
removable cutoff: 762, which was 0 XIDs old when operation ended
new relfrozenxid: 762, which is 3 XIDs ahead of previous value
frozen: 0 pages from table (100.00% of total) had 0 tuples frozen
index scan not needed: 0 pages from table (100.00% of total) had 0 dead item identifiers removed
avg read rate: 93.006 MB/s, avg write rate: 0.000 MB/s
buffer usage: 24 hits, 1 misses, 0 dirtied
WAL usage: 1 records, 0 full page images, 188 bytes
system usage: CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s
#### End standard error
Waiting for replication conn standby's replay_lsn to pass 0/404FC10 on primary
done
[18:04:54.179](0.204s) ok 43 - inactiveslot slot invalidation is not logged with vacuum on conflict_test
[18:04:54.180](0.000s) ok 44 - activeslot slot invalidation is not logged with vacuum on conflict_test
[18:04:54.225](0.045s) ok 45 - confl_active_logicalslot not updated
[18:04:54.346](0.121s) ok 46 - Logical slots are reported as non conflicting
### Reloading node "standby"
# Running: pg_ctl -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_standby_data/pgdata reload
server signaled
### Restarting node "standby"
# Running: pg_ctl -w -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_standby_data/pgdata -l /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/log/035_standby_logical_decoding_standby.log restart
waiting for server to shut down.... done
server stopped
waiting for server to start.... done
server started
# Postmaster PID for node "standby" is 20865
[18:04:54.770](0.423s) ok 47 - pruning_inactiveslot on standby created
[18:04:54.972](0.202s) ok 48 - pruning_activeslot on standby created
### Reloading node "standby"
# Running: pg_ctl -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_standby_data/pgdata reload
server signaled
Waiting for replication conn standby's replay_lsn to pass 0/405C0D8 on primary
done
[18:04:55.290](0.319s) ok 49 - inactiveslot slot invalidation is logged with on-access pruning
[18:04:55.290](0.000s) ok 50 - activeslot slot invalidation is logged with on-access pruning
[18:04:55.304](0.014s) ok 51 - confl_active_logicalslot updated
[18:04:55.318](0.014s) ok 52 - pruning_activeslot reason for conflict is rows_removed
[18:04:55.331](0.013s) ok 53 - pruning_inactiveslot reason for conflict is rows_removed
[18:04:55.343](0.012s) ok 54 - pg_recvlogical exited non-zero
[18:04:55.343](0.000s) not ok 55 - slot has been invalidated
[18:04:55.343](0.000s) # Failed test 'slot has been invalidated'
# at /Users/admin/pgsql/src/test/recovery/t/035_standby_logical_decoding.pl line 117.
[18:04:55.343](0.000s) # ''
# doesn't match '(?^:can no longer get changes from replication slot "pruning_activeslot")'
### Reloading node "standby"
# Running: pg_ctl -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_standby_data/pgdata reload
server signaled
[18:04:55.989](0.645s) ok 56 - wal_level_inactiveslot on standby created
[18:04:56.217](0.229s) ok 57 - wal_level_activeslot on standby created
### Restarting node "primary"
# Running: pg_ctl -w -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_primary_data/pgdata -l /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/log/035_standby_logical_decoding_primary.log restart
waiting for server to shut down.... done
server stopped
waiting for server to start.... done
server started
# Postmaster PID for node "primary" is 21322
Waiting for replication conn standby's replay_lsn to pass 0/50000D8 on primary
done
[18:04:56.985](0.768s) ok 58 - inactiveslot slot invalidation is logged due to wal_level
[18:04:56.985](0.000s) ok 59 - activeslot slot invalidation is logged due to wal_level
[18:04:57.000](0.015s) ok 60 - confl_active_logicalslot updated
[18:04:57.013](0.013s) ok 61 - wal_level_activeslot reason for conflict is wal_level_insufficient
[18:04:57.025](0.012s) ok 62 - wal_level_inactiveslot reason for conflict is wal_level_insufficient
[18:04:57.123](0.099s) ok 63 - pg_recvlogical exited non-zero
[18:04:57.124](0.000s) ok 64 - slot has been invalidated
### Restarting node "primary"
# Running: pg_ctl -w -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_primary_data/pgdata -l /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/log/035_standby_logical_decoding_primary.log restart
waiting for server to shut down.... done
server stopped
waiting for server to start.... done
server started
# Postmaster PID for node "primary" is 21433
Waiting for replication conn standby's replay_lsn to pass 0/60000D8 on primary
done
[18:04:58.037](0.913s) ok 65 - pg_recvlogical exited non-zero
[18:04:58.037](0.000s) ok 66 - slot has been invalidated
[18:04:58.342](0.306s) ok 67 - drop_db_inactiveslot on standby created
[18:04:58.750](0.407s) ok 68 - drop_db_activeslot on standby created
[18:04:59.124](0.374s) ok 69 - otherslot on standby created
Waiting for replication conn standby's replay_lsn to pass 0/6000770 on primary
done
[18:04:59.339](0.215s) ok 70 - database dropped on standby
[18:04:59.362](0.023s) ok 71 - inactiveslot on standby dropped
[18:04:59.381](0.019s) ok 72 - activeslot on standby dropped
[18:04:59.381](0.000s) ok 73 - pg_recvlogical exited non-zero
[18:04:59.382](0.000s) ok 74 - slot has been invalidated
[18:04:59.395](0.014s) ok 75 - otherslot on standby not dropped
### Reloading node "standby"
# Running: pg_ctl -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_standby_data/pgdata reload
server signaled
Waiting for replication conn standby's replay_lsn to pass 0/6438A68 on primary
done
# Taking pg_basebackup b1 from node "standby"
# Running: pg_basebackup -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_standby_data/backup/b1 -h /var/folders/n8/pp_chp6x3jlfy1n94r6hdjv40000gn/T/860vQ2yvue -p 62600 --checkpoint fast --no-sync
# Backup finished
# Initializing node "cascading_standby" from backup "b1" of node "standby"
### Enabling streaming replication for node "cascading_standby"
### Enabling WAL restore for node "cascading_standby"
### Starting node "cascading_standby"
# Running: pg_ctl -w -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_cascading_standby_data/pgdata -l /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/log/035_standby_logical_decoding_cascading_standby.log -o --cluster-name=cascading_standby start
waiting for server to start.... done
server started
# Postmaster PID for node "cascading_standby" is 22665
[18:05:06.438](7.043s) ok 76 - promotion_inactiveslot on standby created
[18:05:06.646](0.208s) ok 77 - promotion_activeslot on standby created
Waiting for replication conn cascading_standby's replay_lsn to pass 0/6438AD8 on standby
done
[18:05:06.857](0.210s) ok 78 - promotion_inactiveslot on standby created
[18:05:07.045](0.188s) ok 79 - promotion_activeslot on standby created
Waiting for replication conn standby's replay_lsn to pass 0/6438C78 on primary
done
Waiting for replication conn cascading_standby's replay_lsn to pass 0/6438C78 on standby
done
### Promoting node "standby"
# Running: pg_ctl -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_standby_data/pgdata -l /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/log/035_standby_logical_decoding_standby.log promote
waiting for server to promote.... done
server promoted
Waiting for replication conn cascading_standby's replay_lsn to pass 0/6438E88 on standby
done
[18:05:07.480](0.435s) ok 80 - got expected output from SQL decoding session on promoted standby
[18:05:07.480](0.000s) ok 81 - got 2 COMMIT from pg_recvlogical output
[18:05:07.480](0.000s) ok 82 - got same expected output from pg_recvlogical decoding session
[18:05:07.516](0.036s) ok 83 - got expected output from SQL decoding session on cascading standby
[18:05:07.516](0.000s) ok 84 - got 2 COMMIT from pg_recvlogical output
[18:05:07.517](0.001s) ok 85 - got same expected output from pg_recvlogical decoding session on cascading standby
[18:05:07.517](0.000s) 1..85
### Stopping node "primary" using mode immediate
# Running: pg_ctl -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_primary_data/pgdata -m immediate stop
waiting for server to shut down.... done
server stopped
# No postmaster PID for node "primary"
### Stopping node "standby" using mode immediate
# Running: pg_ctl -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_standby_data/pgdata -m immediate stop
waiting for server to shut down.... done
server stopped
# No postmaster PID for node "standby"
### Stopping node "cascading_standby" using mode immediate
# Running: pg_ctl -D /Users/admin/pgsql/build/testrun/recovery/035_standby_logical_decoding/data/t_035_standby_logical_decoding_cascading_standby_data/pgdata -m immediate stop
waiting for server to shut down.... done
server stopped
# No postmaster PID for node "cascading_standby"
[18:05:07.877](0.360s) # Looks like you failed 1 test of 85.