diff -U3 /tmp/cirrus-ci-build/contrib/pageinspect/expected/brin.out /tmp/cirrus-ci-build/contrib/pageinspect/results/brin.out
--- /tmp/cirrus-ci-build/contrib/pageinspect/expected/brin.out	2024-10-21 09:36:51.113188235 +0000
+++ /tmp/cirrus-ci-build/contrib/pageinspect/results/brin.out	2024-10-21 09:41:36.668636564 +0000
@@ -161,50 +161,7 @@
 SELECT * FROM brin_page_items(get_raw_page('brin_test_parallel_idx', 2), 'brin_test_parallel_idx')
 EXCEPT
 SELECT * FROM brin_page_items(get_raw_page('brin_test_serial_idx', 2), 'brin_test_serial_idx');
- itemoffset | blknum | attnum | allnulls | hasnulls | placeholder | empty | value
-------------+--------+--------+----------+----------+-------------+-------+-------
-(0 rows)
-
-SELECT * FROM brin_page_items(get_raw_page('brin_test_serial_idx', 2), 'brin_test_serial_idx')
-EXCEPT
-SELECT * FROM brin_page_items(get_raw_page('brin_test_parallel_idx', 2), 'brin_test_parallel_idx');
- itemoffset | blknum | attnum | allnulls | hasnulls | placeholder | empty | value
-------------+--------+--------+----------+----------+-------------+-------+-------
-(0 rows)
-
-DROP INDEX brin_test_parallel_idx;
--- force parallel build, but don't allow starting parallel workers to force
--- fallback to serial build, and repeat the checks
-SET max_parallel_workers = 0;
-CREATE INDEX brin_test_parallel_idx ON brin_parallel_test
- USING brin (a int4_minmax_ops, a int4_bloom_ops, b, c int8_minmax_multi_ops)
- WITH (pages_per_range=7)
- WHERE NOT (a BETWEEN 1000 and 1500);
-SELECT relname, relpages
- FROM pg_class
- WHERE relname IN ('brin_test_serial_idx', 'brin_test_parallel_idx')
- ORDER BY relname;
- relname | relpages
-------------------------+----------
- brin_test_parallel_idx | 4
- brin_test_serial_idx | 4
-(2 rows)
-
-SELECT * FROM brin_page_items(get_raw_page('brin_test_parallel_idx', 2), 'brin_test_parallel_idx')
-EXCEPT
-SELECT * FROM brin_page_items(get_raw_page('brin_test_serial_idx', 2), 'brin_test_serial_idx');
- itemoffset | blknum | attnum | allnulls | hasnulls | placeholder | empty | value
-------------+--------+--------+----------+----------+-------------+-------+-------
-(0 rows)
-
-SELECT * FROM brin_page_items(get_raw_page('brin_test_serial_idx', 2), 'brin_test_serial_idx')
-EXCEPT
-SELECT * FROM brin_page_items(get_raw_page('brin_test_parallel_idx', 2), 'brin_test_parallel_idx');
- itemoffset | blknum | attnum | allnulls | hasnulls | placeholder | empty | value
-------------+--------+--------+----------+----------+-------------+-------+-------
-(0 rows)
-
-DROP TABLE brin_parallel_test;
-RESET min_parallel_table_scan_size;
-RESET max_parallel_maintenance_workers;
-RESET maintenance_work_mem;
+server closed the connection unexpectedly
+ This probably means the server terminated abnormally
+ before or while processing the request.
+connection to server was lost
diff -U3 /tmp/cirrus-ci-build/contrib/pageinspect/expected/gin.out /tmp/cirrus-ci-build/contrib/pageinspect/results/gin.out
--- /tmp/cirrus-ci-build/contrib/pageinspect/expected/gin.out	2024-10-21 09:36:51.113188235 +0000
+++ /tmp/cirrus-ci-build/contrib/pageinspect/results/gin.out	2024-10-21 09:41:37.544635838 +0000
@@ -28,44 +28,10 @@
 ERROR: input page is not a compressed GIN data leaf page
 DETAIL: Flags 0002, expected 0083
 INSERT INTO test1 SELECT x, ARRAY[1,10] FROM generate_series(2,10000) x;
-SELECT COUNT(*) > 0
-FROM gin_leafpage_items(get_raw_page('test1_y_idx',
- (pg_relation_size('test1_y_idx') /
- current_setting('block_size')::bigint)::int - 1));
--[ RECORD 1 ]
-?column? | t
-
--- Failure with various modes.
--- Suppress the DETAIL message, to allow the tests to work across various
--- page sizes and architectures.
-\set VERBOSITY terse
--- invalid page size
-SELECT gin_leafpage_items('aaa'::bytea);
-ERROR: invalid page size
-SELECT gin_metapage_info('bbb'::bytea);
-ERROR: invalid page size
-SELECT gin_page_opaque_info('ccc'::bytea);
-ERROR: invalid page size
--- invalid special area size
-SELECT * FROM gin_metapage_info(get_raw_page('test1', 0));
-ERROR: input page is not a valid GIN metapage
-SELECT * FROM gin_page_opaque_info(get_raw_page('test1', 0));
-ERROR: input page is not a valid GIN data leaf page
-SELECT * FROM gin_leafpage_items(get_raw_page('test1', 0));
-ERROR: input page is not a valid GIN data leaf page
-\set VERBOSITY default
--- Tests with all-zero pages.
-SHOW block_size \gset
-SELECT gin_leafpage_items(decode(repeat('00', :block_size), 'hex'));
--[ RECORD 1 ]------+-
-gin_leafpage_items |
-
-SELECT gin_metapage_info(decode(repeat('00', :block_size), 'hex'));
--[ RECORD 1 ]-----+-
-gin_metapage_info |
-
-SELECT gin_page_opaque_info(decode(repeat('00', :block_size), 'hex'));
--[ RECORD 1 ]--------+-
-gin_page_opaque_info |
-
-DROP TABLE test1;
+WARNING: terminating connection because of crash of another server process
+DETAIL: The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory.
+HINT: In a moment you should be able to reconnect to the database and repeat your command.
+server closed the connection unexpectedly
+ This probably means the server terminated abnormally
+ before or while processing the request.
+connection to server was lost
diff -U3 /tmp/cirrus-ci-build/contrib/pageinspect/expected/gist.out /tmp/cirrus-ci-build/contrib/pageinspect/results/gist.out
--- /tmp/cirrus-ci-build/contrib/pageinspect/expected/gist.out	2024-10-21 09:36:51.113188235 +0000
+++ /tmp/cirrus-ci-build/contrib/pageinspect/results/gist.out	2024-10-21 09:41:37.644635756 +0000
@@ -1,125 +1,2 @@
--- The gist_page_opaque_info() function prints the page's LSN.
--- Use an unlogged index, so that the LSN is predictable.
-CREATE UNLOGGED TABLE test_gist AS SELECT point(i,i) p, i::text t FROM
- generate_series(1,1000) i;
-CREATE INDEX test_gist_idx ON test_gist USING gist (p);
--- Page 0 is the root, the rest are leaf pages
-SELECT * FROM gist_page_opaque_info(get_raw_page('test_gist_idx', 0));
- lsn | nsn | rightlink | flags
------+-----+------------+-------
- 0/1 | 0/0 | 4294967295 | {}
-(1 row)
-
-SELECT * FROM gist_page_opaque_info(get_raw_page('test_gist_idx', 1));
- lsn | nsn | rightlink | flags
------+-----+------------+--------
- 0/1 | 0/0 | 4294967295 | {leaf}
-(1 row)
-
-SELECT * FROM gist_page_opaque_info(get_raw_page('test_gist_idx', 2));
- lsn | nsn | rightlink | flags
------+-----+-----------+--------
- 0/1 | 0/0 | 1 | {leaf}
-(1 row)
-
-SELECT * FROM gist_page_items(get_raw_page('test_gist_idx', 0), 'test_gist_idx');
- itemoffset | ctid | itemlen | dead | keys
-------------+-----------+---------+------+-------------------------------
- 1 | (1,65535) | 40 | f | (p)=("(185,185),(1,1)")
- 2 | (2,65535) | 40 | f | (p)=("(370,370),(186,186)")
- 3 | (3,65535) | 40 | f | (p)=("(555,555),(371,371)")
- 4 | (4,65535) | 40 | f | (p)=("(740,740),(556,556)")
- 5 | (5,65535) | 40 | f | (p)=("(870,870),(741,741)")
- 6 | (6,65535) | 40 | f | (p)=("(1000,1000),(871,871)")
-(6 rows)
-
-SELECT * FROM gist_page_items(get_raw_page('test_gist_idx', 1), 'test_gist_idx') LIMIT 5;
- itemoffset | ctid | itemlen | dead | keys
-------------+-------+---------+------+---------------------
- 1 | (0,1) | 40 | f | (p)=("(1,1),(1,1)")
- 2 | (0,2) | 40 | f | (p)=("(2,2),(2,2)")
- 3 | (0,3) | 40 | f | (p)=("(3,3),(3,3)")
- 4 | (0,4) | 40 | f | (p)=("(4,4),(4,4)")
- 5 | (0,5) | 40 | f | (p)=("(5,5),(5,5)")
-(5 rows)
-
--- gist_page_items_bytea prints the raw key data as a bytea. The output of that is
--- platform-dependent (endianness), so omit the actual key data from the output.
-SELECT itemoffset, ctid, itemlen FROM gist_page_items_bytea(get_raw_page('test_gist_idx', 0));
- itemoffset | ctid | itemlen
-------------+-----------+---------
- 1 | (1,65535) | 40
- 2 | (2,65535) | 40
- 3 | (3,65535) | 40
- 4 | (4,65535) | 40
- 5 | (5,65535) | 40
- 6 | (6,65535) | 40
-(6 rows)
-
--- Suppress the DETAIL message, to allow the tests to work across various
--- page sizes and architectures.
-\set VERBOSITY terse
--- Failures with non-GiST index.
-CREATE INDEX test_gist_btree on test_gist(t);
-SELECT gist_page_items(get_raw_page('test_gist_btree', 0), 'test_gist_btree');
-ERROR: "test_gist_btree" is not a GiST index
-SELECT gist_page_items(get_raw_page('test_gist_btree', 0), 'test_gist_idx');
-ERROR: input page is not a valid GiST page
--- Failure with various modes.
--- invalid page size
-SELECT gist_page_items_bytea('aaa'::bytea);
-ERROR: invalid page size
-SELECT gist_page_items('aaa'::bytea, 'test_gist_idx'::regclass);
-ERROR: invalid page size
-SELECT gist_page_opaque_info('aaa'::bytea);
-ERROR: invalid page size
--- invalid special area size
-SELECT * FROM gist_page_opaque_info(get_raw_page('test_gist', 0));
-ERROR: input page is not a valid GiST page
-SELECT gist_page_items_bytea(get_raw_page('test_gist', 0));
-ERROR: input page is not a valid GiST page
-SELECT gist_page_items_bytea(get_raw_page('test_gist_btree', 0));
-ERROR: input page is not a valid GiST page
-\set VERBOSITY default
--- Tests with all-zero pages.
-SHOW block_size \gset
-SELECT gist_page_items_bytea(decode(repeat('00', :block_size), 'hex'));
- gist_page_items_bytea
------------------------
-(0 rows)
-
-SELECT gist_page_items(decode(repeat('00', :block_size), 'hex'), 'test_gist_idx'::regclass);
- gist_page_items
------------------
-(0 rows)
-
-SELECT gist_page_opaque_info(decode(repeat('00', :block_size), 'hex'));
- gist_page_opaque_info
------------------------
-
-(1 row)
-
--- Test gist_page_items with included columns.
--- Non-leaf pages contain only the key attributes, and leaf pages contain
--- the included attributes.
-ALTER TABLE test_gist ADD COLUMN i int DEFAULT NULL;
-CREATE INDEX test_gist_idx_inc ON test_gist
- USING gist (p) INCLUDE (t, i);
--- Mask the value of the key attribute to avoid alignment issues.
-SELECT regexp_replace(keys, '\(p\)=\("(.*?)"\)', '(p)=("")') AS keys_nonleaf_1
- FROM gist_page_items(get_raw_page('test_gist_idx_inc', 0), 'test_gist_idx_inc')
- WHERE itemoffset = 1;
- keys_nonleaf_1
----------------
- (p)=("")
-(1 row)
-
-SELECT keys AS keys_leaf_1
- FROM gist_page_items(get_raw_page('test_gist_idx_inc', 1), 'test_gist_idx_inc')
- WHERE itemoffset = 1;
- keys_leaf_1
------------------------------------------------------
- (p) INCLUDE (t, i)=("(1,1),(1,1)") INCLUDE (1, null)
-(1 row)
-
-DROP TABLE test_gist;
+psql: error: connection to server on socket "/tmp/pg_regress-6po1UD/.s.PGSQL.65312" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/contrib/pageinspect/expected/hash.out /tmp/cirrus-ci-build/contrib/pageinspect/results/hash.out
--- /tmp/cirrus-ci-build/contrib/pageinspect/expected/hash.out	2024-10-21 09:36:51.113188235 +0000
+++ /tmp/cirrus-ci-build/contrib/pageinspect/results/hash.out	2024-10-21 09:41:37.716635697 +0000
@@ -1,210 +1,2 @@
-CREATE TABLE test_hash (a int, b text);
-INSERT INTO test_hash VALUES (1, 'one');
-CREATE INDEX test_hash_a_idx ON test_hash USING hash (a);
-CREATE TABLE test_hash_part (a int, b int) PARTITION BY RANGE (a);
-CREATE INDEX test_hash_part_idx ON test_hash_part USING hash(b);
-\x
-SELECT hash_page_type(get_raw_page('test_hash_a_idx', 0));
--[ RECORD 1 ]--+---------
-hash_page_type | metapage
-
-SELECT hash_page_type(get_raw_page('test_hash_a_idx', 1));
--[ RECORD 1 ]--+-------
-hash_page_type | bucket
-
-SELECT hash_page_type(get_raw_page('test_hash_a_idx', 2));
--[ RECORD 1 ]--+-------
-hash_page_type | bucket
-
-SELECT hash_page_type(get_raw_page('test_hash_a_idx', 3));
--[ RECORD 1 ]--+-------
-hash_page_type | bucket
-
-SELECT hash_page_type(get_raw_page('test_hash_a_idx', 4));
--[ RECORD 1 ]--+-------
-hash_page_type | bucket
-
-SELECT hash_page_type(get_raw_page('test_hash_a_idx', 5));
--[ RECORD 1 ]--+-------
-hash_page_type | bitmap
-
-SELECT hash_page_type(get_raw_page('test_hash_a_idx', 6));
-ERROR: block number 6 is out of range for relation "test_hash_a_idx"
-SELECT * FROM hash_bitmap_info('test_hash_a_idx', -1);
-ERROR: invalid block number
-SELECT * FROM hash_bitmap_info('test_hash_a_idx', 0);
-ERROR: invalid overflow block number 0
-SELECT * FROM hash_bitmap_info('test_hash_a_idx', 1);
-ERROR: invalid overflow block number 1
-SELECT * FROM hash_bitmap_info('test_hash_a_idx', 2);
-ERROR: invalid overflow block number 2
-SELECT * FROM hash_bitmap_info('test_hash_a_idx', 3);
-ERROR: invalid overflow block number 3
-SELECT * FROM hash_bitmap_info('test_hash_a_idx', 4);
-ERROR: invalid overflow block number 4
-SELECT * FROM hash_bitmap_info('test_hash_a_idx', 
5); -ERROR: invalid overflow block number 5 -SELECT * FROM hash_bitmap_info('test_hash_a_idx', 6); -ERROR: block number 6 is out of range for relation "test_hash_a_idx" -SELECT * FROM hash_bitmap_info('test_hash_part_idx', 1); -- error -ERROR: "test_hash_part_idx" is not a hash index -SELECT magic, version, ntuples, bsize, bmsize, bmshift, maxbucket, highmask, -lowmask, ovflpoint, firstfree, nmaps, procid, spares, mapp FROM -hash_metapage_info(get_raw_page('test_hash_a_idx', 0)); --[ RECORD 1 ]-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -magic | 105121344 -version | 4 -ntuples | 1 -bsize | 8152 -bmsize | 4096 -bmshift | 15 -maxbucket | 3 -highmask | 7 -lowmask | 3 -ovflpoint | 2 -firstfree | 0 -nmaps | 1 -procid | 450 -spares | {0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0} -mapp | 
{5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0} - -SELECT magic, version, ntuples, bsize, bmsize, bmshift, maxbucket, highmask, -lowmask, ovflpoint, firstfree, nmaps, procid, spares, mapp FROM -hash_metapage_info(get_raw_page('test_hash_a_idx', 1)); -ERROR: page is not a hash meta page -SELECT magic, version, ntuples, bsize, bmsize, bmshift, maxbucket, highmask, -lowmask, ovflpoint, firstfree, nmaps, procid, spares, mapp FROM -hash_metapage_info(get_raw_page('test_hash_a_idx', 2)); -ERROR: page is not a hash meta page -SELECT magic, version, ntuples, bsize, bmsize, bmshift, maxbucket, highmask, -lowmask, ovflpoint, firstfree, nmaps, procid, spares, mapp FROM -hash_metapage_info(get_raw_page('test_hash_a_idx', 3)); -ERROR: page is not a hash meta page -SELECT magic, version, ntuples, bsize, bmsize, bmshift, maxbucket, highmask, -lowmask, ovflpoint, firstfree, nmaps, procid, spares, mapp FROM -hash_metapage_info(get_raw_page('test_hash_a_idx', 4)); -ERROR: page is not a hash meta page -SELECT magic, version, ntuples, bsize, bmsize, bmshift, maxbucket, highmask, -lowmask, ovflpoint, firstfree, nmaps, procid, spares, mapp FROM -hash_metapage_info(get_raw_page('test_hash_a_idx', 5)); -ERROR: page is not a hash meta page -SELECT live_items, dead_items, page_size, hasho_prevblkno, hasho_nextblkno, -hasho_bucket, hasho_flag, hasho_page_id FROM -hash_page_stats(get_raw_page('test_hash_a_idx', 0)); -ERROR: page is not a hash bucket or overflow page -SELECT live_items, dead_items, page_size, hasho_prevblkno, hasho_nextblkno, 
-hasho_bucket, hasho_flag, hasho_page_id FROM
-hash_page_stats(get_raw_page('test_hash_a_idx', 1));
--[ RECORD 1 ]---+-----------
-live_items | 0
-dead_items | 0
-page_size | 8192
-hasho_prevblkno | 3
-hasho_nextblkno | 4294967295
-hasho_bucket | 0
-hasho_flag | 2
-hasho_page_id | 65408
-
-SELECT live_items, dead_items, page_size, hasho_prevblkno, hasho_nextblkno,
-hasho_bucket, hasho_flag, hasho_page_id FROM
-hash_page_stats(get_raw_page('test_hash_a_idx', 2));
--[ RECORD 1 ]---+-----------
-live_items | 0
-dead_items | 0
-page_size | 8192
-hasho_prevblkno | 3
-hasho_nextblkno | 4294967295
-hasho_bucket | 1
-hasho_flag | 2
-hasho_page_id | 65408
-
-SELECT live_items, dead_items, page_size, hasho_prevblkno, hasho_nextblkno,
-hasho_bucket, hasho_flag, hasho_page_id FROM
-hash_page_stats(get_raw_page('test_hash_a_idx', 3));
--[ RECORD 1 ]---+-----------
-live_items | 1
-dead_items | 0
-page_size | 8192
-hasho_prevblkno | 3
-hasho_nextblkno | 4294967295
-hasho_bucket | 2
-hasho_flag | 2
-hasho_page_id | 65408
-
-SELECT live_items, dead_items, page_size, hasho_prevblkno, hasho_nextblkno,
-hasho_bucket, hasho_flag, hasho_page_id FROM
-hash_page_stats(get_raw_page('test_hash_a_idx', 4));
--[ RECORD 1 ]---+-----------
-live_items | 0
-dead_items | 0
-page_size | 8192
-hasho_prevblkno | 3
-hasho_nextblkno | 4294967295
-hasho_bucket | 3
-hasho_flag | 2
-hasho_page_id | 65408
-
-SELECT live_items, dead_items, page_size, hasho_prevblkno, hasho_nextblkno,
-hasho_bucket, hasho_flag, hasho_page_id FROM
-hash_page_stats(get_raw_page('test_hash_a_idx', 5));
-ERROR: page is not a hash bucket or overflow page
-SELECT * FROM hash_page_items(get_raw_page('test_hash_a_idx', 0));
-ERROR: page is not a hash bucket or overflow page
-SELECT * FROM hash_page_items(get_raw_page('test_hash_a_idx', 1));
-(0 rows)
-
-SELECT * FROM hash_page_items(get_raw_page('test_hash_a_idx', 2));
-(0 rows)
-
-SELECT * FROM hash_page_items(get_raw_page('test_hash_a_idx', 3));
--[ RECORD 1 ]----------
-itemoffset | 1
-ctid | (0,1)
-data | 2389907270
-
-SELECT * FROM hash_page_items(get_raw_page('test_hash_a_idx', 4));
-(0 rows)
-
-SELECT * FROM hash_page_items(get_raw_page('test_hash_a_idx', 5));
-ERROR: page is not a hash bucket or overflow page
--- Failure with non-hash index
-CREATE INDEX test_hash_a_btree ON test_hash USING btree (a);
-SELECT hash_bitmap_info('test_hash_a_btree', 0);
-ERROR: "test_hash_a_btree" is not a hash index
--- Failure with various modes.
--- Suppress the DETAIL message, to allow the tests to work across various
--- page sizes and architectures.
-\set VERBOSITY terse
--- invalid page size
-SELECT hash_metapage_info('aaa'::bytea);
-ERROR: invalid page size
-SELECT hash_page_items('bbb'::bytea);
-ERROR: invalid page size
-SELECT hash_page_stats('ccc'::bytea);
-ERROR: invalid page size
-SELECT hash_page_type('ddd'::bytea);
-ERROR: invalid page size
--- invalid special area size
-SELECT hash_metapage_info(get_raw_page('test_hash', 0));
-ERROR: input page is not a valid hash page
-SELECT hash_page_items(get_raw_page('test_hash', 0));
-ERROR: input page is not a valid hash page
-SELECT hash_page_stats(get_raw_page('test_hash', 0));
-ERROR: input page is not a valid hash page
-SELECT hash_page_type(get_raw_page('test_hash', 0));
-ERROR: input page is not a valid hash page
-\set VERBOSITY default
--- Tests with all-zero pages.
-SHOW block_size \gset
-SELECT hash_metapage_info(decode(repeat('00', :block_size), 'hex'));
-ERROR: page is not a hash meta page
-SELECT hash_page_items(decode(repeat('00', :block_size), 'hex'));
-ERROR: page is not a hash bucket or overflow page
-SELECT hash_page_stats(decode(repeat('00', :block_size), 'hex'));
-ERROR: page is not a hash bucket or overflow page
-SELECT hash_page_type(decode(repeat('00', :block_size), 'hex'));
--[ RECORD 1 ]--+-------
-hash_page_type | unused
-
-DROP TABLE test_hash;
-DROP TABLE test_hash_part;
+psql: error: connection to server on socket "/tmp/pg_regress-6po1UD/.s.PGSQL.65312" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/contrib/pageinspect/expected/checksum.out /tmp/cirrus-ci-build/contrib/pageinspect/results/checksum.out
--- /tmp/cirrus-ci-build/contrib/pageinspect/expected/checksum.out	2024-10-21 09:36:51.113188235 +0000
+++ /tmp/cirrus-ci-build/contrib/pageinspect/results/checksum.out	2024-10-21 09:41:37.800635628 +0000
@@ -1,40 +1,2 @@
---
--- Verify correct calculation of checksums
---
--- Postgres' checksum algorithm produces different answers on little-endian
--- and big-endian machines. The results of this test also vary depending
--- on the configured block size. This test has several different expected
--- results files to handle the following possibilities:
---
--- BLCKSZ end file
--- 8K LE checksum.out
--- 8K BE checksum_1.out
---
--- In future we might provide additional expected-results files for other
--- block sizes, but there seems little point as long as so many other
--- test scripts also show false failures for non-default block sizes.
---
--- This is to label the results files with blocksize:
-SHOW block_size;
- block_size
-------------
- 8192
-(1 row)
-
-SHOW block_size \gset
--- Apply page_checksum() to some different data patterns and block numbers
-SELECT blkno,
- page_checksum(decode(repeat('01', :block_size), 'hex'), blkno) AS checksum_01,
- page_checksum(decode(repeat('04', :block_size), 'hex'), blkno) AS checksum_04,
- page_checksum(decode(repeat('ff', :block_size), 'hex'), blkno) AS checksum_ff,
- page_checksum(decode(repeat('abcd', :block_size / 2), 'hex'), blkno) AS checksum_abcd,
- page_checksum(decode(repeat('e6d6', :block_size / 2), 'hex'), blkno) AS checksum_e6d6,
- page_checksum(decode(repeat('4a5e', :block_size / 2), 'hex'), blkno) AS checksum_4a5e
- FROM generate_series(0, 100, 50) AS a (blkno);
- blkno | checksum_01 | checksum_04 | checksum_ff | checksum_abcd | checksum_e6d6 | checksum_4a5e
--------+-------------+-------------+-------------+---------------+---------------+---------------
- 0 | 1175 | 28338 | 3612 | -30781 | -16269 | -27377
- 50 | 1225 | 28352 | 3598 | -30795 | -16251 | -27391
- 100 | 1139 | 28438 | 3648 | -30881 | -16305 | -27349
-(3 rows)
-
+psql: error: connection to server on socket "/tmp/pg_regress-6po1UD/.s.PGSQL.65312" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/contrib/pageinspect/expected/oldextversions.out /tmp/cirrus-ci-build/contrib/pageinspect/results/oldextversions.out
--- /tmp/cirrus-ci-build/contrib/pageinspect/expected/oldextversions.out	2024-10-21 09:36:51.113188235 +0000
+++ /tmp/cirrus-ci-build/contrib/pageinspect/results/oldextversions.out	2024-10-21 09:41:37.860635578 +0000
@@ -1,56 +1,2 @@
--- test old extension version entry points
-DROP EXTENSION pageinspect;
-CREATE EXTENSION pageinspect VERSION '1.8';
-CREATE TABLE test1 (a int8, b text);
-INSERT INTO test1 VALUES (72057594037927937, 'text');
-CREATE INDEX test1_a_idx ON test1 USING btree (a);
--- from page.sql
-SELECT octet_length(get_raw_page('test1', 0)) AS main_0;
- main_0
--------
- 8192
-(1 row)
-
-SELECT octet_length(get_raw_page('test1', 'main', 0)) AS main_0;
- main_0
--------
- 8192
-(1 row)
-
-SELECT page_checksum(get_raw_page('test1', 0), 0) IS NOT NULL AS silly_checksum_test;
- silly_checksum_test
---------------------
- t
-(1 row)
-
--- from btree.sql
-SELECT * FROM bt_page_stats('test1_a_idx', 1);
- blkno | type | live_items | dead_items | avg_item_size | page_size | free_size | btpo_prev | btpo_next | btpo | btpo_flags
--------+------+------------+------------+---------------+-----------+-----------+-----------+-----------+------+------------
- 1 | l | 1 | 0 | 16 | 8192 | 8128 | 0 | 0 | 0 | 3
-(1 row)
-
-SELECT * FROM bt_page_items('test1_a_idx', 1);
- itemoffset | ctid | itemlen | nulls | vars | data | dead | htid | tids
-------------+-------+---------+-------+------+-------------------------+------+-------+------
- 1 | (0,1) | 16 | f | f | 01 00 00 00 00 00 00 01 | f | (0,1) |
-(1 row)
-
--- page_header() uses int instead of smallint for lower, upper, special and
--- pagesize in pageinspect >= 1.10.
-ALTER EXTENSION pageinspect UPDATE TO '1.9';
-\df page_header
- List of functions
- Schema | Name | Result data type | Argument data types | Type
---------+-------------+------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------
- public | page_header | record | page bytea, OUT lsn pg_lsn, OUT checksum smallint, OUT flags smallint, OUT lower smallint, OUT upper smallint, OUT special smallint, OUT pagesize smallint, OUT version smallint, OUT prune_xid xid | func
-(1 row)
-
-SELECT pagesize, version FROM page_header(get_raw_page('test1', 0));
- pagesize | version
-----------+---------
- 8192 | 4
-(1 row)
-
-DROP TABLE test1;
-DROP EXTENSION pageinspect;
+psql: error: connection to server on socket "/tmp/pg_regress-6po1UD/.s.PGSQL.65312" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?