diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/brin.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/brin.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/brin.out	2024-03-13 23:12:37.622552000 +0000
+++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/brin.out	2024-03-13 23:14:29.329876000 +0000
@@ -539,42 +539,10 @@
 -- vacuum actually removes the TOAST rows. Creating an index concurrently
 -- is a one way to achieve that, because it does exactly such wait.
 CREATE INDEX CONCURRENTLY brin_test_temp_idx ON brintest_3(a);
-DROP INDEX brin_test_temp_idx;
--- vacuum the table, to discard TOAST data
-VACUUM brintest_3;
--- retry insert with a different random-looking (but deterministic) value
--- the value is different, and so should replace either min or max in the
--- brin summary
-WITH rand_value AS (SELECT string_agg(fipshash((-i)::text),'') AS val FROM generate_series(1,60) s(i))
-INSERT INTO brintest_3
-SELECT val, val, val, val FROM rand_value;
--- now try some queries, accessing the brin index
-SET enable_seqscan = off;
-EXPLAIN (COSTS OFF)
-SELECT * FROM brintest_3 WHERE b < '0';
-                   QUERY PLAN
--------------------------------------------------
- Bitmap Heap Scan on brintest_3
-   Recheck Cond: (b < '0'::text)
-   ->  Bitmap Index Scan on brin_test_toast_idx
-         Index Cond: (b < '0'::text)
-(4 rows)
-
-SELECT * FROM brintest_3 WHERE b < '0';
- a | b | c | d
----+---+---+---
-(0 rows)
-
-DROP TABLE brintest_3;
-RESET enable_seqscan;
--- test an unlogged table, mostly to get coverage of brinbuildempty
-CREATE UNLOGGED TABLE brintest_unlogged (n numrange);
-CREATE INDEX brinidx_unlogged ON brintest_unlogged USING brin (n);
-INSERT INTO brintest_unlogged VALUES (numrange(0, 2^1000::numeric));
-DROP TABLE brintest_unlogged;
--- test that the insert optimization works if no rows end up inserted
-CREATE TABLE brin_insert_optimization (a int);
-INSERT INTO brin_insert_optimization VALUES (1);
-CREATE INDEX ON brin_insert_optimization USING brin (a);
-UPDATE brin_insert_optimization SET a = a;
-DROP TABLE brin_insert_optimization;
+WARNING:  terminating connection because of crash of another server process
+DETAIL:  The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory.
+HINT:  In a moment you should be able to reconnect to the database and repeat your command.
+server closed the connection unexpectedly
+	This probably means the server terminated abnormally
+	before or while processing the request.
+connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/privileges.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/privileges.out --- /tmp/cirrus-ci-build/src/test/regress/expected/privileges.out 2024-03-13 23:12:37.626448000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/privileges.out 2024-03-13 23:14:29.330868000 +0000 @@ -1286,1700 +1286,7 @@ ALTER TABLE test9b ADD COLUMN c priv_testdomain1; ALTER TABLE test9b ALTER COLUMN b TYPE priv_testdomain1; CREATE TYPE test7b AS (a int, b priv_testdomain1); -CREATE TYPE test8b AS (a int, b int); -ALTER TYPE test8b ADD ATTRIBUTE c priv_testdomain1; -ALTER TYPE test8b ALTER ATTRIBUTE b TYPE priv_testdomain1; -CREATE TABLE test11b AS (SELECT 1::priv_testdomain1 AS a); -REVOKE ALL ON TYPE priv_testtype1 FROM PUBLIC; -WARNING: no privileges could be revoked for "priv_testtype1" -\c - -DROP AGGREGATE priv_testagg1b(priv_testdomain1); -DROP DOMAIN priv_testdomain2b; -DROP OPERATOR !! (NONE, priv_testdomain1); -DROP FUNCTION priv_testfunc5b(a priv_testdomain1); -DROP FUNCTION priv_testfunc6b(b int); -DROP TABLE test5b; -DROP TABLE test6b; -DROP TABLE test9b; -DROP TABLE test10b; -DROP TYPE test7b; -DROP TYPE test8b; -DROP CAST (priv_testdomain1 AS priv_testdomain3b); -DROP FUNCTION castfunc(int) CASCADE; -DROP DOMAIN priv_testdomain3b; -DROP TABLE test11b; -DROP TYPE priv_testtype1; -- ok -DROP DOMAIN priv_testdomain1; -- ok --- truncate -SET SESSION AUTHORIZATION regress_priv_user5; -TRUNCATE atest2; -- ok -TRUNCATE atest3; -- fail -ERROR: permission denied for table atest3 --- has_table_privilege function --- bad-input checks -select has_table_privilege(NULL,'pg_authid','select'); - has_table_privilege ---------------------- - -(1 row) - -select has_table_privilege('pg_shad','select'); -ERROR: relation "pg_shad" does not exist -select has_table_privilege('nosuchuser','pg_authid','select'); -ERROR: role "nosuchuser" does not exist -select has_table_privilege('pg_authid','sel'); -ERROR: unrecognized privilege type: "sel" -select has_table_privilege(-999999,'pg_authid','update'); - has_table_privilege ---------------------- - f -(1 row) - -select has_table_privilege(1,'select'); - has_table_privilege ---------------------- - -(1 row) - --- superuser -\c - -select has_table_privilege(current_user,'pg_authid','select'); - has_table_privilege ---------------------- - t -(1 row) - -select has_table_privilege(current_user,'pg_authid','insert'); - has_table_privilege ---------------------- - t -(1 row) - -select has_table_privilege(t2.oid,'pg_authid','update') -from (select oid from pg_roles where rolname = current_user) as t2; - has_table_privilege ---------------------- - t -(1 row) - -select has_table_privilege(t2.oid,'pg_authid','delete') -from (select oid from pg_roles where rolname = current_user) as t2; - has_table_privilege ---------------------- - t -(1 row) - --- 'rule' privilege no longer exists, but for backwards compatibility --- has_table_privilege still recognizes the keyword and says FALSE -select has_table_privilege(current_user,t1.oid,'rule') -from (select oid from pg_class where relname = 'pg_authid') as t1; - has_table_privilege ---------------------- - f -(1 row) - -select has_table_privilege(current_user,t1.oid,'references') -from (select oid from pg_class where relname = 'pg_authid') as t1; - has_table_privilege ---------------------- - t -(1 row) - -select has_table_privilege(t2.oid,t1.oid,'select') -from (select oid from pg_class where relname = 'pg_authid') as t1, - 
(select oid from pg_roles where rolname = current_user) as t2; - has_table_privilege ---------------------- - t -(1 row) - -select has_table_privilege(t2.oid,t1.oid,'insert') -from (select oid from pg_class where relname = 'pg_authid') as t1, - (select oid from pg_roles where rolname = current_user) as t2; - has_table_privilege ---------------------- - t -(1 row) - -select has_table_privilege('pg_authid','update'); - has_table_privilege ---------------------- - t -(1 row) - -select has_table_privilege('pg_authid','delete'); - has_table_privilege ---------------------- - t -(1 row) - -select has_table_privilege('pg_authid','truncate'); - has_table_privilege ---------------------- - t -(1 row) - -select has_table_privilege(t1.oid,'select') -from (select oid from pg_class where relname = 'pg_authid') as t1; - has_table_privilege ---------------------- - t -(1 row) - -select has_table_privilege(t1.oid,'trigger') -from (select oid from pg_class where relname = 'pg_authid') as t1; - has_table_privilege ---------------------- - t -(1 row) - --- non-superuser -SET SESSION AUTHORIZATION regress_priv_user3; -select has_table_privilege(current_user,'pg_class','select'); - has_table_privilege ---------------------- - t -(1 row) - -select has_table_privilege(current_user,'pg_class','insert'); - has_table_privilege ---------------------- - f -(1 row) - -select has_table_privilege(t2.oid,'pg_class','update') -from (select oid from pg_roles where rolname = current_user) as t2; - has_table_privilege ---------------------- - f -(1 row) - -select has_table_privilege(t2.oid,'pg_class','delete') -from (select oid from pg_roles where rolname = current_user) as t2; - has_table_privilege ---------------------- - f -(1 row) - -select has_table_privilege(current_user,t1.oid,'references') -from (select oid from pg_class where relname = 'pg_class') as t1; - has_table_privilege ---------------------- - f -(1 row) - -select has_table_privilege(t2.oid,t1.oid,'select') -from (select oid from pg_class where relname = 'pg_class') as t1, - (select oid from pg_roles where rolname = current_user) as t2; - has_table_privilege ---------------------- - t -(1 row) - -select has_table_privilege(t2.oid,t1.oid,'insert') -from (select oid from pg_class where relname = 'pg_class') as t1, - (select oid from pg_roles where rolname = current_user) as t2; - has_table_privilege ---------------------- - f -(1 row) - -select has_table_privilege('pg_class','update'); - has_table_privilege ---------------------- - f -(1 row) - -select has_table_privilege('pg_class','delete'); - has_table_privilege ---------------------- - f -(1 row) - -select has_table_privilege('pg_class','truncate'); - has_table_privilege ---------------------- - f -(1 row) - -select has_table_privilege(t1.oid,'select') -from (select oid from pg_class where relname = 'pg_class') as t1; - has_table_privilege ---------------------- - t -(1 row) - -select has_table_privilege(t1.oid,'trigger') -from (select oid from pg_class where relname = 'pg_class') as t1; - has_table_privilege ---------------------- - f -(1 row) - -select has_table_privilege(current_user,'atest1','select'); - has_table_privilege ---------------------- - t -(1 row) - -select has_table_privilege(current_user,'atest1','insert'); - has_table_privilege ---------------------- - f -(1 row) - -select has_table_privilege(t2.oid,'atest1','update') -from (select oid from pg_roles where rolname = current_user) as t2; - has_table_privilege ---------------------- - f -(1 row) - -select 
has_table_privilege(t2.oid,'atest1','delete') -from (select oid from pg_roles where rolname = current_user) as t2; - has_table_privilege ---------------------- - f -(1 row) - -select has_table_privilege(current_user,t1.oid,'references') -from (select oid from pg_class where relname = 'atest1') as t1; - has_table_privilege ---------------------- - f -(1 row) - -select has_table_privilege(t2.oid,t1.oid,'select') -from (select oid from pg_class where relname = 'atest1') as t1, - (select oid from pg_roles where rolname = current_user) as t2; - has_table_privilege ---------------------- - t -(1 row) - -select has_table_privilege(t2.oid,t1.oid,'insert') -from (select oid from pg_class where relname = 'atest1') as t1, - (select oid from pg_roles where rolname = current_user) as t2; - has_table_privilege ---------------------- - f -(1 row) - -select has_table_privilege('atest1','update'); - has_table_privilege ---------------------- - f -(1 row) - -select has_table_privilege('atest1','delete'); - has_table_privilege ---------------------- - f -(1 row) - -select has_table_privilege('atest1','truncate'); - has_table_privilege ---------------------- - f -(1 row) - -select has_table_privilege(t1.oid,'select') -from (select oid from pg_class where relname = 'atest1') as t1; - has_table_privilege ---------------------- - t -(1 row) - -select has_table_privilege(t1.oid,'trigger') -from (select oid from pg_class where relname = 'atest1') as t1; - has_table_privilege ---------------------- - f -(1 row) - --- has_column_privilege function --- bad-input checks (as non-super-user) -select has_column_privilege('pg_authid',NULL,'select'); - has_column_privilege ----------------------- - -(1 row) - -select has_column_privilege('pg_authid','nosuchcol','select'); -ERROR: column "nosuchcol" of relation "pg_authid" does not exist -select has_column_privilege(9999,'nosuchcol','select'); - has_column_privilege ----------------------- - -(1 row) - -select has_column_privilege(9999,99::int2,'select'); - has_column_privilege ----------------------- - -(1 row) - -select has_column_privilege('pg_authid',99::int2,'select'); - has_column_privilege ----------------------- - -(1 row) - -select has_column_privilege(9999,99::int2,'select'); - has_column_privilege ----------------------- - -(1 row) - -create temp table mytable(f1 int, f2 int, f3 int); -alter table mytable drop column f2; -select has_column_privilege('mytable','f2','select'); -ERROR: column "f2" of relation "mytable" does not exist -select has_column_privilege('mytable','........pg.dropped.2........','select'); - has_column_privilege ----------------------- - -(1 row) - -select has_column_privilege('mytable',2::int2,'select'); - has_column_privilege ----------------------- - -(1 row) - -select has_column_privilege('mytable',99::int2,'select'); - has_column_privilege ----------------------- - -(1 row) - -revoke select on table mytable from regress_priv_user3; -select has_column_privilege('mytable',2::int2,'select'); - has_column_privilege ----------------------- - -(1 row) - -select has_column_privilege('mytable',99::int2,'select'); - has_column_privilege ----------------------- - -(1 row) - -drop table mytable; --- Grant options -SET SESSION AUTHORIZATION regress_priv_user1; -CREATE TABLE atest4 (a int); -GRANT SELECT ON atest4 TO regress_priv_user2 WITH GRANT OPTION; -GRANT UPDATE ON atest4 TO regress_priv_user2; -GRANT SELECT ON atest4 TO GROUP regress_priv_group1 WITH GRANT OPTION; -SET SESSION AUTHORIZATION regress_priv_user2; -GRANT SELECT ON atest4 TO 
regress_priv_user3; -GRANT UPDATE ON atest4 TO regress_priv_user3; -- fail -WARNING: no privileges were granted for "atest4" -SET SESSION AUTHORIZATION regress_priv_user1; -REVOKE SELECT ON atest4 FROM regress_priv_user3; -- does nothing -SELECT has_table_privilege('regress_priv_user3', 'atest4', 'SELECT'); -- true - has_table_privilege ---------------------- - t -(1 row) - -REVOKE SELECT ON atest4 FROM regress_priv_user2; -- fail -ERROR: dependent privileges exist -HINT: Use CASCADE to revoke them too. -REVOKE GRANT OPTION FOR SELECT ON atest4 FROM regress_priv_user2 CASCADE; -- ok -SELECT has_table_privilege('regress_priv_user2', 'atest4', 'SELECT'); -- true - has_table_privilege ---------------------- - t -(1 row) - -SELECT has_table_privilege('regress_priv_user3', 'atest4', 'SELECT'); -- false - has_table_privilege ---------------------- - f -(1 row) - -SELECT has_table_privilege('regress_priv_user1', 'atest4', 'SELECT WITH GRANT OPTION'); -- true - has_table_privilege ---------------------- - t -(1 row) - --- security-restricted operations -\c - -CREATE ROLE regress_sro_user; --- Check that index expressions and predicates are run as the table's owner --- A dummy index function checking current_user -CREATE FUNCTION sro_ifun(int) RETURNS int AS $$ -BEGIN - -- Below we set the table's owner to regress_sro_user - ASSERT current_user = 'regress_sro_user', - format('sro_ifun(%s) called by %s', $1, current_user); - RETURN $1; -END; -$$ LANGUAGE plpgsql IMMUTABLE; --- Create a table owned by regress_sro_user -CREATE TABLE sro_tab (a int); -ALTER TABLE sro_tab OWNER TO regress_sro_user; -INSERT INTO sro_tab VALUES (1), (2), (3); --- Create an expression index with a predicate -CREATE INDEX sro_idx ON sro_tab ((sro_ifun(a) + sro_ifun(0))) - WHERE sro_ifun(a + 10) > sro_ifun(10); -DROP INDEX sro_idx; --- Do the same concurrently -CREATE INDEX CONCURRENTLY sro_idx ON sro_tab ((sro_ifun(a) + sro_ifun(0))) - WHERE sro_ifun(a + 10) > sro_ifun(10); --- REINDEX -REINDEX TABLE sro_tab; -REINDEX INDEX sro_idx; -REINDEX TABLE CONCURRENTLY sro_tab; -DROP INDEX sro_idx; --- CLUSTER -CREATE INDEX sro_cluster_idx ON sro_tab ((sro_ifun(a) + sro_ifun(0))); -CLUSTER sro_tab USING sro_cluster_idx; -DROP INDEX sro_cluster_idx; --- BRIN index -CREATE INDEX sro_brin ON sro_tab USING brin ((sro_ifun(a) + sro_ifun(0))); -SELECT brin_desummarize_range('sro_brin', 0); - brin_desummarize_range ------------------------- - -(1 row) - -SELECT brin_summarize_range('sro_brin', 0); - brin_summarize_range ----------------------- - 1 -(1 row) - -DROP TABLE sro_tab; --- Check with a partitioned table -CREATE TABLE sro_ptab (a int) PARTITION BY RANGE (a); -ALTER TABLE sro_ptab OWNER TO regress_sro_user; -CREATE TABLE sro_part PARTITION OF sro_ptab FOR VALUES FROM (1) TO (10); -ALTER TABLE sro_part OWNER TO regress_sro_user; -INSERT INTO sro_ptab VALUES (1), (2), (3); -CREATE INDEX sro_pidx ON sro_ptab ((sro_ifun(a) + sro_ifun(0))) - WHERE sro_ifun(a + 10) > sro_ifun(10); -REINDEX TABLE sro_ptab; -REINDEX INDEX CONCURRENTLY sro_pidx; -SET SESSION AUTHORIZATION regress_sro_user; -CREATE FUNCTION unwanted_grant() RETURNS void LANGUAGE sql AS - 'GRANT regress_priv_group2 TO regress_sro_user'; -CREATE FUNCTION mv_action() RETURNS bool LANGUAGE sql AS - 'DECLARE c CURSOR WITH HOLD FOR SELECT public.unwanted_grant(); SELECT true'; --- REFRESH of this MV will queue a GRANT at end of transaction -CREATE MATERIALIZED VIEW sro_mv AS SELECT mv_action() WITH NO DATA; -REFRESH MATERIALIZED VIEW sro_mv; -ERROR: cannot create a cursor WITH 
HOLD within security-restricted operation -CONTEXT: SQL function "mv_action" statement 1 -\c - -REFRESH MATERIALIZED VIEW sro_mv; -ERROR: cannot create a cursor WITH HOLD within security-restricted operation -CONTEXT: SQL function "mv_action" statement 1 -SET SESSION AUTHORIZATION regress_sro_user; --- INSERT to this table will queue a GRANT at end of transaction -CREATE TABLE sro_trojan_table (); -CREATE FUNCTION sro_trojan() RETURNS trigger LANGUAGE plpgsql AS - 'BEGIN PERFORM public.unwanted_grant(); RETURN NULL; END'; -CREATE CONSTRAINT TRIGGER t AFTER INSERT ON sro_trojan_table - INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE sro_trojan(); --- Now, REFRESH will issue such an INSERT, queueing the GRANT -CREATE OR REPLACE FUNCTION mv_action() RETURNS bool LANGUAGE sql AS - 'INSERT INTO public.sro_trojan_table DEFAULT VALUES; SELECT true'; -REFRESH MATERIALIZED VIEW sro_mv; -ERROR: cannot fire deferred trigger within security-restricted operation -CONTEXT: SQL function "mv_action" statement 1 -\c - -REFRESH MATERIALIZED VIEW sro_mv; -ERROR: cannot fire deferred trigger within security-restricted operation -CONTEXT: SQL function "mv_action" statement 1 -BEGIN; SET CONSTRAINTS ALL IMMEDIATE; REFRESH MATERIALIZED VIEW sro_mv; COMMIT; -ERROR: permission denied to grant role "regress_priv_group2" -DETAIL: Only roles with the ADMIN option on role "regress_priv_group2" may grant this role. -CONTEXT: SQL function "unwanted_grant" statement 1 -SQL statement "SELECT public.unwanted_grant()" -PL/pgSQL function public.sro_trojan() line 1 at PERFORM -SQL function "mv_action" statement 1 --- REFRESH MATERIALIZED VIEW CONCURRENTLY use of eval_const_expressions() -SET SESSION AUTHORIZATION regress_sro_user; -CREATE FUNCTION unwanted_grant_nofail(int) RETURNS int - IMMUTABLE LANGUAGE plpgsql AS $$ -BEGIN - PERFORM public.unwanted_grant(); - RAISE WARNING 'owned'; - RETURN 1; -EXCEPTION WHEN OTHERS THEN - RETURN 2; -END$$; -CREATE MATERIALIZED VIEW sro_index_mv AS SELECT 1 AS c; -CREATE UNIQUE INDEX ON sro_index_mv (c) WHERE unwanted_grant_nofail(1) > 0; -\c - -REFRESH MATERIALIZED VIEW CONCURRENTLY sro_index_mv; -REFRESH MATERIALIZED VIEW sro_index_mv; -DROP OWNED BY regress_sro_user; -DROP ROLE regress_sro_user; --- Admin options -SET SESSION AUTHORIZATION regress_priv_user4; -CREATE FUNCTION dogrant_ok() RETURNS void LANGUAGE sql SECURITY DEFINER AS - 'GRANT regress_priv_group2 TO regress_priv_user5'; -GRANT regress_priv_group2 TO regress_priv_user5; -- ok: had ADMIN OPTION -SET ROLE regress_priv_group2; -GRANT regress_priv_group2 TO regress_priv_user5; -- fails: SET ROLE suspended privilege -ERROR: permission denied to grant role "regress_priv_group2" -DETAIL: Only roles with the ADMIN option on role "regress_priv_group2" may grant this role. -SET SESSION AUTHORIZATION regress_priv_user1; -GRANT regress_priv_group2 TO regress_priv_user5; -- fails: no ADMIN OPTION -ERROR: permission denied to grant role "regress_priv_group2" -DETAIL: Only roles with the ADMIN option on role "regress_priv_group2" may grant this role. 
-SELECT dogrant_ok(); -- ok: SECURITY DEFINER conveys ADMIN -NOTICE: role "regress_priv_user5" has already been granted membership in role "regress_priv_group2" by role "regress_priv_user4" - dogrant_ok ------------- - -(1 row) - -SET ROLE regress_priv_group2; -GRANT regress_priv_group2 TO regress_priv_user5; -- fails: SET ROLE did not help -ERROR: permission denied to grant role "regress_priv_group2" -DETAIL: Only roles with the ADMIN option on role "regress_priv_group2" may grant this role. -SET SESSION AUTHORIZATION regress_priv_group2; -GRANT regress_priv_group2 TO regress_priv_user5; -- fails: no self-admin -ERROR: permission denied to grant role "regress_priv_group2" -DETAIL: Only roles with the ADMIN option on role "regress_priv_group2" may grant this role. -SET SESSION AUTHORIZATION regress_priv_user4; -DROP FUNCTION dogrant_ok(); -REVOKE regress_priv_group2 FROM regress_priv_user5; --- has_sequence_privilege tests -\c - -CREATE SEQUENCE x_seq; -GRANT USAGE on x_seq to regress_priv_user2; -SELECT has_sequence_privilege('regress_priv_user1', 'atest1', 'SELECT'); -ERROR: "atest1" is not a sequence -SELECT has_sequence_privilege('regress_priv_user1', 'x_seq', 'INSERT'); -ERROR: unrecognized privilege type: "INSERT" -SELECT has_sequence_privilege('regress_priv_user1', 'x_seq', 'SELECT'); - has_sequence_privilege ------------------------- - f -(1 row) - -SET SESSION AUTHORIZATION regress_priv_user2; -SELECT has_sequence_privilege('x_seq', 'USAGE'); - has_sequence_privilege ------------------------- - t -(1 row) - --- largeobject privilege tests -\c - -SET SESSION AUTHORIZATION regress_priv_user1; -SELECT lo_create(1001); - lo_create ------------ - 1001 -(1 row) - -SELECT lo_create(1002); - lo_create ------------ - 1002 -(1 row) - -SELECT lo_create(1003); - lo_create ------------ - 1003 -(1 row) - -SELECT lo_create(1004); - lo_create ------------ - 1004 -(1 row) - -SELECT lo_create(1005); - lo_create ------------ - 1005 -(1 row) - -GRANT ALL ON LARGE OBJECT 1001 TO PUBLIC; -GRANT SELECT ON LARGE OBJECT 1003 TO regress_priv_user2; -GRANT SELECT,UPDATE ON LARGE OBJECT 1004 TO regress_priv_user2; -GRANT ALL ON LARGE OBJECT 1005 TO regress_priv_user2; -GRANT SELECT ON LARGE OBJECT 1005 TO regress_priv_user2 WITH GRANT OPTION; -GRANT SELECT, INSERT ON LARGE OBJECT 1001 TO PUBLIC; -- to be failed -ERROR: invalid privilege type INSERT for large object -GRANT SELECT, UPDATE ON LARGE OBJECT 1001 TO nosuchuser; -- to be failed -ERROR: role "nosuchuser" does not exist -GRANT SELECT, UPDATE ON LARGE OBJECT 999 TO PUBLIC; -- to be failed -ERROR: large object 999 does not exist -\c - -SET SESSION AUTHORIZATION regress_priv_user2; -SELECT lo_create(2001); - lo_create ------------ - 2001 -(1 row) - -SELECT lo_create(2002); - lo_create ------------ - 2002 -(1 row) - -SELECT loread(lo_open(1001, x'20000'::int), 32); -- allowed, for now - loread --------- - \x -(1 row) - -SELECT lowrite(lo_open(1001, x'40000'::int), 'abcd'); -- fail, wrong mode -ERROR: large object descriptor 0 was not opened for writing -SELECT loread(lo_open(1001, x'40000'::int), 32); - loread --------- - \x -(1 row) - -SELECT loread(lo_open(1002, x'40000'::int), 32); -- to be denied -ERROR: permission denied for large object 1002 -SELECT loread(lo_open(1003, x'40000'::int), 32); - loread --------- - \x -(1 row) - -SELECT loread(lo_open(1004, x'40000'::int), 32); - loread --------- - \x -(1 row) - -SELECT lowrite(lo_open(1001, x'20000'::int), 'abcd'); - lowrite ---------- - 4 -(1 row) - -SELECT lowrite(lo_open(1002, x'20000'::int), 
'abcd'); -- to be denied -ERROR: permission denied for large object 1002 -SELECT lowrite(lo_open(1003, x'20000'::int), 'abcd'); -- to be denied -ERROR: permission denied for large object 1003 -SELECT lowrite(lo_open(1004, x'20000'::int), 'abcd'); - lowrite ---------- - 4 -(1 row) - -GRANT SELECT ON LARGE OBJECT 1005 TO regress_priv_user3; -GRANT UPDATE ON LARGE OBJECT 1006 TO regress_priv_user3; -- to be denied -ERROR: large object 1006 does not exist -REVOKE ALL ON LARGE OBJECT 2001, 2002 FROM PUBLIC; -GRANT ALL ON LARGE OBJECT 2001 TO regress_priv_user3; -SELECT lo_unlink(1001); -- to be denied -ERROR: must be owner of large object 1001 -SELECT lo_unlink(2002); - lo_unlink ------------ - 1 -(1 row) - -\c - --- confirm ACL setting -SELECT oid, pg_get_userbyid(lomowner) ownername, lomacl FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid; - oid | ownername | lomacl -------+--------------------+------------------------------------------------------------------------------------------------------------------------------ - 1001 | regress_priv_user1 | {regress_priv_user1=rw/regress_priv_user1,=rw/regress_priv_user1} - 1002 | regress_priv_user1 | - 1003 | regress_priv_user1 | {regress_priv_user1=rw/regress_priv_user1,regress_priv_user2=r/regress_priv_user1} - 1004 | regress_priv_user1 | {regress_priv_user1=rw/regress_priv_user1,regress_priv_user2=rw/regress_priv_user1} - 1005 | regress_priv_user1 | {regress_priv_user1=rw/regress_priv_user1,regress_priv_user2=r*w/regress_priv_user1,regress_priv_user3=r/regress_priv_user2} - 2001 | regress_priv_user2 | {regress_priv_user2=rw/regress_priv_user2,regress_priv_user3=rw/regress_priv_user2} -(6 rows) - -SET SESSION AUTHORIZATION regress_priv_user3; -SELECT loread(lo_open(1001, x'40000'::int), 32); - loread ------------- - \x61626364 -(1 row) - -SELECT loread(lo_open(1003, x'40000'::int), 32); -- to be denied -ERROR: permission denied for large object 1003 -SELECT loread(lo_open(1005, x'40000'::int), 32); - loread --------- - \x -(1 row) - -SELECT lo_truncate(lo_open(1005, x'20000'::int), 10); -- to be denied -ERROR: permission denied for large object 1005 -SELECT lo_truncate(lo_open(2001, x'20000'::int), 10); - lo_truncate -------------- - 0 -(1 row) - --- compatibility mode in largeobject permission -\c - -SET lo_compat_privileges = false; -- default setting -SET SESSION AUTHORIZATION regress_priv_user4; -SELECT loread(lo_open(1002, x'40000'::int), 32); -- to be denied -ERROR: permission denied for large object 1002 -SELECT lowrite(lo_open(1002, x'20000'::int), 'abcd'); -- to be denied -ERROR: permission denied for large object 1002 -SELECT lo_truncate(lo_open(1002, x'20000'::int), 10); -- to be denied -ERROR: permission denied for large object 1002 -SELECT lo_put(1002, 1, 'abcd'); -- to be denied -ERROR: permission denied for large object 1002 -SELECT lo_unlink(1002); -- to be denied -ERROR: must be owner of large object 1002 -SELECT lo_export(1001, '/dev/null'); -- to be denied -ERROR: permission denied for function lo_export -SELECT lo_import('/dev/null'); -- to be denied -ERROR: permission denied for function lo_import -SELECT lo_import('/dev/null', 2003); -- to be denied -ERROR: permission denied for function lo_import -\c - -SET lo_compat_privileges = true; -- compatibility mode -SET SESSION AUTHORIZATION regress_priv_user4; -SELECT loread(lo_open(1002, x'40000'::int), 32); - loread --------- - \x -(1 row) - -SELECT lowrite(lo_open(1002, x'20000'::int), 'abcd'); - lowrite ---------- - 4 -(1 row) - -SELECT 
lo_truncate(lo_open(1002, x'20000'::int), 10); - lo_truncate -------------- - 0 -(1 row) - -SELECT lo_unlink(1002); - lo_unlink ------------ - 1 -(1 row) - -SELECT lo_export(1001, '/dev/null'); -- to be denied -ERROR: permission denied for function lo_export --- don't allow unpriv users to access pg_largeobject contents -\c - -SELECT * FROM pg_largeobject LIMIT 0; - loid | pageno | data -------+--------+------ -(0 rows) - -SET SESSION AUTHORIZATION regress_priv_user1; -SELECT * FROM pg_largeobject LIMIT 0; -- to be denied -ERROR: permission denied for table pg_largeobject --- pg_signal_backend can't signal superusers -RESET SESSION AUTHORIZATION; -BEGIN; -CREATE OR REPLACE FUNCTION terminate_nothrow(pid int) RETURNS bool - LANGUAGE plpgsql SECURITY DEFINER SET client_min_messages = error AS $$ -BEGIN - RETURN pg_terminate_backend($1); -EXCEPTION WHEN OTHERS THEN - RETURN false; -END$$; -ALTER FUNCTION terminate_nothrow OWNER TO pg_signal_backend; -SELECT backend_type FROM pg_stat_activity -WHERE CASE WHEN COALESCE(usesysid, 10) = 10 THEN terminate_nothrow(pid) END; - backend_type --------------- -(0 rows) - -ROLLBACK; --- test pg_database_owner -RESET SESSION AUTHORIZATION; -GRANT pg_database_owner TO regress_priv_user1; -ERROR: role "pg_database_owner" cannot have explicit members -GRANT regress_priv_user1 TO pg_database_owner; -ERROR: role "pg_database_owner" cannot be a member of any role -CREATE TABLE datdba_only (); -ALTER TABLE datdba_only OWNER TO pg_database_owner; -REVOKE DELETE ON datdba_only FROM pg_database_owner; -SELECT - pg_has_role('regress_priv_user1', 'pg_database_owner', 'USAGE') as priv, - pg_has_role('regress_priv_user1', 'pg_database_owner', 'MEMBER') as mem, - pg_has_role('regress_priv_user1', 'pg_database_owner', - 'MEMBER WITH ADMIN OPTION') as admin; - priv | mem | admin -------+-----+------- - f | f | f -(1 row) - -BEGIN; -DO $$BEGIN EXECUTE format( - 'ALTER DATABASE %I OWNER TO regress_priv_group2', current_catalog); END$$; -SELECT - pg_has_role('regress_priv_user1', 'pg_database_owner', 'USAGE') as priv, - pg_has_role('regress_priv_user1', 'pg_database_owner', 'MEMBER') as mem, - pg_has_role('regress_priv_user1', 'pg_database_owner', - 'MEMBER WITH ADMIN OPTION') as admin; - priv | mem | admin -------+-----+------- - t | t | f -(1 row) - -SET SESSION AUTHORIZATION regress_priv_user1; -TABLE information_schema.enabled_roles ORDER BY role_name COLLATE "C"; - role_name ---------------------- - pg_database_owner - regress_priv_group2 - regress_priv_user1 -(3 rows) - -TABLE information_schema.applicable_roles ORDER BY role_name COLLATE "C"; - grantee | role_name | is_grantable ----------------------+---------------------+-------------- - regress_priv_group2 | pg_database_owner | NO - regress_priv_user1 | regress_priv_group2 | NO -(2 rows) - -INSERT INTO datdba_only DEFAULT VALUES; -SAVEPOINT q; DELETE FROM datdba_only; ROLLBACK TO q; -ERROR: permission denied for table datdba_only -SET SESSION AUTHORIZATION regress_priv_user2; -TABLE information_schema.enabled_roles; - role_name --------------------- - regress_priv_user2 -(1 row) - -INSERT INTO datdba_only DEFAULT VALUES; -ERROR: permission denied for table datdba_only -ROLLBACK; --- test default ACLs -\c - -CREATE SCHEMA testns; -GRANT ALL ON SCHEMA testns TO regress_priv_user1; -CREATE TABLE testns.acltest1 (x int); -SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- no - has_table_privilege ---------------------- - f -(1 row) - -SELECT has_table_privilege('regress_priv_user1', 
'testns.acltest1', 'INSERT'); -- no - has_table_privilege ---------------------- - f -(1 row) - --- placeholder for test with duplicated schema and role names -ALTER DEFAULT PRIVILEGES IN SCHEMA testns,testns GRANT SELECT ON TABLES TO public,public; -SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- no - has_table_privilege ---------------------- - f -(1 row) - -SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no - has_table_privilege ---------------------- - f -(1 row) - -DROP TABLE testns.acltest1; -CREATE TABLE testns.acltest1 (x int); -SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- yes - has_table_privilege ---------------------- - t -(1 row) - -SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no - has_table_privilege ---------------------- - f -(1 row) - -ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT INSERT ON TABLES TO regress_priv_user1; -DROP TABLE testns.acltest1; -CREATE TABLE testns.acltest1 (x int); -SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- yes - has_table_privilege ---------------------- - t -(1 row) - -SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- yes - has_table_privilege ---------------------- - t -(1 row) - -ALTER DEFAULT PRIVILEGES IN SCHEMA testns REVOKE INSERT ON TABLES FROM regress_priv_user1; -DROP TABLE testns.acltest1; -CREATE TABLE testns.acltest1 (x int); -SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- yes - has_table_privilege ---------------------- - t -(1 row) - -SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no - has_table_privilege ---------------------- - f -(1 row) - -ALTER DEFAULT PRIVILEGES FOR ROLE regress_priv_user1 REVOKE EXECUTE ON FUNCTIONS FROM public; -ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT USAGE ON SCHEMAS TO regress_priv_user2; -- error -ERROR: cannot use IN SCHEMA clause when using GRANT/REVOKE ON SCHEMAS --- Test makeaclitem() -SELECT makeaclitem('regress_priv_user1'::regrole, 'regress_priv_user2'::regrole, - 'SELECT', TRUE); -- single privilege - makeaclitem ------------------------------------------- - regress_priv_user1=r*/regress_priv_user2 -(1 row) - -SELECT makeaclitem('regress_priv_user1'::regrole, 'regress_priv_user2'::regrole, - 'SELECT, INSERT, UPDATE , DELETE ', FALSE); -- multiple privileges - makeaclitem --------------------------------------------- - regress_priv_user1=arwd/regress_priv_user2 -(1 row) - -SELECT makeaclitem('regress_priv_user1'::regrole, 'regress_priv_user2'::regrole, - 'SELECT, fake_privilege', FALSE); -- error -ERROR: unrecognized privilege type: "fake_privilege" --- Test non-throwing aclitem I/O -SELECT pg_input_is_valid('regress_priv_user1=r/regress_priv_user2', 'aclitem'); - pg_input_is_valid -------------------- - t -(1 row) - -SELECT pg_input_is_valid('regress_priv_user1=r/', 'aclitem'); - pg_input_is_valid -------------------- - f -(1 row) - -SELECT * FROM pg_input_error_info('regress_priv_user1=r/', 'aclitem'); - message | detail | hint | sql_error_code ----------------------------------+--------+------+---------------- - a name must follow the "/" sign | | | 22P02 -(1 row) - -SELECT pg_input_is_valid('regress_priv_user1=r/regress_no_such_user', 'aclitem'); - pg_input_is_valid -------------------- - f -(1 row) - -SELECT * FROM pg_input_error_info('regress_priv_user1=r/regress_no_such_user', 'aclitem'); - message | detail 
| hint | sql_error_code ---------------------------------------------+--------+------+---------------- - role "regress_no_such_user" does not exist | | | 42704 -(1 row) - -SELECT pg_input_is_valid('regress_priv_user1=rY', 'aclitem'); - pg_input_is_valid -------------------- - f -(1 row) - -SELECT * FROM pg_input_error_info('regress_priv_user1=rY', 'aclitem'); - message | detail | hint | sql_error_code -----------------------------------------------------------+--------+------+---------------- - invalid mode character: must be one of "arwdDxtXUCTcsAm" | | | 22P02 -(1 row) - --- --- Testing blanket default grants is very hazardous since it might change --- the privileges attached to objects created by concurrent regression tests. --- To avoid that, be sure to revoke the privileges again before committing. --- -BEGIN; -ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO regress_priv_user2; -CREATE SCHEMA testns2; -SELECT has_schema_privilege('regress_priv_user2', 'testns2', 'USAGE'); -- yes - has_schema_privilege ----------------------- - t -(1 row) - -SELECT has_schema_privilege('regress_priv_user6', 'testns2', 'USAGE'); -- yes - has_schema_privilege ----------------------- - t -(1 row) - -SELECT has_schema_privilege('regress_priv_user2', 'testns2', 'CREATE'); -- no - has_schema_privilege ----------------------- - f -(1 row) - -ALTER DEFAULT PRIVILEGES REVOKE USAGE ON SCHEMAS FROM regress_priv_user2; -CREATE SCHEMA testns3; -SELECT has_schema_privilege('regress_priv_user2', 'testns3', 'USAGE'); -- no - has_schema_privilege ----------------------- - f -(1 row) - -SELECT has_schema_privilege('regress_priv_user2', 'testns3', 'CREATE'); -- no - has_schema_privilege ----------------------- - f -(1 row) - -ALTER DEFAULT PRIVILEGES GRANT ALL ON SCHEMAS TO regress_priv_user2; -CREATE SCHEMA testns4; -SELECT has_schema_privilege('regress_priv_user2', 'testns4', 'USAGE'); -- yes - has_schema_privilege ----------------------- - t -(1 row) - -SELECT has_schema_privilege('regress_priv_user2', 'testns4', 'CREATE'); -- yes - has_schema_privilege ----------------------- - t -(1 row) - -ALTER DEFAULT PRIVILEGES REVOKE ALL ON SCHEMAS FROM regress_priv_user2; -COMMIT; --- Test for DROP OWNED BY with shared dependencies. This is done in a --- separate, rollbacked, transaction to avoid any trouble with other --- regression sessions. 
-BEGIN; -ALTER DEFAULT PRIVILEGES GRANT ALL ON FUNCTIONS TO regress_priv_user2; -ALTER DEFAULT PRIVILEGES GRANT ALL ON SCHEMAS TO regress_priv_user2; -ALTER DEFAULT PRIVILEGES GRANT ALL ON SEQUENCES TO regress_priv_user2; -ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO regress_priv_user2; -ALTER DEFAULT PRIVILEGES GRANT ALL ON TYPES TO regress_priv_user2; -SELECT count(*) FROM pg_shdepend - WHERE deptype = 'a' AND - refobjid = 'regress_priv_user2'::regrole AND - classid = 'pg_default_acl'::regclass; - count -------- - 5 -(1 row) - -DROP OWNED BY regress_priv_user2, regress_priv_user2; -SELECT count(*) FROM pg_shdepend - WHERE deptype = 'a' AND - refobjid = 'regress_priv_user2'::regrole AND - classid = 'pg_default_acl'::regclass; - count -------- - 0 -(1 row) - -ROLLBACK; -CREATE SCHEMA testns5; -SELECT has_schema_privilege('regress_priv_user2', 'testns5', 'USAGE'); -- no - has_schema_privilege ----------------------- - f -(1 row) - -SELECT has_schema_privilege('regress_priv_user2', 'testns5', 'CREATE'); -- no - has_schema_privilege ----------------------- - f -(1 row) - -SET ROLE regress_priv_user1; -CREATE FUNCTION testns.foo() RETURNS int AS 'select 1' LANGUAGE sql; -CREATE AGGREGATE testns.agg1(int) (sfunc = int4pl, stype = int4); -CREATE PROCEDURE testns.bar() AS 'select 1' LANGUAGE sql; -SELECT has_function_privilege('regress_priv_user2', 'testns.foo()', 'EXECUTE'); -- no - has_function_privilege ------------------------- - f -(1 row) - -SELECT has_function_privilege('regress_priv_user2', 'testns.agg1(int)', 'EXECUTE'); -- no - has_function_privilege ------------------------- - f -(1 row) - -SELECT has_function_privilege('regress_priv_user2', 'testns.bar()', 'EXECUTE'); -- no - has_function_privilege ------------------------- - f -(1 row) - -ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT EXECUTE ON ROUTINES to public; -DROP FUNCTION testns.foo(); -CREATE FUNCTION testns.foo() RETURNS int AS 'select 1' LANGUAGE sql; -DROP AGGREGATE testns.agg1(int); -CREATE AGGREGATE testns.agg1(int) (sfunc = int4pl, stype = int4); -DROP PROCEDURE testns.bar(); -CREATE PROCEDURE testns.bar() AS 'select 1' LANGUAGE sql; -SELECT has_function_privilege('regress_priv_user2', 'testns.foo()', 'EXECUTE'); -- yes - has_function_privilege ------------------------- - t -(1 row) - -SELECT has_function_privilege('regress_priv_user2', 'testns.agg1(int)', 'EXECUTE'); -- yes - has_function_privilege ------------------------- - t -(1 row) - -SELECT has_function_privilege('regress_priv_user2', 'testns.bar()', 'EXECUTE'); -- yes (counts as function here) - has_function_privilege ------------------------- - t -(1 row) - -DROP FUNCTION testns.foo(); -DROP AGGREGATE testns.agg1(int); -DROP PROCEDURE testns.bar(); -ALTER DEFAULT PRIVILEGES FOR ROLE regress_priv_user1 REVOKE USAGE ON TYPES FROM public; -CREATE DOMAIN testns.priv_testdomain1 AS int; -SELECT has_type_privilege('regress_priv_user2', 'testns.priv_testdomain1', 'USAGE'); -- no - has_type_privilege --------------------- - f -(1 row) - -ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT USAGE ON TYPES to public; -DROP DOMAIN testns.priv_testdomain1; -CREATE DOMAIN testns.priv_testdomain1 AS int; -SELECT has_type_privilege('regress_priv_user2', 'testns.priv_testdomain1', 'USAGE'); -- yes - has_type_privilege --------------------- - t -(1 row) - -DROP DOMAIN testns.priv_testdomain1; -RESET ROLE; -SELECT count(*) - FROM pg_default_acl d LEFT JOIN pg_namespace n ON defaclnamespace = n.oid - WHERE nspname = 'testns'; - count -------- - 3 -(1 row) - -DROP SCHEMA testns 
CASCADE; -NOTICE: drop cascades to table testns.acltest1 -DROP SCHEMA testns2 CASCADE; -DROP SCHEMA testns3 CASCADE; -DROP SCHEMA testns4 CASCADE; -DROP SCHEMA testns5 CASCADE; -SELECT d.* -- check that entries went away - FROM pg_default_acl d LEFT JOIN pg_namespace n ON defaclnamespace = n.oid - WHERE nspname IS NULL AND defaclnamespace != 0; - oid | defaclrole | defaclnamespace | defaclobjtype | defaclacl ------+------------+-----------------+---------------+----------- -(0 rows) - --- Grant on all objects of given type in a schema -\c - -CREATE SCHEMA testns; -CREATE TABLE testns.t1 (f1 int); -CREATE TABLE testns.t2 (f1 int); -SELECT has_table_privilege('regress_priv_user1', 'testns.t1', 'SELECT'); -- false - has_table_privilege ---------------------- - f -(1 row) - -GRANT ALL ON ALL TABLES IN SCHEMA testns TO regress_priv_user1; -SELECT has_table_privilege('regress_priv_user1', 'testns.t1', 'SELECT'); -- true - has_table_privilege ---------------------- - t -(1 row) - -SELECT has_table_privilege('regress_priv_user1', 'testns.t2', 'SELECT'); -- true - has_table_privilege ---------------------- - t -(1 row) - -REVOKE ALL ON ALL TABLES IN SCHEMA testns FROM regress_priv_user1; -SELECT has_table_privilege('regress_priv_user1', 'testns.t1', 'SELECT'); -- false - has_table_privilege ---------------------- - f -(1 row) - -SELECT has_table_privilege('regress_priv_user1', 'testns.t2', 'SELECT'); -- false - has_table_privilege ---------------------- - f -(1 row) - -CREATE FUNCTION testns.priv_testfunc(int) RETURNS int AS 'select 3 * $1;' LANGUAGE sql; -CREATE AGGREGATE testns.priv_testagg(int) (sfunc = int4pl, stype = int4); -CREATE PROCEDURE testns.priv_testproc(int) AS 'select 3' LANGUAGE sql; -SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- true by default - has_function_privilege ------------------------- - t -(1 row) - -SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testagg(int)', 'EXECUTE'); -- true by default - has_function_privilege ------------------------- - t -(1 row) - -SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- true by default - has_function_privilege ------------------------- - t -(1 row) - -REVOKE ALL ON ALL FUNCTIONS IN SCHEMA testns FROM PUBLIC; -SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- false - has_function_privilege ------------------------- - f -(1 row) - -SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testagg(int)', 'EXECUTE'); -- false - has_function_privilege ------------------------- - f -(1 row) - -SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- still true, not a function - has_function_privilege ------------------------- - t -(1 row) - -REVOKE ALL ON ALL PROCEDURES IN SCHEMA testns FROM PUBLIC; -SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- now false - has_function_privilege ------------------------- - f -(1 row) - -GRANT ALL ON ALL ROUTINES IN SCHEMA testns TO PUBLIC; -SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- true - has_function_privilege ------------------------- - t -(1 row) - -SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testagg(int)', 'EXECUTE'); -- true - has_function_privilege ------------------------- - t -(1 row) - -SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 
'EXECUTE'); -- true - has_function_privilege ------------------------- - t -(1 row) - -DROP SCHEMA testns CASCADE; -NOTICE: drop cascades to 5 other objects -DETAIL: drop cascades to table testns.t1 -drop cascades to table testns.t2 -drop cascades to function testns.priv_testfunc(integer) -drop cascades to function testns.priv_testagg(integer) -drop cascades to function testns.priv_testproc(integer) --- Change owner of the schema & and rename of new schema owner -\c - -CREATE ROLE regress_schemauser1 superuser login; -CREATE ROLE regress_schemauser2 superuser login; -SET SESSION ROLE regress_schemauser1; -CREATE SCHEMA testns; -SELECT nspname, rolname FROM pg_namespace, pg_roles WHERE pg_namespace.nspname = 'testns' AND pg_namespace.nspowner = pg_roles.oid; - nspname | rolname ----------+--------------------- - testns | regress_schemauser1 -(1 row) - -ALTER SCHEMA testns OWNER TO regress_schemauser2; -ALTER ROLE regress_schemauser2 RENAME TO regress_schemauser_renamed; -SELECT nspname, rolname FROM pg_namespace, pg_roles WHERE pg_namespace.nspname = 'testns' AND pg_namespace.nspowner = pg_roles.oid; - nspname | rolname ----------+---------------------------- - testns | regress_schemauser_renamed -(1 row) - -set session role regress_schemauser_renamed; -DROP SCHEMA testns CASCADE; --- clean up -\c - -DROP ROLE regress_schemauser1; -DROP ROLE regress_schemauser_renamed; --- test that dependent privileges are revoked (or not) properly -\c - -set session role regress_priv_user1; -create table dep_priv_test (a int); -grant select on dep_priv_test to regress_priv_user2 with grant option; -grant select on dep_priv_test to regress_priv_user3 with grant option; -set session role regress_priv_user2; -grant select on dep_priv_test to regress_priv_user4 with grant option; -set session role regress_priv_user3; -grant select on dep_priv_test to regress_priv_user4 with grant option; -set session role regress_priv_user4; -grant select on dep_priv_test to regress_priv_user5; -\dp dep_priv_test - Access privileges - Schema | Name | Type | Access privileges | Column privileges | Policies ---------+---------------+-------+------------------------------------------------+-------------------+---------- - public | dep_priv_test | table | regress_priv_user1=arwdDxtm/regress_priv_user1+| | - | | | regress_priv_user2=r*/regress_priv_user1 +| | - | | | regress_priv_user3=r*/regress_priv_user1 +| | - | | | regress_priv_user4=r*/regress_priv_user2 +| | - | | | regress_priv_user4=r*/regress_priv_user3 +| | - | | | regress_priv_user5=r/regress_priv_user4 | | -(1 row) - -set session role regress_priv_user2; -revoke select on dep_priv_test from regress_priv_user4 cascade; -\dp dep_priv_test - Access privileges - Schema | Name | Type | Access privileges | Column privileges | Policies ---------+---------------+-------+------------------------------------------------+-------------------+---------- - public | dep_priv_test | table | regress_priv_user1=arwdDxtm/regress_priv_user1+| | - | | | regress_priv_user2=r*/regress_priv_user1 +| | - | | | regress_priv_user3=r*/regress_priv_user1 +| | - | | | regress_priv_user4=r*/regress_priv_user3 +| | - | | | regress_priv_user5=r/regress_priv_user4 | | -(1 row) - -set session role regress_priv_user3; -revoke select on dep_priv_test from regress_priv_user4 cascade; -\dp dep_priv_test - Access privileges - Schema | Name | Type | Access privileges | Column privileges | Policies 
---------+---------------+-------+------------------------------------------------+-------------------+---------- - public | dep_priv_test | table | regress_priv_user1=arwdDxtm/regress_priv_user1+| | - | | | regress_priv_user2=r*/regress_priv_user1 +| | - | | | regress_priv_user3=r*/regress_priv_user1 | | -(1 row) - -set session role regress_priv_user1; -drop table dep_priv_test; --- clean up -\c -drop sequence x_seq; -DROP AGGREGATE priv_testagg1(int); -DROP FUNCTION priv_testfunc2(int); -DROP FUNCTION priv_testfunc4(boolean); -DROP PROCEDURE priv_testproc1(int); -DROP VIEW atestv0; -DROP VIEW atestv1; -DROP VIEW atestv2; --- this should cascade to drop atestv4 -DROP VIEW atestv3 CASCADE; -NOTICE: drop cascades to view atestv4 --- this should complain "does not exist" -DROP VIEW atestv4; -ERROR: view "atestv4" does not exist -DROP TABLE atest1; -DROP TABLE atest2; -DROP TABLE atest3; -DROP TABLE atest4; -DROP TABLE atest5; -DROP TABLE atest6; -DROP TABLE atestc; -DROP TABLE atestp1; -DROP TABLE atestp2; -SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid; - lo_unlink ------------ - 1 - 1 - 1 - 1 - 1 -(5 rows) - -DROP GROUP regress_priv_group1; -DROP GROUP regress_priv_group2; --- these are needed to clean up permissions -REVOKE USAGE ON LANGUAGE sql FROM regress_priv_user1; -DROP OWNED BY regress_priv_user1; -DROP USER regress_priv_user1; -DROP USER regress_priv_user2; -DROP USER regress_priv_user3; -DROP USER regress_priv_user4; -DROP USER regress_priv_user5; -DROP USER regress_priv_user6; -DROP USER regress_priv_user7; -DROP USER regress_priv_user8; -- does not exist -ERROR: role "regress_priv_user8" does not exist --- permissions with LOCK TABLE -CREATE USER regress_locktable_user; -CREATE TABLE lock_table (a int); --- LOCK TABLE and SELECT permission -GRANT SELECT ON lock_table TO regress_locktable_user; -SET SESSION AUTHORIZATION regress_locktable_user; -BEGIN; -LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass -COMMIT; -BEGIN; -LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should fail -ERROR: permission denied for table lock_table -ROLLBACK; -BEGIN; -LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should fail -ERROR: permission denied for table lock_table -ROLLBACK; -\c -REVOKE SELECT ON lock_table FROM regress_locktable_user; --- LOCK TABLE and INSERT permission -GRANT INSERT ON lock_table TO regress_locktable_user; -SET SESSION AUTHORIZATION regress_locktable_user; -BEGIN; -LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass -ROLLBACK; -BEGIN; -LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass -COMMIT; -BEGIN; -LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should fail -ERROR: permission denied for table lock_table -ROLLBACK; -\c -REVOKE INSERT ON lock_table FROM regress_locktable_user; --- LOCK TABLE and UPDATE permission -GRANT UPDATE ON lock_table TO regress_locktable_user; -SET SESSION AUTHORIZATION regress_locktable_user; -BEGIN; -LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass -ROLLBACK; -BEGIN; -LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass -COMMIT; -BEGIN; -LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass -COMMIT; -\c -REVOKE UPDATE ON lock_table FROM regress_locktable_user; --- LOCK TABLE and DELETE permission -GRANT DELETE ON lock_table TO regress_locktable_user; -SET SESSION AUTHORIZATION regress_locktable_user; -BEGIN; -LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass -ROLLBACK; -BEGIN; -LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should 
pass -COMMIT; -BEGIN; -LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass -COMMIT; -\c -REVOKE DELETE ON lock_table FROM regress_locktable_user; --- LOCK TABLE and TRUNCATE permission -GRANT TRUNCATE ON lock_table TO regress_locktable_user; -SET SESSION AUTHORIZATION regress_locktable_user; -BEGIN; -LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass -ROLLBACK; -BEGIN; -LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass -COMMIT; -BEGIN; -LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass -COMMIT; -\c -REVOKE TRUNCATE ON lock_table FROM regress_locktable_user; --- LOCK TABLE and MAINTAIN permission -GRANT MAINTAIN ON lock_table TO regress_locktable_user; -SET SESSION AUTHORIZATION regress_locktable_user; -BEGIN; -LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass -ROLLBACK; -BEGIN; -LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass -COMMIT; -BEGIN; -LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass -COMMIT; -\c -REVOKE MAINTAIN ON lock_table FROM regress_locktable_user; --- clean up -DROP TABLE lock_table; -DROP USER regress_locktable_user; --- test to check privileges of system views pg_shmem_allocations and --- pg_backend_memory_contexts. --- switch to superuser -\c - -CREATE ROLE regress_readallstats; -SELECT has_table_privilege('regress_readallstats','pg_backend_memory_contexts','SELECT'); -- no - has_table_privilege ---------------------- - f -(1 row) - -SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations','SELECT'); -- no - has_table_privilege ---------------------- - f -(1 row) - -GRANT pg_read_all_stats TO regress_readallstats; -SELECT has_table_privilege('regress_readallstats','pg_backend_memory_contexts','SELECT'); -- yes - has_table_privilege ---------------------- - t -(1 row) - -SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations','SELECT'); -- yes - has_table_privilege ---------------------- - t -(1 row) - --- run query to ensure that functions within views can be executed -SET ROLE regress_readallstats; -SELECT COUNT(*) >= 0 AS ok FROM pg_backend_memory_contexts; - ok ----- - t -(1 row) - -SELECT COUNT(*) >= 0 AS ok FROM pg_shmem_allocations; - ok ----- - t -(1 row) - -RESET ROLE; --- clean up -DROP ROLE regress_readallstats; --- test role grantor machinery -CREATE ROLE regress_group; -CREATE ROLE regress_group_direct_manager; -CREATE ROLE regress_group_indirect_manager; -CREATE ROLE regress_group_member; -GRANT regress_group TO regress_group_direct_manager WITH INHERIT FALSE, ADMIN TRUE; -GRANT regress_group_direct_manager TO regress_group_indirect_manager; -SET SESSION AUTHORIZATION regress_group_direct_manager; -GRANT regress_group TO regress_group_member; -SELECT member::regrole::text, CASE WHEN grantor = 10 THEN 'BOOTSTRAP SUPERUSER' ELSE grantor::regrole::text END FROM pg_auth_members WHERE roleid = 'regress_group'::regrole ORDER BY 1, 2; - member | grantor -------------------------------+------------------------------ - regress_group_direct_manager | BOOTSTRAP SUPERUSER - regress_group_member | regress_group_direct_manager -(2 rows) - -REVOKE regress_group FROM regress_group_member; -SET SESSION AUTHORIZATION regress_group_indirect_manager; -GRANT regress_group TO regress_group_member; -SELECT member::regrole::text, CASE WHEN grantor = 10 THEN 'BOOTSTRAP SUPERUSER' ELSE grantor::regrole::text END FROM pg_auth_members WHERE roleid = 'regress_group'::regrole ORDER BY 1, 2; - member | grantor -------------------------------+------------------------------ - 
regress_group_direct_manager | BOOTSTRAP SUPERUSER - regress_group_member | regress_group_direct_manager -(2 rows) - -REVOKE regress_group FROM regress_group_member; -RESET SESSION AUTHORIZATION; -DROP ROLE regress_group; -DROP ROLE regress_group_direct_manager; -DROP ROLE regress_group_indirect_manager; -DROP ROLE regress_group_member; --- test SET and INHERIT options with object ownership changes -CREATE ROLE regress_roleoption_protagonist; -CREATE ROLE regress_roleoption_donor; -CREATE ROLE regress_roleoption_recipient; -CREATE SCHEMA regress_roleoption; -GRANT CREATE, USAGE ON SCHEMA regress_roleoption TO PUBLIC; -GRANT regress_roleoption_donor TO regress_roleoption_protagonist WITH INHERIT TRUE, SET FALSE; -GRANT regress_roleoption_recipient TO regress_roleoption_protagonist WITH INHERIT FALSE, SET TRUE; -SET SESSION AUTHORIZATION regress_roleoption_protagonist; -CREATE TABLE regress_roleoption.t1 (a int); -CREATE TABLE regress_roleoption.t2 (a int); -SET SESSION AUTHORIZATION regress_roleoption_donor; -CREATE TABLE regress_roleoption.t3 (a int); -SET SESSION AUTHORIZATION regress_roleoption_recipient; -CREATE TABLE regress_roleoption.t4 (a int); -SET SESSION AUTHORIZATION regress_roleoption_protagonist; -ALTER TABLE regress_roleoption.t1 OWNER TO regress_roleoption_donor; -- fails, can't be come donor -ERROR: must be able to SET ROLE "regress_roleoption_donor" -ALTER TABLE regress_roleoption.t2 OWNER TO regress_roleoption_recipient; -- works -ALTER TABLE regress_roleoption.t3 OWNER TO regress_roleoption_protagonist; -- works -ALTER TABLE regress_roleoption.t4 OWNER TO regress_roleoption_protagonist; -- fails, we don't inherit from recipient -ERROR: must be owner of table t4 -RESET SESSION AUTHORIZATION; -DROP TABLE regress_roleoption.t1; -DROP TABLE regress_roleoption.t2; -DROP TABLE regress_roleoption.t3; -DROP TABLE regress_roleoption.t4; -DROP SCHEMA regress_roleoption; -DROP ROLE regress_roleoption_protagonist; -DROP ROLE regress_roleoption_donor; -DROP ROLE regress_roleoption_recipient; --- MAINTAIN -CREATE ROLE regress_no_maintain; -CREATE ROLE regress_maintain; -CREATE ROLE regress_maintain_all IN ROLE pg_maintain; -CREATE TABLE maintain_test (a INT); -CREATE INDEX ON maintain_test (a); -GRANT MAINTAIN ON maintain_test TO regress_maintain; -CREATE MATERIALIZED VIEW refresh_test AS SELECT 1; -GRANT MAINTAIN ON refresh_test TO regress_maintain; -CREATE SCHEMA reindex_test; --- negative tests; should fail -SET ROLE regress_no_maintain; -VACUUM maintain_test; -WARNING: permission denied to vacuum "maintain_test", skipping it -ANALYZE maintain_test; -WARNING: permission denied to analyze "maintain_test", skipping it -VACUUM (ANALYZE) maintain_test; -WARNING: permission denied to vacuum "maintain_test", skipping it -CLUSTER maintain_test USING maintain_test_a_idx; -ERROR: permission denied for table maintain_test -REFRESH MATERIALIZED VIEW refresh_test; -ERROR: permission denied for materialized view refresh_test -REINDEX TABLE maintain_test; -ERROR: permission denied for table maintain_test -REINDEX INDEX maintain_test_a_idx; -ERROR: permission denied for index maintain_test_a_idx -REINDEX SCHEMA reindex_test; -ERROR: must be owner of schema reindex_test -RESET ROLE; -SET ROLE regress_maintain; -VACUUM maintain_test; -ANALYZE maintain_test; -VACUUM (ANALYZE) maintain_test; -CLUSTER maintain_test USING maintain_test_a_idx; -REFRESH MATERIALIZED VIEW refresh_test; -REINDEX TABLE maintain_test; -REINDEX INDEX maintain_test_a_idx; -REINDEX SCHEMA reindex_test; -ERROR: must be owner of 
schema reindex_test -RESET ROLE; -SET ROLE regress_maintain_all; -VACUUM maintain_test; -ANALYZE maintain_test; -VACUUM (ANALYZE) maintain_test; -CLUSTER maintain_test USING maintain_test_a_idx; -REFRESH MATERIALIZED VIEW refresh_test; -REINDEX TABLE maintain_test; -REINDEX INDEX maintain_test_a_idx; -REINDEX SCHEMA reindex_test; -RESET ROLE; -DROP TABLE maintain_test; -DROP MATERIALIZED VIEW refresh_test; -DROP SCHEMA reindex_test; -DROP ROLE regress_no_maintain; -DROP ROLE regress_maintain; -DROP ROLE regress_maintain_all; +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/rowsecurity.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/rowsecurity.out --- /tmp/cirrus-ci-build/src/test/regress/expected/rowsecurity.out 2024-03-13 23:12:37.627007000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/rowsecurity.out 2024-03-13 23:14:29.333256000 +0000 @@ -1942,2608 +1942,7 @@ (3 rows) UPDATE bv1 SET b = 'yyy' WHERE a = 4 AND f_leak(b); -NOTICE: f_leak => 4b227777d4dd1fc61c6f884f48641d02 -EXPLAIN (COSTS OFF) DELETE FROM bv1 WHERE a = 6 AND f_leak(b); - QUERY PLAN ------------------------------------------------------------------------ - Delete on b1 - -> Seq Scan on b1 - Filter: ((a > 0) AND (a = 6) AND ((a % 2) = 0) AND f_leak(b)) -(3 rows) - -DELETE FROM bv1 WHERE a = 6 AND f_leak(b); -NOTICE: f_leak => e7f6c011776e8db7cd330b54174fd76f -SET SESSION AUTHORIZATION regress_rls_alice; -SELECT * FROM b1; - a | b ------+---------------------------------- - -10 | c171d4ec282b23db89a99880cd624e9b - -9 | d5c534fde62beb89c745a59952c8efed - -8 | e91592205d3881e3ea35d66973bb4898 - -7 | a770d3270c9dcdedf12ed9fd70444f7c - -6 | 03b26944890929ff751653acb2f2af79 - -5 | 37aa1ccf80e481832b2db282d4d4f895 - -4 | e5e0093f285a4fb94c3fcc2ad7fd04ed - -3 | 615bdd17c2556f82f384392ea8557f8c - -2 | cf3bae39dd692048a8bf961182e6a34d - -1 | 1bad6b8cf97131fceab8543e81f77571 - 0 | 5feceb66ffc86f38d952786c6d696c79 - 1 | 6b86b273ff34fce19d6b804eff5a3f57 - 2 | d4735e3a265e16eee03f59718b9b5d03 - 3 | 4e07408562bedb8b60ce05c1decfe3ad - 5 | ef2d127de37b942baad06145e54b0c61 - 7 | 7902699be42c8a8e46fbbb4501726517 - 8 | 2c624232cdd221771294dfbb310aca00 - 9 | 19581e27de7ced00ff1ce50b2047e7a5 - 10 | 4a44dc15364204a80fe80e9039455cc1 - 12 | xxx - 4 | yyy -(21 rows) - --- --- INSERT ... ON CONFLICT DO UPDATE and Row-level security --- -SET SESSION AUTHORIZATION regress_rls_alice; -DROP POLICY p1 ON document; -DROP POLICY p1r ON document; -CREATE POLICY p1 ON document FOR SELECT USING (true); -CREATE POLICY p2 ON document FOR INSERT WITH CHECK (dauthor = current_user); -CREATE POLICY p3 ON document FOR UPDATE - USING (cid = (SELECT cid from category WHERE cname = 'novel')) - WITH CHECK (dauthor = current_user); -SET SESSION AUTHORIZATION regress_rls_bob; --- Exists... 
-SELECT * FROM document WHERE did = 2; - did | cid | dlevel | dauthor | dtitle ------+-----+--------+-----------------+----------------- - 2 | 11 | 2 | regress_rls_bob | my second novel -(1 row) - --- ...so violates actual WITH CHECK OPTION within UPDATE (not INSERT, since --- alternative UPDATE path happens to be taken): -INSERT INTO document VALUES (2, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_carol', 'my first novel') - ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, dauthor = EXCLUDED.dauthor; -ERROR: new row violates row-level security policy for table "document" --- Violates USING qual for UPDATE policy p3. --- --- UPDATE path is taken, but UPDATE fails purely because *existing* row to be --- updated is not a "novel"/cid 11 (row is not leaked, even though we have --- SELECT privileges sufficient to see the row in this instance): -INSERT INTO document VALUES (33, 22, 1, 'regress_rls_bob', 'okay science fiction'); -- preparation for next statement -INSERT INTO document VALUES (33, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'Some novel, replaces sci-fi') -- takes UPDATE path - ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle; -ERROR: new row violates row-level security policy (USING expression) for table "document" --- Fine (we UPDATE, since INSERT WCOs and UPDATE security barrier quals + WCOs --- not violated): -INSERT INTO document VALUES (2, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'my first novel') - ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *; - did | cid | dlevel | dauthor | dtitle ------+-----+--------+-----------------+---------------- - 2 | 11 | 2 | regress_rls_bob | my first novel -(1 row) - --- Fine (we INSERT, so "cid = 33" ("technology") isn't evaluated): -INSERT INTO document VALUES (78, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'some technology novel') - ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33 RETURNING *; - did | cid | dlevel | dauthor | dtitle ------+-----+--------+-----------------+----------------------- - 78 | 11 | 1 | regress_rls_bob | some technology novel -(1 row) - --- Fine (same query, but we UPDATE, so "cid = 33", ("technology") is not the --- case in respect of *existing* tuple): -INSERT INTO document VALUES (78, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'some technology novel') - ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33 RETURNING *; - did | cid | dlevel | dauthor | dtitle ------+-----+--------+-----------------+----------------------- - 78 | 33 | 1 | regress_rls_bob | some technology novel -(1 row) - --- Same query a third time, but now fails due to existing tuple finally not --- passing quals: -INSERT INTO document VALUES (78, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'some technology novel') - ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33 RETURNING *; -ERROR: new row violates row-level security policy (USING expression) for table "document" --- Don't fail just because INSERT doesn't satisfy WITH CHECK option that --- originated as a barrier/USING() qual from the UPDATE. 
Note that the UPDATE --- path *isn't* taken, and so UPDATE-related policy does not apply: -INSERT INTO document VALUES (79, (SELECT cid from category WHERE cname = 'technology'), 1, 'regress_rls_bob', 'technology book, can only insert') - ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *; - did | cid | dlevel | dauthor | dtitle ------+-----+--------+-----------------+---------------------------------- - 79 | 33 | 1 | regress_rls_bob | technology book, can only insert -(1 row) - --- But this time, the same statement fails, because the UPDATE path is taken, --- and updating the row just inserted falls afoul of security barrier qual --- (enforced as WCO) -- what we might have updated target tuple to is --- irrelevant, in fact. -INSERT INTO document VALUES (79, (SELECT cid from category WHERE cname = 'technology'), 1, 'regress_rls_bob', 'technology book, can only insert') - ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *; -ERROR: new row violates row-level security policy (USING expression) for table "document" --- Test default USING qual enforced as WCO -SET SESSION AUTHORIZATION regress_rls_alice; -DROP POLICY p1 ON document; -DROP POLICY p2 ON document; -DROP POLICY p3 ON document; -CREATE POLICY p3_with_default ON document FOR UPDATE - USING (cid = (SELECT cid from category WHERE cname = 'novel')); -SET SESSION AUTHORIZATION regress_rls_bob; --- Just because WCO-style enforcement of USING quals occurs with --- existing/target tuple does not mean that the implementation can be allowed --- to fail to also enforce this qual against the final tuple appended to --- relation (since in the absence of an explicit WCO, this is also interpreted --- as an UPDATE/ALL WCO in general). --- --- UPDATE path is taken here (fails due to existing tuple). Note that this is --- not reported as a "USING expression", because it's an RLS UPDATE check that originated as --- a USING qual for the purposes of RLS in general, as opposed to an explicit --- USING qual that is ordinarily a security barrier. We leave it up to the --- UPDATE to make this fail: -INSERT INTO document VALUES (79, (SELECT cid from category WHERE cname = 'technology'), 1, 'regress_rls_bob', 'technology book, can only insert') - ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *; -ERROR: new row violates row-level security policy for table "document" --- UPDATE path is taken here. 
Existing tuple passes, since its cid --- corresponds to "novel", but default USING qual is enforced against --- post-UPDATE tuple too (as always when updating with a policy that lacks an --- explicit WCO), and so this fails: -INSERT INTO document VALUES (2, (SELECT cid from category WHERE cname = 'technology'), 1, 'regress_rls_bob', 'my first novel') - ON CONFLICT (did) DO UPDATE SET cid = EXCLUDED.cid, dtitle = EXCLUDED.dtitle RETURNING *; -ERROR: new row violates row-level security policy for table "document" -SET SESSION AUTHORIZATION regress_rls_alice; -DROP POLICY p3_with_default ON document; --- --- Test ALL policies with ON CONFLICT DO UPDATE (much the same as existing UPDATE --- tests) --- -CREATE POLICY p3_with_all ON document FOR ALL - USING (cid = (SELECT cid from category WHERE cname = 'novel')) - WITH CHECK (dauthor = current_user); -SET SESSION AUTHORIZATION regress_rls_bob; --- Fails, since ALL WCO is enforced in insert path: -INSERT INTO document VALUES (80, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_carol', 'my first novel') - ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33; -ERROR: new row violates row-level security policy for table "document" --- Fails, since ALL policy USING qual is enforced (existing, target tuple is in --- violation, since it has the "manga" cid): -INSERT INTO document VALUES (4, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'my first novel') - ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle; -ERROR: new row violates row-level security policy (USING expression) for table "document" --- Fails, since ALL WCO are enforced: -INSERT INTO document VALUES (1, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'my first novel') - ON CONFLICT (did) DO UPDATE SET dauthor = 'regress_rls_carol'; -ERROR: new row violates row-level security policy for table "document" --- --- MERGE --- -RESET SESSION AUTHORIZATION; -DROP POLICY p3_with_all ON document; -ALTER TABLE document ADD COLUMN dnotes text DEFAULT ''; --- all documents are readable -CREATE POLICY p1 ON document FOR SELECT USING (true); --- one may insert documents only authored by them -CREATE POLICY p2 ON document FOR INSERT WITH CHECK (dauthor = current_user); --- one may only update documents in 'novel' category and new dlevel must be > 0 -CREATE POLICY p3 ON document FOR UPDATE - USING (cid = (SELECT cid from category WHERE cname = 'novel')) - WITH CHECK (dlevel > 0); --- one may only delete documents in 'manga' category -CREATE POLICY p4 ON document FOR DELETE - USING (cid = (SELECT cid from category WHERE cname = 'manga')); -SELECT * FROM document; - did | cid | dlevel | dauthor | dtitle | dnotes ------+-----+--------+-------------------+----------------------------------+-------- - 1 | 11 | 1 | regress_rls_bob | my first novel | - 3 | 22 | 2 | regress_rls_bob | my science fiction | - 4 | 44 | 1 | regress_rls_bob | my first manga | - 5 | 44 | 2 | regress_rls_bob | my second manga | - 6 | 22 | 1 | regress_rls_carol | great science fiction | - 7 | 33 | 2 | regress_rls_carol | great technology book | - 8 | 44 | 1 | regress_rls_carol | great manga | - 9 | 22 | 1 | regress_rls_dave | awesome science fiction | - 10 | 33 | 2 | regress_rls_dave | awesome technology book | - 11 | 33 | 1 | regress_rls_carol | hoge | - 33 | 22 | 1 | regress_rls_bob | okay science fiction | - 2 | 11 | 2 | regress_rls_bob | my first novel | - 78 | 33 | 1 | regress_rls_bob | some technology novel | - 79 | 33 | 1 | regress_rls_bob | 
technology book, can only insert | -(14 rows) - -SET SESSION AUTHORIZATION regress_rls_bob; --- Fails, since update violates WITH CHECK qual on dlevel -MERGE INTO document d -USING (SELECT 1 as sdid) s -ON did = s.sdid -WHEN MATCHED THEN - UPDATE SET dnotes = dnotes || ' notes added by merge1 ', dlevel = 0; -ERROR: new row violates row-level security policy for table "document" --- Should be OK since USING and WITH CHECK quals pass -MERGE INTO document d -USING (SELECT 1 as sdid) s -ON did = s.sdid -WHEN MATCHED THEN - UPDATE SET dnotes = dnotes || ' notes added by merge2 '; --- Even when dlevel is updated explicitly, but to the existing value -MERGE INTO document d -USING (SELECT 1 as sdid) s -ON did = s.sdid -WHEN MATCHED THEN - UPDATE SET dnotes = dnotes || ' notes added by merge3 ', dlevel = 1; --- There is a MATCH for did = 3, but UPDATE's USING qual does not allow --- updating an item in category 'science fiction' -MERGE INTO document d -USING (SELECT 3 as sdid) s -ON did = s.sdid -WHEN MATCHED THEN - UPDATE SET dnotes = dnotes || ' notes added by merge '; -ERROR: target row violates row-level security policy (USING expression) for table "document" --- The same thing with DELETE action, but fails again because no permissions --- to delete items in 'science fiction' category that did 3 belongs to. -MERGE INTO document d -USING (SELECT 3 as sdid) s -ON did = s.sdid -WHEN MATCHED THEN - DELETE; -ERROR: target row violates row-level security policy (USING expression) for table "document" --- Document with did 4 belongs to 'manga' category which is allowed for --- deletion. But this fails because the UPDATE action is matched first and --- UPDATE policy does not allow updation in the category. -MERGE INTO document d -USING (SELECT 4 as sdid) s -ON did = s.sdid -WHEN MATCHED AND dnotes = '' THEN - UPDATE SET dnotes = dnotes || ' notes added by merge ' -WHEN MATCHED THEN - DELETE; -ERROR: target row violates row-level security policy (USING expression) for table "document" --- UPDATE action is not matched this time because of the WHEN qual. --- DELETE still fails because role regress_rls_bob does not have SELECT --- privileges on 'manga' category row in the category table. -MERGE INTO document d -USING (SELECT 4 as sdid) s -ON did = s.sdid -WHEN MATCHED AND dnotes <> '' THEN - UPDATE SET dnotes = dnotes || ' notes added by merge ' -WHEN MATCHED THEN - DELETE; -ERROR: target row violates row-level security policy (USING expression) for table "document" --- OK if DELETE is replaced with DO NOTHING -MERGE INTO document d -USING (SELECT 4 as sdid) s -ON did = s.sdid -WHEN MATCHED AND dnotes <> '' THEN - UPDATE SET dnotes = dnotes || ' notes added by merge ' -WHEN MATCHED THEN - DO NOTHING; -SELECT * FROM document WHERE did = 4; - did | cid | dlevel | dauthor | dtitle | dnotes ------+-----+--------+-----------------+----------------+-------- - 4 | 44 | 1 | regress_rls_bob | my first manga | -(1 row) - --- Switch to regress_rls_carol role and try the DELETE again. It should succeed --- this time -RESET SESSION AUTHORIZATION; -SET SESSION AUTHORIZATION regress_rls_carol; -MERGE INTO document d -USING (SELECT 4 as sdid) s -ON did = s.sdid -WHEN MATCHED AND dnotes <> '' THEN - UPDATE SET dnotes = dnotes || ' notes added by merge ' -WHEN MATCHED THEN - DELETE; --- Switch back to regress_rls_bob role -RESET SESSION AUTHORIZATION; -SET SESSION AUTHORIZATION regress_rls_bob; --- Try INSERT action. 
This fails because we are trying to insert --- dauthor = regress_rls_dave and INSERT's WITH CHECK does not allow --- that -MERGE INTO document d -USING (SELECT 12 as sdid) s -ON did = s.sdid -WHEN MATCHED THEN - DELETE -WHEN NOT MATCHED THEN - INSERT VALUES (12, 11, 1, 'regress_rls_dave', 'another novel'); -ERROR: new row violates row-level security policy for table "document" --- This should be fine -MERGE INTO document d -USING (SELECT 12 as sdid) s -ON did = s.sdid -WHEN MATCHED THEN - DELETE -WHEN NOT MATCHED THEN - INSERT VALUES (12, 11, 1, 'regress_rls_bob', 'another novel'); --- ok -MERGE INTO document d -USING (SELECT 1 as sdid) s -ON did = s.sdid -WHEN MATCHED THEN - UPDATE SET dnotes = dnotes || ' notes added by merge4 ' -WHEN NOT MATCHED THEN - INSERT VALUES (12, 11, 1, 'regress_rls_bob', 'another novel'); --- drop and create a new SELECT policy which prevents us from reading --- any document except with category 'novel' -RESET SESSION AUTHORIZATION; -DROP POLICY p1 ON document; -CREATE POLICY p1 ON document FOR SELECT - USING (cid = (SELECT cid from category WHERE cname = 'novel')); -SET SESSION AUTHORIZATION regress_rls_bob; --- MERGE can no longer see the matching row and hence attempts the --- NOT MATCHED action, which results in unique key violation -MERGE INTO document d -USING (SELECT 7 as sdid) s -ON did = s.sdid -WHEN MATCHED THEN - UPDATE SET dnotes = dnotes || ' notes added by merge5 ' -WHEN NOT MATCHED THEN - INSERT VALUES (12, 11, 1, 'regress_rls_bob', 'another novel'); -ERROR: duplicate key value violates unique constraint "document_pkey" --- UPDATE action fails if new row is not visible -MERGE INTO document d -USING (SELECT 1 as sdid) s -ON did = s.sdid -WHEN MATCHED THEN - UPDATE SET dnotes = dnotes || ' notes added by merge6 ', - cid = (SELECT cid from category WHERE cname = 'technology'); -ERROR: new row violates row-level security policy for table "document" --- but OK if new row is visible -MERGE INTO document d -USING (SELECT 1 as sdid) s -ON did = s.sdid -WHEN MATCHED THEN - UPDATE SET dnotes = dnotes || ' notes added by merge7 ', - cid = (SELECT cid from category WHERE cname = 'novel'); --- OK to insert a new row that is not visible -MERGE INTO document d -USING (SELECT 13 as sdid) s -ON did = s.sdid -WHEN MATCHED THEN - UPDATE SET dnotes = dnotes || ' notes added by merge8 ' -WHEN NOT MATCHED THEN - INSERT VALUES (13, 44, 1, 'regress_rls_bob', 'new manga'); -RESET SESSION AUTHORIZATION; --- drop the restrictive SELECT policy so that we can look at the --- final state of the table -DROP POLICY p1 ON document; --- Just check everything went per plan -SELECT * FROM document; - did | cid | dlevel | dauthor | dtitle | dnotes ------+-----+--------+-------------------+----------------------------------+---------------------------------------------------------------------------------------------- - 3 | 22 | 2 | regress_rls_bob | my science fiction | - 5 | 44 | 2 | regress_rls_bob | my second manga | - 6 | 22 | 1 | regress_rls_carol | great science fiction | - 7 | 33 | 2 | regress_rls_carol | great technology book | - 8 | 44 | 1 | regress_rls_carol | great manga | - 9 | 22 | 1 | regress_rls_dave | awesome science fiction | - 10 | 33 | 2 | regress_rls_dave | awesome technology book | - 11 | 33 | 1 | regress_rls_carol | hoge | - 33 | 22 | 1 | regress_rls_bob | okay science fiction | - 2 | 11 | 2 | regress_rls_bob | my first novel | - 78 | 33 | 1 | regress_rls_bob | some technology novel | - 79 | 33 | 1 | regress_rls_bob | technology book, can only insert | - 12 | 11 
| 1 | regress_rls_bob | another novel | - 1 | 11 | 1 | regress_rls_bob | my first novel | notes added by merge2 notes added by merge3 notes added by merge4 notes added by merge7 - 13 | 44 | 1 | regress_rls_bob | new manga | -(15 rows) - --- --- ROLE/GROUP --- -SET SESSION AUTHORIZATION regress_rls_alice; -CREATE TABLE z1 (a int, b text); -CREATE TABLE z2 (a int, b text); -GRANT SELECT ON z1,z2 TO regress_rls_group1, regress_rls_group2, - regress_rls_bob, regress_rls_carol; -INSERT INTO z1 VALUES - (1, 'aba'), - (2, 'bbb'), - (3, 'ccc'), - (4, 'dad'); -CREATE POLICY p1 ON z1 TO regress_rls_group1 USING (a % 2 = 0); -CREATE POLICY p2 ON z1 TO regress_rls_group2 USING (a % 2 = 1); -ALTER TABLE z1 ENABLE ROW LEVEL SECURITY; -SET SESSION AUTHORIZATION regress_rls_bob; -SELECT * FROM z1 WHERE f_leak(b); -NOTICE: f_leak => bbb -NOTICE: f_leak => dad - a | b ----+----- - 2 | bbb - 4 | dad -(2 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM z1 WHERE f_leak(b); - QUERY PLAN ------------------------------------------ - Seq Scan on z1 - Filter: (((a % 2) = 0) AND f_leak(b)) -(2 rows) - -PREPARE plancache_test AS SELECT * FROM z1 WHERE f_leak(b); -EXPLAIN (COSTS OFF) EXECUTE plancache_test; - QUERY PLAN ------------------------------------------ - Seq Scan on z1 - Filter: (((a % 2) = 0) AND f_leak(b)) -(2 rows) - -PREPARE plancache_test2 AS WITH q AS MATERIALIZED (SELECT * FROM z1 WHERE f_leak(b)) SELECT * FROM q,z2; -EXPLAIN (COSTS OFF) EXECUTE plancache_test2; - QUERY PLAN -------------------------------------------------- - Nested Loop - CTE q - -> Seq Scan on z1 - Filter: (((a % 2) = 0) AND f_leak(b)) - -> CTE Scan on q - -> Materialize - -> Seq Scan on z2 -(7 rows) - -PREPARE plancache_test3 AS WITH q AS MATERIALIZED (SELECT * FROM z2) SELECT * FROM q,z1 WHERE f_leak(z1.b); -EXPLAIN (COSTS OFF) EXECUTE plancache_test3; - QUERY PLAN ------------------------------------------------------ - Nested Loop - CTE q - -> Seq Scan on z2 - -> CTE Scan on q - -> Materialize - -> Seq Scan on z1 - Filter: (((a % 2) = 0) AND f_leak(b)) -(7 rows) - -SET ROLE regress_rls_group1; -SELECT * FROM z1 WHERE f_leak(b); -NOTICE: f_leak => bbb -NOTICE: f_leak => dad - a | b ----+----- - 2 | bbb - 4 | dad -(2 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM z1 WHERE f_leak(b); - QUERY PLAN ------------------------------------------ - Seq Scan on z1 - Filter: (((a % 2) = 0) AND f_leak(b)) -(2 rows) - -EXPLAIN (COSTS OFF) EXECUTE plancache_test; - QUERY PLAN ------------------------------------------ - Seq Scan on z1 - Filter: (((a % 2) = 0) AND f_leak(b)) -(2 rows) - -EXPLAIN (COSTS OFF) EXECUTE plancache_test2; - QUERY PLAN -------------------------------------------------- - Nested Loop - CTE q - -> Seq Scan on z1 - Filter: (((a % 2) = 0) AND f_leak(b)) - -> CTE Scan on q - -> Materialize - -> Seq Scan on z2 -(7 rows) - -EXPLAIN (COSTS OFF) EXECUTE plancache_test3; - QUERY PLAN ------------------------------------------------------ - Nested Loop - CTE q - -> Seq Scan on z2 - -> CTE Scan on q - -> Materialize - -> Seq Scan on z1 - Filter: (((a % 2) = 0) AND f_leak(b)) -(7 rows) - -SET SESSION AUTHORIZATION regress_rls_carol; -SELECT * FROM z1 WHERE f_leak(b); -NOTICE: f_leak => aba -NOTICE: f_leak => ccc - a | b ----+----- - 1 | aba - 3 | ccc -(2 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM z1 WHERE f_leak(b); - QUERY PLAN ------------------------------------------ - Seq Scan on z1 - Filter: (((a % 2) = 1) AND f_leak(b)) -(2 rows) - -EXPLAIN (COSTS OFF) EXECUTE plancache_test; - QUERY PLAN ------------------------------------------ - 
Seq Scan on z1 - Filter: (((a % 2) = 1) AND f_leak(b)) -(2 rows) - -EXPLAIN (COSTS OFF) EXECUTE plancache_test2; - QUERY PLAN -------------------------------------------------- - Nested Loop - CTE q - -> Seq Scan on z1 - Filter: (((a % 2) = 1) AND f_leak(b)) - -> CTE Scan on q - -> Materialize - -> Seq Scan on z2 -(7 rows) - -EXPLAIN (COSTS OFF) EXECUTE plancache_test3; - QUERY PLAN ------------------------------------------------------ - Nested Loop - CTE q - -> Seq Scan on z2 - -> CTE Scan on q - -> Materialize - -> Seq Scan on z1 - Filter: (((a % 2) = 1) AND f_leak(b)) -(7 rows) - -SET ROLE regress_rls_group2; -SELECT * FROM z1 WHERE f_leak(b); -NOTICE: f_leak => aba -NOTICE: f_leak => ccc - a | b ----+----- - 1 | aba - 3 | ccc -(2 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM z1 WHERE f_leak(b); - QUERY PLAN ------------------------------------------ - Seq Scan on z1 - Filter: (((a % 2) = 1) AND f_leak(b)) -(2 rows) - -EXPLAIN (COSTS OFF) EXECUTE plancache_test; - QUERY PLAN ------------------------------------------ - Seq Scan on z1 - Filter: (((a % 2) = 1) AND f_leak(b)) -(2 rows) - -EXPLAIN (COSTS OFF) EXECUTE plancache_test2; - QUERY PLAN -------------------------------------------------- - Nested Loop - CTE q - -> Seq Scan on z1 - Filter: (((a % 2) = 1) AND f_leak(b)) - -> CTE Scan on q - -> Materialize - -> Seq Scan on z2 -(7 rows) - -EXPLAIN (COSTS OFF) EXECUTE plancache_test3; - QUERY PLAN ------------------------------------------------------ - Nested Loop - CTE q - -> Seq Scan on z2 - -> CTE Scan on q - -> Materialize - -> Seq Scan on z1 - Filter: (((a % 2) = 1) AND f_leak(b)) -(7 rows) - --- --- Views should follow policy for view owner. --- --- View and Table owner are the same. -SET SESSION AUTHORIZATION regress_rls_alice; -CREATE VIEW rls_view AS SELECT * FROM z1 WHERE f_leak(b); -GRANT SELECT ON rls_view TO regress_rls_bob; --- Query as role that is not owner of view or table. Should return all records. -SET SESSION AUTHORIZATION regress_rls_bob; -SELECT * FROM rls_view; -NOTICE: f_leak => aba -NOTICE: f_leak => bbb -NOTICE: f_leak => ccc -NOTICE: f_leak => dad - a | b ----+----- - 1 | aba - 2 | bbb - 3 | ccc - 4 | dad -(4 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; - QUERY PLAN ---------------------- - Seq Scan on z1 - Filter: f_leak(b) -(2 rows) - --- Query as view/table owner. Should return all records. -SET SESSION AUTHORIZATION regress_rls_alice; -SELECT * FROM rls_view; -NOTICE: f_leak => aba -NOTICE: f_leak => bbb -NOTICE: f_leak => ccc -NOTICE: f_leak => dad - a | b ----+----- - 1 | aba - 2 | bbb - 3 | ccc - 4 | dad -(4 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; - QUERY PLAN ---------------------- - Seq Scan on z1 - Filter: f_leak(b) -(2 rows) - -DROP VIEW rls_view; --- View and Table owners are different. -SET SESSION AUTHORIZATION regress_rls_bob; -CREATE VIEW rls_view AS SELECT * FROM z1 WHERE f_leak(b); -GRANT SELECT ON rls_view TO regress_rls_alice; --- Query as role that is not owner of view but is owner of table. --- Should return records based on view owner policies. -SET SESSION AUTHORIZATION regress_rls_alice; -SELECT * FROM rls_view; -NOTICE: f_leak => bbb -NOTICE: f_leak => dad - a | b ----+----- - 2 | bbb - 4 | dad -(2 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; - QUERY PLAN ------------------------------------------ - Seq Scan on z1 - Filter: (((a % 2) = 0) AND f_leak(b)) -(2 rows) - --- Query as role that is not owner of table but is owner of view. --- Should return records based on view owner policies. 
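The rls_view checks around this point exercise an ordinary view over an RLS-protected table: rows are filtered by the policies of the view owner, regardless of which role runs the query. A minimal sketch of that setup, using hypothetical names (rls_tab, app_role):

    CREATE VIEW owner_view AS SELECT * FROM rls_tab;
    GRANT SELECT ON owner_view TO app_role;
    -- app_role now sees the rows the view owner is allowed to see under
    -- the policies on rls_tab, not the rows app_role's own policies allow.
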
-SET SESSION AUTHORIZATION regress_rls_bob; -SELECT * FROM rls_view; -NOTICE: f_leak => bbb -NOTICE: f_leak => dad - a | b ----+----- - 2 | bbb - 4 | dad -(2 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; - QUERY PLAN ------------------------------------------ - Seq Scan on z1 - Filter: (((a % 2) = 0) AND f_leak(b)) -(2 rows) - --- Query as role that is not the owner of the table or view without permissions. -SET SESSION AUTHORIZATION regress_rls_carol; -SELECT * FROM rls_view; --fail - permission denied. -ERROR: permission denied for view rls_view -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied. -ERROR: permission denied for view rls_view --- Query as role that is not the owner of the table or view with permissions. -SET SESSION AUTHORIZATION regress_rls_bob; -GRANT SELECT ON rls_view TO regress_rls_carol; -SET SESSION AUTHORIZATION regress_rls_carol; -SELECT * FROM rls_view; -NOTICE: f_leak => bbb -NOTICE: f_leak => dad - a | b ----+----- - 2 | bbb - 4 | dad -(2 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; - QUERY PLAN ------------------------------------------ - Seq Scan on z1 - Filter: (((a % 2) = 0) AND f_leak(b)) -(2 rows) - --- Policy requiring access to another table. -SET SESSION AUTHORIZATION regress_rls_alice; -CREATE TABLE z1_blacklist (a int); -INSERT INTO z1_blacklist VALUES (3), (4); -CREATE POLICY p3 ON z1 AS RESTRICTIVE USING (a NOT IN (SELECT a FROM z1_blacklist)); --- Query as role that is not owner of table but is owner of view without permissions. -SET SESSION AUTHORIZATION regress_rls_bob; -SELECT * FROM rls_view; --fail - permission denied. -ERROR: permission denied for table z1_blacklist -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied. -ERROR: permission denied for table z1_blacklist --- Query as role that is not the owner of the table or view without permissions. -SET SESSION AUTHORIZATION regress_rls_carol; -SELECT * FROM rls_view; --fail - permission denied. -ERROR: permission denied for table z1_blacklist -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied. -ERROR: permission denied for table z1_blacklist --- Query as role that is not owner of table but is owner of view with permissions. -SET SESSION AUTHORIZATION regress_rls_alice; -GRANT SELECT ON z1_blacklist TO regress_rls_bob; -SET SESSION AUTHORIZATION regress_rls_bob; -SELECT * FROM rls_view; -NOTICE: f_leak => bbb - a | b ----+----- - 2 | bbb -(1 row) - -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; - QUERY PLAN ----------------------------------------------------------------------- - Seq Scan on z1 - Filter: ((NOT (hashed SubPlan 1)) AND ((a % 2) = 0) AND f_leak(b)) - SubPlan 1 - -> Seq Scan on z1_blacklist -(4 rows) - --- Query as role that is not the owner of the table or view with permissions. -SET SESSION AUTHORIZATION regress_rls_carol; -SELECT * FROM rls_view; -NOTICE: f_leak => bbb - a | b ----+----- - 2 | bbb -(1 row) - -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; - QUERY PLAN ----------------------------------------------------------------------- - Seq Scan on z1 - Filter: ((NOT (hashed SubPlan 1)) AND ((a % 2) = 0) AND f_leak(b)) - SubPlan 1 - -> Seq Scan on z1_blacklist -(4 rows) - -SET SESSION AUTHORIZATION regress_rls_alice; -REVOKE SELECT ON z1_blacklist FROM regress_rls_bob; -DROP POLICY p3 ON z1; -SET SESSION AUTHORIZATION regress_rls_bob; -DROP VIEW rls_view; --- --- Security invoker views should follow policy for current user. --- --- View and table owner are the same. 
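The block starting here repeats the same scenarios with a security_invoker view, where the policies are evaluated against the querying role rather than the view owner. A minimal sketch, again with hypothetical names:

    CREATE VIEW inv_view WITH (security_invoker = true) AS
        SELECT * FROM rls_tab;
    -- Each role selecting from inv_view gets its own policies on rls_tab
    -- applied; the table owner still sees every row.
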
-SET SESSION AUTHORIZATION regress_rls_alice; -CREATE VIEW rls_view WITH (security_invoker) AS - SELECT * FROM z1 WHERE f_leak(b); -GRANT SELECT ON rls_view TO regress_rls_bob; -GRANT SELECT ON rls_view TO regress_rls_carol; --- Query as table owner. Should return all records. -SELECT * FROM rls_view; -NOTICE: f_leak => aba -NOTICE: f_leak => bbb -NOTICE: f_leak => ccc -NOTICE: f_leak => dad - a | b ----+----- - 1 | aba - 2 | bbb - 3 | ccc - 4 | dad -(4 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; - QUERY PLAN ---------------------- - Seq Scan on z1 - Filter: f_leak(b) -(2 rows) - --- Queries as other users. --- Should return records based on current user's policies. -SET SESSION AUTHORIZATION regress_rls_bob; -SELECT * FROM rls_view; -NOTICE: f_leak => bbb -NOTICE: f_leak => dad - a | b ----+----- - 2 | bbb - 4 | dad -(2 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; - QUERY PLAN ------------------------------------------ - Seq Scan on z1 - Filter: (((a % 2) = 0) AND f_leak(b)) -(2 rows) - -SET SESSION AUTHORIZATION regress_rls_carol; -SELECT * FROM rls_view; -NOTICE: f_leak => aba -NOTICE: f_leak => ccc - a | b ----+----- - 1 | aba - 3 | ccc -(2 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; - QUERY PLAN ------------------------------------------ - Seq Scan on z1 - Filter: (((a % 2) = 1) AND f_leak(b)) -(2 rows) - --- View and table owners are different. -SET SESSION AUTHORIZATION regress_rls_alice; -DROP VIEW rls_view; -SET SESSION AUTHORIZATION regress_rls_bob; -CREATE VIEW rls_view WITH (security_invoker) AS - SELECT * FROM z1 WHERE f_leak(b); -GRANT SELECT ON rls_view TO regress_rls_alice; -GRANT SELECT ON rls_view TO regress_rls_carol; --- Query as table owner. Should return all records. -SET SESSION AUTHORIZATION regress_rls_alice; -SELECT * FROM rls_view; -NOTICE: f_leak => aba -NOTICE: f_leak => bbb -NOTICE: f_leak => ccc -NOTICE: f_leak => dad - a | b ----+----- - 1 | aba - 2 | bbb - 3 | ccc - 4 | dad -(4 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; - QUERY PLAN ---------------------- - Seq Scan on z1 - Filter: f_leak(b) -(2 rows) - --- Queries as other users. --- Should return records based on current user's policies. -SET SESSION AUTHORIZATION regress_rls_bob; -SELECT * FROM rls_view; -NOTICE: f_leak => bbb -NOTICE: f_leak => dad - a | b ----+----- - 2 | bbb - 4 | dad -(2 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; - QUERY PLAN ------------------------------------------ - Seq Scan on z1 - Filter: (((a % 2) = 0) AND f_leak(b)) -(2 rows) - -SET SESSION AUTHORIZATION regress_rls_carol; -SELECT * FROM rls_view; -NOTICE: f_leak => aba -NOTICE: f_leak => ccc - a | b ----+----- - 1 | aba - 3 | ccc -(2 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; - QUERY PLAN ------------------------------------------ - Seq Scan on z1 - Filter: (((a % 2) = 1) AND f_leak(b)) -(2 rows) - --- Policy requiring access to another table. -SET SESSION AUTHORIZATION regress_rls_alice; -CREATE POLICY p3 ON z1 AS RESTRICTIVE USING (a NOT IN (SELECT a FROM z1_blacklist)); --- Query as role that is not owner of table but is owner of view without permissions. -SET SESSION AUTHORIZATION regress_rls_bob; -SELECT * FROM rls_view; --fail - permission denied. -ERROR: permission denied for table z1_blacklist -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied. -ERROR: permission denied for table z1_blacklist --- Query as role that is not the owner of the table or view without permissions. 
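The permission-denied errors just above come from the restrictive policy's subquery on z1_blacklist: a policy expression is checked with the privileges of the role the policies are being applied for, so that role also needs SELECT on every relation the expression references. Sketch with hypothetical names (rls_tab, deny_list, app_role):

    CREATE POLICY deny_listed ON rls_tab AS RESTRICTIVE
        USING (a NOT IN (SELECT a FROM deny_list));
    GRANT SELECT ON deny_list TO app_role;
    -- without the GRANT, app_role's queries against rls_tab fail with
    -- "permission denied for table deny_list"
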
-SET SESSION AUTHORIZATION regress_rls_carol; -SELECT * FROM rls_view; --fail - permission denied. -ERROR: permission denied for table z1_blacklist -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied. -ERROR: permission denied for table z1_blacklist --- Query as role that is not owner of table but is owner of view with permissions. -SET SESSION AUTHORIZATION regress_rls_alice; -GRANT SELECT ON z1_blacklist TO regress_rls_bob; -SET SESSION AUTHORIZATION regress_rls_bob; -SELECT * FROM rls_view; -NOTICE: f_leak => bbb - a | b ----+----- - 2 | bbb -(1 row) - -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; - QUERY PLAN ----------------------------------------------------------------------- - Seq Scan on z1 - Filter: ((NOT (hashed SubPlan 1)) AND ((a % 2) = 0) AND f_leak(b)) - SubPlan 1 - -> Seq Scan on z1_blacklist -(4 rows) - --- Query as role that is not the owner of the table or view without permissions. -SET SESSION AUTHORIZATION regress_rls_carol; -SELECT * FROM rls_view; --fail - permission denied. -ERROR: permission denied for table z1_blacklist -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied. -ERROR: permission denied for table z1_blacklist --- Query as role that is not the owner of the table or view with permissions. -SET SESSION AUTHORIZATION regress_rls_alice; -GRANT SELECT ON z1_blacklist TO regress_rls_carol; -SET SESSION AUTHORIZATION regress_rls_carol; -SELECT * FROM rls_view; -NOTICE: f_leak => aba - a | b ----+----- - 1 | aba -(1 row) - -EXPLAIN (COSTS OFF) SELECT * FROM rls_view; - QUERY PLAN ----------------------------------------------------------------------- - Seq Scan on z1 - Filter: ((NOT (hashed SubPlan 1)) AND ((a % 2) = 1) AND f_leak(b)) - SubPlan 1 - -> Seq Scan on z1_blacklist -(4 rows) - -SET SESSION AUTHORIZATION regress_rls_bob; -DROP VIEW rls_view; --- --- Command specific --- -SET SESSION AUTHORIZATION regress_rls_alice; -CREATE TABLE x1 (a int, b text, c text); -GRANT ALL ON x1 TO PUBLIC; -INSERT INTO x1 VALUES - (1, 'abc', 'regress_rls_bob'), - (2, 'bcd', 'regress_rls_bob'), - (3, 'cde', 'regress_rls_carol'), - (4, 'def', 'regress_rls_carol'), - (5, 'efg', 'regress_rls_bob'), - (6, 'fgh', 'regress_rls_bob'), - (7, 'fgh', 'regress_rls_carol'), - (8, 'fgh', 'regress_rls_carol'); -CREATE POLICY p0 ON x1 FOR ALL USING (c = current_user); -CREATE POLICY p1 ON x1 FOR SELECT USING (a % 2 = 0); -CREATE POLICY p2 ON x1 FOR INSERT WITH CHECK (a % 2 = 1); -CREATE POLICY p3 ON x1 FOR UPDATE USING (a % 2 = 0); -CREATE POLICY p4 ON x1 FOR DELETE USING (a < 8); -ALTER TABLE x1 ENABLE ROW LEVEL SECURITY; -SET SESSION AUTHORIZATION regress_rls_bob; -SELECT * FROM x1 WHERE f_leak(b) ORDER BY a ASC; -NOTICE: f_leak => abc -NOTICE: f_leak => bcd -NOTICE: f_leak => def -NOTICE: f_leak => efg -NOTICE: f_leak => fgh -NOTICE: f_leak => fgh - a | b | c ----+-----+------------------- - 1 | abc | regress_rls_bob - 2 | bcd | regress_rls_bob - 4 | def | regress_rls_carol - 5 | efg | regress_rls_bob - 6 | fgh | regress_rls_bob - 8 | fgh | regress_rls_carol -(6 rows) - -UPDATE x1 SET b = b || '_updt' WHERE f_leak(b) RETURNING *; -NOTICE: f_leak => abc -NOTICE: f_leak => bcd -NOTICE: f_leak => def -NOTICE: f_leak => efg -NOTICE: f_leak => fgh -NOTICE: f_leak => fgh - a | b | c ----+----------+------------------- - 1 | abc_updt | regress_rls_bob - 2 | bcd_updt | regress_rls_bob - 4 | def_updt | regress_rls_carol - 5 | efg_updt | regress_rls_bob - 6 | fgh_updt | regress_rls_bob - 8 | fgh_updt | regress_rls_carol -(6 rows) - -SET SESSION AUTHORIZATION 
regress_rls_carol; -SELECT * FROM x1 WHERE f_leak(b) ORDER BY a ASC; -NOTICE: f_leak => cde -NOTICE: f_leak => fgh -NOTICE: f_leak => bcd_updt -NOTICE: f_leak => def_updt -NOTICE: f_leak => fgh_updt -NOTICE: f_leak => fgh_updt - a | b | c ----+----------+------------------- - 2 | bcd_updt | regress_rls_bob - 3 | cde | regress_rls_carol - 4 | def_updt | regress_rls_carol - 6 | fgh_updt | regress_rls_bob - 7 | fgh | regress_rls_carol - 8 | fgh_updt | regress_rls_carol -(6 rows) - -UPDATE x1 SET b = b || '_updt' WHERE f_leak(b) RETURNING *; -NOTICE: f_leak => cde -NOTICE: f_leak => fgh -NOTICE: f_leak => bcd_updt -NOTICE: f_leak => def_updt -NOTICE: f_leak => fgh_updt -NOTICE: f_leak => fgh_updt - a | b | c ----+---------------+------------------- - 3 | cde_updt | regress_rls_carol - 7 | fgh_updt | regress_rls_carol - 2 | bcd_updt_updt | regress_rls_bob - 4 | def_updt_updt | regress_rls_carol - 6 | fgh_updt_updt | regress_rls_bob - 8 | fgh_updt_updt | regress_rls_carol -(6 rows) - -DELETE FROM x1 WHERE f_leak(b) RETURNING *; -NOTICE: f_leak => cde_updt -NOTICE: f_leak => fgh_updt -NOTICE: f_leak => bcd_updt_updt -NOTICE: f_leak => def_updt_updt -NOTICE: f_leak => fgh_updt_updt -NOTICE: f_leak => fgh_updt_updt - a | b | c ----+---------------+------------------- - 3 | cde_updt | regress_rls_carol - 7 | fgh_updt | regress_rls_carol - 2 | bcd_updt_updt | regress_rls_bob - 4 | def_updt_updt | regress_rls_carol - 6 | fgh_updt_updt | regress_rls_bob - 8 | fgh_updt_updt | regress_rls_carol -(6 rows) - --- --- Duplicate Policy Names --- -SET SESSION AUTHORIZATION regress_rls_alice; -CREATE TABLE y1 (a int, b text); -CREATE TABLE y2 (a int, b text); -GRANT ALL ON y1, y2 TO regress_rls_bob; -CREATE POLICY p1 ON y1 FOR ALL USING (a % 2 = 0); -CREATE POLICY p2 ON y1 FOR SELECT USING (a > 2); -CREATE POLICY p1 ON y1 FOR SELECT USING (a % 2 = 1); --fail -ERROR: policy "p1" for table "y1" already exists -CREATE POLICY p1 ON y2 FOR ALL USING (a % 2 = 0); --OK -ALTER TABLE y1 ENABLE ROW LEVEL SECURITY; -ALTER TABLE y2 ENABLE ROW LEVEL SECURITY; --- --- Expression structure with SBV --- --- Create view as table owner. RLS should NOT be applied. -SET SESSION AUTHORIZATION regress_rls_alice; -CREATE VIEW rls_sbv WITH (security_barrier) AS - SELECT * FROM y1 WHERE f_leak(b); -EXPLAIN (COSTS OFF) SELECT * FROM rls_sbv WHERE (a = 1); - QUERY PLAN ------------------------------------ - Seq Scan on y1 - Filter: (f_leak(b) AND (a = 1)) -(2 rows) - -DROP VIEW rls_sbv; --- Create view as role that does not own table. RLS should be applied. 
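The rls_sbv checks contrast who owns a security_barrier view over an RLS-protected table: when the view owner also owns the table, no policy quals are added (the owner is not subject to her own policies), while a view owned by another role gets that role's policies ANDed in ahead of the leaky qual, as the next plan shows. Sketch with hypothetical names:

    CREATE VIEW sbv WITH (security_barrier) AS
        SELECT * FROM rls_tab WHERE leaky_fn(b);
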
-SET SESSION AUTHORIZATION regress_rls_bob; -CREATE VIEW rls_sbv WITH (security_barrier) AS - SELECT * FROM y1 WHERE f_leak(b); -EXPLAIN (COSTS OFF) SELECT * FROM rls_sbv WHERE (a = 1); - QUERY PLAN ------------------------------------------------------------------- - Seq Scan on y1 - Filter: ((a = 1) AND ((a > 2) OR ((a % 2) = 0)) AND f_leak(b)) -(2 rows) - -DROP VIEW rls_sbv; --- --- Expression structure --- -SET SESSION AUTHORIZATION regress_rls_alice; -INSERT INTO y2 (SELECT x, public.fipshash(x::text) FROM generate_series(0,20) x); -CREATE POLICY p2 ON y2 USING (a % 3 = 0); -CREATE POLICY p3 ON y2 USING (a % 4 = 0); -SET SESSION AUTHORIZATION regress_rls_bob; -SELECT * FROM y2 WHERE f_leak(b); -NOTICE: f_leak => 5feceb66ffc86f38d952786c6d696c79 -NOTICE: f_leak => d4735e3a265e16eee03f59718b9b5d03 -NOTICE: f_leak => 4e07408562bedb8b60ce05c1decfe3ad -NOTICE: f_leak => 4b227777d4dd1fc61c6f884f48641d02 -NOTICE: f_leak => e7f6c011776e8db7cd330b54174fd76f -NOTICE: f_leak => 2c624232cdd221771294dfbb310aca00 -NOTICE: f_leak => 19581e27de7ced00ff1ce50b2047e7a5 -NOTICE: f_leak => 4a44dc15364204a80fe80e9039455cc1 -NOTICE: f_leak => 6b51d431df5d7f141cbececcf79edf3d -NOTICE: f_leak => 8527a891e224136950ff32ca212b45bc -NOTICE: f_leak => e629fa6598d732768f7c726b4b621285 -NOTICE: f_leak => b17ef6d19c7a5b1ee83b907c595526dc -NOTICE: f_leak => 4ec9599fc203d176a301536c2e091a19 -NOTICE: f_leak => f5ca38f748a1d6eaf726b8a42fb575c3 - a | b -----+---------------------------------- - 0 | 5feceb66ffc86f38d952786c6d696c79 - 2 | d4735e3a265e16eee03f59718b9b5d03 - 3 | 4e07408562bedb8b60ce05c1decfe3ad - 4 | 4b227777d4dd1fc61c6f884f48641d02 - 6 | e7f6c011776e8db7cd330b54174fd76f - 8 | 2c624232cdd221771294dfbb310aca00 - 9 | 19581e27de7ced00ff1ce50b2047e7a5 - 10 | 4a44dc15364204a80fe80e9039455cc1 - 12 | 6b51d431df5d7f141cbececcf79edf3d - 14 | 8527a891e224136950ff32ca212b45bc - 15 | e629fa6598d732768f7c726b4b621285 - 16 | b17ef6d19c7a5b1ee83b907c595526dc - 18 | 4ec9599fc203d176a301536c2e091a19 - 20 | f5ca38f748a1d6eaf726b8a42fb575c3 -(14 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM y2 WHERE f_leak(b); - QUERY PLAN ------------------------------------------------------------------------------ - Seq Scan on y2 - Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) -(2 rows) - --- --- Qual push-down of leaky functions, when not referring to table --- -SELECT * FROM y2 WHERE f_leak('abc'); -NOTICE: f_leak => abc -NOTICE: f_leak => abc -NOTICE: f_leak => abc -NOTICE: f_leak => abc -NOTICE: f_leak => abc -NOTICE: f_leak => abc -NOTICE: f_leak => abc -NOTICE: f_leak => abc -NOTICE: f_leak => abc -NOTICE: f_leak => abc -NOTICE: f_leak => abc -NOTICE: f_leak => abc -NOTICE: f_leak => abc -NOTICE: f_leak => abc -NOTICE: f_leak => abc -NOTICE: f_leak => abc -NOTICE: f_leak => abc -NOTICE: f_leak => abc -NOTICE: f_leak => abc -NOTICE: f_leak => abc -NOTICE: f_leak => abc - a | b -----+---------------------------------- - 0 | 5feceb66ffc86f38d952786c6d696c79 - 2 | d4735e3a265e16eee03f59718b9b5d03 - 3 | 4e07408562bedb8b60ce05c1decfe3ad - 4 | 4b227777d4dd1fc61c6f884f48641d02 - 6 | e7f6c011776e8db7cd330b54174fd76f - 8 | 2c624232cdd221771294dfbb310aca00 - 9 | 19581e27de7ced00ff1ce50b2047e7a5 - 10 | 4a44dc15364204a80fe80e9039455cc1 - 12 | 6b51d431df5d7f141cbececcf79edf3d - 14 | 8527a891e224136950ff32ca212b45bc - 15 | e629fa6598d732768f7c726b4b621285 - 16 | b17ef6d19c7a5b1ee83b907c595526dc - 18 | 4ec9599fc203d176a301536c2e091a19 - 20 | f5ca38f748a1d6eaf726b8a42fb575c3 -(14 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM y2 
WHERE f_leak('abc'); - QUERY PLAN ---------------------------------------------------------------------------------------- - Seq Scan on y2 - Filter: (f_leak('abc'::text) AND (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0))) -(2 rows) - -CREATE TABLE test_qual_pushdown ( - abc text -); -INSERT INTO test_qual_pushdown VALUES ('abc'),('def'); -SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE f_leak(abc); -NOTICE: f_leak => abc -NOTICE: f_leak => def - a | b | abc ----+---+----- -(0 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE f_leak(abc); - QUERY PLAN -------------------------------------------------------------------------- - Hash Join - Hash Cond: (test_qual_pushdown.abc = y2.b) - -> Seq Scan on test_qual_pushdown - Filter: f_leak(abc) - -> Hash - -> Seq Scan on y2 - Filter: (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) -(7 rows) - -SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE f_leak(b); -NOTICE: f_leak => 5feceb66ffc86f38d952786c6d696c79 -NOTICE: f_leak => d4735e3a265e16eee03f59718b9b5d03 -NOTICE: f_leak => 4e07408562bedb8b60ce05c1decfe3ad -NOTICE: f_leak => 4b227777d4dd1fc61c6f884f48641d02 -NOTICE: f_leak => e7f6c011776e8db7cd330b54174fd76f -NOTICE: f_leak => 2c624232cdd221771294dfbb310aca00 -NOTICE: f_leak => 19581e27de7ced00ff1ce50b2047e7a5 -NOTICE: f_leak => 4a44dc15364204a80fe80e9039455cc1 -NOTICE: f_leak => 6b51d431df5d7f141cbececcf79edf3d -NOTICE: f_leak => 8527a891e224136950ff32ca212b45bc -NOTICE: f_leak => e629fa6598d732768f7c726b4b621285 -NOTICE: f_leak => b17ef6d19c7a5b1ee83b907c595526dc -NOTICE: f_leak => 4ec9599fc203d176a301536c2e091a19 -NOTICE: f_leak => f5ca38f748a1d6eaf726b8a42fb575c3 - a | b | abc ----+---+----- -(0 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE f_leak(b); - QUERY PLAN ------------------------------------------------------------------------------------------ - Hash Join - Hash Cond: (test_qual_pushdown.abc = y2.b) - -> Seq Scan on test_qual_pushdown - -> Hash - -> Seq Scan on y2 - Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) -(6 rows) - -DROP TABLE test_qual_pushdown; --- --- Plancache invalidate on user change. 
--- -RESET SESSION AUTHORIZATION; -DROP TABLE t1 CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table t2 -drop cascades to table t3 -CREATE TABLE t1 (a integer); -GRANT SELECT ON t1 TO regress_rls_bob, regress_rls_carol; -CREATE POLICY p1 ON t1 TO regress_rls_bob USING ((a % 2) = 0); -CREATE POLICY p2 ON t1 TO regress_rls_carol USING ((a % 4) = 0); -ALTER TABLE t1 ENABLE ROW LEVEL SECURITY; --- Prepare as regress_rls_bob -SET ROLE regress_rls_bob; -PREPARE role_inval AS SELECT * FROM t1; --- Check plan -EXPLAIN (COSTS OFF) EXECUTE role_inval; - QUERY PLAN -------------------------- - Seq Scan on t1 - Filter: ((a % 2) = 0) -(2 rows) - --- Change to regress_rls_carol -SET ROLE regress_rls_carol; --- Check plan- should be different -EXPLAIN (COSTS OFF) EXECUTE role_inval; - QUERY PLAN -------------------------- - Seq Scan on t1 - Filter: ((a % 4) = 0) -(2 rows) - --- Change back to regress_rls_bob -SET ROLE regress_rls_bob; --- Check plan- should be back to original -EXPLAIN (COSTS OFF) EXECUTE role_inval; - QUERY PLAN -------------------------- - Seq Scan on t1 - Filter: ((a % 2) = 0) -(2 rows) - --- --- CTE and RLS --- -RESET SESSION AUTHORIZATION; -DROP TABLE t1 CASCADE; -CREATE TABLE t1 (a integer, b text); -CREATE POLICY p1 ON t1 USING (a % 2 = 0); -ALTER TABLE t1 ENABLE ROW LEVEL SECURITY; -GRANT ALL ON t1 TO regress_rls_bob; -INSERT INTO t1 (SELECT x, public.fipshash(x::text) FROM generate_series(0,20) x); -SET SESSION AUTHORIZATION regress_rls_bob; -WITH cte1 AS MATERIALIZED (SELECT * FROM t1 WHERE f_leak(b)) SELECT * FROM cte1; -NOTICE: f_leak => 5feceb66ffc86f38d952786c6d696c79 -NOTICE: f_leak => d4735e3a265e16eee03f59718b9b5d03 -NOTICE: f_leak => 4b227777d4dd1fc61c6f884f48641d02 -NOTICE: f_leak => e7f6c011776e8db7cd330b54174fd76f -NOTICE: f_leak => 2c624232cdd221771294dfbb310aca00 -NOTICE: f_leak => 4a44dc15364204a80fe80e9039455cc1 -NOTICE: f_leak => 6b51d431df5d7f141cbececcf79edf3d -NOTICE: f_leak => 8527a891e224136950ff32ca212b45bc -NOTICE: f_leak => b17ef6d19c7a5b1ee83b907c595526dc -NOTICE: f_leak => 4ec9599fc203d176a301536c2e091a19 -NOTICE: f_leak => f5ca38f748a1d6eaf726b8a42fb575c3 - a | b -----+---------------------------------- - 0 | 5feceb66ffc86f38d952786c6d696c79 - 2 | d4735e3a265e16eee03f59718b9b5d03 - 4 | 4b227777d4dd1fc61c6f884f48641d02 - 6 | e7f6c011776e8db7cd330b54174fd76f - 8 | 2c624232cdd221771294dfbb310aca00 - 10 | 4a44dc15364204a80fe80e9039455cc1 - 12 | 6b51d431df5d7f141cbececcf79edf3d - 14 | 8527a891e224136950ff32ca212b45bc - 16 | b17ef6d19c7a5b1ee83b907c595526dc - 18 | 4ec9599fc203d176a301536c2e091a19 - 20 | f5ca38f748a1d6eaf726b8a42fb575c3 -(11 rows) - -EXPLAIN (COSTS OFF) -WITH cte1 AS MATERIALIZED (SELECT * FROM t1 WHERE f_leak(b)) SELECT * FROM cte1; - QUERY PLAN -------------------------------------------------- - CTE Scan on cte1 - CTE cte1 - -> Seq Scan on t1 - Filter: (((a % 2) = 0) AND f_leak(b)) -(4 rows) - -WITH cte1 AS (UPDATE t1 SET a = a + 1 RETURNING *) SELECT * FROM cte1; --fail -ERROR: new row violates row-level security policy for table "t1" -WITH cte1 AS (UPDATE t1 SET a = a RETURNING *) SELECT * FROM cte1; --ok - a | b -----+---------------------------------- - 0 | 5feceb66ffc86f38d952786c6d696c79 - 2 | d4735e3a265e16eee03f59718b9b5d03 - 4 | 4b227777d4dd1fc61c6f884f48641d02 - 6 | e7f6c011776e8db7cd330b54174fd76f - 8 | 2c624232cdd221771294dfbb310aca00 - 10 | 4a44dc15364204a80fe80e9039455cc1 - 12 | 6b51d431df5d7f141cbececcf79edf3d - 14 | 8527a891e224136950ff32ca212b45bc - 16 | b17ef6d19c7a5b1ee83b907c595526dc - 18 
| 4ec9599fc203d176a301536c2e091a19 - 20 | f5ca38f748a1d6eaf726b8a42fb575c3 -(11 rows) - -WITH cte1 AS (INSERT INTO t1 VALUES (21, 'Fail') RETURNING *) SELECT * FROM cte1; --fail -ERROR: new row violates row-level security policy for table "t1" -WITH cte1 AS (INSERT INTO t1 VALUES (20, 'Success') RETURNING *) SELECT * FROM cte1; --ok - a | b -----+--------- - 20 | Success -(1 row) - --- --- Rename Policy --- -RESET SESSION AUTHORIZATION; -ALTER POLICY p1 ON t1 RENAME TO p1; --fail -ERROR: policy "p1" for table "t1" already exists -SELECT polname, relname - FROM pg_policy pol - JOIN pg_class pc ON (pc.oid = pol.polrelid) - WHERE relname = 't1'; - polname | relname ----------+--------- - p1 | t1 -(1 row) - -ALTER POLICY p1 ON t1 RENAME TO p2; --ok -SELECT polname, relname - FROM pg_policy pol - JOIN pg_class pc ON (pc.oid = pol.polrelid) - WHERE relname = 't1'; - polname | relname ----------+--------- - p2 | t1 -(1 row) - --- --- Check INSERT SELECT --- -SET SESSION AUTHORIZATION regress_rls_bob; -CREATE TABLE t2 (a integer, b text); -INSERT INTO t2 (SELECT * FROM t1); -EXPLAIN (COSTS OFF) INSERT INTO t2 (SELECT * FROM t1); - QUERY PLAN -------------------------------- - Insert on t2 - -> Seq Scan on t1 - Filter: ((a % 2) = 0) -(3 rows) - -SELECT * FROM t2; - a | b -----+---------------------------------- - 0 | 5feceb66ffc86f38d952786c6d696c79 - 2 | d4735e3a265e16eee03f59718b9b5d03 - 4 | 4b227777d4dd1fc61c6f884f48641d02 - 6 | e7f6c011776e8db7cd330b54174fd76f - 8 | 2c624232cdd221771294dfbb310aca00 - 10 | 4a44dc15364204a80fe80e9039455cc1 - 12 | 6b51d431df5d7f141cbececcf79edf3d - 14 | 8527a891e224136950ff32ca212b45bc - 16 | b17ef6d19c7a5b1ee83b907c595526dc - 18 | 4ec9599fc203d176a301536c2e091a19 - 20 | f5ca38f748a1d6eaf726b8a42fb575c3 - 20 | Success -(12 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM t2; - QUERY PLAN ----------------- - Seq Scan on t2 -(1 row) - -CREATE TABLE t3 AS SELECT * FROM t1; -SELECT * FROM t3; - a | b -----+---------------------------------- - 0 | 5feceb66ffc86f38d952786c6d696c79 - 2 | d4735e3a265e16eee03f59718b9b5d03 - 4 | 4b227777d4dd1fc61c6f884f48641d02 - 6 | e7f6c011776e8db7cd330b54174fd76f - 8 | 2c624232cdd221771294dfbb310aca00 - 10 | 4a44dc15364204a80fe80e9039455cc1 - 12 | 6b51d431df5d7f141cbececcf79edf3d - 14 | 8527a891e224136950ff32ca212b45bc - 16 | b17ef6d19c7a5b1ee83b907c595526dc - 18 | 4ec9599fc203d176a301536c2e091a19 - 20 | f5ca38f748a1d6eaf726b8a42fb575c3 - 20 | Success -(12 rows) - -SELECT * INTO t4 FROM t1; -SELECT * FROM t4; - a | b -----+---------------------------------- - 0 | 5feceb66ffc86f38d952786c6d696c79 - 2 | d4735e3a265e16eee03f59718b9b5d03 - 4 | 4b227777d4dd1fc61c6f884f48641d02 - 6 | e7f6c011776e8db7cd330b54174fd76f - 8 | 2c624232cdd221771294dfbb310aca00 - 10 | 4a44dc15364204a80fe80e9039455cc1 - 12 | 6b51d431df5d7f141cbececcf79edf3d - 14 | 8527a891e224136950ff32ca212b45bc - 16 | b17ef6d19c7a5b1ee83b907c595526dc - 18 | 4ec9599fc203d176a301536c2e091a19 - 20 | f5ca38f748a1d6eaf726b8a42fb575c3 - 20 | Success -(12 rows) - --- --- RLS with JOIN --- -SET SESSION AUTHORIZATION regress_rls_alice; -CREATE TABLE blog (id integer, author text, post text); -CREATE TABLE comment (blog_id integer, message text); -GRANT ALL ON blog, comment TO regress_rls_bob; -CREATE POLICY blog_1 ON blog USING (id % 2 = 0); -ALTER TABLE blog ENABLE ROW LEVEL SECURITY; -INSERT INTO blog VALUES - (1, 'alice', 'blog #1'), - (2, 'bob', 'blog #1'), - (3, 'alice', 'blog #2'), - (4, 'alice', 'blog #3'), - (5, 'john', 'blog #1'); -INSERT INTO comment VALUES - (1, 'cool blog'), - (1, 'fun 
blog'), - (3, 'crazy blog'), - (5, 'what?'), - (4, 'insane!'), - (2, 'who did it?'); -SET SESSION AUTHORIZATION regress_rls_bob; --- Check RLS JOIN with Non-RLS. -SELECT id, author, message FROM blog JOIN comment ON id = blog_id; - id | author | message -----+--------+------------- - 4 | alice | insane! - 2 | bob | who did it? -(2 rows) - --- Check Non-RLS JOIN with RLS. -SELECT id, author, message FROM comment JOIN blog ON id = blog_id; - id | author | message -----+--------+------------- - 4 | alice | insane! - 2 | bob | who did it? -(2 rows) - -SET SESSION AUTHORIZATION regress_rls_alice; -CREATE POLICY comment_1 ON comment USING (blog_id < 4); -ALTER TABLE comment ENABLE ROW LEVEL SECURITY; -SET SESSION AUTHORIZATION regress_rls_bob; --- Check RLS JOIN RLS -SELECT id, author, message FROM blog JOIN comment ON id = blog_id; - id | author | message -----+--------+------------- - 2 | bob | who did it? -(1 row) - -SELECT id, author, message FROM comment JOIN blog ON id = blog_id; - id | author | message -----+--------+------------- - 2 | bob | who did it? -(1 row) - -SET SESSION AUTHORIZATION regress_rls_alice; -DROP TABLE blog, comment; --- --- Default Deny Policy --- -RESET SESSION AUTHORIZATION; -DROP POLICY p2 ON t1; -ALTER TABLE t1 OWNER TO regress_rls_alice; --- Check that default deny does not apply to superuser. -RESET SESSION AUTHORIZATION; -SELECT * FROM t1; - a | b -----+---------------------------------- - 1 | 6b86b273ff34fce19d6b804eff5a3f57 - 3 | 4e07408562bedb8b60ce05c1decfe3ad - 5 | ef2d127de37b942baad06145e54b0c61 - 7 | 7902699be42c8a8e46fbbb4501726517 - 9 | 19581e27de7ced00ff1ce50b2047e7a5 - 11 | 4fc82b26aecb47d2868c4efbe3581732 - 13 | 3fdba35f04dc8c462986c992bcf87554 - 15 | e629fa6598d732768f7c726b4b621285 - 17 | 4523540f1504cd17100c4835e85b7eef - 19 | 9400f1b21cb527d7fa3d3eabba93557a - 0 | 5feceb66ffc86f38d952786c6d696c79 - 2 | d4735e3a265e16eee03f59718b9b5d03 - 4 | 4b227777d4dd1fc61c6f884f48641d02 - 6 | e7f6c011776e8db7cd330b54174fd76f - 8 | 2c624232cdd221771294dfbb310aca00 - 10 | 4a44dc15364204a80fe80e9039455cc1 - 12 | 6b51d431df5d7f141cbececcf79edf3d - 14 | 8527a891e224136950ff32ca212b45bc - 16 | b17ef6d19c7a5b1ee83b907c595526dc - 18 | 4ec9599fc203d176a301536c2e091a19 - 20 | f5ca38f748a1d6eaf726b8a42fb575c3 - 20 | Success -(22 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM t1; - QUERY PLAN ----------------- - Seq Scan on t1 -(1 row) - --- Check that default deny does not apply to table owner. 
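The "Default Deny Policy" block drops the last policy on t1 while leaving row-level security enabled: superusers and the table owner still scan the whole table, but any other role falls back to a constant-false qual, which the later plans report as "One-Time Filter: false". A minimal sketch with a hypothetical table:

    ALTER TABLE some_tab ENABLE ROW LEVEL SECURITY;
    -- with no applicable policy, owner/superuser queries are unaffected,
    -- while every other role sees zero rows (default deny)
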
-SET SESSION AUTHORIZATION regress_rls_alice; -SELECT * FROM t1; - a | b -----+---------------------------------- - 1 | 6b86b273ff34fce19d6b804eff5a3f57 - 3 | 4e07408562bedb8b60ce05c1decfe3ad - 5 | ef2d127de37b942baad06145e54b0c61 - 7 | 7902699be42c8a8e46fbbb4501726517 - 9 | 19581e27de7ced00ff1ce50b2047e7a5 - 11 | 4fc82b26aecb47d2868c4efbe3581732 - 13 | 3fdba35f04dc8c462986c992bcf87554 - 15 | e629fa6598d732768f7c726b4b621285 - 17 | 4523540f1504cd17100c4835e85b7eef - 19 | 9400f1b21cb527d7fa3d3eabba93557a - 0 | 5feceb66ffc86f38d952786c6d696c79 - 2 | d4735e3a265e16eee03f59718b9b5d03 - 4 | 4b227777d4dd1fc61c6f884f48641d02 - 6 | e7f6c011776e8db7cd330b54174fd76f - 8 | 2c624232cdd221771294dfbb310aca00 - 10 | 4a44dc15364204a80fe80e9039455cc1 - 12 | 6b51d431df5d7f141cbececcf79edf3d - 14 | 8527a891e224136950ff32ca212b45bc - 16 | b17ef6d19c7a5b1ee83b907c595526dc - 18 | 4ec9599fc203d176a301536c2e091a19 - 20 | f5ca38f748a1d6eaf726b8a42fb575c3 - 20 | Success -(22 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM t1; - QUERY PLAN ----------------- - Seq Scan on t1 -(1 row) - --- Check that default deny applies to non-owner/non-superuser when RLS on. -SET SESSION AUTHORIZATION regress_rls_bob; -SET row_security TO ON; -SELECT * FROM t1; - a | b ----+--- -(0 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM t1; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -SET SESSION AUTHORIZATION regress_rls_bob; -SELECT * FROM t1; - a | b ----+--- -(0 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM t1; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - --- --- COPY TO/FROM --- -RESET SESSION AUTHORIZATION; -DROP TABLE copy_t CASCADE; -ERROR: table "copy_t" does not exist -CREATE TABLE copy_t (a integer, b text); -CREATE POLICY p1 ON copy_t USING (a % 2 = 0); -ALTER TABLE copy_t ENABLE ROW LEVEL SECURITY; -GRANT ALL ON copy_t TO regress_rls_bob, regress_rls_exempt_user; -INSERT INTO copy_t (SELECT x, public.fipshash(x::text) FROM generate_series(0,10) x); --- Check COPY TO as Superuser/owner. -RESET SESSION AUTHORIZATION; -SET row_security TO OFF; -COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; -0,5feceb66ffc86f38d952786c6d696c79 -1,6b86b273ff34fce19d6b804eff5a3f57 -2,d4735e3a265e16eee03f59718b9b5d03 -3,4e07408562bedb8b60ce05c1decfe3ad -4,4b227777d4dd1fc61c6f884f48641d02 -5,ef2d127de37b942baad06145e54b0c61 -6,e7f6c011776e8db7cd330b54174fd76f -7,7902699be42c8a8e46fbbb4501726517 -8,2c624232cdd221771294dfbb310aca00 -9,19581e27de7ced00ff1ce50b2047e7a5 -10,4a44dc15364204a80fe80e9039455cc1 -SET row_security TO ON; -COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; -0,5feceb66ffc86f38d952786c6d696c79 -1,6b86b273ff34fce19d6b804eff5a3f57 -2,d4735e3a265e16eee03f59718b9b5d03 -3,4e07408562bedb8b60ce05c1decfe3ad -4,4b227777d4dd1fc61c6f884f48641d02 -5,ef2d127de37b942baad06145e54b0c61 -6,e7f6c011776e8db7cd330b54174fd76f -7,7902699be42c8a8e46fbbb4501726517 -8,2c624232cdd221771294dfbb310aca00 -9,19581e27de7ced00ff1ce50b2047e7a5 -10,4a44dc15364204a80fe80e9039455cc1 --- Check COPY TO as user with permissions. 
-SET SESSION AUTHORIZATION regress_rls_bob; -SET row_security TO OFF; -COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS -ERROR: query would be affected by row-level security policy for table "copy_t" -SET row_security TO ON; -COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok -0,5feceb66ffc86f38d952786c6d696c79 -2,d4735e3a265e16eee03f59718b9b5d03 -4,4b227777d4dd1fc61c6f884f48641d02 -6,e7f6c011776e8db7cd330b54174fd76f -8,2c624232cdd221771294dfbb310aca00 -10,4a44dc15364204a80fe80e9039455cc1 --- Check COPY TO as user with permissions and BYPASSRLS -SET SESSION AUTHORIZATION regress_rls_exempt_user; -SET row_security TO OFF; -COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok -0,5feceb66ffc86f38d952786c6d696c79 -1,6b86b273ff34fce19d6b804eff5a3f57 -2,d4735e3a265e16eee03f59718b9b5d03 -3,4e07408562bedb8b60ce05c1decfe3ad -4,4b227777d4dd1fc61c6f884f48641d02 -5,ef2d127de37b942baad06145e54b0c61 -6,e7f6c011776e8db7cd330b54174fd76f -7,7902699be42c8a8e46fbbb4501726517 -8,2c624232cdd221771294dfbb310aca00 -9,19581e27de7ced00ff1ce50b2047e7a5 -10,4a44dc15364204a80fe80e9039455cc1 -SET row_security TO ON; -COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok -0,5feceb66ffc86f38d952786c6d696c79 -1,6b86b273ff34fce19d6b804eff5a3f57 -2,d4735e3a265e16eee03f59718b9b5d03 -3,4e07408562bedb8b60ce05c1decfe3ad -4,4b227777d4dd1fc61c6f884f48641d02 -5,ef2d127de37b942baad06145e54b0c61 -6,e7f6c011776e8db7cd330b54174fd76f -7,7902699be42c8a8e46fbbb4501726517 -8,2c624232cdd221771294dfbb310aca00 -9,19581e27de7ced00ff1ce50b2047e7a5 -10,4a44dc15364204a80fe80e9039455cc1 --- Check COPY TO as user without permissions. SET row_security TO OFF; -SET SESSION AUTHORIZATION regress_rls_carol; -SET row_security TO OFF; -COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS -ERROR: query would be affected by row-level security policy for table "copy_t" -SET row_security TO ON; -COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - permission denied -ERROR: permission denied for table copy_t --- Check COPY relation TO; keep it just one row to avoid reordering issues -RESET SESSION AUTHORIZATION; -SET row_security TO ON; -CREATE TABLE copy_rel_to (a integer, b text); -CREATE POLICY p1 ON copy_rel_to USING (a % 2 = 0); -ALTER TABLE copy_rel_to ENABLE ROW LEVEL SECURITY; -GRANT ALL ON copy_rel_to TO regress_rls_bob, regress_rls_exempt_user; -INSERT INTO copy_rel_to VALUES (1, public.fipshash('1')); --- Check COPY TO as Superuser/owner. -RESET SESSION AUTHORIZATION; -SET row_security TO OFF; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; -1,6b86b273ff34fce19d6b804eff5a3f57 -SET row_security TO ON; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; -1,6b86b273ff34fce19d6b804eff5a3f57 --- Check COPY TO as user with permissions. 
-SET SESSION AUTHORIZATION regress_rls_bob; -SET row_security TO OFF; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS -ERROR: query would be affected by row-level security policy for table "copy_rel_to" -SET row_security TO ON; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok --- Check COPY TO as user with permissions and BYPASSRLS -SET SESSION AUTHORIZATION regress_rls_exempt_user; -SET row_security TO OFF; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok -1,6b86b273ff34fce19d6b804eff5a3f57 -SET row_security TO ON; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok -1,6b86b273ff34fce19d6b804eff5a3f57 --- Check COPY TO as user without permissions. SET row_security TO OFF; -SET SESSION AUTHORIZATION regress_rls_carol; -SET row_security TO OFF; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied -ERROR: permission denied for table copy_rel_to -SET row_security TO ON; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied -ERROR: permission denied for table copy_rel_to --- Check behavior with a child table. -RESET SESSION AUTHORIZATION; -SET row_security TO ON; -CREATE TABLE copy_rel_to_child () INHERITS (copy_rel_to); -INSERT INTO copy_rel_to_child VALUES (1, 'one'), (2, 'two'); --- Check COPY TO as Superuser/owner. -RESET SESSION AUTHORIZATION; -SET row_security TO OFF; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; -1,6b86b273ff34fce19d6b804eff5a3f57 -SET row_security TO ON; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; -1,6b86b273ff34fce19d6b804eff5a3f57 --- Check COPY TO as user with permissions. -SET SESSION AUTHORIZATION regress_rls_bob; -SET row_security TO OFF; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS -ERROR: query would be affected by row-level security policy for table "copy_rel_to" -SET row_security TO ON; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok --- Check COPY TO as user with permissions and BYPASSRLS -SET SESSION AUTHORIZATION regress_rls_exempt_user; -SET row_security TO OFF; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok -1,6b86b273ff34fce19d6b804eff5a3f57 -SET row_security TO ON; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok -1,6b86b273ff34fce19d6b804eff5a3f57 --- Check COPY TO as user without permissions. SET row_security TO OFF; -SET SESSION AUTHORIZATION regress_rls_carol; -SET row_security TO OFF; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied -ERROR: permission denied for table copy_rel_to -SET row_security TO ON; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied -ERROR: permission denied for table copy_rel_to --- Check COPY FROM as Superuser/owner. -RESET SESSION AUTHORIZATION; -SET row_security TO OFF; -COPY copy_t FROM STDIN; --ok -SET row_security TO ON; -COPY copy_t FROM STDIN; --ok --- Check COPY FROM as user with permissions. -SET SESSION AUTHORIZATION regress_rls_bob; -SET row_security TO OFF; -COPY copy_t FROM STDIN; --fail - would be affected by RLS. -ERROR: query would be affected by row-level security policy for table "copy_t" -SET row_security TO ON; -COPY copy_t FROM STDIN; --fail - COPY FROM not supported by RLS. -ERROR: COPY FROM not supported with row-level security -HINT: Use INSERT statements instead. --- Check COPY FROM as user with permissions and BYPASSRLS -SET SESSION AUTHORIZATION regress_rls_exempt_user; -SET row_security TO ON; -COPY copy_t FROM STDIN; --ok --- Check COPY FROM as user without permissions. 
-SET SESSION AUTHORIZATION regress_rls_carol; -SET row_security TO OFF; -COPY copy_t FROM STDIN; --fail - permission denied. -ERROR: permission denied for table copy_t -SET row_security TO ON; -COPY copy_t FROM STDIN; --fail - permission denied. -ERROR: permission denied for table copy_t -RESET SESSION AUTHORIZATION; -DROP TABLE copy_t; -DROP TABLE copy_rel_to CASCADE; -NOTICE: drop cascades to table copy_rel_to_child --- Check WHERE CURRENT OF -SET SESSION AUTHORIZATION regress_rls_alice; -CREATE TABLE current_check (currentid int, payload text, rlsuser text); -GRANT ALL ON current_check TO PUBLIC; -INSERT INTO current_check VALUES - (1, 'abc', 'regress_rls_bob'), - (2, 'bcd', 'regress_rls_bob'), - (3, 'cde', 'regress_rls_bob'), - (4, 'def', 'regress_rls_bob'); -CREATE POLICY p1 ON current_check FOR SELECT USING (currentid % 2 = 0); -CREATE POLICY p2 ON current_check FOR DELETE USING (currentid = 4 AND rlsuser = current_user); -CREATE POLICY p3 ON current_check FOR UPDATE USING (currentid = 4) WITH CHECK (rlsuser = current_user); -ALTER TABLE current_check ENABLE ROW LEVEL SECURITY; -SET SESSION AUTHORIZATION regress_rls_bob; --- Can SELECT even rows -SELECT * FROM current_check; - currentid | payload | rlsuser ------------+---------+----------------- - 2 | bcd | regress_rls_bob - 4 | def | regress_rls_bob -(2 rows) - --- Cannot UPDATE row 2 -UPDATE current_check SET payload = payload || '_new' WHERE currentid = 2 RETURNING *; - currentid | payload | rlsuser ------------+---------+--------- -(0 rows) - -BEGIN; -DECLARE current_check_cursor SCROLL CURSOR FOR SELECT * FROM current_check; --- Returns rows that can be seen according to SELECT policy, like plain SELECT --- above (even rows) -FETCH ABSOLUTE 1 FROM current_check_cursor; - currentid | payload | rlsuser ------------+---------+----------------- - 2 | bcd | regress_rls_bob -(1 row) - --- Still cannot UPDATE row 2 through cursor -UPDATE current_check SET payload = payload || '_new' WHERE CURRENT OF current_check_cursor RETURNING *; - currentid | payload | rlsuser ------------+---------+--------- -(0 rows) - --- Can update row 4 through cursor, which is the next visible row -FETCH RELATIVE 1 FROM current_check_cursor; - currentid | payload | rlsuser ------------+---------+----------------- - 4 | def | regress_rls_bob -(1 row) - -UPDATE current_check SET payload = payload || '_new' WHERE CURRENT OF current_check_cursor RETURNING *; - currentid | payload | rlsuser ------------+---------+----------------- - 4 | def_new | regress_rls_bob -(1 row) - -SELECT * FROM current_check; - currentid | payload | rlsuser ------------+---------+----------------- - 2 | bcd | regress_rls_bob - 4 | def_new | regress_rls_bob -(2 rows) - --- Plan should be a subquery TID scan -EXPLAIN (COSTS OFF) UPDATE current_check SET payload = payload WHERE CURRENT OF current_check_cursor; - QUERY PLAN -------------------------------------------------------------- - Update on current_check - -> Tid Scan on current_check - TID Cond: CURRENT OF current_check_cursor - Filter: ((currentid = 4) AND ((currentid % 2) = 0)) -(4 rows) - --- Similarly can only delete row 4 -FETCH ABSOLUTE 1 FROM current_check_cursor; - currentid | payload | rlsuser ------------+---------+----------------- - 2 | bcd | regress_rls_bob -(1 row) - -DELETE FROM current_check WHERE CURRENT OF current_check_cursor RETURNING *; - currentid | payload | rlsuser ------------+---------+--------- -(0 rows) - -FETCH RELATIVE 1 FROM current_check_cursor; - currentid | payload | rlsuser 
------------+---------+----------------- - 4 | def | regress_rls_bob -(1 row) - -DELETE FROM current_check WHERE CURRENT OF current_check_cursor RETURNING *; - currentid | payload | rlsuser ------------+---------+----------------- - 4 | def_new | regress_rls_bob -(1 row) - -SELECT * FROM current_check; - currentid | payload | rlsuser ------------+---------+----------------- - 2 | bcd | regress_rls_bob -(1 row) - -COMMIT; --- --- check pg_stats view filtering --- -SET row_security TO ON; -SET SESSION AUTHORIZATION regress_rls_alice; -ANALYZE current_check; --- Stats visible -SELECT row_security_active('current_check'); - row_security_active ---------------------- - f -(1 row) - -SELECT attname, most_common_vals FROM pg_stats - WHERE tablename = 'current_check' - ORDER BY 1; - attname | most_common_vals ------------+------------------- - currentid | - payload | - rlsuser | {regress_rls_bob} -(3 rows) - -SET SESSION AUTHORIZATION regress_rls_bob; --- Stats not visible -SELECT row_security_active('current_check'); - row_security_active ---------------------- - t -(1 row) - -SELECT attname, most_common_vals FROM pg_stats - WHERE tablename = 'current_check' - ORDER BY 1; - attname | most_common_vals ----------+------------------ -(0 rows) - --- --- Collation support --- -BEGIN; -CREATE TABLE coll_t (c) AS VALUES ('bar'::text); -CREATE POLICY coll_p ON coll_t USING (c < ('foo'::text COLLATE "C")); -ALTER TABLE coll_t ENABLE ROW LEVEL SECURITY; -GRANT SELECT ON coll_t TO regress_rls_alice; -SELECT (string_to_array(polqual, ':'))[7] AS inputcollid FROM pg_policy WHERE polrelid = 'coll_t'::regclass; - inputcollid ------------------- - inputcollid 950 -(1 row) - -SET SESSION AUTHORIZATION regress_rls_alice; -SELECT * FROM coll_t; - c ------ - bar -(1 row) - -ROLLBACK; --- --- Shared Object Dependencies --- -RESET SESSION AUTHORIZATION; -BEGIN; -CREATE ROLE regress_rls_eve; -CREATE ROLE regress_rls_frank; -CREATE TABLE tbl1 (c) AS VALUES ('bar'::text); -GRANT SELECT ON TABLE tbl1 TO regress_rls_eve; -CREATE POLICY P ON tbl1 TO regress_rls_eve, regress_rls_frank USING (true); -SELECT refclassid::regclass, deptype - FROM pg_depend - WHERE classid = 'pg_policy'::regclass - AND refobjid = 'tbl1'::regclass; - refclassid | deptype -------------+--------- - pg_class | a -(1 row) - -SELECT refclassid::regclass, deptype - FROM pg_shdepend - WHERE classid = 'pg_policy'::regclass - AND refobjid IN ('regress_rls_eve'::regrole, 'regress_rls_frank'::regrole); - refclassid | deptype -------------+--------- - pg_authid | r - pg_authid | r -(2 rows) - -SAVEPOINT q; -DROP ROLE regress_rls_eve; --fails due to dependency on POLICY p -ERROR: role "regress_rls_eve" cannot be dropped because some objects depend on it -DETAIL: privileges for table tbl1 -target of policy p on table tbl1 -ROLLBACK TO q; -ALTER POLICY p ON tbl1 TO regress_rls_frank USING (true); -SAVEPOINT q; -DROP ROLE regress_rls_eve; --fails due to dependency on GRANT SELECT -ERROR: role "regress_rls_eve" cannot be dropped because some objects depend on it -DETAIL: privileges for table tbl1 -ROLLBACK TO q; -REVOKE ALL ON TABLE tbl1 FROM regress_rls_eve; -SAVEPOINT q; -DROP ROLE regress_rls_eve; --succeeds -ROLLBACK TO q; -SAVEPOINT q; -DROP ROLE regress_rls_frank; --fails due to dependency on POLICY p -ERROR: role "regress_rls_frank" cannot be dropped because some objects depend on it -DETAIL: target of policy p on table tbl1 -ROLLBACK TO q; -DROP POLICY p ON tbl1; -SAVEPOINT q; -DROP ROLE regress_rls_frank; -- succeeds -ROLLBACK TO q; -ROLLBACK; -- cleanup 
--- --- Policy expression handling --- -BEGIN; -CREATE TABLE t (c) AS VALUES ('bar'::text); -CREATE POLICY p ON t USING (max(c)); -- fails: aggregate functions are not allowed in policy expressions -ERROR: aggregate functions are not allowed in policy expressions -ROLLBACK; --- --- Non-target relations are only subject to SELECT policies --- -SET SESSION AUTHORIZATION regress_rls_alice; -CREATE TABLE r1 (a int); -CREATE TABLE r2 (a int); -INSERT INTO r1 VALUES (10), (20); -INSERT INTO r2 VALUES (10), (20); -GRANT ALL ON r1, r2 TO regress_rls_bob; -CREATE POLICY p1 ON r1 USING (true); -ALTER TABLE r1 ENABLE ROW LEVEL SECURITY; -CREATE POLICY p1 ON r2 FOR SELECT USING (true); -CREATE POLICY p2 ON r2 FOR INSERT WITH CHECK (false); -CREATE POLICY p3 ON r2 FOR UPDATE USING (false); -CREATE POLICY p4 ON r2 FOR DELETE USING (false); -ALTER TABLE r2 ENABLE ROW LEVEL SECURITY; -SET SESSION AUTHORIZATION regress_rls_bob; -SELECT * FROM r1; - a ----- - 10 - 20 -(2 rows) - -SELECT * FROM r2; - a ----- - 10 - 20 -(2 rows) - --- r2 is read-only -INSERT INTO r2 VALUES (2); -- Not allowed -ERROR: new row violates row-level security policy for table "r2" -UPDATE r2 SET a = 2 RETURNING *; -- Updates nothing - a ---- -(0 rows) - -DELETE FROM r2 RETURNING *; -- Deletes nothing - a ---- -(0 rows) - --- r2 can be used as a non-target relation in DML -INSERT INTO r1 SELECT a + 1 FROM r2 RETURNING *; -- OK - a ----- - 11 - 21 -(2 rows) - -UPDATE r1 SET a = r2.a + 2 FROM r2 WHERE r1.a = r2.a RETURNING *; -- OK - a | a -----+---- - 12 | 10 - 22 | 20 -(2 rows) - -DELETE FROM r1 USING r2 WHERE r1.a = r2.a + 2 RETURNING *; -- OK - a | a -----+---- - 12 | 10 - 22 | 20 -(2 rows) - -SELECT * FROM r1; - a ----- - 11 - 21 -(2 rows) - -SELECT * FROM r2; - a ----- - 10 - 20 -(2 rows) - -SET SESSION AUTHORIZATION regress_rls_alice; -DROP TABLE r1; -DROP TABLE r2; --- --- FORCE ROW LEVEL SECURITY applies RLS to owners too --- -SET SESSION AUTHORIZATION regress_rls_alice; -SET row_security = on; -CREATE TABLE r1 (a int); -INSERT INTO r1 VALUES (10), (20); -CREATE POLICY p1 ON r1 USING (false); -ALTER TABLE r1 ENABLE ROW LEVEL SECURITY; -ALTER TABLE r1 FORCE ROW LEVEL SECURITY; --- No error, but no rows -TABLE r1; - a ---- -(0 rows) - --- RLS error -INSERT INTO r1 VALUES (1); -ERROR: new row violates row-level security policy for table "r1" --- No error (unable to see any rows to update) -UPDATE r1 SET a = 1; -TABLE r1; - a ---- -(0 rows) - --- No error (unable to see any rows to delete) -DELETE FROM r1; -TABLE r1; - a ---- -(0 rows) - -SET row_security = off; --- these all fail, would be affected by RLS -TABLE r1; -ERROR: query would be affected by row-level security policy for table "r1" -HINT: To disable the policy for the table's owner, use ALTER TABLE NO FORCE ROW LEVEL SECURITY. -UPDATE r1 SET a = 1; -ERROR: query would be affected by row-level security policy for table "r1" -HINT: To disable the policy for the table's owner, use ALTER TABLE NO FORCE ROW LEVEL SECURITY. -DELETE FROM r1; -ERROR: query would be affected by row-level security policy for table "r1" -HINT: To disable the policy for the table's owner, use ALTER TABLE NO FORCE ROW LEVEL SECURITY. 
-DROP TABLE r1; --- --- FORCE ROW LEVEL SECURITY does not break RI --- -SET SESSION AUTHORIZATION regress_rls_alice; -SET row_security = on; -CREATE TABLE r1 (a int PRIMARY KEY); -CREATE TABLE r2 (a int REFERENCES r1); -INSERT INTO r1 VALUES (10), (20); -INSERT INTO r2 VALUES (10), (20); --- Create policies on r2 which prevent the --- owner from seeing any rows, but RI should --- still see them. -CREATE POLICY p1 ON r2 USING (false); -ALTER TABLE r2 ENABLE ROW LEVEL SECURITY; -ALTER TABLE r2 FORCE ROW LEVEL SECURITY; --- Errors due to rows in r2 -DELETE FROM r1; -ERROR: update or delete on table "r1" violates foreign key constraint "r2_a_fkey" on table "r2" -DETAIL: Key (a)=(10) is still referenced from table "r2". --- Reset r2 to no-RLS -DROP POLICY p1 ON r2; -ALTER TABLE r2 NO FORCE ROW LEVEL SECURITY; -ALTER TABLE r2 DISABLE ROW LEVEL SECURITY; --- clean out r2 for INSERT test below -DELETE FROM r2; --- Change r1 to not allow rows to be seen -CREATE POLICY p1 ON r1 USING (false); -ALTER TABLE r1 ENABLE ROW LEVEL SECURITY; -ALTER TABLE r1 FORCE ROW LEVEL SECURITY; --- No rows seen -TABLE r1; - a ---- -(0 rows) - --- No error, RI still sees that row exists in r1 -INSERT INTO r2 VALUES (10); -DROP TABLE r2; -DROP TABLE r1; --- Ensure cascaded DELETE works -CREATE TABLE r1 (a int PRIMARY KEY); -CREATE TABLE r2 (a int REFERENCES r1 ON DELETE CASCADE); -INSERT INTO r1 VALUES (10), (20); -INSERT INTO r2 VALUES (10), (20); --- Create policies on r2 which prevent the --- owner from seeing any rows, but RI should --- still see them. -CREATE POLICY p1 ON r2 USING (false); -ALTER TABLE r2 ENABLE ROW LEVEL SECURITY; -ALTER TABLE r2 FORCE ROW LEVEL SECURITY; --- Deletes all records from both -DELETE FROM r1; --- Remove FORCE from r2 -ALTER TABLE r2 NO FORCE ROW LEVEL SECURITY; --- As owner, we now bypass RLS --- verify no rows in r2 now -TABLE r2; - a ---- -(0 rows) - -DROP TABLE r2; -DROP TABLE r1; --- Ensure cascaded UPDATE works -CREATE TABLE r1 (a int PRIMARY KEY); -CREATE TABLE r2 (a int REFERENCES r1 ON UPDATE CASCADE); -INSERT INTO r1 VALUES (10), (20); -INSERT INTO r2 VALUES (10), (20); --- Create policies on r2 which prevent the --- owner from seeing any rows, but RI should --- still see them. -CREATE POLICY p1 ON r2 USING (false); -ALTER TABLE r2 ENABLE ROW LEVEL SECURITY; -ALTER TABLE r2 FORCE ROW LEVEL SECURITY; --- Updates records in both -UPDATE r1 SET a = a+5; --- Remove FORCE from r2 -ALTER TABLE r2 NO FORCE ROW LEVEL SECURITY; --- As owner, we now bypass RLS --- verify records in r2 updated -TABLE r2; - a ----- - 15 - 25 -(2 rows) - -DROP TABLE r2; -DROP TABLE r1; --- --- Test INSERT+RETURNING applies SELECT policies as --- WithCheckOptions (meaning an error is thrown) --- -SET SESSION AUTHORIZATION regress_rls_alice; -SET row_security = on; -CREATE TABLE r1 (a int); -CREATE POLICY p1 ON r1 FOR SELECT USING (false); -CREATE POLICY p2 ON r1 FOR INSERT WITH CHECK (true); -ALTER TABLE r1 ENABLE ROW LEVEL SECURITY; -ALTER TABLE r1 FORCE ROW LEVEL SECURITY; --- Works fine -INSERT INTO r1 VALUES (10), (20); --- No error, but no rows -TABLE r1; - a ---- -(0 rows) - -SET row_security = off; --- fail, would be affected by RLS -TABLE r1; -ERROR: query would be affected by row-level security policy for table "r1" -HINT: To disable the policy for the table's owner, use ALTER TABLE NO FORCE ROW LEVEL SECURITY. 
-SET row_security = on; --- Error -INSERT INTO r1 VALUES (10), (20) RETURNING *; -ERROR: new row violates row-level security policy for table "r1" -DROP TABLE r1; --- --- Test UPDATE+RETURNING applies SELECT policies as --- WithCheckOptions (meaning an error is thrown) --- -SET SESSION AUTHORIZATION regress_rls_alice; -SET row_security = on; -CREATE TABLE r1 (a int PRIMARY KEY); -CREATE POLICY p1 ON r1 FOR SELECT USING (a < 20); -CREATE POLICY p2 ON r1 FOR UPDATE USING (a < 20) WITH CHECK (true); -CREATE POLICY p3 ON r1 FOR INSERT WITH CHECK (true); -INSERT INTO r1 VALUES (10); -ALTER TABLE r1 ENABLE ROW LEVEL SECURITY; -ALTER TABLE r1 FORCE ROW LEVEL SECURITY; --- Works fine -UPDATE r1 SET a = 30; --- Show updated rows -ALTER TABLE r1 NO FORCE ROW LEVEL SECURITY; -TABLE r1; - a ----- - 30 -(1 row) - --- reset value in r1 for test with RETURNING -UPDATE r1 SET a = 10; --- Verify row reset -TABLE r1; - a ----- - 10 -(1 row) - -ALTER TABLE r1 FORCE ROW LEVEL SECURITY; --- Error -UPDATE r1 SET a = 30 RETURNING *; -ERROR: new row violates row-level security policy for table "r1" --- UPDATE path of INSERT ... ON CONFLICT DO UPDATE should also error out -INSERT INTO r1 VALUES (10) - ON CONFLICT (a) DO UPDATE SET a = 30 RETURNING *; -ERROR: new row violates row-level security policy for table "r1" --- Should still error out without RETURNING (use of arbiter always requires --- SELECT permissions) -INSERT INTO r1 VALUES (10) - ON CONFLICT (a) DO UPDATE SET a = 30; -ERROR: new row violates row-level security policy for table "r1" -INSERT INTO r1 VALUES (10) - ON CONFLICT ON CONSTRAINT r1_pkey DO UPDATE SET a = 30; -ERROR: new row violates row-level security policy for table "r1" -DROP TABLE r1; --- Check dependency handling -RESET SESSION AUTHORIZATION; -CREATE TABLE dep1 (c1 int); -CREATE TABLE dep2 (c1 int); -CREATE POLICY dep_p1 ON dep1 TO regress_rls_bob USING (c1 > (select max(dep2.c1) from dep2)); -ALTER POLICY dep_p1 ON dep1 TO regress_rls_bob,regress_rls_carol; --- Should return one -SELECT count(*) = 1 FROM pg_depend - WHERE objid = (SELECT oid FROM pg_policy WHERE polname = 'dep_p1') - AND refobjid = (SELECT oid FROM pg_class WHERE relname = 'dep2'); - ?column? ----------- - t -(1 row) - -ALTER POLICY dep_p1 ON dep1 USING (true); --- Should return one -SELECT count(*) = 1 FROM pg_shdepend - WHERE objid = (SELECT oid FROM pg_policy WHERE polname = 'dep_p1') - AND refobjid = (SELECT oid FROM pg_authid WHERE rolname = 'regress_rls_bob'); - ?column? ----------- - t -(1 row) - --- Should return one -SELECT count(*) = 1 FROM pg_shdepend - WHERE objid = (SELECT oid FROM pg_policy WHERE polname = 'dep_p1') - AND refobjid = (SELECT oid FROM pg_authid WHERE rolname = 'regress_rls_carol'); - ?column? ----------- - t -(1 row) - --- Should return zero -SELECT count(*) = 0 FROM pg_depend - WHERE objid = (SELECT oid FROM pg_policy WHERE polname = 'dep_p1') - AND refobjid = (SELECT oid FROM pg_class WHERE relname = 'dep2'); - ?column? 
----------- - t -(1 row) - --- DROP OWNED BY testing -RESET SESSION AUTHORIZATION; -CREATE ROLE regress_rls_dob_role1; -CREATE ROLE regress_rls_dob_role2; -CREATE TABLE dob_t1 (c1 int); -CREATE TABLE dob_t2 (c1 int) PARTITION BY RANGE (c1); -CREATE POLICY p1 ON dob_t1 TO regress_rls_dob_role1 USING (true); -DROP OWNED BY regress_rls_dob_role1; -DROP POLICY p1 ON dob_t1; -- should fail, already gone -ERROR: policy "p1" for table "dob_t1" does not exist -CREATE POLICY p1 ON dob_t1 TO regress_rls_dob_role1,regress_rls_dob_role2 USING (true); -DROP OWNED BY regress_rls_dob_role1; -DROP POLICY p1 ON dob_t1; -- should succeed --- same cases with duplicate polroles entries -CREATE POLICY p1 ON dob_t1 TO regress_rls_dob_role1,regress_rls_dob_role1 USING (true); -DROP OWNED BY regress_rls_dob_role1; -DROP POLICY p1 ON dob_t1; -- should fail, already gone -ERROR: policy "p1" for table "dob_t1" does not exist -CREATE POLICY p1 ON dob_t1 TO regress_rls_dob_role1,regress_rls_dob_role1,regress_rls_dob_role2 USING (true); -DROP OWNED BY regress_rls_dob_role1; -DROP POLICY p1 ON dob_t1; -- should succeed --- partitioned target -CREATE POLICY p1 ON dob_t2 TO regress_rls_dob_role1,regress_rls_dob_role2 USING (true); -DROP OWNED BY regress_rls_dob_role1; -DROP POLICY p1 ON dob_t2; -- should succeed -DROP USER regress_rls_dob_role1; -DROP USER regress_rls_dob_role2; --- Bug #15708: view + table with RLS should check policies as view owner -CREATE TABLE ref_tbl (a int); -INSERT INTO ref_tbl VALUES (1); -CREATE TABLE rls_tbl (a int); -INSERT INTO rls_tbl VALUES (10); -ALTER TABLE rls_tbl ENABLE ROW LEVEL SECURITY; -CREATE POLICY p1 ON rls_tbl USING (EXISTS (SELECT 1 FROM ref_tbl)); -GRANT SELECT ON ref_tbl TO regress_rls_bob; -GRANT SELECT ON rls_tbl TO regress_rls_bob; -CREATE VIEW rls_view AS SELECT * FROM rls_tbl; -ALTER VIEW rls_view OWNER TO regress_rls_bob; -GRANT SELECT ON rls_view TO regress_rls_alice; -SET SESSION AUTHORIZATION regress_rls_alice; -SELECT * FROM ref_tbl; -- Permission denied -ERROR: permission denied for table ref_tbl -SELECT * FROM rls_tbl; -- Permission denied -ERROR: permission denied for table rls_tbl -SELECT * FROM rls_view; -- OK - a ----- - 10 -(1 row) - -RESET SESSION AUTHORIZATION; -DROP VIEW rls_view; -DROP TABLE rls_tbl; -DROP TABLE ref_tbl; --- Leaky operator test -CREATE TABLE rls_tbl (a int); -INSERT INTO rls_tbl SELECT x/10 FROM generate_series(1, 100) x; -ANALYZE rls_tbl; -ALTER TABLE rls_tbl ENABLE ROW LEVEL SECURITY; -GRANT SELECT ON rls_tbl TO regress_rls_alice; -SET SESSION AUTHORIZATION regress_rls_alice; -CREATE FUNCTION op_leak(int, int) RETURNS bool - AS 'BEGIN RAISE NOTICE ''op_leak => %, %'', $1, $2; RETURN $1 < $2; END' - LANGUAGE plpgsql; -CREATE OPERATOR <<< (procedure = op_leak, leftarg = int, rightarg = int, - restrict = scalarltsel); -SELECT * FROM rls_tbl WHERE a <<< 1000; - a ---- -(0 rows) - -DROP OPERATOR <<< (int, int); -DROP FUNCTION op_leak(int, int); -RESET SESSION AUTHORIZATION; -DROP TABLE rls_tbl; --- Bug #16006: whole-row Vars in a policy don't play nice with sub-selects -SET SESSION AUTHORIZATION regress_rls_alice; -CREATE TABLE rls_tbl (a int, b int, c int); -CREATE POLICY p1 ON rls_tbl USING (rls_tbl >= ROW(1,1,1)); -ALTER TABLE rls_tbl ENABLE ROW LEVEL SECURITY; -ALTER TABLE rls_tbl FORCE ROW LEVEL SECURITY; -INSERT INTO rls_tbl SELECT 10, 20, 30; -EXPLAIN (VERBOSE, COSTS OFF) -INSERT INTO rls_tbl - SELECT * FROM (SELECT b, c FROM rls_tbl ORDER BY a) ss; - QUERY PLAN --------------------------------------------------------------------- - 
Insert on regress_rls_schema.rls_tbl - -> Subquery Scan on ss - Output: ss.b, ss.c, NULL::integer - -> Sort - Output: rls_tbl_1.b, rls_tbl_1.c, rls_tbl_1.a - Sort Key: rls_tbl_1.a - -> Seq Scan on regress_rls_schema.rls_tbl rls_tbl_1 - Output: rls_tbl_1.b, rls_tbl_1.c, rls_tbl_1.a - Filter: (rls_tbl_1.* >= '(1,1,1)'::record) -(9 rows) - -INSERT INTO rls_tbl - SELECT * FROM (SELECT b, c FROM rls_tbl ORDER BY a) ss; -SELECT * FROM rls_tbl; - a | b | c -----+----+---- - 10 | 20 | 30 - 20 | 30 | -(2 rows) - -DROP TABLE rls_tbl; -RESET SESSION AUTHORIZATION; --- CVE-2023-2455: inlining an SRF may introduce an RLS dependency -create table rls_t (c text); -insert into rls_t values ('invisible to bob'); -alter table rls_t enable row level security; -grant select on rls_t to regress_rls_alice, regress_rls_bob; -create policy p1 on rls_t for select to regress_rls_alice using (true); -create policy p2 on rls_t for select to regress_rls_bob using (false); -create function rls_f () returns setof rls_t - stable language sql - as $$ select * from rls_t $$; -prepare q as select current_user, * from rls_f(); -set role regress_rls_alice; -execute q; - current_user | c --------------------+------------------ - regress_rls_alice | invisible to bob -(1 row) - -set role regress_rls_bob; -execute q; - current_user | c ---------------+--- -(0 rows) - -RESET ROLE; -DROP FUNCTION rls_f(); -DROP TABLE rls_t; --- --- Clean up objects --- -RESET SESSION AUTHORIZATION; -DROP SCHEMA regress_rls_schema CASCADE; -NOTICE: drop cascades to 30 other objects -DETAIL: drop cascades to function f_leak(text) -drop cascades to table uaccount -drop cascades to table category -drop cascades to table document -drop cascades to table part_document -drop cascades to table dependent -drop cascades to table rec1 -drop cascades to table rec2 -drop cascades to view rec1v -drop cascades to view rec2v -drop cascades to table s1 -drop cascades to table s2 -drop cascades to view v2 -drop cascades to table b1 -drop cascades to view bv1 -drop cascades to table z1 -drop cascades to table z2 -drop cascades to table z1_blacklist -drop cascades to table x1 -drop cascades to table y1 -drop cascades to table y2 -drop cascades to table t1 -drop cascades to table t2 -drop cascades to table t3 -drop cascades to table t4 -drop cascades to table current_check -drop cascades to table dep1 -drop cascades to table dep2 -drop cascades to table dob_t1 -drop cascades to table dob_t2 -DROP USER regress_rls_alice; -DROP USER regress_rls_bob; -DROP USER regress_rls_carol; -DROP USER regress_rls_dave; -DROP USER regress_rls_exempt_user; -DROP ROLE regress_rls_group1; -DROP ROLE regress_rls_group2; --- Arrange to have a few policies left over, for testing --- pg_dump/pg_restore -CREATE SCHEMA regress_rls_schema; -CREATE TABLE rls_tbl (c1 int); -ALTER TABLE rls_tbl ENABLE ROW LEVEL SECURITY; -CREATE POLICY p1 ON rls_tbl USING (c1 > 5); -CREATE POLICY p2 ON rls_tbl FOR SELECT USING (c1 <= 3); -CREATE POLICY p3 ON rls_tbl FOR UPDATE USING (c1 <= 3) WITH CHECK (c1 > 5); -CREATE POLICY p4 ON rls_tbl FOR DELETE USING (c1 <= 3); -CREATE TABLE rls_tbl_force (c1 int); -ALTER TABLE rls_tbl_force ENABLE ROW LEVEL SECURITY; -ALTER TABLE rls_tbl_force FORCE ROW LEVEL SECURITY; -CREATE POLICY p1 ON rls_tbl_force USING (c1 = 5) WITH CHECK (c1 < 5); -CREATE POLICY p2 ON rls_tbl_force FOR SELECT USING (c1 = 8); -CREATE POLICY p3 ON rls_tbl_force FOR UPDATE USING (c1 = 8) WITH CHECK (c1 >= 5); -CREATE POLICY p4 ON rls_tbl_force FOR DELETE USING (c1 = 8); +server closed the connection 
unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/groupingsets.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/groupingsets.out --- /tmp/cirrus-ci-build/src/test/regress/expected/groupingsets.out 2024-03-13 23:12:37.624138000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/groupingsets.out 2024-03-13 23:14:29.325279000 +0000 @@ -34,2234 +34,7 @@ -- (and with ordering differing from grouping) select a, b, grouping(a,b), sum(v), count(*), max(v) from gstest1 group by rollup (a,b); - a | b | grouping | sum | count | max ----+---+----------+-----+-------+----- - 1 | 1 | 0 | 21 | 2 | 11 - 1 | 2 | 0 | 25 | 2 | 13 - 1 | 3 | 0 | 14 | 1 | 14 - 1 | | 1 | 60 | 5 | 14 - 2 | 3 | 0 | 15 | 1 | 15 - 2 | | 1 | 15 | 1 | 15 - 3 | 3 | 0 | 16 | 1 | 16 - 3 | 4 | 0 | 17 | 1 | 17 - 3 | | 1 | 33 | 2 | 17 - 4 | 1 | 0 | 37 | 2 | 19 - 4 | | 1 | 37 | 2 | 19 - | | 3 | 145 | 10 | 19 -(12 rows) - -select a, b, grouping(a,b), sum(v), count(*), max(v) - from gstest1 group by rollup (a,b) order by a,b; - a | b | grouping | sum | count | max ----+---+----------+-----+-------+----- - 1 | 1 | 0 | 21 | 2 | 11 - 1 | 2 | 0 | 25 | 2 | 13 - 1 | 3 | 0 | 14 | 1 | 14 - 1 | | 1 | 60 | 5 | 14 - 2 | 3 | 0 | 15 | 1 | 15 - 2 | | 1 | 15 | 1 | 15 - 3 | 3 | 0 | 16 | 1 | 16 - 3 | 4 | 0 | 17 | 1 | 17 - 3 | | 1 | 33 | 2 | 17 - 4 | 1 | 0 | 37 | 2 | 19 - 4 | | 1 | 37 | 2 | 19 - | | 3 | 145 | 10 | 19 -(12 rows) - -select a, b, grouping(a,b), sum(v), count(*), max(v) - from gstest1 group by rollup (a,b) order by b desc, a; - a | b | grouping | sum | count | max ----+---+----------+-----+-------+----- - 1 | | 1 | 60 | 5 | 14 - 2 | | 1 | 15 | 1 | 15 - 3 | | 1 | 33 | 2 | 17 - 4 | | 1 | 37 | 2 | 19 - | | 3 | 145 | 10 | 19 - 3 | 4 | 0 | 17 | 1 | 17 - 1 | 3 | 0 | 14 | 1 | 14 - 2 | 3 | 0 | 15 | 1 | 15 - 3 | 3 | 0 | 16 | 1 | 16 - 1 | 2 | 0 | 25 | 2 | 13 - 1 | 1 | 0 | 21 | 2 | 11 - 4 | 1 | 0 | 37 | 2 | 19 -(12 rows) - -select a, b, grouping(a,b), sum(v), count(*), max(v) - from gstest1 group by rollup (a,b) order by coalesce(a,0)+coalesce(b,0); - a | b | grouping | sum | count | max ----+---+----------+-----+-------+----- - | | 3 | 145 | 10 | 19 - 1 | | 1 | 60 | 5 | 14 - 1 | 1 | 0 | 21 | 2 | 11 - 2 | | 1 | 15 | 1 | 15 - 3 | | 1 | 33 | 2 | 17 - 1 | 2 | 0 | 25 | 2 | 13 - 1 | 3 | 0 | 14 | 1 | 14 - 4 | | 1 | 37 | 2 | 19 - 4 | 1 | 0 | 37 | 2 | 19 - 2 | 3 | 0 | 15 | 1 | 15 - 3 | 3 | 0 | 16 | 1 | 16 - 3 | 4 | 0 | 17 | 1 | 17 -(12 rows) - --- various types of ordered aggs -select a, b, grouping(a,b), - array_agg(v order by v), - string_agg(v::text, ':' order by v desc), - percentile_disc(0.5) within group (order by v), - rank(1,2,12) within group (order by a,b,v) - from gstest1 group by rollup (a,b) order by a,b; - a | b | grouping | array_agg | string_agg | percentile_disc | rank ----+---+----------+---------------------------------+-------------------------------+-----------------+------ - 1 | 1 | 0 | {10,11} | 11:10 | 10 | 3 - 1 | 2 | 0 | {12,13} | 13:12 | 12 | 1 - 1 | 3 | 0 | {14} | 14 | 14 | 1 - 1 | | 1 | {10,11,12,13,14} | 14:13:12:11:10 | 12 | 3 - 2 | 3 | 0 | {15} | 15 | 15 | 1 - 2 | | 1 | {15} | 15 | 15 | 1 - 3 | 3 | 0 | {16} | 16 | 16 | 1 - 3 | 4 | 0 | {17} | 17 | 17 | 1 - 3 | | 1 | {16,17} | 17:16 | 16 | 1 - 4 | 1 | 0 | {18,19} | 19:18 | 18 | 1 - 4 | | 1 | {18,19} | 19:18 | 18 | 1 - | | 3 | {10,11,12,13,14,15,16,17,18,19} | 19:18:17:16:15:14:13:12:11:10 | 14 | 3 -(12 rows) - 
--- test usage of grouped columns in direct args of aggs -select grouping(a), a, array_agg(b), - rank(a) within group (order by b nulls first), - rank(a) within group (order by b nulls last) - from (values (1,1),(1,4),(1,5),(3,1),(3,2)) v(a,b) - group by rollup (a) order by a; - grouping | a | array_agg | rank | rank -----------+---+-------------+------+------ - 0 | 1 | {1,4,5} | 1 | 1 - 0 | 3 | {1,2} | 3 | 3 - 1 | | {1,4,5,1,2} | 1 | 6 -(3 rows) - --- nesting with window functions -select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum - from gstest2 group by rollup (a,b) order by rsum, a, b; - a | b | sum | rsum ----+---+-----+------ - 1 | 1 | 8 | 8 - 1 | 2 | 2 | 10 - 1 | | 10 | 20 - 2 | 2 | 2 | 22 - 2 | | 2 | 24 - | | 12 | 36 -(6 rows) - --- nesting with grouping sets -select sum(c) from gstest2 - group by grouping sets((), grouping sets((), grouping sets(()))) - order by 1 desc; - sum ------ - 12 - 12 - 12 -(3 rows) - -select sum(c) from gstest2 - group by grouping sets((), grouping sets((), grouping sets(((a, b))))) - order by 1 desc; - sum ------ - 12 - 12 - 8 - 2 - 2 -(5 rows) - -select sum(c) from gstest2 - group by grouping sets(grouping sets(rollup(c), grouping sets(cube(c)))) - order by 1 desc; - sum ------ - 12 - 12 - 6 - 6 - 6 - 6 -(6 rows) - -select sum(c) from gstest2 - group by grouping sets(a, grouping sets(a, cube(b))) - order by 1 desc; - sum ------ - 12 - 10 - 10 - 8 - 4 - 2 - 2 -(7 rows) - -select sum(c) from gstest2 - group by grouping sets(grouping sets((a, (b)))) - order by 1 desc; - sum ------ - 8 - 2 - 2 -(3 rows) - -select sum(c) from gstest2 - group by grouping sets(grouping sets((a, b))) - order by 1 desc; - sum ------ - 8 - 2 - 2 -(3 rows) - -select sum(c) from gstest2 - group by grouping sets(grouping sets(a, grouping sets(a), a)) - order by 1 desc; - sum ------ - 10 - 10 - 10 - 2 - 2 - 2 -(6 rows) - -select sum(c) from gstest2 - group by grouping sets(grouping sets(a, grouping sets(a, grouping sets(a), ((a)), a, grouping sets(a), (a)), a)) - order by 1 desc; - sum ------ - 10 - 10 - 10 - 10 - 10 - 10 - 10 - 10 - 2 - 2 - 2 - 2 - 2 - 2 - 2 - 2 -(16 rows) - -select sum(c) from gstest2 - group by grouping sets((a,(a,b)), grouping sets((a,(a,b)),a)) - order by 1 desc; - sum ------ - 10 - 8 - 8 - 2 - 2 - 2 - 2 - 2 -(8 rows) - --- empty input: first is 0 rows, second 1, third 3 etc. 
-select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a); - a | b | sum | count ----+---+-----+------- -(0 rows) - -select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),()); - a | b | sum | count ----+---+-----+------- - | | | 0 -(1 row) - -select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),()); - a | b | sum | count ----+---+-----+------- - | | | 0 - | | | 0 - | | | 0 -(3 rows) - -select sum(v), count(*) from gstest_empty group by grouping sets ((),(),()); - sum | count ------+------- - | 0 - | 0 - | 0 -(3 rows) - --- empty input with joins tests some important code paths -select t1.a, t2.b, sum(t1.v), count(*) from gstest_empty t1, gstest_empty t2 - group by grouping sets ((t1.a,t2.b),()); - a | b | sum | count ----+---+-----+------- - | | | 0 -(1 row) - --- simple joins, var resolution, GROUPING on join vars -select t1.a, t2.b, grouping(t1.a, t2.b), sum(t1.v), max(t2.a) - from gstest1 t1, gstest2 t2 - group by grouping sets ((t1.a, t2.b), ()); - a | b | grouping | sum | max ----+---+----------+------+----- - 1 | 1 | 0 | 420 | 1 - 1 | 2 | 0 | 120 | 2 - 2 | 1 | 0 | 105 | 1 - 2 | 2 | 0 | 30 | 2 - 3 | 1 | 0 | 231 | 1 - 3 | 2 | 0 | 66 | 2 - 4 | 1 | 0 | 259 | 1 - 4 | 2 | 0 | 74 | 2 - | | 3 | 1305 | 2 -(9 rows) - -select t1.a, t2.b, grouping(t1.a, t2.b), sum(t1.v), max(t2.a) - from gstest1 t1 join gstest2 t2 on (t1.a=t2.a) - group by grouping sets ((t1.a, t2.b), ()); - a | b | grouping | sum | max ----+---+----------+-----+----- - 1 | 1 | 0 | 420 | 1 - 1 | 2 | 0 | 60 | 1 - 2 | 2 | 0 | 15 | 2 - | | 3 | 495 | 2 -(4 rows) - -select a, b, grouping(a, b), sum(t1.v), max(t2.c) - from gstest1 t1 join gstest2 t2 using (a,b) - group by grouping sets ((a, b), ()); - a | b | grouping | sum | max ----+---+----------+-----+----- - 1 | 1 | 0 | 147 | 2 - 1 | 2 | 0 | 25 | 2 - | | 3 | 172 | 2 -(3 rows) - --- check that functionally dependent cols are not nulled -select a, d, grouping(a,b,c) - from gstest3 - group by grouping sets ((a,b), (a,c)); - a | d | grouping ----+---+---------- - 1 | 1 | 1 - 2 | 2 | 1 - 1 | 1 | 2 - 2 | 2 | 2 -(4 rows) - --- check that distinct grouping columns are kept separate --- even if they are equal() -explain (costs off) -select g as alias1, g as alias2 - from generate_series(1,3) g - group by alias1, rollup(alias2); - QUERY PLAN ------------------------------------------------- - GroupAggregate - Group Key: g, g - Group Key: g - -> Sort - Sort Key: g - -> Function Scan on generate_series g -(6 rows) - -select g as alias1, g as alias2 - from generate_series(1,3) g - group by alias1, rollup(alias2); - alias1 | alias2 ---------+-------- - 1 | 1 - 1 | - 2 | 2 - 2 | - 3 | 3 - 3 | -(6 rows) - --- check that pulled-up subquery outputs still go to null when appropriate -select four, x - from (select four, ten, 'foo'::text as x from tenk1) as t - group by grouping sets (four, x) - having x = 'foo'; - four | x -------+----- - | foo -(1 row) - -select four, x || 'x' - from (select four, ten, 'foo'::text as x from tenk1) as t - group by grouping sets (four, x) - order by four; - four | ?column? -------+---------- - 0 | - 1 | - 2 | - 3 | - | foox -(5 rows) - -select (x+y)*1, sum(z) - from (select 1 as x, 2 as y, 3 as z) s - group by grouping sets (x+y, x); - ?column? 
| sum -----------+----- - 3 | 3 - | 3 -(2 rows) - -select x, not x as not_x, q2 from - (select *, q1 = 1 as x from int8_tbl i1) as t - group by grouping sets(x, q2) - order by x, q2; - x | not_x | q2 ----+-------+------------------- - f | t | - | | -4567890123456789 - | | 123 - | | 456 - | | 4567890123456789 -(5 rows) - --- check qual push-down rules for a subquery with grouping sets -explain (verbose, costs off) -select * from ( - select 1 as x, q1, sum(q2) - from int8_tbl i1 - group by grouping sets(1, 2) -) ss -where x = 1 and q1 = 123; - QUERY PLAN --------------------------------------------------- - Subquery Scan on ss - Output: ss.x, ss.q1, ss.sum - Filter: ((ss.x = 1) AND (ss.q1 = 123)) - -> GroupAggregate - Output: (1), i1.q1, sum(i1.q2) - Group Key: (1) - Sort Key: i1.q1 - Group Key: i1.q1 - -> Sort - Output: (1), i1.q1, i1.q2 - Sort Key: (1) - -> Seq Scan on public.int8_tbl i1 - Output: 1, i1.q1, i1.q2 -(13 rows) - -select * from ( - select 1 as x, q1, sum(q2) - from int8_tbl i1 - group by grouping sets(1, 2) -) ss -where x = 1 and q1 = 123; - x | q1 | sum ----+----+----- -(0 rows) - --- check handling of pulled-up SubPlan in GROUPING() argument (bug #17479) -explain (verbose, costs off) -select grouping(ss.x) -from int8_tbl i1 -cross join lateral (select (select i1.q1) as x) ss -group by ss.x; - QUERY PLAN ------------------------------------------------- - GroupAggregate - Output: GROUPING((SubPlan 1)), ((SubPlan 2)) - Group Key: ((SubPlan 2)) - -> Sort - Output: ((SubPlan 2)), i1.q1 - Sort Key: ((SubPlan 2)) - -> Seq Scan on public.int8_tbl i1 - Output: (SubPlan 2), i1.q1 - SubPlan 2 - -> Result - Output: i1.q1 -(11 rows) - -select grouping(ss.x) -from int8_tbl i1 -cross join lateral (select (select i1.q1) as x) ss -group by ss.x; - grouping ----------- - 0 - 0 -(2 rows) - -explain (verbose, costs off) -select (select grouping(ss.x)) -from int8_tbl i1 -cross join lateral (select (select i1.q1) as x) ss -group by ss.x; - QUERY PLAN --------------------------------------------- - GroupAggregate - Output: (SubPlan 2), ((SubPlan 3)) - Group Key: ((SubPlan 3)) - -> Sort - Output: ((SubPlan 3)), i1.q1 - Sort Key: ((SubPlan 3)) - -> Seq Scan on public.int8_tbl i1 - Output: (SubPlan 3), i1.q1 - SubPlan 3 - -> Result - Output: i1.q1 - SubPlan 2 - -> Result - Output: GROUPING((SubPlan 1)) -(14 rows) - -select (select grouping(ss.x)) -from int8_tbl i1 -cross join lateral (select (select i1.q1) as x) ss -group by ss.x; - grouping ----------- - 0 - 0 -(2 rows) - --- simple rescan tests -select a, b, sum(v.x) - from (values (1),(2)) v(x), gstest_data(v.x) - group by rollup (a,b); - a | b | sum ----+---+----- - 1 | 1 | 1 - 1 | 2 | 1 - 1 | 3 | 1 - 1 | | 3 - 2 | 1 | 2 - 2 | 2 | 2 - 2 | 3 | 2 - 2 | | 6 - | | 9 -(9 rows) - -select * - from (values (1),(2)) v(x), - lateral (select a, b, sum(v.x) from gstest_data(v.x) group by rollup (a,b)) s; -ERROR: aggregate functions are not allowed in FROM clause of their own query level -LINE 3: lateral (select a, b, sum(v.x) from gstest_data(v.x) ... 
- ^ --- min max optimization should still work with GROUP BY () -explain (costs off) - select min(unique1) from tenk1 GROUP BY (); - QUERY PLAN ------------------------------------------------------------- - Result - InitPlan 1 (returns $0) - -> Limit - -> Index Only Scan using tenk1_unique1 on tenk1 - Index Cond: (unique1 IS NOT NULL) -(5 rows) - --- Views with GROUPING SET queries -CREATE VIEW gstest_view AS select a, b, grouping(a,b), sum(c), count(*), max(c) - from gstest2 group by rollup ((a,b,c),(c,d)); -NOTICE: view "gstest_view" will be a temporary view -select pg_get_viewdef('gstest_view'::regclass, true); - pg_get_viewdef ---------------------------------------- - SELECT a, + - b, + - GROUPING(a, b) AS "grouping", + - sum(c) AS sum, + - count(*) AS count, + - max(c) AS max + - FROM gstest2 + - GROUP BY ROLLUP((a, b, c), (c, d)); -(1 row) - --- Nested queries with 3 or more levels of nesting -select(select (select grouping(a,b) from (values (1)) v2(c)) from (values (1,2)) v1(a,b) group by (a,b)) from (values(6,7)) v3(e,f) GROUP BY ROLLUP(e,f); - grouping ----------- - 0 - 0 - 0 -(3 rows) - -select(select (select grouping(e,f) from (values (1)) v2(c)) from (values (1,2)) v1(a,b) group by (a,b)) from (values(6,7)) v3(e,f) GROUP BY ROLLUP(e,f); - grouping ----------- - 0 - 1 - 3 -(3 rows) - -select(select (select grouping(c) from (values (1)) v2(c) GROUP BY c) from (values (1,2)) v1(a,b) group by (a,b)) from (values(6,7)) v3(e,f) GROUP BY ROLLUP(e,f); - grouping ----------- - 0 - 0 - 0 -(3 rows) - --- Combinations of operations -select a, b, c, d from gstest2 group by rollup(a,b),grouping sets(c,d); - a | b | c | d ----+---+---+--- - 1 | 1 | 1 | - 1 | | 1 | - | | 1 | - 1 | 1 | 2 | - 1 | 2 | 2 | - 1 | | 2 | - 2 | 2 | 2 | - 2 | | 2 | - | | 2 | - 1 | 1 | | 1 - 1 | | | 1 - | | | 1 - 1 | 1 | | 2 - 1 | 2 | | 2 - 1 | | | 2 - 2 | 2 | | 2 - 2 | | | 2 - | | | 2 -(18 rows) - -select a, b from (values (1,2),(2,3)) v(a,b) group by a,b, grouping sets(a); - a | b ----+--- - 1 | 2 - 2 | 3 -(2 rows) - --- Tests for chained aggregates -select a, b, grouping(a,b), sum(v), count(*), max(v) - from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2)) order by 3,6; - a | b | grouping | sum | count | max ----+---+----------+-----+-------+----- - 1 | 1 | 0 | 21 | 2 | 11 - 1 | 2 | 0 | 25 | 2 | 13 - 1 | 3 | 0 | 14 | 1 | 14 - 2 | 3 | 0 | 15 | 1 | 15 - 3 | 3 | 0 | 16 | 1 | 16 - 3 | 4 | 0 | 17 | 1 | 17 - 4 | 1 | 0 | 37 | 2 | 19 - | | 3 | 21 | 2 | 11 - | | 3 | 21 | 2 | 11 - | | 3 | 25 | 2 | 13 - | | 3 | 25 | 2 | 13 - | | 3 | 14 | 1 | 14 - | | 3 | 14 | 1 | 14 - | | 3 | 15 | 1 | 15 - | | 3 | 15 | 1 | 15 - | | 3 | 16 | 1 | 16 - | | 3 | 16 | 1 | 16 - | | 3 | 17 | 1 | 17 - | | 3 | 17 | 1 | 17 - | | 3 | 37 | 2 | 19 - | | 3 | 37 | 2 | 19 -(21 rows) - -select(select (select grouping(a,b) from (values (1)) v2(c)) from (values (1,2)) v1(a,b) group by (a,b)) from (values(6,7)) v3(e,f) GROUP BY ROLLUP((e+1),(f+1)); - grouping ----------- - 0 - 0 - 0 -(3 rows) - -select(select (select grouping(a,b) from (values (1)) v2(c)) from (values (1,2)) v1(a,b) group by (a,b)) from (values(6,7)) v3(e,f) GROUP BY CUBE((e+1),(f+1)) ORDER BY (e+1),(f+1); - grouping ----------- - 0 - 0 - 0 - 0 -(4 rows) - -select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum - from gstest2 group by cube (a,b) order by rsum, a, b; - a | b | sum | rsum ----+---+-----+------ - 1 | 1 | 8 | 8 - 1 | 2 | 2 | 10 - 1 | | 10 | 20 - 2 | 2 | 2 | 22 - 2 | | 2 | 24 - | 1 | 8 | 32 - | 2 | 4 | 36 - | | 12 | 48 -(8 rows) - -select a, b, sum(c) from (values 
(1,1,10),(1,1,11),(1,2,12),(1,2,13),(1,3,14),(2,3,15),(3,3,16),(3,4,17),(4,1,18),(4,1,19)) v(a,b,c) group by rollup (a,b); - a | b | sum ----+---+----- - 1 | 1 | 21 - 1 | 2 | 25 - 1 | 3 | 14 - 1 | | 60 - 2 | 3 | 15 - 2 | | 15 - 3 | 3 | 16 - 3 | 4 | 17 - 3 | | 33 - 4 | 1 | 37 - 4 | | 37 - | | 145 -(12 rows) - -select a, b, sum(v.x) - from (values (1),(2)) v(x), gstest_data(v.x) - group by cube (a,b) order by a,b; - a | b | sum ----+---+----- - 1 | 1 | 1 - 1 | 2 | 1 - 1 | 3 | 1 - 1 | | 3 - 2 | 1 | 2 - 2 | 2 | 2 - 2 | 3 | 2 - 2 | | 6 - | 1 | 3 - | 2 | 3 - | 3 | 3 - | | 9 -(12 rows) - --- Test reordering of grouping sets -explain (costs off) -select * from gstest1 group by grouping sets((a,b,v),(v)) order by v,b,a; - QUERY PLAN ------------------------------------------------------------------------------------- - Incremental Sort - Sort Key: "*VALUES*".column3, "*VALUES*".column2, "*VALUES*".column1 - Presorted Key: "*VALUES*".column3 - -> GroupAggregate - Group Key: "*VALUES*".column3, "*VALUES*".column2, "*VALUES*".column1 - Group Key: "*VALUES*".column3 - -> Sort - Sort Key: "*VALUES*".column3, "*VALUES*".column2, "*VALUES*".column1 - -> Values Scan on "*VALUES*" -(9 rows) - --- Agg level check. This query should error out. -select (select grouping(a,b) from gstest2) from gstest2 group by a,b; -ERROR: arguments to GROUPING must be grouping expressions of the associated query level -LINE 1: select (select grouping(a,b) from gstest2) from gstest2 grou... - ^ ---Nested queries -select a, b, sum(c), count(*) from gstest2 group by grouping sets (rollup(a,b),a); - a | b | sum | count ----+---+-----+------- - 1 | 1 | 8 | 7 - 1 | 2 | 2 | 1 - 1 | | 10 | 8 - 1 | | 10 | 8 - 2 | 2 | 2 | 1 - 2 | | 2 | 1 - 2 | | 2 | 1 - | | 12 | 9 -(8 rows) - --- HAVING queries -select ten, sum(distinct four) from onek a -group by grouping sets((ten,four),(ten)) -having exists (select 1 from onek b where sum(distinct a.four) = b.four); - ten | sum ------+----- - 0 | 0 - 0 | 2 - 0 | 2 - 1 | 1 - 1 | 3 - 2 | 0 - 2 | 2 - 2 | 2 - 3 | 1 - 3 | 3 - 4 | 0 - 4 | 2 - 4 | 2 - 5 | 1 - 5 | 3 - 6 | 0 - 6 | 2 - 6 | 2 - 7 | 1 - 7 | 3 - 8 | 0 - 8 | 2 - 8 | 2 - 9 | 1 - 9 | 3 -(25 rows) - --- Tests around pushdown of HAVING clauses, partially testing against previous bugs -select a,count(*) from gstest2 group by rollup(a) order by a; - a | count ----+------- - 1 | 8 - 2 | 1 - | 9 -(3 rows) - -select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 order by a; - a | count ----+------- - 2 | 1 - | 9 -(2 rows) - -explain (costs off) - select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 order by a; - QUERY PLAN ----------------------------------------- - Sort - Sort Key: a - -> GroupAggregate - Group Key: a - Group Key: () - Filter: (a IS DISTINCT FROM 1) - -> Sort - Sort Key: a - -> Seq Scan on gstest2 -(9 rows) - -select v.c, (select count(*) from gstest2 group by () having v.c) - from (values (false),(true)) v(c) order by v.c; - c | count ----+------- - f | - t | 9 -(2 rows) - -explain (costs off) - select v.c, (select count(*) from gstest2 group by () having v.c) - from (values (false),(true)) v(c) order by v.c; - QUERY PLAN ------------------------------------------------------------ - Sort - Sort Key: "*VALUES*".column1 - -> Values Scan on "*VALUES*" - SubPlan 1 - -> Aggregate - Group Key: () - Filter: "*VALUES*".column1 - -> Result - One-Time Filter: "*VALUES*".column1 - -> Seq Scan on gstest2 -(10 rows) - --- HAVING with GROUPING queries -select ten, grouping(ten) from onek -group by 
grouping sets(ten) having grouping(ten) >= 0 -order by 2,1; - ten | grouping ------+---------- - 0 | 0 - 1 | 0 - 2 | 0 - 3 | 0 - 4 | 0 - 5 | 0 - 6 | 0 - 7 | 0 - 8 | 0 - 9 | 0 -(10 rows) - -select ten, grouping(ten) from onek -group by grouping sets(ten, four) having grouping(ten) > 0 -order by 2,1; - ten | grouping ------+---------- - | 1 - | 1 - | 1 - | 1 -(4 rows) - -select ten, grouping(ten) from onek -group by rollup(ten) having grouping(ten) > 0 -order by 2,1; - ten | grouping ------+---------- - | 1 -(1 row) - -select ten, grouping(ten) from onek -group by cube(ten) having grouping(ten) > 0 -order by 2,1; - ten | grouping ------+---------- - | 1 -(1 row) - -select ten, grouping(ten) from onek -group by (ten) having grouping(ten) >= 0 -order by 2,1; - ten | grouping ------+---------- - 0 | 0 - 1 | 0 - 2 | 0 - 3 | 0 - 4 | 0 - 5 | 0 - 6 | 0 - 7 | 0 - 8 | 0 - 9 | 0 -(10 rows) - --- FILTER queries -select ten, sum(distinct four) filter (where four::text ~ '123') from onek a -group by rollup(ten); - ten | sum ------+----- - 0 | - 1 | - 2 | - 3 | - 4 | - 5 | - 6 | - 7 | - 8 | - 9 | - | -(11 rows) - --- More rescan tests -select * from (values (1),(2)) v(a) left join lateral (select v.a, four, ten, count(*) from onek group by cube(four,ten)) s on true order by v.a,four,ten; - a | a | four | ten | count ----+---+------+-----+------- - 1 | 1 | 0 | 0 | 50 - 1 | 1 | 0 | 2 | 50 - 1 | 1 | 0 | 4 | 50 - 1 | 1 | 0 | 6 | 50 - 1 | 1 | 0 | 8 | 50 - 1 | 1 | 0 | | 250 - 1 | 1 | 1 | 1 | 50 - 1 | 1 | 1 | 3 | 50 - 1 | 1 | 1 | 5 | 50 - 1 | 1 | 1 | 7 | 50 - 1 | 1 | 1 | 9 | 50 - 1 | 1 | 1 | | 250 - 1 | 1 | 2 | 0 | 50 - 1 | 1 | 2 | 2 | 50 - 1 | 1 | 2 | 4 | 50 - 1 | 1 | 2 | 6 | 50 - 1 | 1 | 2 | 8 | 50 - 1 | 1 | 2 | | 250 - 1 | 1 | 3 | 1 | 50 - 1 | 1 | 3 | 3 | 50 - 1 | 1 | 3 | 5 | 50 - 1 | 1 | 3 | 7 | 50 - 1 | 1 | 3 | 9 | 50 - 1 | 1 | 3 | | 250 - 1 | 1 | | 0 | 100 - 1 | 1 | | 1 | 100 - 1 | 1 | | 2 | 100 - 1 | 1 | | 3 | 100 - 1 | 1 | | 4 | 100 - 1 | 1 | | 5 | 100 - 1 | 1 | | 6 | 100 - 1 | 1 | | 7 | 100 - 1 | 1 | | 8 | 100 - 1 | 1 | | 9 | 100 - 1 | 1 | | | 1000 - 2 | 2 | 0 | 0 | 50 - 2 | 2 | 0 | 2 | 50 - 2 | 2 | 0 | 4 | 50 - 2 | 2 | 0 | 6 | 50 - 2 | 2 | 0 | 8 | 50 - 2 | 2 | 0 | | 250 - 2 | 2 | 1 | 1 | 50 - 2 | 2 | 1 | 3 | 50 - 2 | 2 | 1 | 5 | 50 - 2 | 2 | 1 | 7 | 50 - 2 | 2 | 1 | 9 | 50 - 2 | 2 | 1 | | 250 - 2 | 2 | 2 | 0 | 50 - 2 | 2 | 2 | 2 | 50 - 2 | 2 | 2 | 4 | 50 - 2 | 2 | 2 | 6 | 50 - 2 | 2 | 2 | 8 | 50 - 2 | 2 | 2 | | 250 - 2 | 2 | 3 | 1 | 50 - 2 | 2 | 3 | 3 | 50 - 2 | 2 | 3 | 5 | 50 - 2 | 2 | 3 | 7 | 50 - 2 | 2 | 3 | 9 | 50 - 2 | 2 | 3 | | 250 - 2 | 2 | | 0 | 100 - 2 | 2 | | 1 | 100 - 2 | 2 | | 2 | 100 - 2 | 2 | | 3 | 100 - 2 | 2 | | 4 | 100 - 2 | 2 | | 5 | 100 - 2 | 2 | | 6 | 100 - 2 | 2 | | 7 | 100 - 2 | 2 | | 8 | 100 - 2 | 2 | | 9 | 100 - 2 | 2 | | | 1000 -(70 rows) - -select array(select row(v.a,s1.*) from (select two,four, count(*) from onek group by cube(two,four) order by two,four) s1) from (values (1),(2)) v(a); - array ------------------------------------------------------------------------------------------------------------------------------------------------------- - {"(1,0,0,250)","(1,0,2,250)","(1,0,,500)","(1,1,1,250)","(1,1,3,250)","(1,1,,500)","(1,,0,250)","(1,,1,250)","(1,,2,250)","(1,,3,250)","(1,,,1000)"} - {"(2,0,0,250)","(2,0,2,250)","(2,0,,500)","(2,1,1,250)","(2,1,3,250)","(2,1,,500)","(2,,0,250)","(2,,1,250)","(2,,2,250)","(2,,3,250)","(2,,,1000)"} -(2 rows) - --- Grouping on text columns -select sum(ten) from onek group by two, rollup(four::text) order by 1; - sum ------- - 1000 - 
1000 - 1250 - 1250 - 2000 - 2500 -(6 rows) - -select sum(ten) from onek group by rollup(four::text), two order by 1; - sum ------- - 1000 - 1000 - 1250 - 1250 - 2000 - 2500 -(6 rows) - --- hashing support -set enable_hashagg = true; --- failure cases -select count(*) from gstest4 group by rollup(unhashable_col,unsortable_col); -ERROR: could not implement GROUP BY -DETAIL: Some of the datatypes only support hashing, while others only support sorting. -select array_agg(v order by v) from gstest4 group by grouping sets ((id,unsortable_col),(id)); -ERROR: could not implement GROUP BY -DETAIL: Some of the datatypes only support hashing, while others only support sorting. --- simple cases -select a, b, grouping(a,b), sum(v), count(*), max(v) - from gstest1 group by grouping sets ((a),(b)) order by 3,1,2; - a | b | grouping | sum | count | max ----+---+----------+-----+-------+----- - 1 | | 1 | 60 | 5 | 14 - 2 | | 1 | 15 | 1 | 15 - 3 | | 1 | 33 | 2 | 17 - 4 | | 1 | 37 | 2 | 19 - | 1 | 2 | 58 | 4 | 19 - | 2 | 2 | 25 | 2 | 13 - | 3 | 2 | 45 | 3 | 16 - | 4 | 2 | 17 | 1 | 17 -(8 rows) - -explain (costs off) select a, b, grouping(a,b), sum(v), count(*), max(v) - from gstest1 group by grouping sets ((a),(b)) order by 3,1,2; - QUERY PLAN --------------------------------------------------------------------------------------------------------- - Sort - Sort Key: (GROUPING("*VALUES*".column1, "*VALUES*".column2)), "*VALUES*".column1, "*VALUES*".column2 - -> HashAggregate - Hash Key: "*VALUES*".column1 - Hash Key: "*VALUES*".column2 - -> Values Scan on "*VALUES*" -(6 rows) - -select a, b, grouping(a,b), sum(v), count(*), max(v) - from gstest1 group by cube(a,b) order by 3,1,2; - a | b | grouping | sum | count | max ----+---+----------+-----+-------+----- - 1 | 1 | 0 | 21 | 2 | 11 - 1 | 2 | 0 | 25 | 2 | 13 - 1 | 3 | 0 | 14 | 1 | 14 - 2 | 3 | 0 | 15 | 1 | 15 - 3 | 3 | 0 | 16 | 1 | 16 - 3 | 4 | 0 | 17 | 1 | 17 - 4 | 1 | 0 | 37 | 2 | 19 - 1 | | 1 | 60 | 5 | 14 - 2 | | 1 | 15 | 1 | 15 - 3 | | 1 | 33 | 2 | 17 - 4 | | 1 | 37 | 2 | 19 - | 1 | 2 | 58 | 4 | 19 - | 2 | 2 | 25 | 2 | 13 - | 3 | 2 | 45 | 3 | 16 - | 4 | 2 | 17 | 1 | 17 - | | 3 | 145 | 10 | 19 -(16 rows) - -explain (costs off) select a, b, grouping(a,b), sum(v), count(*), max(v) - from gstest1 group by cube(a,b) order by 3,1,2; - QUERY PLAN --------------------------------------------------------------------------------------------------------- - Sort - Sort Key: (GROUPING("*VALUES*".column1, "*VALUES*".column2)), "*VALUES*".column1, "*VALUES*".column2 - -> MixedAggregate - Hash Key: "*VALUES*".column1, "*VALUES*".column2 - Hash Key: "*VALUES*".column1 - Hash Key: "*VALUES*".column2 - Group Key: () - -> Values Scan on "*VALUES*" -(8 rows) - --- shouldn't try and hash -explain (costs off) - select a, b, grouping(a,b), array_agg(v order by v) - from gstest1 group by cube(a,b); - QUERY PLAN ----------------------------------------------------------- - GroupAggregate - Group Key: "*VALUES*".column1, "*VALUES*".column2 - Group Key: "*VALUES*".column1 - Group Key: () - Sort Key: "*VALUES*".column2 - Group Key: "*VALUES*".column2 - -> Sort - Sort Key: "*VALUES*".column1, "*VALUES*".column2 - -> Values Scan on "*VALUES*" -(9 rows) - --- unsortable cases -select unsortable_col, count(*) - from gstest4 group by grouping sets ((unsortable_col),(unsortable_col)) - order by unsortable_col::text; - unsortable_col | count -----------------+------- - 1 | 4 - 1 | 4 - 2 | 4 - 2 | 4 -(4 rows) - --- mixed hashable/sortable cases -select unhashable_col, unsortable_col, - 
grouping(unhashable_col, unsortable_col), - count(*), sum(v) - from gstest4 group by grouping sets ((unhashable_col),(unsortable_col)) - order by 3, 5; - unhashable_col | unsortable_col | grouping | count | sum -----------------+----------------+----------+-------+----- - 0000 | | 1 | 2 | 17 - 0001 | | 1 | 2 | 34 - 0010 | | 1 | 2 | 68 - 0011 | | 1 | 2 | 136 - | 2 | 2 | 4 | 60 - | 1 | 2 | 4 | 195 -(6 rows) - -explain (costs off) - select unhashable_col, unsortable_col, - grouping(unhashable_col, unsortable_col), - count(*), sum(v) - from gstest4 group by grouping sets ((unhashable_col),(unsortable_col)) - order by 3,5; - QUERY PLAN ------------------------------------------------------------------- - Sort - Sort Key: (GROUPING(unhashable_col, unsortable_col)), (sum(v)) - -> MixedAggregate - Hash Key: unsortable_col - Group Key: unhashable_col - -> Sort - Sort Key: unhashable_col - -> Seq Scan on gstest4 -(8 rows) - -select unhashable_col, unsortable_col, - grouping(unhashable_col, unsortable_col), - count(*), sum(v) - from gstest4 group by grouping sets ((v,unhashable_col),(v,unsortable_col)) - order by 3,5; - unhashable_col | unsortable_col | grouping | count | sum -----------------+----------------+----------+-------+----- - 0000 | | 1 | 1 | 1 - 0001 | | 1 | 1 | 2 - 0010 | | 1 | 1 | 4 - 0011 | | 1 | 1 | 8 - 0000 | | 1 | 1 | 16 - 0001 | | 1 | 1 | 32 - 0010 | | 1 | 1 | 64 - 0011 | | 1 | 1 | 128 - | 1 | 2 | 1 | 1 - | 1 | 2 | 1 | 2 - | 2 | 2 | 1 | 4 - | 2 | 2 | 1 | 8 - | 2 | 2 | 1 | 16 - | 2 | 2 | 1 | 32 - | 1 | 2 | 1 | 64 - | 1 | 2 | 1 | 128 -(16 rows) - -explain (costs off) - select unhashable_col, unsortable_col, - grouping(unhashable_col, unsortable_col), - count(*), sum(v) - from gstest4 group by grouping sets ((v,unhashable_col),(v,unsortable_col)) - order by 3,5; - QUERY PLAN ------------------------------------------------------------------- - Sort - Sort Key: (GROUPING(unhashable_col, unsortable_col)), (sum(v)) - -> MixedAggregate - Hash Key: v, unsortable_col - Group Key: v, unhashable_col - -> Sort - Sort Key: v, unhashable_col - -> Seq Scan on gstest4 -(8 rows) - --- empty input: first is 0 rows, second 1, third 3 etc. 
-select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a); - a | b | sum | count ----+---+-----+------- -(0 rows) - -explain (costs off) - select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a); - QUERY PLAN --------------------------------- - HashAggregate - Hash Key: a, b - Hash Key: a - -> Seq Scan on gstest_empty -(4 rows) - -select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),()); - a | b | sum | count ----+---+-----+------- - | | | 0 -(1 row) - -select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),()); - a | b | sum | count ----+---+-----+------- - | | | 0 - | | | 0 - | | | 0 -(3 rows) - -explain (costs off) - select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),()); - QUERY PLAN --------------------------------- - MixedAggregate - Hash Key: a, b - Group Key: () - Group Key: () - Group Key: () - -> Seq Scan on gstest_empty -(6 rows) - -select sum(v), count(*) from gstest_empty group by grouping sets ((),(),()); - sum | count ------+------- - | 0 - | 0 - | 0 -(3 rows) - -explain (costs off) - select sum(v), count(*) from gstest_empty group by grouping sets ((),(),()); - QUERY PLAN --------------------------------- - Aggregate - Group Key: () - Group Key: () - Group Key: () - -> Seq Scan on gstest_empty -(5 rows) - --- check that functionally dependent cols are not nulled -select a, d, grouping(a,b,c) - from gstest3 - group by grouping sets ((a,b), (a,c)); - a | d | grouping ----+---+---------- - 1 | 1 | 1 - 2 | 2 | 1 - 1 | 1 | 2 - 2 | 2 | 2 -(4 rows) - -explain (costs off) - select a, d, grouping(a,b,c) - from gstest3 - group by grouping sets ((a,b), (a,c)); - QUERY PLAN ---------------------------- - HashAggregate - Hash Key: a, b - Hash Key: a, c - -> Seq Scan on gstest3 -(4 rows) - --- simple rescan tests -select a, b, sum(v.x) - from (values (1),(2)) v(x), gstest_data(v.x) - group by grouping sets (a,b) - order by 1, 2, 3; - a | b | sum ----+---+----- - 1 | | 3 - 2 | | 6 - | 1 | 3 - | 2 | 3 - | 3 | 3 -(5 rows) - -explain (costs off) - select a, b, sum(v.x) - from (values (1),(2)) v(x), gstest_data(v.x) - group by grouping sets (a,b) - order by 3, 1, 2; - QUERY PLAN ---------------------------------------------------------------------- - Sort - Sort Key: (sum("*VALUES*".column1)), gstest_data.a, gstest_data.b - -> HashAggregate - Hash Key: gstest_data.a - Hash Key: gstest_data.b - -> Nested Loop - -> Values Scan on "*VALUES*" - -> Function Scan on gstest_data -(8 rows) - -select * - from (values (1),(2)) v(x), - lateral (select a, b, sum(v.x) from gstest_data(v.x) group by grouping sets (a,b)) s; -ERROR: aggregate functions are not allowed in FROM clause of their own query level -LINE 3: lateral (select a, b, sum(v.x) from gstest_data(v.x) ... - ^ -explain (costs off) - select * - from (values (1),(2)) v(x), - lateral (select a, b, sum(v.x) from gstest_data(v.x) group by grouping sets (a,b)) s; -ERROR: aggregate functions are not allowed in FROM clause of their own query level -LINE 4: lateral (select a, b, sum(v.x) from gstest_data(v.x... 
- ^ --- Tests for chained aggregates -select a, b, grouping(a,b), sum(v), count(*), max(v) - from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2)) order by 3,6; - a | b | grouping | sum | count | max ----+---+----------+-----+-------+----- - 1 | 1 | 0 | 21 | 2 | 11 - 1 | 2 | 0 | 25 | 2 | 13 - 1 | 3 | 0 | 14 | 1 | 14 - 2 | 3 | 0 | 15 | 1 | 15 - 3 | 3 | 0 | 16 | 1 | 16 - 3 | 4 | 0 | 17 | 1 | 17 - 4 | 1 | 0 | 37 | 2 | 19 - | | 3 | 21 | 2 | 11 - | | 3 | 21 | 2 | 11 - | | 3 | 25 | 2 | 13 - | | 3 | 25 | 2 | 13 - | | 3 | 14 | 1 | 14 - | | 3 | 14 | 1 | 14 - | | 3 | 15 | 1 | 15 - | | 3 | 15 | 1 | 15 - | | 3 | 16 | 1 | 16 - | | 3 | 16 | 1 | 16 - | | 3 | 17 | 1 | 17 - | | 3 | 17 | 1 | 17 - | | 3 | 37 | 2 | 19 - | | 3 | 37 | 2 | 19 -(21 rows) - -explain (costs off) - select a, b, grouping(a,b), sum(v), count(*), max(v) - from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2)) order by 3,6; - QUERY PLAN -------------------------------------------------------------------------------------------- - Sort - Sort Key: (GROUPING("*VALUES*".column1, "*VALUES*".column2)), (max("*VALUES*".column3)) - -> HashAggregate - Hash Key: "*VALUES*".column1, "*VALUES*".column2 - Hash Key: ("*VALUES*".column1 + 1), ("*VALUES*".column2 + 1) - Hash Key: ("*VALUES*".column1 + 2), ("*VALUES*".column2 + 2) - -> Values Scan on "*VALUES*" -(7 rows) - -select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum - from gstest2 group by cube (a,b) order by rsum, a, b; - a | b | sum | rsum ----+---+-----+------ - 1 | 1 | 8 | 8 - 1 | 2 | 2 | 10 - 1 | | 10 | 20 - 2 | 2 | 2 | 22 - 2 | | 2 | 24 - | 1 | 8 | 32 - | 2 | 4 | 36 - | | 12 | 48 -(8 rows) - -explain (costs off) - select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum - from gstest2 group by cube (a,b) order by rsum, a, b; - QUERY PLAN ---------------------------------------------- - Sort - Sort Key: (sum((sum(c))) OVER (?)), a, b - -> WindowAgg - -> Sort - Sort Key: a, b - -> MixedAggregate - Hash Key: a, b - Hash Key: a - Hash Key: b - Group Key: () - -> Seq Scan on gstest2 -(11 rows) - -select a, b, sum(v.x) - from (values (1),(2)) v(x), gstest_data(v.x) - group by cube (a,b) order by a,b; - a | b | sum ----+---+----- - 1 | 1 | 1 - 1 | 2 | 1 - 1 | 3 | 1 - 1 | | 3 - 2 | 1 | 2 - 2 | 2 | 2 - 2 | 3 | 2 - 2 | | 6 - | 1 | 3 - | 2 | 3 - | 3 | 3 - | | 9 -(12 rows) - -explain (costs off) - select a, b, sum(v.x) - from (values (1),(2)) v(x), gstest_data(v.x) - group by cube (a,b) order by a,b; - QUERY PLAN ------------------------------------------------- - Sort - Sort Key: gstest_data.a, gstest_data.b - -> MixedAggregate - Hash Key: gstest_data.a, gstest_data.b - Hash Key: gstest_data.a - Hash Key: gstest_data.b - Group Key: () - -> Nested Loop - -> Values Scan on "*VALUES*" - -> Function Scan on gstest_data -(10 rows) - --- Verify that we correctly handle the child node returning a --- non-minimal slot, which happens if the input is pre-sorted, --- e.g. due to an index scan. 
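The rsum column in the gstest2 queries above layers a window function over the grouped result: the inner sum(c) is evaluated once per grouping set produced by CUBE, and the outer sum(...) over (order by a, b) then accumulates those group totals, which is why the plan shows a WindowAgg sitting on top of the MixedAggregate. A self-contained sketch of the same shape, using an invented gs_demo table rather than the regression data:

    create temp table gs_demo (a int, b int, c int);
    insert into gs_demo values (1, 1, 8), (1, 2, 2), (2, 2, 2);

    -- aggregate first (once per grouping set), then run the window over the grouped rows
    select a, b, sum(c),
           sum(sum(c)) over (order by a, b) as rsum
      from gs_demo
     group by cube (a, b)
     order by rsum, a, b;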
-BEGIN; -SET LOCAL enable_hashagg = false; -EXPLAIN (COSTS OFF) SELECT a, b, count(*), max(a), max(b) FROM gstest3 GROUP BY GROUPING SETS(a, b,()) ORDER BY a, b; - QUERY PLAN ---------------------------------------- - Sort - Sort Key: a, b - -> GroupAggregate - Group Key: a - Group Key: () - Sort Key: b - Group Key: b - -> Sort - Sort Key: a - -> Seq Scan on gstest3 -(10 rows) - -SELECT a, b, count(*), max(a), max(b) FROM gstest3 GROUP BY GROUPING SETS(a, b,()) ORDER BY a, b; - a | b | count | max | max ----+---+-------+-----+----- - 1 | | 1 | 1 | 1 - 2 | | 1 | 2 | 2 - | 1 | 1 | 1 | 1 - | 2 | 1 | 2 | 2 - | | 2 | 2 | 2 -(5 rows) - -SET LOCAL enable_seqscan = false; -EXPLAIN (COSTS OFF) SELECT a, b, count(*), max(a), max(b) FROM gstest3 GROUP BY GROUPING SETS(a, b,()) ORDER BY a, b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: a, b - -> GroupAggregate - Group Key: a - Group Key: () - Sort Key: b - Group Key: b - -> Index Scan using gstest3_pkey on gstest3 -(8 rows) - -SELECT a, b, count(*), max(a), max(b) FROM gstest3 GROUP BY GROUPING SETS(a, b,()) ORDER BY a, b; - a | b | count | max | max ----+---+-------+-----+----- - 1 | | 1 | 1 | 1 - 2 | | 1 | 2 | 2 - | 1 | 1 | 1 | 1 - | 2 | 1 | 2 | 2 - | | 2 | 2 | 2 -(5 rows) - -COMMIT; --- More rescan tests -select * from (values (1),(2)) v(a) left join lateral (select v.a, four, ten, count(*) from onek group by cube(four,ten)) s on true order by v.a,four,ten; - a | a | four | ten | count ----+---+------+-----+------- - 1 | 1 | 0 | 0 | 50 - 1 | 1 | 0 | 2 | 50 - 1 | 1 | 0 | 4 | 50 - 1 | 1 | 0 | 6 | 50 - 1 | 1 | 0 | 8 | 50 - 1 | 1 | 0 | | 250 - 1 | 1 | 1 | 1 | 50 - 1 | 1 | 1 | 3 | 50 - 1 | 1 | 1 | 5 | 50 - 1 | 1 | 1 | 7 | 50 - 1 | 1 | 1 | 9 | 50 - 1 | 1 | 1 | | 250 - 1 | 1 | 2 | 0 | 50 - 1 | 1 | 2 | 2 | 50 - 1 | 1 | 2 | 4 | 50 - 1 | 1 | 2 | 6 | 50 - 1 | 1 | 2 | 8 | 50 - 1 | 1 | 2 | | 250 - 1 | 1 | 3 | 1 | 50 - 1 | 1 | 3 | 3 | 50 - 1 | 1 | 3 | 5 | 50 - 1 | 1 | 3 | 7 | 50 - 1 | 1 | 3 | 9 | 50 - 1 | 1 | 3 | | 250 - 1 | 1 | | 0 | 100 - 1 | 1 | | 1 | 100 - 1 | 1 | | 2 | 100 - 1 | 1 | | 3 | 100 - 1 | 1 | | 4 | 100 - 1 | 1 | | 5 | 100 - 1 | 1 | | 6 | 100 - 1 | 1 | | 7 | 100 - 1 | 1 | | 8 | 100 - 1 | 1 | | 9 | 100 - 1 | 1 | | | 1000 - 2 | 2 | 0 | 0 | 50 - 2 | 2 | 0 | 2 | 50 - 2 | 2 | 0 | 4 | 50 - 2 | 2 | 0 | 6 | 50 - 2 | 2 | 0 | 8 | 50 - 2 | 2 | 0 | | 250 - 2 | 2 | 1 | 1 | 50 - 2 | 2 | 1 | 3 | 50 - 2 | 2 | 1 | 5 | 50 - 2 | 2 | 1 | 7 | 50 - 2 | 2 | 1 | 9 | 50 - 2 | 2 | 1 | | 250 - 2 | 2 | 2 | 0 | 50 - 2 | 2 | 2 | 2 | 50 - 2 | 2 | 2 | 4 | 50 - 2 | 2 | 2 | 6 | 50 - 2 | 2 | 2 | 8 | 50 - 2 | 2 | 2 | | 250 - 2 | 2 | 3 | 1 | 50 - 2 | 2 | 3 | 3 | 50 - 2 | 2 | 3 | 5 | 50 - 2 | 2 | 3 | 7 | 50 - 2 | 2 | 3 | 9 | 50 - 2 | 2 | 3 | | 250 - 2 | 2 | | 0 | 100 - 2 | 2 | | 1 | 100 - 2 | 2 | | 2 | 100 - 2 | 2 | | 3 | 100 - 2 | 2 | | 4 | 100 - 2 | 2 | | 5 | 100 - 2 | 2 | | 6 | 100 - 2 | 2 | | 7 | 100 - 2 | 2 | | 8 | 100 - 2 | 2 | | 9 | 100 - 2 | 2 | | | 1000 -(70 rows) - -select array(select row(v.a,s1.*) from (select two,four, count(*) from onek group by cube(two,four) order by two,four) s1) from (values (1),(2)) v(a); - array ------------------------------------------------------------------------------------------------------------------------------------------------------- - {"(1,0,0,250)","(1,0,2,250)","(1,0,,500)","(1,1,1,250)","(1,1,3,250)","(1,1,,500)","(1,,0,250)","(1,,1,250)","(1,,2,250)","(1,,3,250)","(1,,,1000)"} - 
{"(2,0,0,250)","(2,0,2,250)","(2,0,,500)","(2,1,1,250)","(2,1,3,250)","(2,1,,500)","(2,,0,250)","(2,,1,250)","(2,,2,250)","(2,,3,250)","(2,,,1000)"} -(2 rows) - --- Rescan logic changes when there are no empty grouping sets, so test --- that too: -select * from (values (1),(2)) v(a) left join lateral (select v.a, four, ten, count(*) from onek group by grouping sets(four,ten)) s on true order by v.a,four,ten; - a | a | four | ten | count ----+---+------+-----+------- - 1 | 1 | 0 | | 250 - 1 | 1 | 1 | | 250 - 1 | 1 | 2 | | 250 - 1 | 1 | 3 | | 250 - 1 | 1 | | 0 | 100 - 1 | 1 | | 1 | 100 - 1 | 1 | | 2 | 100 - 1 | 1 | | 3 | 100 - 1 | 1 | | 4 | 100 - 1 | 1 | | 5 | 100 - 1 | 1 | | 6 | 100 - 1 | 1 | | 7 | 100 - 1 | 1 | | 8 | 100 - 1 | 1 | | 9 | 100 - 2 | 2 | 0 | | 250 - 2 | 2 | 1 | | 250 - 2 | 2 | 2 | | 250 - 2 | 2 | 3 | | 250 - 2 | 2 | | 0 | 100 - 2 | 2 | | 1 | 100 - 2 | 2 | | 2 | 100 - 2 | 2 | | 3 | 100 - 2 | 2 | | 4 | 100 - 2 | 2 | | 5 | 100 - 2 | 2 | | 6 | 100 - 2 | 2 | | 7 | 100 - 2 | 2 | | 8 | 100 - 2 | 2 | | 9 | 100 -(28 rows) - -select array(select row(v.a,s1.*) from (select two,four, count(*) from onek group by grouping sets(two,four) order by two,four) s1) from (values (1),(2)) v(a); - array ---------------------------------------------------------------------------------- - {"(1,0,,500)","(1,1,,500)","(1,,0,250)","(1,,1,250)","(1,,2,250)","(1,,3,250)"} - {"(2,0,,500)","(2,1,,500)","(2,,0,250)","(2,,1,250)","(2,,2,250)","(2,,3,250)"} -(2 rows) - --- test the knapsack -set enable_indexscan = false; -set hash_mem_multiplier = 1.0; -set work_mem = '64kB'; -explain (costs off) - select unique1, - count(two), count(four), count(ten), - count(hundred), count(thousand), count(twothousand), - count(*) - from tenk1 group by grouping sets (unique1,twothousand,thousand,hundred,ten,four,two); - QUERY PLAN -------------------------------- - MixedAggregate - Hash Key: two - Hash Key: four - Hash Key: ten - Hash Key: hundred - Group Key: unique1 - Sort Key: twothousand - Group Key: twothousand - Sort Key: thousand - Group Key: thousand - -> Sort - Sort Key: unique1 - -> Seq Scan on tenk1 -(13 rows) - -explain (costs off) - select unique1, - count(two), count(four), count(ten), - count(hundred), count(thousand), count(twothousand), - count(*) - from tenk1 group by grouping sets (unique1,hundred,ten,four,two); - QUERY PLAN -------------------------------- - MixedAggregate - Hash Key: two - Hash Key: four - Hash Key: ten - Hash Key: hundred - Group Key: unique1 - -> Sort - Sort Key: unique1 - -> Seq Scan on tenk1 -(9 rows) - -set work_mem = '384kB'; -explain (costs off) - select unique1, - count(two), count(four), count(ten), - count(hundred), count(thousand), count(twothousand), - count(*) - from tenk1 group by grouping sets (unique1,twothousand,thousand,hundred,ten,four,two); - QUERY PLAN -------------------------------- - MixedAggregate - Hash Key: two - Hash Key: four - Hash Key: ten - Hash Key: hundred - Hash Key: thousand - Group Key: unique1 - Sort Key: twothousand - Group Key: twothousand - -> Sort - Sort Key: unique1 - -> Seq Scan on tenk1 -(12 rows) - --- check collation-sensitive matching between grouping expressions --- (similar to a check for aggregates, but there are additional code --- paths for GROUPING, so check again here) -select v||'a', case grouping(v||'a') when 1 then 1 else 0 end, count(*) - from unnest(array[1,1], array['a','b']) u(i,v) - group by rollup(i, v||'a') order by 1,3; - ?column? 
| case | count -----------+------+------- - aa | 0 | 1 - ba | 0 | 1 - | 1 | 2 - | 1 | 2 -(4 rows) - -select v||'a', case when grouping(v||'a') = 1 then 1 else 0 end, count(*) - from unnest(array[1,1], array['a','b']) u(i,v) - group by rollup(i, v||'a') order by 1,3; - ?column? | case | count -----------+------+------- - aa | 0 | 1 - ba | 0 | 1 - | 1 | 2 - | 1 | 2 -(4 rows) - --- Bug #16784 -create table bug_16784(i int, j int); -analyze bug_16784; -alter table bug_16784 set (autovacuum_enabled = 'false'); -update pg_class set reltuples = 10 where relname='bug_16784'; -insert into bug_16784 select g/10, g from generate_series(1,40) g; -set work_mem='64kB'; -set enable_sort = false; -select * from - (values (1),(2)) v(a), - lateral (select a, i, j, count(*) from - bug_16784 group by cube(i,j)) s - order by v.a, i, j; - a | a | i | j | count ----+---+---+----+------- - 1 | 1 | 0 | 1 | 1 - 1 | 1 | 0 | 2 | 1 - 1 | 1 | 0 | 3 | 1 - 1 | 1 | 0 | 4 | 1 - 1 | 1 | 0 | 5 | 1 - 1 | 1 | 0 | 6 | 1 - 1 | 1 | 0 | 7 | 1 - 1 | 1 | 0 | 8 | 1 - 1 | 1 | 0 | 9 | 1 - 1 | 1 | 0 | | 9 - 1 | 1 | 1 | 10 | 1 - 1 | 1 | 1 | 11 | 1 - 1 | 1 | 1 | 12 | 1 - 1 | 1 | 1 | 13 | 1 - 1 | 1 | 1 | 14 | 1 - 1 | 1 | 1 | 15 | 1 - 1 | 1 | 1 | 16 | 1 - 1 | 1 | 1 | 17 | 1 - 1 | 1 | 1 | 18 | 1 - 1 | 1 | 1 | 19 | 1 - 1 | 1 | 1 | | 10 - 1 | 1 | 2 | 20 | 1 - 1 | 1 | 2 | 21 | 1 - 1 | 1 | 2 | 22 | 1 - 1 | 1 | 2 | 23 | 1 - 1 | 1 | 2 | 24 | 1 - 1 | 1 | 2 | 25 | 1 - 1 | 1 | 2 | 26 | 1 - 1 | 1 | 2 | 27 | 1 - 1 | 1 | 2 | 28 | 1 - 1 | 1 | 2 | 29 | 1 - 1 | 1 | 2 | | 10 - 1 | 1 | 3 | 30 | 1 - 1 | 1 | 3 | 31 | 1 - 1 | 1 | 3 | 32 | 1 - 1 | 1 | 3 | 33 | 1 - 1 | 1 | 3 | 34 | 1 - 1 | 1 | 3 | 35 | 1 - 1 | 1 | 3 | 36 | 1 - 1 | 1 | 3 | 37 | 1 - 1 | 1 | 3 | 38 | 1 - 1 | 1 | 3 | 39 | 1 - 1 | 1 | 3 | | 10 - 1 | 1 | 4 | 40 | 1 - 1 | 1 | 4 | | 1 - 1 | 1 | | 1 | 1 - 1 | 1 | | 2 | 1 - 1 | 1 | | 3 | 1 - 1 | 1 | | 4 | 1 - 1 | 1 | | 5 | 1 - 1 | 1 | | 6 | 1 - 1 | 1 | | 7 | 1 - 1 | 1 | | 8 | 1 - 1 | 1 | | 9 | 1 - 1 | 1 | | 10 | 1 - 1 | 1 | | 11 | 1 - 1 | 1 | | 12 | 1 - 1 | 1 | | 13 | 1 - 1 | 1 | | 14 | 1 - 1 | 1 | | 15 | 1 - 1 | 1 | | 16 | 1 - 1 | 1 | | 17 | 1 - 1 | 1 | | 18 | 1 - 1 | 1 | | 19 | 1 - 1 | 1 | | 20 | 1 - 1 | 1 | | 21 | 1 - 1 | 1 | | 22 | 1 - 1 | 1 | | 23 | 1 - 1 | 1 | | 24 | 1 - 1 | 1 | | 25 | 1 - 1 | 1 | | 26 | 1 - 1 | 1 | | 27 | 1 - 1 | 1 | | 28 | 1 - 1 | 1 | | 29 | 1 - 1 | 1 | | 30 | 1 - 1 | 1 | | 31 | 1 - 1 | 1 | | 32 | 1 - 1 | 1 | | 33 | 1 - 1 | 1 | | 34 | 1 - 1 | 1 | | 35 | 1 - 1 | 1 | | 36 | 1 - 1 | 1 | | 37 | 1 - 1 | 1 | | 38 | 1 - 1 | 1 | | 39 | 1 - 1 | 1 | | 40 | 1 - 1 | 1 | | | 40 - 2 | 2 | 0 | 1 | 1 - 2 | 2 | 0 | 2 | 1 - 2 | 2 | 0 | 3 | 1 - 2 | 2 | 0 | 4 | 1 - 2 | 2 | 0 | 5 | 1 - 2 | 2 | 0 | 6 | 1 - 2 | 2 | 0 | 7 | 1 - 2 | 2 | 0 | 8 | 1 - 2 | 2 | 0 | 9 | 1 - 2 | 2 | 0 | | 9 - 2 | 2 | 1 | 10 | 1 - 2 | 2 | 1 | 11 | 1 - 2 | 2 | 1 | 12 | 1 - 2 | 2 | 1 | 13 | 1 - 2 | 2 | 1 | 14 | 1 - 2 | 2 | 1 | 15 | 1 - 2 | 2 | 1 | 16 | 1 - 2 | 2 | 1 | 17 | 1 - 2 | 2 | 1 | 18 | 1 - 2 | 2 | 1 | 19 | 1 - 2 | 2 | 1 | | 10 - 2 | 2 | 2 | 20 | 1 - 2 | 2 | 2 | 21 | 1 - 2 | 2 | 2 | 22 | 1 - 2 | 2 | 2 | 23 | 1 - 2 | 2 | 2 | 24 | 1 - 2 | 2 | 2 | 25 | 1 - 2 | 2 | 2 | 26 | 1 - 2 | 2 | 2 | 27 | 1 - 2 | 2 | 2 | 28 | 1 - 2 | 2 | 2 | 29 | 1 - 2 | 2 | 2 | | 10 - 2 | 2 | 3 | 30 | 1 - 2 | 2 | 3 | 31 | 1 - 2 | 2 | 3 | 32 | 1 - 2 | 2 | 3 | 33 | 1 - 2 | 2 | 3 | 34 | 1 - 2 | 2 | 3 | 35 | 1 - 2 | 2 | 3 | 36 | 1 - 2 | 2 | 3 | 37 | 1 - 2 | 2 | 3 | 38 | 1 - 2 | 2 | 3 | 39 | 1 - 2 | 2 | 3 | | 10 - 2 | 2 | 4 | 40 | 1 - 2 | 2 | 4 | | 1 - 2 | 2 | | 1 | 1 - 2 | 2 | | 2 | 1 - 2 | 2 | | 3 | 1 - 2 | 2 
| | 4 | 1 - 2 | 2 | | 5 | 1 - 2 | 2 | | 6 | 1 - 2 | 2 | | 7 | 1 - 2 | 2 | | 8 | 1 - 2 | 2 | | 9 | 1 - 2 | 2 | | 10 | 1 - 2 | 2 | | 11 | 1 - 2 | 2 | | 12 | 1 - 2 | 2 | | 13 | 1 - 2 | 2 | | 14 | 1 - 2 | 2 | | 15 | 1 - 2 | 2 | | 16 | 1 - 2 | 2 | | 17 | 1 - 2 | 2 | | 18 | 1 - 2 | 2 | | 19 | 1 - 2 | 2 | | 20 | 1 - 2 | 2 | | 21 | 1 - 2 | 2 | | 22 | 1 - 2 | 2 | | 23 | 1 - 2 | 2 | | 24 | 1 - 2 | 2 | | 25 | 1 - 2 | 2 | | 26 | 1 - 2 | 2 | | 27 | 1 - 2 | 2 | | 28 | 1 - 2 | 2 | | 29 | 1 - 2 | 2 | | 30 | 1 - 2 | 2 | | 31 | 1 - 2 | 2 | | 32 | 1 - 2 | 2 | | 33 | 1 - 2 | 2 | | 34 | 1 - 2 | 2 | | 35 | 1 - 2 | 2 | | 36 | 1 - 2 | 2 | | 37 | 1 - 2 | 2 | | 38 | 1 - 2 | 2 | | 39 | 1 - 2 | 2 | | 40 | 1 - 2 | 2 | | | 40 -(172 rows) - --- --- Compare results between plans using sorting and plans using hash --- aggregation. Force spilling in both cases by setting work_mem low --- and altering the statistics. --- -create table gs_data_1 as -select g%1000 as g1000, g%100 as g100, g%10 as g10, g - from generate_series(0,1999) g; -analyze gs_data_1; -alter table gs_data_1 set (autovacuum_enabled = 'false'); -update pg_class set reltuples = 10 where relname='gs_data_1'; -set work_mem='64kB'; --- Produce results with sorting. -set enable_sort = true; -set enable_hashagg = false; -set jit_above_cost = 0; -explain (costs off) -select g100, g10, sum(g::numeric), count(*), max(g::text) -from gs_data_1 group by cube (g1000, g100,g10); - QUERY PLAN ------------------------------------- - GroupAggregate - Group Key: g1000, g100, g10 - Group Key: g1000, g100 - Group Key: g1000 - Group Key: () - Sort Key: g100, g10 - Group Key: g100, g10 - Group Key: g100 - Sort Key: g10, g1000 - Group Key: g10, g1000 - Group Key: g10 - -> Sort - Sort Key: g1000, g100, g10 - -> Seq Scan on gs_data_1 -(14 rows) - -create table gs_group_1 as -select g100, g10, sum(g::numeric), count(*), max(g::text) -from gs_data_1 group by cube (g1000, g100,g10); --- Produce results with hash aggregation. -set enable_hashagg = true; -set enable_sort = false; -explain (costs off) -select g100, g10, sum(g::numeric), count(*), max(g::text) -from gs_data_1 group by cube (g1000, g100,g10); - QUERY PLAN ------------------------------- - MixedAggregate - Hash Key: g1000, g100, g10 - Hash Key: g1000, g100 - Hash Key: g1000 - Hash Key: g100, g10 - Hash Key: g100 - Hash Key: g10, g1000 - Hash Key: g10 - Group Key: () - -> Seq Scan on gs_data_1 -(10 rows) - -create table gs_hash_1 as -select g100, g10, sum(g::numeric), count(*), max(g::text) -from gs_data_1 group by cube (g1000, g100,g10); -set enable_sort = true; -set work_mem to default; -set hash_mem_multiplier to default; --- Compare results -(select * from gs_hash_1 except select * from gs_group_1) - union all -(select * from gs_group_1 except select * from gs_hash_1); - g100 | g10 | sum | count | max -------+-----+-----+-------+----- -(0 rows) - -drop table gs_group_1; -drop table gs_hash_1; --- GROUP BY DISTINCT --- "normal" behavior... 
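The gs_group_1 / gs_hash_1 comparison above is the usual trick for proving that two plan shapes return identical rows: materialize the result once under each setting and check that the EXCEPT in both directions comes back empty. A smaller, self-contained version of the same pattern (table names are invented; the cube itself is the same kind of query the test uses):

    create temp table src as
      select g % 10 as g10, g % 100 as g100, g
        from generate_series(0, 999) g;

    set enable_hashagg = false;            -- force the sort-based GroupAggregate path
    create temp table by_sort as
      select g10, g100, count(*) from src group by cube (g10, g100);

    set enable_hashagg = true;             -- prefer HashAggregate / MixedAggregate
    set enable_sort = false;
    create temp table by_hash as
      select g10, g100, count(*) from src group by cube (g10, g100);

    reset enable_sort;
    reset enable_hashagg;

    -- both directions must come back empty if the two plans agree
    (table by_hash except table by_sort)
    union all
    (table by_sort except table by_hash);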
-select a, b, c -from (values (1, 2, 3), (4, null, 6), (7, 8, 9)) as t (a, b, c) -group by all rollup(a, b), rollup(a, c) -order by a, b, c; - a | b | c ----+---+--- - 1 | 2 | 3 - 1 | 2 | - 1 | 2 | - 1 | | 3 - 1 | | 3 - 1 | | - 1 | | - 1 | | - 4 | | 6 - 4 | | 6 - 4 | | 6 - 4 | | - 4 | | - 4 | | - 4 | | - 4 | | - 7 | 8 | 9 - 7 | 8 | - 7 | 8 | - 7 | | 9 - 7 | | 9 - 7 | | - 7 | | - 7 | | - | | -(25 rows) - --- ...which is also the default -select a, b, c -from (values (1, 2, 3), (4, null, 6), (7, 8, 9)) as t (a, b, c) -group by rollup(a, b), rollup(a, c) -order by a, b, c; - a | b | c ----+---+--- - 1 | 2 | 3 - 1 | 2 | - 1 | 2 | - 1 | | 3 - 1 | | 3 - 1 | | - 1 | | - 1 | | - 4 | | 6 - 4 | | 6 - 4 | | 6 - 4 | | - 4 | | - 4 | | - 4 | | - 4 | | - 7 | 8 | 9 - 7 | 8 | - 7 | 8 | - 7 | | 9 - 7 | | 9 - 7 | | - 7 | | - 7 | | - | | -(25 rows) - --- "group by distinct" behavior... -select a, b, c -from (values (1, 2, 3), (4, null, 6), (7, 8, 9)) as t (a, b, c) -group by distinct rollup(a, b), rollup(a, c) -order by a, b, c; - a | b | c ----+---+--- - 1 | 2 | 3 - 1 | 2 | - 1 | | 3 - 1 | | - 4 | | 6 - 4 | | 6 - 4 | | - 4 | | - 7 | 8 | 9 - 7 | 8 | - 7 | | 9 - 7 | | - | | -(13 rows) - --- ...which is not the same as "select distinct" -select distinct a, b, c -from (values (1, 2, 3), (4, null, 6), (7, 8, 9)) as t (a, b, c) -group by rollup(a, b), rollup(a, c) -order by a, b, c; - a | b | c ----+---+--- - 1 | 2 | 3 - 1 | 2 | - 1 | | 3 - 1 | | - 4 | | 6 - 4 | | - 7 | 8 | 9 - 7 | 8 | - 7 | | 9 - 7 | | - | | -(11 rows) - --- test handling of outer GroupingFunc within subqueries -explain (costs off) -select (select grouping(v1)) from (values ((select 1))) v(v1) group by cube(v1); - QUERY PLAN ---------------------------- - MixedAggregate - Hash Key: $2 - Group Key: () - InitPlan 1 (returns $1) - -> Result - InitPlan 3 (returns $2) - -> Result - -> Result - SubPlan 2 - -> Result -(10 rows) - -select (select grouping(v1)) from (values ((select 1))) v(v1) group by cube(v1); - grouping ----------- - 1 - 0 -(2 rows) - -explain (costs off) -select (select grouping(v1)) from (values ((select 1))) v(v1) group by v1; - QUERY PLAN ---------------------------- - GroupAggregate - InitPlan 1 (returns $1) - -> Result - InitPlan 3 (returns $2) - -> Result - -> Result - SubPlan 2 - -> Result -(8 rows) - -select (select grouping(v1)) from (values ((select 1))) v(v1) group by v1; - grouping ----------- - 0 -(1 row) - --- expressions nullable by grouping sets -explain (costs off) -select distinct on (a, b) a, b -from (values (1, 1), (2, 2)) as t (a, b) where a = b -group by grouping sets((a, b), (a)) -order by a, b; - QUERY PLAN ----------------------------------------------------------------- - Unique - -> Sort - Sort Key: "*VALUES*".column1, "*VALUES*".column2 - -> HashAggregate - Hash Key: "*VALUES*".column1, "*VALUES*".column2 - Hash Key: "*VALUES*".column1 - -> Values Scan on "*VALUES*" - Filter: (column1 = column2) -(8 rows) - -select distinct on (a, b) a, b -from (values (1, 1), (2, 2)) as t (a, b) where a = b -group by grouping sets((a, b), (a)) -order by a, b; - a | b ----+--- - 1 | 1 - 1 | - 2 | 2 - 2 | -(4 rows) - -explain (costs off) -select distinct on (a, b+1) a, b+1 -from (values (1, 0), (2, 1)) as t (a, b) where a = b+1 -group by grouping sets((a, b+1), (a)) -order by a, b+1; - QUERY PLAN ----------------------------------------------------------------------- - Unique - -> Sort - Sort Key: "*VALUES*".column1, (("*VALUES*".column2 + 1)) - -> HashAggregate - Hash Key: "*VALUES*".column1, ("*VALUES*".column2 + 1) - Hash 
Key: "*VALUES*".column1 - -> Values Scan on "*VALUES*" - Filter: (column1 = (column2 + 1)) -(8 rows) - -select distinct on (a, b+1) a, b+1 -from (values (1, 0), (2, 1)) as t (a, b) where a = b+1 -group by grouping sets((a, b+1), (a)) -order by a, b+1; - a | ?column? ----+---------- - 1 | 1 - 1 | - 2 | 2 - 2 | -(4 rows) - -explain (costs off) -select a, b -from (values (1, 1), (2, 2)) as t (a, b) where a = b -group by grouping sets((a, b), (a)) -order by a, b nulls first; - QUERY PLAN ----------------------------------------------------------------- - Sort - Sort Key: "*VALUES*".column1, "*VALUES*".column2 NULLS FIRST - -> HashAggregate - Hash Key: "*VALUES*".column1, "*VALUES*".column2 - Hash Key: "*VALUES*".column1 - -> Values Scan on "*VALUES*" - Filter: (column1 = column2) -(7 rows) - -select a, b -from (values (1, 1), (2, 2)) as t (a, b) where a = b -group by grouping sets((a, b), (a)) -order by a, b nulls first; - a | b ----+--- - 1 | - 1 | 1 - 2 | - 2 | 2 -(4 rows) - -explain (costs off) -select 1 as one group by rollup(one) order by one nulls first; - QUERY PLAN ------------------------------ - Sort - Sort Key: (1) NULLS FIRST - -> MixedAggregate - Hash Key: 1 - Group Key: () - -> Result -(6 rows) - -select 1 as one group by rollup(one) order by one nulls first; - one ------ - - 1 -(2 rows) - --- end +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/identity.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/identity.out --- /tmp/cirrus-ci-build/src/test/regress/expected/identity.out 2024-03-13 23:12:37.624413000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/identity.out 2024-03-13 23:14:29.329900000 +0000 @@ -618,264 +618,7 @@ CREATE TABLE pitest1_p1 PARTITION OF pitest1 FOR VALUES FROM ('2016-07-01') TO ('2016-08-01'); INSERT into pitest1(f1, f2) VALUES ('2016-07-2', 'from pitest1'); INSERT into pitest1_p1 (f1, f2) VALUES ('2016-07-3', 'from pitest1_p1'); --- attached partition -CREATE TABLE pitest1_p2 (f1 date NOT NULL, f2 text, f3 bigint); -INSERT INTO pitest1_p2 VALUES ('2016-08-2', 'before attaching', 100); -ALTER TABLE pitest1 ATTACH PARTITION pitest1_p2 FOR VALUES FROM ('2016-08-01') TO ('2016-09-01'); -- requires NOT NULL constraint -ERROR: column "f3" in child table must be marked NOT NULL -ALTER TABLE pitest1_p2 ALTER COLUMN f3 SET NOT NULL; -ALTER TABLE pitest1 ATTACH PARTITION pitest1_p2 FOR VALUES FROM ('2016-08-01') TO ('2016-09-01'); -INSERT INTO pitest1_p2 (f1, f2) VALUES ('2016-08-3', 'from pitest1_p2'); -INSERT INTO pitest1 (f1, f2) VALUES ('2016-08-4', 'from pitest1'); -SELECT tableoid::regclass, f1, f2, f3 FROM pitest1; - tableoid | f1 | f2 | f3 -------------+------------+------------------+----- - pitest1_p1 | 07-02-2016 | from pitest1 | 1 - pitest1_p1 | 07-03-2016 | from pitest1_p1 | 2 - pitest1_p2 | 08-02-2016 | before attaching | 100 - pitest1_p2 | 08-03-2016 | from pitest1_p2 | 3 - pitest1_p2 | 08-04-2016 | from pitest1 | 4 -(5 rows) - --- add identity column -CREATE TABLE pitest2 (f1 date NOT NULL, f2 text) PARTITION BY RANGE (f1); -CREATE TABLE pitest2_p1 PARTITION OF pitest2 FOR VALUES FROM ('2016-07-01') TO ('2016-08-01'); -CREATE TABLE pitest2_p2 PARTITION OF pitest2 FOR VALUES FROM ('2016-08-01') TO ('2016-09-01'); -INSERT into pitest2(f1, f2) VALUES ('2016-07-2', 'from pitest2'); -INSERT INTO pitest2 (f1, f2) VALUES ('2016-08-2', 'from 
pitest2'); -ALTER TABLE pitest2 ADD COLUMN f3 int GENERATED ALWAYS AS IDENTITY; -INSERT into pitest2_p1 (f1, f2) VALUES ('2016-07-3', 'from pitest2_p1'); -INSERT INTO pitest2_p2 (f1, f2) VALUES ('2016-08-3', 'from pitest2_p2'); -INSERT into pitest2(f1, f2) VALUES ('2016-07-4', 'from pitest2'); -INSERT INTO pitest2 (f1, f2) VALUES ('2016-08-4', 'from pitest2'); -SELECT tableoid::regclass, f1, f2, f3 FROM pitest2; - tableoid | f1 | f2 | f3 -------------+------------+-----------------+---- - pitest2_p1 | 07-02-2016 | from pitest2 | 1 - pitest2_p1 | 07-03-2016 | from pitest2_p1 | 3 - pitest2_p1 | 07-04-2016 | from pitest2 | 5 - pitest2_p2 | 08-02-2016 | from pitest2 | 2 - pitest2_p2 | 08-03-2016 | from pitest2_p2 | 4 - pitest2_p2 | 08-04-2016 | from pitest2 | 6 -(6 rows) - --- SET identity column -ALTER TABLE pitest2_p1 ALTER COLUMN f3 SET GENERATED BY DEFAULT; -- fails -ERROR: cannot change identity column of a partition -ALTER TABLE pitest2_p1 ALTER COLUMN f3 SET INCREMENT BY 2; -- fails -ERROR: cannot change identity column of a partition -ALTER TABLE ONLY pitest2 ALTER COLUMN f3 SET GENERATED BY DEFAULT SET INCREMENT BY 2 SET START WITH 1000 RESTART; -- fails -ERROR: cannot change identity column of only the partitioned table -HINT: Do not specify the ONLY keyword. -ALTER TABLE pitest2 ALTER COLUMN f3 SET GENERATED BY DEFAULT SET INCREMENT BY 2 SET START WITH 1000 RESTART; -INSERT into pitest2(f1, f2, f3) VALUES ('2016-07-5', 'from pitest2', 200); -INSERT INTO pitest2(f1, f2) VALUES ('2016-08-5', 'from pitest2'); -INSERT into pitest2_p1 (f1, f2) VALUES ('2016-07-6', 'from pitest2_p1'); -INSERT INTO pitest2_p2 (f1, f2, f3) VALUES ('2016-08-6', 'from pitest2_p2', 300); -SELECT tableoid::regclass, f1, f2, f3 FROM pitest2; - tableoid | f1 | f2 | f3 -------------+------------+-----------------+------ - pitest2_p1 | 07-02-2016 | from pitest2 | 1 - pitest2_p1 | 07-03-2016 | from pitest2_p1 | 3 - pitest2_p1 | 07-04-2016 | from pitest2 | 5 - pitest2_p1 | 07-05-2016 | from pitest2 | 200 - pitest2_p1 | 07-06-2016 | from pitest2_p1 | 1002 - pitest2_p2 | 08-02-2016 | from pitest2 | 2 - pitest2_p2 | 08-03-2016 | from pitest2_p2 | 4 - pitest2_p2 | 08-04-2016 | from pitest2 | 6 - pitest2_p2 | 08-05-2016 | from pitest2 | 1000 - pitest2_p2 | 08-06-2016 | from pitest2_p2 | 300 -(10 rows) - --- detaching a partition removes identity property -ALTER TABLE pitest2 DETACH PARTITION pitest2_p1; -INSERT into pitest2(f1, f2) VALUES ('2016-08-7', 'from pitest2'); -INSERT into pitest2_p1 (f1, f2) VALUES ('2016-07-7', 'from pitest2_p1'); -- error -ERROR: null value in column "f3" of relation "pitest2_p1" violates not-null constraint -DETAIL: Failing row contains (07-07-2016, from pitest2_p1, null). 
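The failing INSERT above is the visible effect of DETACH PARTITION dropping the identity property from the detached table: the column keeps its NOT NULL constraint but no longer has a default fed from the parent's sequence, so only inserts that supply the value explicitly succeed. A compressed sketch of that behaviour on a server new enough to allow identity columns on partitioned tables (the names below are placeholders, not the pitest* tables):

    create table idpart (d date not null, v bigint generated always as identity)
      partition by range (d);
    create table idpart_jan partition of idpart
      for values from ('2024-01-01') to ('2024-02-01');

    insert into idpart (d) values ('2024-01-05');       -- v filled from the parent's sequence
    alter table idpart detach partition idpart_jan;
    insert into idpart_jan (d) values ('2024-01-06');   -- error: v no longer has a default
    insert into idpart_jan (d, v) values ('2024-01-06', 100);  -- explicit value still works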
-INSERT into pitest2_p1 (f1, f2, f3) VALUES ('2016-07-7', 'from pitest2_p1', 2000); -SELECT tableoid::regclass, f1, f2, f3 FROM pitest2; - tableoid | f1 | f2 | f3 -------------+------------+-----------------+------ - pitest2_p2 | 08-02-2016 | from pitest2 | 2 - pitest2_p2 | 08-03-2016 | from pitest2_p2 | 4 - pitest2_p2 | 08-04-2016 | from pitest2 | 6 - pitest2_p2 | 08-05-2016 | from pitest2 | 1000 - pitest2_p2 | 08-06-2016 | from pitest2_p2 | 300 - pitest2_p2 | 08-07-2016 | from pitest2 | 1004 -(6 rows) - -SELECT tableoid::regclass, f1, f2, f3 FROM pitest2_p1; - tableoid | f1 | f2 | f3 -------------+------------+-----------------+------ - pitest2_p1 | 07-02-2016 | from pitest2 | 1 - pitest2_p1 | 07-03-2016 | from pitest2_p1 | 3 - pitest2_p1 | 07-04-2016 | from pitest2 | 5 - pitest2_p1 | 07-05-2016 | from pitest2 | 200 - pitest2_p1 | 07-06-2016 | from pitest2_p1 | 1002 - pitest2_p1 | 07-07-2016 | from pitest2_p1 | 2000 -(6 rows) - -DROP TABLE pitest2_p1; --- changing a regular column to identity column in a partitioned table -CREATE TABLE pitest3 (f1 date NOT NULL, f2 text, f3 int) PARTITION BY RANGE (f1); -CREATE TABLE pitest3_p1 PARTITION OF pitest3 FOR VALUES FROM ('2016-07-01') TO ('2016-08-01'); -INSERT into pitest3 VALUES ('2016-07-2', 'from pitest3', 1); -INSERT into pitest3_p1 VALUES ('2016-07-3', 'from pitest3_p1', 2); --- fails, changing only a partition not allowed -ALTER TABLE pitest3_p1 - ALTER COLUMN f3 SET NOT NULL, - ALTER COLUMN f3 ADD GENERATED ALWAYS AS IDENTITY (START WITH 3); -ERROR: cannot add identity to a column of a partition --- fails, changing only the partitioned table not allowed -ALTER TABLE ONLY pitest3 - ALTER COLUMN f3 SET NOT NULL, - ALTER COLUMN f3 ADD GENERATED ALWAYS AS IDENTITY (START WITH 3); -ERROR: constraint must be added to child tables too -HINT: Do not specify the ONLY keyword. -ALTER TABLE pitest3 - ALTER COLUMN f3 SET NOT NULL, - ALTER COLUMN f3 ADD GENERATED ALWAYS AS IDENTITY (START WITH 3); -INSERT into pitest3(f1, f2) VALUES ('2016-07-4', 'from pitest3'); -INSERT into pitest3_p1 (f1, f2) VALUES ('2016-07-5', 'from pitest3_p1'); -SELECT tableoid::regclass, f1, f2, f3 FROM pitest3; - tableoid | f1 | f2 | f3 -------------+------------+-----------------+---- - pitest3_p1 | 07-02-2016 | from pitest3 | 1 - pitest3_p1 | 07-03-2016 | from pitest3_p1 | 2 - pitest3_p1 | 07-04-2016 | from pitest3 | 3 - pitest3_p1 | 07-05-2016 | from pitest3_p1 | 4 -(4 rows) - --- changing an identity column to a non-identity column in a partitioned table -ALTER TABLE pitest3_p1 ALTER COLUMN f3 DROP IDENTITY; -- fails -ERROR: cannot drop identity from a column of a partition -ALTER TABLE ONLY pitest3 ALTER COLUMN f3 DROP IDENTITY; -- fails -ERROR: cannot drop identity from a column of only the partitioned table -HINT: Do not specify the ONLY keyword. -ALTER TABLE pitest3 ALTER COLUMN f3 DROP IDENTITY; -INSERT into pitest3(f1, f2) VALUES ('2016-07-4', 'from pitest3'); -- fails -ERROR: null value in column "f3" of relation "pitest3_p1" violates not-null constraint -DETAIL: Failing row contains (07-04-2016, from pitest3, null). -INSERT into pitest3_p1 (f1, f2) VALUES ('2016-07-5', 'from pitest3_p1'); -- fails -ERROR: null value in column "f3" of relation "pitest3_p1" violates not-null constraint -DETAIL: Failing row contains (07-05-2016, from pitest3_p1, null). 
-INSERT into pitest3(f1, f2, f3) VALUES ('2016-07-6', 'from pitest3', 5); -INSERT into pitest3_p1 (f1, f2, f3) VALUES ('2016-07-7', 'from pitest3_p1', 6); -SELECT tableoid::regclass, f1, f2, f3 FROM pitest3; - tableoid | f1 | f2 | f3 -------------+------------+-----------------+---- - pitest3_p1 | 07-02-2016 | from pitest3 | 1 - pitest3_p1 | 07-03-2016 | from pitest3_p1 | 2 - pitest3_p1 | 07-04-2016 | from pitest3 | 3 - pitest3_p1 | 07-05-2016 | from pitest3_p1 | 4 - pitest3_p1 | 07-06-2016 | from pitest3 | 5 - pitest3_p1 | 07-07-2016 | from pitest3_p1 | 6 -(6 rows) - --- Changing NOT NULL constraint of identity columns is not allowed -ALTER TABLE pitest1_p1 ALTER COLUMN f3 DROP NOT NULL; -ERROR: column "f3" of relation "pitest1_p1" is an identity column -ALTER TABLE pitest1 ALTER COLUMN f3 DROP NOT NULL; -ERROR: column "f3" of relation "pitest1" is an identity column --- Identity columns have their own default -ALTER TABLE pitest1_p2 ALTER COLUMN f3 SET DEFAULT 10000; -ERROR: column "f3" of relation "pitest1_p2" is an identity column -ALTER TABLE pitest1 ALTER COLUMN f3 SET DEFAULT 10000; -ERROR: column "f3" of relation "pitest1" is an identity column --- Adding identity to an identity column is not allowed -ALTER TABLE pitest1_p2 ALTER COLUMN f3 ADD GENERATED BY DEFAULT AS IDENTITY; -ERROR: cannot add identity to a column of a partition -ALTER TABLE pitest1 ALTER COLUMN f3 ADD GENERATED BY DEFAULT AS IDENTITY; -ERROR: column "f3" of relation "pitest1" is already an identity column --- partitions with their own identity columns are not allowed, even if the --- partitioned table does not have an identity column. -CREATE TABLE pitest1_pfail PARTITION OF pitest1 ( - f3 WITH OPTIONS GENERATED ALWAYS AS IDENTITY -) FOR VALUES FROM ('2016-11-01') TO ('2016-12-01'); -ERROR: identity columns are not supported on partitions -CREATE TABLE pitest_pfail PARTITION OF pitest3 ( - f3 WITH OPTIONS GENERATED ALWAYS AS IDENTITY -) FOR VALUES FROM ('2016-07-01') TO ('2016-08-01'); -ERROR: identity columns are not supported on partitions -CREATE TABLE pitest1_pfail (f1 date NOT NULL, f2 text, f3 bigint GENERATED ALWAYS AS IDENTITY); -ALTER TABLE pitest1 ATTACH PARTITION pitest1_pfail FOR VALUES FROM ('2016-11-01') TO ('2016-12-01'); -ERROR: table "pitest1_pfail" being attached contains an identity column "f3" -DETAIL: The new partition may not contain an identity column. -ALTER TABLE pitest3 ATTACH PARTITION pitest1_pfail FOR VALUES FROM ('2016-11-01') TO ('2016-12-01'); -ERROR: table "pitest1_pfail" being attached contains an identity column "f3" -DETAIL: The new partition may not contain an identity column. -DROP TABLE pitest1_pfail; -DROP TABLE pitest3; --- test that sequence of half-dropped serial column is properly ignored -CREATE TABLE itest14 (id serial); -ALTER TABLE itest14 ALTER id DROP DEFAULT; -ALTER TABLE itest14 ALTER id ADD GENERATED BY DEFAULT AS IDENTITY; -INSERT INTO itest14 (id) VALUES (DEFAULT); --- Identity columns must be NOT NULL (cf bug #16913) -CREATE TABLE itest15 (id integer GENERATED ALWAYS AS IDENTITY NULL); -- fail -ERROR: conflicting NULL/NOT NULL declarations for column "id" of table "itest15" -LINE 1: ...ABLE itest15 (id integer GENERATED ALWAYS AS IDENTITY NULL); - ^ -CREATE TABLE itest15 (id integer NULL GENERATED ALWAYS AS IDENTITY); -- fail -ERROR: conflicting NULL/NOT NULL declarations for column "id" of table "itest15" -LINE 1: CREATE TABLE itest15 (id integer NULL GENERATED ALWAYS AS ID... 
- ^ -CREATE TABLE itest15 (id integer GENERATED ALWAYS AS IDENTITY NOT NULL); -DROP TABLE itest15; -CREATE TABLE itest15 (id integer NOT NULL GENERATED ALWAYS AS IDENTITY); -DROP TABLE itest15; --- MERGE tests -CREATE TABLE itest15 (a int GENERATED ALWAYS AS IDENTITY, b text); -CREATE TABLE itest16 (a int GENERATED BY DEFAULT AS IDENTITY, b text); -MERGE INTO itest15 t -USING (SELECT 10 AS s_a, 'inserted by merge' AS s_b) s -ON t.a = s.s_a -WHEN NOT MATCHED THEN - INSERT (a, b) VALUES (s.s_a, s.s_b); -ERROR: cannot insert a non-DEFAULT value into column "a" -DETAIL: Column "a" is an identity column defined as GENERATED ALWAYS. -HINT: Use OVERRIDING SYSTEM VALUE to override. --- Used to fail, but now it works and ignores the user supplied value -MERGE INTO itest15 t -USING (SELECT 20 AS s_a, 'inserted by merge' AS s_b) s -ON t.a = s.s_a -WHEN NOT MATCHED THEN - INSERT (a, b) OVERRIDING USER VALUE VALUES (s.s_a, s.s_b); -MERGE INTO itest15 t -USING (SELECT 30 AS s_a, 'inserted by merge' AS s_b) s -ON t.a = s.s_a -WHEN NOT MATCHED THEN - INSERT (a, b) OVERRIDING SYSTEM VALUE VALUES (s.s_a, s.s_b); -MERGE INTO itest16 t -USING (SELECT 10 AS s_a, 'inserted by merge' AS s_b) s -ON t.a = s.s_a -WHEN NOT MATCHED THEN - INSERT (a, b) VALUES (s.s_a, s.s_b); -MERGE INTO itest16 t -USING (SELECT 20 AS s_a, 'inserted by merge' AS s_b) s -ON t.a = s.s_a -WHEN NOT MATCHED THEN - INSERT (a, b) OVERRIDING USER VALUE VALUES (s.s_a, s.s_b); -MERGE INTO itest16 t -USING (SELECT 30 AS s_a, 'inserted by merge' AS s_b) s -ON t.a = s.s_a -WHEN NOT MATCHED THEN - INSERT (a, b) OVERRIDING SYSTEM VALUE VALUES (s.s_a, s.s_b); -SELECT * FROM itest15; - a | b -----+------------------- - 1 | inserted by merge - 30 | inserted by merge -(2 rows) - -SELECT * FROM itest16; - a | b -----+------------------- - 10 | inserted by merge - 1 | inserted by merge - 30 | inserted by merge -(3 rows) - -DROP TABLE itest15; -DROP TABLE itest16; +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/generated.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/generated.out --- /tmp/cirrus-ci-build/src/test/regress/expected/generated.out 2024-03-13 23:12:37.623960000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/generated.out 2024-03-13 23:14:29.331468000 +0000 @@ -845,510 +845,7 @@ (3 rows) UPDATE gtest_parent SET f1 = f1 + 60 WHERE f2 = 1; -SELECT tableoid::regclass, * FROM gtest_parent ORDER BY 1, 2, 3; - tableoid | f1 | f2 | f3 ---------------+------------+----+---- - gtest_child | 07-15-2016 | 2 | 4 - gtest_child2 | 08-15-2016 | 3 | 66 - gtest_child3 | 09-13-2016 | 1 | 33 -(3 rows) - --- alter only parent's and one child's generation expression -ALTER TABLE ONLY gtest_parent ALTER COLUMN f3 SET EXPRESSION AS (f2 * 4); -ALTER TABLE gtest_child ALTER COLUMN f3 SET EXPRESSION AS (f2 * 10); -\d gtest_parent - Partitioned table "public.gtest_parent" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+------------------------------------- - f1 | date | | not null | - f2 | bigint | | | - f3 | bigint | | | generated always as (f2 * 4) stored -Partition key: RANGE (f1) -Number of partitions: 3 (Use \d+ to list them.) 
- -\d gtest_child - Table "public.gtest_child" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+-------------------------------------- - f1 | date | | not null | - f2 | bigint | | | - f3 | bigint | | | generated always as (f2 * 10) stored -Partition of: gtest_parent FOR VALUES FROM ('07-01-2016') TO ('08-01-2016') - -\d gtest_child2 - Table "public.gtest_child2" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+-------------------------------------- - f1 | date | | not null | - f2 | bigint | | | - f3 | bigint | | | generated always as (f2 * 22) stored -Partition of: gtest_parent FOR VALUES FROM ('08-01-2016') TO ('09-01-2016') - -\d gtest_child3 - Table "public.gtest_child3" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+-------------------------------------- - f1 | date | | not null | - f2 | bigint | | | - f3 | bigint | | | generated always as (f2 * 33) stored -Partition of: gtest_parent FOR VALUES FROM ('09-01-2016') TO ('10-01-2016') - -SELECT tableoid::regclass, * FROM gtest_parent ORDER BY 1, 2, 3; - tableoid | f1 | f2 | f3 ---------------+------------+----+---- - gtest_child | 07-15-2016 | 2 | 20 - gtest_child2 | 08-15-2016 | 3 | 66 - gtest_child3 | 09-13-2016 | 1 | 33 -(3 rows) - --- alter generation expression of parent and all its children altogether -ALTER TABLE gtest_parent ALTER COLUMN f3 SET EXPRESSION AS (f2 * 2); -\d gtest_parent - Partitioned table "public.gtest_parent" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+------------------------------------- - f1 | date | | not null | - f2 | bigint | | | - f3 | bigint | | | generated always as (f2 * 2) stored -Partition key: RANGE (f1) -Number of partitions: 3 (Use \d+ to list them.) 
- -\d gtest_child - Table "public.gtest_child" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+------------------------------------- - f1 | date | | not null | - f2 | bigint | | | - f3 | bigint | | | generated always as (f2 * 2) stored -Partition of: gtest_parent FOR VALUES FROM ('07-01-2016') TO ('08-01-2016') - -\d gtest_child2 - Table "public.gtest_child2" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+------------------------------------- - f1 | date | | not null | - f2 | bigint | | | - f3 | bigint | | | generated always as (f2 * 2) stored -Partition of: gtest_parent FOR VALUES FROM ('08-01-2016') TO ('09-01-2016') - -\d gtest_child3 - Table "public.gtest_child3" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+------------------------------------- - f1 | date | | not null | - f2 | bigint | | | - f3 | bigint | | | generated always as (f2 * 2) stored -Partition of: gtest_parent FOR VALUES FROM ('09-01-2016') TO ('10-01-2016') - -SELECT tableoid::regclass, * FROM gtest_parent ORDER BY 1, 2, 3; - tableoid | f1 | f2 | f3 ---------------+------------+----+---- - gtest_child | 07-15-2016 | 2 | 4 - gtest_child2 | 08-15-2016 | 3 | 6 - gtest_child3 | 09-13-2016 | 1 | 2 -(3 rows) - --- we leave these tables around for purposes of testing dump/reload/upgrade --- generated columns in partition key (not allowed) -CREATE TABLE gtest_part_key (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE (f3); -ERROR: cannot use generated column in partition key -LINE 1: ...ENERATED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE (f3); - ^ -DETAIL: Column "f3" is a generated column. -CREATE TABLE gtest_part_key (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE ((f3 * 3)); -ERROR: cannot use generated column in partition key -LINE 1: ...ED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE ((f3 * 3)); - ^ -DETAIL: Column "f3" is a generated column. --- ALTER TABLE ... ADD COLUMN -CREATE TABLE gtest25 (a int PRIMARY KEY); -INSERT INTO gtest25 VALUES (3), (4); -ALTER TABLE gtest25 ADD COLUMN b int GENERATED ALWAYS AS (a * 2) STORED, ALTER COLUMN b SET EXPRESSION AS (a * 3); -SELECT * FROM gtest25 ORDER BY a; - a | b ----+---- - 3 | 9 - 4 | 12 -(2 rows) - -ALTER TABLE gtest25 ADD COLUMN x int GENERATED ALWAYS AS (b * 4) STORED; -- error -ERROR: cannot use generated column "b" in column generation expression -DETAIL: A generated column cannot reference another generated column. 
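The two errors above mark the limits of what a generation expression may reference: it can use ordinary columns, including one added in the same ALTER TABLE statement, but not another generated column and not a column that does not exist. A minimal illustration with an invented table (not gtest25):

    create temp table gen_demo (a int);
    insert into gen_demo values (3), (4);

    -- ok: the expression references a plain column; existing rows are rewritten
    alter table gen_demo add column b int generated always as (a * 2) stored;

    -- would fail: generation expressions may not chain off other generated columns
    -- alter table gen_demo add column c int generated always as (b + 1) stored;

    -- ok: the referenced plain column may be added in the same statement
    alter table gen_demo
      add column c int default 42,
      add column d int generated always as (c * 4) stored;

    select * from gen_demo;   -- b = a*2, c = 42, d = 168 for both rows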
-ALTER TABLE gtest25 ADD COLUMN x int GENERATED ALWAYS AS (z * 4) STORED; -- error -ERROR: column "z" does not exist -ALTER TABLE gtest25 ADD COLUMN c int DEFAULT 42, - ADD COLUMN x int GENERATED ALWAYS AS (c * 4) STORED; -ALTER TABLE gtest25 ADD COLUMN d int DEFAULT 101; -ALTER TABLE gtest25 ALTER COLUMN d SET DATA TYPE float8, - ADD COLUMN y float8 GENERATED ALWAYS AS (d * 4) STORED; -SELECT * FROM gtest25 ORDER BY a; - a | b | c | x | d | y ----+----+----+-----+-----+----- - 3 | 9 | 42 | 168 | 101 | 404 - 4 | 12 | 42 | 168 | 101 | 404 -(2 rows) - -\d gtest25 - Table "public.gtest25" - Column | Type | Collation | Nullable | Default ---------+------------------+-----------+----------+------------------------------------------------------ - a | integer | | not null | - b | integer | | | generated always as (a * 3) stored - c | integer | | | 42 - x | integer | | | generated always as (c * 4) stored - d | double precision | | | 101 - y | double precision | | | generated always as (d * 4::double precision) stored -Indexes: - "gtest25_pkey" PRIMARY KEY, btree (a) - --- ALTER TABLE ... ALTER COLUMN -CREATE TABLE gtest27 ( - a int, - b int, - x int GENERATED ALWAYS AS ((a + b) * 2) STORED -); -INSERT INTO gtest27 (a, b) VALUES (3, 7), (4, 11); -ALTER TABLE gtest27 ALTER COLUMN a TYPE text; -- error -ERROR: cannot alter type of a column used by a generated column -DETAIL: Column "a" is used by generated column "x". -ALTER TABLE gtest27 ALTER COLUMN x TYPE numeric; -\d gtest27 - Table "public.gtest27" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+-------------------------------------------- - a | integer | | | - b | integer | | | - x | numeric | | | generated always as (((a + b) * 2)) stored - -SELECT * FROM gtest27; - a | b | x ----+----+---- - 3 | 7 | 20 - 4 | 11 | 30 -(2 rows) - -ALTER TABLE gtest27 ALTER COLUMN x TYPE boolean USING x <> 0; -- error -ERROR: generation expression for column "x" cannot be cast automatically to type boolean -ALTER TABLE gtest27 ALTER COLUMN x DROP DEFAULT; -- error -ERROR: column "x" of relation "gtest27" is a generated column -HINT: Use ALTER TABLE ... ALTER COLUMN ... DROP EXPRESSION instead. --- It's possible to alter the column types this way: -ALTER TABLE gtest27 - DROP COLUMN x, - ALTER COLUMN a TYPE bigint, - ALTER COLUMN b TYPE bigint, - ADD COLUMN x bigint GENERATED ALWAYS AS ((a + b) * 2) STORED; -\d gtest27 - Table "public.gtest27" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+------------------------------------------ - a | bigint | | | - b | bigint | | | - x | bigint | | | generated always as ((a + b) * 2) stored - --- Ideally you could just do this, but not today (and should x change type?): -ALTER TABLE gtest27 - ALTER COLUMN a TYPE float8, - ALTER COLUMN b TYPE float8; -- error -ERROR: cannot alter type of a column used by a generated column -DETAIL: Column "a" is used by generated column "x". -\d gtest27 - Table "public.gtest27" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+------------------------------------------ - a | bigint | | | - b | bigint | | | - x | bigint | | | generated always as ((a + b) * 2) stored - -SELECT * FROM gtest27; - a | b | x ----+----+---- - 3 | 7 | 20 - 4 | 11 | 30 -(2 rows) - --- ALTER TABLE ... ALTER COLUMN ... 
DROP EXPRESSION -CREATE TABLE gtest29 ( - a int, - b int GENERATED ALWAYS AS (a * 2) STORED -); -INSERT INTO gtest29 (a) VALUES (3), (4); -SELECT * FROM gtest29; - a | b ----+--- - 3 | 6 - 4 | 8 -(2 rows) - -\d gtest29 - Table "public.gtest29" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - a | integer | | | - b | integer | | | generated always as (a * 2) stored - -ALTER TABLE gtest29 ALTER COLUMN a SET EXPRESSION AS (a * 3); -- error -ERROR: column "a" of relation "gtest29" is not a generated column -ALTER TABLE gtest29 ALTER COLUMN a DROP EXPRESSION; -- error -ERROR: column "a" of relation "gtest29" is not a stored generated column -ALTER TABLE gtest29 ALTER COLUMN a DROP EXPRESSION IF EXISTS; -- notice -NOTICE: column "a" of relation "gtest29" is not a stored generated column, skipping --- Change the expression -ALTER TABLE gtest29 ALTER COLUMN b SET EXPRESSION AS (a * 3); -SELECT * FROM gtest29; - a | b ----+---- - 3 | 9 - 4 | 12 -(2 rows) - -\d gtest29 - Table "public.gtest29" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - a | integer | | | - b | integer | | | generated always as (a * 3) stored - -ALTER TABLE gtest29 ALTER COLUMN b DROP EXPRESSION; -INSERT INTO gtest29 (a) VALUES (5); -INSERT INTO gtest29 (a, b) VALUES (6, 66); -SELECT * FROM gtest29; - a | b ----+---- - 3 | 9 - 4 | 12 - 5 | - 6 | 66 -(4 rows) - -\d gtest29 - Table "public.gtest29" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | - --- check that dependencies between columns have also been removed -ALTER TABLE gtest29 DROP COLUMN a; -- should not drop b -\d gtest29 - Table "public.gtest29" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - b | integer | | | - --- with inheritance -CREATE TABLE gtest30 ( - a int, - b int GENERATED ALWAYS AS (a * 2) STORED -); -CREATE TABLE gtest30_1 () INHERITS (gtest30); -ALTER TABLE gtest30 ALTER COLUMN b DROP EXPRESSION; -\d gtest30 - Table "public.gtest30" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Number of child tables: 1 (Use \d+ to list them.) - -\d gtest30_1 - Table "public.gtest30_1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Inherits: gtest30 - -DROP TABLE gtest30 CASCADE; -NOTICE: drop cascades to table gtest30_1 -CREATE TABLE gtest30 ( - a int, - b int GENERATED ALWAYS AS (a * 2) STORED -); -CREATE TABLE gtest30_1 () INHERITS (gtest30); -ALTER TABLE ONLY gtest30 ALTER COLUMN b DROP EXPRESSION; -- error -ERROR: ALTER TABLE / DROP EXPRESSION must be applied to child tables too -\d gtest30 - Table "public.gtest30" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - a | integer | | | - b | integer | | | generated always as (a * 2) stored -Number of child tables: 1 (Use \d+ to list them.) 
- -\d gtest30_1 - Table "public.gtest30_1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - a | integer | | | - b | integer | | | generated always as (a * 2) stored -Inherits: gtest30 - -ALTER TABLE gtest30_1 ALTER COLUMN b DROP EXPRESSION; -- error -ERROR: cannot drop generation expression from inherited column --- triggers -CREATE TABLE gtest26 ( - a int PRIMARY KEY, - b int GENERATED ALWAYS AS (a * 2) STORED -); -CREATE FUNCTION gtest_trigger_func() RETURNS trigger - LANGUAGE plpgsql -AS $$ -BEGIN - IF tg_op IN ('DELETE', 'UPDATE') THEN - RAISE INFO '%: %: old = %', TG_NAME, TG_WHEN, OLD; - END IF; - IF tg_op IN ('INSERT', 'UPDATE') THEN - RAISE INFO '%: %: new = %', TG_NAME, TG_WHEN, NEW; - END IF; - IF tg_op = 'DELETE' THEN - RETURN OLD; - ELSE - RETURN NEW; - END IF; -END -$$; -CREATE TRIGGER gtest1 BEFORE DELETE OR UPDATE ON gtest26 - FOR EACH ROW - WHEN (OLD.b < 0) -- ok - EXECUTE PROCEDURE gtest_trigger_func(); -CREATE TRIGGER gtest2a BEFORE INSERT OR UPDATE ON gtest26 - FOR EACH ROW - WHEN (NEW.b < 0) -- error - EXECUTE PROCEDURE gtest_trigger_func(); -ERROR: BEFORE trigger's WHEN condition cannot reference NEW generated columns -LINE 3: WHEN (NEW.b < 0) -- error - ^ -DETAIL: Column "b" is a generated column. -CREATE TRIGGER gtest2b BEFORE INSERT OR UPDATE ON gtest26 - FOR EACH ROW - WHEN (NEW.* IS NOT NULL) -- error - EXECUTE PROCEDURE gtest_trigger_func(); -ERROR: BEFORE trigger's WHEN condition cannot reference NEW generated columns -LINE 3: WHEN (NEW.* IS NOT NULL) -- error - ^ -DETAIL: A whole-row reference is used and the table contains generated columns. -CREATE TRIGGER gtest2 BEFORE INSERT ON gtest26 - FOR EACH ROW - WHEN (NEW.a < 0) - EXECUTE PROCEDURE gtest_trigger_func(); -CREATE TRIGGER gtest3 AFTER DELETE OR UPDATE ON gtest26 - FOR EACH ROW - WHEN (OLD.b < 0) -- ok - EXECUTE PROCEDURE gtest_trigger_func(); -CREATE TRIGGER gtest4 AFTER INSERT OR UPDATE ON gtest26 - FOR EACH ROW - WHEN (NEW.b < 0) -- ok - EXECUTE PROCEDURE gtest_trigger_func(); -INSERT INTO gtest26 (a) VALUES (-2), (0), (3); -INFO: gtest2: BEFORE: new = (-2,) -INFO: gtest4: AFTER: new = (-2,-4) -SELECT * FROM gtest26 ORDER BY a; - a | b -----+---- - -2 | -4 - 0 | 0 - 3 | 6 -(3 rows) - -UPDATE gtest26 SET a = a * -2; -INFO: gtest1: BEFORE: old = (-2,-4) -INFO: gtest1: BEFORE: new = (4,) -INFO: gtest3: AFTER: old = (-2,-4) -INFO: gtest3: AFTER: new = (4,8) -INFO: gtest4: AFTER: old = (3,6) -INFO: gtest4: AFTER: new = (-6,-12) -SELECT * FROM gtest26 ORDER BY a; - a | b -----+----- - -6 | -12 - 0 | 0 - 4 | 8 -(3 rows) - -DELETE FROM gtest26 WHERE a = -6; -INFO: gtest1: BEFORE: old = (-6,-12) -INFO: gtest3: AFTER: old = (-6,-12) -SELECT * FROM gtest26 ORDER BY a; - a | b ----+--- - 0 | 0 - 4 | 8 -(2 rows) - -DROP TRIGGER gtest1 ON gtest26; -DROP TRIGGER gtest2 ON gtest26; -DROP TRIGGER gtest3 ON gtest26; --- Check that an UPDATE of "a" fires the trigger for UPDATE OF b, per --- SQL standard. 
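In the INFO lines above the generated column shows up empty inside BEFORE ROW triggers (new = (-2,), new = (11,)): stored generated columns are computed only after all BEFORE ROW triggers have fired, which is also why a BEFORE trigger's WHEN clause is not allowed to reference them, while AFTER triggers see the finished row. A sketch that makes the same thing observable outside the regression schema (trig_demo and show_new are invented names):

    create temp table trig_demo (
      a int,
      b int generated always as (a * 2) stored
    );

    create function show_new() returns trigger language plpgsql as $$
    begin
      -- b is still NULL here; it is filled in after the BEFORE ROW triggers finish
      raise info 'BEFORE ROW: new = %', new;
      return new;
    end $$;

    create trigger trig_demo_before
      before insert or update on trig_demo
      for each row execute function show_new();

    insert into trig_demo (a) values (5);   -- INFO prints (5,), the stored row is (5,10)
    select * from trig_demo;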
-CREATE FUNCTION gtest_trigger_func3() RETURNS trigger - LANGUAGE plpgsql -AS $$ -BEGIN - RAISE NOTICE 'OK'; - RETURN NEW; -END -$$; -CREATE TRIGGER gtest11 BEFORE UPDATE OF b ON gtest26 - FOR EACH ROW - EXECUTE PROCEDURE gtest_trigger_func3(); -UPDATE gtest26 SET a = 1 WHERE a = 0; -NOTICE: OK -DROP TRIGGER gtest11 ON gtest26; -TRUNCATE gtest26; --- check that modifications of stored generated columns in triggers do --- not get propagated -CREATE FUNCTION gtest_trigger_func4() RETURNS trigger - LANGUAGE plpgsql -AS $$ -BEGIN - NEW.a = 10; - NEW.b = 300; - RETURN NEW; -END; -$$; -CREATE TRIGGER gtest12_01 BEFORE UPDATE ON gtest26 - FOR EACH ROW - EXECUTE PROCEDURE gtest_trigger_func(); -CREATE TRIGGER gtest12_02 BEFORE UPDATE ON gtest26 - FOR EACH ROW - EXECUTE PROCEDURE gtest_trigger_func4(); -CREATE TRIGGER gtest12_03 BEFORE UPDATE ON gtest26 - FOR EACH ROW - EXECUTE PROCEDURE gtest_trigger_func(); -INSERT INTO gtest26 (a) VALUES (1); -UPDATE gtest26 SET a = 11 WHERE a = 1; -INFO: gtest12_01: BEFORE: old = (1,2) -INFO: gtest12_01: BEFORE: new = (11,) -INFO: gtest12_03: BEFORE: old = (1,2) -INFO: gtest12_03: BEFORE: new = (10,) -SELECT * FROM gtest26 ORDER BY a; - a | b -----+---- - 10 | 20 -(1 row) - --- LIKE INCLUDING GENERATED and dropped column handling -CREATE TABLE gtest28a ( - a int, - b int, - c int, - x int GENERATED ALWAYS AS (b * 2) STORED -); -ALTER TABLE gtest28a DROP COLUMN a; -CREATE TABLE gtest28b (LIKE gtest28a INCLUDING GENERATED); -\d gtest28* - Table "public.gtest28a" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - b | integer | | | - c | integer | | | - x | integer | | | generated always as (b * 2) stored - - Table "public.gtest28b" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - b | integer | | | - c | integer | | | - x | integer | | | generated always as (b * 2) stored - +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/join_hash.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/join_hash.out --- /tmp/cirrus-ci-build/src/test/regress/expected/join_hash.out 2024-03-13 23:12:37.624893000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/join_hash.out 2024-03-13 23:14:29.334322000 +0000 @@ -420,747 +420,10 @@ $$ select count(*) from simple r join bigger_than_it_looks s using (id); $$); - initially_multibatch | increased_batches -----------------------+------------------- - f | t -(1 row) - -rollback to settings; --- The "ugly" case: increasing the number of batches during execution --- doesn't help, so stop trying to fit in work_mem and hope for the --- best; in this case we plan for 1 batch, increases just once and --- then stop increasing because that didn't help at all, so we blow --- right through the work_mem budget and hope for the best... 
--- non-parallel -savepoint settings; -set local max_parallel_workers_per_gather = 0; -set local work_mem = '128kB'; -set local hash_mem_multiplier = 1.0; -explain (costs off) - select count(*) from simple r join extremely_skewed s using (id); - QUERY PLAN --------------------------------------------------- - Aggregate - -> Hash Join - Hash Cond: (r.id = s.id) - -> Seq Scan on simple r - -> Hash - -> Seq Scan on extremely_skewed s -(6 rows) - -select count(*) from simple r join extremely_skewed s using (id); - count -------- - 20000 -(1 row) - -select * from hash_join_batches( -$$ - select count(*) from simple r join extremely_skewed s using (id); -$$); - original | final -----------+------- - 1 | 2 -(1 row) - -rollback to settings; --- parallel with parallel-oblivious hash join -savepoint settings; -set local max_parallel_workers_per_gather = 2; -set local work_mem = '128kB'; -set local hash_mem_multiplier = 1.0; -set local enable_parallel_hash = off; -explain (costs off) - select count(*) from simple r join extremely_skewed s using (id); - QUERY PLAN --------------------------------------------------------- - Aggregate - -> Gather - Workers Planned: 2 - -> Hash Join - Hash Cond: (r.id = s.id) - -> Parallel Seq Scan on simple r - -> Hash - -> Seq Scan on extremely_skewed s -(8 rows) - -select count(*) from simple r join extremely_skewed s using (id); - count -------- - 20000 -(1 row) - -select * from hash_join_batches( -$$ - select count(*) from simple r join extremely_skewed s using (id); -$$); - original | final -----------+------- - 1 | 2 -(1 row) - -rollback to settings; --- parallel with parallel-aware hash join -savepoint settings; -set local max_parallel_workers_per_gather = 1; -set local work_mem = '128kB'; -set local hash_mem_multiplier = 1.0; -set local enable_parallel_hash = on; -explain (costs off) - select count(*) from simple r join extremely_skewed s using (id); - QUERY PLAN ------------------------------------------------------------------------ - Finalize Aggregate - -> Gather - Workers Planned: 1 - -> Partial Aggregate - -> Parallel Hash Join - Hash Cond: (r.id = s.id) - -> Parallel Seq Scan on simple r - -> Parallel Hash - -> Parallel Seq Scan on extremely_skewed s -(9 rows) - -select count(*) from simple r join extremely_skewed s using (id); - count -------- - 20000 -(1 row) - -select * from hash_join_batches( -$$ - select count(*) from simple r join extremely_skewed s using (id); -$$); - original | final -----------+------- - 1 | 4 -(1 row) - -rollback to settings; --- A couple of other hash join tests unrelated to work_mem management. --- Check that EXPLAIN ANALYZE has data even if the leader doesn't participate -savepoint settings; -set local max_parallel_workers_per_gather = 2; -set local work_mem = '4MB'; -set local hash_mem_multiplier = 1.0; -set local parallel_leader_participation = off; -select * from hash_join_batches( -$$ - select count(*) from simple r join simple s using (id); -$$); - original | final -----------+------- - 1 | 1 -(1 row) - -rollback to settings; --- Exercise rescans. We'll turn off parallel_leader_participation so --- that we can check that instrumentation comes back correctly. 
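The three variants above differ only in planner settings: with max_parallel_workers_per_gather = 0 the join is a plain Hash Join; with parallelism enabled but enable_parallel_hash = off each worker builds its own private copy of the hash table under a Gather; with enable_parallel_hash = on the workers cooperate on one shared table, shown as Parallel Hash Join over Parallel Hash. A sketch of flipping between the last two (ph_demo is an invented table, and the planner is only nudged, so the exact plan can still vary):

    -- a regular (non-temp) table: temporary tables are not eligible for parallel scans
    create table ph_demo as
      select g as id, md5(g::text) as t from generate_series(1, 50000) g;
    analyze ph_demo;

    set parallel_setup_cost = 0;
    set parallel_tuple_cost = 0;
    set min_parallel_table_scan_size = 0;
    set max_parallel_workers_per_gather = 2;
    set enable_mergejoin = off;            -- steer the planner towards a hash join

    set enable_parallel_hash = off;
    explain (costs off)
      select count(*) from ph_demo a join ph_demo b using (id);
    -- typically: Gather -> Hash Join, with a full Hash built in every worker

    set enable_parallel_hash = on;
    explain (costs off)
      select count(*) from ph_demo a join ph_demo b using (id);
    -- typically: Gather -> Parallel Hash Join over a shared Parallel Hash

    drop table ph_demo;
    reset all;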
-create table join_foo as select generate_series(1, 3) as id, 'xxxxx'::text as t; -alter table join_foo set (parallel_workers = 0); -create table join_bar as select generate_series(1, 10000) as id, 'xxxxx'::text as t; -alter table join_bar set (parallel_workers = 2); --- multi-batch with rescan, parallel-oblivious -savepoint settings; -set enable_parallel_hash = off; -set parallel_leader_participation = off; -set min_parallel_table_scan_size = 0; -set parallel_setup_cost = 0; -set parallel_tuple_cost = 0; -set max_parallel_workers_per_gather = 2; -set enable_material = off; -set enable_mergejoin = off; -set work_mem = '64kB'; -set hash_mem_multiplier = 1.0; -explain (costs off) - select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; - QUERY PLAN ------------------------------------------------------------------------------------- - Aggregate - -> Nested Loop Left Join - Join Filter: ((join_foo.id < (b1.id + 1)) AND (join_foo.id > (b1.id - 1))) - -> Seq Scan on join_foo - -> Gather - Workers Planned: 2 - -> Hash Join - Hash Cond: (b1.id = b2.id) - -> Parallel Seq Scan on join_bar b1 - -> Hash - -> Seq Scan on join_bar b2 -(11 rows) - -select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; - count -------- - 3 -(1 row) - -select final > 1 as multibatch - from hash_join_batches( -$$ - select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; -$$); - multibatch ------------- - t -(1 row) - -rollback to settings; --- single-batch with rescan, parallel-oblivious -savepoint settings; -set enable_parallel_hash = off; -set parallel_leader_participation = off; -set min_parallel_table_scan_size = 0; -set parallel_setup_cost = 0; -set parallel_tuple_cost = 0; -set max_parallel_workers_per_gather = 2; -set enable_material = off; -set enable_mergejoin = off; -set work_mem = '4MB'; -set hash_mem_multiplier = 1.0; -explain (costs off) - select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; - QUERY PLAN ------------------------------------------------------------------------------------- - Aggregate - -> Nested Loop Left Join - Join Filter: ((join_foo.id < (b1.id + 1)) AND (join_foo.id > (b1.id - 1))) - -> Seq Scan on join_foo - -> Gather - Workers Planned: 2 - -> Hash Join - Hash Cond: (b1.id = b2.id) - -> Parallel Seq Scan on join_bar b1 - -> Hash - -> Seq Scan on join_bar b2 -(11 rows) - -select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; - count -------- - 3 -(1 row) - -select final > 1 as multibatch - from hash_join_batches( -$$ - select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; -$$); - multibatch ------------- - f -(1 row) - -rollback to settings; --- multi-batch with rescan, parallel-aware -savepoint settings; -set enable_parallel_hash = on; -set parallel_leader_participation = off; -set min_parallel_table_scan_size = 0; -set parallel_setup_cost = 0; -set parallel_tuple_cost = 0; -set max_parallel_workers_per_gather = 
2; -set enable_material = off; -set enable_mergejoin = off; -set work_mem = '64kB'; -set hash_mem_multiplier = 1.0; -explain (costs off) - select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; - QUERY PLAN ------------------------------------------------------------------------------------- - Aggregate - -> Nested Loop Left Join - Join Filter: ((join_foo.id < (b1.id + 1)) AND (join_foo.id > (b1.id - 1))) - -> Seq Scan on join_foo - -> Gather - Workers Planned: 2 - -> Parallel Hash Join - Hash Cond: (b1.id = b2.id) - -> Parallel Seq Scan on join_bar b1 - -> Parallel Hash - -> Parallel Seq Scan on join_bar b2 -(11 rows) - -select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; - count -------- - 3 -(1 row) - -select final > 1 as multibatch - from hash_join_batches( -$$ - select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; -$$); - multibatch ------------- - t -(1 row) - -rollback to settings; --- single-batch with rescan, parallel-aware -savepoint settings; -set enable_parallel_hash = on; -set parallel_leader_participation = off; -set min_parallel_table_scan_size = 0; -set parallel_setup_cost = 0; -set parallel_tuple_cost = 0; -set max_parallel_workers_per_gather = 2; -set enable_material = off; -set enable_mergejoin = off; -set work_mem = '4MB'; -set hash_mem_multiplier = 1.0; -explain (costs off) - select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; - QUERY PLAN ------------------------------------------------------------------------------------- - Aggregate - -> Nested Loop Left Join - Join Filter: ((join_foo.id < (b1.id + 1)) AND (join_foo.id > (b1.id - 1))) - -> Seq Scan on join_foo - -> Gather - Workers Planned: 2 - -> Parallel Hash Join - Hash Cond: (b1.id = b2.id) - -> Parallel Seq Scan on join_bar b1 - -> Parallel Hash - -> Parallel Seq Scan on join_bar b2 -(11 rows) - -select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; - count -------- - 3 -(1 row) - -select final > 1 as multibatch - from hash_join_batches( -$$ - select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; -$$); - multibatch ------------- - f -(1 row) - -rollback to settings; --- A full outer join where every record is matched. 
--- non-parallel -savepoint settings; -set local max_parallel_workers_per_gather = 0; -explain (costs off) - select count(*) from simple r full outer join simple s using (id); - QUERY PLAN ----------------------------------------- - Aggregate - -> Hash Full Join - Hash Cond: (r.id = s.id) - -> Seq Scan on simple r - -> Hash - -> Seq Scan on simple s -(6 rows) - -select count(*) from simple r full outer join simple s using (id); - count -------- - 20000 -(1 row) - -rollback to settings; --- parallelism not possible with parallel-oblivious full hash join -savepoint settings; -set enable_parallel_hash = off; -set local max_parallel_workers_per_gather = 2; -explain (costs off) - select count(*) from simple r full outer join simple s using (id); - QUERY PLAN ----------------------------------------- - Aggregate - -> Hash Full Join - Hash Cond: (r.id = s.id) - -> Seq Scan on simple r - -> Hash - -> Seq Scan on simple s -(6 rows) - -select count(*) from simple r full outer join simple s using (id); - count -------- - 20000 -(1 row) - -rollback to settings; --- parallelism is possible with parallel-aware full hash join -savepoint settings; -set local max_parallel_workers_per_gather = 2; -explain (costs off) - select count(*) from simple r full outer join simple s using (id); - QUERY PLAN -------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 2 - -> Partial Aggregate - -> Parallel Hash Full Join - Hash Cond: (r.id = s.id) - -> Parallel Seq Scan on simple r - -> Parallel Hash - -> Parallel Seq Scan on simple s -(9 rows) - -select count(*) from simple r full outer join simple s using (id); - count -------- - 20000 -(1 row) - -rollback to settings; --- A full outer join where every record is not matched. 
--- non-parallel -savepoint settings; -set local max_parallel_workers_per_gather = 0; -explain (costs off) - select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); - QUERY PLAN ----------------------------------------- - Aggregate - -> Hash Full Join - Hash Cond: ((0 - s.id) = r.id) - -> Seq Scan on simple s - -> Hash - -> Seq Scan on simple r -(6 rows) - -select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); - count -------- - 40000 -(1 row) - -rollback to settings; --- parallelism not possible with parallel-oblivious full hash join -savepoint settings; -set enable_parallel_hash = off; -set local max_parallel_workers_per_gather = 2; -explain (costs off) - select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); - QUERY PLAN ----------------------------------------- - Aggregate - -> Hash Full Join - Hash Cond: ((0 - s.id) = r.id) - -> Seq Scan on simple s - -> Hash - -> Seq Scan on simple r -(6 rows) - -select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); - count -------- - 40000 -(1 row) - -rollback to settings; --- parallelism is possible with parallel-aware full hash join -savepoint settings; -set local max_parallel_workers_per_gather = 2; -explain (costs off) - select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); - QUERY PLAN -------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 2 - -> Partial Aggregate - -> Parallel Hash Full Join - Hash Cond: ((0 - s.id) = r.id) - -> Parallel Seq Scan on simple s - -> Parallel Hash - -> Parallel Seq Scan on simple r -(9 rows) - -select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); - count -------- - 40000 -(1 row) - -rollback to settings; --- exercise special code paths for huge tuples (note use of non-strict --- expression and left join required to get the detoasted tuple into --- the hash table) --- parallel with parallel-aware hash join (hits ExecParallelHashLoadTuple and --- sts_puttuple oversized tuple cases because it's multi-batch) -savepoint settings; -set max_parallel_workers_per_gather = 2; -set enable_parallel_hash = on; -set work_mem = '128kB'; -set hash_mem_multiplier = 1.0; -explain (costs off) - select length(max(s.t)) - from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id); - QUERY PLAN ----------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 2 - -> Partial Aggregate - -> Parallel Hash Left Join - Hash Cond: (wide.id = wide_1.id) - -> Parallel Seq Scan on wide - -> Parallel Hash - -> Parallel Seq Scan on wide wide_1 -(9 rows) - -select length(max(s.t)) -from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id); - length --------- - 320000 -(1 row) - -select final > 1 as multibatch - from hash_join_batches( -$$ - select length(max(s.t)) - from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id); -$$); - multibatch ------------- - t -(1 row) - -rollback to settings; --- Hash join reuses the HOT status bit to indicate match status. This can only --- be guaranteed to produce correct results if all the hash join tuple match --- bits are reset before reuse. This is done upon loading them into the --- hashtable. 
-SAVEPOINT settings; -SET enable_parallel_hash = on; -SET min_parallel_table_scan_size = 0; -SET parallel_setup_cost = 0; -SET parallel_tuple_cost = 0; -CREATE TABLE hjtest_matchbits_t1(id int); -CREATE TABLE hjtest_matchbits_t2(id int); -INSERT INTO hjtest_matchbits_t1 VALUES (1); -INSERT INTO hjtest_matchbits_t2 VALUES (2); --- Update should create a HOT tuple. If this status bit isn't cleared, we won't --- correctly emit the NULL-extended unmatching tuple in full hash join. -UPDATE hjtest_matchbits_t2 set id = 2; -SELECT * FROM hjtest_matchbits_t1 t1 FULL JOIN hjtest_matchbits_t2 t2 ON t1.id = t2.id - ORDER BY t1.id; - id | id -----+---- - 1 | - | 2 -(2 rows) - --- Test serial full hash join. --- Resetting parallel_setup_cost should force a serial plan. --- Just to be safe, however, set enable_parallel_hash to off, as parallel full --- hash joins are only supported with shared hashtables. -RESET parallel_setup_cost; -SET enable_parallel_hash = off; -SELECT * FROM hjtest_matchbits_t1 t1 FULL JOIN hjtest_matchbits_t2 t2 ON t1.id = t2.id; - id | id -----+---- - 1 | - | 2 -(2 rows) - -ROLLBACK TO settings; -rollback; --- Verify that hash key expressions reference the correct --- nodes. Hashjoin's hashkeys need to reference its outer plan, Hash's --- need to reference Hash's outer plan (which is below HashJoin's --- inner plan). It's not trivial to verify that the references are --- correct (we don't display the hashkeys themselves), but if the --- hashkeys contain subplan references, those will be displayed. Force --- subplans to appear just about everywhere. --- --- Bug report: --- https://www.postgresql.org/message-id/CAPpHfdvGVegF_TKKRiBrSmatJL2dR9uwFCuR%2BteQ_8tEXU8mxg%40mail.gmail.com --- -BEGIN; -SET LOCAL enable_sort = OFF; -- avoid mergejoins -SET LOCAL from_collapse_limit = 1; -- allows easy changing of join order -CREATE TABLE hjtest_1 (a text, b int, id int, c bool); -CREATE TABLE hjtest_2 (a bool, id int, b text, c int); -INSERT INTO hjtest_1(a, b, id, c) VALUES ('text', 2, 1, false); -- matches -INSERT INTO hjtest_1(a, b, id, c) VALUES ('text', 1, 2, false); -- fails id join condition -INSERT INTO hjtest_1(a, b, id, c) VALUES ('text', 20, 1, false); -- fails < 50 -INSERT INTO hjtest_1(a, b, id, c) VALUES ('text', 1, 1, false); -- fails (SELECT hjtest_1.b * 5) = (SELECT hjtest_2.c*5) -INSERT INTO hjtest_2(a, id, b, c) VALUES (true, 1, 'another', 2); -- matches -INSERT INTO hjtest_2(a, id, b, c) VALUES (true, 3, 'another', 7); -- fails id join condition -INSERT INTO hjtest_2(a, id, b, c) VALUES (true, 1, 'another', 90); -- fails < 55 -INSERT INTO hjtest_2(a, id, b, c) VALUES (true, 1, 'another', 3); -- fails (SELECT hjtest_1.b * 5) = (SELECT hjtest_2.c*5) -INSERT INTO hjtest_2(a, id, b, c) VALUES (true, 1, 'text', 1); -- fails hjtest_1.a <> hjtest_2.b; -EXPLAIN (COSTS OFF, VERBOSE) -SELECT hjtest_1.a a1, hjtest_2.a a2,hjtest_1.tableoid::regclass t1, hjtest_2.tableoid::regclass t2 -FROM hjtest_1, hjtest_2 -WHERE - hjtest_1.id = (SELECT 1 WHERE hjtest_2.id = 1) - AND (SELECT hjtest_1.b * 5) = (SELECT hjtest_2.c*5) - AND (SELECT hjtest_1.b * 5) < 50 - AND (SELECT hjtest_2.c * 5) < 55 - AND hjtest_1.a <> hjtest_2.b; - QUERY PLAN ------------------------------------------------------------------------------------------------- - Hash Join - Output: hjtest_1.a, hjtest_2.a, (hjtest_1.tableoid)::regclass, (hjtest_2.tableoid)::regclass - Hash Cond: ((hjtest_1.id = (SubPlan 1)) AND ((SubPlan 2) = (SubPlan 3))) - Join Filter: (hjtest_1.a <> hjtest_2.b) - -> Seq Scan on public.hjtest_1 - 
Output: hjtest_1.a, hjtest_1.tableoid, hjtest_1.id, hjtest_1.b - Filter: ((SubPlan 4) < 50) - SubPlan 4 - -> Result - Output: (hjtest_1.b * 5) - -> Hash - Output: hjtest_2.a, hjtest_2.tableoid, hjtest_2.id, hjtest_2.c, hjtest_2.b - -> Seq Scan on public.hjtest_2 - Output: hjtest_2.a, hjtest_2.tableoid, hjtest_2.id, hjtest_2.c, hjtest_2.b - Filter: ((SubPlan 5) < 55) - SubPlan 5 - -> Result - Output: (hjtest_2.c * 5) - SubPlan 1 - -> Result - Output: 1 - One-Time Filter: (hjtest_2.id = 1) - SubPlan 3 - -> Result - Output: (hjtest_2.c * 5) - SubPlan 2 - -> Result - Output: (hjtest_1.b * 5) -(28 rows) - -SELECT hjtest_1.a a1, hjtest_2.a a2,hjtest_1.tableoid::regclass t1, hjtest_2.tableoid::regclass t2 -FROM hjtest_1, hjtest_2 -WHERE - hjtest_1.id = (SELECT 1 WHERE hjtest_2.id = 1) - AND (SELECT hjtest_1.b * 5) = (SELECT hjtest_2.c*5) - AND (SELECT hjtest_1.b * 5) < 50 - AND (SELECT hjtest_2.c * 5) < 55 - AND hjtest_1.a <> hjtest_2.b; - a1 | a2 | t1 | t2 -------+----+----------+---------- - text | t | hjtest_1 | hjtest_2 -(1 row) - -EXPLAIN (COSTS OFF, VERBOSE) -SELECT hjtest_1.a a1, hjtest_2.a a2,hjtest_1.tableoid::regclass t1, hjtest_2.tableoid::regclass t2 -FROM hjtest_2, hjtest_1 -WHERE - hjtest_1.id = (SELECT 1 WHERE hjtest_2.id = 1) - AND (SELECT hjtest_1.b * 5) = (SELECT hjtest_2.c*5) - AND (SELECT hjtest_1.b * 5) < 50 - AND (SELECT hjtest_2.c * 5) < 55 - AND hjtest_1.a <> hjtest_2.b; - QUERY PLAN ------------------------------------------------------------------------------------------------- - Hash Join - Output: hjtest_1.a, hjtest_2.a, (hjtest_1.tableoid)::regclass, (hjtest_2.tableoid)::regclass - Hash Cond: (((SubPlan 1) = hjtest_1.id) AND ((SubPlan 3) = (SubPlan 2))) - Join Filter: (hjtest_1.a <> hjtest_2.b) - -> Seq Scan on public.hjtest_2 - Output: hjtest_2.a, hjtest_2.tableoid, hjtest_2.id, hjtest_2.c, hjtest_2.b - Filter: ((SubPlan 5) < 55) - SubPlan 5 - -> Result - Output: (hjtest_2.c * 5) - -> Hash - Output: hjtest_1.a, hjtest_1.tableoid, hjtest_1.id, hjtest_1.b - -> Seq Scan on public.hjtest_1 - Output: hjtest_1.a, hjtest_1.tableoid, hjtest_1.id, hjtest_1.b - Filter: ((SubPlan 4) < 50) - SubPlan 4 - -> Result - Output: (hjtest_1.b * 5) - SubPlan 2 - -> Result - Output: (hjtest_1.b * 5) - SubPlan 1 - -> Result - Output: 1 - One-Time Filter: (hjtest_2.id = 1) - SubPlan 3 - -> Result - Output: (hjtest_2.c * 5) -(28 rows) - -SELECT hjtest_1.a a1, hjtest_2.a a2,hjtest_1.tableoid::regclass t1, hjtest_2.tableoid::regclass t2 -FROM hjtest_2, hjtest_1 -WHERE - hjtest_1.id = (SELECT 1 WHERE hjtest_2.id = 1) - AND (SELECT hjtest_1.b * 5) = (SELECT hjtest_2.c*5) - AND (SELECT hjtest_1.b * 5) < 50 - AND (SELECT hjtest_2.c * 5) < 55 - AND hjtest_1.a <> hjtest_2.b; - a1 | a2 | t1 | t2 -------+----+----------+---------- - text | t | hjtest_1 | hjtest_2 -(1 row) - -ROLLBACK; --- Verify that we behave sanely when the inner hash keys contain parameters --- (that is, outer or lateral references). This situation has to defeat --- re-use of the inner hash table across rescans. 
-begin; -set local enable_hashjoin = on; -explain (costs off) -select i8.q2, ss.* from -int8_tbl i8, -lateral (select t1.fivethous, i4.f1 from tenk1 t1 join int4_tbl i4 - on t1.fivethous = i4.f1+i8.q2 order by 1,2) ss; - QUERY PLAN ------------------------------------------------------------ - Nested Loop - -> Seq Scan on int8_tbl i8 - -> Sort - Sort Key: t1.fivethous, i4.f1 - -> Hash Join - Hash Cond: (t1.fivethous = (i4.f1 + i8.q2)) - -> Seq Scan on tenk1 t1 - -> Hash - -> Seq Scan on int4_tbl i4 -(9 rows) - -select i8.q2, ss.* from -int8_tbl i8, -lateral (select t1.fivethous, i4.f1 from tenk1 t1 join int4_tbl i4 - on t1.fivethous = i4.f1+i8.q2 order by 1,2) ss; - q2 | fivethous | f1 ------+-----------+---- - 456 | 456 | 0 - 456 | 456 | 0 - 123 | 123 | 0 - 123 | 123 | 0 -(4 rows) - -rollback; +WARNING: terminating connection because of crash of another server process +DETAIL: The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory. +HINT: In a moment you should be able to reconnect to the database and repeat your command. +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/brin_bloom.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/brin_bloom.out --- /tmp/cirrus-ci-build/src/test/regress/expected/brin_bloom.out 2024-03-13 23:12:37.622569000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/brin_bloom.out 2024-03-13 23:14:29.426212000 +0000 @@ -1,428 +1,2 @@ -CREATE TABLE brintest_bloom (byteacol bytea, - charcol "char", - namecol name, - int8col bigint, - int2col smallint, - int4col integer, - textcol text, - oidcol oid, - float4col real, - float8col double precision, - macaddrcol macaddr, - inetcol inet, - cidrcol cidr, - bpcharcol character, - datecol date, - timecol time without time zone, - timestampcol timestamp without time zone, - timestamptzcol timestamp with time zone, - intervalcol interval, - timetzcol time with time zone, - numericcol numeric, - uuidcol uuid, - lsncol pg_lsn -) WITH (fillfactor=10); -INSERT INTO brintest_bloom SELECT - repeat(stringu1, 8)::bytea, - substr(stringu1, 1, 1)::"char", - stringu1::name, 142857 * tenthous, - thousand, - twothousand, - repeat(stringu1, 8), - unique1::oid, - (four + 1.0)/(hundred+1), - odd::float8 / (tenthous + 1), - format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, - inet '10.2.3.4/24' + tenthous, - cidr '10.2.3/24' + tenthous, - substr(stringu1, 1, 1)::bpchar, - date '1995-08-15' + tenthous, - time '01:20:30' + thousand * interval '18.5 second', - timestamp '1942-07-23 03:05:09' + tenthous * interval '36.38 hours', - timestamptz '1972-10-10 03:00' + thousand * interval '1 hour', - justify_days(justify_hours(tenthous * interval '12 minutes')), - timetz '01:30:20+02' + hundred * interval '15 seconds', - tenthous::numeric(36,30) * fivethous * even / (hundred + 1), - format('%s%s-%s-%s-%s-%s%s%s', to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'))::uuid, - format('%s/%s%s', odd, even, tenthous)::pg_lsn -FROM tenk1 ORDER BY unique2 LIMIT 100; --- throw in some NULL's and different values -INSERT INTO 
brintest_bloom (inetcol, cidrcol) SELECT - inet 'fe80::6e40:8ff:fea9:8c46' + tenthous, - cidr 'fe80::6e40:8ff:fea9:8c46' + tenthous -FROM tenk1 ORDER BY thousand, tenthous LIMIT 25; --- test bloom specific index options --- ndistinct must be >= -1.0 -CREATE INDEX brinidx_bloom ON brintest_bloom USING brin ( - byteacol bytea_bloom_ops(n_distinct_per_range = -1.1) -); -ERROR: value -1.1 out of bounds for option "n_distinct_per_range" -DETAIL: Valid values are between "-1.000000" and "2147483647.000000". --- false_positive_rate must be between 0.0001 and 0.25 -CREATE INDEX brinidx_bloom ON brintest_bloom USING brin ( - byteacol bytea_bloom_ops(false_positive_rate = 0.00009) -); -ERROR: value 0.00009 out of bounds for option "false_positive_rate" -DETAIL: Valid values are between "0.000100" and "0.250000". -CREATE INDEX brinidx_bloom ON brintest_bloom USING brin ( - byteacol bytea_bloom_ops(false_positive_rate = 0.26) -); -ERROR: value 0.26 out of bounds for option "false_positive_rate" -DETAIL: Valid values are between "0.000100" and "0.250000". -CREATE INDEX brinidx_bloom ON brintest_bloom USING brin ( - byteacol bytea_bloom_ops, - charcol char_bloom_ops, - namecol name_bloom_ops, - int8col int8_bloom_ops, - int2col int2_bloom_ops, - int4col int4_bloom_ops, - textcol text_bloom_ops, - oidcol oid_bloom_ops, - float4col float4_bloom_ops, - float8col float8_bloom_ops, - macaddrcol macaddr_bloom_ops, - inetcol inet_bloom_ops, - cidrcol inet_bloom_ops, - bpcharcol bpchar_bloom_ops, - datecol date_bloom_ops, - timecol time_bloom_ops, - timestampcol timestamp_bloom_ops, - timestamptzcol timestamptz_bloom_ops, - intervalcol interval_bloom_ops, - timetzcol timetz_bloom_ops, - numericcol numeric_bloom_ops, - uuidcol uuid_bloom_ops, - lsncol pg_lsn_bloom_ops -) with (pages_per_range = 1); -CREATE TABLE brinopers_bloom (colname name, typ text, - op text[], value text[], matches int[], - check (cardinality(op) = cardinality(value)), - check (cardinality(op) = cardinality(matches))); -INSERT INTO brinopers_bloom VALUES - ('byteacol', 'bytea', - '{=}', - '{BNAAAABNAAAABNAAAABNAAAABNAAAABNAAAABNAAAABNAAAA}', - '{1}'), - ('charcol', '"char"', - '{=}', - '{M}', - '{6}'), - ('namecol', 'name', - '{=}', - '{MAAAAA}', - '{2}'), - ('int2col', 'int2', - '{=}', - '{800}', - '{1}'), - ('int4col', 'int4', - '{=}', - '{800}', - '{1}'), - ('int8col', 'int8', - '{=}', - '{1257141600}', - '{1}'), - ('textcol', 'text', - '{=}', - '{BNAAAABNAAAABNAAAABNAAAABNAAAABNAAAABNAAAABNAAAA}', - '{1}'), - ('oidcol', 'oid', - '{=}', - '{8800}', - '{1}'), - ('float4col', 'float4', - '{=}', - '{1}', - '{4}'), - ('float8col', 'float8', - '{=}', - '{0}', - '{1}'), - ('macaddrcol', 'macaddr', - '{=}', - '{2c:00:2d:00:16:00}', - '{2}'), - ('inetcol', 'inet', - '{=}', - '{10.2.14.231/24}', - '{1}'), - ('inetcol', 'cidr', - '{=}', - '{fe80::6e40:8ff:fea9:8c46}', - '{1}'), - ('cidrcol', 'inet', - '{=}', - '{10.2.14/24}', - '{2}'), - ('cidrcol', 'inet', - '{=}', - '{fe80::6e40:8ff:fea9:8c46}', - '{1}'), - ('cidrcol', 'cidr', - '{=}', - '{10.2.14/24}', - '{2}'), - ('cidrcol', 'cidr', - '{=}', - '{fe80::6e40:8ff:fea9:8c46}', - '{1}'), - ('bpcharcol', 'bpchar', - '{=}', - '{W}', - '{6}'), - ('datecol', 'date', - '{=}', - '{2009-12-01}', - '{1}'), - ('timecol', 'time', - '{=}', - '{02:28:57}', - '{1}'), - ('timestampcol', 'timestamp', - '{=}', - '{1964-03-24 19:26:45}', - '{1}'), - ('timestamptzcol', 'timestamptz', - '{=}', - '{1972-10-19 09:00:00-07}', - '{1}'), - ('intervalcol', 'interval', - '{=}', - '{1 mons 13 days 12:24}', - '{1}'), - 
('timetzcol', 'timetz', - '{=}', - '{01:35:50+02}', - '{2}'), - ('numericcol', 'numeric', - '{=}', - '{2268164.347826086956521739130434782609}', - '{1}'), - ('uuidcol', 'uuid', - '{=}', - '{52225222-5222-5222-5222-522252225222}', - '{1}'), - ('lsncol', 'pg_lsn', - '{=, IS, IS NOT}', - '{44/455222, NULL, NULL}', - '{1, 25, 100}'); -DO $x$ -DECLARE - r record; - r2 record; - cond text; - idx_ctids tid[]; - ss_ctids tid[]; - count int; - plan_ok bool; - plan_line text; -BEGIN - FOR r IN SELECT colname, oper, typ, value[ordinality], matches[ordinality] FROM brinopers_bloom, unnest(op) WITH ORDINALITY AS oper LOOP - - -- prepare the condition - IF r.value IS NULL THEN - cond := format('%I %s %L', r.colname, r.oper, r.value); - ELSE - cond := format('%I %s %L::%s', r.colname, r.oper, r.value, r.typ); - END IF; - - -- run the query using the brin index - SET enable_seqscan = 0; - SET enable_bitmapscan = 1; - - plan_ok := false; - FOR plan_line IN EXECUTE format($y$EXPLAIN SELECT array_agg(ctid) FROM brintest_bloom WHERE %s $y$, cond) LOOP - IF plan_line LIKE '%Bitmap Heap Scan on brintest_bloom%' THEN - plan_ok := true; - END IF; - END LOOP; - IF NOT plan_ok THEN - RAISE WARNING 'did not get bitmap indexscan plan for %', r; - END IF; - - EXECUTE format($y$SELECT array_agg(ctid) FROM brintest_bloom WHERE %s $y$, cond) - INTO idx_ctids; - - -- run the query using a seqscan - SET enable_seqscan = 1; - SET enable_bitmapscan = 0; - - plan_ok := false; - FOR plan_line IN EXECUTE format($y$EXPLAIN SELECT array_agg(ctid) FROM brintest_bloom WHERE %s $y$, cond) LOOP - IF plan_line LIKE '%Seq Scan on brintest_bloom%' THEN - plan_ok := true; - END IF; - END LOOP; - IF NOT plan_ok THEN - RAISE WARNING 'did not get seqscan plan for %', r; - END IF; - - EXECUTE format($y$SELECT array_agg(ctid) FROM brintest_bloom WHERE %s $y$, cond) - INTO ss_ctids; - - -- make sure both return the same results - count := array_length(idx_ctids, 1); - - IF NOT (count = array_length(ss_ctids, 1) AND - idx_ctids @> ss_ctids AND - idx_ctids <@ ss_ctids) THEN - -- report the results of each scan to make the differences obvious - RAISE WARNING 'something not right in %: count %', r, count; - SET enable_seqscan = 1; - SET enable_bitmapscan = 0; - FOR r2 IN EXECUTE 'SELECT ' || r.colname || ' FROM brintest_bloom WHERE ' || cond LOOP - RAISE NOTICE 'seqscan: %', r2; - END LOOP; - - SET enable_seqscan = 0; - SET enable_bitmapscan = 1; - FOR r2 IN EXECUTE 'SELECT ' || r.colname || ' FROM brintest_bloom WHERE ' || cond LOOP - RAISE NOTICE 'bitmapscan: %', r2; - END LOOP; - END IF; - - -- make sure we found expected number of matches - IF count != r.matches THEN RAISE WARNING 'unexpected number of results % for %', count, r; END IF; - END LOOP; -END; -$x$; -RESET enable_seqscan; -RESET enable_bitmapscan; -INSERT INTO brintest_bloom SELECT - repeat(stringu1, 42)::bytea, - substr(stringu1, 1, 1)::"char", - stringu1::name, 142857 * tenthous, - thousand, - twothousand, - repeat(stringu1, 42), - unique1::oid, - (four + 1.0)/(hundred+1), - odd::float8 / (tenthous + 1), - format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, - inet '10.2.3.4' + tenthous, - cidr '10.2.3/24' + tenthous, - substr(stringu1, 1, 1)::bpchar, - date '1995-08-15' + tenthous, - time '01:20:30' + thousand * interval '18.5 second', - timestamp '1942-07-23 03:05:09' + tenthous * interval '36.38 hours', - timestamptz '1972-10-10 03:00' + thousand * interval '1 hour', - justify_days(justify_hours(tenthous * interval '12 minutes')), - timetz 
'01:30:20' + hundred * interval '15 seconds', - tenthous::numeric(36,30) * fivethous * even / (hundred + 1), - format('%s%s-%s-%s-%s-%s%s%s', to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'))::uuid, - format('%s/%s%s', odd, even, tenthous)::pg_lsn -FROM tenk1 ORDER BY unique2 LIMIT 5 OFFSET 5; -SELECT brin_desummarize_range('brinidx_bloom', 0); - brin_desummarize_range ------------------------- - -(1 row) - -VACUUM brintest_bloom; -- force a summarization cycle in brinidx -UPDATE brintest_bloom SET int8col = int8col * int4col; -UPDATE brintest_bloom SET textcol = '' WHERE textcol IS NOT NULL; --- Tests for brin_summarize_new_values -SELECT brin_summarize_new_values('brintest_bloom'); -- error, not an index -ERROR: "brintest_bloom" is not an index -SELECT brin_summarize_new_values('tenk1_unique1'); -- error, not a BRIN index -ERROR: "tenk1_unique1" is not a BRIN index -SELECT brin_summarize_new_values('brinidx_bloom'); -- ok, no change expected - brin_summarize_new_values ---------------------------- - 0 -(1 row) - --- Tests for brin_desummarize_range -SELECT brin_desummarize_range('brinidx_bloom', -1); -- error, invalid range -ERROR: block number out of range: -1 -SELECT brin_desummarize_range('brinidx_bloom', 0); - brin_desummarize_range ------------------------- - -(1 row) - -SELECT brin_desummarize_range('brinidx_bloom', 0); - brin_desummarize_range ------------------------- - -(1 row) - -SELECT brin_desummarize_range('brinidx_bloom', 100000000); - brin_desummarize_range ------------------------- - -(1 row) - --- Test brin_summarize_range -CREATE TABLE brin_summarize_bloom ( - value int -) WITH (fillfactor=10, autovacuum_enabled=false); -CREATE INDEX brin_summarize_bloom_idx ON brin_summarize_bloom USING brin (value) WITH (pages_per_range=2); --- Fill a few pages -DO $$ -DECLARE curtid tid; -BEGIN - LOOP - INSERT INTO brin_summarize_bloom VALUES (1) RETURNING ctid INTO curtid; - EXIT WHEN curtid > tid '(2, 0)'; - END LOOP; -END; -$$; --- summarize one range -SELECT brin_summarize_range('brin_summarize_bloom_idx', 0); - brin_summarize_range ----------------------- - 0 -(1 row) - --- nothing: already summarized -SELECT brin_summarize_range('brin_summarize_bloom_idx', 1); - brin_summarize_range ----------------------- - 0 -(1 row) - --- summarize one range -SELECT brin_summarize_range('brin_summarize_bloom_idx', 2); - brin_summarize_range ----------------------- - 1 -(1 row) - --- nothing: page doesn't exist in table -SELECT brin_summarize_range('brin_summarize_bloom_idx', 4294967295); - brin_summarize_range ----------------------- - 0 -(1 row) - --- invalid block number values -SELECT brin_summarize_range('brin_summarize_bloom_idx', -1); -ERROR: block number out of range: -1 -SELECT brin_summarize_range('brin_summarize_bloom_idx', 4294967296); -ERROR: block number out of range: 4294967296 --- test brin cost estimates behave sanely based on correlation of values -CREATE TABLE brin_test_bloom (a INT, b INT); -INSERT INTO brin_test_bloom SELECT x/100,x%100 FROM generate_series(1,10000) x(x); -CREATE INDEX brin_test_bloom_a_idx ON brin_test_bloom USING brin (a) WITH (pages_per_range = 2); -CREATE INDEX brin_test_bloom_b_idx ON brin_test_bloom USING brin (b) WITH (pages_per_range = 2); -VACUUM ANALYZE brin_test_bloom; --- Ensure brin index is used when columns are perfectly correlated -EXPLAIN (COSTS OFF) SELECT * 
FROM brin_test_bloom WHERE a = 1; - QUERY PLAN --------------------------------------------------- - Bitmap Heap Scan on brin_test_bloom - Recheck Cond: (a = 1) - -> Bitmap Index Scan on brin_test_bloom_a_idx - Index Cond: (a = 1) -(4 rows) - --- Ensure brin index is not used when values are not correlated -EXPLAIN (COSTS OFF) SELECT * FROM brin_test_bloom WHERE b = 1; - QUERY PLAN ------------------------------ - Seq Scan on brin_test_bloom - Filter: (b = 1) -(2 rows) - +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/brin_multi.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/brin_multi.out --- /tmp/cirrus-ci-build/src/test/regress/expected/brin_multi.out 2024-03-13 23:12:37.622589000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/brin_multi.out 2024-03-13 23:14:29.427658000 +0000 @@ -1,974 +1,2 @@ -CREATE TABLE brintest_multi ( - int8col bigint, - int2col smallint, - int4col integer, - oidcol oid, - tidcol tid, - float4col real, - float8col double precision, - macaddrcol macaddr, - macaddr8col macaddr8, - inetcol inet, - cidrcol cidr, - datecol date, - timecol time without time zone, - timestampcol timestamp without time zone, - timestamptzcol timestamp with time zone, - intervalcol interval, - timetzcol time with time zone, - numericcol numeric, - uuidcol uuid, - lsncol pg_lsn -) WITH (fillfactor=10); -INSERT INTO brintest_multi SELECT - 142857 * tenthous, - thousand, - twothousand, - unique1::oid, - format('(%s,%s)', tenthous, twenty)::tid, - (four + 1.0)/(hundred+1), - odd::float8 / (tenthous + 1), - format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, - substr(fipshash(unique1::text), 1, 16)::macaddr8, - inet '10.2.3.4/24' + tenthous, - cidr '10.2.3/24' + tenthous, - date '1995-08-15' + tenthous, - time '01:20:30' + thousand * interval '18.5 second', - timestamp '1942-07-23 03:05:09' + tenthous * interval '36.38 hours', - timestamptz '1972-10-10 03:00' + thousand * interval '1 hour', - justify_days(justify_hours(tenthous * interval '12 minutes')), - timetz '01:30:20+02' + hundred * interval '15 seconds', - tenthous::numeric(36,30) * fivethous * even / (hundred + 1), - format('%s%s-%s-%s-%s-%s%s%s', to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'))::uuid, - format('%s/%s%s', odd, even, tenthous)::pg_lsn -FROM tenk1 ORDER BY unique2 LIMIT 100; --- throw in some NULL's and different values -INSERT INTO brintest_multi (inetcol, cidrcol) SELECT - inet 'fe80::6e40:8ff:fea9:8c46' + tenthous, - cidr 'fe80::6e40:8ff:fea9:8c46' + tenthous -FROM tenk1 ORDER BY thousand, tenthous LIMIT 25; --- test minmax-multi specific index options --- number of values must be >= 16 -CREATE INDEX brinidx_multi ON brintest_multi USING brin ( - int8col int8_minmax_multi_ops(values_per_range = 7) -); -ERROR: value 7 out of bounds for option "values_per_range" -DETAIL: Valid values are between "8" and "256". 
--- number of values must be <= 256 -CREATE INDEX brinidx_multi ON brintest_multi USING brin ( - int8col int8_minmax_multi_ops(values_per_range = 257) -); -ERROR: value 257 out of bounds for option "values_per_range" -DETAIL: Valid values are between "8" and "256". --- first create an index with a single page range, to force compaction --- due to exceeding the number of values per summary -CREATE INDEX brinidx_multi ON brintest_multi USING brin ( - int8col int8_minmax_multi_ops, - int2col int2_minmax_multi_ops, - int4col int4_minmax_multi_ops, - oidcol oid_minmax_multi_ops, - tidcol tid_minmax_multi_ops, - float4col float4_minmax_multi_ops, - float8col float8_minmax_multi_ops, - macaddrcol macaddr_minmax_multi_ops, - macaddr8col macaddr8_minmax_multi_ops, - inetcol inet_minmax_multi_ops, - cidrcol inet_minmax_multi_ops, - datecol date_minmax_multi_ops, - timecol time_minmax_multi_ops, - timestampcol timestamp_minmax_multi_ops, - timestamptzcol timestamptz_minmax_multi_ops, - intervalcol interval_minmax_multi_ops, - timetzcol timetz_minmax_multi_ops, - numericcol numeric_minmax_multi_ops, - uuidcol uuid_minmax_multi_ops, - lsncol pg_lsn_minmax_multi_ops -); -DROP INDEX brinidx_multi; -CREATE INDEX brinidx_multi ON brintest_multi USING brin ( - int8col int8_minmax_multi_ops, - int2col int2_minmax_multi_ops, - int4col int4_minmax_multi_ops, - oidcol oid_minmax_multi_ops, - tidcol tid_minmax_multi_ops, - float4col float4_minmax_multi_ops, - float8col float8_minmax_multi_ops, - macaddrcol macaddr_minmax_multi_ops, - macaddr8col macaddr8_minmax_multi_ops, - inetcol inet_minmax_multi_ops, - cidrcol inet_minmax_multi_ops, - datecol date_minmax_multi_ops, - timecol time_minmax_multi_ops, - timestampcol timestamp_minmax_multi_ops, - timestamptzcol timestamptz_minmax_multi_ops, - intervalcol interval_minmax_multi_ops, - timetzcol timetz_minmax_multi_ops, - numericcol numeric_minmax_multi_ops, - uuidcol uuid_minmax_multi_ops, - lsncol pg_lsn_minmax_multi_ops -) with (pages_per_range = 1); -CREATE TABLE brinopers_multi (colname name, typ text, - op text[], value text[], matches int[], - check (cardinality(op) = cardinality(value)), - check (cardinality(op) = cardinality(matches))); -INSERT INTO brinopers_multi VALUES - ('int2col', 'int2', - '{>, >=, =, <=, <}', - '{0, 0, 800, 999, 999}', - '{100, 100, 1, 100, 100}'), - ('int2col', 'int4', - '{>, >=, =, <=, <}', - '{0, 0, 800, 999, 1999}', - '{100, 100, 1, 100, 100}'), - ('int2col', 'int8', - '{>, >=, =, <=, <}', - '{0, 0, 800, 999, 1428427143}', - '{100, 100, 1, 100, 100}'), - ('int4col', 'int2', - '{>, >=, =, <=, <}', - '{0, 0, 800, 1999, 1999}', - '{100, 100, 1, 100, 100}'), - ('int4col', 'int4', - '{>, >=, =, <=, <}', - '{0, 0, 800, 1999, 1999}', - '{100, 100, 1, 100, 100}'), - ('int4col', 'int8', - '{>, >=, =, <=, <}', - '{0, 0, 800, 1999, 1428427143}', - '{100, 100, 1, 100, 100}'), - ('int8col', 'int2', - '{>, >=}', - '{0, 0}', - '{100, 100}'), - ('int8col', 'int4', - '{>, >=}', - '{0, 0}', - '{100, 100}'), - ('int8col', 'int8', - '{>, >=, =, <=, <}', - '{0, 0, 1257141600, 1428427143, 1428427143}', - '{100, 100, 1, 100, 100}'), - ('oidcol', 'oid', - '{>, >=, =, <=, <}', - '{0, 0, 8800, 9999, 9999}', - '{100, 100, 1, 100, 100}'), - ('tidcol', 'tid', - '{>, >=, =, <=, <}', - '{"(0,0)", "(0,0)", "(8800,0)", "(9999,19)", "(9999,19)"}', - '{100, 100, 1, 100, 100}'), - ('float4col', 'float4', - '{>, >=, =, <=, <}', - '{0.0103093, 0.0103093, 1, 1, 1}', - '{100, 100, 4, 100, 96}'), - ('float4col', 'float8', - '{>, >=, =, <=, <}', - '{0.0103093, 0.0103093, 
1, 1, 1}', - '{100, 100, 4, 100, 96}'), - ('float8col', 'float4', - '{>, >=, =, <=, <}', - '{0, 0, 0, 1.98, 1.98}', - '{99, 100, 1, 100, 100}'), - ('float8col', 'float8', - '{>, >=, =, <=, <}', - '{0, 0, 0, 1.98, 1.98}', - '{99, 100, 1, 100, 100}'), - ('macaddrcol', 'macaddr', - '{>, >=, =, <=, <}', - '{00:00:01:00:00:00, 00:00:01:00:00:00, 2c:00:2d:00:16:00, ff:fe:00:00:00:00, ff:fe:00:00:00:00}', - '{99, 100, 2, 100, 100}'), - ('macaddr8col', 'macaddr8', - '{>, >=, =, <=, <}', - '{b1:d1:0e:7b:af:a4:42:12, d9:35:91:bd:f7:86:0e:1e, 72:8f:20:6c:2a:01:bf:57, 23:e8:46:63:86:07:ad:cb, 13:16:8e:6a:2e:6c:84:b4}', - '{31, 17, 1, 11, 4}'), - ('inetcol', 'inet', - '{=, <, <=, >, >=}', - '{10.2.14.231/24, 255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0}', - '{1, 100, 100, 125, 125}'), - ('inetcol', 'cidr', - '{<, <=, >, >=}', - '{255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0}', - '{100, 100, 125, 125}'), - ('cidrcol', 'inet', - '{=, <, <=, >, >=}', - '{10.2.14/24, 255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0}', - '{2, 100, 100, 125, 125}'), - ('cidrcol', 'cidr', - '{=, <, <=, >, >=}', - '{10.2.14/24, 255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0}', - '{2, 100, 100, 125, 125}'), - ('datecol', 'date', - '{>, >=, =, <=, <}', - '{1995-08-15, 1995-08-15, 2009-12-01, 2022-12-30, 2022-12-30}', - '{100, 100, 1, 100, 100}'), - ('timecol', 'time', - '{>, >=, =, <=, <}', - '{01:20:30, 01:20:30, 02:28:57, 06:28:31.5, 06:28:31.5}', - '{100, 100, 1, 100, 100}'), - ('timestampcol', 'timestamp', - '{>, >=, =, <=, <}', - '{1942-07-23 03:05:09, 1942-07-23 03:05:09, 1964-03-24 19:26:45, 1984-01-20 22:42:21, 1984-01-20 22:42:21}', - '{100, 100, 1, 100, 100}'), - ('timestampcol', 'timestamptz', - '{>, >=, =, <=, <}', - '{1942-07-23 03:05:09, 1942-07-23 03:05:09, 1964-03-24 19:26:45, 1984-01-20 22:42:21, 1984-01-20 22:42:21}', - '{100, 100, 1, 100, 100}'), - ('timestamptzcol', 'timestamptz', - '{>, >=, =, <=, <}', - '{1972-10-10 03:00:00-04, 1972-10-10 03:00:00-04, 1972-10-19 09:00:00-07, 1972-11-20 19:00:00-03, 1972-11-20 19:00:00-03}', - '{100, 100, 1, 100, 100}'), - ('intervalcol', 'interval', - '{>, >=, =, <=, <}', - '{00:00:00, 00:00:00, 1 mons 13 days 12:24, 2 mons 23 days 07:48:00, 1 year}', - '{100, 100, 1, 100, 100}'), - ('timetzcol', 'timetz', - '{>, >=, =, <=, <}', - '{01:30:20+02, 01:30:20+02, 01:35:50+02, 23:55:05+02, 23:55:05+02}', - '{99, 100, 2, 100, 100}'), - ('numericcol', 'numeric', - '{>, >=, =, <=, <}', - '{0.00, 0.01, 2268164.347826086956521739130434782609, 99470151.9, 99470151.9}', - '{100, 100, 1, 100, 100}'), - ('uuidcol', 'uuid', - '{>, >=, =, <=, <}', - '{00040004-0004-0004-0004-000400040004, 00040004-0004-0004-0004-000400040004, 52225222-5222-5222-5222-522252225222, 99989998-9998-9998-9998-999899989998, 99989998-9998-9998-9998-999899989998}', - '{100, 100, 1, 100, 100}'), - ('lsncol', 'pg_lsn', - '{>, >=, =, <=, <, IS, IS NOT}', - '{0/1200, 0/1200, 44/455222, 198/1999799, 198/1999799, NULL, NULL}', - '{100, 100, 1, 100, 100, 25, 100}'); -DO $x$ -DECLARE - r record; - r2 record; - cond text; - idx_ctids tid[]; - ss_ctids tid[]; - count int; - plan_ok bool; - plan_line text; -BEGIN - FOR r IN SELECT colname, oper, typ, value[ordinality], matches[ordinality] FROM brinopers_multi, unnest(op) WITH ORDINALITY AS oper LOOP - - -- prepare the condition - IF r.value IS NULL THEN - cond := format('%I %s %L', r.colname, r.oper, r.value); - ELSE - cond := format('%I %s %L::%s', r.colname, r.oper, r.value, r.typ); - END IF; - - -- run the query using the brin index - SET enable_seqscan = 
0; - SET enable_bitmapscan = 1; - - plan_ok := false; - FOR plan_line IN EXECUTE format($y$EXPLAIN SELECT array_agg(ctid) FROM brintest_multi WHERE %s $y$, cond) LOOP - IF plan_line LIKE '%Bitmap Heap Scan on brintest_multi%' THEN - plan_ok := true; - END IF; - END LOOP; - IF NOT plan_ok THEN - RAISE WARNING 'did not get bitmap indexscan plan for %', r; - END IF; - - EXECUTE format($y$SELECT array_agg(ctid) FROM brintest_multi WHERE %s $y$, cond) - INTO idx_ctids; - - -- run the query using a seqscan - SET enable_seqscan = 1; - SET enable_bitmapscan = 0; - - plan_ok := false; - FOR plan_line IN EXECUTE format($y$EXPLAIN SELECT array_agg(ctid) FROM brintest_multi WHERE %s $y$, cond) LOOP - IF plan_line LIKE '%Seq Scan on brintest_multi%' THEN - plan_ok := true; - END IF; - END LOOP; - IF NOT plan_ok THEN - RAISE WARNING 'did not get seqscan plan for %', r; - END IF; - - EXECUTE format($y$SELECT array_agg(ctid) FROM brintest_multi WHERE %s $y$, cond) - INTO ss_ctids; - - -- make sure both return the same results - count := array_length(idx_ctids, 1); - - IF NOT (count = array_length(ss_ctids, 1) AND - idx_ctids @> ss_ctids AND - idx_ctids <@ ss_ctids) THEN - -- report the results of each scan to make the differences obvious - RAISE WARNING 'something not right in %: count %', r, count; - SET enable_seqscan = 1; - SET enable_bitmapscan = 0; - FOR r2 IN EXECUTE 'SELECT ' || r.colname || ' FROM brintest_multi WHERE ' || cond LOOP - RAISE NOTICE 'seqscan: %', r2; - END LOOP; - - SET enable_seqscan = 0; - SET enable_bitmapscan = 1; - FOR r2 IN EXECUTE 'SELECT ' || r.colname || ' FROM brintest_multi WHERE ' || cond LOOP - RAISE NOTICE 'bitmapscan: %', r2; - END LOOP; - END IF; - - -- make sure we found expected number of matches - IF count != r.matches THEN RAISE WARNING 'unexpected number of results % for %', count, r; END IF; - END LOOP; -END; -$x$; -RESET enable_seqscan; -RESET enable_bitmapscan; -INSERT INTO brintest_multi SELECT - 142857 * tenthous, - thousand, - twothousand, - unique1::oid, - format('(%s,%s)', tenthous, twenty)::tid, - (four + 1.0)/(hundred+1), - odd::float8 / (tenthous + 1), - format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, - substr(fipshash(unique1::text), 1, 16)::macaddr8, - inet '10.2.3.4' + tenthous, - cidr '10.2.3/24' + tenthous, - date '1995-08-15' + tenthous, - time '01:20:30' + thousand * interval '18.5 second', - timestamp '1942-07-23 03:05:09' + tenthous * interval '36.38 hours', - timestamptz '1972-10-10 03:00' + thousand * interval '1 hour', - justify_days(justify_hours(tenthous * interval '12 minutes')), - timetz '01:30:20' + hundred * interval '15 seconds', - tenthous::numeric(36,30) * fivethous * even / (hundred + 1), - format('%s%s-%s-%s-%s-%s%s%s', to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'))::uuid, - format('%s/%s%s', odd, even, tenthous)::pg_lsn -FROM tenk1 ORDER BY unique2 LIMIT 5 OFFSET 5; -SELECT brin_desummarize_range('brinidx_multi', 0); - brin_desummarize_range ------------------------- - -(1 row) - -VACUUM brintest_multi; -- force a summarization cycle in brinidx --- Try inserting a values with NaN, to test distance calculation. 
-insert into public.brintest_multi (float4col) values (real 'nan'); -insert into public.brintest_multi (float8col) values (real 'nan'); -UPDATE brintest_multi SET int8col = int8col * int4col; --- Test handling of inet netmasks with inet_minmax_multi_ops -CREATE TABLE brin_test_inet (a inet); -CREATE INDEX ON brin_test_inet USING brin (a inet_minmax_multi_ops); -INSERT INTO brin_test_inet VALUES ('127.0.0.1/0'); -INSERT INTO brin_test_inet VALUES ('0.0.0.0/12'); -DROP TABLE brin_test_inet; --- Tests for brin_summarize_new_values -SELECT brin_summarize_new_values('brintest_multi'); -- error, not an index -ERROR: "brintest_multi" is not an index -SELECT brin_summarize_new_values('tenk1_unique1'); -- error, not a BRIN index -ERROR: "tenk1_unique1" is not a BRIN index -SELECT brin_summarize_new_values('brinidx_multi'); -- ok, no change expected - brin_summarize_new_values ---------------------------- - 0 -(1 row) - --- Tests for brin_desummarize_range -SELECT brin_desummarize_range('brinidx_multi', -1); -- error, invalid range -ERROR: block number out of range: -1 -SELECT brin_desummarize_range('brinidx_multi', 0); - brin_desummarize_range ------------------------- - -(1 row) - -SELECT brin_desummarize_range('brinidx_multi', 0); - brin_desummarize_range ------------------------- - -(1 row) - -SELECT brin_desummarize_range('brinidx_multi', 100000000); - brin_desummarize_range ------------------------- - -(1 row) - --- test building an index with many values, to force compaction of the buffer -CREATE TABLE brin_large_range (a int4); -INSERT INTO brin_large_range SELECT i FROM generate_series(1,10000) s(i); -CREATE INDEX brin_large_range_idx ON brin_large_range USING brin (a int4_minmax_multi_ops); -DROP TABLE brin_large_range; --- Test brin_summarize_range -CREATE TABLE brin_summarize_multi ( - value int -) WITH (fillfactor=10, autovacuum_enabled=false); -CREATE INDEX brin_summarize_multi_idx ON brin_summarize_multi USING brin (value) WITH (pages_per_range=2); --- Fill a few pages -DO $$ -DECLARE curtid tid; -BEGIN - LOOP - INSERT INTO brin_summarize_multi VALUES (1) RETURNING ctid INTO curtid; - EXIT WHEN curtid > tid '(2, 0)'; - END LOOP; -END; -$$; --- summarize one range -SELECT brin_summarize_range('brin_summarize_multi_idx', 0); - brin_summarize_range ----------------------- - 0 -(1 row) - --- nothing: already summarized -SELECT brin_summarize_range('brin_summarize_multi_idx', 1); - brin_summarize_range ----------------------- - 0 -(1 row) - --- summarize one range -SELECT brin_summarize_range('brin_summarize_multi_idx', 2); - brin_summarize_range ----------------------- - 1 -(1 row) - --- nothing: page doesn't exist in table -SELECT brin_summarize_range('brin_summarize_multi_idx', 4294967295); - brin_summarize_range ----------------------- - 0 -(1 row) - --- invalid block number values -SELECT brin_summarize_range('brin_summarize_multi_idx', -1); -ERROR: block number out of range: -1 -SELECT brin_summarize_range('brin_summarize_multi_idx', 4294967296); -ERROR: block number out of range: 4294967296 --- test brin cost estimates behave sanely based on correlation of values -CREATE TABLE brin_test_multi (a INT, b INT); -INSERT INTO brin_test_multi SELECT x/100,x%100 FROM generate_series(1,10000) x(x); -CREATE INDEX brin_test_multi_a_idx ON brin_test_multi USING brin (a) WITH (pages_per_range = 2); -CREATE INDEX brin_test_multi_b_idx ON brin_test_multi USING brin (b) WITH (pages_per_range = 2); -VACUUM ANALYZE brin_test_multi; --- Ensure brin index is used when columns are perfectly correlated 
-EXPLAIN (COSTS OFF) SELECT * FROM brin_test_multi WHERE a = 1; - QUERY PLAN --------------------------------------------------- - Bitmap Heap Scan on brin_test_multi - Recheck Cond: (a = 1) - -> Bitmap Index Scan on brin_test_multi_a_idx - Index Cond: (a = 1) -(4 rows) - --- Ensure brin index is not used when values are not correlated -EXPLAIN (COSTS OFF) SELECT * FROM brin_test_multi WHERE b = 1; - QUERY PLAN ------------------------------ - Seq Scan on brin_test_multi - Filter: (b = 1) -(2 rows) - --- do some inequality tests -CREATE TABLE brin_test_multi_1 (a INT, b BIGINT) WITH (fillfactor=10); -INSERT INTO brin_test_multi_1 -SELECT i/5 + mod(911 * i + 483, 25), - i/10 + mod(751 * i + 221, 41) - FROM generate_series(1,1000) s(i); -CREATE INDEX brin_test_multi_1_idx_1 ON brin_test_multi_1 USING brin (a int4_minmax_multi_ops) WITH (pages_per_range=5); -CREATE INDEX brin_test_multi_1_idx_2 ON brin_test_multi_1 USING brin (b int8_minmax_multi_ops) WITH (pages_per_range=5); -SET enable_seqscan=off; --- int: less than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a < 37; - count -------- - 124 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a < 113; - count -------- - 504 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a <= 177; - count -------- - 829 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a <= 25; - count -------- - 69 -(1 row) - --- int: greater than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a > 120; - count -------- - 456 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a >= 180; - count -------- - 161 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a > 71; - count -------- - 701 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a >= 63; - count -------- - 746 -(1 row) - --- int: equals -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a = 207; - count -------- - 3 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a = 177; - count -------- - 5 -(1 row) - --- bigint: less than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b < 73; - count -------- - 529 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b <= 47; - count -------- - 279 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b < 199; - count -------- - 1000 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b <= 150; - count -------- - 1000 -(1 row) - --- bigint: greater than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b > 93; - count -------- - 261 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b > 37; - count -------- - 821 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b >= 215; - count -------- - 0 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b > 201; - count -------- - 0 -(1 row) - --- bigint: equals -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b = 88; - count -------- - 10 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b = 103; - count -------- - 9 -(1 row) - --- now do the same, but insert the rows with the indexes already created --- so that we don't use the "build callback" and instead use the regular --- approach of adding rows into existing ranges -TRUNCATE brin_test_multi_1; -INSERT INTO brin_test_multi_1 -SELECT i/5 + mod(911 * i + 483, 25), - i/10 + mod(751 * i + 221, 41) - FROM generate_series(1,1000) s(i); --- int: less than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a < 37; - count -------- - 124 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a < 113; - count -------- - 504 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a <= 177; - count -------- - 829 -(1 row) - 
-SELECT COUNT(*) FROM brin_test_multi_1 WHERE a <= 25; - count -------- - 69 -(1 row) - --- int: greater than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a > 120; - count -------- - 456 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a >= 180; - count -------- - 161 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a > 71; - count -------- - 701 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a >= 63; - count -------- - 746 -(1 row) - --- int: equals -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a = 207; - count -------- - 3 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a = 177; - count -------- - 5 -(1 row) - --- bigint: less than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b < 73; - count -------- - 529 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b <= 47; - count -------- - 279 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b < 199; - count -------- - 1000 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b <= 150; - count -------- - 1000 -(1 row) - --- bigint: greater than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b > 93; - count -------- - 261 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b > 37; - count -------- - 821 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b >= 215; - count -------- - 0 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b > 201; - count -------- - 0 -(1 row) - --- bigint: equals -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b = 88; - count -------- - 10 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b = 103; - count -------- - 9 -(1 row) - -DROP TABLE brin_test_multi_1; -RESET enable_seqscan; --- do some inequality tests for varlena data types -CREATE TABLE brin_test_multi_2 (a UUID) WITH (fillfactor=10); -INSERT INTO brin_test_multi_2 -SELECT v::uuid FROM (SELECT row_number() OVER (ORDER BY v) c, v FROM (SELECT fipshash((i/13)::text) AS v FROM generate_series(1,1000) s(i)) foo) bar ORDER BY c + 25 * random(); -CREATE INDEX brin_test_multi_2_idx ON brin_test_multi_2 USING brin (a uuid_minmax_multi_ops) WITH (pages_per_range=5); -SET enable_seqscan=off; -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a < '3d914f93-48c9-cc0f-f8a7-9716700b9fcd'; - count -------- - 195 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a > '3d914f93-48c9-cc0f-f8a7-9716700b9fcd'; - count -------- - 792 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a <= 'f369cb89-fc62-7e66-8987-007d121ed1ea'; - count -------- - 961 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a >= 'aea92132-c4cb-eb26-3e6a-c2bf6c183b5d'; - count -------- - 273 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a = '5feceb66-ffc8-6f38-d952-786c6d696c79'; - count -------- - 12 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a = '86e50149-6586-6131-2a9e-0b35558d84f6'; - count -------- - 13 -(1 row) - --- now do the same, but insert the rows with the indexes already created --- so that we don't use the "build callback" and instead use the regular --- approach of adding rows into existing ranges -TRUNCATE brin_test_multi_2; -INSERT INTO brin_test_multi_2 -SELECT v::uuid FROM (SELECT row_number() OVER (ORDER BY v) c, v FROM (SELECT fipshash((i/13)::text) AS v FROM generate_series(1,1000) s(i)) foo) bar ORDER BY c + 25 * random(); -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a < '3d914f93-48c9-cc0f-f8a7-9716700b9fcd'; - count -------- - 195 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a > '3d914f93-48c9-cc0f-f8a7-9716700b9fcd'; - count -------- - 792 -(1 row) - 
-SELECT COUNT(*) FROM brin_test_multi_2 WHERE a <= 'f369cb89-fc62-7e66-8987-007d121ed1ea'; - count -------- - 961 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a >= 'aea92132-c4cb-eb26-3e6a-c2bf6c183b5d'; - count -------- - 273 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a = '5feceb66-ffc8-6f38-d952-786c6d696c79'; - count -------- - 12 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a = '86e50149-6586-6131-2a9e-0b35558d84f6'; - count -------- - 13 -(1 row) - -DROP TABLE brin_test_multi_2; -RESET enable_seqscan; --- test overflows during CREATE INDEX with extreme timestamp values -CREATE TABLE brin_timestamp_test(a TIMESTAMPTZ); -SET datestyle TO iso; --- values close to timestamp minimum -INSERT INTO brin_timestamp_test -SELECT '4713-01-01 00:00:01 BC'::timestamptz + (i || ' seconds')::interval - FROM generate_series(1,30) s(i); --- values close to timestamp maximum -INSERT INTO brin_timestamp_test -SELECT '294276-12-01 00:00:01'::timestamptz + (i || ' seconds')::interval - FROM generate_series(1,30) s(i); -CREATE INDEX ON brin_timestamp_test USING brin (a timestamptz_minmax_multi_ops) WITH (pages_per_range=1); -DROP TABLE brin_timestamp_test; --- test overflows during CREATE INDEX with extreme date values -CREATE TABLE brin_date_test(a DATE); --- insert values close to date minimum -INSERT INTO brin_date_test SELECT '4713-01-01 BC'::date + i FROM generate_series(1, 30) s(i); --- insert values close to date minimum -INSERT INTO brin_date_test SELECT '5874897-12-01'::date + i FROM generate_series(1, 30) s(i); -CREATE INDEX ON brin_date_test USING brin (a date_minmax_multi_ops) WITH (pages_per_range=1); -SET enable_seqscan = off; --- make sure the ranges were built correctly and 2023-01-01 eliminates all -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) -SELECT * FROM brin_date_test WHERE a = '2023-01-01'::date; - QUERY PLAN -------------------------------------------------------------------------- - Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1) - Recheck Cond: (a = '2023-01-01'::date) - -> Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '2023-01-01'::date) -(4 rows) - -DROP TABLE brin_date_test; -RESET enable_seqscan; --- test handling of infinite timestamp values -CREATE TABLE brin_timestamp_test(a TIMESTAMP); -INSERT INTO brin_timestamp_test VALUES ('-infinity'), ('infinity'); -INSERT INTO brin_timestamp_test -SELECT i FROM generate_series('2000-01-01'::timestamp, '2000-02-09'::timestamp, '1 day'::interval) s(i); -CREATE INDEX ON brin_timestamp_test USING brin (a timestamp_minmax_multi_ops) WITH (pages_per_range=1); -SET enable_seqscan = off; -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) -SELECT * FROM brin_timestamp_test WHERE a = '2023-01-01'::timestamp; - QUERY PLAN ------------------------------------------------------------------------------- - Bitmap Heap Scan on brin_timestamp_test (actual rows=0 loops=1) - Recheck Cond: (a = '2023-01-01 00:00:00'::timestamp without time zone) - -> Bitmap Index Scan on brin_timestamp_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '2023-01-01 00:00:00'::timestamp without time zone) -(4 rows) - -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) -SELECT * FROM brin_timestamp_test WHERE a = '1900-01-01'::timestamp; - QUERY PLAN ------------------------------------------------------------------------------- - Bitmap Heap Scan on brin_timestamp_test (actual rows=0 loops=1) - Recheck Cond: (a = '1900-01-01 00:00:00'::timestamp without time zone) - 
-> Bitmap Index Scan on brin_timestamp_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '1900-01-01 00:00:00'::timestamp without time zone) -(4 rows) - -DROP TABLE brin_timestamp_test; -RESET enable_seqscan; --- test handling of infinite date values -CREATE TABLE brin_date_test(a DATE); -INSERT INTO brin_date_test VALUES ('-infinity'), ('infinity'); -INSERT INTO brin_date_test SELECT '2000-01-01'::date + i FROM generate_series(1, 40) s(i); -CREATE INDEX ON brin_date_test USING brin (a date_minmax_multi_ops) WITH (pages_per_range=1); -SET enable_seqscan = off; -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) -SELECT * FROM brin_date_test WHERE a = '2023-01-01'::date; - QUERY PLAN -------------------------------------------------------------------------- - Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1) - Recheck Cond: (a = '2023-01-01'::date) - -> Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '2023-01-01'::date) -(4 rows) - -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) -SELECT * FROM brin_date_test WHERE a = '1900-01-01'::date; - QUERY PLAN -------------------------------------------------------------------------- - Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1) - Recheck Cond: (a = '1900-01-01'::date) - -> Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '1900-01-01'::date) -(4 rows) - -DROP TABLE brin_date_test; -RESET enable_seqscan; -RESET datestyle; --- test handling of overflow for interval values -CREATE TABLE brin_interval_test(a INTERVAL); -INSERT INTO brin_interval_test SELECT (i || ' years')::interval FROM generate_series(-178000000, -177999980) s(i); -INSERT INTO brin_interval_test SELECT (i || ' years')::interval FROM generate_series( 177999980, 178000000) s(i); -CREATE INDEX ON brin_interval_test USING brin (a interval_minmax_multi_ops) WITH (pages_per_range=1); -SET enable_seqscan = off; -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) -SELECT * FROM brin_interval_test WHERE a = '-30 years'::interval; - QUERY PLAN ------------------------------------------------------------------------------ - Bitmap Heap Scan on brin_interval_test (actual rows=0 loops=1) - Recheck Cond: (a = '@ 30 years ago'::interval) - -> Bitmap Index Scan on brin_interval_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '@ 30 years ago'::interval) -(4 rows) - -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) -SELECT * FROM brin_interval_test WHERE a = '30 years'::interval; - QUERY PLAN ------------------------------------------------------------------------------ - Bitmap Heap Scan on brin_interval_test (actual rows=0 loops=1) - Recheck Cond: (a = '@ 30 years'::interval) - -> Bitmap Index Scan on brin_interval_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '@ 30 years'::interval) -(4 rows) - -DROP TABLE brin_interval_test; -RESET enable_seqscan; --- test handling of infinite interval values -CREATE TABLE brin_interval_test(a INTERVAL); -INSERT INTO brin_interval_test VALUES ('-infinity'), ('infinity'); -INSERT INTO brin_interval_test SELECT (i || ' days')::interval FROM generate_series(100, 140) s(i); -CREATE INDEX ON brin_interval_test USING brin (a interval_minmax_multi_ops) WITH (pages_per_range=1); -SET enable_seqscan = off; -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) -SELECT * FROM brin_interval_test WHERE a = '-30 years'::interval; - QUERY PLAN ------------------------------------------------------------------------------ - Bitmap Heap Scan on 
brin_interval_test (actual rows=0 loops=1) - Recheck Cond: (a = '@ 30 years ago'::interval) - -> Bitmap Index Scan on brin_interval_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '@ 30 years ago'::interval) -(4 rows) - -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) -SELECT * FROM brin_interval_test WHERE a = '30 years'::interval; - QUERY PLAN ------------------------------------------------------------------------------ - Bitmap Heap Scan on brin_interval_test (actual rows=0 loops=1) - Recheck Cond: (a = '@ 30 years'::interval) - -> Bitmap Index Scan on brin_interval_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '@ 30 years'::interval) -(4 rows) - -DROP TABLE brin_interval_test; -RESET enable_seqscan; -RESET datestyle; +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/create_table_like.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/create_table_like.out --- /tmp/cirrus-ci-build/src/test/regress/expected/create_table_like.out 2024-03-13 23:12:37.623411000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/create_table_like.out 2024-03-13 23:14:29.456693000 +0000 @@ -1,530 +1,2 @@ -/* Test inheritance of structure (LIKE) */ -CREATE TABLE inhx (xx text DEFAULT 'text'); -/* - * Test double inheritance - * - * Ensure that defaults are NOT included unless - * INCLUDING DEFAULTS is specified - */ -CREATE TABLE ctla (aa TEXT); -CREATE TABLE ctlb (bb TEXT) INHERITS (ctla); -CREATE TABLE foo (LIKE nonexistent); -ERROR: relation "nonexistent" does not exist -LINE 1: CREATE TABLE foo (LIKE nonexistent); - ^ -CREATE TABLE inhe (ee text, LIKE inhx) inherits (ctlb); -INSERT INTO inhe VALUES ('ee-col1', 'ee-col2', DEFAULT, 'ee-col4'); -SELECT * FROM inhe; /* Columns aa, bb, xx value NULL, ee */ - aa | bb | ee | xx ----------+---------+----+--------- - ee-col1 | ee-col2 | | ee-col4 -(1 row) - -SELECT * FROM inhx; /* Empty set since LIKE inherits structure only */ - xx ----- -(0 rows) - -SELECT * FROM ctlb; /* Has ee entry */ - aa | bb ----------+--------- - ee-col1 | ee-col2 -(1 row) - -SELECT * FROM ctla; /* Has ee entry */ - aa ---------- - ee-col1 -(1 row) - -CREATE TABLE inhf (LIKE inhx, LIKE inhx); /* Throw error */ -ERROR: column "xx" specified more than once -CREATE TABLE inhf (LIKE inhx INCLUDING DEFAULTS INCLUDING CONSTRAINTS); -INSERT INTO inhf DEFAULT VALUES; -SELECT * FROM inhf; /* Single entry with value 'text' */ - xx ------- - text -(1 row) - -ALTER TABLE inhx add constraint foo CHECK (xx = 'text'); -ALTER TABLE inhx ADD PRIMARY KEY (xx); -CREATE TABLE inhg (LIKE inhx); /* Doesn't copy constraint */ -INSERT INTO inhg VALUES ('foo'); -DROP TABLE inhg; -CREATE TABLE inhg (x text, LIKE inhx INCLUDING CONSTRAINTS, y text); /* Copies constraints */ -INSERT INTO inhg VALUES ('x', 'text', 'y'); /* Succeeds */ -INSERT INTO inhg VALUES ('x', 'text', 'y'); /* Succeeds -- Unique constraints not copied */ -INSERT INTO inhg VALUES ('x', 'foo', 'y'); /* fails due to constraint */ -ERROR: new row for relation "inhg" violates check constraint "foo" -DETAIL: Failing row contains (x, foo, y). 
-SELECT * FROM inhg; /* Two records with three columns in order x=x, xx=text, y=y */ - x | xx | y ----+------+--- - x | text | y - x | text | y -(2 rows) - -DROP TABLE inhg; -CREATE TABLE test_like_id_1 (a bigint GENERATED ALWAYS AS IDENTITY, b text); -\d test_like_id_1 - Table "public.test_like_id_1" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+------------------------------ - a | bigint | | not null | generated always as identity - b | text | | | - -INSERT INTO test_like_id_1 (b) VALUES ('b1'); -SELECT * FROM test_like_id_1; - a | b ----+---- - 1 | b1 -(1 row) - -CREATE TABLE test_like_id_2 (LIKE test_like_id_1); -\d test_like_id_2 - Table "public.test_like_id_2" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+--------- - a | bigint | | not null | - b | text | | | - -INSERT INTO test_like_id_2 (b) VALUES ('b2'); -ERROR: null value in column "a" of relation "test_like_id_2" violates not-null constraint -DETAIL: Failing row contains (null, b2). -SELECT * FROM test_like_id_2; -- identity was not copied - a | b ----+--- -(0 rows) - -CREATE TABLE test_like_id_3 (LIKE test_like_id_1 INCLUDING IDENTITY); -\d test_like_id_3 - Table "public.test_like_id_3" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+------------------------------ - a | bigint | | not null | generated always as identity - b | text | | | - -INSERT INTO test_like_id_3 (b) VALUES ('b3'); -SELECT * FROM test_like_id_3; -- identity was copied and applied - a | b ----+---- - 1 | b3 -(1 row) - -DROP TABLE test_like_id_1, test_like_id_2, test_like_id_3; -CREATE TABLE test_like_gen_1 (a int, b int GENERATED ALWAYS AS (a * 2) STORED); -\d test_like_gen_1 - Table "public.test_like_gen_1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - a | integer | | | - b | integer | | | generated always as (a * 2) stored - -INSERT INTO test_like_gen_1 (a) VALUES (1); -SELECT * FROM test_like_gen_1; - a | b ----+--- - 1 | 2 -(1 row) - -CREATE TABLE test_like_gen_2 (LIKE test_like_gen_1); -\d test_like_gen_2 - Table "public.test_like_gen_2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | - -INSERT INTO test_like_gen_2 (a) VALUES (1); -SELECT * FROM test_like_gen_2; - a | b ----+--- - 1 | -(1 row) - -CREATE TABLE test_like_gen_3 (LIKE test_like_gen_1 INCLUDING GENERATED); -\d test_like_gen_3 - Table "public.test_like_gen_3" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - a | integer | | | - b | integer | | | generated always as (a * 2) stored - -INSERT INTO test_like_gen_3 (a) VALUES (1); -SELECT * FROM test_like_gen_3; - a | b ----+--- - 1 | 2 -(1 row) - -DROP TABLE test_like_gen_1, test_like_gen_2, test_like_gen_3; --- also test generated column with a "forward" reference (bug #16342) -CREATE TABLE test_like_4 (b int DEFAULT 42, - c int GENERATED ALWAYS AS (a * 2) STORED, - a int CHECK (a > 0)); -\d test_like_4 - Table "public.test_like_4" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - b | integer | | | 42 - c | integer | | | generated always as (a * 2) stored - a | integer | | | -Check constraints: - "test_like_4_a_check" CHECK (a > 0) - -CREATE TABLE test_like_4a (LIKE test_like_4); 
-CREATE TABLE test_like_4b (LIKE test_like_4 INCLUDING DEFAULTS); -CREATE TABLE test_like_4c (LIKE test_like_4 INCLUDING GENERATED); -CREATE TABLE test_like_4d (LIKE test_like_4 INCLUDING DEFAULTS INCLUDING GENERATED); -\d test_like_4a - Table "public.test_like_4a" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - b | integer | | | - c | integer | | | - a | integer | | | - -INSERT INTO test_like_4a (a) VALUES(11); -SELECT a, b, c FROM test_like_4a; - a | b | c -----+---+--- - 11 | | -(1 row) - -\d test_like_4b - Table "public.test_like_4b" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - b | integer | | | 42 - c | integer | | | - a | integer | | | - -INSERT INTO test_like_4b (a) VALUES(11); -SELECT a, b, c FROM test_like_4b; - a | b | c -----+----+--- - 11 | 42 | -(1 row) - -\d test_like_4c - Table "public.test_like_4c" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - b | integer | | | - c | integer | | | generated always as (a * 2) stored - a | integer | | | - -INSERT INTO test_like_4c (a) VALUES(11); -SELECT a, b, c FROM test_like_4c; - a | b | c -----+---+---- - 11 | | 22 -(1 row) - -\d test_like_4d - Table "public.test_like_4d" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - b | integer | | | 42 - c | integer | | | generated always as (a * 2) stored - a | integer | | | - -INSERT INTO test_like_4d (a) VALUES(11); -SELECT a, b, c FROM test_like_4d; - a | b | c -----+----+---- - 11 | 42 | 22 -(1 row) - --- Test renumbering of Vars when combining LIKE with inheritance -CREATE TABLE test_like_5 (x point, y point, z point); -CREATE TABLE test_like_5x (p int CHECK (p > 0), - q int GENERATED ALWAYS AS (p * 2) STORED); -CREATE TABLE test_like_5c (LIKE test_like_4 INCLUDING ALL) - INHERITS (test_like_5, test_like_5x); -\d test_like_5c - Table "public.test_like_5c" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - x | point | | | - y | point | | | - z | point | | | - p | integer | | | - q | integer | | | generated always as (p * 2) stored - b | integer | | | 42 - c | integer | | | generated always as (a * 2) stored - a | integer | | | -Check constraints: - "test_like_4_a_check" CHECK (a > 0) - "test_like_5x_p_check" CHECK (p > 0) -Inherits: test_like_5, - test_like_5x - -DROP TABLE test_like_4, test_like_4a, test_like_4b, test_like_4c, test_like_4d; -DROP TABLE test_like_5, test_like_5x, test_like_5c; -CREATE TABLE inhg (x text, LIKE inhx INCLUDING INDEXES, y text); /* copies indexes */ -INSERT INTO inhg VALUES (5, 10); -INSERT INTO inhg VALUES (20, 10); -- should fail -ERROR: duplicate key value violates unique constraint "inhg_pkey" -DETAIL: Key (xx)=(10) already exists. 
-DROP TABLE inhg; -/* Multiple primary keys creation should fail */ -CREATE TABLE inhg (x text, LIKE inhx INCLUDING INDEXES, PRIMARY KEY(x)); /* fails */ -ERROR: multiple primary keys for table "inhg" are not allowed -CREATE TABLE inhz (xx text DEFAULT 'text', yy int UNIQUE); -CREATE UNIQUE INDEX inhz_xx_idx on inhz (xx) WHERE xx <> 'test'; -/* Ok to create multiple unique indexes */ -CREATE TABLE inhg (x text UNIQUE, LIKE inhz INCLUDING INDEXES); -INSERT INTO inhg (xx, yy, x) VALUES ('test', 5, 10); -INSERT INTO inhg (xx, yy, x) VALUES ('test', 10, 15); -INSERT INTO inhg (xx, yy, x) VALUES ('foo', 10, 15); -- should fail -ERROR: duplicate key value violates unique constraint "inhg_x_key" -DETAIL: Key (x)=(15) already exists. -DROP TABLE inhg; -DROP TABLE inhz; -/* Use primary key imported by LIKE for self-referential FK constraint */ -CREATE TABLE inhz (x text REFERENCES inhz, LIKE inhx INCLUDING INDEXES); -\d inhz - Table "public.inhz" - Column | Type | Collation | Nullable | Default ---------+------+-----------+----------+--------- - x | text | | | - xx | text | | not null | -Indexes: - "inhz_pkey" PRIMARY KEY, btree (xx) -Foreign-key constraints: - "inhz_x_fkey" FOREIGN KEY (x) REFERENCES inhz(xx) -Referenced by: - TABLE "inhz" CONSTRAINT "inhz_x_fkey" FOREIGN KEY (x) REFERENCES inhz(xx) - -DROP TABLE inhz; --- including storage and comments -CREATE TABLE ctlt1 (a text CHECK (length(a) > 2) PRIMARY KEY, b text); -CREATE INDEX ctlt1_b_key ON ctlt1 (b); -CREATE INDEX ctlt1_fnidx ON ctlt1 ((a || b)); -CREATE STATISTICS ctlt1_a_b_stat ON a,b FROM ctlt1; -CREATE STATISTICS ctlt1_expr_stat ON (a || b) FROM ctlt1; -COMMENT ON STATISTICS ctlt1_a_b_stat IS 'ab stats'; -COMMENT ON STATISTICS ctlt1_expr_stat IS 'ab expr stats'; -COMMENT ON COLUMN ctlt1.a IS 'A'; -COMMENT ON COLUMN ctlt1.b IS 'B'; -COMMENT ON CONSTRAINT ctlt1_a_check ON ctlt1 IS 't1_a_check'; -COMMENT ON INDEX ctlt1_pkey IS 'index pkey'; -COMMENT ON INDEX ctlt1_b_key IS 'index b_key'; -ALTER TABLE ctlt1 ALTER COLUMN a SET STORAGE MAIN; -CREATE TABLE ctlt2 (c text); -ALTER TABLE ctlt2 ALTER COLUMN c SET STORAGE EXTERNAL; -COMMENT ON COLUMN ctlt2.c IS 'C'; -CREATE TABLE ctlt3 (a text CHECK (length(a) < 5), c text CHECK (length(c) < 7)); -ALTER TABLE ctlt3 ALTER COLUMN c SET STORAGE EXTERNAL; -ALTER TABLE ctlt3 ALTER COLUMN a SET STORAGE MAIN; -CREATE INDEX ctlt3_fnidx ON ctlt3 ((a || c)); -COMMENT ON COLUMN ctlt3.a IS 'A3'; -COMMENT ON COLUMN ctlt3.c IS 'C'; -COMMENT ON CONSTRAINT ctlt3_a_check ON ctlt3 IS 't3_a_check'; -CREATE TABLE ctlt4 (a text, c text); -ALTER TABLE ctlt4 ALTER COLUMN c SET STORAGE EXTERNAL; -CREATE TABLE ctlt12_storage (LIKE ctlt1 INCLUDING STORAGE, LIKE ctlt2 INCLUDING STORAGE); -\d+ ctlt12_storage - Table "public.ctlt12_storage" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+------+-----------+----------+---------+----------+--------------+------------- - a | text | | not null | | main | | - b | text | | | | extended | | - c | text | | | | external | | -Not-null constraints: - "ctlt12_storage_a_not_null" NOT NULL "a" - -CREATE TABLE ctlt12_comments (LIKE ctlt1 INCLUDING COMMENTS, LIKE ctlt2 INCLUDING COMMENTS); -\d+ ctlt12_comments - Table "public.ctlt12_comments" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+------+-----------+----------+---------+----------+--------------+------------- - a | text | | not null | | extended | | A - b | text | | | | extended | | B - c | text | | | | extended | | C -Not-null 
constraints: - "ctlt12_comments_a_not_null" NOT NULL "a" - -CREATE TABLE ctlt1_inh (LIKE ctlt1 INCLUDING CONSTRAINTS INCLUDING COMMENTS) INHERITS (ctlt1); -NOTICE: merging column "a" with inherited definition -NOTICE: merging column "b" with inherited definition -NOTICE: merging constraint "ctlt1_a_check" with inherited definition -\d+ ctlt1_inh - Table "public.ctlt1_inh" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+------+-----------+----------+---------+----------+--------------+------------- - a | text | | not null | | main | | A - b | text | | | | extended | | B -Check constraints: - "ctlt1_a_check" CHECK (length(a) > 2) -Not-null constraints: - "ctlt1_inh_a_not_null" NOT NULL "a" (local, inherited) -Inherits: ctlt1 - -SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt1_inh'::regclass; - description -------------- - t1_a_check -(1 row) - -CREATE TABLE ctlt13_inh () INHERITS (ctlt1, ctlt3); -NOTICE: merging multiple inherited definitions of column "a" -\d+ ctlt13_inh - Table "public.ctlt13_inh" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+------+-----------+----------+---------+----------+--------------+------------- - a | text | | not null | | main | | - b | text | | | | extended | | - c | text | | | | external | | -Check constraints: - "ctlt1_a_check" CHECK (length(a) > 2) - "ctlt3_a_check" CHECK (length(a) < 5) - "ctlt3_c_check" CHECK (length(c) < 7) -Not-null constraints: - "ctlt13_inh_a_not_null" NOT NULL "a" (inherited) -Inherits: ctlt1, - ctlt3 - -CREATE TABLE ctlt13_like (LIKE ctlt3 INCLUDING CONSTRAINTS INCLUDING INDEXES INCLUDING COMMENTS INCLUDING STORAGE) INHERITS (ctlt1); -NOTICE: merging column "a" with inherited definition -\d+ ctlt13_like - Table "public.ctlt13_like" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+------+-----------+----------+---------+----------+--------------+------------- - a | text | | not null | | main | | A3 - b | text | | | | extended | | - c | text | | | | external | | C -Indexes: - "ctlt13_like_expr_idx" btree ((a || c)) -Check constraints: - "ctlt1_a_check" CHECK (length(a) > 2) - "ctlt3_a_check" CHECK (length(a) < 5) - "ctlt3_c_check" CHECK (length(c) < 7) -Not-null constraints: - "ctlt13_like_a_not_null" NOT NULL "a" (inherited) -Inherits: ctlt1 - -SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt13_like'::regclass; - description -------------- - t3_a_check -(1 row) - -CREATE TABLE ctlt_all (LIKE ctlt1 INCLUDING ALL); -\d+ ctlt_all - Table "public.ctlt_all" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+------+-----------+----------+---------+----------+--------------+------------- - a | text | | not null | | main | | A - b | text | | | | extended | | B -Indexes: - "ctlt_all_pkey" PRIMARY KEY, btree (a) - "ctlt_all_b_idx" btree (b) - "ctlt_all_expr_idx" btree ((a || b)) -Check constraints: - "ctlt1_a_check" CHECK (length(a) > 2) -Statistics objects: - "public.ctlt_all_a_b_stat" ON a, b FROM ctlt_all - "public.ctlt_all_expr_stat" ON (a || b) FROM ctlt_all - -SELECT c.relname, objsubid, description FROM pg_description, pg_index i, pg_class c WHERE classoid = 'pg_class'::regclass AND objoid = i.indexrelid AND c.oid = i.indexrelid AND i.indrelid = 
'ctlt_all'::regclass ORDER BY c.relname, objsubid; - relname | objsubid | description -----------------+----------+------------- - ctlt_all_b_idx | 0 | index b_key - ctlt_all_pkey | 0 | index pkey -(2 rows) - -SELECT s.stxname, objsubid, description FROM pg_description, pg_statistic_ext s WHERE classoid = 'pg_statistic_ext'::regclass AND objoid = s.oid AND s.stxrelid = 'ctlt_all'::regclass ORDER BY s.stxname, objsubid; - stxname | objsubid | description ---------------------+----------+--------------- - ctlt_all_a_b_stat | 0 | ab stats - ctlt_all_expr_stat | 0 | ab expr stats -(2 rows) - -CREATE TABLE inh_error1 () INHERITS (ctlt1, ctlt4); -NOTICE: merging multiple inherited definitions of column "a" -ERROR: inherited column "a" has a storage parameter conflict -DETAIL: MAIN versus EXTENDED -CREATE TABLE inh_error2 (LIKE ctlt4 INCLUDING STORAGE) INHERITS (ctlt1); -NOTICE: merging column "a" with inherited definition -ERROR: column "a" has a storage parameter conflict -DETAIL: MAIN versus EXTENDED --- Check that LIKE isn't confused by a system catalog of the same name -CREATE TABLE pg_attrdef (LIKE ctlt1 INCLUDING ALL); -\d+ public.pg_attrdef - Table "public.pg_attrdef" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+------+-----------+----------+---------+----------+--------------+------------- - a | text | | not null | | main | | A - b | text | | | | extended | | B -Indexes: - "pg_attrdef_pkey" PRIMARY KEY, btree (a) - "pg_attrdef_b_idx" btree (b) - "pg_attrdef_expr_idx" btree ((a || b)) -Check constraints: - "ctlt1_a_check" CHECK (length(a) > 2) -Statistics objects: - "public.pg_attrdef_a_b_stat" ON a, b FROM public.pg_attrdef - "public.pg_attrdef_expr_stat" ON (a || b) FROM public.pg_attrdef - -DROP TABLE public.pg_attrdef; --- Check that LIKE isn't confused when new table masks the old, either -BEGIN; -CREATE SCHEMA ctl_schema; -SET LOCAL search_path = ctl_schema, public; -CREATE TABLE ctlt1 (LIKE ctlt1 INCLUDING ALL); -\d+ ctlt1 - Table "ctl_schema.ctlt1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+------+-----------+----------+---------+----------+--------------+------------- - a | text | | not null | | main | | A - b | text | | | | extended | | B -Indexes: - "ctlt1_pkey" PRIMARY KEY, btree (a) - "ctlt1_b_idx" btree (b) - "ctlt1_expr_idx" btree ((a || b)) -Check constraints: - "ctlt1_a_check" CHECK (length(a) > 2) -Statistics objects: - "ctl_schema.ctlt1_a_b_stat" ON a, b FROM ctlt1 - "ctl_schema.ctlt1_expr_stat" ON (a || b) FROM ctlt1 - -ROLLBACK; -DROP TABLE ctlt1, ctlt2, ctlt3, ctlt4, ctlt12_storage, ctlt12_comments, ctlt1_inh, ctlt13_inh, ctlt13_like, ctlt_all, ctla, ctlb CASCADE; -NOTICE: drop cascades to table inhe --- LIKE must respect NO INHERIT property of constraints -CREATE TABLE noinh_con_copy (a int CHECK (a > 0) NO INHERIT); -CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS); -\d noinh_con_copy1 - Table "public.noinh_con_copy1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | -Check constraints: - "noinh_con_copy_a_check" CHECK (a > 0) NO INHERIT - --- fail, as partitioned tables don't allow NO INHERIT constraints -CREATE TABLE noinh_con_copy1_parted (LIKE noinh_con_copy INCLUDING ALL) - PARTITION BY LIST (a); -ERROR: cannot add NO INHERIT constraint to partitioned table "noinh_con_copy1_parted" -DROP TABLE noinh_con_copy, noinh_con_copy1; -/* LIKE with other relation kinds 
*/ -CREATE TABLE ctlt4 (a int, b text); -CREATE SEQUENCE ctlseq1; -CREATE TABLE ctlt10 (LIKE ctlseq1); -- fail -ERROR: relation "ctlseq1" is invalid in LIKE clause -LINE 1: CREATE TABLE ctlt10 (LIKE ctlseq1); - ^ -DETAIL: This operation is not supported for sequences. -CREATE VIEW ctlv1 AS SELECT * FROM ctlt4; -CREATE TABLE ctlt11 (LIKE ctlv1); -CREATE TABLE ctlt11a (LIKE ctlv1 INCLUDING ALL); -CREATE TYPE ctlty1 AS (a int, b text); -CREATE TABLE ctlt12 (LIKE ctlty1); -DROP SEQUENCE ctlseq1; -DROP TYPE ctlty1; -DROP VIEW ctlv1; -DROP TABLE IF EXISTS ctlt4, ctlt10, ctlt11, ctlt11a, ctlt12; -NOTICE: table "ctlt10" does not exist, skipping +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/alter_generic.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/alter_generic.out --- /tmp/cirrus-ci-build/src/test/regress/expected/alter_generic.out 2024-03-13 23:12:37.622300000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/alter_generic.out 2024-03-13 23:14:29.453894000 +0000 @@ -1,755 +1,2 @@ --- --- Test for ALTER some_object {RENAME TO, OWNER TO, SET SCHEMA} --- --- directory paths and dlsuffix are passed to us in environment variables -\getenv libdir PG_LIBDIR -\getenv dlsuffix PG_DLSUFFIX -\set regresslib :libdir '/regress' :dlsuffix -CREATE FUNCTION test_opclass_options_func(internal) - RETURNS void - AS :'regresslib', 'test_opclass_options_func' - LANGUAGE C; --- Clean up in case a prior regression run failed -SET client_min_messages TO 'warning'; -DROP ROLE IF EXISTS regress_alter_generic_user1; -DROP ROLE IF EXISTS regress_alter_generic_user2; -DROP ROLE IF EXISTS regress_alter_generic_user3; -RESET client_min_messages; -CREATE USER regress_alter_generic_user3; -CREATE USER regress_alter_generic_user2; -CREATE USER regress_alter_generic_user1 IN ROLE regress_alter_generic_user3; -CREATE SCHEMA alt_nsp1; -CREATE SCHEMA alt_nsp2; -GRANT ALL ON SCHEMA alt_nsp1, alt_nsp2 TO public; -SET search_path = alt_nsp1, public; --- --- Function and Aggregate --- -SET SESSION AUTHORIZATION regress_alter_generic_user1; -CREATE FUNCTION alt_func1(int) RETURNS int LANGUAGE sql - AS 'SELECT $1 + 1'; -CREATE FUNCTION alt_func2(int) RETURNS int LANGUAGE sql - AS 'SELECT $1 - 1'; -CREATE AGGREGATE alt_agg1 ( - sfunc1 = int4pl, basetype = int4, stype1 = int4, initcond = 0 -); -CREATE AGGREGATE alt_agg2 ( - sfunc1 = int4mi, basetype = int4, stype1 = int4, initcond = 0 -); -ALTER AGGREGATE alt_func1(int) RENAME TO alt_func3; -- failed (not aggregate) -ERROR: function alt_func1(integer) is not an aggregate -ALTER AGGREGATE alt_func1(int) OWNER TO regress_alter_generic_user3; -- failed (not aggregate) -ERROR: function alt_func1(integer) is not an aggregate -ALTER AGGREGATE alt_func1(int) SET SCHEMA alt_nsp2; -- failed (not aggregate) -ERROR: function alt_func1(integer) is not an aggregate -ALTER FUNCTION alt_func1(int) RENAME TO alt_func2; -- failed (name conflict) -ERROR: function alt_func2(integer) already exists in schema "alt_nsp1" -ALTER FUNCTION alt_func1(int) RENAME TO alt_func3; -- OK -ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" -ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_generic_user3; -- OK -ALTER FUNCTION alt_func2(int) SET SCHEMA 
alt_nsp1; -- OK, already there -ALTER FUNCTION alt_func2(int) SET SCHEMA alt_nsp2; -- OK -ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg2; -- failed (name conflict) -ERROR: function alt_agg2(integer) already exists in schema "alt_nsp1" -ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg3; -- OK -ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" -ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user3; -- OK -ALTER AGGREGATE alt_agg2(int) SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_generic_user2; -CREATE FUNCTION alt_func1(int) RETURNS int LANGUAGE sql - AS 'SELECT $1 + 2'; -CREATE FUNCTION alt_func2(int) RETURNS int LANGUAGE sql - AS 'SELECT $1 - 2'; -CREATE AGGREGATE alt_agg1 ( - sfunc1 = int4pl, basetype = int4, stype1 = int4, initcond = 100 -); -CREATE AGGREGATE alt_agg2 ( - sfunc1 = int4mi, basetype = int4, stype1 = int4, initcond = -100 -); -ALTER FUNCTION alt_func3(int) RENAME TO alt_func4; -- failed (not owner) -ERROR: must be owner of function alt_func3 -ALTER FUNCTION alt_func1(int) RENAME TO alt_func4; -- OK -ALTER FUNCTION alt_func3(int) OWNER TO regress_alter_generic_user2; -- failed (not owner) -ERROR: must be owner of function alt_func3 -ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_generic_user3; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user3" -ALTER FUNCTION alt_func3(int) SET SCHEMA alt_nsp2; -- failed (not owner) -ERROR: must be owner of function alt_func3 -ALTER FUNCTION alt_func2(int) SET SCHEMA alt_nsp2; -- failed (name conflicts) -ERROR: function alt_func2(integer) already exists in schema "alt_nsp2" -ALTER AGGREGATE alt_agg3(int) RENAME TO alt_agg4; -- failed (not owner) -ERROR: must be owner of function alt_agg3 -ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg4; -- OK -ALTER AGGREGATE alt_agg3(int) OWNER TO regress_alter_generic_user2; -- failed (not owner) -ERROR: must be owner of function alt_agg3 -ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user3; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user3" -ALTER AGGREGATE alt_agg3(int) SET SCHEMA alt_nsp2; -- failed (not owner) -ERROR: must be owner of function alt_agg3 -ALTER AGGREGATE alt_agg2(int) SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: function alt_agg2(integer) already exists in schema "alt_nsp2" -RESET SESSION AUTHORIZATION; -SELECT n.nspname, proname, prorettype::regtype, prokind, a.rolname - FROM pg_proc p, pg_namespace n, pg_authid a - WHERE p.pronamespace = n.oid AND p.proowner = a.oid - AND n.nspname IN ('alt_nsp1', 'alt_nsp2') - ORDER BY nspname, proname; - nspname | proname | prorettype | prokind | rolname -----------+-----------+------------+---------+----------------------------- - alt_nsp1 | alt_agg2 | integer | a | regress_alter_generic_user2 - alt_nsp1 | alt_agg3 | integer | a | regress_alter_generic_user1 - alt_nsp1 | alt_agg4 | integer | a | regress_alter_generic_user2 - alt_nsp1 | alt_func2 | integer | f | regress_alter_generic_user2 - alt_nsp1 | alt_func3 | integer | f | regress_alter_generic_user1 - alt_nsp1 | alt_func4 | integer | f | regress_alter_generic_user2 - alt_nsp2 | alt_agg2 | integer | a | regress_alter_generic_user3 - alt_nsp2 | alt_func2 | integer | f | regress_alter_generic_user3 -(8 rows) - --- --- We would test collations here, but it's not possible because the error --- messages tend to be nonportable. 
--- --- --- Conversion --- -SET SESSION AUTHORIZATION regress_alter_generic_user1; -CREATE CONVERSION alt_conv1 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; -CREATE CONVERSION alt_conv2 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; -ALTER CONVERSION alt_conv1 RENAME TO alt_conv2; -- failed (name conflict) -ERROR: conversion "alt_conv2" already exists in schema "alt_nsp1" -ALTER CONVERSION alt_conv1 RENAME TO alt_conv3; -- OK -ALTER CONVERSION alt_conv2 OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" -ALTER CONVERSION alt_conv2 OWNER TO regress_alter_generic_user3; -- OK -ALTER CONVERSION alt_conv2 SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_generic_user2; -CREATE CONVERSION alt_conv1 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; -CREATE CONVERSION alt_conv2 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; -ALTER CONVERSION alt_conv3 RENAME TO alt_conv4; -- failed (not owner) -ERROR: must be owner of conversion alt_conv3 -ALTER CONVERSION alt_conv1 RENAME TO alt_conv4; -- OK -ALTER CONVERSION alt_conv3 OWNER TO regress_alter_generic_user2; -- failed (not owner) -ERROR: must be owner of conversion alt_conv3 -ALTER CONVERSION alt_conv2 OWNER TO regress_alter_generic_user3; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user3" -ALTER CONVERSION alt_conv3 SET SCHEMA alt_nsp2; -- failed (not owner) -ERROR: must be owner of conversion alt_conv3 -ALTER CONVERSION alt_conv2 SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: conversion "alt_conv2" already exists in schema "alt_nsp2" -RESET SESSION AUTHORIZATION; -SELECT n.nspname, c.conname, a.rolname - FROM pg_conversion c, pg_namespace n, pg_authid a - WHERE c.connamespace = n.oid AND c.conowner = a.oid - AND n.nspname IN ('alt_nsp1', 'alt_nsp2') - ORDER BY nspname, conname; - nspname | conname | rolname -----------+-----------+----------------------------- - alt_nsp1 | alt_conv2 | regress_alter_generic_user2 - alt_nsp1 | alt_conv3 | regress_alter_generic_user1 - alt_nsp1 | alt_conv4 | regress_alter_generic_user2 - alt_nsp2 | alt_conv2 | regress_alter_generic_user3 -(4 rows) - --- --- Foreign Data Wrapper and Foreign Server --- -CREATE FOREIGN DATA WRAPPER alt_fdw1; -CREATE FOREIGN DATA WRAPPER alt_fdw2; -CREATE SERVER alt_fserv1 FOREIGN DATA WRAPPER alt_fdw1; -CREATE SERVER alt_fserv2 FOREIGN DATA WRAPPER alt_fdw2; -ALTER FOREIGN DATA WRAPPER alt_fdw1 RENAME TO alt_fdw2; -- failed (name conflict) -ERROR: foreign-data wrapper "alt_fdw2" already exists -ALTER FOREIGN DATA WRAPPER alt_fdw1 RENAME TO alt_fdw3; -- OK -ALTER SERVER alt_fserv1 RENAME TO alt_fserv2; -- failed (name conflict) -ERROR: server "alt_fserv2" already exists -ALTER SERVER alt_fserv1 RENAME TO alt_fserv3; -- OK -SELECT fdwname FROM pg_foreign_data_wrapper WHERE fdwname like 'alt_fdw%'; - fdwname ----------- - alt_fdw2 - alt_fdw3 -(2 rows) - -SELECT srvname FROM pg_foreign_server WHERE srvname like 'alt_fserv%'; - srvname ------------- - alt_fserv2 - alt_fserv3 -(2 rows) - --- --- Procedural Language --- -CREATE LANGUAGE alt_lang1 HANDLER plpgsql_call_handler; -CREATE LANGUAGE alt_lang2 HANDLER plpgsql_call_handler; -ALTER LANGUAGE alt_lang1 OWNER TO regress_alter_generic_user1; -- OK -ALTER LANGUAGE alt_lang2 OWNER TO regress_alter_generic_user2; -- OK -SET SESSION AUTHORIZATION regress_alter_generic_user1; -ALTER LANGUAGE alt_lang1 RENAME TO alt_lang2; -- failed (name conflict) -ERROR: language "alt_lang2" already exists -ALTER 
LANGUAGE alt_lang2 RENAME TO alt_lang3; -- failed (not owner) -ERROR: must be owner of language alt_lang2 -ALTER LANGUAGE alt_lang1 RENAME TO alt_lang3; -- OK -ALTER LANGUAGE alt_lang2 OWNER TO regress_alter_generic_user3; -- failed (not owner) -ERROR: must be owner of language alt_lang2 -ALTER LANGUAGE alt_lang3 OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" -ALTER LANGUAGE alt_lang3 OWNER TO regress_alter_generic_user3; -- OK -RESET SESSION AUTHORIZATION; -SELECT lanname, a.rolname - FROM pg_language l, pg_authid a - WHERE l.lanowner = a.oid AND l.lanname like 'alt_lang%' - ORDER BY lanname; - lanname | rolname ------------+----------------------------- - alt_lang2 | regress_alter_generic_user2 - alt_lang3 | regress_alter_generic_user3 -(2 rows) - --- --- Operator --- -SET SESSION AUTHORIZATION regress_alter_generic_user1; -CREATE OPERATOR @-@ ( leftarg = int4, rightarg = int4, procedure = int4mi ); -CREATE OPERATOR @+@ ( leftarg = int4, rightarg = int4, procedure = int4pl ); -ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" -ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_generic_user3; -- OK -ALTER OPERATOR @-@(int4, int4) SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_generic_user2; -CREATE OPERATOR @-@ ( leftarg = int4, rightarg = int4, procedure = int4mi ); -ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_generic_user2; -- failed (not owner) -ERROR: must be owner of operator @+@ -ALTER OPERATOR @-@(int4, int4) OWNER TO regress_alter_generic_user3; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user3" -ALTER OPERATOR @+@(int4, int4) SET SCHEMA alt_nsp2; -- failed (not owner) -ERROR: must be owner of operator @+@ --- can't test this: the error message includes the raw oid of namespace --- ALTER OPERATOR @-@(int4, int4) SET SCHEMA alt_nsp2; -- failed (name conflict) -RESET SESSION AUTHORIZATION; -SELECT n.nspname, oprname, a.rolname, - oprleft::regtype, oprright::regtype, oprcode::regproc - FROM pg_operator o, pg_namespace n, pg_authid a - WHERE o.oprnamespace = n.oid AND o.oprowner = a.oid - AND n.nspname IN ('alt_nsp1', 'alt_nsp2') - ORDER BY nspname, oprname; - nspname | oprname | rolname | oprleft | oprright | oprcode -----------+---------+-----------------------------+---------+----------+--------- - alt_nsp1 | @+@ | regress_alter_generic_user3 | integer | integer | int4pl - alt_nsp1 | @-@ | regress_alter_generic_user2 | integer | integer | int4mi - alt_nsp2 | @-@ | regress_alter_generic_user1 | integer | integer | int4mi -(3 rows) - --- --- OpFamily and OpClass --- -CREATE OPERATOR FAMILY alt_opf1 USING hash; -CREATE OPERATOR FAMILY alt_opf2 USING hash; -ALTER OPERATOR FAMILY alt_opf1 USING hash OWNER TO regress_alter_generic_user1; -ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user1; -CREATE OPERATOR CLASS alt_opc1 FOR TYPE uuid USING hash AS STORAGE uuid; -CREATE OPERATOR CLASS alt_opc2 FOR TYPE uuid USING hash AS STORAGE uuid; -ALTER OPERATOR CLASS alt_opc1 USING hash OWNER TO regress_alter_generic_user1; -ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user1; -SET SESSION AUTHORIZATION regress_alter_generic_user1; -ALTER OPERATOR FAMILY alt_opf1 USING hash RENAME TO alt_opf2; -- failed (name conflict) -ERROR: operator family "alt_opf2" for access method 
"hash" already exists in schema "alt_nsp1" -ALTER OPERATOR FAMILY alt_opf1 USING hash RENAME TO alt_opf3; -- OK -ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" -ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user3; -- OK -ALTER OPERATOR FAMILY alt_opf2 USING hash SET SCHEMA alt_nsp2; -- OK -ALTER OPERATOR CLASS alt_opc1 USING hash RENAME TO alt_opc2; -- failed (name conflict) -ERROR: operator class "alt_opc2" for access method "hash" already exists in schema "alt_nsp1" -ALTER OPERATOR CLASS alt_opc1 USING hash RENAME TO alt_opc3; -- OK -ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" -ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user3; -- OK -ALTER OPERATOR CLASS alt_opc2 USING hash SET SCHEMA alt_nsp2; -- OK -RESET SESSION AUTHORIZATION; -CREATE OPERATOR FAMILY alt_opf1 USING hash; -CREATE OPERATOR FAMILY alt_opf2 USING hash; -ALTER OPERATOR FAMILY alt_opf1 USING hash OWNER TO regress_alter_generic_user2; -ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user2; -CREATE OPERATOR CLASS alt_opc1 FOR TYPE macaddr USING hash AS STORAGE macaddr; -CREATE OPERATOR CLASS alt_opc2 FOR TYPE macaddr USING hash AS STORAGE macaddr; -ALTER OPERATOR CLASS alt_opc1 USING hash OWNER TO regress_alter_generic_user2; -ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user2; -SET SESSION AUTHORIZATION regress_alter_generic_user2; -ALTER OPERATOR FAMILY alt_opf3 USING hash RENAME TO alt_opf4; -- failed (not owner) -ERROR: must be owner of operator family alt_opf3 -ALTER OPERATOR FAMILY alt_opf1 USING hash RENAME TO alt_opf4; -- OK -ALTER OPERATOR FAMILY alt_opf3 USING hash OWNER TO regress_alter_generic_user2; -- failed (not owner) -ERROR: must be owner of operator family alt_opf3 -ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user3; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user3" -ALTER OPERATOR FAMILY alt_opf3 USING hash SET SCHEMA alt_nsp2; -- failed (not owner) -ERROR: must be owner of operator family alt_opf3 -ALTER OPERATOR FAMILY alt_opf2 USING hash SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: operator family "alt_opf2" for access method "hash" already exists in schema "alt_nsp2" -ALTER OPERATOR CLASS alt_opc3 USING hash RENAME TO alt_opc4; -- failed (not owner) -ERROR: must be owner of operator class alt_opc3 -ALTER OPERATOR CLASS alt_opc1 USING hash RENAME TO alt_opc4; -- OK -ALTER OPERATOR CLASS alt_opc3 USING hash OWNER TO regress_alter_generic_user2; -- failed (not owner) -ERROR: must be owner of operator class alt_opc3 -ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user3; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user3" -ALTER OPERATOR CLASS alt_opc3 USING hash SET SCHEMA alt_nsp2; -- failed (not owner) -ERROR: must be owner of operator class alt_opc3 -ALTER OPERATOR CLASS alt_opc2 USING hash SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: operator class "alt_opc2" for access method "hash" already exists in schema "alt_nsp2" -RESET SESSION AUTHORIZATION; -SELECT nspname, opfname, amname, rolname - FROM pg_opfamily o, pg_am m, pg_namespace n, pg_authid a - WHERE o.opfmethod = m.oid AND 
o.opfnamespace = n.oid AND o.opfowner = a.oid - AND n.nspname IN ('alt_nsp1', 'alt_nsp2') - AND NOT opfname LIKE 'alt_opc%' - ORDER BY nspname, opfname; - nspname | opfname | amname | rolname -----------+----------+--------+----------------------------- - alt_nsp1 | alt_opf2 | hash | regress_alter_generic_user2 - alt_nsp1 | alt_opf3 | hash | regress_alter_generic_user1 - alt_nsp1 | alt_opf4 | hash | regress_alter_generic_user2 - alt_nsp2 | alt_opf2 | hash | regress_alter_generic_user3 -(4 rows) - -SELECT nspname, opcname, amname, rolname - FROM pg_opclass o, pg_am m, pg_namespace n, pg_authid a - WHERE o.opcmethod = m.oid AND o.opcnamespace = n.oid AND o.opcowner = a.oid - AND n.nspname IN ('alt_nsp1', 'alt_nsp2') - ORDER BY nspname, opcname; - nspname | opcname | amname | rolname -----------+----------+--------+----------------------------- - alt_nsp1 | alt_opc2 | hash | regress_alter_generic_user2 - alt_nsp1 | alt_opc3 | hash | regress_alter_generic_user1 - alt_nsp1 | alt_opc4 | hash | regress_alter_generic_user2 - alt_nsp2 | alt_opc2 | hash | regress_alter_generic_user3 -(4 rows) - --- ALTER OPERATOR FAMILY ... ADD/DROP --- Should work. Textbook case of CREATE / ALTER ADD / ALTER DROP / DROP -BEGIN TRANSACTION; -CREATE OPERATOR FAMILY alt_opf4 USING btree; -ALTER OPERATOR FAMILY alt_opf4 USING btree ADD - -- int4 vs int2 - OPERATOR 1 < (int4, int2) , - OPERATOR 2 <= (int4, int2) , - OPERATOR 3 = (int4, int2) , - OPERATOR 4 >= (int4, int2) , - OPERATOR 5 > (int4, int2) , - FUNCTION 1 btint42cmp(int4, int2); -ALTER OPERATOR FAMILY alt_opf4 USING btree DROP - -- int4 vs int2 - OPERATOR 1 (int4, int2) , - OPERATOR 2 (int4, int2) , - OPERATOR 3 (int4, int2) , - OPERATOR 4 (int4, int2) , - OPERATOR 5 (int4, int2) , - FUNCTION 1 (int4, int2) ; -DROP OPERATOR FAMILY alt_opf4 USING btree; -ROLLBACK; --- Should fail. Invalid values for ALTER OPERATOR FAMILY .. ADD / DROP -CREATE OPERATOR FAMILY alt_opf4 USING btree; -ALTER OPERATOR FAMILY alt_opf4 USING invalid_index_method ADD OPERATOR 1 < (int4, int2); -- invalid indexing_method -ERROR: access method "invalid_index_method" does not exist -ALTER OPERATOR FAMILY alt_opf4 USING btree ADD OPERATOR 6 < (int4, int2); -- operator number should be between 1 and 5 -ERROR: invalid operator number 6, must be between 1 and 5 -ALTER OPERATOR FAMILY alt_opf4 USING btree ADD OPERATOR 0 < (int4, int2); -- operator number should be between 1 and 5 -ERROR: invalid operator number 0, must be between 1 and 5 -ALTER OPERATOR FAMILY alt_opf4 USING btree ADD OPERATOR 1 < ; -- operator without argument types -ERROR: operator argument types must be specified in ALTER OPERATOR FAMILY -ALTER OPERATOR FAMILY alt_opf4 USING btree ADD FUNCTION 0 btint42cmp(int4, int2); -- invalid options parsing function -ERROR: invalid function number 0, must be between 1 and 5 -ALTER OPERATOR FAMILY alt_opf4 USING btree ADD FUNCTION 6 btint42cmp(int4, int2); -- function number should be between 1 and 5 -ERROR: invalid function number 6, must be between 1 and 5 -ALTER OPERATOR FAMILY alt_opf4 USING btree ADD STORAGE invalid_storage; -- Ensure STORAGE is not a part of ALTER OPERATOR FAMILY -ERROR: STORAGE cannot be specified in ALTER OPERATOR FAMILY -DROP OPERATOR FAMILY alt_opf4 USING btree; --- Should fail. Need to be SUPERUSER to do ALTER OPERATOR FAMILY .. 
ADD / DROP -BEGIN TRANSACTION; -CREATE ROLE regress_alter_generic_user5 NOSUPERUSER; -CREATE OPERATOR FAMILY alt_opf5 USING btree; -SET ROLE regress_alter_generic_user5; -ALTER OPERATOR FAMILY alt_opf5 USING btree ADD OPERATOR 1 < (int4, int2), FUNCTION 1 btint42cmp(int4, int2); -ERROR: must be superuser to alter an operator family -RESET ROLE; -ERROR: current transaction is aborted, commands ignored until end of transaction block -DROP OPERATOR FAMILY alt_opf5 USING btree; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- Should fail. Need rights to namespace for ALTER OPERATOR FAMILY .. ADD / DROP -BEGIN TRANSACTION; -CREATE ROLE regress_alter_generic_user6; -CREATE SCHEMA alt_nsp6; -REVOKE ALL ON SCHEMA alt_nsp6 FROM regress_alter_generic_user6; -CREATE OPERATOR FAMILY alt_nsp6.alt_opf6 USING btree; -SET ROLE regress_alter_generic_user6; -ALTER OPERATOR FAMILY alt_nsp6.alt_opf6 USING btree ADD OPERATOR 1 < (int4, int2); -ERROR: permission denied for schema alt_nsp6 -ROLLBACK; --- Should fail. Only two arguments required for ALTER OPERATOR FAMILY ... DROP OPERATOR -CREATE OPERATOR FAMILY alt_opf7 USING btree; -ALTER OPERATOR FAMILY alt_opf7 USING btree ADD OPERATOR 1 < (int4, int2); -ALTER OPERATOR FAMILY alt_opf7 USING btree DROP OPERATOR 1 (int4, int2, int8); -ERROR: one or two argument types must be specified -DROP OPERATOR FAMILY alt_opf7 USING btree; --- Should work. During ALTER OPERATOR FAMILY ... DROP OPERATOR --- when left type is the same as right type, a DROP with only one argument type should work -CREATE OPERATOR FAMILY alt_opf8 USING btree; -ALTER OPERATOR FAMILY alt_opf8 USING btree ADD OPERATOR 1 < (int4, int4); -DROP OPERATOR FAMILY alt_opf8 USING btree; --- Should work. Textbook case of ALTER OPERATOR FAMILY ... ADD OPERATOR with FOR ORDER BY -CREATE OPERATOR FAMILY alt_opf9 USING gist; -ALTER OPERATOR FAMILY alt_opf9 USING gist ADD OPERATOR 1 < (int4, int4) FOR ORDER BY float_ops; -DROP OPERATOR FAMILY alt_opf9 USING gist; --- Should fail. Ensure correct ordering methods in ALTER OPERATOR FAMILY ... ADD OPERATOR .. FOR ORDER BY -CREATE OPERATOR FAMILY alt_opf10 USING btree; -ALTER OPERATOR FAMILY alt_opf10 USING btree ADD OPERATOR 1 < (int4, int4) FOR ORDER BY float_ops; -ERROR: access method "btree" does not support ordering operators -DROP OPERATOR FAMILY alt_opf10 USING btree; --- Should work. Textbook case of ALTER OPERATOR FAMILY ... ADD OPERATOR with FOR ORDER BY -CREATE OPERATOR FAMILY alt_opf11 USING gist; -ALTER OPERATOR FAMILY alt_opf11 USING gist ADD OPERATOR 1 < (int4, int4) FOR ORDER BY float_ops; -ALTER OPERATOR FAMILY alt_opf11 USING gist DROP OPERATOR 1 (int4, int4); -DROP OPERATOR FAMILY alt_opf11 USING gist; --- Should fail. btree comparison functions should return INTEGER in ALTER OPERATOR FAMILY ... ADD FUNCTION -BEGIN TRANSACTION; -CREATE OPERATOR FAMILY alt_opf12 USING btree; -CREATE FUNCTION fn_opf12 (int4, int2) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL; -ALTER OPERATOR FAMILY alt_opf12 USING btree ADD FUNCTION 1 fn_opf12(int4, int2); -ERROR: btree comparison functions must return integer -DROP OPERATOR FAMILY alt_opf12 USING btree; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- Should fail. hash comparison functions should return INTEGER in ALTER OPERATOR FAMILY ... 
ADD FUNCTION -BEGIN TRANSACTION; -CREATE OPERATOR FAMILY alt_opf13 USING hash; -CREATE FUNCTION fn_opf13 (int4) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL; -ALTER OPERATOR FAMILY alt_opf13 USING hash ADD FUNCTION 1 fn_opf13(int4); -ERROR: hash function 1 must return integer -DROP OPERATOR FAMILY alt_opf13 USING hash; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- Should fail. btree comparison functions should have two arguments in ALTER OPERATOR FAMILY ... ADD FUNCTION -BEGIN TRANSACTION; -CREATE OPERATOR FAMILY alt_opf14 USING btree; -CREATE FUNCTION fn_opf14 (int4) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL; -ALTER OPERATOR FAMILY alt_opf14 USING btree ADD FUNCTION 1 fn_opf14(int4); -ERROR: btree comparison functions must have two arguments -DROP OPERATOR FAMILY alt_opf14 USING btree; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- Should fail. hash comparison functions should have one argument in ALTER OPERATOR FAMILY ... ADD FUNCTION -BEGIN TRANSACTION; -CREATE OPERATOR FAMILY alt_opf15 USING hash; -CREATE FUNCTION fn_opf15 (int4, int2) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL; -ALTER OPERATOR FAMILY alt_opf15 USING hash ADD FUNCTION 1 fn_opf15(int4, int2); -ERROR: hash function 1 must have one argument -DROP OPERATOR FAMILY alt_opf15 USING hash; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- Should fail. In gist throw an error when giving different data types for function argument --- without defining left / right type in ALTER OPERATOR FAMILY ... ADD FUNCTION -CREATE OPERATOR FAMILY alt_opf16 USING gist; -ALTER OPERATOR FAMILY alt_opf16 USING gist ADD FUNCTION 1 btint42cmp(int4, int2); -ERROR: associated data types must be specified for index support function -DROP OPERATOR FAMILY alt_opf16 USING gist; --- Should fail. duplicate operator number / function number in ALTER OPERATOR FAMILY ... 
ADD FUNCTION -CREATE OPERATOR FAMILY alt_opf17 USING btree; -ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4), OPERATOR 1 < (int4, int4); -- operator # appears twice in same statement -ERROR: operator number 1 for (integer,integer) appears more than once -ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4); -- operator 1 requested first-time -ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4); -- operator 1 requested again in separate statement -ERROR: operator 1(integer,integer) already exists in operator family "alt_opf17" -ALTER OPERATOR FAMILY alt_opf17 USING btree ADD - OPERATOR 1 < (int4, int2) , - OPERATOR 2 <= (int4, int2) , - OPERATOR 3 = (int4, int2) , - OPERATOR 4 >= (int4, int2) , - OPERATOR 5 > (int4, int2) , - FUNCTION 1 btint42cmp(int4, int2) , - FUNCTION 1 btint42cmp(int4, int2); -- procedure 1 appears twice in same statement -ERROR: function number 1 for (integer,smallint) appears more than once -ALTER OPERATOR FAMILY alt_opf17 USING btree ADD - OPERATOR 1 < (int4, int2) , - OPERATOR 2 <= (int4, int2) , - OPERATOR 3 = (int4, int2) , - OPERATOR 4 >= (int4, int2) , - OPERATOR 5 > (int4, int2) , - FUNCTION 1 btint42cmp(int4, int2); -- procedure 1 appears first time -ALTER OPERATOR FAMILY alt_opf17 USING btree ADD - OPERATOR 1 < (int4, int2) , - OPERATOR 2 <= (int4, int2) , - OPERATOR 3 = (int4, int2) , - OPERATOR 4 >= (int4, int2) , - OPERATOR 5 > (int4, int2) , - FUNCTION 1 btint42cmp(int4, int2); -- procedure 1 requested again in separate statement -ERROR: operator 1(integer,smallint) already exists in operator family "alt_opf17" -DROP OPERATOR FAMILY alt_opf17 USING btree; --- Should fail. Ensure that DROP requests for missing OPERATOR / FUNCTIONS --- return appropriate message in ALTER OPERATOR FAMILY ... DROP OPERATOR / FUNCTION -CREATE OPERATOR FAMILY alt_opf18 USING btree; -ALTER OPERATOR FAMILY alt_opf18 USING btree DROP OPERATOR 1 (int4, int4); -ERROR: operator 1(integer,integer) does not exist in operator family "alt_opf18" -ALTER OPERATOR FAMILY alt_opf18 USING btree ADD - OPERATOR 1 < (int4, int2) , - OPERATOR 2 <= (int4, int2) , - OPERATOR 3 = (int4, int2) , - OPERATOR 4 >= (int4, int2) , - OPERATOR 5 > (int4, int2) , - FUNCTION 1 btint42cmp(int4, int2); --- Should fail. Not allowed to have cross-type equalimage function. -ALTER OPERATOR FAMILY alt_opf18 USING btree - ADD FUNCTION 4 (int4, int2) btequalimage(oid); -ERROR: btree equal image functions must not be cross-type -ALTER OPERATOR FAMILY alt_opf18 USING btree DROP FUNCTION 2 (int4, int4); -ERROR: function 2(integer,integer) does not exist in operator family "alt_opf18" -DROP OPERATOR FAMILY alt_opf18 USING btree; --- Should fail. Invalid opclass options function (#5) specifications. -CREATE OPERATOR FAMILY alt_opf19 USING btree; -ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 test_opclass_options_func(internal, text[], bool); -ERROR: function test_opclass_options_func(internal, text[], boolean) does not exist -ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 (int4) btint42cmp(int4, int2); -ERROR: invalid operator class options parsing function -HINT: Valid signature of operator class options parsing function is (internal) RETURNS void. 
-ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 (int4, int2) btint42cmp(int4, int2); -ERROR: left and right associated data types for operator class options parsing functions must match -ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 (int4) test_opclass_options_func(internal); -- Ok -ALTER OPERATOR FAMILY alt_opf19 USING btree DROP FUNCTION 5 (int4, int4); -DROP OPERATOR FAMILY alt_opf19 USING btree; --- --- Statistics --- -SET SESSION AUTHORIZATION regress_alter_generic_user1; -CREATE TABLE alt_regress_1 (a INTEGER, b INTEGER); -CREATE STATISTICS alt_stat1 ON a, b FROM alt_regress_1; -CREATE STATISTICS alt_stat2 ON a, b FROM alt_regress_1; -ALTER STATISTICS alt_stat1 RENAME TO alt_stat2; -- failed (name conflict) -ERROR: statistics object "alt_stat2" already exists in schema "alt_nsp1" -ALTER STATISTICS alt_stat1 RENAME TO alt_stat3; -- OK -ALTER STATISTICS alt_stat2 OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" -ALTER STATISTICS alt_stat2 OWNER TO regress_alter_generic_user3; -- OK -ALTER STATISTICS alt_stat2 SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_generic_user2; -CREATE TABLE alt_regress_2 (a INTEGER, b INTEGER); -CREATE STATISTICS alt_stat1 ON a, b FROM alt_regress_2; -CREATE STATISTICS alt_stat2 ON a, b FROM alt_regress_2; -ALTER STATISTICS alt_stat3 RENAME TO alt_stat4; -- failed (not owner) -ERROR: must be owner of statistics object alt_stat3 -ALTER STATISTICS alt_stat1 RENAME TO alt_stat4; -- OK -ALTER STATISTICS alt_stat3 OWNER TO regress_alter_generic_user2; -- failed (not owner) -ERROR: must be owner of statistics object alt_stat3 -ALTER STATISTICS alt_stat2 OWNER TO regress_alter_generic_user3; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user3" -ALTER STATISTICS alt_stat3 SET SCHEMA alt_nsp2; -- failed (not owner) -ERROR: must be owner of statistics object alt_stat3 -ALTER STATISTICS alt_stat2 SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: statistics object "alt_stat2" already exists in schema "alt_nsp2" -RESET SESSION AUTHORIZATION; -SELECT nspname, stxname, rolname - FROM pg_statistic_ext s, pg_namespace n, pg_authid a - WHERE s.stxnamespace = n.oid AND s.stxowner = a.oid - AND n.nspname in ('alt_nsp1', 'alt_nsp2') - ORDER BY nspname, stxname; - nspname | stxname | rolname -----------+-----------+----------------------------- - alt_nsp1 | alt_stat2 | regress_alter_generic_user2 - alt_nsp1 | alt_stat3 | regress_alter_generic_user1 - alt_nsp1 | alt_stat4 | regress_alter_generic_user2 - alt_nsp2 | alt_stat2 | regress_alter_generic_user3 -(4 rows) - --- --- Text Search Dictionary --- -SET SESSION AUTHORIZATION regress_alter_generic_user1; -CREATE TEXT SEARCH DICTIONARY alt_ts_dict1 (template=simple); -CREATE TEXT SEARCH DICTIONARY alt_ts_dict2 (template=simple); -ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 RENAME TO alt_ts_dict2; -- failed (name conflict) -ERROR: text search dictionary "alt_ts_dict2" already exists in schema "alt_nsp1" -ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 RENAME TO alt_ts_dict3; -- OK -ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" -ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_generic_user3; -- OK -ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_generic_user2; -CREATE 
TEXT SEARCH DICTIONARY alt_ts_dict1 (template=simple); -CREATE TEXT SEARCH DICTIONARY alt_ts_dict2 (template=simple); -ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 RENAME TO alt_ts_dict4; -- failed (not owner) -ERROR: must be owner of text search dictionary alt_ts_dict3 -ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 RENAME TO alt_ts_dict4; -- OK -ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 OWNER TO regress_alter_generic_user2; -- failed (not owner) -ERROR: must be owner of text search dictionary alt_ts_dict3 -ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_generic_user3; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user3" -ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 SET SCHEMA alt_nsp2; -- failed (not owner) -ERROR: must be owner of text search dictionary alt_ts_dict3 -ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: text search dictionary "alt_ts_dict2" already exists in schema "alt_nsp2" -RESET SESSION AUTHORIZATION; -SELECT nspname, dictname, rolname - FROM pg_ts_dict t, pg_namespace n, pg_authid a - WHERE t.dictnamespace = n.oid AND t.dictowner = a.oid - AND n.nspname in ('alt_nsp1', 'alt_nsp2') - ORDER BY nspname, dictname; - nspname | dictname | rolname -----------+--------------+----------------------------- - alt_nsp1 | alt_ts_dict2 | regress_alter_generic_user2 - alt_nsp1 | alt_ts_dict3 | regress_alter_generic_user1 - alt_nsp1 | alt_ts_dict4 | regress_alter_generic_user2 - alt_nsp2 | alt_ts_dict2 | regress_alter_generic_user3 -(4 rows) - --- --- Text Search Configuration --- -SET SESSION AUTHORIZATION regress_alter_generic_user1; -CREATE TEXT SEARCH CONFIGURATION alt_ts_conf1 (copy=english); -CREATE TEXT SEARCH CONFIGURATION alt_ts_conf2 (copy=english); -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 RENAME TO alt_ts_conf2; -- failed (name conflict) -ERROR: text search configuration "alt_ts_conf2" already exists in schema "alt_nsp1" -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 RENAME TO alt_ts_conf3; -- OK -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_generic_user3; -- OK -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_generic_user2; -CREATE TEXT SEARCH CONFIGURATION alt_ts_conf1 (copy=english); -CREATE TEXT SEARCH CONFIGURATION alt_ts_conf2 (copy=english); -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 RENAME TO alt_ts_conf4; -- failed (not owner) -ERROR: must be owner of text search configuration alt_ts_conf3 -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 RENAME TO alt_ts_conf4; -- OK -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 OWNER TO regress_alter_generic_user2; -- failed (not owner) -ERROR: must be owner of text search configuration alt_ts_conf3 -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_generic_user3; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user3" -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 SET SCHEMA alt_nsp2; -- failed (not owner) -ERROR: must be owner of text search configuration alt_ts_conf3 -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: text search configuration "alt_ts_conf2" already exists in schema "alt_nsp2" -RESET SESSION AUTHORIZATION; -SELECT nspname, cfgname, rolname - FROM 
pg_ts_config t, pg_namespace n, pg_authid a - WHERE t.cfgnamespace = n.oid AND t.cfgowner = a.oid - AND n.nspname in ('alt_nsp1', 'alt_nsp2') - ORDER BY nspname, cfgname; - nspname | cfgname | rolname -----------+--------------+----------------------------- - alt_nsp1 | alt_ts_conf2 | regress_alter_generic_user2 - alt_nsp1 | alt_ts_conf3 | regress_alter_generic_user1 - alt_nsp1 | alt_ts_conf4 | regress_alter_generic_user2 - alt_nsp2 | alt_ts_conf2 | regress_alter_generic_user3 -(4 rows) - --- --- Text Search Template --- -CREATE TEXT SEARCH TEMPLATE alt_ts_temp1 (lexize=dsimple_lexize); -CREATE TEXT SEARCH TEMPLATE alt_ts_temp2 (lexize=dsimple_lexize); -ALTER TEXT SEARCH TEMPLATE alt_ts_temp1 RENAME TO alt_ts_temp2; -- failed (name conflict) -ERROR: text search template "alt_ts_temp2" already exists in schema "alt_nsp1" -ALTER TEXT SEARCH TEMPLATE alt_ts_temp1 RENAME TO alt_ts_temp3; -- OK -ALTER TEXT SEARCH TEMPLATE alt_ts_temp2 SET SCHEMA alt_nsp2; -- OK -CREATE TEXT SEARCH TEMPLATE alt_ts_temp2 (lexize=dsimple_lexize); -ALTER TEXT SEARCH TEMPLATE alt_ts_temp2 SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: text search template "alt_ts_temp2" already exists in schema "alt_nsp2" --- invalid: non-lowercase quoted identifiers -CREATE TEXT SEARCH TEMPLATE tstemp_case ("Init" = init_function); -ERROR: text search template parameter "Init" not recognized -SELECT nspname, tmplname - FROM pg_ts_template t, pg_namespace n - WHERE t.tmplnamespace = n.oid AND nspname like 'alt_nsp%' - ORDER BY nspname, tmplname; - nspname | tmplname -----------+-------------- - alt_nsp1 | alt_ts_temp2 - alt_nsp1 | alt_ts_temp3 - alt_nsp2 | alt_ts_temp2 -(3 rows) - --- --- Text Search Parser --- -CREATE TEXT SEARCH PARSER alt_ts_prs1 - (start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype); -CREATE TEXT SEARCH PARSER alt_ts_prs2 - (start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype); -ALTER TEXT SEARCH PARSER alt_ts_prs1 RENAME TO alt_ts_prs2; -- failed (name conflict) -ERROR: text search parser "alt_ts_prs2" already exists in schema "alt_nsp1" -ALTER TEXT SEARCH PARSER alt_ts_prs1 RENAME TO alt_ts_prs3; -- OK -ALTER TEXT SEARCH PARSER alt_ts_prs2 SET SCHEMA alt_nsp2; -- OK -CREATE TEXT SEARCH PARSER alt_ts_prs2 - (start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype); -ALTER TEXT SEARCH PARSER alt_ts_prs2 SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: text search parser "alt_ts_prs2" already exists in schema "alt_nsp2" --- invalid: non-lowercase quoted identifiers -CREATE TEXT SEARCH PARSER tspars_case ("Start" = start_function); -ERROR: text search parser parameter "Start" not recognized -SELECT nspname, prsname - FROM pg_ts_parser t, pg_namespace n - WHERE t.prsnamespace = n.oid AND nspname like 'alt_nsp%' - ORDER BY nspname, prsname; - nspname | prsname -----------+------------- - alt_nsp1 | alt_ts_prs2 - alt_nsp1 | alt_ts_prs3 - alt_nsp2 | alt_ts_prs2 -(3 rows) - ---- ---- Cleanup resources ---- -DROP FOREIGN DATA WRAPPER alt_fdw2 CASCADE; -NOTICE: drop cascades to server alt_fserv2 -DROP FOREIGN DATA WRAPPER alt_fdw3 CASCADE; -NOTICE: drop cascades to server alt_fserv3 -DROP LANGUAGE alt_lang2 CASCADE; -DROP LANGUAGE alt_lang3 CASCADE; -DROP SCHEMA alt_nsp1 CASCADE; -NOTICE: drop cascades to 28 other objects -DETAIL: drop cascades to function alt_func3(integer) -drop cascades to function alt_agg3(integer) -drop cascades to function alt_func4(integer) -drop cascades to function 
alt_func2(integer) -drop cascades to function alt_agg4(integer) -drop cascades to function alt_agg2(integer) -drop cascades to conversion alt_conv3 -drop cascades to conversion alt_conv4 -drop cascades to conversion alt_conv2 -drop cascades to operator @+@(integer,integer) -drop cascades to operator @-@(integer,integer) -drop cascades to operator family alt_opf3 for access method hash -drop cascades to operator family alt_opc1 for access method hash -drop cascades to operator family alt_opc2 for access method hash -drop cascades to operator family alt_opf4 for access method hash -drop cascades to operator family alt_opf2 for access method hash -drop cascades to table alt_regress_1 -drop cascades to table alt_regress_2 -drop cascades to text search dictionary alt_ts_dict3 -drop cascades to text search dictionary alt_ts_dict4 -drop cascades to text search dictionary alt_ts_dict2 -drop cascades to text search configuration alt_ts_conf3 -drop cascades to text search configuration alt_ts_conf4 -drop cascades to text search configuration alt_ts_conf2 -drop cascades to text search template alt_ts_temp3 -drop cascades to text search template alt_ts_temp2 -drop cascades to text search parser alt_ts_prs3 -drop cascades to text search parser alt_ts_prs2 -DROP SCHEMA alt_nsp2 CASCADE; -NOTICE: drop cascades to 9 other objects -DETAIL: drop cascades to function alt_nsp2.alt_func2(integer) -drop cascades to function alt_nsp2.alt_agg2(integer) -drop cascades to conversion alt_nsp2.alt_conv2 -drop cascades to operator alt_nsp2.@-@(integer,integer) -drop cascades to operator family alt_nsp2.alt_opf2 for access method hash -drop cascades to text search dictionary alt_nsp2.alt_ts_dict2 -drop cascades to text search configuration alt_nsp2.alt_ts_conf2 -drop cascades to text search template alt_nsp2.alt_ts_temp2 -drop cascades to text search parser alt_nsp2.alt_ts_prs2 -DROP USER regress_alter_generic_user1; -DROP USER regress_alter_generic_user2; -DROP USER regress_alter_generic_user3; +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/alter_operator.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/alter_operator.out --- /tmp/cirrus-ci-build/src/test/regress/expected/alter_operator.out 2024-03-13 23:12:37.622314000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/alter_operator.out 2024-03-13 23:14:29.462493000 +0000 @@ -1,267 +1,2 @@ -CREATE FUNCTION alter_op_test_fn(boolean, boolean) -RETURNS boolean AS $$ SELECT NULL::BOOLEAN; $$ LANGUAGE sql IMMUTABLE; -CREATE FUNCTION customcontsel(internal, oid, internal, integer) -RETURNS float8 AS 'contsel' LANGUAGE internal STABLE STRICT; -CREATE OPERATOR === ( - LEFTARG = boolean, - RIGHTARG = boolean, - PROCEDURE = alter_op_test_fn, - COMMUTATOR = ===, - NEGATOR = !==, - RESTRICT = customcontsel, - JOIN = contjoinsel, - HASHES, MERGES -); -SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype -FROM pg_depend -WHERE classid = 'pg_operator'::regclass AND - objid = '===(bool,bool)'::regoperator -ORDER BY 1; - ref | deptype --------------------------------------------------------+--------- - function alter_op_test_fn(boolean,boolean) | n - function customcontsel(internal,oid,internal,integer) | n - schema public | n -(3 rows) - --- --- Test resetting and setting restrict and join attributes. 
--- -ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = NONE); -ALTER OPERATOR === (boolean, boolean) SET (JOIN = NONE); -SELECT oprrest, oprjoin FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'boolean'::regtype; - oprrest | oprjoin ----------+--------- - - | - -(1 row) - -SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype -FROM pg_depend -WHERE classid = 'pg_operator'::regclass AND - objid = '===(bool,bool)'::regoperator -ORDER BY 1; - ref | deptype ---------------------------------------------+--------- - function alter_op_test_fn(boolean,boolean) | n - schema public | n -(2 rows) - -ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = contsel); -ALTER OPERATOR === (boolean, boolean) SET (JOIN = contjoinsel); -SELECT oprrest, oprjoin FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'boolean'::regtype; - oprrest | oprjoin ----------+------------- - contsel | contjoinsel -(1 row) - -SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype -FROM pg_depend -WHERE classid = 'pg_operator'::regclass AND - objid = '===(bool,bool)'::regoperator -ORDER BY 1; - ref | deptype ---------------------------------------------+--------- - function alter_op_test_fn(boolean,boolean) | n - schema public | n -(2 rows) - -ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = NONE, JOIN = NONE); -SELECT oprrest, oprjoin FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'boolean'::regtype; - oprrest | oprjoin ----------+--------- - - | - -(1 row) - -SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype -FROM pg_depend -WHERE classid = 'pg_operator'::regclass AND - objid = '===(bool,bool)'::regoperator -ORDER BY 1; - ref | deptype ---------------------------------------------+--------- - function alter_op_test_fn(boolean,boolean) | n - schema public | n -(2 rows) - -ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = customcontsel, JOIN = contjoinsel); -SELECT oprrest, oprjoin FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'boolean'::regtype; - oprrest | oprjoin ----------------+------------- - customcontsel | contjoinsel -(1 row) - -SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype -FROM pg_depend -WHERE classid = 'pg_operator'::regclass AND - objid = '===(bool,bool)'::regoperator -ORDER BY 1; - ref | deptype --------------------------------------------------------+--------- - function alter_op_test_fn(boolean,boolean) | n - function customcontsel(internal,oid,internal,integer) | n - schema public | n -(3 rows) - --- --- Test invalid options. --- -ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = non_existent_func); -ERROR: function non_existent_func(internal, oid, internal, integer) does not exist -ALTER OPERATOR === (boolean, boolean) SET (JOIN = non_existent_func); -ERROR: function non_existent_func(internal, oid, internal, smallint, internal) does not exist --- invalid: non-lowercase quoted identifiers -ALTER OPERATOR & (bit, bit) SET ("Restrict" = _int_contsel, "Join" = _int_contjoinsel); -ERROR: operator attribute "Restrict" not recognized --- --- Test permission check. Must be owner to ALTER OPERATOR. 
--- -CREATE USER regress_alter_op_user; -SET SESSION AUTHORIZATION regress_alter_op_user; -ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = NONE); -ERROR: must be owner of operator === -RESET SESSION AUTHORIZATION; --- --- Test setting commutator, negator, merges, and hashes attributes, --- which can only be set if not already set --- -CREATE FUNCTION alter_op_test_fn_bool_real(boolean, real) -RETURNS boolean AS $$ SELECT NULL::BOOLEAN; $$ LANGUAGE sql IMMUTABLE; -CREATE FUNCTION alter_op_test_fn_real_bool(real, boolean) -RETURNS boolean AS $$ SELECT NULL::BOOLEAN; $$ LANGUAGE sql IMMUTABLE; --- operator -CREATE OPERATOR === ( - LEFTARG = boolean, - RIGHTARG = real, - PROCEDURE = alter_op_test_fn_bool_real -); --- commutator -CREATE OPERATOR ==== ( - LEFTARG = real, - RIGHTARG = boolean, - PROCEDURE = alter_op_test_fn_real_bool -); --- negator -CREATE OPERATOR !==== ( - LEFTARG = boolean, - RIGHTARG = real, - PROCEDURE = alter_op_test_fn_bool_real -); --- No-op setting already false hashes and merges to false works -ALTER OPERATOR === (boolean, real) SET (MERGES = false); -ALTER OPERATOR === (boolean, real) SET (HASHES = false); --- Test setting merges and hashes -ALTER OPERATOR === (boolean, real) SET (MERGES); -ALTER OPERATOR === (boolean, real) SET (HASHES); -SELECT oprcanmerge, oprcanhash -FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'real'::regtype; - oprcanmerge | oprcanhash --------------+------------ - t | t -(1 row) - --- Test setting commutator -ALTER OPERATOR === (boolean, real) SET (COMMUTATOR = ====); --- Check that oprcom has been set on both the operator and commutator, --- that they reference each other, and that the operator used is the existing --- one we created and not a new shell operator. -SELECT op.oprname AS operator_name, com.oprname AS commutator_name, - com.oprcode AS commutator_func - FROM pg_operator op - INNER JOIN pg_operator com ON (op.oid = com.oprcom AND op.oprcom = com.oid) - WHERE op.oprname = '===' - AND op.oprleft = 'boolean'::regtype AND op.oprright = 'real'::regtype; - operator_name | commutator_name | commutator_func ----------------+-----------------+---------------------------- - === | ==== | alter_op_test_fn_real_bool -(1 row) - --- Cannot set self as negator -ALTER OPERATOR === (boolean, real) SET (NEGATOR = ===); -ERROR: operator cannot be its own negator --- Test setting negator -ALTER OPERATOR === (boolean, real) SET (NEGATOR = !====); --- Check that oprnegate has been set on both the operator and negator, --- that they reference each other, and that the operator used is the existing --- one we created and not a new shell operator. 
-SELECT op.oprname AS operator_name, neg.oprname AS negator_name, - neg.oprcode AS negator_func - FROM pg_operator op - INNER JOIN pg_operator neg ON (op.oid = neg.oprnegate AND op.oprnegate = neg.oid) - WHERE op.oprname = '===' - AND op.oprleft = 'boolean'::regtype AND op.oprright = 'real'::regtype; - operator_name | negator_name | negator_func ----------------+--------------+---------------------------- - === | !==== | alter_op_test_fn_bool_real -(1 row) - --- Test that no-op set succeeds -ALTER OPERATOR === (boolean, real) SET (NEGATOR = !====); -ALTER OPERATOR === (boolean, real) SET (COMMUTATOR = ====); -ALTER OPERATOR === (boolean, real) SET (MERGES); -ALTER OPERATOR === (boolean, real) SET (HASHES); --- Check that the final state of the operator is as we expect -SELECT oprcanmerge, oprcanhash, - pg_describe_object('pg_operator'::regclass, oprcom, 0) AS commutator, - pg_describe_object('pg_operator'::regclass, oprnegate, 0) AS negator - FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'real'::regtype; - oprcanmerge | oprcanhash | commutator | negator --------------+------------+-----------------------------+------------------------------ - t | t | operator ====(real,boolean) | operator !====(boolean,real) -(1 row) - --- Cannot change commutator, negator, merges, and hashes when already set -CREATE OPERATOR @= ( - LEFTARG = real, - RIGHTARG = boolean, - PROCEDURE = alter_op_test_fn_real_bool -); -CREATE OPERATOR @!= ( - LEFTARG = boolean, - RIGHTARG = real, - PROCEDURE = alter_op_test_fn_bool_real -); -ALTER OPERATOR === (boolean, real) SET (COMMUTATOR = @=); -ERROR: operator attribute "commutator" cannot be changed if it has already been set -ALTER OPERATOR === (boolean, real) SET (NEGATOR = @!=); -ERROR: operator attribute "negator" cannot be changed if it has already been set -ALTER OPERATOR === (boolean, real) SET (MERGES = false); -ERROR: operator attribute "merges" cannot be changed if it has already been set -ALTER OPERATOR === (boolean, real) SET (HASHES = false); -ERROR: operator attribute "hashes" cannot be changed if it has already been set --- Cannot set an operator that already has a commutator as the commutator -ALTER OPERATOR @=(real, boolean) SET (COMMUTATOR = ===); -ERROR: commutator operator === is already the commutator of operator ==== --- Cannot set an operator that already has a negator as the negator -ALTER OPERATOR @!=(boolean, real) SET (NEGATOR = ===); -ERROR: negator operator === is already the negator of operator !==== --- Check no changes made -SELECT oprcanmerge, oprcanhash, - pg_describe_object('pg_operator'::regclass, oprcom, 0) AS commutator, - pg_describe_object('pg_operator'::regclass, oprnegate, 0) AS negator - FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'real'::regtype; - oprcanmerge | oprcanhash | commutator | negator --------------+------------+-----------------------------+------------------------------ - t | t | operator ====(real,boolean) | operator !====(boolean,real) -(1 row) - --- --- Clean up --- -DROP USER regress_alter_op_user; -DROP OPERATOR === (boolean, boolean); -DROP OPERATOR === (boolean, real); -DROP OPERATOR ==== (real, boolean); -DROP OPERATOR !==== (boolean, real); -DROP OPERATOR @= (real, boolean); -DROP OPERATOR @!= (boolean, real); -DROP FUNCTION customcontsel(internal, oid, internal, integer); -DROP FUNCTION alter_op_test_fn(boolean, boolean); -DROP FUNCTION alter_op_test_fn_bool_real(boolean, real); -DROP FUNCTION 
alter_op_test_fn_real_bool(real, boolean); +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/misc.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/misc.out --- /tmp/cirrus-ci-build/src/test/regress/expected/misc.out 2024-03-13 23:12:37.625329000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/misc.out 2024-03-13 23:14:29.460970000 +0000 @@ -1,398 +1,2 @@ --- --- MISC --- --- directory paths and dlsuffix are passed to us in environment variables -\getenv abs_srcdir PG_ABS_SRCDIR -\getenv abs_builddir PG_ABS_BUILDDIR -\getenv libdir PG_LIBDIR -\getenv dlsuffix PG_DLSUFFIX -\set regresslib :libdir '/regress' :dlsuffix -CREATE FUNCTION overpaid(emp) - RETURNS bool - AS :'regresslib' - LANGUAGE C STRICT; -CREATE FUNCTION reverse_name(name) - RETURNS name - AS :'regresslib' - LANGUAGE C STRICT; --- --- BTREE --- -UPDATE onek - SET unique1 = onek.unique1 + 1; -UPDATE onek - SET unique1 = onek.unique1 - 1; --- --- BTREE partial --- --- UPDATE onek2 --- SET unique1 = onek2.unique1 + 1; ---UPDATE onek2 --- SET unique1 = onek2.unique1 - 1; --- --- BTREE shutting out non-functional updates --- --- the following two tests seem to take a long time on some --- systems. This non-func update stuff needs to be examined --- more closely. - jolly (2/22/96) --- -SELECT two, stringu1, ten, string4 - INTO TABLE tmp - FROM onek; -UPDATE tmp - SET stringu1 = reverse_name(onek.stringu1) - FROM onek - WHERE onek.stringu1 = 'JBAAAA' and - onek.stringu1 = tmp.stringu1; -UPDATE tmp - SET stringu1 = reverse_name(onek2.stringu1) - FROM onek2 - WHERE onek2.stringu1 = 'JCAAAA' and - onek2.stringu1 = tmp.stringu1; -DROP TABLE tmp; ---UPDATE person* --- SET age = age + 1; ---UPDATE person* --- SET age = age + 3 --- WHERE name = 'linda'; --- --- copy --- -\set filename :abs_builddir '/results/onek.data' -COPY onek TO :'filename'; -CREATE TEMP TABLE onek_copy (LIKE onek); -COPY onek_copy FROM :'filename'; -SELECT * FROM onek EXCEPT ALL SELECT * FROM onek_copy; - unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 ----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- -(0 rows) - -SELECT * FROM onek_copy EXCEPT ALL SELECT * FROM onek; - unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 ----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- -(0 rows) - -\set filename :abs_builddir '/results/stud_emp.data' -COPY BINARY stud_emp TO :'filename'; -CREATE TEMP TABLE stud_emp_copy (LIKE stud_emp); -COPY BINARY stud_emp_copy FROM :'filename'; -SELECT * FROM stud_emp_copy; - name | age | location | salary | manager | gpa | percent --------+-----+------------+--------+---------+-----+--------- - jeff | 23 | (8,7.7) | 600 | sharon | 3.5 | - cim | 30 | (10.5,4.7) | 400 | | 3.4 | - linda | 19 | (0.9,6.1) | 100 | | 2.9 | -(3 rows) - --- --- test data for postquel functions --- -CREATE TABLE hobbies_r ( - name text, - person text -); -CREATE TABLE equipment_r ( - name text, - hobby text -); -INSERT INTO 
hobbies_r (name, person) - SELECT 'posthacking', p.name - FROM person* p - WHERE p.name = 'mike' or p.name = 'jeff'; -INSERT INTO hobbies_r (name, person) - SELECT 'basketball', p.name - FROM person p - WHERE p.name = 'joe' or p.name = 'sally'; -INSERT INTO hobbies_r (name) VALUES ('skywalking'); -INSERT INTO equipment_r (name, hobby) VALUES ('advil', 'posthacking'); -INSERT INTO equipment_r (name, hobby) VALUES ('peet''s coffee', 'posthacking'); -INSERT INTO equipment_r (name, hobby) VALUES ('hightops', 'basketball'); -INSERT INTO equipment_r (name, hobby) VALUES ('guts', 'skywalking'); --- --- postquel functions --- -CREATE FUNCTION hobbies(person) - RETURNS setof hobbies_r - AS 'select * from hobbies_r where person = $1.name' - LANGUAGE SQL; -CREATE FUNCTION hobby_construct(text, text) - RETURNS hobbies_r - AS 'select $1 as name, $2 as hobby' - LANGUAGE SQL; -CREATE FUNCTION hobby_construct_named(name text, hobby text) - RETURNS hobbies_r - AS 'select name, hobby' - LANGUAGE SQL; -CREATE FUNCTION hobbies_by_name(hobbies_r.name%TYPE) - RETURNS hobbies_r.person%TYPE - AS 'select person from hobbies_r where name = $1' - LANGUAGE SQL; -NOTICE: type reference hobbies_r.name%TYPE converted to text -NOTICE: type reference hobbies_r.person%TYPE converted to text -CREATE FUNCTION equipment(hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where hobby = $1.name' - LANGUAGE SQL; -CREATE FUNCTION equipment_named(hobby hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where equipment_r.hobby = equipment_named.hobby.name' - LANGUAGE SQL; -CREATE FUNCTION equipment_named_ambiguous_1a(hobby hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where hobby = equipment_named_ambiguous_1a.hobby.name' - LANGUAGE SQL; -CREATE FUNCTION equipment_named_ambiguous_1b(hobby hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where equipment_r.hobby = hobby.name' - LANGUAGE SQL; -CREATE FUNCTION equipment_named_ambiguous_1c(hobby hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where hobby = hobby.name' - LANGUAGE SQL; -CREATE FUNCTION equipment_named_ambiguous_2a(hobby text) - RETURNS setof equipment_r - AS 'select * from equipment_r where hobby = equipment_named_ambiguous_2a.hobby' - LANGUAGE SQL; -CREATE FUNCTION equipment_named_ambiguous_2b(hobby text) - RETURNS setof equipment_r - AS 'select * from equipment_r where equipment_r.hobby = hobby' - LANGUAGE SQL; --- --- mike does post_hacking, --- joe and sally play basketball, and --- everyone else does nothing. --- -SELECT p.name, name(p.hobbies) FROM ONLY person p; - name | name --------+------------- - mike | posthacking - joe | basketball - sally | basketball -(3 rows) - --- --- as above, but jeff also does post_hacking. --- -SELECT p.name, name(p.hobbies) FROM person* p; - name | name --------+------------- - mike | posthacking - joe | basketball - sally | basketball - jeff | posthacking -(4 rows) - --- --- the next two queries demonstrate how functions generate bogus duplicates. --- this is a "feature" .. 
--- -SELECT DISTINCT hobbies_r.name, name(hobbies_r.equipment) FROM hobbies_r - ORDER BY 1,2; - name | name --------------+--------------- - basketball | hightops - posthacking | advil - posthacking | peet's coffee - skywalking | guts -(4 rows) - -SELECT hobbies_r.name, (hobbies_r.equipment).name FROM hobbies_r; - name | name --------------+--------------- - posthacking | advil - posthacking | peet's coffee - posthacking | advil - posthacking | peet's coffee - basketball | hightops - basketball | hightops - skywalking | guts -(7 rows) - --- --- mike needs advil and peet's coffee, --- joe and sally need hightops, and --- everyone else is fine. --- -SELECT p.name, name(p.hobbies), name(equipment(p.hobbies)) FROM ONLY person p; - name | name | name --------+-------------+--------------- - mike | posthacking | advil - mike | posthacking | peet's coffee - joe | basketball | hightops - sally | basketball | hightops -(4 rows) - --- --- as above, but jeff needs advil and peet's coffee as well. --- -SELECT p.name, name(p.hobbies), name(equipment(p.hobbies)) FROM person* p; - name | name | name --------+-------------+--------------- - mike | posthacking | advil - mike | posthacking | peet's coffee - joe | basketball | hightops - sally | basketball | hightops - jeff | posthacking | advil - jeff | posthacking | peet's coffee -(6 rows) - --- --- just like the last two, but make sure that the target list fixup and --- unflattening is being done correctly. --- -SELECT name(equipment(p.hobbies)), p.name, name(p.hobbies) FROM ONLY person p; - name | name | name ----------------+-------+------------- - advil | mike | posthacking - peet's coffee | mike | posthacking - hightops | joe | basketball - hightops | sally | basketball -(4 rows) - -SELECT (p.hobbies).equipment.name, p.name, name(p.hobbies) FROM person* p; - name | name | name ----------------+-------+------------- - advil | mike | posthacking - peet's coffee | mike | posthacking - hightops | joe | basketball - hightops | sally | basketball - advil | jeff | posthacking - peet's coffee | jeff | posthacking -(6 rows) - -SELECT (p.hobbies).equipment.name, name(p.hobbies), p.name FROM ONLY person p; - name | name | name ----------------+-------------+------- - advil | posthacking | mike - peet's coffee | posthacking | mike - hightops | basketball | joe - hightops | basketball | sally -(4 rows) - -SELECT name(equipment(p.hobbies)), name(p.hobbies), p.name FROM person* p; - name | name | name ----------------+-------------+------- - advil | posthacking | mike - peet's coffee | posthacking | mike - hightops | basketball | joe - hightops | basketball | sally - advil | posthacking | jeff - peet's coffee | posthacking | jeff -(6 rows) - -SELECT name(equipment(hobby_construct(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment(hobby_construct_named(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment_named(hobby_construct_named(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment_named_ambiguous_1a(hobby_construct_named(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment_named_ambiguous_1b(hobby_construct_named(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment_named_ambiguous_1c(hobby_construct_named(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment_named_ambiguous_2a(text 'skywalking')); - name ------- - guts -(1 row) - -SELECT 
name(equipment_named_ambiguous_2b(text 'skywalking')); - name ---------------- - advil - peet's coffee - hightops - guts -(4 rows) - -SELECT hobbies_by_name('basketball'); - hobbies_by_name ------------------ - joe -(1 row) - -SELECT name, overpaid(emp.*) FROM emp; - name | overpaid ---------+---------- - sharon | t - sam | t - bill | t - jeff | f - cim | f - linda | f -(6 rows) - --- --- Try a few cases with SQL-spec row constructor expressions --- -SELECT * FROM equipment(ROW('skywalking', 'mer')); - name | hobby -------+------------ - guts | skywalking -(1 row) - -SELECT name(equipment(ROW('skywalking', 'mer'))); - name ------- - guts -(1 row) - -SELECT *, name(equipment(h.*)) FROM hobbies_r h; - name | person | name --------------+--------+--------------- - posthacking | mike | advil - posthacking | mike | peet's coffee - posthacking | jeff | advil - posthacking | jeff | peet's coffee - basketball | joe | hightops - basketball | sally | hightops - skywalking | | guts -(7 rows) - -SELECT *, (equipment(CAST((h.*) AS hobbies_r))).name FROM hobbies_r h; - name | person | name --------------+--------+--------------- - posthacking | mike | advil - posthacking | mike | peet's coffee - posthacking | jeff | advil - posthacking | jeff | peet's coffee - basketball | joe | hightops - basketball | sally | hightops - skywalking | | guts -(7 rows) - --- --- functional joins --- --- --- instance rules --- --- --- rewrite rules --- +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/async.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/async.out --- /tmp/cirrus-ci-build/src/test/regress/expected/async.out 2024-03-13 23:12:37.622465000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/async.out 2024-03-13 23:14:29.474811000 +0000 @@ -1,42 +1,2 @@ --- --- ASYNC --- ---Should work. Send a valid message via a valid channel name -SELECT pg_notify('notify_async1','sample message1'); - pg_notify ------------ - -(1 row) - -SELECT pg_notify('notify_async1',''); - pg_notify ------------ - -(1 row) - -SELECT pg_notify('notify_async1',NULL); - pg_notify ------------ - -(1 row) - --- Should fail. Send a valid message via an invalid channel name -SELECT pg_notify('','sample message1'); -ERROR: channel name cannot be empty -SELECT pg_notify(NULL,'sample message1'); -ERROR: channel name cannot be empty -SELECT pg_notify('notify_async_channel_name_too_long______________________________','sample_message1'); -ERROR: channel name too long ---Should work. Valid NOTIFY/LISTEN/UNLISTEN commands -NOTIFY notify_async2; -LISTEN notify_async2; -UNLISTEN notify_async2; -UNLISTEN *; --- Should return zero while there are no pending notifications. --- src/test/isolation/specs/async-notify.spec tests for actual usage. -SELECT pg_notification_queue_usage(); - pg_notification_queue_usage ------------------------------ - 0 -(1 row) - +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/dbsize.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/dbsize.out --- /tmp/cirrus-ci-build/src/test/regress/expected/dbsize.out 2024-03-13 23:12:37.623477000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/dbsize.out 2024-03-13 23:14:29.464905000 +0000 @@ -1,195 +1,2 @@ -SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM - (VALUES (10::bigint), (1000::bigint), (1000000::bigint), - (1000000000::bigint), (1000000000000::bigint), - (1000000000000000::bigint)) x(size); - size | pg_size_pretty | pg_size_pretty -------------------+----------------+---------------- - 10 | 10 bytes | -10 bytes - 1000 | 1000 bytes | -1000 bytes - 1000000 | 977 kB | -977 kB - 1000000000 | 954 MB | -954 MB - 1000000000000 | 931 GB | -931 GB - 1000000000000000 | 909 TB | -909 TB -(6 rows) - -SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM - (VALUES (10::numeric), (1000::numeric), (1000000::numeric), - (1000000000::numeric), (1000000000000::numeric), - (1000000000000000::numeric), - (10.5::numeric), (1000.5::numeric), (1000000.5::numeric), - (1000000000.5::numeric), (1000000000000.5::numeric), - (1000000000000000.5::numeric)) x(size); - size | pg_size_pretty | pg_size_pretty ---------------------+----------------+---------------- - 10 | 10 bytes | -10 bytes - 1000 | 1000 bytes | -1000 bytes - 1000000 | 977 kB | -977 kB - 1000000000 | 954 MB | -954 MB - 1000000000000 | 931 GB | -931 GB - 1000000000000000 | 909 TB | -909 TB - 10.5 | 10.5 bytes | -10.5 bytes - 1000.5 | 1000.5 bytes | -1000.5 bytes - 1000000.5 | 977 kB | -977 kB - 1000000000.5 | 954 MB | -954 MB - 1000000000000.5 | 931 GB | -931 GB - 1000000000000000.5 | 909 TB | -909 TB -(12 rows) - --- test where units change up -SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM - (VALUES (10239::bigint), (10240::bigint), - (10485247::bigint), (10485248::bigint), - (10736893951::bigint), (10736893952::bigint), - (10994579406847::bigint), (10994579406848::bigint), - (11258449312612351::bigint), (11258449312612352::bigint)) x(size); - size | pg_size_pretty | pg_size_pretty --------------------+----------------+---------------- - 10239 | 10239 bytes | -10239 bytes - 10240 | 10 kB | -10 kB - 10485247 | 10239 kB | -10239 kB - 10485248 | 10 MB | -10 MB - 10736893951 | 10239 MB | -10239 MB - 10736893952 | 10 GB | -10 GB - 10994579406847 | 10239 GB | -10239 GB - 10994579406848 | 10 TB | -10 TB - 11258449312612351 | 10239 TB | -10239 TB - 11258449312612352 | 10 PB | -10 PB -(10 rows) - -SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM - (VALUES (10239::numeric), (10240::numeric), - (10485247::numeric), (10485248::numeric), - (10736893951::numeric), (10736893952::numeric), - (10994579406847::numeric), (10994579406848::numeric), - (11258449312612351::numeric), (11258449312612352::numeric), - (11528652096115048447::numeric), (11528652096115048448::numeric)) x(size); - size | pg_size_pretty | pg_size_pretty -----------------------+----------------+---------------- - 10239 | 10239 bytes | -10239 bytes - 10240 | 10 kB | -10 kB - 10485247 | 10239 kB | -10239 kB - 10485248 | 10 MB | -10 MB - 10736893951 | 10239 MB | -10239 MB - 10736893952 | 10 GB | -10 GB - 10994579406847 | 10239 GB | -10239 GB - 10994579406848 | 10 TB | -10 TB - 11258449312612351 | 10239 TB | -10239 TB - 11258449312612352 | 10 PB | -10 PB - 11528652096115048447 | 10239 PB | -10239 PB - 11528652096115048448 | 10240 PB | -10240 PB -(12 rows) - --- pg_size_bytes() 
tests -SELECT size, pg_size_bytes(size) FROM - (VALUES ('1'), ('123bytes'), ('256 B'), ('1kB'), ('1MB'), (' 1 GB'), ('1.5 GB '), - ('1TB'), ('3000 TB'), ('1e6 MB'), ('99 PB')) x(size); - size | pg_size_bytes -----------+-------------------- - 1 | 1 - 123bytes | 123 - 256 B | 256 - 1kB | 1024 - 1MB | 1048576 - 1 GB | 1073741824 - 1.5 GB | 1610612736 - 1TB | 1099511627776 - 3000 TB | 3298534883328000 - 1e6 MB | 1048576000000 - 99 PB | 111464090777419776 -(11 rows) - --- case-insensitive units are supported -SELECT size, pg_size_bytes(size) FROM - (VALUES ('1'), ('123bYteS'), ('1kb'), ('1mb'), (' 1 Gb'), ('1.5 gB '), - ('1tb'), ('3000 tb'), ('1e6 mb'), ('99 pb')) x(size); - size | pg_size_bytes -----------+-------------------- - 1 | 1 - 123bYteS | 123 - 1kb | 1024 - 1mb | 1048576 - 1 Gb | 1073741824 - 1.5 gB | 1610612736 - 1tb | 1099511627776 - 3000 tb | 3298534883328000 - 1e6 mb | 1048576000000 - 99 pb | 111464090777419776 -(10 rows) - --- negative numbers are supported -SELECT size, pg_size_bytes(size) FROM - (VALUES ('-1'), ('-123bytes'), ('-1kb'), ('-1mb'), (' -1 Gb'), ('-1.5 gB '), - ('-1tb'), ('-3000 TB'), ('-10e-1 MB'), ('-99 PB')) x(size); - size | pg_size_bytes ------------+--------------------- - -1 | -1 - -123bytes | -123 - -1kb | -1024 - -1mb | -1048576 - -1 Gb | -1073741824 - -1.5 gB | -1610612736 - -1tb | -1099511627776 - -3000 TB | -3298534883328000 - -10e-1 MB | -1048576 - -99 PB | -111464090777419776 -(10 rows) - --- different cases with allowed points -SELECT size, pg_size_bytes(size) FROM - (VALUES ('-1.'), ('-1.kb'), ('-1. kb'), ('-0. gb'), - ('-.1'), ('-.1kb'), ('-.1 kb'), ('-.0 gb')) x(size); - size | pg_size_bytes ---------+--------------- - -1. | -1 - -1.kb | -1024 - -1. kb | -1024 - -0. gb | 0 - -.1 | 0 - -.1kb | -102 - -.1 kb | -102 - -.0 gb | 0 -(8 rows) - --- invalid inputs -SELECT pg_size_bytes('1 AB'); -ERROR: invalid size: "1 AB" -DETAIL: Invalid size unit: "AB". -HINT: Valid units are "bytes", "B", "kB", "MB", "GB", "TB", and "PB". -SELECT pg_size_bytes('1 AB A'); -ERROR: invalid size: "1 AB A" -DETAIL: Invalid size unit: "AB A". -HINT: Valid units are "bytes", "B", "kB", "MB", "GB", "TB", and "PB". -SELECT pg_size_bytes('1 AB A '); -ERROR: invalid size: "1 AB A " -DETAIL: Invalid size unit: "AB A". -HINT: Valid units are "bytes", "B", "kB", "MB", "GB", "TB", and "PB". -SELECT pg_size_bytes('9223372036854775807.9'); -ERROR: bigint out of range -SELECT pg_size_bytes('1e100'); -ERROR: bigint out of range -SELECT pg_size_bytes('1e1000000000000000000'); -ERROR: value overflows numeric format -SELECT pg_size_bytes('1 byte'); -- the singular "byte" is not supported -ERROR: invalid size: "1 byte" -DETAIL: Invalid size unit: "byte". -HINT: Valid units are "bytes", "B", "kB", "MB", "GB", "TB", and "PB". -SELECT pg_size_bytes(''); -ERROR: invalid size: "" -SELECT pg_size_bytes('kb'); -ERROR: invalid size: "kb" -SELECT pg_size_bytes('..'); -ERROR: invalid size: ".." -SELECT pg_size_bytes('-.'); -ERROR: invalid size: "-." -SELECT pg_size_bytes('-.kb'); -ERROR: invalid size: "-.kb" -SELECT pg_size_bytes('-. kb'); -ERROR: invalid size: "-. kb" -SELECT pg_size_bytes('.+912'); -ERROR: invalid size: ".+912" -SELECT pg_size_bytes('+912+ kB'); -ERROR: invalid size: "+912+ kB" -DETAIL: Invalid size unit: "+ kB". -HINT: Valid units are "bytes", "B", "kB", "MB", "GB", "TB", and "PB". 
-SELECT pg_size_bytes('++123 kB'); -ERROR: invalid size: "++123 kB" +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/merge.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/merge.out --- /tmp/cirrus-ci-build/src/test/regress/expected/merge.out 2024-03-13 23:12:37.625313000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/merge.out 2024-03-13 23:14:29.490630000 +0000 @@ -1,2250 +1,2 @@ --- --- MERGE --- -CREATE USER regress_merge_privs; -CREATE USER regress_merge_no_privs; -CREATE USER regress_merge_none; -DROP TABLE IF EXISTS target; -NOTICE: table "target" does not exist, skipping -DROP TABLE IF EXISTS source; -NOTICE: table "source" does not exist, skipping -CREATE TABLE target (tid integer, balance integer) - WITH (autovacuum_enabled=off); -CREATE TABLE source (sid integer, delta integer) -- no index - WITH (autovacuum_enabled=off); -INSERT INTO target VALUES (1, 10); -INSERT INTO target VALUES (2, 20); -INSERT INTO target VALUES (3, 30); -SELECT t.ctid is not null as matched, t.*, s.* FROM source s FULL OUTER JOIN target t ON s.sid = t.tid ORDER BY t.tid, s.sid; - matched | tid | balance | sid | delta ----------+-----+---------+-----+------- - t | 1 | 10 | | - t | 2 | 20 | | - t | 3 | 30 | | -(3 rows) - -ALTER TABLE target OWNER TO regress_merge_privs; -ALTER TABLE source OWNER TO regress_merge_privs; -CREATE TABLE target2 (tid integer, balance integer) - WITH (autovacuum_enabled=off); -CREATE TABLE source2 (sid integer, delta integer) - WITH (autovacuum_enabled=off); -ALTER TABLE target2 OWNER TO regress_merge_no_privs; -ALTER TABLE source2 OWNER TO regress_merge_no_privs; -GRANT INSERT ON target TO regress_merge_no_privs; -SET SESSION AUTHORIZATION regress_merge_privs; -EXPLAIN (COSTS OFF) -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DELETE; - QUERY PLAN ----------------------------------------- - Merge on target t - -> Merge Join - Merge Cond: (t.tid = s.sid) - -> Sort - Sort Key: t.tid - -> Seq Scan on target t - -> Sort - Sort Key: s.sid - -> Seq Scan on source s -(9 rows) - --- --- Errors --- -MERGE INTO target t RANDOMWORD -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -ERROR: syntax error at or near "RANDOMWORD" -LINE 1: MERGE INTO target t RANDOMWORD - ^ --- MATCHED/INSERT error -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - INSERT DEFAULT VALUES; -ERROR: syntax error at or near "INSERT" -LINE 5: INSERT DEFAULT VALUES; - ^ --- incorrectly specifying INTO target -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT INTO target DEFAULT VALUES; -ERROR: syntax error at or near "INTO" -LINE 5: INSERT INTO target DEFAULT VALUES; - ^ --- Multiple VALUES clause -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT VALUES (1,1), (2,2); -ERROR: syntax error at or near "," -LINE 5: INSERT VALUES (1,1), (2,2); - ^ --- SELECT query for INSERT -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT SELECT (1, 1); -ERROR: syntax error at or near "SELECT" -LINE 5: INSERT SELECT (1, 1); - ^ --- NOT MATCHED/UPDATE -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - UPDATE SET balance = 0; -ERROR: syntax error at 
or near "UPDATE" -LINE 5: UPDATE SET balance = 0; - ^ --- UPDATE tablename -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE target SET balance = 0; -ERROR: syntax error at or near "target" -LINE 5: UPDATE target SET balance = 0; - ^ --- source and target names the same -MERGE INTO target -USING target -ON tid = tid -WHEN MATCHED THEN DO NOTHING; -ERROR: name "target" specified more than once -DETAIL: The name is used both as MERGE target table and data source. --- used in a CTE -WITH foo AS ( - MERGE INTO target USING source ON (true) - WHEN MATCHED THEN DELETE -) SELECT * FROM foo; -ERROR: MERGE not supported in WITH query -LINE 1: WITH foo AS ( - ^ --- used in COPY -COPY ( - MERGE INTO target USING source ON (true) - WHEN MATCHED THEN DELETE -) TO stdout; -ERROR: MERGE not supported in COPY --- unsupported relation types --- materialized view -CREATE MATERIALIZED VIEW mv AS SELECT * FROM target; -MERGE INTO mv t -USING source s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT DEFAULT VALUES; -ERROR: cannot execute MERGE on relation "mv" -DETAIL: This operation is not supported for materialized views. -DROP MATERIALIZED VIEW mv; --- permissions -SET SESSION AUTHORIZATION regress_merge_none; -MERGE INTO target -USING (SELECT 1) -ON true -WHEN MATCHED THEN - DO NOTHING; -ERROR: permission denied for table target -SET SESSION AUTHORIZATION regress_merge_privs; -MERGE INTO target -USING source2 -ON target.tid = source2.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -ERROR: permission denied for table source2 -GRANT INSERT ON target TO regress_merge_no_privs; -SET SESSION AUTHORIZATION regress_merge_no_privs; -MERGE INTO target -USING source2 -ON target.tid = source2.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -ERROR: permission denied for table target -GRANT UPDATE ON target2 TO regress_merge_privs; -SET SESSION AUTHORIZATION regress_merge_privs; -MERGE INTO target2 -USING source -ON target2.tid = source.sid -WHEN MATCHED THEN - DELETE; -ERROR: permission denied for table target2 -MERGE INTO target2 -USING source -ON target2.tid = source.sid -WHEN NOT MATCHED THEN - INSERT DEFAULT VALUES; -ERROR: permission denied for table target2 --- check if the target can be accessed from source relation subquery; we should --- not be able to do so -MERGE INTO target t -USING (SELECT * FROM source WHERE t.tid > sid) s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT DEFAULT VALUES; -ERROR: invalid reference to FROM-clause entry for table "t" -LINE 2: USING (SELECT * FROM source WHERE t.tid > sid) s - ^ -DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. 
--- --- initial tests --- --- zero rows in source has no effect -MERGE INTO target -USING source -ON target.tid = source.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DELETE; -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT DEFAULT VALUES; -ROLLBACK; --- insert some non-matching source rows to work from -INSERT INTO source VALUES (4, 40); -SELECT * FROM source ORDER BY sid; - sid | delta ------+------- - 4 | 40 -(1 row) - -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 -(3 rows) - -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - DO NOTHING; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DELETE; -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT DEFAULT VALUES; -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - | -(4 rows) - -ROLLBACK; --- index plans -INSERT INTO target SELECT generate_series(1000,2500), 0; -ALTER TABLE target ADD PRIMARY KEY (tid); -ANALYZE target; -EXPLAIN (COSTS OFF) -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; - QUERY PLAN ----------------------------------------- - Merge on target t - -> Hash Join - Hash Cond: (s.sid = t.tid) - -> Seq Scan on source s - -> Hash - -> Seq Scan on target t -(6 rows) - -EXPLAIN (COSTS OFF) -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DELETE; - QUERY PLAN ----------------------------------------- - Merge on target t - -> Hash Join - Hash Cond: (s.sid = t.tid) - -> Seq Scan on source s - -> Hash - -> Seq Scan on target t -(6 rows) - -EXPLAIN (COSTS OFF) -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT VALUES (4, NULL); - QUERY PLAN ----------------------------------------- - Merge on target t - -> Hash Left Join - Hash Cond: (s.sid = t.tid) - -> Seq Scan on source s - -> Hash - -> Seq Scan on target t -(6 rows) - -DELETE FROM target WHERE tid > 100; -ANALYZE target; --- insert some matching source rows to work from -INSERT INTO source VALUES (2, 5); -INSERT INTO source VALUES (3, 20); -SELECT * FROM source ORDER BY sid; - sid | delta ------+------- - 2 | 5 - 3 | 20 - 4 | 40 -(3 rows) - -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 -(3 rows) - --- equivalent of an UPDATE join -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 0 - 3 | 0 -(3 rows) - -ROLLBACK; --- equivalent of a DELETE join -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DELETE; -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 -(1 row) - -ROLLBACK; -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DO NOTHING; -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 -(3 rows) - -ROLLBACK; -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - 
INSERT VALUES (4, NULL); -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - 4 | -(4 rows) - -ROLLBACK; --- duplicate source row causes multiple target row update ERROR -INSERT INTO source VALUES (2, 5); -SELECT * FROM source ORDER BY sid; - sid | delta ------+------- - 2 | 5 - 2 | 5 - 3 | 20 - 4 | 40 -(4 rows) - -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 -(3 rows) - -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -ERROR: MERGE command cannot affect row a second time -HINT: Ensure that not more than one source row matches any one target row. -ROLLBACK; -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DELETE; -ERROR: MERGE command cannot affect row a second time -HINT: Ensure that not more than one source row matches any one target row. -ROLLBACK; --- remove duplicate MATCHED data from source data -DELETE FROM source WHERE sid = 2; -INSERT INTO source VALUES (2, 5); -SELECT * FROM source ORDER BY sid; - sid | delta ------+------- - 2 | 5 - 3 | 20 - 4 | 40 -(3 rows) - -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 -(3 rows) - --- duplicate source row on INSERT should fail because of target_pkey -INSERT INTO source VALUES (4, 40); -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT VALUES (4, NULL); -ERROR: duplicate key value violates unique constraint "target_pkey" -DETAIL: Key (tid)=(4) already exists. -SELECT * FROM target ORDER BY tid; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- remove duplicate NOT MATCHED data from source data -DELETE FROM source WHERE sid = 4; -INSERT INTO source VALUES (4, 40); -SELECT * FROM source ORDER BY sid; - sid | delta ------+------- - 2 | 5 - 3 | 20 - 4 | 40 -(3 rows) - -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 -(3 rows) - --- remove constraints -alter table target drop CONSTRAINT target_pkey; -alter table target alter column tid drop not null; --- multiple actions -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT VALUES (4, 4) -WHEN MATCHED THEN - UPDATE SET balance = 0; -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 0 - 3 | 0 - 4 | 4 -(4 rows) - -ROLLBACK; --- should be equivalent -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0 -WHEN NOT MATCHED THEN - INSERT VALUES (4, 4); -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 0 - 3 | 0 - 4 | 4 -(4 rows) - -ROLLBACK; --- column references --- do a simple equivalent of an UPDATE join -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = t.balance + s.delta; -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 25 - 3 | 50 -(3 rows) - -ROLLBACK; --- do a simple equivalent of an INSERT SELECT -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT VALUES (s.sid, s.delta); -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - 4 | 40 -(4 rows) - -ROLLBACK; --- and again with duplicate source rows -INSERT INTO source VALUES (5, 50); -INSERT INTO source VALUES (5, 50); --- 
do a simple equivalent of an INSERT SELECT -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT VALUES (s.sid, s.delta); -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - 4 | 40 - 5 | 50 - 5 | 50 -(6 rows) - -ROLLBACK; --- removing duplicate source rows -DELETE FROM source WHERE sid = 5; --- and again with explicitly identified column list -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT (tid, balance) VALUES (s.sid, s.delta); -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - 4 | 40 -(4 rows) - -ROLLBACK; --- and again with a subtle error: referring to non-existent target row for NOT MATCHED -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT (tid, balance) VALUES (t.tid, s.delta); -ERROR: invalid reference to FROM-clause entry for table "t" -LINE 5: INSERT (tid, balance) VALUES (t.tid, s.delta); - ^ -DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. --- and again with a constant ON clause -BEGIN; -MERGE INTO target t -USING source AS s -ON (SELECT true) -WHEN NOT MATCHED THEN - INSERT (tid, balance) VALUES (t.tid, s.delta); -ERROR: invalid reference to FROM-clause entry for table "t" -LINE 5: INSERT (tid, balance) VALUES (t.tid, s.delta); - ^ -DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. -SELECT * FROM target ORDER BY tid; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- now the classic UPSERT -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = t.balance + s.delta -WHEN NOT MATCHED THEN - INSERT VALUES (s.sid, s.delta); -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 25 - 3 | 50 - 4 | 40 -(4 rows) - -ROLLBACK; --- unreachable WHEN clause should ERROR -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN /* Terminal WHEN clause for MATCHED */ - DELETE -WHEN MATCHED THEN - UPDATE SET balance = t.balance - s.delta; -ERROR: unreachable WHEN clause specified after unconditional WHEN clause -ROLLBACK; --- conditional WHEN clause -CREATE TABLE wq_target (tid integer not null, balance integer DEFAULT -1) - WITH (autovacuum_enabled=off); -CREATE TABLE wq_source (balance integer, sid integer) - WITH (autovacuum_enabled=off); -INSERT INTO wq_source (sid, balance) VALUES (1, 100); -BEGIN; --- try a simple INSERT with default values first -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT (tid) VALUES (s.sid); -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | -1 -(1 row) - -ROLLBACK; --- this time with a FALSE condition -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN NOT MATCHED AND FALSE THEN - INSERT (tid) VALUES (s.sid); -SELECT * FROM wq_target; - tid | balance ------+--------- -(0 rows) - --- this time with an actual condition which returns false -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN NOT MATCHED AND s.balance <> 100 THEN - INSERT (tid) VALUES (s.sid); -SELECT * FROM wq_target; - tid | balance ------+--------- -(0 rows) - -BEGIN; --- and now with a condition which returns true -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN NOT MATCHED AND s.balance = 100 THEN - INSERT (tid) VALUES 
(s.sid); -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | -1 -(1 row) - -ROLLBACK; --- conditions in the NOT MATCHED clause can only refer to source columns -BEGIN; -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN NOT MATCHED AND t.balance = 100 THEN - INSERT (tid) VALUES (s.sid); -ERROR: invalid reference to FROM-clause entry for table "t" -LINE 3: WHEN NOT MATCHED AND t.balance = 100 THEN - ^ -DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. -SELECT * FROM wq_target; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN NOT MATCHED AND s.balance = 100 THEN - INSERT (tid) VALUES (s.sid); -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | -1 -(1 row) - --- conditions in MATCHED clause can refer to both source and target -SELECT * FROM wq_source; - balance | sid ----------+----- - 100 | 1 -(1 row) - -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND s.balance = 100 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 99 -(1 row) - -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.balance = 100 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 99 -(1 row) - --- check if AND works -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.balance = 99 AND s.balance > 100 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 99 -(1 row) - -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.balance = 99 AND s.balance = 100 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 199 -(1 row) - --- check if OR works -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.balance = 99 OR s.balance > 100 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 199 -(1 row) - -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.balance = 199 OR s.balance > 100 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 299 -(1 row) - --- check source-side whole-row references -BEGIN; -MERGE INTO wq_target t -USING wq_source s ON (t.tid = s.sid) -WHEN matched and t = s or t.tid = s.sid THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 399 -(1 row) - -ROLLBACK; --- check if subqueries work in the conditions? 
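The conditional clauses exercised above follow a simple visibility rule, confirmed by the errors in this output: a condition on a NOT MATCHED clause may only reference the source row, while a condition on a MATCHED clause may reference both source and target. A minimal sketch of that shape, using hypothetical tables acct(id, balance) and feed(id, delta) rather than the regression tables:

    MERGE INTO acct a
    USING feed f ON a.id = f.id
    WHEN MATCHED AND a.balance + f.delta < 0 THEN
        DELETE                                        -- condition may look at both a and f
    WHEN MATCHED THEN
        UPDATE SET balance = a.balance + f.delta
    WHEN NOT MATCHED AND f.delta > 0 THEN
        INSERT (id, balance) VALUES (f.id, f.delta);  -- condition may only look at f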
-MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.balance > (SELECT max(balance) FROM target) THEN - UPDATE SET balance = t.balance + s.balance; --- check if we can access system columns in the conditions -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.xmin = t.xmax THEN - UPDATE SET balance = t.balance + s.balance; -ERROR: cannot use system column "xmin" in MERGE WHEN condition -LINE 3: WHEN MATCHED AND t.xmin = t.xmax THEN - ^ -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.tableoid >= 0 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 499 -(1 row) - -DROP TABLE wq_target, wq_source; --- test triggers -create or replace function merge_trigfunc () returns trigger -language plpgsql as -$$ -DECLARE - line text; -BEGIN - SELECT INTO line format('%s %s %s trigger%s', - TG_WHEN, TG_OP, TG_LEVEL, CASE - WHEN TG_OP = 'INSERT' AND TG_LEVEL = 'ROW' - THEN format(' row: %s', NEW) - WHEN TG_OP = 'UPDATE' AND TG_LEVEL = 'ROW' - THEN format(' row: %s -> %s', OLD, NEW) - WHEN TG_OP = 'DELETE' AND TG_LEVEL = 'ROW' - THEN format(' row: %s', OLD) - END); - - RAISE NOTICE '%', line; - IF (TG_WHEN = 'BEFORE' AND TG_LEVEL = 'ROW') THEN - IF (TG_OP = 'DELETE') THEN - RETURN OLD; - ELSE - RETURN NEW; - END IF; - ELSE - RETURN NULL; - END IF; -END; -$$; -CREATE TRIGGER merge_bsi BEFORE INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_bsu BEFORE UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_bsd BEFORE DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_asi AFTER INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_asu AFTER UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_asd AFTER DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_bri BEFORE INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_bru BEFORE UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_brd BEFORE DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_ari AFTER INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_aru AFTER UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_ard AFTER DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); --- now the classic UPSERT, with a DELETE -BEGIN; -UPDATE target SET balance = 0 WHERE tid = 3; -NOTICE: BEFORE UPDATE STATEMENT trigger -NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,0) -NOTICE: AFTER UPDATE ROW trigger row: (3,30) -> (3,0) -NOTICE: AFTER UPDATE STATEMENT trigger ---EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED AND t.balance > s.delta THEN - UPDATE SET balance = t.balance - s.delta -WHEN MATCHED THEN - DELETE -WHEN NOT MATCHED THEN - INSERT VALUES (s.sid, s.delta); -NOTICE: BEFORE INSERT STATEMENT trigger -NOTICE: BEFORE UPDATE STATEMENT trigger -NOTICE: BEFORE DELETE STATEMENT trigger -NOTICE: BEFORE DELETE ROW trigger row: (3,0) -NOTICE: BEFORE UPDATE ROW trigger row: (2,20) -> (2,15) -NOTICE: BEFORE INSERT ROW trigger row: (4,40) -NOTICE: AFTER DELETE ROW trigger row: (3,0) -NOTICE: AFTER 
UPDATE ROW trigger row: (2,20) -> (2,15) -NOTICE: AFTER INSERT ROW trigger row: (4,40) -NOTICE: AFTER DELETE STATEMENT trigger -NOTICE: AFTER UPDATE STATEMENT trigger -NOTICE: AFTER INSERT STATEMENT trigger -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 15 - 4 | 40 -(3 rows) - -ROLLBACK; --- Test behavior of triggers that turn UPDATE/DELETE into no-ops -create or replace function skip_merge_op() returns trigger -language plpgsql as -$$ -BEGIN - RETURN NULL; -END; -$$; -SELECT * FROM target full outer join source on (sid = tid); - tid | balance | sid | delta ------+---------+-----+------- - 3 | 30 | 3 | 20 - 2 | 20 | 2 | 5 - | | 4 | 40 - 1 | 10 | | -(4 rows) - -create trigger merge_skip BEFORE INSERT OR UPDATE or DELETE - ON target FOR EACH ROW EXECUTE FUNCTION skip_merge_op(); -DO $$ -DECLARE - result integer; -BEGIN -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED AND s.sid = 3 THEN UPDATE SET balance = t.balance + s.delta -WHEN MATCHED THEN DELETE -WHEN NOT MATCHED THEN INSERT VALUES (sid, delta); -IF FOUND THEN - RAISE NOTICE 'Found'; -ELSE - RAISE NOTICE 'Not found'; -END IF; -GET DIAGNOSTICS result := ROW_COUNT; -RAISE NOTICE 'ROW_COUNT = %', result; -END; -$$; -NOTICE: BEFORE INSERT STATEMENT trigger -NOTICE: BEFORE UPDATE STATEMENT trigger -NOTICE: BEFORE DELETE STATEMENT trigger -NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,50) -NOTICE: BEFORE DELETE ROW trigger row: (2,20) -NOTICE: BEFORE INSERT ROW trigger row: (4,40) -NOTICE: AFTER DELETE STATEMENT trigger -NOTICE: AFTER UPDATE STATEMENT trigger -NOTICE: AFTER INSERT STATEMENT trigger -NOTICE: Not found -NOTICE: ROW_COUNT = 0 -SELECT * FROM target FULL OUTER JOIN source ON (sid = tid); - tid | balance | sid | delta ------+---------+-----+------- - 3 | 30 | 3 | 20 - 2 | 20 | 2 | 5 - | | 4 | 40 - 1 | 10 | | -(4 rows) - -DROP TRIGGER merge_skip ON target; -DROP FUNCTION skip_merge_op(); --- test from PL/pgSQL --- make sure MERGE INTO isn't interpreted to mean returning variables like SELECT INTO -BEGIN; -DO LANGUAGE plpgsql $$ -BEGIN -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED AND t.balance > s.delta THEN - UPDATE SET balance = t.balance - s.delta; -END; -$$; -NOTICE: BEFORE UPDATE STATEMENT trigger -NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,10) -NOTICE: BEFORE UPDATE ROW trigger row: (2,20) -> (2,15) -NOTICE: AFTER UPDATE ROW trigger row: (3,30) -> (3,10) -NOTICE: AFTER UPDATE ROW trigger row: (2,20) -> (2,15) -NOTICE: AFTER UPDATE STATEMENT trigger -ROLLBACK; ---source constants -BEGIN; -MERGE INTO target t -USING (SELECT 9 AS sid, 57 AS delta) AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT (tid, balance) VALUES (s.sid, s.delta); -NOTICE: BEFORE INSERT STATEMENT trigger -NOTICE: BEFORE INSERT ROW trigger row: (9,57) -NOTICE: AFTER INSERT ROW trigger row: (9,57) -NOTICE: AFTER INSERT STATEMENT trigger -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - 9 | 57 -(4 rows) - -ROLLBACK; ---source query -BEGIN; -MERGE INTO target t -USING (SELECT sid, delta FROM source WHERE delta > 0) AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT (tid, balance) VALUES (s.sid, s.delta); -NOTICE: BEFORE INSERT STATEMENT trigger -NOTICE: BEFORE INSERT ROW trigger row: (4,40) -NOTICE: AFTER INSERT ROW trigger row: (4,40) -NOTICE: AFTER INSERT STATEMENT trigger -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - 4 | 40 -(4 rows) - -ROLLBACK; 
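As the NOTICE output above shows, the USING clause accepts an arbitrary query, not just a table. Aggregating the source so that each key appears exactly once is also a convenient way to avoid the "MERGE command cannot affect row a second time" error seen earlier; a sketch against the same target/source tables (not part of the test file):

    MERGE INTO target t
    USING (SELECT sid, sum(delta) AS delta
             FROM source
            GROUP BY sid) s
    ON t.tid = s.sid
    WHEN MATCHED THEN
        UPDATE SET balance = t.balance + s.delta
    WHEN NOT MATCHED THEN
        INSERT (tid, balance) VALUES (s.sid, s.delta);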
-BEGIN; -MERGE INTO target t -USING (SELECT sid, delta as newname FROM source WHERE delta > 0) AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT (tid, balance) VALUES (s.sid, s.newname); -NOTICE: BEFORE INSERT STATEMENT trigger -NOTICE: BEFORE INSERT ROW trigger row: (4,40) -NOTICE: AFTER INSERT ROW trigger row: (4,40) -NOTICE: AFTER INSERT STATEMENT trigger -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - 4 | 40 -(4 rows) - -ROLLBACK; ---self-merge -BEGIN; -MERGE INTO target t1 -USING target t2 -ON t1.tid = t2.tid -WHEN MATCHED THEN - UPDATE SET balance = t1.balance + t2.balance -WHEN NOT MATCHED THEN - INSERT VALUES (t2.tid, t2.balance); -NOTICE: BEFORE INSERT STATEMENT trigger -NOTICE: BEFORE UPDATE STATEMENT trigger -NOTICE: BEFORE UPDATE ROW trigger row: (1,10) -> (1,20) -NOTICE: BEFORE UPDATE ROW trigger row: (2,20) -> (2,40) -NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,60) -NOTICE: AFTER UPDATE ROW trigger row: (1,10) -> (1,20) -NOTICE: AFTER UPDATE ROW trigger row: (2,20) -> (2,40) -NOTICE: AFTER UPDATE ROW trigger row: (3,30) -> (3,60) -NOTICE: AFTER UPDATE STATEMENT trigger -NOTICE: AFTER INSERT STATEMENT trigger -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 20 - 2 | 40 - 3 | 60 -(3 rows) - -ROLLBACK; -BEGIN; -MERGE INTO target t -USING (SELECT tid as sid, balance as delta FROM target WHERE balance > 0) AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT (tid, balance) VALUES (s.sid, s.delta); -NOTICE: BEFORE INSERT STATEMENT trigger -NOTICE: AFTER INSERT STATEMENT trigger -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 -(3 rows) - -ROLLBACK; -BEGIN; -MERGE INTO target t -USING -(SELECT sid, max(delta) AS delta - FROM source - GROUP BY sid - HAVING count(*) = 1 - ORDER BY sid ASC) AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT (tid, balance) VALUES (s.sid, s.delta); -NOTICE: BEFORE INSERT STATEMENT trigger -NOTICE: BEFORE INSERT ROW trigger row: (4,40) -NOTICE: AFTER INSERT ROW trigger row: (4,40) -NOTICE: AFTER INSERT STATEMENT trigger -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - 4 | 40 -(4 rows) - -ROLLBACK; --- plpgsql parameters and results -BEGIN; -CREATE FUNCTION merge_func (p_id integer, p_bal integer) -RETURNS INTEGER -LANGUAGE plpgsql -AS $$ -DECLARE - result integer; -BEGIN -MERGE INTO target t -USING (SELECT p_id AS sid) AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = t.balance - p_bal; -IF FOUND THEN - GET DIAGNOSTICS result := ROW_COUNT; -END IF; -RETURN result; -END; -$$; -SELECT merge_func(3, 4); -NOTICE: BEFORE UPDATE STATEMENT trigger -NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,26) -NOTICE: AFTER UPDATE ROW trigger row: (3,30) -> (3,26) -NOTICE: AFTER UPDATE STATEMENT trigger - merge_func ------------- - 1 -(1 row) - -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 26 -(3 rows) - -ROLLBACK; --- PREPARE -BEGIN; -prepare foom as merge into target t using (select 1 as sid) s on (t.tid = s.sid) when matched then update set balance = 1; -execute foom; -NOTICE: BEFORE UPDATE STATEMENT trigger -NOTICE: BEFORE UPDATE ROW trigger row: (1,10) -> (1,1) -NOTICE: AFTER UPDATE ROW trigger row: (1,10) -> (1,1) -NOTICE: AFTER UPDATE STATEMENT trigger -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 1 - 2 | 20 - 3 | 30 -(3 rows) - -ROLLBACK; -BEGIN; -PREPARE foom2 (integer, integer) AS 
-MERGE INTO target t -USING (SELECT 1) s -ON t.tid = $1 -WHEN MATCHED THEN -UPDATE SET balance = $2; ---EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) -execute foom2 (1, 1); -NOTICE: BEFORE UPDATE STATEMENT trigger -NOTICE: BEFORE UPDATE ROW trigger row: (1,10) -> (1,1) -NOTICE: AFTER UPDATE ROW trigger row: (1,10) -> (1,1) -NOTICE: AFTER UPDATE STATEMENT trigger -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 1 - 2 | 20 - 3 | 30 -(3 rows) - -ROLLBACK; --- subqueries in source relation -CREATE TABLE sq_target (tid integer NOT NULL, balance integer) - WITH (autovacuum_enabled=off); -CREATE TABLE sq_source (delta integer, sid integer, balance integer DEFAULT 0) - WITH (autovacuum_enabled=off); -INSERT INTO sq_target(tid, balance) VALUES (1,100), (2,200), (3,300); -INSERT INTO sq_source(sid, delta) VALUES (1,10), (2,20), (4,40); -BEGIN; -MERGE INTO sq_target t -USING (SELECT * FROM sq_source) s -ON tid = sid -WHEN MATCHED AND t.balance > delta THEN - UPDATE SET balance = t.balance + delta; -SELECT * FROM sq_target; - tid | balance ------+--------- - 3 | 300 - 1 | 110 - 2 | 220 -(3 rows) - -ROLLBACK; --- try a view -CREATE VIEW v AS SELECT * FROM sq_source WHERE sid < 2; -BEGIN; -MERGE INTO sq_target -USING v -ON tid = sid -WHEN MATCHED THEN - UPDATE SET balance = v.balance + delta; -SELECT * FROM sq_target; - tid | balance ------+--------- - 2 | 200 - 3 | 300 - 1 | 10 -(3 rows) - -ROLLBACK; --- ambiguous reference to a column -BEGIN; -MERGE INTO sq_target -USING v -ON tid = sid -WHEN MATCHED AND tid > 2 THEN - UPDATE SET balance = balance + delta -WHEN NOT MATCHED THEN - INSERT (balance, tid) VALUES (balance + delta, sid) -WHEN MATCHED AND tid < 2 THEN - DELETE; -ERROR: column reference "balance" is ambiguous -LINE 5: UPDATE SET balance = balance + delta - ^ -ROLLBACK; -BEGIN; -INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); -MERGE INTO sq_target t -USING v -ON tid = sid -WHEN MATCHED AND tid > 2 THEN - UPDATE SET balance = t.balance + delta -WHEN NOT MATCHED THEN - INSERT (balance, tid) VALUES (balance + delta, sid) -WHEN MATCHED AND tid < 2 THEN - DELETE; -SELECT * FROM sq_target; - tid | balance ------+--------- - 2 | 200 - 3 | 300 - -1 | -11 -(3 rows) - -ROLLBACK; --- CTEs -BEGIN; -INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); -WITH targq AS ( - SELECT * FROM v -) -MERGE INTO sq_target t -USING v -ON tid = sid -WHEN MATCHED AND tid > 2 THEN - UPDATE SET balance = t.balance + delta -WHEN NOT MATCHED THEN - INSERT (balance, tid) VALUES (balance + delta, sid) -WHEN MATCHED AND tid < 2 THEN - DELETE; -ROLLBACK; --- RETURNING -BEGIN; -INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); -MERGE INTO sq_target t -USING v -ON tid = sid -WHEN MATCHED AND tid > 2 THEN - UPDATE SET balance = t.balance + delta -WHEN NOT MATCHED THEN - INSERT (balance, tid) VALUES (balance + delta, sid) -WHEN MATCHED AND tid < 2 THEN - DELETE -RETURNING *; -ERROR: syntax error at or near "RETURNING" -LINE 10: RETURNING *; - ^ -ROLLBACK; --- EXPLAIN -CREATE TABLE ex_mtarget (a int, b int) - WITH (autovacuum_enabled=off); -CREATE TABLE ex_msource (a int, b int) - WITH (autovacuum_enabled=off); -INSERT INTO ex_mtarget SELECT i, i*10 FROM generate_series(1,100,2) i; -INSERT INTO ex_msource SELECT i, i*10 FROM generate_series(1,100,1) i; -CREATE FUNCTION explain_merge(query text) RETURNS SETOF text -LANGUAGE plpgsql AS -$$ -DECLARE ln text; -BEGIN - FOR ln IN - EXECUTE 'explain (analyze, timing off, summary off, costs off) ' || - query 
- LOOP - ln := regexp_replace(ln, '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g'); - RETURN NEXT ln; - END LOOP; -END; -$$; --- only updates -SELECT explain_merge(' -MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a -WHEN MATCHED THEN - UPDATE SET b = t.b + 1'); - explain_merge ----------------------------------------------------------------------- - Merge on ex_mtarget t (actual rows=0 loops=1) - Tuples: updated=50 - -> Merge Join (actual rows=50 loops=1) - Merge Cond: (t.a = s.a) - -> Sort (actual rows=50 loops=1) - Sort Key: t.a - Sort Method: quicksort Memory: xxx - -> Seq Scan on ex_mtarget t (actual rows=50 loops=1) - -> Sort (actual rows=100 loops=1) - Sort Key: s.a - Sort Method: quicksort Memory: xxx - -> Seq Scan on ex_msource s (actual rows=100 loops=1) -(12 rows) - --- only updates to selected tuples -SELECT explain_merge(' -MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a -WHEN MATCHED AND t.a < 10 THEN - UPDATE SET b = t.b + 1'); - explain_merge ----------------------------------------------------------------------- - Merge on ex_mtarget t (actual rows=0 loops=1) - Tuples: updated=5 skipped=45 - -> Merge Join (actual rows=50 loops=1) - Merge Cond: (t.a = s.a) - -> Sort (actual rows=50 loops=1) - Sort Key: t.a - Sort Method: quicksort Memory: xxx - -> Seq Scan on ex_mtarget t (actual rows=50 loops=1) - -> Sort (actual rows=100 loops=1) - Sort Key: s.a - Sort Method: quicksort Memory: xxx - -> Seq Scan on ex_msource s (actual rows=100 loops=1) -(12 rows) - --- updates + deletes -SELECT explain_merge(' -MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a -WHEN MATCHED AND t.a < 10 THEN - UPDATE SET b = t.b + 1 -WHEN MATCHED AND t.a >= 10 AND t.a <= 20 THEN - DELETE'); - explain_merge ----------------------------------------------------------------------- - Merge on ex_mtarget t (actual rows=0 loops=1) - Tuples: updated=5 deleted=5 skipped=40 - -> Merge Join (actual rows=50 loops=1) - Merge Cond: (t.a = s.a) - -> Sort (actual rows=50 loops=1) - Sort Key: t.a - Sort Method: quicksort Memory: xxx - -> Seq Scan on ex_mtarget t (actual rows=50 loops=1) - -> Sort (actual rows=100 loops=1) - Sort Key: s.a - Sort Method: quicksort Memory: xxx - -> Seq Scan on ex_msource s (actual rows=100 loops=1) -(12 rows) - --- only inserts -SELECT explain_merge(' -MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a -WHEN NOT MATCHED AND s.a < 10 THEN - INSERT VALUES (a, b)'); - explain_merge ----------------------------------------------------------------------- - Merge on ex_mtarget t (actual rows=0 loops=1) - Tuples: inserted=4 skipped=96 - -> Merge Left Join (actual rows=100 loops=1) - Merge Cond: (s.a = t.a) - -> Sort (actual rows=100 loops=1) - Sort Key: s.a - Sort Method: quicksort Memory: xxx - -> Seq Scan on ex_msource s (actual rows=100 loops=1) - -> Sort (actual rows=45 loops=1) - Sort Key: t.a - Sort Method: quicksort Memory: xxx - -> Seq Scan on ex_mtarget t (actual rows=45 loops=1) -(12 rows) - --- all three -SELECT explain_merge(' -MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a -WHEN MATCHED AND t.a < 10 THEN - UPDATE SET b = t.b + 1 -WHEN MATCHED AND t.a >= 30 AND t.a <= 40 THEN - DELETE -WHEN NOT MATCHED AND s.a < 20 THEN - INSERT VALUES (a, b)'); - explain_merge ----------------------------------------------------------------------- - Merge on ex_mtarget t (actual rows=0 loops=1) - Tuples: inserted=10 updated=9 deleted=5 skipped=76 - -> Merge Left Join (actual rows=100 loops=1) - Merge Cond: (s.a = t.a) - -> Sort (actual rows=100 loops=1) - 
Sort Key: s.a - Sort Method: quicksort Memory: xxx - -> Seq Scan on ex_msource s (actual rows=100 loops=1) - -> Sort (actual rows=49 loops=1) - Sort Key: t.a - Sort Method: quicksort Memory: xxx - -> Seq Scan on ex_mtarget t (actual rows=49 loops=1) -(12 rows) - --- nothing -SELECT explain_merge(' -MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a AND t.a < -1000 -WHEN MATCHED AND t.a < 10 THEN - DO NOTHING'); - explain_merge --------------------------------------------------------------------- - Merge on ex_mtarget t (actual rows=0 loops=1) - -> Merge Join (actual rows=0 loops=1) - Merge Cond: (t.a = s.a) - -> Sort (actual rows=0 loops=1) - Sort Key: t.a - Sort Method: quicksort Memory: xxx - -> Seq Scan on ex_mtarget t (actual rows=0 loops=1) - Filter: (a < '-1000'::integer) - Rows Removed by Filter: 54 - -> Sort (never executed) - Sort Key: s.a - -> Seq Scan on ex_msource s (never executed) -(12 rows) - -DROP TABLE ex_msource, ex_mtarget; -DROP FUNCTION explain_merge(text); --- Subqueries -BEGIN; -MERGE INTO sq_target t -USING v -ON tid = sid -WHEN MATCHED THEN - UPDATE SET balance = (SELECT count(*) FROM sq_target); -SELECT * FROM sq_target WHERE tid = 1; - tid | balance ------+--------- - 1 | 3 -(1 row) - -ROLLBACK; -BEGIN; -MERGE INTO sq_target t -USING v -ON tid = sid -WHEN MATCHED AND (SELECT count(*) > 0 FROM sq_target) THEN - UPDATE SET balance = 42; -SELECT * FROM sq_target WHERE tid = 1; - tid | balance ------+--------- - 1 | 42 -(1 row) - -ROLLBACK; -BEGIN; -MERGE INTO sq_target t -USING v -ON tid = sid AND (SELECT count(*) > 0 FROM sq_target) -WHEN MATCHED THEN - UPDATE SET balance = 42; -SELECT * FROM sq_target WHERE tid = 1; - tid | balance ------+--------- - 1 | 42 -(1 row) - -ROLLBACK; -DROP TABLE sq_target, sq_source CASCADE; -NOTICE: drop cascades to view v -CREATE TABLE pa_target (tid integer, balance float, val text) - PARTITION BY LIST (tid); -CREATE TABLE part1 PARTITION OF pa_target FOR VALUES IN (1,4) - WITH (autovacuum_enabled=off); -CREATE TABLE part2 PARTITION OF pa_target FOR VALUES IN (2,5,6) - WITH (autovacuum_enabled=off); -CREATE TABLE part3 PARTITION OF pa_target FOR VALUES IN (3,8,9) - WITH (autovacuum_enabled=off); -CREATE TABLE part4 PARTITION OF pa_target DEFAULT - WITH (autovacuum_enabled=off); -CREATE TABLE pa_source (sid integer, delta float); --- insert many rows to the source table -INSERT INTO pa_source SELECT id, id * 10 FROM generate_series(1,14) AS id; --- insert a few rows in the target table (odd numbered tid) -INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,14,2) AS id; --- try simple MERGE -BEGIN; -MERGE INTO pa_target t - USING pa_source s - ON t.tid = s.sid - WHEN MATCHED THEN - UPDATE SET balance = balance + delta, val = val || ' updated by merge' - WHEN NOT MATCHED THEN - INSERT VALUES (sid, delta, 'inserted by merge'); -SELECT * FROM pa_target ORDER BY tid; - tid | balance | val ------+---------+-------------------------- - 1 | 110 | initial updated by merge - 2 | 20 | inserted by merge - 3 | 330 | initial updated by merge - 4 | 40 | inserted by merge - 5 | 550 | initial updated by merge - 6 | 60 | inserted by merge - 7 | 770 | initial updated by merge - 8 | 80 | inserted by merge - 9 | 990 | initial updated by merge - 10 | 100 | inserted by merge - 11 | 1210 | initial updated by merge - 12 | 120 | inserted by merge - 13 | 1430 | initial updated by merge - 14 | 140 | inserted by merge -(14 rows) - -ROLLBACK; --- same with a constant qual -BEGIN; -MERGE INTO pa_target t - USING pa_source s - ON t.tid 
= s.sid AND tid = 1 - WHEN MATCHED THEN - UPDATE SET balance = balance + delta, val = val || ' updated by merge' - WHEN NOT MATCHED THEN - INSERT VALUES (sid, delta, 'inserted by merge'); -SELECT * FROM pa_target ORDER BY tid; - tid | balance | val ------+---------+-------------------------- - 1 | 110 | initial updated by merge - 2 | 20 | inserted by merge - 3 | 30 | inserted by merge - 3 | 300 | initial - 4 | 40 | inserted by merge - 5 | 500 | initial - 5 | 50 | inserted by merge - 6 | 60 | inserted by merge - 7 | 700 | initial - 7 | 70 | inserted by merge - 8 | 80 | inserted by merge - 9 | 90 | inserted by merge - 9 | 900 | initial - 10 | 100 | inserted by merge - 11 | 1100 | initial - 11 | 110 | inserted by merge - 12 | 120 | inserted by merge - 13 | 1300 | initial - 13 | 130 | inserted by merge - 14 | 140 | inserted by merge -(20 rows) - -ROLLBACK; --- try updating the partition key column -BEGIN; -CREATE FUNCTION merge_func() RETURNS integer LANGUAGE plpgsql AS $$ -DECLARE - result integer; -BEGIN -MERGE INTO pa_target t - USING pa_source s - ON t.tid = s.sid - WHEN MATCHED THEN - UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' - WHEN NOT MATCHED THEN - INSERT VALUES (sid, delta, 'inserted by merge'); -IF FOUND THEN - GET DIAGNOSTICS result := ROW_COUNT; -END IF; -RETURN result; -END; -$$; -SELECT merge_func(); - merge_func ------------- - 14 -(1 row) - -SELECT * FROM pa_target ORDER BY tid; - tid | balance | val ------+---------+-------------------------- - 2 | 110 | initial updated by merge - 2 | 20 | inserted by merge - 4 | 40 | inserted by merge - 4 | 330 | initial updated by merge - 6 | 550 | initial updated by merge - 6 | 60 | inserted by merge - 8 | 80 | inserted by merge - 8 | 770 | initial updated by merge - 10 | 990 | initial updated by merge - 10 | 100 | inserted by merge - 12 | 1210 | initial updated by merge - 12 | 120 | inserted by merge - 14 | 1430 | initial updated by merge - 14 | 140 | inserted by merge -(14 rows) - -ROLLBACK; -DROP TABLE pa_target CASCADE; --- The target table is partitioned in the same way, but this time by attaching --- partitions which have columns in different order, dropped columns etc. 
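The partition-key update in the preceding test behaves like a partition-key UPDATE: a row whose new tid no longer fits its LIST partition is moved to the matching partition, which is why every surviving row ends up under an even tid. Stripped of the PL/pgSQL wrapper, the statement plus a check of where each row landed looks roughly like this (a sketch; tableoid::regclass reports the destination partition):

    MERGE INTO pa_target t
    USING pa_source s ON t.tid = s.sid
    WHEN MATCHED THEN
        UPDATE SET tid = tid + 1,
                   balance = balance + delta,
                   val = val || ' updated by merge'
    WHEN NOT MATCHED THEN
        INSERT VALUES (sid, delta, 'inserted by merge');

    SELECT tableoid::regclass AS partition, * FROM pa_target ORDER BY tid;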
-CREATE TABLE pa_target (tid integer, balance float, val text) - PARTITION BY LIST (tid); -CREATE TABLE part1 (tid integer, balance float, val text) - WITH (autovacuum_enabled=off); -CREATE TABLE part2 (balance float, tid integer, val text) - WITH (autovacuum_enabled=off); -CREATE TABLE part3 (tid integer, balance float, val text) - WITH (autovacuum_enabled=off); -CREATE TABLE part4 (extraid text, tid integer, balance float, val text) - WITH (autovacuum_enabled=off); -ALTER TABLE part4 DROP COLUMN extraid; -ALTER TABLE pa_target ATTACH PARTITION part1 FOR VALUES IN (1,4); -ALTER TABLE pa_target ATTACH PARTITION part2 FOR VALUES IN (2,5,6); -ALTER TABLE pa_target ATTACH PARTITION part3 FOR VALUES IN (3,8,9); -ALTER TABLE pa_target ATTACH PARTITION part4 DEFAULT; --- insert a few rows in the target table (odd numbered tid) -INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,14,2) AS id; --- try simple MERGE -BEGIN; -DO $$ -DECLARE - result integer; -BEGIN -MERGE INTO pa_target t - USING pa_source s - ON t.tid = s.sid - WHEN MATCHED THEN - UPDATE SET balance = balance + delta, val = val || ' updated by merge' - WHEN NOT MATCHED THEN - INSERT VALUES (sid, delta, 'inserted by merge'); -GET DIAGNOSTICS result := ROW_COUNT; -RAISE NOTICE 'ROW_COUNT = %', result; -END; -$$; -NOTICE: ROW_COUNT = 14 -SELECT * FROM pa_target ORDER BY tid; - tid | balance | val ------+---------+-------------------------- - 1 | 110 | initial updated by merge - 2 | 20 | inserted by merge - 3 | 330 | initial updated by merge - 4 | 40 | inserted by merge - 5 | 550 | initial updated by merge - 6 | 60 | inserted by merge - 7 | 770 | initial updated by merge - 8 | 80 | inserted by merge - 9 | 990 | initial updated by merge - 10 | 100 | inserted by merge - 11 | 1210 | initial updated by merge - 12 | 120 | inserted by merge - 13 | 1430 | initial updated by merge - 14 | 140 | inserted by merge -(14 rows) - -ROLLBACK; --- same with a constant qual -BEGIN; -MERGE INTO pa_target t - USING pa_source s - ON t.tid = s.sid AND tid IN (1, 5) - WHEN MATCHED AND tid % 5 = 0 THEN DELETE - WHEN MATCHED THEN - UPDATE SET balance = balance + delta, val = val || ' updated by merge' - WHEN NOT MATCHED THEN - INSERT VALUES (sid, delta, 'inserted by merge'); -SELECT * FROM pa_target ORDER BY tid; - tid | balance | val ------+---------+-------------------------- - 1 | 110 | initial updated by merge - 2 | 20 | inserted by merge - 3 | 30 | inserted by merge - 3 | 300 | initial - 4 | 40 | inserted by merge - 6 | 60 | inserted by merge - 7 | 700 | initial - 7 | 70 | inserted by merge - 8 | 80 | inserted by merge - 9 | 900 | initial - 9 | 90 | inserted by merge - 10 | 100 | inserted by merge - 11 | 110 | inserted by merge - 11 | 1100 | initial - 12 | 120 | inserted by merge - 13 | 1300 | initial - 13 | 130 | inserted by merge - 14 | 140 | inserted by merge -(18 rows) - -ROLLBACK; --- try updating the partition key column -BEGIN; -DO $$ -DECLARE - result integer; -BEGIN -MERGE INTO pa_target t - USING pa_source s - ON t.tid = s.sid - WHEN MATCHED THEN - UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' - WHEN NOT MATCHED THEN - INSERT VALUES (sid, delta, 'inserted by merge'); -GET DIAGNOSTICS result := ROW_COUNT; -RAISE NOTICE 'ROW_COUNT = %', result; -END; -$$; -NOTICE: ROW_COUNT = 14 -SELECT * FROM pa_target ORDER BY tid; - tid | balance | val ------+---------+-------------------------- - 2 | 110 | initial updated by merge - 2 | 20 | inserted by merge - 4 | 40 | inserted by merge - 4 | 330 
| initial updated by merge - 6 | 550 | initial updated by merge - 6 | 60 | inserted by merge - 8 | 80 | inserted by merge - 8 | 770 | initial updated by merge - 10 | 990 | initial updated by merge - 10 | 100 | inserted by merge - 12 | 1210 | initial updated by merge - 12 | 120 | inserted by merge - 14 | 1430 | initial updated by merge - 14 | 140 | inserted by merge -(14 rows) - -ROLLBACK; --- as above, but blocked by BEFORE DELETE ROW trigger -BEGIN; -CREATE FUNCTION trig_fn() RETURNS trigger LANGUAGE plpgsql AS - $$ BEGIN RETURN NULL; END; $$; -CREATE TRIGGER del_trig BEFORE DELETE ON pa_target - FOR EACH ROW EXECUTE PROCEDURE trig_fn(); -DO $$ -DECLARE - result integer; -BEGIN -MERGE INTO pa_target t - USING pa_source s - ON t.tid = s.sid - WHEN MATCHED THEN - UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' - WHEN NOT MATCHED THEN - INSERT VALUES (sid, delta, 'inserted by merge'); -GET DIAGNOSTICS result := ROW_COUNT; -RAISE NOTICE 'ROW_COUNT = %', result; -END; -$$; -NOTICE: ROW_COUNT = 10 -SELECT * FROM pa_target ORDER BY tid; - tid | balance | val ------+---------+-------------------------- - 1 | 100 | initial - 2 | 20 | inserted by merge - 3 | 300 | initial - 4 | 40 | inserted by merge - 6 | 550 | initial updated by merge - 6 | 60 | inserted by merge - 7 | 700 | initial - 8 | 80 | inserted by merge - 9 | 900 | initial - 10 | 100 | inserted by merge - 12 | 1210 | initial updated by merge - 12 | 120 | inserted by merge - 14 | 1430 | initial updated by merge - 14 | 140 | inserted by merge -(14 rows) - -ROLLBACK; --- as above, but blocked by BEFORE INSERT ROW trigger -BEGIN; -CREATE FUNCTION trig_fn() RETURNS trigger LANGUAGE plpgsql AS - $$ BEGIN RETURN NULL; END; $$; -CREATE TRIGGER ins_trig BEFORE INSERT ON pa_target - FOR EACH ROW EXECUTE PROCEDURE trig_fn(); -DO $$ -DECLARE - result integer; -BEGIN -MERGE INTO pa_target t - USING pa_source s - ON t.tid = s.sid - WHEN MATCHED THEN - UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' - WHEN NOT MATCHED THEN - INSERT VALUES (sid, delta, 'inserted by merge'); -GET DIAGNOSTICS result := ROW_COUNT; -RAISE NOTICE 'ROW_COUNT = %', result; -END; -$$; -NOTICE: ROW_COUNT = 3 -SELECT * FROM pa_target ORDER BY tid; - tid | balance | val ------+---------+-------------------------- - 6 | 550 | initial updated by merge - 12 | 1210 | initial updated by merge - 14 | 1430 | initial updated by merge -(3 rows) - -ROLLBACK; --- test RLS enforcement -BEGIN; -ALTER TABLE pa_target ENABLE ROW LEVEL SECURITY; -ALTER TABLE pa_target FORCE ROW LEVEL SECURITY; -CREATE POLICY pa_target_pol ON pa_target USING (tid != 0); -MERGE INTO pa_target t - USING pa_source s - ON t.tid = s.sid AND t.tid IN (1,2,3,4) - WHEN MATCHED THEN - UPDATE SET tid = tid - 1; -ERROR: new row violates row-level security policy for table "pa_target" -ROLLBACK; -DROP TABLE pa_source; -DROP TABLE pa_target CASCADE; --- Sub-partitioning -CREATE TABLE pa_target (logts timestamp, tid integer, balance float, val text) - PARTITION BY RANGE (logts); -CREATE TABLE part_m01 PARTITION OF pa_target - FOR VALUES FROM ('2017-01-01') TO ('2017-02-01') - PARTITION BY LIST (tid); -CREATE TABLE part_m01_odd PARTITION OF part_m01 - FOR VALUES IN (1,3,5,7,9) WITH (autovacuum_enabled=off); -CREATE TABLE part_m01_even PARTITION OF part_m01 - FOR VALUES IN (2,4,6,8) WITH (autovacuum_enabled=off); -CREATE TABLE part_m02 PARTITION OF pa_target - FOR VALUES FROM ('2017-02-01') TO ('2017-03-01') - PARTITION BY LIST (tid); -CREATE TABLE 
part_m02_odd PARTITION OF part_m02 - FOR VALUES IN (1,3,5,7,9) WITH (autovacuum_enabled=off); -CREATE TABLE part_m02_even PARTITION OF part_m02 - FOR VALUES IN (2,4,6,8) WITH (autovacuum_enabled=off); -CREATE TABLE pa_source (sid integer, delta float) - WITH (autovacuum_enabled=off); --- insert many rows to the source table -INSERT INTO pa_source SELECT id, id * 10 FROM generate_series(1,14) AS id; --- insert a few rows in the target table (odd numbered tid) -INSERT INTO pa_target SELECT '2017-01-31', id, id * 100, 'initial' FROM generate_series(1,9,3) AS id; -INSERT INTO pa_target SELECT '2017-02-28', id, id * 100, 'initial' FROM generate_series(2,9,3) AS id; --- try simple MERGE -BEGIN; -MERGE INTO pa_target t - USING (SELECT '2017-01-15' AS slogts, * FROM pa_source WHERE sid < 10) s - ON t.tid = s.sid - WHEN MATCHED THEN - UPDATE SET balance = balance + delta, val = val || ' updated by merge' - WHEN NOT MATCHED THEN - INSERT VALUES (slogts::timestamp, sid, delta, 'inserted by merge'); -SELECT * FROM pa_target ORDER BY tid; - logts | tid | balance | val ---------------------------+-----+---------+-------------------------- - Tue Jan 31 00:00:00 2017 | 1 | 110 | initial updated by merge - Tue Feb 28 00:00:00 2017 | 2 | 220 | initial updated by merge - Sun Jan 15 00:00:00 2017 | 3 | 30 | inserted by merge - Tue Jan 31 00:00:00 2017 | 4 | 440 | initial updated by merge - Tue Feb 28 00:00:00 2017 | 5 | 550 | initial updated by merge - Sun Jan 15 00:00:00 2017 | 6 | 60 | inserted by merge - Tue Jan 31 00:00:00 2017 | 7 | 770 | initial updated by merge - Tue Feb 28 00:00:00 2017 | 8 | 880 | initial updated by merge - Sun Jan 15 00:00:00 2017 | 9 | 90 | inserted by merge -(9 rows) - -ROLLBACK; -DROP TABLE pa_source; -DROP TABLE pa_target CASCADE; --- Partitioned table with primary key -CREATE TABLE pa_target (tid integer PRIMARY KEY) PARTITION BY LIST (tid); -CREATE TABLE pa_targetp PARTITION OF pa_target DEFAULT; -CREATE TABLE pa_source (sid integer); -INSERT INTO pa_source VALUES (1), (2); -EXPLAIN (VERBOSE, COSTS OFF) -MERGE INTO pa_target t USING pa_source s ON t.tid = s.sid - WHEN NOT MATCHED THEN INSERT VALUES (s.sid); - QUERY PLAN -------------------------------------------------------------- - Merge on public.pa_target t - Merge on public.pa_targetp t_1 - -> Hash Left Join - Output: s.sid, s.ctid, t_1.tableoid, t_1.ctid - Inner Unique: true - Hash Cond: (s.sid = t_1.tid) - -> Seq Scan on public.pa_source s - Output: s.sid, s.ctid - -> Hash - Output: t_1.tid, t_1.tableoid, t_1.ctid - -> Seq Scan on public.pa_targetp t_1 - Output: t_1.tid, t_1.tableoid, t_1.ctid -(12 rows) - -MERGE INTO pa_target t USING pa_source s ON t.tid = s.sid - WHEN NOT MATCHED THEN INSERT VALUES (s.sid); -TABLE pa_target; - tid ------ - 1 - 2 -(2 rows) - --- Partition-less partitioned table --- (the bug we are checking for appeared only if table had partitions before) -DROP TABLE pa_targetp; -EXPLAIN (VERBOSE, COSTS OFF) -MERGE INTO pa_target t USING pa_source s ON t.tid = s.sid - WHEN NOT MATCHED THEN INSERT VALUES (s.sid); - QUERY PLAN --------------------------------------------- - Merge on public.pa_target t - -> Hash Left Join - Output: s.sid, s.ctid, t.ctid - Inner Unique: true - Hash Cond: (s.sid = t.tid) - -> Seq Scan on public.pa_source s - Output: s.sid, s.ctid - -> Hash - Output: t.tid, t.ctid - -> Result - Output: t.tid, t.ctid - One-Time Filter: false -(12 rows) - -MERGE INTO pa_target t USING pa_source s ON t.tid = s.sid - WHEN NOT MATCHED THEN INSERT VALUES (s.sid); -ERROR: no partition of relation 
"pa_target" found for row -DETAIL: Partition key of the failing row contains (tid) = (1). -DROP TABLE pa_source; -DROP TABLE pa_target CASCADE; --- some complex joins on the source side -CREATE TABLE cj_target (tid integer, balance float, val text) - WITH (autovacuum_enabled=off); -CREATE TABLE cj_source1 (sid1 integer, scat integer, delta integer) - WITH (autovacuum_enabled=off); -CREATE TABLE cj_source2 (sid2 integer, sval text) - WITH (autovacuum_enabled=off); -INSERT INTO cj_source1 VALUES (1, 10, 100); -INSERT INTO cj_source1 VALUES (1, 20, 200); -INSERT INTO cj_source1 VALUES (2, 20, 300); -INSERT INTO cj_source1 VALUES (3, 10, 400); -INSERT INTO cj_source2 VALUES (1, 'initial source2'); -INSERT INTO cj_source2 VALUES (2, 'initial source2'); -INSERT INTO cj_source2 VALUES (3, 'initial source2'); --- source relation is an unaliased join -MERGE INTO cj_target t -USING cj_source1 s1 - INNER JOIN cj_source2 s2 ON sid1 = sid2 -ON t.tid = sid1 -WHEN NOT MATCHED THEN - INSERT VALUES (sid1, delta, sval); --- try accessing columns from either side of the source join -MERGE INTO cj_target t -USING cj_source2 s2 - INNER JOIN cj_source1 s1 ON sid1 = sid2 AND scat = 20 -ON t.tid = sid1 -WHEN NOT MATCHED THEN - INSERT VALUES (sid2, delta, sval) -WHEN MATCHED THEN - DELETE; --- some simple expressions in INSERT targetlist -MERGE INTO cj_target t -USING cj_source2 s2 - INNER JOIN cj_source1 s1 ON sid1 = sid2 -ON t.tid = sid1 -WHEN NOT MATCHED THEN - INSERT VALUES (sid2, delta + scat, sval) -WHEN MATCHED THEN - UPDATE SET val = val || ' updated by merge'; -MERGE INTO cj_target t -USING cj_source2 s2 - INNER JOIN cj_source1 s1 ON sid1 = sid2 AND scat = 20 -ON t.tid = sid1 -WHEN MATCHED THEN - UPDATE SET val = val || ' ' || delta::text; -SELECT * FROM cj_target; - tid | balance | val ------+---------+---------------------------------- - 3 | 400 | initial source2 updated by merge - 1 | 220 | initial source2 200 - 1 | 110 | initial source2 200 - 2 | 320 | initial source2 300 -(4 rows) - --- try it with an outer join and PlaceHolderVar -MERGE INTO cj_target t -USING (SELECT *, 'join input'::text AS phv FROM cj_source1) fj - FULL JOIN cj_source2 fj2 ON fj.scat = fj2.sid2 * 10 -ON t.tid = fj.scat -WHEN NOT MATCHED THEN - INSERT (tid, balance, val) VALUES (fj.scat, fj.delta, fj.phv); -SELECT * FROM cj_target; - tid | balance | val ------+---------+---------------------------------- - 3 | 400 | initial source2 updated by merge - 1 | 220 | initial source2 200 - 1 | 110 | initial source2 200 - 2 | 320 | initial source2 300 - 10 | 100 | join input - 10 | 400 | join input - 20 | 200 | join input - 20 | 300 | join input - | | -(9 rows) - -ALTER TABLE cj_source1 RENAME COLUMN sid1 TO sid; -ALTER TABLE cj_source2 RENAME COLUMN sid2 TO sid; -TRUNCATE cj_target; -MERGE INTO cj_target t -USING cj_source1 s1 - INNER JOIN cj_source2 s2 ON s1.sid = s2.sid -ON t.tid = s1.sid -WHEN NOT MATCHED THEN - INSERT VALUES (s2.sid, delta, sval); -DROP TABLE cj_source2, cj_source1, cj_target; --- Function scans -CREATE TABLE fs_target (a int, b int, c text) - WITH (autovacuum_enabled=off); -MERGE INTO fs_target t -USING generate_series(1,100,1) AS id -ON t.a = id -WHEN MATCHED THEN - UPDATE SET b = b + id -WHEN NOT MATCHED THEN - INSERT VALUES (id, -1); -MERGE INTO fs_target t -USING generate_series(1,100,2) AS id -ON t.a = id -WHEN MATCHED THEN - UPDATE SET b = b + id, c = 'updated '|| id.*::text -WHEN NOT MATCHED THEN - INSERT VALUES (id, -1, 'inserted ' || id.*::text); -SELECT count(*) FROM fs_target; - count -------- - 100 -(1 
row) - -DROP TABLE fs_target; --- SERIALIZABLE test --- handled in isolation tests --- Inheritance-based partitioning -CREATE TABLE measurement ( - city_id int not null, - logdate date not null, - peaktemp int, - unitsales int -) WITH (autovacuum_enabled=off); -CREATE TABLE measurement_y2006m02 ( - CHECK ( logdate >= DATE '2006-02-01' AND logdate < DATE '2006-03-01' ) -) INHERITS (measurement) WITH (autovacuum_enabled=off); -CREATE TABLE measurement_y2006m03 ( - CHECK ( logdate >= DATE '2006-03-01' AND logdate < DATE '2006-04-01' ) -) INHERITS (measurement) WITH (autovacuum_enabled=off); -CREATE TABLE measurement_y2007m01 ( - filler text, - peaktemp int, - logdate date not null, - city_id int not null, - unitsales int - CHECK ( logdate >= DATE '2007-01-01' AND logdate < DATE '2007-02-01') -) WITH (autovacuum_enabled=off); -ALTER TABLE measurement_y2007m01 DROP COLUMN filler; -ALTER TABLE measurement_y2007m01 INHERIT measurement; -INSERT INTO measurement VALUES (0, '2005-07-21', 5, 15); -CREATE OR REPLACE FUNCTION measurement_insert_trigger() -RETURNS TRIGGER AS $$ -BEGIN - IF ( NEW.logdate >= DATE '2006-02-01' AND - NEW.logdate < DATE '2006-03-01' ) THEN - INSERT INTO measurement_y2006m02 VALUES (NEW.*); - ELSIF ( NEW.logdate >= DATE '2006-03-01' AND - NEW.logdate < DATE '2006-04-01' ) THEN - INSERT INTO measurement_y2006m03 VALUES (NEW.*); - ELSIF ( NEW.logdate >= DATE '2007-01-01' AND - NEW.logdate < DATE '2007-02-01' ) THEN - INSERT INTO measurement_y2007m01 (city_id, logdate, peaktemp, unitsales) - VALUES (NEW.*); - ELSE - RAISE EXCEPTION 'Date out of range. Fix the measurement_insert_trigger() function!'; - END IF; - RETURN NULL; -END; -$$ LANGUAGE plpgsql ; -CREATE TRIGGER insert_measurement_trigger - BEFORE INSERT ON measurement - FOR EACH ROW EXECUTE PROCEDURE measurement_insert_trigger(); -INSERT INTO measurement VALUES (1, '2006-02-10', 35, 10); -INSERT INTO measurement VALUES (1, '2006-02-16', 45, 20); -INSERT INTO measurement VALUES (1, '2006-03-17', 25, 10); -INSERT INTO measurement VALUES (1, '2006-03-27', 15, 40); -INSERT INTO measurement VALUES (1, '2007-01-15', 10, 10); -INSERT INTO measurement VALUES (1, '2007-01-17', 10, 10); -SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate; - tableoid | city_id | logdate | peaktemp | unitsales -----------------------+---------+------------+----------+----------- - measurement | 0 | 07-21-2005 | 5 | 15 - measurement_y2006m02 | 1 | 02-10-2006 | 35 | 10 - measurement_y2006m02 | 1 | 02-16-2006 | 45 | 20 - measurement_y2006m03 | 1 | 03-17-2006 | 25 | 10 - measurement_y2006m03 | 1 | 03-27-2006 | 15 | 40 - measurement_y2007m01 | 1 | 01-15-2007 | 10 | 10 - measurement_y2007m01 | 1 | 01-17-2007 | 10 | 10 -(7 rows) - -CREATE TABLE new_measurement (LIKE measurement) WITH (autovacuum_enabled=off); -INSERT INTO new_measurement VALUES (0, '2005-07-21', 25, 20); -INSERT INTO new_measurement VALUES (1, '2006-03-01', 20, 10); -INSERT INTO new_measurement VALUES (1, '2006-02-16', 50, 10); -INSERT INTO new_measurement VALUES (2, '2006-02-10', 20, 20); -INSERT INTO new_measurement VALUES (1, '2006-03-27', NULL, NULL); -INSERT INTO new_measurement VALUES (1, '2007-01-17', NULL, NULL); -INSERT INTO new_measurement VALUES (1, '2007-01-15', 5, NULL); -INSERT INTO new_measurement VALUES (1, '2007-01-16', 10, 10); -BEGIN; -MERGE INTO ONLY measurement m - USING new_measurement nm ON - (m.city_id = nm.city_id and m.logdate=nm.logdate) -WHEN MATCHED AND nm.peaktemp IS NULL THEN DELETE -WHEN MATCHED THEN UPDATE - SET peaktemp = 
greatest(m.peaktemp, nm.peaktemp), - unitsales = m.unitsales + coalesce(nm.unitsales, 0) -WHEN NOT MATCHED THEN INSERT - (city_id, logdate, peaktemp, unitsales) - VALUES (city_id, logdate, peaktemp, unitsales); -SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate, peaktemp; - tableoid | city_id | logdate | peaktemp | unitsales -----------------------+---------+------------+----------+----------- - measurement | 0 | 07-21-2005 | 25 | 35 - measurement_y2006m02 | 1 | 02-10-2006 | 35 | 10 - measurement_y2006m02 | 1 | 02-16-2006 | 45 | 20 - measurement_y2006m02 | 1 | 02-16-2006 | 50 | 10 - measurement_y2006m03 | 1 | 03-01-2006 | 20 | 10 - measurement_y2006m03 | 1 | 03-17-2006 | 25 | 10 - measurement_y2006m03 | 1 | 03-27-2006 | 15 | 40 - measurement_y2006m03 | 1 | 03-27-2006 | | - measurement_y2007m01 | 1 | 01-15-2007 | 5 | - measurement_y2007m01 | 1 | 01-15-2007 | 10 | 10 - measurement_y2007m01 | 1 | 01-16-2007 | 10 | 10 - measurement_y2007m01 | 1 | 01-17-2007 | 10 | 10 - measurement_y2007m01 | 1 | 01-17-2007 | | - measurement_y2006m02 | 2 | 02-10-2006 | 20 | 20 -(14 rows) - -ROLLBACK; -MERGE into measurement m - USING new_measurement nm ON - (m.city_id = nm.city_id and m.logdate=nm.logdate) -WHEN MATCHED AND nm.peaktemp IS NULL THEN DELETE -WHEN MATCHED THEN UPDATE - SET peaktemp = greatest(m.peaktemp, nm.peaktemp), - unitsales = m.unitsales + coalesce(nm.unitsales, 0) -WHEN NOT MATCHED THEN INSERT - (city_id, logdate, peaktemp, unitsales) - VALUES (city_id, logdate, peaktemp, unitsales); -SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate; - tableoid | city_id | logdate | peaktemp | unitsales -----------------------+---------+------------+----------+----------- - measurement | 0 | 07-21-2005 | 25 | 35 - measurement_y2006m02 | 1 | 02-10-2006 | 35 | 10 - measurement_y2006m02 | 1 | 02-16-2006 | 50 | 30 - measurement_y2006m03 | 1 | 03-01-2006 | 20 | 10 - measurement_y2006m03 | 1 | 03-17-2006 | 25 | 10 - measurement_y2007m01 | 1 | 01-15-2007 | 10 | 10 - measurement_y2007m01 | 1 | 01-16-2007 | 10 | 10 - measurement_y2006m02 | 2 | 02-10-2006 | 20 | 20 -(8 rows) - -BEGIN; -MERGE INTO new_measurement nm - USING ONLY measurement m ON - (nm.city_id = m.city_id and nm.logdate=m.logdate) -WHEN MATCHED THEN DELETE; -SELECT * FROM new_measurement ORDER BY city_id, logdate; - city_id | logdate | peaktemp | unitsales ----------+------------+----------+----------- - 1 | 02-16-2006 | 50 | 10 - 1 | 03-01-2006 | 20 | 10 - 1 | 03-27-2006 | | - 1 | 01-15-2007 | 5 | - 1 | 01-16-2007 | 10 | 10 - 1 | 01-17-2007 | | - 2 | 02-10-2006 | 20 | 20 -(7 rows) - -ROLLBACK; -MERGE INTO new_measurement nm - USING measurement m ON - (nm.city_id = m.city_id and nm.logdate=m.logdate) -WHEN MATCHED THEN DELETE; -SELECT * FROM new_measurement ORDER BY city_id, logdate; - city_id | logdate | peaktemp | unitsales ----------+------------+----------+----------- - 1 | 03-27-2006 | | - 1 | 01-17-2007 | | -(2 rows) - -DROP TABLE measurement, new_measurement CASCADE; -NOTICE: drop cascades to 3 other objects -DETAIL: drop cascades to table measurement_y2006m02 -drop cascades to table measurement_y2006m03 -drop cascades to table measurement_y2007m01 -DROP FUNCTION measurement_insert_trigger(); --- prepare -RESET SESSION AUTHORIZATION; -DROP TABLE target, target2; -DROP TABLE source, source2; -DROP FUNCTION merge_trigfunc(); -DROP USER regress_merge_privs; -DROP USER regress_merge_no_privs; -DROP USER regress_merge_none; +psql: error: connection to server on socket 
"/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/misc_functions.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/misc_functions.out --- /tmp/cirrus-ci-build/src/test/regress/expected/misc_functions.out 2024-03-13 23:12:37.625342000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/misc_functions.out 2024-03-13 23:14:29.462025000 +0000 @@ -1,705 +1,2 @@ --- directory paths and dlsuffix are passed to us in environment variables -\getenv libdir PG_LIBDIR -\getenv dlsuffix PG_DLSUFFIX -\set regresslib :libdir '/regress' :dlsuffix --- --- num_nulls() --- -SELECT num_nonnulls(NULL); - num_nonnulls --------------- - 0 -(1 row) - -SELECT num_nonnulls('1'); - num_nonnulls --------------- - 1 -(1 row) - -SELECT num_nonnulls(NULL::text); - num_nonnulls --------------- - 0 -(1 row) - -SELECT num_nonnulls(NULL::text, NULL::int); - num_nonnulls --------------- - 0 -(1 row) - -SELECT num_nonnulls(1, 2, NULL::text, NULL::point, '', int8 '9', 1.0 / NULL); - num_nonnulls --------------- - 4 -(1 row) - -SELECT num_nonnulls(VARIADIC '{1,2,NULL,3}'::int[]); - num_nonnulls --------------- - 3 -(1 row) - -SELECT num_nonnulls(VARIADIC '{"1","2","3","4"}'::text[]); - num_nonnulls --------------- - 4 -(1 row) - -SELECT num_nonnulls(VARIADIC ARRAY(SELECT CASE WHEN i <> 40 THEN i END FROM generate_series(1, 100) i)); - num_nonnulls --------------- - 99 -(1 row) - -SELECT num_nulls(NULL); - num_nulls ------------ - 1 -(1 row) - -SELECT num_nulls('1'); - num_nulls ------------ - 0 -(1 row) - -SELECT num_nulls(NULL::text); - num_nulls ------------ - 1 -(1 row) - -SELECT num_nulls(NULL::text, NULL::int); - num_nulls ------------ - 2 -(1 row) - -SELECT num_nulls(1, 2, NULL::text, NULL::point, '', int8 '9', 1.0 / NULL); - num_nulls ------------ - 3 -(1 row) - -SELECT num_nulls(VARIADIC '{1,2,NULL,3}'::int[]); - num_nulls ------------ - 1 -(1 row) - -SELECT num_nulls(VARIADIC '{"1","2","3","4"}'::text[]); - num_nulls ------------ - 0 -(1 row) - -SELECT num_nulls(VARIADIC ARRAY(SELECT CASE WHEN i <> 40 THEN i END FROM generate_series(1, 100) i)); - num_nulls ------------ - 1 -(1 row) - --- special cases -SELECT num_nonnulls(VARIADIC NULL::text[]); - num_nonnulls --------------- - -(1 row) - -SELECT num_nonnulls(VARIADIC '{}'::int[]); - num_nonnulls --------------- - 0 -(1 row) - -SELECT num_nulls(VARIADIC NULL::text[]); - num_nulls ------------ - -(1 row) - -SELECT num_nulls(VARIADIC '{}'::int[]); - num_nulls ------------ - 0 -(1 row) - --- should fail, one or more arguments is required -SELECT num_nonnulls(); -ERROR: function num_nonnulls() does not exist -LINE 1: SELECT num_nonnulls(); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -SELECT num_nulls(); -ERROR: function num_nulls() does not exist -LINE 1: SELECT num_nulls(); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
--- --- canonicalize_path() --- -CREATE FUNCTION test_canonicalize_path(text) - RETURNS text - AS :'regresslib' - LANGUAGE C STRICT IMMUTABLE; -SELECT test_canonicalize_path('/'); - test_canonicalize_path ------------------------- - / -(1 row) - -SELECT test_canonicalize_path('/./abc/def/'); - test_canonicalize_path ------------------------- - /abc/def -(1 row) - -SELECT test_canonicalize_path('/./../abc/def'); - test_canonicalize_path ------------------------- - /abc/def -(1 row) - -SELECT test_canonicalize_path('/./../../abc/def/'); - test_canonicalize_path ------------------------- - /abc/def -(1 row) - -SELECT test_canonicalize_path('/abc/.././def/ghi'); - test_canonicalize_path ------------------------- - /def/ghi -(1 row) - -SELECT test_canonicalize_path('/abc/./../def/ghi//'); - test_canonicalize_path ------------------------- - /def/ghi -(1 row) - -SELECT test_canonicalize_path('/abc/def/../..'); - test_canonicalize_path ------------------------- - / -(1 row) - -SELECT test_canonicalize_path('/abc/def/../../..'); - test_canonicalize_path ------------------------- - / -(1 row) - -SELECT test_canonicalize_path('/abc/def/../../../../ghi/jkl'); - test_canonicalize_path ------------------------- - /ghi/jkl -(1 row) - -SELECT test_canonicalize_path('.'); - test_canonicalize_path ------------------------- - . -(1 row) - -SELECT test_canonicalize_path('./'); - test_canonicalize_path ------------------------- - . -(1 row) - -SELECT test_canonicalize_path('./abc/..'); - test_canonicalize_path ------------------------- - . -(1 row) - -SELECT test_canonicalize_path('abc/../'); - test_canonicalize_path ------------------------- - . -(1 row) - -SELECT test_canonicalize_path('abc/../def'); - test_canonicalize_path ------------------------- - def -(1 row) - -SELECT test_canonicalize_path('..'); - test_canonicalize_path ------------------------- - .. -(1 row) - -SELECT test_canonicalize_path('../abc/def'); - test_canonicalize_path ------------------------- - ../abc/def -(1 row) - -SELECT test_canonicalize_path('../abc/..'); - test_canonicalize_path ------------------------- - .. -(1 row) - -SELECT test_canonicalize_path('../abc/../def'); - test_canonicalize_path ------------------------- - ../def -(1 row) - -SELECT test_canonicalize_path('../abc/../../def/ghi'); - test_canonicalize_path ------------------------- - ../../def/ghi -(1 row) - -SELECT test_canonicalize_path('./abc/./def/.'); - test_canonicalize_path ------------------------- - abc/def -(1 row) - -SELECT test_canonicalize_path('./abc/././def/.'); - test_canonicalize_path ------------------------- - abc/def -(1 row) - -SELECT test_canonicalize_path('./abc/./def/.././ghi/../../../jkl/mno'); - test_canonicalize_path ------------------------- - ../jkl/mno -(1 row) - --- --- pg_log_backend_memory_contexts() --- --- Memory contexts are logged and they are not returned to the function. --- Furthermore, their contents can vary depending on the timing. However, --- we can at least verify that the code doesn't fail, and that the --- permissions are set properly. 
--- -SELECT pg_log_backend_memory_contexts(pg_backend_pid()); - pg_log_backend_memory_contexts --------------------------------- - t -(1 row) - -SELECT pg_log_backend_memory_contexts(pid) FROM pg_stat_activity - WHERE backend_type = 'checkpointer'; - pg_log_backend_memory_contexts --------------------------------- - t -(1 row) - -CREATE ROLE regress_log_memory; -SELECT has_function_privilege('regress_log_memory', - 'pg_log_backend_memory_contexts(integer)', 'EXECUTE'); -- no - has_function_privilege ------------------------- - f -(1 row) - -GRANT EXECUTE ON FUNCTION pg_log_backend_memory_contexts(integer) - TO regress_log_memory; -SELECT has_function_privilege('regress_log_memory', - 'pg_log_backend_memory_contexts(integer)', 'EXECUTE'); -- yes - has_function_privilege ------------------------- - t -(1 row) - -SET ROLE regress_log_memory; -SELECT pg_log_backend_memory_contexts(pg_backend_pid()); - pg_log_backend_memory_contexts --------------------------------- - t -(1 row) - -RESET ROLE; -REVOKE EXECUTE ON FUNCTION pg_log_backend_memory_contexts(integer) - FROM regress_log_memory; -DROP ROLE regress_log_memory; --- --- Test some built-in SRFs --- --- The outputs of these are variable, so we can't just print their results --- directly, but we can at least verify that the code doesn't fail. --- -select setting as segsize -from pg_settings where name = 'wal_segment_size' -\gset -select count(*) > 0 as ok from pg_ls_waldir(); - ok ----- - t -(1 row) - --- Test ProjectSet as well as FunctionScan -select count(*) > 0 as ok from (select pg_ls_waldir()) ss; - ok ----- - t -(1 row) - --- Test not-run-to-completion cases. -select * from pg_ls_waldir() limit 0; - name | size | modification -------+------+-------------- -(0 rows) - -select count(*) > 0 as ok from (select * from pg_ls_waldir() limit 1) ss; - ok ----- - t -(1 row) - -select (w).size = :segsize as ok -from (select pg_ls_waldir() w) ss where length((w).name) = 24 limit 1; - ok ----- - t -(1 row) - -select count(*) >= 0 as ok from pg_ls_archive_statusdir(); - ok ----- - t -(1 row) - --- pg_read_file() -select length(pg_read_file('postmaster.pid')) > 20; - ?column? ----------- - t -(1 row) - -select length(pg_read_file('postmaster.pid', 1, 20)); - length --------- - 20 -(1 row) - --- Test missing_ok -select pg_read_file('does not exist'); -- error -ERROR: could not open file "does not exist" for reading: No such file or directory -select pg_read_file('does not exist', true) IS NULL; -- ok - ?column? ----------- - t -(1 row) - --- Test invalid argument -select pg_read_file('does not exist', 0, -1); -- error -ERROR: requested length cannot be negative -select pg_read_file('does not exist', 0, -1, true); -- error -ERROR: requested length cannot be negative --- pg_read_binary_file() -select length(pg_read_binary_file('postmaster.pid')) > 20; - ?column? ----------- - t -(1 row) - -select length(pg_read_binary_file('postmaster.pid', 1, 20)); - length --------- - 20 -(1 row) - --- Test missing_ok -select pg_read_binary_file('does not exist'); -- error -ERROR: could not open file "does not exist" for reading: No such file or directory -select pg_read_binary_file('does not exist', true) IS NULL; -- ok - ?column? 
----------- - t -(1 row) - --- Test invalid argument -select pg_read_binary_file('does not exist', 0, -1); -- error -ERROR: requested length cannot be negative -select pg_read_binary_file('does not exist', 0, -1, true); -- error -ERROR: requested length cannot be negative --- pg_stat_file() -select size > 20, isdir from pg_stat_file('postmaster.pid'); - ?column? | isdir -----------+------- - t | f -(1 row) - --- pg_ls_dir() -select * from (select pg_ls_dir('.') a) a where a = 'base' limit 1; - a ------- - base -(1 row) - --- Test missing_ok (second argument) -select pg_ls_dir('does not exist', false, false); -- error -ERROR: could not open directory "does not exist": No such file or directory -select pg_ls_dir('does not exist', true, false); -- ok - pg_ls_dir ------------ -(0 rows) - --- Test include_dot_dirs (third argument) -select count(*) = 1 as dot_found - from pg_ls_dir('.', false, true) as ls where ls = '.'; - dot_found ------------ - t -(1 row) - -select count(*) = 1 as dot_found - from pg_ls_dir('.', false, false) as ls where ls = '.'; - dot_found ------------ - f -(1 row) - --- pg_timezone_names() -select * from (select (pg_timezone_names()).name) ptn where name='UTC' limit 1; - name ------- - UTC -(1 row) - --- pg_tablespace_databases() -select count(*) > 0 from - (select pg_tablespace_databases(oid) as pts from pg_tablespace - where spcname = 'pg_default') pts - join pg_database db on pts.pts = db.oid; - ?column? ----------- - t -(1 row) - --- --- Test replication slot directory functions --- -CREATE ROLE regress_slot_dir_funcs; --- Not available by default. -SELECT has_function_privilege('regress_slot_dir_funcs', - 'pg_ls_logicalsnapdir()', 'EXECUTE'); - has_function_privilege ------------------------- - f -(1 row) - -SELECT has_function_privilege('regress_slot_dir_funcs', - 'pg_ls_logicalmapdir()', 'EXECUTE'); - has_function_privilege ------------------------- - f -(1 row) - -SELECT has_function_privilege('regress_slot_dir_funcs', - 'pg_ls_replslotdir(text)', 'EXECUTE'); - has_function_privilege ------------------------- - f -(1 row) - -GRANT pg_monitor TO regress_slot_dir_funcs; --- Role is now part of pg_monitor, so these are available. 
-SELECT has_function_privilege('regress_slot_dir_funcs', - 'pg_ls_logicalsnapdir()', 'EXECUTE'); - has_function_privilege ------------------------- - t -(1 row) - -SELECT has_function_privilege('regress_slot_dir_funcs', - 'pg_ls_logicalmapdir()', 'EXECUTE'); - has_function_privilege ------------------------- - t -(1 row) - -SELECT has_function_privilege('regress_slot_dir_funcs', - 'pg_ls_replslotdir(text)', 'EXECUTE'); - has_function_privilege ------------------------- - t -(1 row) - -DROP ROLE regress_slot_dir_funcs; --- --- Test adding a support function to a subject function --- -CREATE FUNCTION my_int_eq(int, int) RETURNS bool - LANGUAGE internal STRICT IMMUTABLE PARALLEL SAFE - AS $$int4eq$$; --- By default, planner does not think that's selective -EXPLAIN (COSTS OFF) -SELECT * FROM tenk1 a JOIN tenk1 b ON a.unique1 = b.unique1 -WHERE my_int_eq(a.unique2, 42); - QUERY PLAN ----------------------------------------------- - Hash Join - Hash Cond: (b.unique1 = a.unique1) - -> Seq Scan on tenk1 b - -> Hash - -> Seq Scan on tenk1 a - Filter: my_int_eq(unique2, 42) -(6 rows) - --- With support function that knows it's int4eq, we get a different plan -CREATE FUNCTION test_support_func(internal) - RETURNS internal - AS :'regresslib', 'test_support_func' - LANGUAGE C STRICT; -ALTER FUNCTION my_int_eq(int, int) SUPPORT test_support_func; -EXPLAIN (COSTS OFF) -SELECT * FROM tenk1 a JOIN tenk1 b ON a.unique1 = b.unique1 -WHERE my_int_eq(a.unique2, 42); - QUERY PLAN -------------------------------------------------- - Nested Loop - -> Seq Scan on tenk1 a - Filter: my_int_eq(unique2, 42) - -> Index Scan using tenk1_unique1 on tenk1 b - Index Cond: (unique1 = a.unique1) -(5 rows) - --- Also test non-default rowcount estimate -CREATE FUNCTION my_gen_series(int, int) RETURNS SETOF integer - LANGUAGE internal STRICT IMMUTABLE PARALLEL SAFE - AS $$generate_series_int4$$ - SUPPORT test_support_func; -EXPLAIN (COSTS OFF) -SELECT * FROM tenk1 a JOIN my_gen_series(1,1000) g ON a.unique1 = g; - QUERY PLAN ----------------------------------------- - Hash Join - Hash Cond: (g.g = a.unique1) - -> Function Scan on my_gen_series g - -> Hash - -> Seq Scan on tenk1 a -(5 rows) - -EXPLAIN (COSTS OFF) -SELECT * FROM tenk1 a JOIN my_gen_series(1,10) g ON a.unique1 = g; - QUERY PLAN -------------------------------------------------- - Nested Loop - -> Function Scan on my_gen_series g - -> Index Scan using tenk1_unique1 on tenk1 a - Index Cond: (unique1 = g.g) -(4 rows) - --- Test functions for control data -SELECT count(*) > 0 AS ok FROM pg_control_checkpoint(); - ok ----- - t -(1 row) - -SELECT count(*) > 0 AS ok FROM pg_control_init(); - ok ----- - t -(1 row) - -SELECT count(*) > 0 AS ok FROM pg_control_recovery(); - ok ----- - t -(1 row) - -SELECT count(*) > 0 AS ok FROM pg_control_system(); - ok ----- - t -(1 row) - --- pg_split_walfile_name, pg_walfile_name & pg_walfile_name_offset -SELECT * FROM pg_split_walfile_name(NULL); - segment_number | timeline_id -----------------+------------- - | -(1 row) - -SELECT * FROM pg_split_walfile_name('invalid'); -ERROR: invalid WAL file name "invalid" -SELECT segment_number > 0 AS ok_segment_number, timeline_id - FROM pg_split_walfile_name('000000010000000100000000'); - ok_segment_number | timeline_id --------------------+------------- - t | 1 -(1 row) - -SELECT segment_number > 0 AS ok_segment_number, timeline_id - FROM pg_split_walfile_name('ffffffFF00000001000000af'); - ok_segment_number | timeline_id --------------------+------------- - t | 4294967295 -(1 row) - -SELECT 
setting::int8 AS segment_size -FROM pg_settings -WHERE name = 'wal_segment_size' -\gset -SELECT segment_number, file_offset -FROM pg_walfile_name_offset('0/0'::pg_lsn + :segment_size), - pg_split_walfile_name(file_name); - segment_number | file_offset -----------------+------------- - 1 | 0 -(1 row) - -SELECT segment_number, file_offset -FROM pg_walfile_name_offset('0/0'::pg_lsn + :segment_size + 1), - pg_split_walfile_name(file_name); - segment_number | file_offset -----------------+------------- - 1 | 1 -(1 row) - -SELECT segment_number, file_offset = :segment_size - 1 -FROM pg_walfile_name_offset('0/0'::pg_lsn + :segment_size - 1), - pg_split_walfile_name(file_name); - segment_number | ?column? -----------------+---------- - 0 | t -(1 row) - --- test stratnum support functions -SELECT gist_stratnum_identity(3::smallint); - gist_stratnum_identity ------------------------- - 3 -(1 row) - -SELECT gist_stratnum_identity(18::smallint); - gist_stratnum_identity ------------------------- - 18 -(1 row) - --- pg_current_logfile -CREATE ROLE regress_current_logfile; --- not available by default -SELECT has_function_privilege('regress_current_logfile', - 'pg_current_logfile()', 'EXECUTE'); - has_function_privilege ------------------------- - f -(1 row) - -GRANT pg_monitor TO regress_current_logfile; --- role has privileges of pg_monitor and can execute the function -SELECT has_function_privilege('regress_current_logfile', - 'pg_current_logfile()', 'EXECUTE'); - has_function_privilege ------------------------- - t -(1 row) - -DROP ROLE regress_current_logfile; +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/sysviews.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/sysviews.out --- /tmp/cirrus-ci-build/src/test/regress/expected/sysviews.out 2024-03-13 23:12:37.627560000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/sysviews.out 2024-03-13 23:14:29.495084000 +0000 @@ -1,187 +1,2 @@ --- --- Test assorted system views --- --- This test is mainly meant to provide some code coverage for the --- set-returning functions that underlie certain system views. --- The output of most of these functions is very environment-dependent, --- so our ability to test with fixed expected output is pretty limited; --- but even a trivial check of count(*) will exercise the normal code path --- through the SRF. -select count(*) >= 0 as ok from pg_available_extension_versions; - ok ----- - t -(1 row) - -select count(*) >= 0 as ok from pg_available_extensions; - ok ----- - t -(1 row) - --- The entire output of pg_backend_memory_contexts is not stable, --- we test only the existence and basic condition of TopMemoryContext. -select name, ident, parent, level, total_bytes >= free_bytes - from pg_backend_memory_contexts where level = 0; - name | ident | parent | level | ?column? -------------------+-------+--------+-------+---------- - TopMemoryContext | | | 0 | t -(1 row) - --- At introduction, pg_config had 23 entries; it may grow -select count(*) > 20 as ok from pg_config; - ok ----- - t -(1 row) - --- We expect no cursors in this test; see also portals.sql -select count(*) = 0 as ok from pg_cursors; - ok ----- - t -(1 row) - -select count(*) >= 0 as ok from pg_file_settings; - ok ----- - t -(1 row) - --- There will surely be at least one rule, with no errors. 
-select count(*) > 0 as ok, count(*) FILTER (WHERE error IS NOT NULL) = 0 AS no_err - from pg_hba_file_rules; - ok | no_err -----+-------- - t | t -(1 row) - --- There may be no rules, and there should be no errors. -select count(*) >= 0 as ok, count(*) FILTER (WHERE error IS NOT NULL) = 0 AS no_err - from pg_ident_file_mappings; - ok | no_err -----+-------- - t | t -(1 row) - --- There will surely be at least one active lock -select count(*) > 0 as ok from pg_locks; - ok ----- - t -(1 row) - --- We expect no prepared statements in this test; see also prepare.sql -select count(*) = 0 as ok from pg_prepared_statements; - ok ----- - t -(1 row) - --- See also prepared_xacts.sql -select count(*) >= 0 as ok from pg_prepared_xacts; - ok ----- - t -(1 row) - --- There will surely be at least one SLRU cache -select count(*) > 0 as ok from pg_stat_slru; - ok ----- - t -(1 row) - --- There must be only one record -select count(*) = 1 as ok from pg_stat_wal; - ok ----- - t -(1 row) - --- We expect no walreceiver running in this test -select count(*) = 0 as ok from pg_stat_wal_receiver; - ok ----- - t -(1 row) - --- This is to record the prevailing planner enable_foo settings during --- a regression test run. -select name, setting from pg_settings where name like 'enable%'; - name | setting ---------------------------------+--------- - enable_async_append | on - enable_bitmapscan | on - enable_gathermerge | on - enable_group_by_reordering | on - enable_hashagg | on - enable_hashjoin | on - enable_incremental_sort | on - enable_indexonlyscan | on - enable_indexscan | on - enable_material | on - enable_memoize | on - enable_mergejoin | on - enable_nestloop | on - enable_parallel_append | on - enable_parallel_hash | on - enable_partition_pruning | on - enable_partitionwise_aggregate | off - enable_partitionwise_join | off - enable_presorted_aggregate | on - enable_self_join_removal | on - enable_seqscan | on - enable_sort | on - enable_tidscan | on -(23 rows) - --- There are always wait event descriptions for various types. -select type, count(*) > 0 as ok FROM pg_wait_events - group by type order by type COLLATE "C"; - type | ok ------------+---- - Activity | t - BufferPin | t - Client | t - Extension | t - IO | t - IPC | t - LWLock | t - Lock | t - Timeout | t -(9 rows) - --- Test that the pg_timezone_names and pg_timezone_abbrevs views are --- more-or-less working. We can't test their contents in any great detail --- without the outputs changing anytime IANA updates the underlying data, --- but it seems reasonable to expect at least one entry per major meridian. --- (At the time of writing, the actual counts are around 38 because of --- zones using fractional GMT offsets, so this is a pretty loose test.) -select count(distinct utc_offset) >= 24 as ok from pg_timezone_names; - ok ----- - t -(1 row) - -select count(distinct utc_offset) >= 24 as ok from pg_timezone_abbrevs; - ok ----- - t -(1 row) - --- Let's check the non-default timezone abbreviation sets, too -set timezone_abbreviations = 'Australia'; -select count(distinct utc_offset) >= 24 as ok from pg_timezone_abbrevs; - ok ----- - t -(1 row) - -set timezone_abbreviations = 'India'; -select count(distinct utc_offset) >= 24 as ok from pg_timezone_abbrevs; - ok ----- - t -(1 row) - +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
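(Editorial aside, not part of the regression output: the sysviews.out diff above, like the neighbouring ones, does not show a test-specific failure. The entire expected output is missing because psql could not connect at all; the server reported that it was still in recovery, "Consistent recovery state has not been yet reached", when pg_regress tried to run the script. A minimal, hypothetical check from a psql session to confirm the server has left recovery before re-running the suite:

    SELECT pg_is_in_recovery();  -- returns f once recovery is complete and normal connections are accepted
)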
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/tsrf.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tsrf.out --- /tmp/cirrus-ci-build/src/test/regress/expected/tsrf.out 2024-03-13 23:12:37.628099000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tsrf.out 2024-03-13 23:14:29.467118000 +0000 @@ -1,712 +1,2 @@ --- --- tsrf - targetlist set returning function tests --- --- simple srf -SELECT generate_series(1, 3); - generate_series ------------------ - 1 - 2 - 3 -(3 rows) - --- parallel iteration -SELECT generate_series(1, 3), generate_series(3,5); - generate_series | generate_series ------------------+----------------- - 1 | 3 - 2 | 4 - 3 | 5 -(3 rows) - --- parallel iteration, different number of rows -SELECT generate_series(1, 2), generate_series(1,4); - generate_series | generate_series ------------------+----------------- - 1 | 1 - 2 | 2 - | 3 - | 4 -(4 rows) - --- srf, with SRF argument -SELECT generate_series(1, generate_series(1, 3)); - generate_series ------------------ - 1 - 1 - 2 - 1 - 2 - 3 -(6 rows) - --- but we've traditionally rejected the same in FROM -SELECT * FROM generate_series(1, generate_series(1, 3)); -ERROR: set-returning functions must appear at top level of FROM -LINE 1: SELECT * FROM generate_series(1, generate_series(1, 3)); - ^ --- srf, with two SRF arguments -SELECT generate_series(generate_series(1,3), generate_series(2, 4)); - generate_series ------------------ - 1 - 2 - 2 - 3 - 3 - 4 -(6 rows) - --- check proper nesting of SRFs in different expressions -explain (verbose, costs off) -SELECT generate_series(1, generate_series(1, 3)), generate_series(2, 4); - QUERY PLAN --------------------------------------------------------------------------------- - ProjectSet - Output: generate_series(1, (generate_series(1, 3))), (generate_series(2, 4)) - -> ProjectSet - Output: generate_series(1, 3), generate_series(2, 4) - -> Result -(5 rows) - -SELECT generate_series(1, generate_series(1, 3)), generate_series(2, 4); - generate_series | generate_series ------------------+----------------- - 1 | 2 - 1 | 3 - 2 | 3 - 1 | 4 - 2 | 4 - 3 | 4 -(6 rows) - -CREATE TABLE few(id int, dataa text, datab text); -INSERT INTO few VALUES(1, 'a', 'foo'),(2, 'a', 'bar'),(3, 'b', 'bar'); --- SRF with a provably-dummy relation -explain (verbose, costs off) -SELECT unnest(ARRAY[1, 2]) FROM few WHERE false; - QUERY PLAN --------------------------------------- - ProjectSet - Output: unnest('{1,2}'::integer[]) - -> Result - One-Time Filter: false -(4 rows) - -SELECT unnest(ARRAY[1, 2]) FROM few WHERE false; - unnest --------- -(0 rows) - --- SRF shouldn't prevent upper query from recognizing lower as dummy -explain (verbose, costs off) -SELECT * FROM few f1, - (SELECT unnest(ARRAY[1,2]) FROM few f2 WHERE false OFFSET 0) ss; - QUERY PLAN ------------------------------------------------- - Result - Output: f1.id, f1.dataa, f1.datab, ss.unnest - One-Time Filter: false -(3 rows) - -SELECT * FROM few f1, - (SELECT unnest(ARRAY[1,2]) FROM few f2 WHERE false OFFSET 0) ss; - id | dataa | datab | unnest -----+-------+-------+-------- -(0 rows) - --- SRF output order of sorting is maintained, if SRF is not referenced -SELECT few.id, generate_series(1,3) g FROM few ORDER BY id DESC; - id | g -----+--- - 3 | 1 - 3 | 2 - 3 | 3 - 2 | 1 - 2 | 2 - 2 | 3 - 1 | 1 - 1 | 2 - 1 | 3 -(9 rows) - --- but SRFs can be referenced in sort -SELECT few.id, generate_series(1,3) g FROM few ORDER BY id, g DESC; - id | g -----+--- - 1 | 3 - 1 | 2 - 1 | 1 - 2 | 3 - 2 | 2 - 2 | 1 - 3 | 3 - 
3 | 2 - 3 | 1 -(9 rows) - -SELECT few.id, generate_series(1,3) g FROM few ORDER BY id, generate_series(1,3) DESC; - id | g -----+--- - 1 | 3 - 1 | 2 - 1 | 1 - 2 | 3 - 2 | 2 - 2 | 1 - 3 | 3 - 3 | 2 - 3 | 1 -(9 rows) - --- it's weird to have ORDER BYs that increase the number of results -SELECT few.id FROM few ORDER BY id, generate_series(1,3) DESC; - id ----- - 1 - 1 - 1 - 2 - 2 - 2 - 3 - 3 - 3 -(9 rows) - --- SRFs are computed after aggregation -SET enable_hashagg TO 0; -- stable output order -SELECT few.dataa, count(*), min(id), max(id), unnest('{1,1,3}'::int[]) FROM few WHERE few.id = 1 GROUP BY few.dataa; - dataa | count | min | max | unnest --------+-------+-----+-----+-------- - a | 1 | 1 | 1 | 1 - a | 1 | 1 | 1 | 1 - a | 1 | 1 | 1 | 3 -(3 rows) - --- unless referenced in GROUP BY clause -SELECT few.dataa, count(*), min(id), max(id), unnest('{1,1,3}'::int[]) FROM few WHERE few.id = 1 GROUP BY few.dataa, unnest('{1,1,3}'::int[]); - dataa | count | min | max | unnest --------+-------+-----+-----+-------- - a | 2 | 1 | 1 | 1 - a | 1 | 1 | 1 | 3 -(2 rows) - -SELECT few.dataa, count(*), min(id), max(id), unnest('{1,1,3}'::int[]) FROM few WHERE few.id = 1 GROUP BY few.dataa, 5; - dataa | count | min | max | unnest --------+-------+-----+-----+-------- - a | 2 | 1 | 1 | 1 - a | 1 | 1 | 1 | 3 -(2 rows) - -RESET enable_hashagg; --- check HAVING works when GROUP BY does [not] reference SRF output -SELECT dataa, generate_series(1,1), count(*) FROM few GROUP BY 1 HAVING count(*) > 1; - dataa | generate_series | count --------+-----------------+------- - a | 1 | 2 -(1 row) - -SELECT dataa, generate_series(1,1), count(*) FROM few GROUP BY 1, 2 HAVING count(*) > 1; - dataa | generate_series | count --------+-----------------+------- - a | 1 | 2 -(1 row) - --- it's weird to have GROUP BYs that increase the number of results -SELECT few.dataa, count(*) FROM few WHERE dataa = 'a' GROUP BY few.dataa ORDER BY 2; - dataa | count --------+------- - a | 2 -(1 row) - -SELECT few.dataa, count(*) FROM few WHERE dataa = 'a' GROUP BY few.dataa, unnest('{1,1,3}'::int[]) ORDER BY 2; - dataa | count --------+------- - a | 2 - a | 4 -(2 rows) - --- SRFs are not allowed if they'd need to be conditionally executed -SELECT q1, case when q1 > 0 then generate_series(1,3) else 0 end FROM int8_tbl; -ERROR: set-returning functions are not allowed in CASE -LINE 1: SELECT q1, case when q1 > 0 then generate_series(1,3) else 0... - ^ -HINT: You might be able to move the set-returning function into a LATERAL FROM item. -SELECT q1, coalesce(generate_series(1,3), 0) FROM int8_tbl; -ERROR: set-returning functions are not allowed in COALESCE -LINE 1: SELECT q1, coalesce(generate_series(1,3), 0) FROM int8_tbl; - ^ -HINT: You might be able to move the set-returning function into a LATERAL FROM item. --- SRFs are not allowed in aggregate arguments -SELECT min(generate_series(1, 3)) FROM few; -ERROR: aggregate function calls cannot contain set-returning function calls -LINE 1: SELECT min(generate_series(1, 3)) FROM few; - ^ -HINT: You might be able to move the set-returning function into a LATERAL FROM item. --- ... 
unless they're within a sub-select -SELECT sum((3 = ANY(SELECT generate_series(1,4)))::int); - sum ------ - 1 -(1 row) - -SELECT sum((3 = ANY(SELECT lag(x) over(order by x) - FROM generate_series(1,4) x))::int); - sum ------ - 1 -(1 row) - --- SRFs are not allowed in window function arguments, either -SELECT min(generate_series(1, 3)) OVER() FROM few; -ERROR: window function calls cannot contain set-returning function calls -LINE 1: SELECT min(generate_series(1, 3)) OVER() FROM few; - ^ -HINT: You might be able to move the set-returning function into a LATERAL FROM item. --- SRFs are normally computed after window functions -SELECT id,lag(id) OVER(), count(*) OVER(), generate_series(1,3) FROM few; - id | lag | count | generate_series -----+-----+-------+----------------- - 1 | | 3 | 1 - 1 | | 3 | 2 - 1 | | 3 | 3 - 2 | 1 | 3 | 1 - 2 | 1 | 3 | 2 - 2 | 1 | 3 | 3 - 3 | 2 | 3 | 1 - 3 | 2 | 3 | 2 - 3 | 2 | 3 | 3 -(9 rows) - --- unless referencing SRFs -SELECT SUM(count(*)) OVER(PARTITION BY generate_series(1,3) ORDER BY generate_series(1,3)), generate_series(1,3) g FROM few GROUP BY g; - sum | g ------+--- - 3 | 1 - 3 | 2 - 3 | 3 -(3 rows) - --- sorting + grouping -SELECT few.dataa, count(*), min(id), max(id), generate_series(1,3) FROM few GROUP BY few.dataa ORDER BY 5, 1; - dataa | count | min | max | generate_series --------+-------+-----+-----+----------------- - a | 2 | 1 | 2 | 1 - b | 1 | 3 | 3 | 1 - a | 2 | 1 | 2 | 2 - b | 1 | 3 | 3 | 2 - a | 2 | 1 | 2 | 3 - b | 1 | 3 | 3 | 3 -(6 rows) - --- grouping sets are a bit special, they produce NULLs in columns not actually NULL -set enable_hashagg = false; -SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab); - dataa | b | g | count --------+-----+---+------- - a | bar | 1 | 1 - a | bar | 2 | 1 - a | foo | 1 | 1 - a | foo | 2 | 1 - a | | 1 | 2 - a | | 2 | 2 - b | bar | 1 | 1 - b | bar | 2 | 1 - b | | 1 | 1 - b | | 2 | 1 - | | 1 | 3 - | | 2 | 3 - | bar | 1 | 2 - | bar | 2 | 2 - | foo | 1 | 1 - | foo | 2 | 1 -(16 rows) - -SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab) ORDER BY dataa; - dataa | b | g | count --------+-----+---+------- - a | bar | 1 | 1 - a | bar | 2 | 1 - a | foo | 1 | 1 - a | foo | 2 | 1 - a | | 1 | 2 - a | | 2 | 2 - b | bar | 1 | 1 - b | bar | 2 | 1 - b | | 1 | 1 - b | | 2 | 1 - | | 1 | 3 - | | 2 | 3 - | bar | 1 | 2 - | bar | 2 | 2 - | foo | 1 | 1 - | foo | 2 | 1 -(16 rows) - -SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab) ORDER BY g; - dataa | b | g | count --------+-----+---+------- - a | bar | 1 | 1 - a | foo | 1 | 1 - a | | 1 | 2 - b | bar | 1 | 1 - b | | 1 | 1 - | | 1 | 3 - | bar | 1 | 2 - | foo | 1 | 1 - | foo | 2 | 1 - a | bar | 2 | 1 - b | | 2 | 1 - a | foo | 2 | 1 - | bar | 2 | 2 - a | | 2 | 2 - | | 2 | 3 - b | bar | 2 | 1 -(16 rows) - -SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g); - dataa | b | g | count --------+-----+---+------- - a | bar | 1 | 1 - a | bar | 2 | 1 - a | bar | | 2 - a | foo | 1 | 1 - a | foo | 2 | 1 - a | foo | | 2 - a | | | 4 - b | bar | 1 | 1 - b | bar | 2 | 1 - b | bar | | 2 - b | | | 2 - | | | 6 - | bar | 1 | 2 - | bar | 2 | 2 - | bar | | 4 - | foo | 1 | 1 - | foo | 2 | 1 - | foo | | 2 - a | | 1 | 2 - b | | 1 | 1 - | | 1 | 3 - a | | 2 | 2 - b | | 2 | 1 - | | 2 | 3 -(24 rows) - -SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g) ORDER BY dataa; - dataa | b | g | count 
--------+-----+---+------- - a | foo | | 2 - a | | | 4 - a | | 2 | 2 - a | bar | 1 | 1 - a | bar | 2 | 1 - a | bar | | 2 - a | foo | 1 | 1 - a | foo | 2 | 1 - a | | 1 | 2 - b | bar | 1 | 1 - b | | | 2 - b | | 1 | 1 - b | bar | 2 | 1 - b | bar | | 2 - b | | 2 | 1 - | | 2 | 3 - | | | 6 - | bar | 1 | 2 - | bar | 2 | 2 - | bar | | 4 - | foo | 1 | 1 - | foo | 2 | 1 - | foo | | 2 - | | 1 | 3 -(24 rows) - -SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g) ORDER BY g; - dataa | b | g | count --------+-----+---+------- - a | bar | 1 | 1 - a | foo | 1 | 1 - b | bar | 1 | 1 - | bar | 1 | 2 - | foo | 1 | 1 - a | | 1 | 2 - b | | 1 | 1 - | | 1 | 3 - a | | 2 | 2 - b | | 2 | 1 - | bar | 2 | 2 - | | 2 | 3 - | foo | 2 | 1 - a | bar | 2 | 1 - a | foo | 2 | 1 - b | bar | 2 | 1 - a | | | 4 - b | bar | | 2 - b | | | 2 - | | | 6 - a | foo | | 2 - a | bar | | 2 - | bar | | 4 - | foo | | 2 -(24 rows) - -reset enable_hashagg; --- case with degenerate ORDER BY -explain (verbose, costs off) -select 'foo' as f, generate_series(1,2) as g from few order by 1; - QUERY PLAN ----------------------------------------------- - ProjectSet - Output: 'foo'::text, generate_series(1, 2) - -> Seq Scan on public.few - Output: id, dataa, datab -(4 rows) - -select 'foo' as f, generate_series(1,2) as g from few order by 1; - f | g ------+--- - foo | 1 - foo | 2 - foo | 1 - foo | 2 - foo | 1 - foo | 2 -(6 rows) - --- data modification -CREATE TABLE fewmore AS SELECT generate_series(1,3) AS data; -INSERT INTO fewmore VALUES(generate_series(4,5)); -SELECT * FROM fewmore; - data ------- - 1 - 2 - 3 - 4 - 5 -(5 rows) - --- SRFs are not allowed in UPDATE (they once were, but it was nonsense) -UPDATE fewmore SET data = generate_series(4,9); -ERROR: set-returning functions are not allowed in UPDATE -LINE 1: UPDATE fewmore SET data = generate_series(4,9); - ^ --- SRFs are not allowed in RETURNING -INSERT INTO fewmore VALUES(1) RETURNING generate_series(1,3); -ERROR: set-returning functions are not allowed in RETURNING -LINE 1: INSERT INTO fewmore VALUES(1) RETURNING generate_series(1,3)... - ^ --- nor standalone VALUES (but surely this is a bug?) -VALUES(1, generate_series(1,2)); -ERROR: set-returning functions are not allowed in VALUES -LINE 1: VALUES(1, generate_series(1,2)); - ^ --- We allow tSRFs that are not at top level -SELECT int4mul(generate_series(1,2), 10); - int4mul ---------- - 10 - 20 -(2 rows) - -SELECT generate_series(1,3) IS DISTINCT FROM 2; - ?column? ----------- - t - f - t -(3 rows) - --- but SRFs in function RTEs must be at top level (annoying restriction) -SELECT * FROM int4mul(generate_series(1,2), 10); -ERROR: set-returning functions must appear at top level of FROM -LINE 1: SELECT * FROM int4mul(generate_series(1,2), 10); - ^ --- DISTINCT ON is evaluated before tSRF evaluation if SRF is not --- referenced either in ORDER BY or in the DISTINCT ON list. The ORDER --- BY reference can be implicitly generated, if there's no other ORDER BY. 
--- implicit reference (via implicit ORDER) to all columns -SELECT DISTINCT ON (a) a, b, generate_series(1,3) g -FROM (VALUES (3, 2), (3,1), (1,1), (1,4), (5,3), (5,1)) AS t(a, b); - a | b | g ----+---+--- - 1 | 1 | 1 - 3 | 2 | 1 - 5 | 3 | 1 -(3 rows) - --- unreferenced in DISTINCT ON or ORDER BY -SELECT DISTINCT ON (a) a, b, generate_series(1,3) g -FROM (VALUES (3, 2), (3,1), (1,1), (1,4), (5,3), (5,1)) AS t(a, b) -ORDER BY a, b DESC; - a | b | g ----+---+--- - 1 | 4 | 1 - 1 | 4 | 2 - 1 | 4 | 3 - 3 | 2 | 1 - 3 | 2 | 2 - 3 | 2 | 3 - 5 | 3 | 1 - 5 | 3 | 2 - 5 | 3 | 3 -(9 rows) - --- referenced in ORDER BY -SELECT DISTINCT ON (a) a, b, generate_series(1,3) g -FROM (VALUES (3, 2), (3,1), (1,1), (1,4), (5,3), (5,1)) AS t(a, b) -ORDER BY a, b DESC, g DESC; - a | b | g ----+---+--- - 1 | 4 | 3 - 3 | 2 | 3 - 5 | 3 | 3 -(3 rows) - --- referenced in ORDER BY and DISTINCT ON -SELECT DISTINCT ON (a, b, g) a, b, generate_series(1,3) g -FROM (VALUES (3, 2), (3,1), (1,1), (1,4), (5,3), (5,1)) AS t(a, b) -ORDER BY a, b DESC, g DESC; - a | b | g ----+---+--- - 1 | 4 | 3 - 1 | 4 | 2 - 1 | 4 | 1 - 1 | 1 | 3 - 1 | 1 | 2 - 1 | 1 | 1 - 3 | 2 | 3 - 3 | 2 | 2 - 3 | 2 | 1 - 3 | 1 | 3 - 3 | 1 | 2 - 3 | 1 | 1 - 5 | 3 | 3 - 5 | 3 | 2 - 5 | 3 | 1 - 5 | 1 | 3 - 5 | 1 | 2 - 5 | 1 | 1 -(18 rows) - --- only SRF mentioned in DISTINCT ON -SELECT DISTINCT ON (g) a, b, generate_series(1,3) g -FROM (VALUES (3, 2), (3,1), (1,1), (1,4), (5,3), (5,1)) AS t(a, b); - a | b | g ----+---+--- - 3 | 2 | 1 - 5 | 1 | 2 - 3 | 1 | 3 -(3 rows) - --- LIMIT / OFFSET is evaluated after SRF evaluation -SELECT a, generate_series(1,2) FROM (VALUES(1),(2),(3)) r(a) LIMIT 2 OFFSET 2; - a | generate_series ----+----------------- - 2 | 1 - 2 | 2 -(2 rows) - --- SRFs are not allowed in LIMIT. -SELECT 1 LIMIT generate_series(1,3); -ERROR: set-returning functions are not allowed in LIMIT -LINE 1: SELECT 1 LIMIT generate_series(1,3); - ^ --- tSRF in correlated subquery, referencing table outside -SELECT (SELECT generate_series(1,3) LIMIT 1 OFFSET few.id) FROM few; - generate_series ------------------ - 2 - 3 - -(3 rows) - --- tSRF in correlated subquery, referencing SRF outside -SELECT (SELECT generate_series(1,3) LIMIT 1 OFFSET g.i) FROM generate_series(0,3) g(i); - generate_series ------------------ - 1 - 2 - 3 - -(4 rows) - --- Operators can return sets too -CREATE OPERATOR |@| (PROCEDURE = unnest, RIGHTARG = ANYARRAY); -SELECT |@|ARRAY[1,2,3]; - ?column? ----------- - 1 - 2 - 3 -(3 rows) - --- Some fun cases involving duplicate SRF calls -explain (verbose, costs off) -select generate_series(1,3) as x, generate_series(1,3) + 1 as xp1; - QUERY PLAN ------------------------------------------------------------------- - Result - Output: (generate_series(1, 3)), ((generate_series(1, 3)) + 1) - -> ProjectSet - Output: generate_series(1, 3) - -> Result -(5 rows) - -select generate_series(1,3) as x, generate_series(1,3) + 1 as xp1; - x | xp1 ----+----- - 1 | 2 - 2 | 3 - 3 | 4 -(3 rows) - -explain (verbose, costs off) -select generate_series(1,3)+1 order by generate_series(1,3); - QUERY PLAN ------------------------------------------------------------------------- - Sort - Output: (((generate_series(1, 3)) + 1)), (generate_series(1, 3)) - Sort Key: (generate_series(1, 3)) - -> Result - Output: ((generate_series(1, 3)) + 1), (generate_series(1, 3)) - -> ProjectSet - Output: generate_series(1, 3) - -> Result -(8 rows) - -select generate_series(1,3)+1 order by generate_series(1,3); - ?column? 
----------- - 2 - 3 - 4 -(3 rows) - --- Check that SRFs of same nesting level run in lockstep -explain (verbose, costs off) -select generate_series(1,3) as x, generate_series(3,6) + 1 as y; - QUERY PLAN ------------------------------------------------------------------- - Result - Output: (generate_series(1, 3)), ((generate_series(3, 6)) + 1) - -> ProjectSet - Output: generate_series(1, 3), generate_series(3, 6) - -> Result -(5 rows) - -select generate_series(1,3) as x, generate_series(3,6) + 1 as y; - x | y ----+--- - 1 | 4 - 2 | 5 - 3 | 6 - | 7 -(4 rows) - --- Clean up -DROP TABLE few; -DROP TABLE fewmore; +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/tid.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tid.out --- /tmp/cirrus-ci-build/src/test/regress/expected/tid.out 2024-03-13 23:12:37.625939000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tid.out 2024-03-13 23:14:29.466075000 +0000 @@ -1,121 +1,2 @@ --- basic tests for the TID data type -SELECT - '(0,0)'::tid as tid00, - '(0,1)'::tid as tid01, - '(-1,0)'::tid as tidm10, - '(4294967295,65535)'::tid as tidmax; - tid00 | tid01 | tidm10 | tidmax --------+-------+----------------+-------------------- - (0,0) | (0,1) | (4294967295,0) | (4294967295,65535) -(1 row) - -SELECT '(4294967296,1)'::tid; -- error -ERROR: invalid input syntax for type tid: "(4294967296,1)" -LINE 1: SELECT '(4294967296,1)'::tid; - ^ -SELECT '(1,65536)'::tid; -- error -ERROR: invalid input syntax for type tid: "(1,65536)" -LINE 1: SELECT '(1,65536)'::tid; - ^ --- Also try it with non-error-throwing API -SELECT pg_input_is_valid('(0)', 'tid'); - pg_input_is_valid -------------------- - f -(1 row) - -SELECT * FROM pg_input_error_info('(0)', 'tid'); - message | detail | hint | sql_error_code -------------------------------------------+--------+------+---------------- - invalid input syntax for type tid: "(0)" | | | 22P02 -(1 row) - -SELECT pg_input_is_valid('(0,-1)', 'tid'); - pg_input_is_valid -------------------- - f -(1 row) - -SELECT * FROM pg_input_error_info('(0,-1)', 'tid'); - message | detail | hint | sql_error_code ----------------------------------------------+--------+------+---------------- - invalid input syntax for type tid: "(0,-1)" | | | 22P02 -(1 row) - --- tests for functions related to TID handling -CREATE TABLE tid_tab (a int); --- min() and max() for TIDs -INSERT INTO tid_tab VALUES (1), (2); -SELECT min(ctid) FROM tid_tab; - min -------- - (0,1) -(1 row) - -SELECT max(ctid) FROM tid_tab; - max -------- - (0,2) -(1 row) - -TRUNCATE tid_tab; --- Tests for currtid2() with various relation kinds --- Materialized view -CREATE MATERIALIZED VIEW tid_matview AS SELECT a FROM tid_tab; -SELECT currtid2('tid_matview'::text, '(0,1)'::tid); -- fails -ERROR: tid (0, 1) is not valid for relation "tid_matview" -INSERT INTO tid_tab VALUES (1); -REFRESH MATERIALIZED VIEW tid_matview; -SELECT currtid2('tid_matview'::text, '(0,1)'::tid); -- ok - currtid2 ----------- - (0,1) -(1 row) - -DROP MATERIALIZED VIEW tid_matview; -TRUNCATE tid_tab; --- Sequence -CREATE SEQUENCE tid_seq; -SELECT currtid2('tid_seq'::text, '(0,1)'::tid); -- ok - currtid2 ----------- - (0,1) -(1 row) - -DROP SEQUENCE tid_seq; --- Index, fails with incorrect relation type -CREATE INDEX tid_ind ON tid_tab(a); -SELECT currtid2('tid_ind'::text, 
'(0,1)'::tid); -- fails -ERROR: cannot open relation "tid_ind" -DETAIL: This operation is not supported for indexes. -DROP INDEX tid_ind; --- Partitioned table, no storage -CREATE TABLE tid_part (a int) PARTITION BY RANGE (a); -SELECT currtid2('tid_part'::text, '(0,1)'::tid); -- fails -ERROR: cannot look at latest visible tid for relation "public.tid_part" -DROP TABLE tid_part; --- Views --- ctid not defined in the view -CREATE VIEW tid_view_no_ctid AS SELECT a FROM tid_tab; -SELECT currtid2('tid_view_no_ctid'::text, '(0,1)'::tid); -- fails -ERROR: currtid cannot handle views with no CTID -DROP VIEW tid_view_no_ctid; --- ctid fetched directly from the source table. -CREATE VIEW tid_view_with_ctid AS SELECT ctid, a FROM tid_tab; -SELECT currtid2('tid_view_with_ctid'::text, '(0,1)'::tid); -- fails -ERROR: tid (0, 1) is not valid for relation "tid_tab" -INSERT INTO tid_tab VALUES (1); -SELECT currtid2('tid_view_with_ctid'::text, '(0,1)'::tid); -- ok - currtid2 ----------- - (0,1) -(1 row) - -DROP VIEW tid_view_with_ctid; -TRUNCATE tid_tab; --- ctid attribute with incorrect data type -CREATE VIEW tid_view_fake_ctid AS SELECT 1 AS ctid, 2 AS a; -SELECT currtid2('tid_view_fake_ctid'::text, '(0,1)'::tid); -- fails -ERROR: ctid isn't of type TID -DROP VIEW tid_view_fake_ctid; -DROP TABLE tid_tab CASCADE; +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/tidscan.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tidscan.out --- /tmp/cirrus-ci-build/src/test/regress/expected/tidscan.out 2024-03-13 23:12:37.627684000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tidscan.out 2024-03-13 23:14:29.491173000 +0000 @@ -1,296 +1,2 @@ --- tests for tidscans -CREATE TABLE tidscan(id integer); --- only insert a few rows, we don't want to spill onto a second table page -INSERT INTO tidscan VALUES (1), (2), (3); --- show ctids -SELECT ctid, * FROM tidscan; - ctid | id --------+---- - (0,1) | 1 - (0,2) | 2 - (0,3) | 3 -(3 rows) - --- ctid equality - implemented as tidscan -EXPLAIN (COSTS OFF) -SELECT ctid, * FROM tidscan WHERE ctid = '(0,1)'; - QUERY PLAN ------------------------------------ - Tid Scan on tidscan - TID Cond: (ctid = '(0,1)'::tid) -(2 rows) - -SELECT ctid, * FROM tidscan WHERE ctid = '(0,1)'; - ctid | id --------+---- - (0,1) | 1 -(1 row) - -EXPLAIN (COSTS OFF) -SELECT ctid, * FROM tidscan WHERE '(0,1)' = ctid; - QUERY PLAN ------------------------------------ - Tid Scan on tidscan - TID Cond: ('(0,1)'::tid = ctid) -(2 rows) - -SELECT ctid, * FROM tidscan WHERE '(0,1)' = ctid; - ctid | id --------+---- - (0,1) | 1 -(1 row) - --- OR'd clauses -EXPLAIN (COSTS OFF) -SELECT ctid, * FROM tidscan WHERE ctid = '(0,2)' OR '(0,1)' = ctid; - QUERY PLAN --------------------------------------------------------------- - Tid Scan on tidscan - TID Cond: ((ctid = '(0,2)'::tid) OR ('(0,1)'::tid = ctid)) -(2 rows) - -SELECT ctid, * FROM tidscan WHERE ctid = '(0,2)' OR '(0,1)' = ctid; - ctid | id --------+---- - (0,1) | 1 - (0,2) | 2 -(2 rows) - --- ctid = ScalarArrayOp - implemented as tidscan -EXPLAIN (COSTS OFF) -SELECT ctid, * FROM tidscan WHERE ctid = ANY(ARRAY['(0,1)', '(0,2)']::tid[]); - QUERY PLAN -------------------------------------------------------- - Tid Scan on tidscan - TID Cond: (ctid = ANY ('{"(0,1)","(0,2)"}'::tid[])) -(2 rows) - -SELECT ctid, * 
FROM tidscan WHERE ctid = ANY(ARRAY['(0,1)', '(0,2)']::tid[]); - ctid | id --------+---- - (0,1) | 1 - (0,2) | 2 -(2 rows) - --- ctid != ScalarArrayOp - can't be implemented as tidscan -EXPLAIN (COSTS OFF) -SELECT ctid, * FROM tidscan WHERE ctid != ANY(ARRAY['(0,1)', '(0,2)']::tid[]); - QUERY PLAN ------------------------------------------------------- - Seq Scan on tidscan - Filter: (ctid <> ANY ('{"(0,1)","(0,2)"}'::tid[])) -(2 rows) - -SELECT ctid, * FROM tidscan WHERE ctid != ANY(ARRAY['(0,1)', '(0,2)']::tid[]); - ctid | id --------+---- - (0,1) | 1 - (0,2) | 2 - (0,3) | 3 -(3 rows) - --- tid equality extracted from sub-AND clauses -EXPLAIN (COSTS OFF) -SELECT ctid, * FROM tidscan -WHERE (id = 3 AND ctid IN ('(0,2)', '(0,3)')) OR (ctid = '(0,1)' AND id = 1); - QUERY PLAN --------------------------------------------------------------------------------------------------------------- - Tid Scan on tidscan - TID Cond: ((ctid = ANY ('{"(0,2)","(0,3)"}'::tid[])) OR (ctid = '(0,1)'::tid)) - Filter: (((id = 3) AND (ctid = ANY ('{"(0,2)","(0,3)"}'::tid[]))) OR ((ctid = '(0,1)'::tid) AND (id = 1))) -(3 rows) - -SELECT ctid, * FROM tidscan -WHERE (id = 3 AND ctid IN ('(0,2)', '(0,3)')) OR (ctid = '(0,1)' AND id = 1); - ctid | id --------+---- - (0,1) | 1 - (0,3) | 3 -(2 rows) - --- nestloop-with-inner-tidscan joins on tid -SET enable_hashjoin TO off; -- otherwise hash join might win -EXPLAIN (COSTS OFF) -SELECT t1.ctid, t1.*, t2.ctid, t2.* -FROM tidscan t1 JOIN tidscan t2 ON t1.ctid = t2.ctid WHERE t1.id = 1; - QUERY PLAN ------------------------------------- - Nested Loop - -> Seq Scan on tidscan t1 - Filter: (id = 1) - -> Tid Scan on tidscan t2 - TID Cond: (t1.ctid = ctid) -(5 rows) - -SELECT t1.ctid, t1.*, t2.ctid, t2.* -FROM tidscan t1 JOIN tidscan t2 ON t1.ctid = t2.ctid WHERE t1.id = 1; - ctid | id | ctid | id --------+----+-------+---- - (0,1) | 1 | (0,1) | 1 -(1 row) - -EXPLAIN (COSTS OFF) -SELECT t1.ctid, t1.*, t2.ctid, t2.* -FROM tidscan t1 LEFT JOIN tidscan t2 ON t1.ctid = t2.ctid WHERE t1.id = 1; - QUERY PLAN ------------------------------------- - Nested Loop Left Join - -> Seq Scan on tidscan t1 - Filter: (id = 1) - -> Tid Scan on tidscan t2 - TID Cond: (t1.ctid = ctid) -(5 rows) - -SELECT t1.ctid, t1.*, t2.ctid, t2.* -FROM tidscan t1 LEFT JOIN tidscan t2 ON t1.ctid = t2.ctid WHERE t1.id = 1; - ctid | id | ctid | id --------+----+-------+---- - (0,1) | 1 | (0,1) | 1 -(1 row) - -RESET enable_hashjoin; --- exercise backward scan and rewind -BEGIN; -DECLARE c CURSOR FOR -SELECT ctid, * FROM tidscan WHERE ctid = ANY(ARRAY['(0,1)', '(0,2)']::tid[]); -FETCH ALL FROM c; - ctid | id --------+---- - (0,1) | 1 - (0,2) | 2 -(2 rows) - -FETCH BACKWARD 1 FROM c; - ctid | id --------+---- - (0,2) | 2 -(1 row) - -FETCH FIRST FROM c; - ctid | id --------+---- - (0,1) | 1 -(1 row) - -ROLLBACK; --- tidscan via CURRENT OF -BEGIN; -DECLARE c CURSOR FOR SELECT ctid, * FROM tidscan; -FETCH NEXT FROM c; -- skip one row - ctid | id --------+---- - (0,1) | 1 -(1 row) - -FETCH NEXT FROM c; - ctid | id --------+---- - (0,2) | 2 -(1 row) - --- perform update -EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) -UPDATE tidscan SET id = -id WHERE CURRENT OF c RETURNING *; - QUERY PLAN ---------------------------------------------------- - Update on tidscan (actual rows=1 loops=1) - -> Tid Scan on tidscan (actual rows=1 loops=1) - TID Cond: CURRENT OF c -(3 rows) - -FETCH NEXT FROM c; - ctid | id --------+---- - (0,3) | 3 -(1 row) - --- perform update -EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) 
-UPDATE tidscan SET id = -id WHERE CURRENT OF c RETURNING *; - QUERY PLAN ---------------------------------------------------- - Update on tidscan (actual rows=1 loops=1) - -> Tid Scan on tidscan (actual rows=1 loops=1) - TID Cond: CURRENT OF c -(3 rows) - -SELECT * FROM tidscan; - id ----- - 1 - -2 - -3 -(3 rows) - --- position cursor past any rows -FETCH NEXT FROM c; - ctid | id -------+---- -(0 rows) - --- should error out -EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) -UPDATE tidscan SET id = -id WHERE CURRENT OF c RETURNING *; -ERROR: cursor "c" is not positioned on a row -ROLLBACK; --- bulk joins on CTID --- (these plans don't use TID scans, but this still seems like an --- appropriate place for these tests) -EXPLAIN (COSTS OFF) -SELECT count(*) FROM tenk1 t1 JOIN tenk1 t2 ON t1.ctid = t2.ctid; - QUERY PLAN ----------------------------------------- - Aggregate - -> Hash Join - Hash Cond: (t1.ctid = t2.ctid) - -> Seq Scan on tenk1 t1 - -> Hash - -> Seq Scan on tenk1 t2 -(6 rows) - -SELECT count(*) FROM tenk1 t1 JOIN tenk1 t2 ON t1.ctid = t2.ctid; - count -------- - 10000 -(1 row) - -SET enable_hashjoin TO off; -EXPLAIN (COSTS OFF) -SELECT count(*) FROM tenk1 t1 JOIN tenk1 t2 ON t1.ctid = t2.ctid; - QUERY PLAN ------------------------------------------ - Aggregate - -> Merge Join - Merge Cond: (t1.ctid = t2.ctid) - -> Sort - Sort Key: t1.ctid - -> Seq Scan on tenk1 t1 - -> Sort - Sort Key: t2.ctid - -> Seq Scan on tenk1 t2 -(9 rows) - -SELECT count(*) FROM tenk1 t1 JOIN tenk1 t2 ON t1.ctid = t2.ctid; - count -------- - 10000 -(1 row) - -RESET enable_hashjoin; --- check predicate lock on CTID -BEGIN ISOLATION LEVEL SERIALIZABLE; -SELECT * FROM tidscan WHERE ctid = '(0,1)'; - id ----- - 1 -(1 row) - --- locktype should be 'tuple' -SELECT locktype, mode FROM pg_locks WHERE pid = pg_backend_pid() AND mode = 'SIReadLock'; - locktype | mode -----------+------------ - tuple | SIReadLock -(1 row) - -ROLLBACK; -DROP TABLE tidscan; +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/tidrangescan.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tidrangescan.out --- /tmp/cirrus-ci-build/src/test/regress/expected/tidrangescan.out 2024-03-13 23:12:37.627673000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tidrangescan.out 2024-03-13 23:14:29.463296000 +0000 @@ -1,300 +1,2 @@ --- tests for tidrangescans -SET enable_seqscan TO off; -CREATE TABLE tidrangescan(id integer, data text); --- empty table -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE ctid < '(1, 0)'; - QUERY PLAN ------------------------------------ - Tid Range Scan on tidrangescan - TID Cond: (ctid < '(1,0)'::tid) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE ctid < '(1, 0)'; - ctid ------- -(0 rows) - -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE ctid > '(9, 0)'; - QUERY PLAN ------------------------------------ - Tid Range Scan on tidrangescan - TID Cond: (ctid > '(9,0)'::tid) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE ctid > '(9, 0)'; - ctid ------- -(0 rows) - --- insert enough tuples to fill at least two pages -INSERT INTO tidrangescan SELECT i,repeat('x', 100) FROM generate_series(1,200) AS s(i); --- remove all tuples after the 10th tuple on each page. 
Trying to ensure --- we get the same layout with all CPU architectures and smaller than standard --- page sizes. -DELETE FROM tidrangescan -WHERE substring(ctid::text FROM ',(\d+)\)')::integer > 10 OR substring(ctid::text FROM '\((\d+),')::integer > 2; -VACUUM tidrangescan; --- range scans with upper bound -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE ctid < '(1,0)'; - QUERY PLAN ------------------------------------ - Tid Range Scan on tidrangescan - TID Cond: (ctid < '(1,0)'::tid) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE ctid < '(1,0)'; - ctid --------- - (0,1) - (0,2) - (0,3) - (0,4) - (0,5) - (0,6) - (0,7) - (0,8) - (0,9) - (0,10) -(10 rows) - -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE ctid <= '(1,5)'; - QUERY PLAN ------------------------------------- - Tid Range Scan on tidrangescan - TID Cond: (ctid <= '(1,5)'::tid) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE ctid <= '(1,5)'; - ctid --------- - (0,1) - (0,2) - (0,3) - (0,4) - (0,5) - (0,6) - (0,7) - (0,8) - (0,9) - (0,10) - (1,1) - (1,2) - (1,3) - (1,4) - (1,5) -(15 rows) - -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE ctid < '(0,0)'; - QUERY PLAN ------------------------------------ - Tid Range Scan on tidrangescan - TID Cond: (ctid < '(0,0)'::tid) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE ctid < '(0,0)'; - ctid ------- -(0 rows) - --- range scans with lower bound -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE ctid > '(2,8)'; - QUERY PLAN ------------------------------------ - Tid Range Scan on tidrangescan - TID Cond: (ctid > '(2,8)'::tid) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE ctid > '(2,8)'; - ctid --------- - (2,9) - (2,10) -(2 rows) - -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE '(2,8)' < ctid; - QUERY PLAN ------------------------------------ - Tid Range Scan on tidrangescan - TID Cond: ('(2,8)'::tid < ctid) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE '(2,8)' < ctid; - ctid --------- - (2,9) - (2,10) -(2 rows) - -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE ctid >= '(2,8)'; - QUERY PLAN ------------------------------------- - Tid Range Scan on tidrangescan - TID Cond: (ctid >= '(2,8)'::tid) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE ctid >= '(2,8)'; - ctid --------- - (2,8) - (2,9) - (2,10) -(3 rows) - -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE ctid >= '(100,0)'; - QUERY PLAN --------------------------------------- - Tid Range Scan on tidrangescan - TID Cond: (ctid >= '(100,0)'::tid) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE ctid >= '(100,0)'; - ctid ------- -(0 rows) - --- range scans with both bounds -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE ctid > '(1,4)' AND '(1,7)' >= ctid; - QUERY PLAN ----------------------------------------------------------------- - Tid Range Scan on tidrangescan - TID Cond: ((ctid > '(1,4)'::tid) AND ('(1,7)'::tid >= ctid)) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE ctid > '(1,4)' AND '(1,7)' >= ctid; - ctid -------- - (1,5) - (1,6) - (1,7) -(3 rows) - -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE '(1,7)' >= ctid AND ctid > '(1,4)'; - QUERY PLAN ----------------------------------------------------------------- - Tid Range Scan on tidrangescan - TID Cond: (('(1,7)'::tid >= ctid) AND (ctid > '(1,4)'::tid)) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE '(1,7)' >= ctid AND ctid > '(1,4)'; - ctid -------- - (1,5) - (1,6) - (1,7) -(3 rows) - --- extreme offsets -SELECT ctid FROM tidrangescan WHERE ctid > '(0,65535)' AND ctid < '(1,0)' 
LIMIT 1; - ctid ------- -(0 rows) - -SELECT ctid FROM tidrangescan WHERE ctid < '(0,0)' LIMIT 1; - ctid ------- -(0 rows) - -SELECT ctid FROM tidrangescan WHERE ctid > '(4294967295,65535)'; - ctid ------- -(0 rows) - -SELECT ctid FROM tidrangescan WHERE ctid < '(0,0)'; - ctid ------- -(0 rows) - --- NULLs in the range cannot return tuples -SELECT ctid FROM tidrangescan WHERE ctid >= (SELECT NULL::tid); - ctid ------- -(0 rows) - --- rescans -EXPLAIN (COSTS OFF) -SELECT t.ctid,t2.c FROM tidrangescan t, -LATERAL (SELECT count(*) c FROM tidrangescan t2 WHERE t2.ctid <= t.ctid) t2 -WHERE t.ctid < '(1,0)'; - QUERY PLAN ------------------------------------------------ - Nested Loop - -> Tid Range Scan on tidrangescan t - TID Cond: (ctid < '(1,0)'::tid) - -> Aggregate - -> Tid Range Scan on tidrangescan t2 - TID Cond: (ctid <= t.ctid) -(6 rows) - -SELECT t.ctid,t2.c FROM tidrangescan t, -LATERAL (SELECT count(*) c FROM tidrangescan t2 WHERE t2.ctid <= t.ctid) t2 -WHERE t.ctid < '(1,0)'; - ctid | c ---------+---- - (0,1) | 1 - (0,2) | 2 - (0,3) | 3 - (0,4) | 4 - (0,5) | 5 - (0,6) | 6 - (0,7) | 7 - (0,8) | 8 - (0,9) | 9 - (0,10) | 10 -(10 rows) - --- cursors --- Ensure we get a TID Range scan without a Materialize node. -EXPLAIN (COSTS OFF) -DECLARE c SCROLL CURSOR FOR SELECT ctid FROM tidrangescan WHERE ctid < '(1,0)'; - QUERY PLAN ------------------------------------ - Tid Range Scan on tidrangescan - TID Cond: (ctid < '(1,0)'::tid) -(2 rows) - -BEGIN; -DECLARE c SCROLL CURSOR FOR SELECT ctid FROM tidrangescan WHERE ctid < '(1,0)'; -FETCH NEXT c; - ctid -------- - (0,1) -(1 row) - -FETCH NEXT c; - ctid -------- - (0,2) -(1 row) - -FETCH PRIOR c; - ctid -------- - (0,1) -(1 row) - -FETCH FIRST c; - ctid -------- - (0,1) -(1 row) - -FETCH LAST c; - ctid --------- - (0,10) -(1 row) - -COMMIT; -DROP TABLE tidrangescan; -RESET enable_seqscan; +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/collate.icu.utf8_1.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/collate.icu.utf8.out --- /tmp/cirrus-ci-build/src/test/regress/expected/collate.icu.utf8_1.out 2024-03-13 23:12:37.622716000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/collate.icu.utf8.out 2024-03-13 23:14:29.462867000 +0000 @@ -1,9 +1,2 @@ -/* - * This test is for ICU collations. - */ -/* skip test if not UTF8 server encoding or no ICU collations installed */ -SELECT getdatabaseencoding() <> 'UTF8' OR - (SELECT count(*) FROM pg_collation WHERE collprovider = 'i' AND collname <> 'unicode') = 0 - AS skip_test \gset -\if :skip_test -\quit +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/incremental_sort.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/incremental_sort.out --- /tmp/cirrus-ci-build/src/test/regress/expected/incremental_sort.out 2024-03-13 23:12:37.624435000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/incremental_sort.out 2024-03-13 23:14:29.468820000 +0000 @@ -1,1695 +1,2 @@ --- When there is a LIMIT clause, incremental sort is beneficial because --- it only has to sort some of the groups, and not the entire table. 
-explain (costs off) -select * from (select * from tenk1 order by four) t order by four, ten -limit 1; - QUERY PLAN ------------------------------------------ - Limit - -> Incremental Sort - Sort Key: tenk1.four, tenk1.ten - Presorted Key: tenk1.four - -> Sort - Sort Key: tenk1.four - -> Seq Scan on tenk1 -(7 rows) - --- When work_mem is not enough to sort the entire table, incremental sort --- may be faster if individual groups still fit into work_mem. -set work_mem to '2MB'; -explain (costs off) -select * from (select * from tenk1 order by four) t order by four, ten; - QUERY PLAN ------------------------------------ - Incremental Sort - Sort Key: tenk1.four, tenk1.ten - Presorted Key: tenk1.four - -> Sort - Sort Key: tenk1.four - -> Seq Scan on tenk1 -(6 rows) - -reset work_mem; -create table t(a integer, b integer); -create or replace function explain_analyze_without_memory(query text) -returns table (out_line text) language plpgsql -as -$$ -declare - line text; -begin - for line in - execute 'explain (analyze, costs off, summary off, timing off) ' || query - loop - out_line := regexp_replace(line, '\d+kB', 'NNkB', 'g'); - return next; - end loop; -end; -$$; -create or replace function explain_analyze_inc_sort_nodes(query text) -returns jsonb language plpgsql -as -$$ -declare - elements jsonb; - element jsonb; - matching_nodes jsonb := '[]'::jsonb; -begin - execute 'explain (analyze, costs off, summary off, timing off, format ''json'') ' || query into strict elements; - while jsonb_array_length(elements) > 0 loop - element := elements->0; - elements := elements - 0; - case jsonb_typeof(element) - when 'array' then - if jsonb_array_length(element) > 0 then - elements := elements || element; - end if; - when 'object' then - if element ? 'Plan' then - elements := elements || jsonb_build_array(element->'Plan'); - element := element - 'Plan'; - else - if element ? 
'Plans' then - elements := elements || jsonb_build_array(element->'Plans'); - element := element - 'Plans'; - end if; - if (element->>'Node Type')::text = 'Incremental Sort' then - matching_nodes := matching_nodes || element; - end if; - end if; - end case; - end loop; - return matching_nodes; -end; -$$; -create or replace function explain_analyze_inc_sort_nodes_without_memory(query text) -returns jsonb language plpgsql -as -$$ -declare - nodes jsonb := '[]'::jsonb; - node jsonb; - group_key text; - space_key text; -begin - for node in select * from jsonb_array_elements(explain_analyze_inc_sort_nodes(query)) t loop - for group_key in select unnest(array['Full-sort Groups', 'Pre-sorted Groups']::text[]) t loop - for space_key in select unnest(array['Sort Space Memory', 'Sort Space Disk']::text[]) t loop - node := jsonb_set(node, array[group_key, space_key, 'Average Sort Space Used'], '"NN"', false); - node := jsonb_set(node, array[group_key, space_key, 'Peak Sort Space Used'], '"NN"', false); - end loop; - end loop; - nodes := nodes || node; - end loop; - return nodes; -end; -$$; -create or replace function explain_analyze_inc_sort_nodes_verify_invariants(query text) -returns bool language plpgsql -as -$$ -declare - node jsonb; - group_stats jsonb; - group_key text; - space_key text; -begin - for node in select * from jsonb_array_elements(explain_analyze_inc_sort_nodes(query)) t loop - for group_key in select unnest(array['Full-sort Groups', 'Pre-sorted Groups']::text[]) t loop - group_stats := node->group_key; - for space_key in select unnest(array['Sort Space Memory', 'Sort Space Disk']::text[]) t loop - if (group_stats->space_key->'Peak Sort Space Used')::bigint < (group_stats->space_key->'Peak Sort Space Used')::bigint then - raise exception '% has invalid max space < average space', group_key; - end if; - end loop; - end loop; - end loop; - return true; -end; -$$; --- A single large group tested around each mode transition point. 
-insert into t(a, b) select i/100 + 1, i + 1 from generate_series(0, 999) n(i); -analyze t; -explain (costs off) select * from (select * from t order by a) s order by a, b limit 31; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 31; - a | b ----+---- - 1 | 1 - 1 | 2 - 1 | 3 - 1 | 4 - 1 | 5 - 1 | 6 - 1 | 7 - 1 | 8 - 1 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 1 | 20 - 1 | 21 - 1 | 22 - 1 | 23 - 1 | 24 - 1 | 25 - 1 | 26 - 1 | 27 - 1 | 28 - 1 | 29 - 1 | 30 - 1 | 31 -(31 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 32; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 32; - a | b ----+---- - 1 | 1 - 1 | 2 - 1 | 3 - 1 | 4 - 1 | 5 - 1 | 6 - 1 | 7 - 1 | 8 - 1 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 1 | 20 - 1 | 21 - 1 | 22 - 1 | 23 - 1 | 24 - 1 | 25 - 1 | 26 - 1 | 27 - 1 | 28 - 1 | 29 - 1 | 30 - 1 | 31 - 1 | 32 -(32 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 33; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 33; - a | b ----+---- - 1 | 1 - 1 | 2 - 1 | 3 - 1 | 4 - 1 | 5 - 1 | 6 - 1 | 7 - 1 | 8 - 1 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 1 | 20 - 1 | 21 - 1 | 22 - 1 | 23 - 1 | 24 - 1 | 25 - 1 | 26 - 1 | 27 - 1 | 28 - 1 | 29 - 1 | 30 - 1 | 31 - 1 | 32 - 1 | 33 -(33 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 65; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 65; - a | b ----+---- - 1 | 1 - 1 | 2 - 1 | 3 - 1 | 4 - 1 | 5 - 1 | 6 - 1 | 7 - 1 | 8 - 1 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 1 | 20 - 1 | 21 - 1 | 22 - 1 | 23 - 1 | 24 - 1 | 25 - 1 | 26 - 1 | 27 - 1 | 28 - 1 | 29 - 1 | 30 - 1 | 31 - 1 | 32 - 1 | 33 - 1 | 34 - 1 | 35 - 1 | 36 - 1 | 37 - 1 | 38 - 1 | 39 - 1 | 40 - 1 | 41 - 1 | 42 - 1 | 43 - 1 | 44 - 1 | 45 - 1 | 46 - 1 | 47 - 1 | 48 - 1 | 49 - 1 | 50 - 1 | 51 - 1 | 52 - 1 | 53 - 1 | 54 - 1 | 55 - 1 | 56 - 1 | 57 - 1 | 58 - 1 | 59 - 1 | 60 - 1 | 61 - 1 | 62 - 1 | 63 - 1 | 64 - 1 | 65 -(65 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 66; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 66; - a | b ----+---- - 1 | 1 - 1 | 2 - 1 | 3 - 1 | 4 - 1 | 5 - 1 | 6 - 1 | 7 - 1 | 8 - 1 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 1 | 20 - 1 | 21 - 1 | 22 - 1 | 23 - 1 | 24 - 1 | 25 - 1 | 
26 - 1 | 27 - 1 | 28 - 1 | 29 - 1 | 30 - 1 | 31 - 1 | 32 - 1 | 33 - 1 | 34 - 1 | 35 - 1 | 36 - 1 | 37 - 1 | 38 - 1 | 39 - 1 | 40 - 1 | 41 - 1 | 42 - 1 | 43 - 1 | 44 - 1 | 45 - 1 | 46 - 1 | 47 - 1 | 48 - 1 | 49 - 1 | 50 - 1 | 51 - 1 | 52 - 1 | 53 - 1 | 54 - 1 | 55 - 1 | 56 - 1 | 57 - 1 | 58 - 1 | 59 - 1 | 60 - 1 | 61 - 1 | 62 - 1 | 63 - 1 | 64 - 1 | 65 - 1 | 66 -(66 rows) - -delete from t; --- An initial large group followed by a small group. -insert into t(a, b) select i/50 + 1, i + 1 from generate_series(0, 999) n(i); -analyze t; -explain (costs off) select * from (select * from t order by a) s order by a, b limit 55; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 55; - a | b ----+---- - 1 | 1 - 1 | 2 - 1 | 3 - 1 | 4 - 1 | 5 - 1 | 6 - 1 | 7 - 1 | 8 - 1 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 1 | 20 - 1 | 21 - 1 | 22 - 1 | 23 - 1 | 24 - 1 | 25 - 1 | 26 - 1 | 27 - 1 | 28 - 1 | 29 - 1 | 30 - 1 | 31 - 1 | 32 - 1 | 33 - 1 | 34 - 1 | 35 - 1 | 36 - 1 | 37 - 1 | 38 - 1 | 39 - 1 | 40 - 1 | 41 - 1 | 42 - 1 | 43 - 1 | 44 - 1 | 45 - 1 | 46 - 1 | 47 - 1 | 48 - 1 | 49 - 1 | 50 - 2 | 51 - 2 | 52 - 2 | 53 - 2 | 54 - 2 | 55 -(55 rows) - --- Test EXPLAIN ANALYZE with only a fullsort group. -select explain_analyze_without_memory('select * from (select * from t order by a) s order by a, b limit 55'); - explain_analyze_without_memory ---------------------------------------------------------------------------------------------------------------- - Limit (actual rows=55 loops=1) - -> Incremental Sort (actual rows=55 loops=1) - Sort Key: t.a, t.b - Presorted Key: t.a - Full-sort Groups: 2 Sort Methods: top-N heapsort, quicksort Average Memory: NNkB Peak Memory: NNkB - -> Sort (actual rows=101 loops=1) - Sort Key: t.a - Sort Method: quicksort Memory: NNkB - -> Seq Scan on t (actual rows=1000 loops=1) -(9 rows) - -select jsonb_pretty(explain_analyze_inc_sort_nodes_without_memory('select * from (select * from t order by a) s order by a, b limit 55')); - jsonb_pretty -------------------------------------------------- - [ + - { + - "Sort Key": [ + - "t.a", + - "t.b" + - ], + - "Node Type": "Incremental Sort", + - "Actual Rows": 55, + - "Actual Loops": 1, + - "Async Capable": false, + - "Presorted Key": [ + - "t.a" + - ], + - "Parallel Aware": false, + - "Full-sort Groups": { + - "Group Count": 2, + - "Sort Methods Used": [ + - "top-N heapsort", + - "quicksort" + - ], + - "Sort Space Memory": { + - "Peak Sort Space Used": "NN", + - "Average Sort Space Used": "NN"+ - } + - }, + - "Parent Relationship": "Outer" + - } + - ] -(1 row) - -select explain_analyze_inc_sort_nodes_verify_invariants('select * from (select * from t order by a) s order by a, b limit 55'); - explain_analyze_inc_sort_nodes_verify_invariants --------------------------------------------------- - t -(1 row) - -delete from t; --- An initial small group followed by a large group. 
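Aside, before the next group of tests: the explain_analyze_without_memory and explain_analyze_inc_sort_nodes_without_memory helpers called in the output above evidently rewrite EXPLAIN ANALYZE text so that run-dependent sort memory figures come out as the stable placeholder NN/NNkB. Their real definitions appear earlier in this test file; the function below (explain_analyze_masked is a hypothetical name) is only a minimal sketch of that masking idea, not the regression suite's implementation:

create or replace function explain_analyze_masked(query text)
returns setof text language plpgsql as
$$
declare
    ln text;
begin
    for ln in
        execute 'explain (analyze, costs off, summary off, timing off) ' || query
    loop
        -- hide run-to-run variation in reported sort memory
        return next regexp_replace(ln, '\d+kB', 'NNkB', 'g');
    end loop;
end;
$$;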
-insert into t(a, b) select (case when i < 5 then i else 9 end), i from generate_series(1, 1000) n(i); -analyze t; -explain (costs off) select * from (select * from t order by a) s order by a, b limit 70; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 70; - a | b ----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 9 | 5 - 9 | 6 - 9 | 7 - 9 | 8 - 9 | 9 - 9 | 10 - 9 | 11 - 9 | 12 - 9 | 13 - 9 | 14 - 9 | 15 - 9 | 16 - 9 | 17 - 9 | 18 - 9 | 19 - 9 | 20 - 9 | 21 - 9 | 22 - 9 | 23 - 9 | 24 - 9 | 25 - 9 | 26 - 9 | 27 - 9 | 28 - 9 | 29 - 9 | 30 - 9 | 31 - 9 | 32 - 9 | 33 - 9 | 34 - 9 | 35 - 9 | 36 - 9 | 37 - 9 | 38 - 9 | 39 - 9 | 40 - 9 | 41 - 9 | 42 - 9 | 43 - 9 | 44 - 9 | 45 - 9 | 46 - 9 | 47 - 9 | 48 - 9 | 49 - 9 | 50 - 9 | 51 - 9 | 52 - 9 | 53 - 9 | 54 - 9 | 55 - 9 | 56 - 9 | 57 - 9 | 58 - 9 | 59 - 9 | 60 - 9 | 61 - 9 | 62 - 9 | 63 - 9 | 64 - 9 | 65 - 9 | 66 - 9 | 67 - 9 | 68 - 9 | 69 - 9 | 70 -(70 rows) - --- Checks case where we hit a group boundary at the last tuple of a batch. --- Because the full sort state is bounded, we scan 64 tuples (the mode --- transition point) but only retain 5. Thus when we transition modes, all --- tuples in the full sort state have different prefix keys. -explain (costs off) select * from (select * from t order by a) s order by a, b limit 5; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 5; - a | b ----+--- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 9 | 5 -(5 rows) - --- Test rescan. -begin; --- We force the planner to choose a plan with incremental sort on the right side --- of a nested loop join node. That way we trigger the rescan code path. -set local enable_hashjoin = off; -set local enable_mergejoin = off; -set local enable_material = off; -set local enable_sort = off; -explain (costs off) select * from t left join (select * from (select * from t order by a) v order by a, b) s on s.a = t.a where t.a in (1, 2); - QUERY PLAN ------------------------------------------------- - Nested Loop Left Join - Join Filter: (t_1.a = t.a) - -> Seq Scan on t - Filter: (a = ANY ('{1,2}'::integer[])) - -> Incremental Sort - Sort Key: t_1.a, t_1.b - Presorted Key: t_1.a - -> Sort - Sort Key: t_1.a - -> Seq Scan on t t_1 -(10 rows) - -select * from t left join (select * from (select * from t order by a) v order by a, b) s on s.a = t.a where t.a in (1, 2); - a | b | a | b ----+---+---+--- - 1 | 1 | 1 | 1 - 2 | 2 | 2 | 2 -(2 rows) - -rollback; --- Test EXPLAIN ANALYZE with both fullsort and presorted groups. 
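Before the EXPLAIN ANALYZE checks that follow, a general aside: these tests manufacture presorted input with a subquery ORDER BY, but in practice the presorted prefix usually comes from an index. A minimal sketch with a hypothetical table and index (not part of the regression schema); with enable_incremental_sort left on, the planner can satisfy ORDER BY device_id, ts with an Incremental Sort (Presorted Key: device_id) over the index scan instead of sorting the whole table:

create table events (device_id int, ts timestamptz, payload text);
create index events_device_idx on events (device_id);
insert into events
    select i % 50, now() - (i || ' seconds')::interval, 'x'
    from generate_series(1, 10000) s(i);
analyze events;
explain (costs off)
select * from events order by device_id, ts limit 10;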
-select explain_analyze_without_memory('select * from (select * from t order by a) s order by a, b limit 70'); - explain_analyze_without_memory ----------------------------------------------------------------------------------------------------------------- - Limit (actual rows=70 loops=1) - -> Incremental Sort (actual rows=70 loops=1) - Sort Key: t.a, t.b - Presorted Key: t.a - Full-sort Groups: 1 Sort Method: quicksort Average Memory: NNkB Peak Memory: NNkB - Pre-sorted Groups: 5 Sort Methods: top-N heapsort, quicksort Average Memory: NNkB Peak Memory: NNkB - -> Sort (actual rows=1000 loops=1) - Sort Key: t.a - Sort Method: quicksort Memory: NNkB - -> Seq Scan on t (actual rows=1000 loops=1) -(10 rows) - -select jsonb_pretty(explain_analyze_inc_sort_nodes_without_memory('select * from (select * from t order by a) s order by a, b limit 70')); - jsonb_pretty -------------------------------------------------- - [ + - { + - "Sort Key": [ + - "t.a", + - "t.b" + - ], + - "Node Type": "Incremental Sort", + - "Actual Rows": 70, + - "Actual Loops": 1, + - "Async Capable": false, + - "Presorted Key": [ + - "t.a" + - ], + - "Parallel Aware": false, + - "Full-sort Groups": { + - "Group Count": 1, + - "Sort Methods Used": [ + - "quicksort" + - ], + - "Sort Space Memory": { + - "Peak Sort Space Used": "NN", + - "Average Sort Space Used": "NN"+ - } + - }, + - "Pre-sorted Groups": { + - "Group Count": 5, + - "Sort Methods Used": [ + - "top-N heapsort", + - "quicksort" + - ], + - "Sort Space Memory": { + - "Peak Sort Space Used": "NN", + - "Average Sort Space Used": "NN"+ - } + - }, + - "Parent Relationship": "Outer" + - } + - ] -(1 row) - -select explain_analyze_inc_sort_nodes_verify_invariants('select * from (select * from t order by a) s order by a, b limit 70'); - explain_analyze_inc_sort_nodes_verify_invariants --------------------------------------------------- - t -(1 row) - -delete from t; --- Small groups of 10 tuples each tested around each mode transition point. 
-insert into t(a, b) select i / 10, i from generate_series(1, 1000) n(i); -analyze t; -explain (costs off) select * from (select * from t order by a) s order by a, b limit 31; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 31; - a | b ----+---- - 0 | 1 - 0 | 2 - 0 | 3 - 0 | 4 - 0 | 5 - 0 | 6 - 0 | 7 - 0 | 8 - 0 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 2 | 20 - 2 | 21 - 2 | 22 - 2 | 23 - 2 | 24 - 2 | 25 - 2 | 26 - 2 | 27 - 2 | 28 - 2 | 29 - 3 | 30 - 3 | 31 -(31 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 32; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 32; - a | b ----+---- - 0 | 1 - 0 | 2 - 0 | 3 - 0 | 4 - 0 | 5 - 0 | 6 - 0 | 7 - 0 | 8 - 0 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 2 | 20 - 2 | 21 - 2 | 22 - 2 | 23 - 2 | 24 - 2 | 25 - 2 | 26 - 2 | 27 - 2 | 28 - 2 | 29 - 3 | 30 - 3 | 31 - 3 | 32 -(32 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 33; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 33; - a | b ----+---- - 0 | 1 - 0 | 2 - 0 | 3 - 0 | 4 - 0 | 5 - 0 | 6 - 0 | 7 - 0 | 8 - 0 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 2 | 20 - 2 | 21 - 2 | 22 - 2 | 23 - 2 | 24 - 2 | 25 - 2 | 26 - 2 | 27 - 2 | 28 - 2 | 29 - 3 | 30 - 3 | 31 - 3 | 32 - 3 | 33 -(33 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 65; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 65; - a | b ----+---- - 0 | 1 - 0 | 2 - 0 | 3 - 0 | 4 - 0 | 5 - 0 | 6 - 0 | 7 - 0 | 8 - 0 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 2 | 20 - 2 | 21 - 2 | 22 - 2 | 23 - 2 | 24 - 2 | 25 - 2 | 26 - 2 | 27 - 2 | 28 - 2 | 29 - 3 | 30 - 3 | 31 - 3 | 32 - 3 | 33 - 3 | 34 - 3 | 35 - 3 | 36 - 3 | 37 - 3 | 38 - 3 | 39 - 4 | 40 - 4 | 41 - 4 | 42 - 4 | 43 - 4 | 44 - 4 | 45 - 4 | 46 - 4 | 47 - 4 | 48 - 4 | 49 - 5 | 50 - 5 | 51 - 5 | 52 - 5 | 53 - 5 | 54 - 5 | 55 - 5 | 56 - 5 | 57 - 5 | 58 - 5 | 59 - 6 | 60 - 6 | 61 - 6 | 62 - 6 | 63 - 6 | 64 - 6 | 65 -(65 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 66; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 66; - a | b ----+---- - 0 | 1 - 0 | 2 - 0 | 3 - 0 | 4 - 0 | 5 - 0 | 6 - 0 | 7 - 0 | 8 - 0 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 2 | 20 - 2 | 21 - 2 | 22 - 2 | 23 - 2 | 24 - 2 | 25 - 2 | 26 - 2 
| 27 - 2 | 28 - 2 | 29 - 3 | 30 - 3 | 31 - 3 | 32 - 3 | 33 - 3 | 34 - 3 | 35 - 3 | 36 - 3 | 37 - 3 | 38 - 3 | 39 - 4 | 40 - 4 | 41 - 4 | 42 - 4 | 43 - 4 | 44 - 4 | 45 - 4 | 46 - 4 | 47 - 4 | 48 - 4 | 49 - 5 | 50 - 5 | 51 - 5 | 52 - 5 | 53 - 5 | 54 - 5 | 55 - 5 | 56 - 5 | 57 - 5 | 58 - 5 | 59 - 6 | 60 - 6 | 61 - 6 | 62 - 6 | 63 - 6 | 64 - 6 | 65 - 6 | 66 -(66 rows) - -delete from t; --- Small groups of only 1 tuple each tested around each mode transition point. -insert into t(a, b) select i, i from generate_series(1, 1000) n(i); -analyze t; -explain (costs off) select * from (select * from t order by a) s order by a, b limit 31; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 31; - a | b -----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 - 6 | 6 - 7 | 7 - 8 | 8 - 9 | 9 - 10 | 10 - 11 | 11 - 12 | 12 - 13 | 13 - 14 | 14 - 15 | 15 - 16 | 16 - 17 | 17 - 18 | 18 - 19 | 19 - 20 | 20 - 21 | 21 - 22 | 22 - 23 | 23 - 24 | 24 - 25 | 25 - 26 | 26 - 27 | 27 - 28 | 28 - 29 | 29 - 30 | 30 - 31 | 31 -(31 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 32; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 32; - a | b -----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 - 6 | 6 - 7 | 7 - 8 | 8 - 9 | 9 - 10 | 10 - 11 | 11 - 12 | 12 - 13 | 13 - 14 | 14 - 15 | 15 - 16 | 16 - 17 | 17 - 18 | 18 - 19 | 19 - 20 | 20 - 21 | 21 - 22 | 22 - 23 | 23 - 24 | 24 - 25 | 25 - 26 | 26 - 27 | 27 - 28 | 28 - 29 | 29 - 30 | 30 - 31 | 31 - 32 | 32 -(32 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 33; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 33; - a | b -----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 - 6 | 6 - 7 | 7 - 8 | 8 - 9 | 9 - 10 | 10 - 11 | 11 - 12 | 12 - 13 | 13 - 14 | 14 - 15 | 15 - 16 | 16 - 17 | 17 - 18 | 18 - 19 | 19 - 20 | 20 - 21 | 21 - 22 | 22 - 23 | 23 - 24 | 24 - 25 | 25 - 26 | 26 - 27 | 27 - 28 | 28 - 29 | 29 - 30 | 30 - 31 | 31 - 32 | 32 - 33 | 33 -(33 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 65; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 65; - a | b -----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 - 6 | 6 - 7 | 7 - 8 | 8 - 9 | 9 - 10 | 10 - 11 | 11 - 12 | 12 - 13 | 13 - 14 | 14 - 15 | 15 - 16 | 16 - 17 | 17 - 18 | 18 - 19 | 19 - 20 | 20 - 21 | 21 - 22 | 22 - 23 | 23 - 24 | 24 - 25 | 25 - 26 | 26 - 27 | 27 - 28 | 28 - 29 | 29 - 30 | 30 - 31 | 31 - 32 | 32 - 33 | 33 - 34 | 34 - 35 | 35 - 36 | 36 - 37 | 37 - 38 | 38 - 39 | 39 - 40 | 40 - 41 | 41 - 42 | 42 - 43 | 43 - 44 | 44 - 45 | 45 - 46 | 46 - 47 | 47 - 48 | 48 - 49 | 49 - 50 | 50 - 51 | 51 - 52 | 52 - 53 | 53 - 54 | 54 - 55 | 55 - 56 | 56 - 57 | 57 - 58 | 58 - 59 | 59 - 60 | 60 - 61 | 61 - 62 | 62 - 63 | 63 - 64 | 64 - 65 | 65 -(65 
rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 66; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 66; - a | b -----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 - 6 | 6 - 7 | 7 - 8 | 8 - 9 | 9 - 10 | 10 - 11 | 11 - 12 | 12 - 13 | 13 - 14 | 14 - 15 | 15 - 16 | 16 - 17 | 17 - 18 | 18 - 19 | 19 - 20 | 20 - 21 | 21 - 22 | 22 - 23 | 23 - 24 | 24 - 25 | 25 - 26 | 26 - 27 | 27 - 28 | 28 - 29 | 29 - 30 | 30 - 31 | 31 - 32 | 32 - 33 | 33 - 34 | 34 - 35 | 35 - 36 | 36 - 37 | 37 - 38 | 38 - 39 | 39 - 40 | 40 - 41 | 41 - 42 | 42 - 43 | 43 - 44 | 44 - 45 | 45 - 46 | 46 - 47 | 47 - 48 | 48 - 49 | 49 - 50 | 50 - 51 | 51 - 52 | 52 - 53 | 53 - 54 | 54 - 55 | 55 - 56 | 56 - 57 | 57 - 58 | 58 - 59 | 59 - 60 | 60 - 61 | 61 - 62 | 62 - 63 | 63 - 64 | 64 - 65 | 65 - 66 | 66 -(66 rows) - -delete from t; -drop table t; --- Incremental sort vs. parallel queries -set min_parallel_table_scan_size = '1kB'; -set min_parallel_index_scan_size = '1kB'; -set parallel_setup_cost = 0; -set parallel_tuple_cost = 0; -set max_parallel_workers_per_gather = 2; -create table t (a int, b int, c int); -insert into t select mod(i,10),mod(i,10),i from generate_series(1,10000) s(i); -create index on t (a); -analyze t; -set enable_incremental_sort = off; -explain (costs off) select a,b,sum(c) from t group by 1,2 order by 1,2,3 limit 1; - QUERY PLAN ------------------------------------------------------- - Limit - -> Sort - Sort Key: a, b, (sum(c)) - -> Finalize HashAggregate - Group Key: a, b - -> Gather - Workers Planned: 2 - -> Partial HashAggregate - Group Key: a, b - -> Parallel Seq Scan on t -(10 rows) - -set enable_incremental_sort = on; -explain (costs off) select a,b,sum(c) from t group by 1,2 order by 1,2,3 limit 1; - QUERY PLAN ----------------------------------------------------------------------- - Limit - -> Incremental Sort - Sort Key: a, b, (sum(c)) - Presorted Key: a, b - -> GroupAggregate - Group Key: a, b - -> Gather Merge - Workers Planned: 2 - -> Incremental Sort - Sort Key: a, b - Presorted Key: a - -> Parallel Index Scan using t_a_idx on t -(12 rows) - --- Incremental sort vs. set operations with varno 0 -set enable_hashagg to off; -explain (costs off) select * from t union select * from t order by 1,3; - QUERY PLAN ----------------------------------------------------------- - Incremental Sort - Sort Key: t.a, t.c - Presorted Key: t.a - -> Unique - -> Sort - Sort Key: t.a, t.b, t.c - -> Gather - Workers Planned: 2 - -> Parallel Append - -> Parallel Seq Scan on t - -> Parallel Seq Scan on t t_1 -(11 rows) - --- Full sort, not just incremental sort can be pushed below a gather merge path --- by generate_useful_gather_paths. -explain (costs off) select distinct a,b from t; - QUERY PLAN ------------------------------------------------- - Unique - -> Gather Merge - Workers Planned: 2 - -> Unique - -> Sort - Sort Key: a, b - -> Parallel Seq Scan on t -(7 rows) - -drop table t; --- Sort pushdown can't go below where expressions are part of the rel target. --- In particular this is interesting for volatile expressions which have to --- go above joins since otherwise we'll incorrectly use expression evaluations --- across multiple rows. 
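Before the pushdown tests that follow, a small illustration of that point about volatile expressions. Nothing here is from the regression schema; the inline VALUES lists stand in for joined relations. Each of the six joined output rows must get its own evaluation of random(), so the expression cannot be computed once per base-relation row below the join and then replayed:

select t1.a, t2.b, random() as r
from (values (1), (2)) as t1(a)
cross join (values (10), (20), (30)) as t2(b);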
-set enable_hashagg=off; -set enable_seqscan=off; -set enable_incremental_sort = off; -set parallel_tuple_cost=0; -set parallel_setup_cost=0; -set min_parallel_table_scan_size = 0; -set min_parallel_index_scan_size = 0; --- Parallel sort below join. -explain (costs off) select distinct sub.unique1, stringu1 -from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub; - QUERY PLAN --------------------------------------------------------------------------- - Unique - -> Nested Loop - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: tenk1.unique1, tenk1.stringu1 - -> Parallel Index Scan using tenk1_unique1 on tenk1 - -> Function Scan on generate_series -(8 rows) - -explain (costs off) select sub.unique1, stringu1 -from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub -order by 1, 2; - QUERY PLAN --------------------------------------------------------------------- - Nested Loop - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: tenk1.unique1, tenk1.stringu1 - -> Parallel Index Scan using tenk1_unique1 on tenk1 - -> Function Scan on generate_series -(7 rows) - --- Parallel sort but with expression that can be safely generated at the base rel. -explain (costs off) select distinct sub.unique1, md5(stringu1) -from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub; - QUERY PLAN ----------------------------------------------------------------------------------------- - Unique - -> Nested Loop - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: tenk1.unique1, (md5((tenk1.stringu1)::text)) COLLATE "C" - -> Parallel Index Scan using tenk1_unique1 on tenk1 - -> Function Scan on generate_series -(8 rows) - -explain (costs off) select sub.unique1, md5(stringu1) -from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub -order by 1, 2; - QUERY PLAN ----------------------------------------------------------------------------------- - Nested Loop - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: tenk1.unique1, (md5((tenk1.stringu1)::text)) COLLATE "C" - -> Parallel Index Scan using tenk1_unique1 on tenk1 - -> Function Scan on generate_series -(7 rows) - --- Parallel sort with an aggregate that can be safely generated in parallel, --- but we can't sort by partial aggregate values. -explain (costs off) select count(*) -from tenk1 t1 -join tenk1 t2 on t1.unique1 = t2.unique2 -join tenk1 t3 on t2.unique1 = t3.unique1 -order by count(*); - QUERY PLAN ------------------------------------------------------------------------------------------------ - Sort - Sort Key: (count(*)) - -> Finalize Aggregate - -> Gather - Workers Planned: 2 - -> Partial Aggregate - -> Parallel Hash Join - Hash Cond: (t2.unique1 = t3.unique1) - -> Parallel Hash Join - Hash Cond: (t1.unique1 = t2.unique2) - -> Parallel Index Only Scan using tenk1_unique1 on tenk1 t1 - -> Parallel Hash - -> Parallel Index Scan using tenk1_unique2 on tenk1 t2 - -> Parallel Hash - -> Parallel Index Only Scan using tenk1_unique1 on tenk1 t3 -(15 rows) - --- Parallel sort but with expression (correlated subquery) that --- is prohibited in parallel plans. 
-explain (costs off) select distinct - unique1, - (select t.unique1 from tenk1 where tenk1.unique1 = t.unique1) -from tenk1 t, generate_series(1, 1000); - QUERY PLAN ---------------------------------------------------------------------------------- - Unique - -> Sort - Sort Key: t.unique1, ((SubPlan 1)) - -> Gather - Workers Planned: 2 - -> Nested Loop - -> Parallel Index Only Scan using tenk1_unique1 on tenk1 t - -> Function Scan on generate_series - SubPlan 1 - -> Index Only Scan using tenk1_unique1 on tenk1 - Index Cond: (unique1 = t.unique1) -(11 rows) - -explain (costs off) select - unique1, - (select t.unique1 from tenk1 where tenk1.unique1 = t.unique1) -from tenk1 t, generate_series(1, 1000) -order by 1, 2; - QUERY PLAN ---------------------------------------------------------------------------- - Sort - Sort Key: t.unique1, ((SubPlan 1)) - -> Gather - Workers Planned: 2 - -> Nested Loop - -> Parallel Index Only Scan using tenk1_unique1 on tenk1 t - -> Function Scan on generate_series - SubPlan 1 - -> Index Only Scan using tenk1_unique1 on tenk1 - Index Cond: (unique1 = t.unique1) -(10 rows) - --- Parallel sort but with expression not available until the upper rel. -explain (costs off) select distinct sub.unique1, stringu1 || random()::text -from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub; - QUERY PLAN ---------------------------------------------------------------------------------------------- - Unique - -> Sort - Sort Key: tenk1.unique1, (((tenk1.stringu1)::text || (random())::text)) COLLATE "C" - -> Gather - Workers Planned: 2 - -> Nested Loop - -> Parallel Index Scan using tenk1_unique1 on tenk1 - -> Function Scan on generate_series -(8 rows) - -explain (costs off) select sub.unique1, stringu1 || random()::text -from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub -order by 1, 2; - QUERY PLAN ---------------------------------------------------------------------------------------- - Sort - Sort Key: tenk1.unique1, (((tenk1.stringu1)::text || (random())::text)) COLLATE "C" - -> Gather - Workers Planned: 2 - -> Nested Loop - -> Parallel Index Scan using tenk1_unique1 on tenk1 - -> Function Scan on generate_series -(7 rows) - -reset enable_hashagg; -reset enable_seqscan; -reset enable_incremental_sort; -reset parallel_tuple_cost; -reset parallel_setup_cost; -reset min_parallel_table_scan_size; -reset min_parallel_index_scan_size; --- Ensure incremental sorts work for amcanorderbyop type indexes -create table point_table (a point, b int); -create index point_table_a_idx on point_table using gist(a); --- Ensure we get an incremental sort plan for both of the following queries -explain (costs off) select a, b, a <-> point(5, 5) dist from point_table order by dist, b limit 1; - QUERY PLAN ---------------------------------------------------------------- - Limit - -> Incremental Sort - Sort Key: ((a <-> '(5,5)'::point)), b - Presorted Key: ((a <-> '(5,5)'::point)) - -> Index Scan using point_table_a_idx on point_table - Order By: (a <-> '(5,5)'::point) -(6 rows) - -explain (costs off) select a, b, a <-> point(5, 5) dist from point_table order by dist, b desc limit 1; - QUERY PLAN ---------------------------------------------------------------- - Limit - -> Incremental Sort - Sort Key: ((a <-> '(5,5)'::point)), b DESC - Presorted Key: ((a <-> '(5,5)'::point)) - -> Index Scan using point_table_a_idx on point_table - Order By: (a <-> '(5,5)'::point) -(6 rows) - +psql: error: connection to server on socket 
"/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/create_role.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/create_role.out --- /tmp/cirrus-ci-build/src/test/regress/expected/create_role.out 2024-03-13 23:12:37.623352000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/create_role.out 2024-03-13 23:14:29.460207000 +0000 @@ -1,261 +1,2 @@ --- ok, superuser can create users with any set of privileges -CREATE ROLE regress_role_super SUPERUSER; -CREATE ROLE regress_role_admin CREATEDB CREATEROLE REPLICATION BYPASSRLS; -GRANT CREATE ON DATABASE regression TO regress_role_admin WITH GRANT OPTION; -CREATE ROLE regress_role_limited_admin CREATEROLE; -CREATE ROLE regress_role_normal; --- fail, CREATEROLE user can't give away role attributes without having them -SET SESSION AUTHORIZATION regress_role_limited_admin; -CREATE ROLE regress_nosuch_superuser SUPERUSER; -ERROR: permission denied to create role -DETAIL: Only roles with the SUPERUSER attribute may create roles with the SUPERUSER attribute. -CREATE ROLE regress_nosuch_replication_bypassrls REPLICATION BYPASSRLS; -ERROR: permission denied to create role -DETAIL: Only roles with the REPLICATION attribute may create roles with the REPLICATION attribute. -CREATE ROLE regress_nosuch_replication REPLICATION; -ERROR: permission denied to create role -DETAIL: Only roles with the REPLICATION attribute may create roles with the REPLICATION attribute. -CREATE ROLE regress_nosuch_bypassrls BYPASSRLS; -ERROR: permission denied to create role -DETAIL: Only roles with the BYPASSRLS attribute may create roles with the BYPASSRLS attribute. -CREATE ROLE regress_nosuch_createdb CREATEDB; -ERROR: permission denied to create role -DETAIL: Only roles with the CREATEDB attribute may create roles with the CREATEDB attribute. --- ok, can create a role without any special attributes -CREATE ROLE regress_role_limited; --- fail, can't give it in any of the restricted attributes -ALTER ROLE regress_role_limited SUPERUSER; -ERROR: permission denied to alter role -DETAIL: Only roles with the SUPERUSER attribute may change the SUPERUSER attribute. -ALTER ROLE regress_role_limited REPLICATION; -ERROR: permission denied to alter role -DETAIL: Only roles with the REPLICATION attribute may change the REPLICATION attribute. -ALTER ROLE regress_role_limited CREATEDB; -ERROR: permission denied to alter role -DETAIL: Only roles with the CREATEDB attribute may change the CREATEDB attribute. -ALTER ROLE regress_role_limited BYPASSRLS; -ERROR: permission denied to alter role -DETAIL: Only roles with the BYPASSRLS attribute may change the BYPASSRLS attribute. 
-DROP ROLE regress_role_limited; --- ok, can give away these role attributes if you have them -SET SESSION AUTHORIZATION regress_role_admin; -CREATE ROLE regress_replication_bypassrls REPLICATION BYPASSRLS; -CREATE ROLE regress_replication REPLICATION; -CREATE ROLE regress_bypassrls BYPASSRLS; -CREATE ROLE regress_createdb CREATEDB; --- ok, can toggle these role attributes off and on if you have them -ALTER ROLE regress_replication NOREPLICATION; -ALTER ROLE regress_replication REPLICATION; -ALTER ROLE regress_bypassrls NOBYPASSRLS; -ALTER ROLE regress_bypassrls BYPASSRLS; -ALTER ROLE regress_createdb NOCREATEDB; -ALTER ROLE regress_createdb CREATEDB; --- fail, can't toggle SUPERUSER -ALTER ROLE regress_createdb SUPERUSER; -ERROR: permission denied to alter role -DETAIL: Only roles with the SUPERUSER attribute may change the SUPERUSER attribute. -ALTER ROLE regress_createdb NOSUPERUSER; -ERROR: permission denied to alter role -DETAIL: Only roles with the SUPERUSER attribute may change the SUPERUSER attribute. --- ok, having CREATEROLE is enough to create users with these privileges -CREATE ROLE regress_createrole CREATEROLE NOINHERIT; -GRANT CREATE ON DATABASE regression TO regress_createrole WITH GRANT OPTION; -CREATE ROLE regress_login LOGIN; -CREATE ROLE regress_inherit INHERIT; -CREATE ROLE regress_connection_limit CONNECTION LIMIT 5; -CREATE ROLE regress_encrypted_password ENCRYPTED PASSWORD 'foo'; -CREATE ROLE regress_password_null PASSWORD NULL; --- ok, backwards compatible noise words should be ignored -CREATE ROLE regress_noiseword SYSID 12345; -NOTICE: SYSID can no longer be specified --- fail, cannot grant membership in superuser role -CREATE ROLE regress_nosuch_super IN ROLE regress_role_super; -ERROR: permission denied to grant role "regress_role_super" -DETAIL: Only roles with the SUPERUSER attribute may grant roles with the SUPERUSER attribute. 
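The pattern in the errors above is that a CREATEROLE user can only hand out attributes it holds itself. A quick, illustrative way (not part of the test) to check which of those attributes the current role has before attempting CREATE ROLE is to look at pg_roles:

select rolname, rolsuper, rolcreaterole, rolcreatedb, rolreplication, rolbypassrls
from pg_roles
where rolname = current_user;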
--- fail, database owner cannot have members -CREATE ROLE regress_nosuch_dbowner IN ROLE pg_database_owner; -ERROR: role "pg_database_owner" cannot have explicit members --- ok, can grant other users into a role -CREATE ROLE regress_inroles ROLE - regress_role_super, regress_createdb, regress_createrole, regress_login, - regress_inherit, regress_connection_limit, regress_encrypted_password, regress_password_null; --- fail, cannot grant a role into itself -CREATE ROLE regress_nosuch_recursive ROLE regress_nosuch_recursive; -ERROR: role "regress_nosuch_recursive" is a member of role "regress_nosuch_recursive" --- ok, can grant other users into a role with admin option -CREATE ROLE regress_adminroles ADMIN - regress_role_super, regress_createdb, regress_createrole, regress_login, - regress_inherit, regress_connection_limit, regress_encrypted_password, regress_password_null; --- fail, cannot grant a role into itself with admin option -CREATE ROLE regress_nosuch_admin_recursive ADMIN regress_nosuch_admin_recursive; -ERROR: role "regress_nosuch_admin_recursive" is a member of role "regress_nosuch_admin_recursive" --- fail, regress_createrole does not have CREATEDB privilege -SET SESSION AUTHORIZATION regress_createrole; -CREATE DATABASE regress_nosuch_db; -ERROR: permission denied to create database --- ok, regress_createrole can create new roles -CREATE ROLE regress_plainrole; --- ok, roles with CREATEROLE can create new roles with it -CREATE ROLE regress_rolecreator CREATEROLE; --- ok, roles with CREATEROLE can create new roles with different role --- attributes, including CREATEROLE -CREATE ROLE regress_hasprivs CREATEROLE LOGIN INHERIT CONNECTION LIMIT 5; --- ok, we should be able to modify a role we created -COMMENT ON ROLE regress_hasprivs IS 'some comment'; -ALTER ROLE regress_hasprivs RENAME TO regress_tenant; -ALTER ROLE regress_tenant NOINHERIT NOLOGIN CONNECTION LIMIT 7; --- fail, we should be unable to modify a role we did not create -COMMENT ON ROLE regress_role_normal IS 'some comment'; -ERROR: permission denied -DETAIL: The current user must have the ADMIN option on role "regress_role_normal". -ALTER ROLE regress_role_normal RENAME TO regress_role_abnormal; -ERROR: permission denied to rename role -DETAIL: Only roles with the CREATEROLE attribute and the ADMIN option on role "regress_role_normal" may rename this role. -ALTER ROLE regress_role_normal NOINHERIT NOLOGIN CONNECTION LIMIT 7; -ERROR: permission denied to alter role -DETAIL: Only roles with the CREATEROLE attribute and the ADMIN option on role "regress_role_normal" may alter this role. 
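As the DETAIL messages above suggest, what separates a role we created from one we did not is the ADMIN OPTION that CREATEROLE grants the creator on each role it creates. Below is a sketch of how to see those memberships from SQL; it inspects only direct grants and is illustrative rather than part of the test:

-- roles the current role holds with ADMIN OPTION (direct grants only)
select r.rolname
from pg_auth_members m
join pg_roles r on r.oid = m.roleid
join pg_roles g on g.oid = m.member
where g.rolname = current_user
  and m.admin_option;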
--- ok, regress_tenant can create objects within the database -SET SESSION AUTHORIZATION regress_tenant; -CREATE TABLE tenant_table (i integer); -CREATE INDEX tenant_idx ON tenant_table(i); -CREATE VIEW tenant_view AS SELECT * FROM pg_catalog.pg_class; -REVOKE ALL PRIVILEGES ON tenant_table FROM PUBLIC; --- fail, these objects belonging to regress_tenant -SET SESSION AUTHORIZATION regress_createrole; -DROP INDEX tenant_idx; -ERROR: must be owner of index tenant_idx -ALTER TABLE tenant_table ADD COLUMN t text; -ERROR: must be owner of table tenant_table -DROP TABLE tenant_table; -ERROR: must be owner of table tenant_table -ALTER VIEW tenant_view OWNER TO regress_role_admin; -ERROR: must be owner of view tenant_view -DROP VIEW tenant_view; -ERROR: must be owner of view tenant_view --- fail, can't create objects owned as regress_tenant -CREATE SCHEMA regress_tenant_schema AUTHORIZATION regress_tenant; -ERROR: must be able to SET ROLE "regress_tenant" --- fail, we don't inherit permissions from regress_tenant -REASSIGN OWNED BY regress_tenant TO regress_createrole; -ERROR: permission denied to reassign objects -DETAIL: Only roles with privileges of role "regress_tenant" may reassign objects owned by it. --- ok, create a role with a value for createrole_self_grant -SET createrole_self_grant = 'set, inherit'; -CREATE ROLE regress_tenant2; -GRANT CREATE ON DATABASE regression TO regress_tenant2; --- ok, regress_tenant2 can create objects within the database -SET SESSION AUTHORIZATION regress_tenant2; -CREATE TABLE tenant2_table (i integer); -REVOKE ALL PRIVILEGES ON tenant2_table FROM PUBLIC; --- ok, because we have SET and INHERIT on regress_tenant2 -SET SESSION AUTHORIZATION regress_createrole; -CREATE SCHEMA regress_tenant2_schema AUTHORIZATION regress_tenant2; -ALTER SCHEMA regress_tenant2_schema OWNER TO regress_createrole; -ALTER TABLE tenant2_table OWNER TO regress_createrole; -ALTER TABLE tenant2_table OWNER TO regress_tenant2; --- with SET but not INHERIT, we can give away objects but not take them -REVOKE INHERIT OPTION FOR regress_tenant2 FROM regress_createrole; -ALTER SCHEMA regress_tenant2_schema OWNER TO regress_tenant2; -ALTER TABLE tenant2_table OWNER TO regress_createrole; -ERROR: must be owner of table tenant2_table --- with INHERIT but not SET, we can take objects but not give them away -GRANT regress_tenant2 TO regress_createrole WITH INHERIT TRUE, SET FALSE; -ALTER TABLE tenant2_table OWNER TO regress_createrole; -ALTER TABLE tenant2_table OWNER TO regress_tenant2; -ERROR: must be able to SET ROLE "regress_tenant2" -DROP TABLE tenant2_table; --- fail, CREATEROLE is not enough to create roles in privileged roles -CREATE ROLE regress_read_all_data IN ROLE pg_read_all_data; -ERROR: permission denied to grant role "pg_read_all_data" -DETAIL: Only roles with the ADMIN option on role "pg_read_all_data" may grant this role. -CREATE ROLE regress_write_all_data IN ROLE pg_write_all_data; -ERROR: permission denied to grant role "pg_write_all_data" -DETAIL: Only roles with the ADMIN option on role "pg_write_all_data" may grant this role. -CREATE ROLE regress_monitor IN ROLE pg_monitor; -ERROR: permission denied to grant role "pg_monitor" -DETAIL: Only roles with the ADMIN option on role "pg_monitor" may grant this role. -CREATE ROLE regress_read_all_settings IN ROLE pg_read_all_settings; -ERROR: permission denied to grant role "pg_read_all_settings" -DETAIL: Only roles with the ADMIN option on role "pg_read_all_settings" may grant this role. 
-CREATE ROLE regress_read_all_stats IN ROLE pg_read_all_stats; -ERROR: permission denied to grant role "pg_read_all_stats" -DETAIL: Only roles with the ADMIN option on role "pg_read_all_stats" may grant this role. -CREATE ROLE regress_stat_scan_tables IN ROLE pg_stat_scan_tables; -ERROR: permission denied to grant role "pg_stat_scan_tables" -DETAIL: Only roles with the ADMIN option on role "pg_stat_scan_tables" may grant this role. -CREATE ROLE regress_read_server_files IN ROLE pg_read_server_files; -ERROR: permission denied to grant role "pg_read_server_files" -DETAIL: Only roles with the ADMIN option on role "pg_read_server_files" may grant this role. -CREATE ROLE regress_write_server_files IN ROLE pg_write_server_files; -ERROR: permission denied to grant role "pg_write_server_files" -DETAIL: Only roles with the ADMIN option on role "pg_write_server_files" may grant this role. -CREATE ROLE regress_execute_server_program IN ROLE pg_execute_server_program; -ERROR: permission denied to grant role "pg_execute_server_program" -DETAIL: Only roles with the ADMIN option on role "pg_execute_server_program" may grant this role. -CREATE ROLE regress_signal_backend IN ROLE pg_signal_backend; -ERROR: permission denied to grant role "pg_signal_backend" -DETAIL: Only roles with the ADMIN option on role "pg_signal_backend" may grant this role. --- fail, role still owns database objects -DROP ROLE regress_tenant; -ERROR: role "regress_tenant" cannot be dropped because some objects depend on it -DETAIL: owner of table tenant_table -owner of view tenant_view --- fail, creation of these roles failed above so they do not now exist -SET SESSION AUTHORIZATION regress_role_admin; -DROP ROLE regress_nosuch_superuser; -ERROR: role "regress_nosuch_superuser" does not exist -DROP ROLE regress_nosuch_replication_bypassrls; -ERROR: role "regress_nosuch_replication_bypassrls" does not exist -DROP ROLE regress_nosuch_replication; -ERROR: role "regress_nosuch_replication" does not exist -DROP ROLE regress_nosuch_bypassrls; -ERROR: role "regress_nosuch_bypassrls" does not exist -DROP ROLE regress_nosuch_super; -ERROR: role "regress_nosuch_super" does not exist -DROP ROLE regress_nosuch_dbowner; -ERROR: role "regress_nosuch_dbowner" does not exist -DROP ROLE regress_nosuch_recursive; -ERROR: role "regress_nosuch_recursive" does not exist -DROP ROLE regress_nosuch_admin_recursive; -ERROR: role "regress_nosuch_admin_recursive" does not exist -DROP ROLE regress_plainrole; --- must revoke privileges before dropping role -REVOKE CREATE ON DATABASE regression FROM regress_createrole CASCADE; --- ok, should be able to drop non-superuser roles we created -DROP ROLE regress_replication_bypassrls; -DROP ROLE regress_replication; -DROP ROLE regress_bypassrls; -DROP ROLE regress_createdb; -DROP ROLE regress_createrole; -DROP ROLE regress_login; -DROP ROLE regress_inherit; -DROP ROLE regress_connection_limit; -DROP ROLE regress_encrypted_password; -DROP ROLE regress_password_null; -DROP ROLE regress_noiseword; -DROP ROLE regress_inroles; -DROP ROLE regress_adminroles; --- fail, cannot drop ourself, nor superusers or roles we lack ADMIN for -DROP ROLE regress_role_super; -ERROR: permission denied to drop role -DETAIL: Only roles with the SUPERUSER attribute may drop roles with the SUPERUSER attribute. 
-DROP ROLE regress_role_admin; -ERROR: current user cannot be dropped -DROP ROLE regress_rolecreator; -ERROR: permission denied to drop role -DETAIL: Only roles with the CREATEROLE attribute and the ADMIN option on role "regress_rolecreator" may drop this role. --- ok -RESET SESSION AUTHORIZATION; -REVOKE CREATE ON DATABASE regression FROM regress_role_admin CASCADE; -DROP INDEX tenant_idx; -DROP TABLE tenant_table; -DROP VIEW tenant_view; -DROP SCHEMA regress_tenant2_schema; --- check for duplicated drop -DROP ROLE regress_tenant, regress_tenant; -DROP ROLE regress_tenant2; -DROP ROLE regress_rolecreator; -DROP ROLE regress_role_admin; -DROP ROLE regress_role_limited_admin; -DROP ROLE regress_role_super; -DROP ROLE regress_role_normal; +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/without_overlaps.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/without_overlaps.out --- /tmp/cirrus-ci-build/src/test/regress/expected/without_overlaps.out 2024-03-13 23:12:37.628519000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/without_overlaps.out 2024-03-13 23:14:29.493146000 +0000 @@ -1,395 +1,2 @@ --- Tests for WITHOUT OVERLAPS. --- --- We leave behind several tables to test pg_dump etc: --- temporal_rng, temporal_rng2, --- temporal_fk_rng2rng. --- --- test input parser --- --- PK with no columns just WITHOUT OVERLAPS: -CREATE TABLE temporal_rng ( - valid_at tsrange, - CONSTRAINT temporal_rng_pk PRIMARY KEY (valid_at WITHOUT OVERLAPS) -); -ERROR: constraint using WITHOUT OVERLAPS needs at least two columns --- PK with a range column/PERIOD that isn't there: -CREATE TABLE temporal_rng ( - id INTEGER, - CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) -); -ERROR: column "valid_at" named in key does not exist -LINE 3: CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOU... - ^ --- PK with a non-range column: -CREATE TABLE temporal_rng ( - id int4range, - valid_at TEXT, - CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) -); -ERROR: data type text has no default operator class for access method "gist" -HINT: You must specify an operator class for the index or define a default operator class for the data type. --- PK with one column plus a range: -CREATE TABLE temporal_rng ( - -- Since we can't depend on having btree_gist here, - -- use an int4range instead of an int. - -- (The rangetypes regression test uses the same trick.) 
- id int4range, - valid_at tsrange, - CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) -); -\d temporal_rng - Table "public.temporal_rng" - Column | Type | Collation | Nullable | Default -----------+-----------+-----------+----------+--------- - id | int4range | | not null | - valid_at | tsrange | | not null | -Indexes: - "temporal_rng_pk" PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) - -SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_rng_pk'; - pg_get_constraintdef ---------------------------------------------- - PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) -(1 row) - -SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE conname = 'temporal_rng_pk'; - pg_get_indexdef -------------------------------------------------------------------------------- - CREATE UNIQUE INDEX temporal_rng_pk ON temporal_rng USING gist (id, valid_at) -(1 row) - --- PK with two columns plus a range: --- We don't drop this table because tests below also need multiple scalar columns. -CREATE TABLE temporal_rng2 ( - id1 int4range, - id2 int4range, - valid_at tsrange, - CONSTRAINT temporal_rng2_pk PRIMARY KEY (id1, id2, valid_at WITHOUT OVERLAPS) -); -\d temporal_rng2 - Table "public.temporal_rng2" - Column | Type | Collation | Nullable | Default -----------+-----------+-----------+----------+--------- - id1 | int4range | | not null | - id2 | int4range | | not null | - valid_at | tsrange | | not null | -Indexes: - "temporal_rng2_pk" PRIMARY KEY (id1, id2, valid_at WITHOUT OVERLAPS) - -SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_rng2_pk'; - pg_get_constraintdef ---------------------------------------------------- - PRIMARY KEY (id1, id2, valid_at WITHOUT OVERLAPS) -(1 row) - -SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE conname = 'temporal_rng2_pk'; - pg_get_indexdef ---------------------------------------------------------------------------------------- - CREATE UNIQUE INDEX temporal_rng2_pk ON temporal_rng2 USING gist (id1, id2, valid_at) -(1 row) - --- PK with a custom range type: -CREATE TYPE textrange2 AS range (subtype=text, collation="C"); -CREATE TABLE temporal_rng3 ( - id int4range, - valid_at textrange2, - CONSTRAINT temporal_rng3_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) -); -ALTER TABLE temporal_rng3 DROP CONSTRAINT temporal_rng3_pk; -DROP TABLE temporal_rng3; -DROP TYPE textrange2; --- PK with a multirange: -CREATE TABLE temporal_mltrng ( - id int4range, - valid_at tsmultirange, - CONSTRAINT temporal_mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) -); -\d temporal_mltrng - Table "public.temporal_mltrng" - Column | Type | Collation | Nullable | Default -----------+--------------+-----------+----------+--------- - id | int4range | | not null | - valid_at | tsmultirange | | not null | -Indexes: - "temporal_mltrng_pk" PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) - --- UNIQUE with no columns just WITHOUT OVERLAPS: -CREATE TABLE temporal_rng3 ( - valid_at tsrange, - CONSTRAINT temporal_rng3_uq UNIQUE (valid_at WITHOUT OVERLAPS) -); -ERROR: constraint using WITHOUT OVERLAPS needs at least two columns --- UNIQUE with a range column/PERIOD that isn't there: -CREATE TABLE temporal_rng3 ( - id INTEGER, - CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS) -); -ERROR: column "valid_at" named in key does not exist -LINE 3: CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT OV... 
- ^ --- UNIQUE with a non-range column: -CREATE TABLE temporal_rng3 ( - id int4range, - valid_at TEXT, - CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS) -); -ERROR: data type text has no default operator class for access method "gist" -HINT: You must specify an operator class for the index or define a default operator class for the data type. --- UNIQUE with one column plus a range: -CREATE TABLE temporal_rng3 ( - id int4range, - valid_at tsrange, - CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS) -); -\d temporal_rng3 - Table "public.temporal_rng3" - Column | Type | Collation | Nullable | Default -----------+-----------+-----------+----------+--------- - id | int4range | | | - valid_at | tsrange | | | -Indexes: - "temporal_rng3_uq" UNIQUE (id, valid_at WITHOUT OVERLAPS) - -SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_rng3_uq'; - pg_get_constraintdef ----------------------------------------- - UNIQUE (id, valid_at WITHOUT OVERLAPS) -(1 row) - -SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE conname = 'temporal_rng3_uq'; - pg_get_indexdef ---------------------------------------------------------------------------------- - CREATE UNIQUE INDEX temporal_rng3_uq ON temporal_rng3 USING gist (id, valid_at) -(1 row) - -DROP TABLE temporal_rng3; --- UNIQUE with two columns plus a range: -CREATE TABLE temporal_rng3 ( - id1 int4range, - id2 int4range, - valid_at tsrange, - CONSTRAINT temporal_rng3_uq UNIQUE (id1, id2, valid_at WITHOUT OVERLAPS) -); -\d temporal_rng3 - Table "public.temporal_rng3" - Column | Type | Collation | Nullable | Default -----------+-----------+-----------+----------+--------- - id1 | int4range | | | - id2 | int4range | | | - valid_at | tsrange | | | -Indexes: - "temporal_rng3_uq" UNIQUE (id1, id2, valid_at WITHOUT OVERLAPS) - -SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_rng3_uq'; - pg_get_constraintdef ----------------------------------------------- - UNIQUE (id1, id2, valid_at WITHOUT OVERLAPS) -(1 row) - -SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE conname = 'temporal_rng3_uq'; - pg_get_indexdef ---------------------------------------------------------------------------------------- - CREATE UNIQUE INDEX temporal_rng3_uq ON temporal_rng3 USING gist (id1, id2, valid_at) -(1 row) - -DROP TABLE temporal_rng3; --- UNIQUE with a custom range type: -CREATE TYPE textrange2 AS range (subtype=text, collation="C"); -CREATE TABLE temporal_rng3 ( - id int4range, - valid_at textrange2, - CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS) -); -ALTER TABLE temporal_rng3 DROP CONSTRAINT temporal_rng3_uq; -DROP TABLE temporal_rng3; -DROP TYPE textrange2; --- --- test ALTER TABLE ADD CONSTRAINT --- -DROP TABLE temporal_rng; -CREATE TABLE temporal_rng ( - id int4range, - valid_at tsrange -); -ALTER TABLE temporal_rng - ADD CONSTRAINT temporal_rng_pk - PRIMARY KEY (id, valid_at WITHOUT OVERLAPS); --- PK with USING INDEX (not possible): -CREATE TABLE temporal3 ( - id int4range, - valid_at tsrange -); -CREATE INDEX idx_temporal3_uq ON temporal3 USING gist (id, valid_at); -ALTER TABLE temporal3 - ADD CONSTRAINT temporal3_pk - PRIMARY KEY USING INDEX idx_temporal3_uq; -ERROR: "idx_temporal3_uq" is not a unique index -LINE 2: ADD CONSTRAINT temporal3_pk - ^ -DETAIL: Cannot create a primary key or unique constraint using such an index. 
-DROP TABLE temporal3; --- UNIQUE with USING INDEX (not possible): -CREATE TABLE temporal3 ( - id int4range, - valid_at tsrange -); -CREATE INDEX idx_temporal3_uq ON temporal3 USING gist (id, valid_at); -ALTER TABLE temporal3 - ADD CONSTRAINT temporal3_uq - UNIQUE USING INDEX idx_temporal3_uq; -ERROR: "idx_temporal3_uq" is not a unique index -LINE 2: ADD CONSTRAINT temporal3_uq - ^ -DETAIL: Cannot create a primary key or unique constraint using such an index. -DROP TABLE temporal3; --- UNIQUE with USING [UNIQUE] INDEX (possible but not a temporal constraint): -CREATE TABLE temporal3 ( - id int4range, - valid_at tsrange -); -CREATE UNIQUE INDEX idx_temporal3_uq ON temporal3 (id, valid_at); -ALTER TABLE temporal3 - ADD CONSTRAINT temporal3_uq - UNIQUE USING INDEX idx_temporal3_uq; -NOTICE: ALTER TABLE / ADD CONSTRAINT USING INDEX will rename index "idx_temporal3_uq" to "temporal3_uq" -DROP TABLE temporal3; --- Add range column and the PK at the same time -CREATE TABLE temporal3 ( - id int4range -); -ALTER TABLE temporal3 - ADD COLUMN valid_at tsrange, - ADD CONSTRAINT temporal3_pk - PRIMARY KEY (id, valid_at WITHOUT OVERLAPS); -DROP TABLE temporal3; --- Add range column and UNIQUE constraint at the same time -CREATE TABLE temporal3 ( - id int4range -); -ALTER TABLE temporal3 - ADD COLUMN valid_at tsrange, - ADD CONSTRAINT temporal3_uq - UNIQUE (id, valid_at WITHOUT OVERLAPS); -DROP TABLE temporal3; --- --- test PK inserts --- --- okay: -INSERT INTO temporal_rng VALUES ('[1,1]', tsrange('2018-01-02', '2018-02-03')); -INSERT INTO temporal_rng VALUES ('[1,1]', tsrange('2018-03-03', '2018-04-04')); -INSERT INTO temporal_rng VALUES ('[2,2]', tsrange('2018-01-01', '2018-01-05')); -INSERT INTO temporal_rng VALUES ('[3,3]', tsrange('2018-01-01', NULL)); --- should fail: -INSERT INTO temporal_rng VALUES ('[1,1]', tsrange('2018-01-01', '2018-01-05')); -ERROR: conflicting key value violates exclusion constraint "temporal_rng_pk" -DETAIL: Key (id, valid_at)=([1,2), ["Mon Jan 01 00:00:00 2018","Fri Jan 05 00:00:00 2018")) conflicts with existing key (id, valid_at)=([1,2), ["Tue Jan 02 00:00:00 2018","Sat Feb 03 00:00:00 2018")). -INSERT INTO temporal_rng VALUES (NULL, tsrange('2018-01-01', '2018-01-05')); -ERROR: null value in column "id" of relation "temporal_rng" violates not-null constraint -DETAIL: Failing row contains (null, ["Mon Jan 01 00:00:00 2018","Fri Jan 05 00:00:00 2018")). -INSERT INTO temporal_rng VALUES ('[3,3]', NULL); -ERROR: null value in column "valid_at" of relation "temporal_rng" violates not-null constraint -DETAIL: Failing row contains ([3,4), null). 
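The error texts above show how a temporal primary key is enforced: WITHOUT OVERLAPS builds a GiST index, and conflicts are reported as exclusion-constraint violations on equality of the scalar part plus overlap of the range part. A rough hand-written analogue, as a sketch only (temporal_rng_manual is a hypothetical table; the real constraint additionally marks the key columns NOT NULL for you):

create table temporal_rng_manual (
    id int4range not null,
    valid_at tsrange not null,
    exclude using gist (id with =, valid_at with &&)
);
-- overlapping rows for the same id are rejected, much like the
-- PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) form above
insert into temporal_rng_manual values ('[1,1]', tsrange('2018-01-02', '2018-02-03'));
insert into temporal_rng_manual values ('[1,1]', tsrange('2018-01-01', '2018-01-05'));  -- fails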
--- --- test a range with both a PK and a UNIQUE constraint --- -CREATE TABLE temporal3 ( - id int4range, - valid_at daterange, - id2 int8range, - name TEXT, - CONSTRAINT temporal3_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS), - CONSTRAINT temporal3_uniq UNIQUE (id2, valid_at WITHOUT OVERLAPS) -); -INSERT INTO temporal3 (id, valid_at, id2, name) - VALUES - ('[1,1]', daterange('2000-01-01', '2010-01-01'), '[7,7]', 'foo'), - ('[2,2]', daterange('2000-01-01', '2010-01-01'), '[9,9]', 'bar') -; -DROP TABLE temporal3; --- --- test changing the PK's dependencies --- -CREATE TABLE temporal3 ( - id int4range, - valid_at tsrange, - CONSTRAINT temporal3_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) -); -ALTER TABLE temporal3 ALTER COLUMN valid_at DROP NOT NULL; -ERROR: column "valid_at" is in a primary key -ALTER TABLE temporal3 ALTER COLUMN valid_at TYPE tstzrange USING tstzrange(lower(valid_at), upper(valid_at)); -ALTER TABLE temporal3 RENAME COLUMN valid_at TO valid_thru; -ALTER TABLE temporal3 DROP COLUMN valid_thru; -DROP TABLE temporal3; --- --- test PARTITION BY for ranges --- --- temporal PRIMARY KEY: -CREATE TABLE temporal_partitioned ( - id int4range, - valid_at daterange, - name text, - CONSTRAINT temporal_paritioned_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) -) PARTITION BY LIST (id); -CREATE TABLE tp1 PARTITION OF temporal_partitioned FOR VALUES IN ('[1,1]', '[2,2]'); -CREATE TABLE tp2 PARTITION OF temporal_partitioned FOR VALUES IN ('[3,3]', '[4,4]'); -INSERT INTO temporal_partitioned VALUES - ('[1,1]', daterange('2000-01-01', '2000-02-01'), 'one'), - ('[1,1]', daterange('2000-02-01', '2000-03-01'), 'one'), - ('[3,3]', daterange('2000-01-01', '2010-01-01'), 'three'); -SELECT * FROM temporal_partitioned ORDER BY id, valid_at; - id | valid_at | name --------+-------------------------+------- - [1,2) | [01-01-2000,02-01-2000) | one - [1,2) | [02-01-2000,03-01-2000) | one - [3,4) | [01-01-2000,01-01-2010) | three -(3 rows) - -SELECT * FROM tp1 ORDER BY id, valid_at; - id | valid_at | name --------+-------------------------+------ - [1,2) | [01-01-2000,02-01-2000) | one - [1,2) | [02-01-2000,03-01-2000) | one -(2 rows) - -SELECT * FROM tp2 ORDER BY id, valid_at; - id | valid_at | name --------+-------------------------+------- - [3,4) | [01-01-2000,01-01-2010) | three -(1 row) - -DROP TABLE temporal_partitioned; --- temporal UNIQUE: -CREATE TABLE temporal_partitioned ( - id int4range, - valid_at daterange, - name text, - CONSTRAINT temporal_paritioned_uq UNIQUE (id, valid_at WITHOUT OVERLAPS) -) PARTITION BY LIST (id); -CREATE TABLE tp1 PARTITION OF temporal_partitioned FOR VALUES IN ('[1,1]', '[2,2]'); -CREATE TABLE tp2 PARTITION OF temporal_partitioned FOR VALUES IN ('[3,3]', '[4,4]'); -INSERT INTO temporal_partitioned VALUES - ('[1,1]', daterange('2000-01-01', '2000-02-01'), 'one'), - ('[1,1]', daterange('2000-02-01', '2000-03-01'), 'one'), - ('[3,3]', daterange('2000-01-01', '2010-01-01'), 'three'); -SELECT * FROM temporal_partitioned ORDER BY id, valid_at; - id | valid_at | name --------+-------------------------+------- - [1,2) | [01-01-2000,02-01-2000) | one - [1,2) | [02-01-2000,03-01-2000) | one - [3,4) | [01-01-2000,01-01-2010) | three -(3 rows) - -SELECT * FROM tp1 ORDER BY id, valid_at; - id | valid_at | name --------+-------------------------+------ - [1,2) | [01-01-2000,02-01-2000) | one - [1,2) | [02-01-2000,03-01-2000) | one -(2 rows) - -SELECT * FROM tp2 ORDER BY id, valid_at; - id | valid_at | name --------+-------------------------+------- - [3,4) | 
[01-01-2000,01-01-2010) | three -(1 row) - -DROP TABLE temporal_partitioned; +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/rules.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/rules.out --- /tmp/cirrus-ci-build/src/test/regress/expected/rules.out 2024-03-13 23:12:37.627088000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/rules.out 2024-03-13 23:14:29.807836000 +0000 @@ -1,3800 +1,2 @@ --- --- RULES --- From Jan's original setup_ruletest.sql and run_ruletest.sql --- - thomas 1998-09-13 --- --- --- Tables and rules for the view test --- -create table rtest_t1 (a int4, b int4); -create table rtest_t2 (a int4, b int4); -create table rtest_t3 (a int4, b int4); -create view rtest_v1 as select * from rtest_t1; -create rule rtest_v1_ins as on insert to rtest_v1 do instead - insert into rtest_t1 values (new.a, new.b); -create rule rtest_v1_upd as on update to rtest_v1 do instead - update rtest_t1 set a = new.a, b = new.b - where a = old.a; -create rule rtest_v1_del as on delete to rtest_v1 do instead - delete from rtest_t1 where a = old.a; --- Test comments -COMMENT ON RULE rtest_v1_bad ON rtest_v1 IS 'bad rule'; -ERROR: rule "rtest_v1_bad" for relation "rtest_v1" does not exist -COMMENT ON RULE rtest_v1_del ON rtest_v1 IS 'delete rule'; -COMMENT ON RULE rtest_v1_del ON rtest_v1 IS NULL; --- --- Tables and rules for the constraint update/delete test --- --- Note: --- Now that we have multiple action rule support, we check --- both possible syntaxes to define them (The last action --- can but must not have a semicolon at the end). 
--- -create table rtest_system (sysname text, sysdesc text); -create table rtest_interface (sysname text, ifname text); -create table rtest_person (pname text, pdesc text); -create table rtest_admin (pname text, sysname text); -create rule rtest_sys_upd as on update to rtest_system do also ( - update rtest_interface set sysname = new.sysname - where sysname = old.sysname; - update rtest_admin set sysname = new.sysname - where sysname = old.sysname - ); -create rule rtest_sys_del as on delete to rtest_system do also ( - delete from rtest_interface where sysname = old.sysname; - delete from rtest_admin where sysname = old.sysname; - ); -create rule rtest_pers_upd as on update to rtest_person do also - update rtest_admin set pname = new.pname where pname = old.pname; -create rule rtest_pers_del as on delete to rtest_person do also - delete from rtest_admin where pname = old.pname; --- --- Tables and rules for the logging test --- -create table rtest_emp (ename char(20), salary numeric); -create table rtest_emplog (ename char(20), who name, action char(10), newsal numeric, oldsal numeric); -create table rtest_empmass (ename char(20), salary numeric); -create rule rtest_emp_ins as on insert to rtest_emp do - insert into rtest_emplog values (new.ename, current_user, - 'hired', new.salary, '0.00'); -create rule rtest_emp_upd as on update to rtest_emp where new.salary != old.salary do - insert into rtest_emplog values (new.ename, current_user, - 'honored', new.salary, old.salary); -create rule rtest_emp_del as on delete to rtest_emp do - insert into rtest_emplog values (old.ename, current_user, - 'fired', '0.00', old.salary); --- --- Tables and rules for the multiple cascaded qualified instead --- rule test --- -create table rtest_t4 (a int4, b text); -create table rtest_t5 (a int4, b text); -create table rtest_t6 (a int4, b text); -create table rtest_t7 (a int4, b text); -create table rtest_t8 (a int4, b text); -create table rtest_t9 (a int4, b text); -create rule rtest_t4_ins1 as on insert to rtest_t4 - where new.a >= 10 and new.a < 20 do instead - insert into rtest_t5 values (new.a, new.b); -create rule rtest_t4_ins2 as on insert to rtest_t4 - where new.a >= 20 and new.a < 30 do - insert into rtest_t6 values (new.a, new.b); -create rule rtest_t5_ins as on insert to rtest_t5 - where new.a > 15 do - insert into rtest_t7 values (new.a, new.b); -create rule rtest_t6_ins as on insert to rtest_t6 - where new.a > 25 do instead - insert into rtest_t8 values (new.a, new.b); --- --- Tables and rules for the rule fire order test --- --- As of PG 7.3, the rules should fire in order by name, regardless --- of INSTEAD attributes or creation order. 
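Since firing order is by rule name, it can help to list a table's rules in that same order when reading the tests below. An illustrative query against the pg_rules system view (not part of the expected output; rtest_order1 is created just after this point):

select rulename, definition
from pg_rules
where schemaname = 'public'
  and tablename = 'rtest_order1'
order by rulename;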
--- -create table rtest_order1 (a int4); -create table rtest_order2 (a int4, b int4, c text); -create sequence rtest_seq; -create rule rtest_order_r3 as on insert to rtest_order1 do instead - insert into rtest_order2 values (new.a, nextval('rtest_seq'), - 'rule 3 - this should run 3rd'); -create rule rtest_order_r4 as on insert to rtest_order1 - where a < 100 do instead - insert into rtest_order2 values (new.a, nextval('rtest_seq'), - 'rule 4 - this should run 4th'); -create rule rtest_order_r2 as on insert to rtest_order1 do - insert into rtest_order2 values (new.a, nextval('rtest_seq'), - 'rule 2 - this should run 2nd'); -create rule rtest_order_r1 as on insert to rtest_order1 do instead - insert into rtest_order2 values (new.a, nextval('rtest_seq'), - 'rule 1 - this should run 1st'); --- --- Tables and rules for the instead nothing test --- -create table rtest_nothn1 (a int4, b text); -create table rtest_nothn2 (a int4, b text); -create table rtest_nothn3 (a int4, b text); -create table rtest_nothn4 (a int4, b text); -create rule rtest_nothn_r1 as on insert to rtest_nothn1 - where new.a >= 10 and new.a < 20 do instead nothing; -create rule rtest_nothn_r2 as on insert to rtest_nothn1 - where new.a >= 30 and new.a < 40 do instead nothing; -create rule rtest_nothn_r3 as on insert to rtest_nothn2 - where new.a >= 100 do instead - insert into rtest_nothn3 values (new.a, new.b); -create rule rtest_nothn_r4 as on insert to rtest_nothn2 - do instead nothing; --- --- Tests on a view that is select * of a table --- and has insert/update/delete instead rules to --- behave close like the real table. --- --- --- We need test date later --- -insert into rtest_t2 values (1, 21); -insert into rtest_t2 values (2, 22); -insert into rtest_t2 values (3, 23); -insert into rtest_t3 values (1, 31); -insert into rtest_t3 values (2, 32); -insert into rtest_t3 values (3, 33); -insert into rtest_t3 values (4, 34); -insert into rtest_t3 values (5, 35); --- insert values -insert into rtest_v1 values (1, 11); -insert into rtest_v1 values (2, 12); -select * from rtest_v1; - a | b ----+---- - 1 | 11 - 2 | 12 -(2 rows) - --- delete with constant expression -delete from rtest_v1 where a = 1; -select * from rtest_v1; - a | b ----+---- - 2 | 12 -(1 row) - -insert into rtest_v1 values (1, 11); -delete from rtest_v1 where b = 12; -select * from rtest_v1; - a | b ----+---- - 1 | 11 -(1 row) - -insert into rtest_v1 values (2, 12); -insert into rtest_v1 values (2, 13); -select * from rtest_v1; - a | b ----+---- - 1 | 11 - 2 | 12 - 2 | 13 -(3 rows) - -** Remember the delete rule on rtest_v1: It says -** DO INSTEAD DELETE FROM rtest_t1 WHERE a = old.a -** So this time both rows with a = 2 must get deleted -\p -** Remember the delete rule on rtest_v1: It says -** DO INSTEAD DELETE FROM rtest_t1 WHERE a = old.a -** So this time both rows with a = 2 must get deleted -\r -delete from rtest_v1 where b = 12; -select * from rtest_v1; - a | b ----+---- - 1 | 11 -(1 row) - -delete from rtest_v1; --- insert select -insert into rtest_v1 select * from rtest_t2; -select * from rtest_v1; - a | b ----+---- - 1 | 21 - 2 | 22 - 3 | 23 -(3 rows) - -delete from rtest_v1; --- same with swapped targetlist -insert into rtest_v1 (b, a) select b, a from rtest_t2; -select * from rtest_v1; - a | b ----+---- - 1 | 21 - 2 | 22 - 3 | 23 -(3 rows) - --- now with only one target attribute -insert into rtest_v1 (a) select a from rtest_t3; -select * from rtest_v1; - a | b ----+---- - 1 | 21 - 2 | 22 - 3 | 23 - 1 | - 2 | - 3 | - 4 | - 5 | -(8 rows) - -select * 
from rtest_v1 where b isnull; - a | b ----+--- - 1 | - 2 | - 3 | - 4 | - 5 | -(5 rows) - --- let attribute a differ (must be done on rtest_t1 - see above) -update rtest_t1 set a = a + 10 where b isnull; -delete from rtest_v1 where b isnull; -select * from rtest_v1; - a | b ----+---- - 1 | 21 - 2 | 22 - 3 | 23 -(3 rows) - --- now updates with constant expression -update rtest_v1 set b = 42 where a = 2; -select * from rtest_v1; - a | b ----+---- - 1 | 21 - 3 | 23 - 2 | 42 -(3 rows) - -update rtest_v1 set b = 99 where b = 42; -select * from rtest_v1; - a | b ----+---- - 1 | 21 - 3 | 23 - 2 | 99 -(3 rows) - -update rtest_v1 set b = 88 where b < 50; -select * from rtest_v1; - a | b ----+---- - 2 | 99 - 1 | 88 - 3 | 88 -(3 rows) - -delete from rtest_v1; -insert into rtest_v1 select rtest_t2.a, rtest_t3.b - from rtest_t2, rtest_t3 - where rtest_t2.a = rtest_t3.a; -select * from rtest_v1; - a | b ----+---- - 1 | 31 - 2 | 32 - 3 | 33 -(3 rows) - --- updates in a mergejoin -update rtest_v1 set b = rtest_t2.b from rtest_t2 where rtest_v1.a = rtest_t2.a; -select * from rtest_v1; - a | b ----+---- - 1 | 21 - 2 | 22 - 3 | 23 -(3 rows) - -insert into rtest_v1 select * from rtest_t3; -select * from rtest_v1; - a | b ----+---- - 1 | 21 - 2 | 22 - 3 | 23 - 1 | 31 - 2 | 32 - 3 | 33 - 4 | 34 - 5 | 35 -(8 rows) - -update rtest_t1 set a = a + 10 where b > 30; -select * from rtest_v1; - a | b -----+---- - 1 | 21 - 2 | 22 - 3 | 23 - 11 | 31 - 12 | 32 - 13 | 33 - 14 | 34 - 15 | 35 -(8 rows) - -update rtest_v1 set a = rtest_t3.a + 20 from rtest_t3 where rtest_v1.b = rtest_t3.b; -select * from rtest_v1; - a | b -----+---- - 1 | 21 - 2 | 22 - 3 | 23 - 21 | 31 - 22 | 32 - 23 | 33 - 24 | 34 - 25 | 35 -(8 rows) - --- --- Test for constraint updates/deletes --- -insert into rtest_system values ('orion', 'Linux Jan Wieck'); -insert into rtest_system values ('notjw', 'WinNT Jan Wieck (notebook)'); -insert into rtest_system values ('neptun', 'Fileserver'); -insert into rtest_interface values ('orion', 'eth0'); -insert into rtest_interface values ('orion', 'eth1'); -insert into rtest_interface values ('notjw', 'eth0'); -insert into rtest_interface values ('neptun', 'eth0'); -insert into rtest_person values ('jw', 'Jan Wieck'); -insert into rtest_person values ('bm', 'Bruce Momjian'); -insert into rtest_admin values ('jw', 'orion'); -insert into rtest_admin values ('jw', 'notjw'); -insert into rtest_admin values ('bm', 'neptun'); -update rtest_system set sysname = 'pluto' where sysname = 'neptun'; -select * from rtest_interface; - sysname | ifname ----------+-------- - orion | eth0 - orion | eth1 - notjw | eth0 - pluto | eth0 -(4 rows) - -select * from rtest_admin; - pname | sysname --------+--------- - jw | orion - jw | notjw - bm | pluto -(3 rows) - -update rtest_person set pname = 'jwieck' where pdesc = 'Jan Wieck'; --- Note: use ORDER BY here to ensure consistent output across all systems. --- The above UPDATE affects two rows with equal keys, so they could be --- updated in either order depending on the whim of the local qsort(). 
-select * from rtest_admin order by pname, sysname; - pname | sysname ---------+--------- - bm | pluto - jwieck | notjw - jwieck | orion -(3 rows) - -delete from rtest_system where sysname = 'orion'; -select * from rtest_interface; - sysname | ifname ----------+-------- - notjw | eth0 - pluto | eth0 -(2 rows) - -select * from rtest_admin; - pname | sysname ---------+--------- - bm | pluto - jwieck | notjw -(2 rows) - --- --- Rule qualification test --- -insert into rtest_emp values ('wiecc', '5000.00'); -insert into rtest_emp values ('gates', '80000.00'); -update rtest_emp set ename = 'wiecx' where ename = 'wiecc'; -update rtest_emp set ename = 'wieck', salary = '6000.00' where ename = 'wiecx'; -update rtest_emp set salary = '7000.00' where ename = 'wieck'; -delete from rtest_emp where ename = 'gates'; -select ename, who = current_user as "matches user", action, newsal, oldsal from rtest_emplog order by ename, action, newsal; - ename | matches user | action | newsal | oldsal -----------------------+--------------+------------+----------+---------- - gates | t | fired | 0.00 | 80000.00 - gates | t | hired | 80000.00 | 0.00 - wiecc | t | hired | 5000.00 | 0.00 - wieck | t | honored | 6000.00 | 5000.00 - wieck | t | honored | 7000.00 | 6000.00 -(5 rows) - -insert into rtest_empmass values ('meyer', '4000.00'); -insert into rtest_empmass values ('maier', '5000.00'); -insert into rtest_empmass values ('mayr', '6000.00'); -insert into rtest_emp select * from rtest_empmass; -select ename, who = current_user as "matches user", action, newsal, oldsal from rtest_emplog order by ename, action, newsal; - ename | matches user | action | newsal | oldsal -----------------------+--------------+------------+----------+---------- - gates | t | fired | 0.00 | 80000.00 - gates | t | hired | 80000.00 | 0.00 - maier | t | hired | 5000.00 | 0.00 - mayr | t | hired | 6000.00 | 0.00 - meyer | t | hired | 4000.00 | 0.00 - wiecc | t | hired | 5000.00 | 0.00 - wieck | t | honored | 6000.00 | 5000.00 - wieck | t | honored | 7000.00 | 6000.00 -(8 rows) - -update rtest_empmass set salary = salary + '1000.00'; -update rtest_emp set salary = rtest_empmass.salary from rtest_empmass where rtest_emp.ename = rtest_empmass.ename; -select ename, who = current_user as "matches user", action, newsal, oldsal from rtest_emplog order by ename, action, newsal; - ename | matches user | action | newsal | oldsal -----------------------+--------------+------------+----------+---------- - gates | t | fired | 0.00 | 80000.00 - gates | t | hired | 80000.00 | 0.00 - maier | t | hired | 5000.00 | 0.00 - maier | t | honored | 6000.00 | 5000.00 - mayr | t | hired | 6000.00 | 0.00 - mayr | t | honored | 7000.00 | 6000.00 - meyer | t | hired | 4000.00 | 0.00 - meyer | t | honored | 5000.00 | 4000.00 - wiecc | t | hired | 5000.00 | 0.00 - wieck | t | honored | 6000.00 | 5000.00 - wieck | t | honored | 7000.00 | 6000.00 -(11 rows) - -delete from rtest_emp using rtest_empmass where rtest_emp.ename = rtest_empmass.ename; -select ename, who = current_user as "matches user", action, newsal, oldsal from rtest_emplog order by ename, action, newsal; - ename | matches user | action | newsal | oldsal -----------------------+--------------+------------+----------+---------- - gates | t | fired | 0.00 | 80000.00 - gates | t | hired | 80000.00 | 0.00 - maier | t | fired | 0.00 | 6000.00 - maier | t | hired | 5000.00 | 0.00 - maier | t | honored | 6000.00 | 5000.00 - mayr | t | fired | 0.00 | 7000.00 - mayr | t | hired | 6000.00 | 0.00 - mayr | t | honored | 
7000.00 | 6000.00 - meyer | t | fired | 0.00 | 5000.00 - meyer | t | hired | 4000.00 | 0.00 - meyer | t | honored | 5000.00 | 4000.00 - wiecc | t | hired | 5000.00 | 0.00 - wieck | t | honored | 6000.00 | 5000.00 - wieck | t | honored | 7000.00 | 6000.00 -(14 rows) - --- --- Multiple cascaded qualified instead rule test --- -insert into rtest_t4 values (1, 'Record should go to rtest_t4'); -insert into rtest_t4 values (2, 'Record should go to rtest_t4'); -insert into rtest_t4 values (10, 'Record should go to rtest_t5'); -insert into rtest_t4 values (15, 'Record should go to rtest_t5'); -insert into rtest_t4 values (19, 'Record should go to rtest_t5 and t7'); -insert into rtest_t4 values (20, 'Record should go to rtest_t4 and t6'); -insert into rtest_t4 values (26, 'Record should go to rtest_t4 and t8'); -insert into rtest_t4 values (28, 'Record should go to rtest_t4 and t8'); -insert into rtest_t4 values (30, 'Record should go to rtest_t4'); -insert into rtest_t4 values (40, 'Record should go to rtest_t4'); -select * from rtest_t4; - a | b -----+------------------------------------- - 1 | Record should go to rtest_t4 - 2 | Record should go to rtest_t4 - 20 | Record should go to rtest_t4 and t6 - 26 | Record should go to rtest_t4 and t8 - 28 | Record should go to rtest_t4 and t8 - 30 | Record should go to rtest_t4 - 40 | Record should go to rtest_t4 -(7 rows) - -select * from rtest_t5; - a | b -----+------------------------------------- - 10 | Record should go to rtest_t5 - 15 | Record should go to rtest_t5 - 19 | Record should go to rtest_t5 and t7 -(3 rows) - -select * from rtest_t6; - a | b -----+------------------------------------- - 20 | Record should go to rtest_t4 and t6 -(1 row) - -select * from rtest_t7; - a | b -----+------------------------------------- - 19 | Record should go to rtest_t5 and t7 -(1 row) - -select * from rtest_t8; - a | b -----+------------------------------------- - 26 | Record should go to rtest_t4 and t8 - 28 | Record should go to rtest_t4 and t8 -(2 rows) - -delete from rtest_t4; -delete from rtest_t5; -delete from rtest_t6; -delete from rtest_t7; -delete from rtest_t8; -insert into rtest_t9 values (1, 'Record should go to rtest_t4'); -insert into rtest_t9 values (2, 'Record should go to rtest_t4'); -insert into rtest_t9 values (10, 'Record should go to rtest_t5'); -insert into rtest_t9 values (15, 'Record should go to rtest_t5'); -insert into rtest_t9 values (19, 'Record should go to rtest_t5 and t7'); -insert into rtest_t9 values (20, 'Record should go to rtest_t4 and t6'); -insert into rtest_t9 values (26, 'Record should go to rtest_t4 and t8'); -insert into rtest_t9 values (28, 'Record should go to rtest_t4 and t8'); -insert into rtest_t9 values (30, 'Record should go to rtest_t4'); -insert into rtest_t9 values (40, 'Record should go to rtest_t4'); -insert into rtest_t4 select * from rtest_t9 where a < 20; -select * from rtest_t4; - a | b ----+------------------------------ - 1 | Record should go to rtest_t4 - 2 | Record should go to rtest_t4 -(2 rows) - -select * from rtest_t5; - a | b -----+------------------------------------- - 10 | Record should go to rtest_t5 - 15 | Record should go to rtest_t5 - 19 | Record should go to rtest_t5 and t7 -(3 rows) - -select * from rtest_t6; - a | b ----+--- -(0 rows) - -select * from rtest_t7; - a | b -----+------------------------------------- - 19 | Record should go to rtest_t5 and t7 -(1 row) - -select * from rtest_t8; - a | b ----+--- -(0 rows) - -insert into rtest_t4 select * from rtest_t9 where b ~ 'and t8'; 
-select * from rtest_t4; - a | b -----+------------------------------------- - 1 | Record should go to rtest_t4 - 2 | Record should go to rtest_t4 - 26 | Record should go to rtest_t4 and t8 - 28 | Record should go to rtest_t4 and t8 -(4 rows) - -select * from rtest_t5; - a | b -----+------------------------------------- - 10 | Record should go to rtest_t5 - 15 | Record should go to rtest_t5 - 19 | Record should go to rtest_t5 and t7 -(3 rows) - -select * from rtest_t6; - a | b ----+--- -(0 rows) - -select * from rtest_t7; - a | b -----+------------------------------------- - 19 | Record should go to rtest_t5 and t7 -(1 row) - -select * from rtest_t8; - a | b -----+------------------------------------- - 26 | Record should go to rtest_t4 and t8 - 28 | Record should go to rtest_t4 and t8 -(2 rows) - -insert into rtest_t4 select a + 1, b from rtest_t9 where a in (20, 30, 40); -select * from rtest_t4; - a | b -----+------------------------------------- - 1 | Record should go to rtest_t4 - 2 | Record should go to rtest_t4 - 26 | Record should go to rtest_t4 and t8 - 28 | Record should go to rtest_t4 and t8 - 21 | Record should go to rtest_t4 and t6 - 31 | Record should go to rtest_t4 - 41 | Record should go to rtest_t4 -(7 rows) - -select * from rtest_t5; - a | b -----+------------------------------------- - 10 | Record should go to rtest_t5 - 15 | Record should go to rtest_t5 - 19 | Record should go to rtest_t5 and t7 -(3 rows) - -select * from rtest_t6; - a | b -----+------------------------------------- - 21 | Record should go to rtest_t4 and t6 -(1 row) - -select * from rtest_t7; - a | b -----+------------------------------------- - 19 | Record should go to rtest_t5 and t7 -(1 row) - -select * from rtest_t8; - a | b -----+------------------------------------- - 26 | Record should go to rtest_t4 and t8 - 28 | Record should go to rtest_t4 and t8 -(2 rows) - --- --- Check that the ordering of rules fired is correct --- -insert into rtest_order1 values (1); -select * from rtest_order2; - a | b | c ----+---+------------------------------ - 1 | 1 | rule 1 - this should run 1st - 1 | 2 | rule 2 - this should run 2nd - 1 | 3 | rule 3 - this should run 3rd - 1 | 4 | rule 4 - this should run 4th -(4 rows) - --- --- Check if instead nothing w/without qualification works --- -insert into rtest_nothn1 values (1, 'want this'); -insert into rtest_nothn1 values (2, 'want this'); -insert into rtest_nothn1 values (10, 'don''t want this'); -insert into rtest_nothn1 values (19, 'don''t want this'); -insert into rtest_nothn1 values (20, 'want this'); -insert into rtest_nothn1 values (29, 'want this'); -insert into rtest_nothn1 values (30, 'don''t want this'); -insert into rtest_nothn1 values (39, 'don''t want this'); -insert into rtest_nothn1 values (40, 'want this'); -insert into rtest_nothn1 values (50, 'want this'); -insert into rtest_nothn1 values (60, 'want this'); -select * from rtest_nothn1; - a | b -----+----------- - 1 | want this - 2 | want this - 20 | want this - 29 | want this - 40 | want this - 50 | want this - 60 | want this -(7 rows) - -insert into rtest_nothn2 values (10, 'too small'); -insert into rtest_nothn2 values (50, 'too small'); -insert into rtest_nothn2 values (100, 'OK'); -insert into rtest_nothn2 values (200, 'OK'); -select * from rtest_nothn2; - a | b ----+--- -(0 rows) - -select * from rtest_nothn3; - a | b ------+---- - 100 | OK - 200 | OK -(2 rows) - -delete from rtest_nothn1; -delete from rtest_nothn2; -delete from rtest_nothn3; -insert into rtest_nothn4 values (1, 'want this'); 
-insert into rtest_nothn4 values (2, 'want this'); -insert into rtest_nothn4 values (10, 'don''t want this'); -insert into rtest_nothn4 values (19, 'don''t want this'); -insert into rtest_nothn4 values (20, 'want this'); -insert into rtest_nothn4 values (29, 'want this'); -insert into rtest_nothn4 values (30, 'don''t want this'); -insert into rtest_nothn4 values (39, 'don''t want this'); -insert into rtest_nothn4 values (40, 'want this'); -insert into rtest_nothn4 values (50, 'want this'); -insert into rtest_nothn4 values (60, 'want this'); -insert into rtest_nothn1 select * from rtest_nothn4; -select * from rtest_nothn1; - a | b -----+----------- - 1 | want this - 2 | want this - 20 | want this - 29 | want this - 40 | want this - 50 | want this - 60 | want this -(7 rows) - -delete from rtest_nothn4; -insert into rtest_nothn4 values (10, 'too small'); -insert into rtest_nothn4 values (50, 'too small'); -insert into rtest_nothn4 values (100, 'OK'); -insert into rtest_nothn4 values (200, 'OK'); -insert into rtest_nothn2 select * from rtest_nothn4; -select * from rtest_nothn2; - a | b ----+--- -(0 rows) - -select * from rtest_nothn3; - a | b ------+---- - 100 | OK - 200 | OK -(2 rows) - -create table rtest_view1 (a int4, b text, v bool); -create table rtest_view2 (a int4); -create table rtest_view3 (a int4, b text); -create table rtest_view4 (a int4, b text, c int4); -create view rtest_vview1 as select a, b from rtest_view1 X - where 0 < (select count(*) from rtest_view2 Y where Y.a = X.a); -create view rtest_vview2 as select a, b from rtest_view1 where v; -create view rtest_vview3 as select a, b from rtest_vview2 X - where 0 < (select count(*) from rtest_view2 Y where Y.a = X.a); -create view rtest_vview4 as select X.a, X.b, count(Y.a) as refcount - from rtest_view1 X, rtest_view2 Y - where X.a = Y.a - group by X.a, X.b; -create function rtest_viewfunc1(int4) returns int4 as - 'select count(*)::int4 from rtest_view2 where a = $1' - language sql; -create view rtest_vview5 as select a, b, rtest_viewfunc1(a) as refcount - from rtest_view1; -insert into rtest_view1 values (1, 'item 1', 't'); -insert into rtest_view1 values (2, 'item 2', 't'); -insert into rtest_view1 values (3, 'item 3', 't'); -insert into rtest_view1 values (4, 'item 4', 'f'); -insert into rtest_view1 values (5, 'item 5', 't'); -insert into rtest_view1 values (6, 'item 6', 'f'); -insert into rtest_view1 values (7, 'item 7', 't'); -insert into rtest_view1 values (8, 'item 8', 't'); -insert into rtest_view2 values (2); -insert into rtest_view2 values (2); -insert into rtest_view2 values (4); -insert into rtest_view2 values (5); -insert into rtest_view2 values (7); -insert into rtest_view2 values (7); -insert into rtest_view2 values (7); -insert into rtest_view2 values (7); -select * from rtest_vview1; - a | b ----+-------- - 2 | item 2 - 4 | item 4 - 5 | item 5 - 7 | item 7 -(4 rows) - -select * from rtest_vview2; - a | b ----+-------- - 1 | item 1 - 2 | item 2 - 3 | item 3 - 5 | item 5 - 7 | item 7 - 8 | item 8 -(6 rows) - -select * from rtest_vview3; - a | b ----+-------- - 2 | item 2 - 5 | item 5 - 7 | item 7 -(3 rows) - -select * from rtest_vview4 order by a, b; - a | b | refcount ----+--------+---------- - 2 | item 2 | 2 - 4 | item 4 | 1 - 5 | item 5 | 1 - 7 | item 7 | 4 -(4 rows) - -select * from rtest_vview5; - a | b | refcount ----+--------+---------- - 1 | item 1 | 0 - 2 | item 2 | 2 - 3 | item 3 | 0 - 4 | item 4 | 1 - 5 | item 5 | 1 - 6 | item 6 | 0 - 7 | item 7 | 4 - 8 | item 8 | 0 -(8 rows) - -insert into rtest_view3 
select * from rtest_vview1 where a < 7; -select * from rtest_view3; - a | b ----+-------- - 2 | item 2 - 4 | item 4 - 5 | item 5 -(3 rows) - -delete from rtest_view3; -insert into rtest_view3 select * from rtest_vview2 where a != 5 and b !~ '2'; -select * from rtest_view3; - a | b ----+-------- - 1 | item 1 - 3 | item 3 - 7 | item 7 - 8 | item 8 -(4 rows) - -delete from rtest_view3; -insert into rtest_view3 select * from rtest_vview3; -select * from rtest_view3; - a | b ----+-------- - 2 | item 2 - 5 | item 5 - 7 | item 7 -(3 rows) - -delete from rtest_view3; -insert into rtest_view4 select * from rtest_vview4 where 3 > refcount; -select * from rtest_view4 order by a, b; - a | b | c ----+--------+--- - 2 | item 2 | 2 - 4 | item 4 | 1 - 5 | item 5 | 1 -(3 rows) - -delete from rtest_view4; -insert into rtest_view4 select * from rtest_vview5 where a > 2 and refcount = 0; -select * from rtest_view4; - a | b | c ----+--------+--- - 3 | item 3 | 0 - 6 | item 6 | 0 - 8 | item 8 | 0 -(3 rows) - -delete from rtest_view4; --- --- Test for computations in views --- -create table rtest_comp ( - part text, - unit char(4), - size float -); -create table rtest_unitfact ( - unit char(4), - factor float -); -create view rtest_vcomp as - select X.part, (X.size * Y.factor) as size_in_cm - from rtest_comp X, rtest_unitfact Y - where X.unit = Y.unit; -insert into rtest_unitfact values ('m', 100.0); -insert into rtest_unitfact values ('cm', 1.0); -insert into rtest_unitfact values ('inch', 2.54); -insert into rtest_comp values ('p1', 'm', 5.0); -insert into rtest_comp values ('p2', 'm', 3.0); -insert into rtest_comp values ('p3', 'cm', 5.0); -insert into rtest_comp values ('p4', 'cm', 15.0); -insert into rtest_comp values ('p5', 'inch', 7.0); -insert into rtest_comp values ('p6', 'inch', 4.4); -select * from rtest_vcomp order by part; - part | size_in_cm -------+-------------------- - p1 | 500 - p2 | 300 - p3 | 5 - p4 | 15 - p5 | 17.78 - p6 | 11.176000000000002 -(6 rows) - -select * from rtest_vcomp where size_in_cm > 10.0 order by size_in_cm using >; - part | size_in_cm -------+-------------------- - p1 | 500 - p2 | 300 - p5 | 17.78 - p4 | 15 - p6 | 11.176000000000002 -(5 rows) - --- --- In addition run the (slightly modified) queries from the --- programmers manual section on the rule system. 
--- -CREATE TABLE shoe_data ( - shoename char(10), -- primary key - sh_avail integer, -- available # of pairs - slcolor char(10), -- preferred shoelace color - slminlen float, -- minimum shoelace length - slmaxlen float, -- maximum shoelace length - slunit char(8) -- length unit -); -CREATE TABLE shoelace_data ( - sl_name char(10), -- primary key - sl_avail integer, -- available # of pairs - sl_color char(10), -- shoelace color - sl_len float, -- shoelace length - sl_unit char(8) -- length unit -); -CREATE TABLE unit ( - un_name char(8), -- the primary key - un_fact float -- factor to transform to cm -); -CREATE VIEW shoe AS - SELECT sh.shoename, - sh.sh_avail, - sh.slcolor, - sh.slminlen, - sh.slminlen * un.un_fact AS slminlen_cm, - sh.slmaxlen, - sh.slmaxlen * un.un_fact AS slmaxlen_cm, - sh.slunit - FROM shoe_data sh, unit un - WHERE sh.slunit = un.un_name; -CREATE VIEW shoelace AS - SELECT s.sl_name, - s.sl_avail, - s.sl_color, - s.sl_len, - s.sl_unit, - s.sl_len * u.un_fact AS sl_len_cm - FROM shoelace_data s, unit u - WHERE s.sl_unit = u.un_name; -CREATE VIEW shoe_ready AS - SELECT rsh.shoename, - rsh.sh_avail, - rsl.sl_name, - rsl.sl_avail, - int4smaller(rsh.sh_avail, rsl.sl_avail) AS total_avail - FROM shoe rsh, shoelace rsl - WHERE rsl.sl_color = rsh.slcolor - AND rsl.sl_len_cm >= rsh.slminlen_cm - AND rsl.sl_len_cm <= rsh.slmaxlen_cm; -INSERT INTO unit VALUES ('cm', 1.0); -INSERT INTO unit VALUES ('m', 100.0); -INSERT INTO unit VALUES ('inch', 2.54); -INSERT INTO shoe_data VALUES ('sh1', 2, 'black', 70.0, 90.0, 'cm'); -INSERT INTO shoe_data VALUES ('sh2', 0, 'black', 30.0, 40.0, 'inch'); -INSERT INTO shoe_data VALUES ('sh3', 4, 'brown', 50.0, 65.0, 'cm'); -INSERT INTO shoe_data VALUES ('sh4', 3, 'brown', 40.0, 50.0, 'inch'); -INSERT INTO shoelace_data VALUES ('sl1', 5, 'black', 80.0, 'cm'); -INSERT INTO shoelace_data VALUES ('sl2', 6, 'black', 100.0, 'cm'); -INSERT INTO shoelace_data VALUES ('sl3', 0, 'black', 35.0 , 'inch'); -INSERT INTO shoelace_data VALUES ('sl4', 8, 'black', 40.0 , 'inch'); -INSERT INTO shoelace_data VALUES ('sl5', 4, 'brown', 1.0 , 'm'); -INSERT INTO shoelace_data VALUES ('sl6', 0, 'brown', 0.9 , 'm'); -INSERT INTO shoelace_data VALUES ('sl7', 7, 'brown', 60 , 'cm'); -INSERT INTO shoelace_data VALUES ('sl8', 1, 'brown', 40 , 'inch'); --- SELECTs in doc -SELECT * FROM shoelace ORDER BY sl_name; - sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm -------------+----------+------------+--------+----------+----------- - sl1 | 5 | black | 80 | cm | 80 - sl2 | 6 | black | 100 | cm | 100 - sl3 | 0 | black | 35 | inch | 88.9 - sl4 | 8 | black | 40 | inch | 101.6 - sl5 | 4 | brown | 1 | m | 100 - sl6 | 0 | brown | 0.9 | m | 90 - sl7 | 7 | brown | 60 | cm | 60 - sl8 | 1 | brown | 40 | inch | 101.6 -(8 rows) - -SELECT * FROM shoe_ready WHERE total_avail >= 2 ORDER BY 1; - shoename | sh_avail | sl_name | sl_avail | total_avail -------------+----------+------------+----------+------------- - sh1 | 2 | sl1 | 5 | 2 - sh3 | 4 | sl7 | 7 | 4 -(2 rows) - - CREATE TABLE shoelace_log ( - sl_name char(10), -- shoelace changed - sl_avail integer, -- new available value - log_who name, -- who did it - log_when timestamp -- when - ); --- Want "log_who" to be CURRENT_USER, --- but that is non-portable for the regression test --- - thomas 1999-02-21 - CREATE RULE log_shoelace AS ON UPDATE TO shoelace_data - WHERE NEW.sl_avail != OLD.sl_avail - DO INSERT INTO shoelace_log VALUES ( - NEW.sl_name, - NEW.sl_avail, - 'Al Bundy', - 'epoch' - ); -UPDATE shoelace_data SET sl_avail = 
6 WHERE sl_name = 'sl7'; -SELECT * FROM shoelace_log; - sl_name | sl_avail | log_who | log_when -------------+----------+----------+-------------------------- - sl7 | 6 | Al Bundy | Thu Jan 01 00:00:00 1970 -(1 row) - - CREATE RULE shoelace_ins AS ON INSERT TO shoelace - DO INSTEAD - INSERT INTO shoelace_data VALUES ( - NEW.sl_name, - NEW.sl_avail, - NEW.sl_color, - NEW.sl_len, - NEW.sl_unit); - CREATE RULE shoelace_upd AS ON UPDATE TO shoelace - DO INSTEAD - UPDATE shoelace_data SET - sl_name = NEW.sl_name, - sl_avail = NEW.sl_avail, - sl_color = NEW.sl_color, - sl_len = NEW.sl_len, - sl_unit = NEW.sl_unit - WHERE sl_name = OLD.sl_name; - CREATE RULE shoelace_del AS ON DELETE TO shoelace - DO INSTEAD - DELETE FROM shoelace_data - WHERE sl_name = OLD.sl_name; - CREATE TABLE shoelace_arrive ( - arr_name char(10), - arr_quant integer - ); - CREATE TABLE shoelace_ok ( - ok_name char(10), - ok_quant integer - ); - CREATE RULE shoelace_ok_ins AS ON INSERT TO shoelace_ok - DO INSTEAD - UPDATE shoelace SET - sl_avail = sl_avail + NEW.ok_quant - WHERE sl_name = NEW.ok_name; -INSERT INTO shoelace_arrive VALUES ('sl3', 10); -INSERT INTO shoelace_arrive VALUES ('sl6', 20); -INSERT INTO shoelace_arrive VALUES ('sl8', 20); -SELECT * FROM shoelace ORDER BY sl_name; - sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm -------------+----------+------------+--------+----------+----------- - sl1 | 5 | black | 80 | cm | 80 - sl2 | 6 | black | 100 | cm | 100 - sl3 | 0 | black | 35 | inch | 88.9 - sl4 | 8 | black | 40 | inch | 101.6 - sl5 | 4 | brown | 1 | m | 100 - sl6 | 0 | brown | 0.9 | m | 90 - sl7 | 6 | brown | 60 | cm | 60 - sl8 | 1 | brown | 40 | inch | 101.6 -(8 rows) - -insert into shoelace_ok select * from shoelace_arrive; -SELECT * FROM shoelace ORDER BY sl_name; - sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm -------------+----------+------------+--------+----------+----------- - sl1 | 5 | black | 80 | cm | 80 - sl2 | 6 | black | 100 | cm | 100 - sl3 | 10 | black | 35 | inch | 88.9 - sl4 | 8 | black | 40 | inch | 101.6 - sl5 | 4 | brown | 1 | m | 100 - sl6 | 20 | brown | 0.9 | m | 90 - sl7 | 6 | brown | 60 | cm | 60 - sl8 | 21 | brown | 40 | inch | 101.6 -(8 rows) - -SELECT * FROM shoelace_log ORDER BY sl_name; - sl_name | sl_avail | log_who | log_when -------------+----------+----------+-------------------------- - sl3 | 10 | Al Bundy | Thu Jan 01 00:00:00 1970 - sl6 | 20 | Al Bundy | Thu Jan 01 00:00:00 1970 - sl7 | 6 | Al Bundy | Thu Jan 01 00:00:00 1970 - sl8 | 21 | Al Bundy | Thu Jan 01 00:00:00 1970 -(4 rows) - - CREATE VIEW shoelace_obsolete AS - SELECT * FROM shoelace WHERE NOT EXISTS - (SELECT shoename FROM shoe WHERE slcolor = sl_color); - CREATE VIEW shoelace_candelete AS - SELECT * FROM shoelace_obsolete WHERE sl_avail = 0; -insert into shoelace values ('sl9', 0, 'pink', 35.0, 'inch', 0.0); -insert into shoelace values ('sl10', 1000, 'magenta', 40.0, 'inch', 0.0); --- Unsupported (even though a similar updatable view construct is) -insert into shoelace values ('sl10', 1000, 'magenta', 40.0, 'inch', 0.0) - on conflict do nothing; -ERROR: INSERT with ON CONFLICT clause cannot be used with table that has INSERT or UPDATE rules -SELECT * FROM shoelace_obsolete ORDER BY sl_len_cm; - sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm -------------+----------+------------+--------+----------+----------- - sl9 | 0 | pink | 35 | inch | 88.9 - sl10 | 1000 | magenta | 40 | inch | 101.6 -(2 rows) - -SELECT * FROM shoelace_candelete; - sl_name | sl_avail | sl_color 
| sl_len | sl_unit | sl_len_cm -------------+----------+------------+--------+----------+----------- - sl9 | 0 | pink | 35 | inch | 88.9 -(1 row) - -DELETE FROM shoelace WHERE EXISTS - (SELECT * FROM shoelace_candelete - WHERE sl_name = shoelace.sl_name); -SELECT * FROM shoelace ORDER BY sl_name; - sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm -------------+----------+------------+--------+----------+----------- - sl1 | 5 | black | 80 | cm | 80 - sl10 | 1000 | magenta | 40 | inch | 101.6 - sl2 | 6 | black | 100 | cm | 100 - sl3 | 10 | black | 35 | inch | 88.9 - sl4 | 8 | black | 40 | inch | 101.6 - sl5 | 4 | brown | 1 | m | 100 - sl6 | 20 | brown | 0.9 | m | 90 - sl7 | 6 | brown | 60 | cm | 60 - sl8 | 21 | brown | 40 | inch | 101.6 -(9 rows) - -SELECT * FROM shoe ORDER BY shoename; - shoename | sh_avail | slcolor | slminlen | slminlen_cm | slmaxlen | slmaxlen_cm | slunit -------------+----------+------------+----------+-------------+----------+-------------+---------- - sh1 | 2 | black | 70 | 70 | 90 | 90 | cm - sh2 | 0 | black | 30 | 76.2 | 40 | 101.6 | inch - sh3 | 4 | brown | 50 | 50 | 65 | 65 | cm - sh4 | 3 | brown | 40 | 101.6 | 50 | 127 | inch -(4 rows) - -SELECT count(*) FROM shoe; - count -------- - 4 -(1 row) - --- --- Simple test of qualified ON INSERT ... this did not work in 7.0 ... --- -create table rules_foo (f1 int); -create table rules_foo2 (f1 int); -create rule rules_foorule as on insert to rules_foo where f1 < 100 -do instead nothing; -insert into rules_foo values(1); -insert into rules_foo values(1001); -select * from rules_foo; - f1 ------- - 1001 -(1 row) - -drop rule rules_foorule on rules_foo; --- this should fail because f1 is not exposed for unqualified reference: -create rule rules_foorule as on insert to rules_foo where f1 < 100 -do instead insert into rules_foo2 values (f1); -ERROR: column "f1" does not exist -LINE 2: do instead insert into rules_foo2 values (f1); - ^ -DETAIL: There are columns named "f1", but they are in tables that cannot be referenced from this part of the query. -HINT: Try using a table-qualified name. --- this is the correct way: -create rule rules_foorule as on insert to rules_foo where f1 < 100 -do instead insert into rules_foo2 values (new.f1); -insert into rules_foo values(2); -insert into rules_foo values(100); -select * from rules_foo; - f1 ------- - 1001 - 100 -(2 rows) - -select * from rules_foo2; - f1 ----- - 2 -(1 row) - -drop rule rules_foorule on rules_foo; -drop table rules_foo; -drop table rules_foo2; --- --- Test rules containing INSERT ... SELECT, which is a very ugly special --- case as of 7.1. Example is based on bug report from Joel Burton. 
--- -create table pparent (pid int, txt text); -insert into pparent values (1,'parent1'); -insert into pparent values (2,'parent2'); -create table cchild (pid int, descrip text); -insert into cchild values (1,'descrip1'); -create view vview as - select pparent.pid, txt, descrip from - pparent left join cchild using (pid); -create rule rrule as - on update to vview do instead -( - insert into cchild (pid, descrip) - select old.pid, new.descrip where old.descrip isnull; - update cchild set descrip = new.descrip where cchild.pid = old.pid; -); -select * from vview; - pid | txt | descrip ------+---------+---------- - 1 | parent1 | descrip1 - 2 | parent2 | -(2 rows) - -update vview set descrip='test1' where pid=1; -select * from vview; - pid | txt | descrip ------+---------+--------- - 1 | parent1 | test1 - 2 | parent2 | -(2 rows) - -update vview set descrip='test2' where pid=2; -select * from vview; - pid | txt | descrip ------+---------+--------- - 1 | parent1 | test1 - 2 | parent2 | test2 -(2 rows) - -update vview set descrip='test3' where pid=3; -select * from vview; - pid | txt | descrip ------+---------+--------- - 1 | parent1 | test1 - 2 | parent2 | test2 -(2 rows) - -select * from cchild; - pid | descrip ------+--------- - 1 | test1 - 2 | test2 -(2 rows) - -drop rule rrule on vview; -drop view vview; -drop table pparent; -drop table cchild; --- --- Check that ruleutils are working --- --- temporarily disable fancy output, so view changes create less diff noise -\a\t -SELECT viewname, definition FROM pg_views -WHERE schemaname = 'pg_catalog' -ORDER BY viewname; -pg_available_extension_versions| SELECT e.name, - e.version, - (x.extname IS NOT NULL) AS installed, - e.superuser, - e.trusted, - e.relocatable, - e.schema, - e.requires, - e.comment - FROM (pg_available_extension_versions() e(name, version, superuser, trusted, relocatable, schema, requires, comment) - LEFT JOIN pg_extension x ON (((e.name = x.extname) AND (e.version = x.extversion)))); -pg_available_extensions| SELECT e.name, - e.default_version, - x.extversion AS installed_version, - e.comment - FROM (pg_available_extensions() e(name, default_version, comment) - LEFT JOIN pg_extension x ON ((e.name = x.extname))); -pg_backend_memory_contexts| SELECT name, - ident, - parent, - level, - total_bytes, - total_nblocks, - free_bytes, - free_chunks, - used_bytes - FROM pg_get_backend_memory_contexts() pg_get_backend_memory_contexts(name, ident, parent, level, total_bytes, total_nblocks, free_bytes, free_chunks, used_bytes); -pg_config| SELECT name, - setting - FROM pg_config() pg_config(name, setting); -pg_cursors| SELECT name, - statement, - is_holdable, - is_binary, - is_scrollable, - creation_time - FROM pg_cursor() c(name, statement, is_holdable, is_binary, is_scrollable, creation_time); -pg_file_settings| SELECT sourcefile, - sourceline, - seqno, - name, - setting, - applied, - error - FROM pg_show_all_file_settings() a(sourcefile, sourceline, seqno, name, setting, applied, error); -pg_group| SELECT rolname AS groname, - oid AS grosysid, - ARRAY( SELECT pg_auth_members.member - FROM pg_auth_members - WHERE (pg_auth_members.roleid = pg_authid.oid)) AS grolist - FROM pg_authid - WHERE (NOT rolcanlogin); -pg_hba_file_rules| SELECT rule_number, - file_name, - line_number, - type, - database, - user_name, - address, - netmask, - auth_method, - options, - error - FROM pg_hba_file_rules() a(rule_number, file_name, line_number, type, database, user_name, address, netmask, auth_method, options, error); -pg_ident_file_mappings| SELECT 
map_number, - file_name, - line_number, - map_name, - sys_name, - pg_username, - error - FROM pg_ident_file_mappings() a(map_number, file_name, line_number, map_name, sys_name, pg_username, error); -pg_indexes| SELECT n.nspname AS schemaname, - c.relname AS tablename, - i.relname AS indexname, - t.spcname AS tablespace, - pg_get_indexdef(i.oid) AS indexdef - FROM ((((pg_index x - JOIN pg_class c ON ((c.oid = x.indrelid))) - JOIN pg_class i ON ((i.oid = x.indexrelid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - LEFT JOIN pg_tablespace t ON ((t.oid = i.reltablespace))) - WHERE ((c.relkind = ANY (ARRAY['r'::"char", 'm'::"char", 'p'::"char"])) AND (i.relkind = ANY (ARRAY['i'::"char", 'I'::"char"]))); -pg_locks| SELECT locktype, - database, - relation, - page, - tuple, - virtualxid, - transactionid, - classid, - objid, - objsubid, - virtualtransaction, - pid, - mode, - granted, - fastpath, - waitstart - FROM pg_lock_status() l(locktype, database, relation, page, tuple, virtualxid, transactionid, classid, objid, objsubid, virtualtransaction, pid, mode, granted, fastpath, waitstart); -pg_matviews| SELECT n.nspname AS schemaname, - c.relname AS matviewname, - pg_get_userbyid(c.relowner) AS matviewowner, - t.spcname AS tablespace, - c.relhasindex AS hasindexes, - c.relispopulated AS ispopulated, - pg_get_viewdef(c.oid) AS definition - FROM ((pg_class c - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - LEFT JOIN pg_tablespace t ON ((t.oid = c.reltablespace))) - WHERE (c.relkind = 'm'::"char"); -pg_policies| SELECT n.nspname AS schemaname, - c.relname AS tablename, - pol.polname AS policyname, - CASE - WHEN pol.polpermissive THEN 'PERMISSIVE'::text - ELSE 'RESTRICTIVE'::text - END AS permissive, - CASE - WHEN (pol.polroles = '{0}'::oid[]) THEN (string_to_array('public'::text, ''::text))::name[] - ELSE ARRAY( SELECT pg_authid.rolname - FROM pg_authid - WHERE (pg_authid.oid = ANY (pol.polroles)) - ORDER BY pg_authid.rolname) - END AS roles, - CASE pol.polcmd - WHEN 'r'::"char" THEN 'SELECT'::text - WHEN 'a'::"char" THEN 'INSERT'::text - WHEN 'w'::"char" THEN 'UPDATE'::text - WHEN 'd'::"char" THEN 'DELETE'::text - WHEN '*'::"char" THEN 'ALL'::text - ELSE NULL::text - END AS cmd, - pg_get_expr(pol.polqual, pol.polrelid) AS qual, - pg_get_expr(pol.polwithcheck, pol.polrelid) AS with_check - FROM ((pg_policy pol - JOIN pg_class c ON ((c.oid = pol.polrelid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))); -pg_prepared_statements| SELECT name, - statement, - prepare_time, - parameter_types, - result_types, - from_sql, - generic_plans, - custom_plans - FROM pg_prepared_statement() p(name, statement, prepare_time, parameter_types, result_types, from_sql, generic_plans, custom_plans); -pg_prepared_xacts| SELECT p.transaction, - p.gid, - p.prepared, - u.rolname AS owner, - d.datname AS database - FROM ((pg_prepared_xact() p(transaction, gid, prepared, ownerid, dbid) - LEFT JOIN pg_authid u ON ((p.ownerid = u.oid))) - LEFT JOIN pg_database d ON ((p.dbid = d.oid))); -pg_publication_tables| SELECT p.pubname, - n.nspname AS schemaname, - c.relname AS tablename, - ( SELECT array_agg(a.attname ORDER BY a.attnum) AS array_agg - FROM pg_attribute a - WHERE ((a.attrelid = gpt.relid) AND (a.attnum = ANY ((gpt.attrs)::smallint[])))) AS attnames, - pg_get_expr(gpt.qual, gpt.relid) AS rowfilter - FROM pg_publication p, - LATERAL pg_get_publication_tables(VARIADIC ARRAY[(p.pubname)::text]) gpt(pubid, relid, attrs, qual), - (pg_class c - JOIN pg_namespace n ON ((n.oid = c.relnamespace))) 
- WHERE (c.oid = gpt.relid); -pg_replication_origin_status| SELECT local_id, - external_id, - remote_lsn, - local_lsn - FROM pg_show_replication_origin_status() pg_show_replication_origin_status(local_id, external_id, remote_lsn, local_lsn); -pg_replication_slots| SELECT l.slot_name, - l.plugin, - l.slot_type, - l.datoid, - d.datname AS database, - l.temporary, - l.active, - l.active_pid, - l.xmin, - l.catalog_xmin, - l.restart_lsn, - l.confirmed_flush_lsn, - l.wal_status, - l.safe_wal_size, - l.two_phase, - l.conflict_reason, - l.failover, - l.synced - FROM (pg_get_replication_slots() l(slot_name, plugin, slot_type, datoid, temporary, active, active_pid, xmin, catalog_xmin, restart_lsn, confirmed_flush_lsn, wal_status, safe_wal_size, two_phase, conflict_reason, failover, synced) - LEFT JOIN pg_database d ON ((l.datoid = d.oid))); -pg_roles| SELECT pg_authid.rolname, - pg_authid.rolsuper, - pg_authid.rolinherit, - pg_authid.rolcreaterole, - pg_authid.rolcreatedb, - pg_authid.rolcanlogin, - pg_authid.rolreplication, - pg_authid.rolconnlimit, - '********'::text AS rolpassword, - pg_authid.rolvaliduntil, - pg_authid.rolbypassrls, - s.setconfig AS rolconfig, - pg_authid.oid - FROM (pg_authid - LEFT JOIN pg_db_role_setting s ON (((pg_authid.oid = s.setrole) AND (s.setdatabase = (0)::oid)))); -pg_rules| SELECT n.nspname AS schemaname, - c.relname AS tablename, - r.rulename, - pg_get_ruledef(r.oid) AS definition - FROM ((pg_rewrite r - JOIN pg_class c ON ((c.oid = r.ev_class))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE (r.rulename <> '_RETURN'::name); -pg_seclabels| SELECT l.objoid, - l.classoid, - l.objsubid, - CASE - WHEN (rel.relkind = ANY (ARRAY['r'::"char", 'p'::"char"])) THEN 'table'::text - WHEN (rel.relkind = 'v'::"char") THEN 'view'::text - WHEN (rel.relkind = 'm'::"char") THEN 'materialized view'::text - WHEN (rel.relkind = 'S'::"char") THEN 'sequence'::text - WHEN (rel.relkind = 'f'::"char") THEN 'foreign table'::text - ELSE NULL::text - END AS objtype, - rel.relnamespace AS objnamespace, - CASE - WHEN pg_table_is_visible(rel.oid) THEN quote_ident((rel.relname)::text) - ELSE ((quote_ident((nsp.nspname)::text) || '.'::text) || quote_ident((rel.relname)::text)) - END AS objname, - l.provider, - l.label - FROM ((pg_seclabel l - JOIN pg_class rel ON (((l.classoid = rel.tableoid) AND (l.objoid = rel.oid)))) - JOIN pg_namespace nsp ON ((rel.relnamespace = nsp.oid))) - WHERE (l.objsubid = 0) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - 'column'::text AS objtype, - rel.relnamespace AS objnamespace, - (( - CASE - WHEN pg_table_is_visible(rel.oid) THEN quote_ident((rel.relname)::text) - ELSE ((quote_ident((nsp.nspname)::text) || '.'::text) || quote_ident((rel.relname)::text)) - END || '.'::text) || (att.attname)::text) AS objname, - l.provider, - l.label - FROM (((pg_seclabel l - JOIN pg_class rel ON (((l.classoid = rel.tableoid) AND (l.objoid = rel.oid)))) - JOIN pg_attribute att ON (((rel.oid = att.attrelid) AND (l.objsubid = att.attnum)))) - JOIN pg_namespace nsp ON ((rel.relnamespace = nsp.oid))) - WHERE (l.objsubid <> 0) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - CASE pro.prokind - WHEN 'a'::"char" THEN 'aggregate'::text - WHEN 'f'::"char" THEN 'function'::text - WHEN 'p'::"char" THEN 'procedure'::text - WHEN 'w'::"char" THEN 'window'::text - ELSE NULL::text - END AS objtype, - pro.pronamespace AS objnamespace, - ((( - CASE - WHEN pg_function_is_visible(pro.oid) THEN quote_ident((pro.proname)::text) - ELSE 
((quote_ident((nsp.nspname)::text) || '.'::text) || quote_ident((pro.proname)::text)) - END || '('::text) || pg_get_function_arguments(pro.oid)) || ')'::text) AS objname, - l.provider, - l.label - FROM ((pg_seclabel l - JOIN pg_proc pro ON (((l.classoid = pro.tableoid) AND (l.objoid = pro.oid)))) - JOIN pg_namespace nsp ON ((pro.pronamespace = nsp.oid))) - WHERE (l.objsubid = 0) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - CASE - WHEN (typ.typtype = 'd'::"char") THEN 'domain'::text - ELSE 'type'::text - END AS objtype, - typ.typnamespace AS objnamespace, - CASE - WHEN pg_type_is_visible(typ.oid) THEN quote_ident((typ.typname)::text) - ELSE ((quote_ident((nsp.nspname)::text) || '.'::text) || quote_ident((typ.typname)::text)) - END AS objname, - l.provider, - l.label - FROM ((pg_seclabel l - JOIN pg_type typ ON (((l.classoid = typ.tableoid) AND (l.objoid = typ.oid)))) - JOIN pg_namespace nsp ON ((typ.typnamespace = nsp.oid))) - WHERE (l.objsubid = 0) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - 'large object'::text AS objtype, - NULL::oid AS objnamespace, - (l.objoid)::text AS objname, - l.provider, - l.label - FROM (pg_seclabel l - JOIN pg_largeobject_metadata lom ON ((l.objoid = lom.oid))) - WHERE ((l.classoid = ('pg_largeobject'::regclass)::oid) AND (l.objsubid = 0)) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - 'language'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((lan.lanname)::text) AS objname, - l.provider, - l.label - FROM (pg_seclabel l - JOIN pg_language lan ON (((l.classoid = lan.tableoid) AND (l.objoid = lan.oid)))) - WHERE (l.objsubid = 0) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - 'schema'::text AS objtype, - nsp.oid AS objnamespace, - quote_ident((nsp.nspname)::text) AS objname, - l.provider, - l.label - FROM (pg_seclabel l - JOIN pg_namespace nsp ON (((l.classoid = nsp.tableoid) AND (l.objoid = nsp.oid)))) - WHERE (l.objsubid = 0) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - 'event trigger'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((evt.evtname)::text) AS objname, - l.provider, - l.label - FROM (pg_seclabel l - JOIN pg_event_trigger evt ON (((l.classoid = evt.tableoid) AND (l.objoid = evt.oid)))) - WHERE (l.objsubid = 0) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - 'publication'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((p.pubname)::text) AS objname, - l.provider, - l.label - FROM (pg_seclabel l - JOIN pg_publication p ON (((l.classoid = p.tableoid) AND (l.objoid = p.oid)))) - WHERE (l.objsubid = 0) -UNION ALL - SELECT l.objoid, - l.classoid, - 0 AS objsubid, - 'subscription'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((s.subname)::text) AS objname, - l.provider, - l.label - FROM (pg_shseclabel l - JOIN pg_subscription s ON (((l.classoid = s.tableoid) AND (l.objoid = s.oid)))) -UNION ALL - SELECT l.objoid, - l.classoid, - 0 AS objsubid, - 'database'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((dat.datname)::text) AS objname, - l.provider, - l.label - FROM (pg_shseclabel l - JOIN pg_database dat ON (((l.classoid = dat.tableoid) AND (l.objoid = dat.oid)))) -UNION ALL - SELECT l.objoid, - l.classoid, - 0 AS objsubid, - 'tablespace'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((spc.spcname)::text) AS objname, - l.provider, - l.label - FROM (pg_shseclabel l - JOIN pg_tablespace spc ON (((l.classoid = spc.tableoid) AND (l.objoid = spc.oid)))) -UNION ALL - SELECT l.objoid, - l.classoid, - 
0 AS objsubid, - 'role'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((rol.rolname)::text) AS objname, - l.provider, - l.label - FROM (pg_shseclabel l - JOIN pg_authid rol ON (((l.classoid = rol.tableoid) AND (l.objoid = rol.oid)))); -pg_sequences| SELECT n.nspname AS schemaname, - c.relname AS sequencename, - pg_get_userbyid(c.relowner) AS sequenceowner, - (s.seqtypid)::regtype AS data_type, - s.seqstart AS start_value, - s.seqmin AS min_value, - s.seqmax AS max_value, - s.seqincrement AS increment_by, - s.seqcycle AS cycle, - s.seqcache AS cache_size, - CASE - WHEN has_sequence_privilege(c.oid, 'SELECT,USAGE'::text) THEN pg_sequence_last_value((c.oid)::regclass) - ELSE NULL::bigint - END AS last_value - FROM ((pg_sequence s - JOIN pg_class c ON ((c.oid = s.seqrelid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE ((NOT pg_is_other_temp_schema(n.oid)) AND (c.relkind = 'S'::"char")); -pg_settings| SELECT name, - setting, - unit, - category, - short_desc, - extra_desc, - context, - vartype, - source, - min_val, - max_val, - enumvals, - boot_val, - reset_val, - sourcefile, - sourceline, - pending_restart - FROM pg_show_all_settings() a(name, setting, unit, category, short_desc, extra_desc, context, vartype, source, min_val, max_val, enumvals, boot_val, reset_val, sourcefile, sourceline, pending_restart); -pg_shadow| SELECT pg_authid.rolname AS usename, - pg_authid.oid AS usesysid, - pg_authid.rolcreatedb AS usecreatedb, - pg_authid.rolsuper AS usesuper, - pg_authid.rolreplication AS userepl, - pg_authid.rolbypassrls AS usebypassrls, - pg_authid.rolpassword AS passwd, - pg_authid.rolvaliduntil AS valuntil, - s.setconfig AS useconfig - FROM (pg_authid - LEFT JOIN pg_db_role_setting s ON (((pg_authid.oid = s.setrole) AND (s.setdatabase = (0)::oid)))) - WHERE pg_authid.rolcanlogin; -pg_shmem_allocations| SELECT name, - off, - size, - allocated_size - FROM pg_get_shmem_allocations() pg_get_shmem_allocations(name, off, size, allocated_size); -pg_stat_activity| SELECT s.datid, - d.datname, - s.pid, - s.leader_pid, - s.usesysid, - u.rolname AS usename, - s.application_name, - s.client_addr, - s.client_hostname, - s.client_port, - s.backend_start, - s.xact_start, - s.query_start, - s.state_change, - s.wait_event_type, - s.wait_event, - s.state, - s.backend_xid, - s.backend_xmin, - s.query_id, - s.query, - s.backend_type - FROM ((pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion, sslcipher, sslbits, ssl_client_dn, ssl_client_serial, ssl_issuer_dn, gss_auth, gss_princ, gss_enc, gss_delegation, leader_pid, query_id) - LEFT JOIN pg_database d ON ((s.datid = d.oid))) - LEFT JOIN pg_authid u ON ((s.usesysid = u.oid))); -pg_stat_all_indexes| SELECT c.oid AS relid, - i.oid AS indexrelid, - n.nspname AS schemaname, - c.relname, - i.relname AS indexrelname, - pg_stat_get_numscans(i.oid) AS idx_scan, - pg_stat_get_lastscan(i.oid) AS last_idx_scan, - pg_stat_get_tuples_returned(i.oid) AS idx_tup_read, - pg_stat_get_tuples_fetched(i.oid) AS idx_tup_fetch - FROM (((pg_class c - JOIN pg_index x ON ((c.oid = x.indrelid))) - JOIN pg_class i ON ((i.oid = x.indexrelid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char"])); -pg_stat_all_tables| SELECT c.oid AS relid, - n.nspname AS 
schemaname, - c.relname, - pg_stat_get_numscans(c.oid) AS seq_scan, - pg_stat_get_lastscan(c.oid) AS last_seq_scan, - pg_stat_get_tuples_returned(c.oid) AS seq_tup_read, - (sum(pg_stat_get_numscans(i.indexrelid)))::bigint AS idx_scan, - max(pg_stat_get_lastscan(i.indexrelid)) AS last_idx_scan, - ((sum(pg_stat_get_tuples_fetched(i.indexrelid)))::bigint + pg_stat_get_tuples_fetched(c.oid)) AS idx_tup_fetch, - pg_stat_get_tuples_inserted(c.oid) AS n_tup_ins, - pg_stat_get_tuples_updated(c.oid) AS n_tup_upd, - pg_stat_get_tuples_deleted(c.oid) AS n_tup_del, - pg_stat_get_tuples_hot_updated(c.oid) AS n_tup_hot_upd, - pg_stat_get_tuples_newpage_updated(c.oid) AS n_tup_newpage_upd, - pg_stat_get_live_tuples(c.oid) AS n_live_tup, - pg_stat_get_dead_tuples(c.oid) AS n_dead_tup, - pg_stat_get_mod_since_analyze(c.oid) AS n_mod_since_analyze, - pg_stat_get_ins_since_vacuum(c.oid) AS n_ins_since_vacuum, - pg_stat_get_last_vacuum_time(c.oid) AS last_vacuum, - pg_stat_get_last_autovacuum_time(c.oid) AS last_autovacuum, - pg_stat_get_last_analyze_time(c.oid) AS last_analyze, - pg_stat_get_last_autoanalyze_time(c.oid) AS last_autoanalyze, - pg_stat_get_vacuum_count(c.oid) AS vacuum_count, - pg_stat_get_autovacuum_count(c.oid) AS autovacuum_count, - pg_stat_get_analyze_count(c.oid) AS analyze_count, - pg_stat_get_autoanalyze_count(c.oid) AS autoanalyze_count - FROM ((pg_class c - LEFT JOIN pg_index i ON ((c.oid = i.indrelid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char", 'p'::"char"])) - GROUP BY c.oid, n.nspname, c.relname; -pg_stat_archiver| SELECT archived_count, - last_archived_wal, - last_archived_time, - failed_count, - last_failed_wal, - last_failed_time, - stats_reset - FROM pg_stat_get_archiver() s(archived_count, last_archived_wal, last_archived_time, failed_count, last_failed_wal, last_failed_time, stats_reset); -pg_stat_bgwriter| SELECT pg_stat_get_bgwriter_buf_written_clean() AS buffers_clean, - pg_stat_get_bgwriter_maxwritten_clean() AS maxwritten_clean, - pg_stat_get_buf_alloc() AS buffers_alloc, - pg_stat_get_bgwriter_stat_reset_time() AS stats_reset; -pg_stat_checkpointer| SELECT pg_stat_get_checkpointer_num_timed() AS num_timed, - pg_stat_get_checkpointer_num_requested() AS num_requested, - pg_stat_get_checkpointer_restartpoints_timed() AS restartpoints_timed, - pg_stat_get_checkpointer_restartpoints_requested() AS restartpoints_req, - pg_stat_get_checkpointer_restartpoints_performed() AS restartpoints_done, - pg_stat_get_checkpointer_write_time() AS write_time, - pg_stat_get_checkpointer_sync_time() AS sync_time, - pg_stat_get_checkpointer_buffers_written() AS buffers_written, - pg_stat_get_checkpointer_stat_reset_time() AS stats_reset; -pg_stat_database| SELECT oid AS datid, - datname, - CASE - WHEN (oid = (0)::oid) THEN 0 - ELSE pg_stat_get_db_numbackends(oid) - END AS numbackends, - pg_stat_get_db_xact_commit(oid) AS xact_commit, - pg_stat_get_db_xact_rollback(oid) AS xact_rollback, - (pg_stat_get_db_blocks_fetched(oid) - pg_stat_get_db_blocks_hit(oid)) AS blks_read, - pg_stat_get_db_blocks_hit(oid) AS blks_hit, - pg_stat_get_db_tuples_returned(oid) AS tup_returned, - pg_stat_get_db_tuples_fetched(oid) AS tup_fetched, - pg_stat_get_db_tuples_inserted(oid) AS tup_inserted, - pg_stat_get_db_tuples_updated(oid) AS tup_updated, - pg_stat_get_db_tuples_deleted(oid) AS tup_deleted, - pg_stat_get_db_conflict_all(oid) AS conflicts, - pg_stat_get_db_temp_files(oid) AS temp_files, - pg_stat_get_db_temp_bytes(oid) 
AS temp_bytes, - pg_stat_get_db_deadlocks(oid) AS deadlocks, - pg_stat_get_db_checksum_failures(oid) AS checksum_failures, - pg_stat_get_db_checksum_last_failure(oid) AS checksum_last_failure, - pg_stat_get_db_blk_read_time(oid) AS blk_read_time, - pg_stat_get_db_blk_write_time(oid) AS blk_write_time, - pg_stat_get_db_session_time(oid) AS session_time, - pg_stat_get_db_active_time(oid) AS active_time, - pg_stat_get_db_idle_in_transaction_time(oid) AS idle_in_transaction_time, - pg_stat_get_db_sessions(oid) AS sessions, - pg_stat_get_db_sessions_abandoned(oid) AS sessions_abandoned, - pg_stat_get_db_sessions_fatal(oid) AS sessions_fatal, - pg_stat_get_db_sessions_killed(oid) AS sessions_killed, - pg_stat_get_db_stat_reset_time(oid) AS stats_reset - FROM ( SELECT 0 AS oid, - NULL::name AS datname - UNION ALL - SELECT pg_database.oid, - pg_database.datname - FROM pg_database) d; -pg_stat_database_conflicts| SELECT oid AS datid, - datname, - pg_stat_get_db_conflict_tablespace(oid) AS confl_tablespace, - pg_stat_get_db_conflict_lock(oid) AS confl_lock, - pg_stat_get_db_conflict_snapshot(oid) AS confl_snapshot, - pg_stat_get_db_conflict_bufferpin(oid) AS confl_bufferpin, - pg_stat_get_db_conflict_startup_deadlock(oid) AS confl_deadlock, - pg_stat_get_db_conflict_logicalslot(oid) AS confl_active_logicalslot - FROM pg_database d; -pg_stat_gssapi| SELECT pid, - gss_auth AS gss_authenticated, - gss_princ AS principal, - gss_enc AS encrypted, - gss_delegation AS credentials_delegated - FROM pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion, sslcipher, sslbits, ssl_client_dn, ssl_client_serial, ssl_issuer_dn, gss_auth, gss_princ, gss_enc, gss_delegation, leader_pid, query_id) - WHERE (client_port IS NOT NULL); -pg_stat_io| SELECT backend_type, - object, - context, - reads, - read_time, - writes, - write_time, - writebacks, - writeback_time, - extends, - extend_time, - op_bytes, - hits, - evictions, - reuses, - fsyncs, - fsync_time, - stats_reset - FROM pg_stat_get_io() b(backend_type, object, context, reads, read_time, writes, write_time, writebacks, writeback_time, extends, extend_time, op_bytes, hits, evictions, reuses, fsyncs, fsync_time, stats_reset); -pg_stat_progress_analyze| SELECT s.pid, - s.datid, - d.datname, - s.relid, - CASE s.param1 - WHEN 0 THEN 'initializing'::text - WHEN 1 THEN 'acquiring sample rows'::text - WHEN 2 THEN 'acquiring inherited sample rows'::text - WHEN 3 THEN 'computing statistics'::text - WHEN 4 THEN 'computing extended statistics'::text - WHEN 5 THEN 'finalizing analyze'::text - ELSE NULL::text - END AS phase, - s.param2 AS sample_blks_total, - s.param3 AS sample_blks_scanned, - s.param4 AS ext_stats_total, - s.param5 AS ext_stats_computed, - s.param6 AS child_tables_total, - s.param7 AS child_tables_done, - (s.param8)::oid AS current_child_table_relid - FROM (pg_stat_get_progress_info('ANALYZE'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) - LEFT JOIN pg_database d ON ((s.datid = d.oid))); -pg_stat_progress_basebackup| SELECT pid, - CASE param1 - WHEN 0 THEN 'initializing'::text - WHEN 1 THEN 'waiting for checkpoint to finish'::text - WHEN 2 THEN 'estimating backup size'::text - WHEN 3 THEN 
'streaming database files'::text - WHEN 4 THEN 'waiting for wal archiving to finish'::text - WHEN 5 THEN 'transferring wal files'::text - ELSE NULL::text - END AS phase, - CASE param2 - WHEN '-1'::integer THEN NULL::bigint - ELSE param2 - END AS backup_total, - param3 AS backup_streamed, - param4 AS tablespaces_total, - param5 AS tablespaces_streamed - FROM pg_stat_get_progress_info('BASEBACKUP'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20); -pg_stat_progress_cluster| SELECT s.pid, - s.datid, - d.datname, - s.relid, - CASE s.param1 - WHEN 1 THEN 'CLUSTER'::text - WHEN 2 THEN 'VACUUM FULL'::text - ELSE NULL::text - END AS command, - CASE s.param2 - WHEN 0 THEN 'initializing'::text - WHEN 1 THEN 'seq scanning heap'::text - WHEN 2 THEN 'index scanning heap'::text - WHEN 3 THEN 'sorting tuples'::text - WHEN 4 THEN 'writing new heap'::text - WHEN 5 THEN 'swapping relation files'::text - WHEN 6 THEN 'rebuilding index'::text - WHEN 7 THEN 'performing final cleanup'::text - ELSE NULL::text - END AS phase, - (s.param3)::oid AS cluster_index_relid, - s.param4 AS heap_tuples_scanned, - s.param5 AS heap_tuples_written, - s.param6 AS heap_blks_total, - s.param7 AS heap_blks_scanned, - s.param8 AS index_rebuild_count - FROM (pg_stat_get_progress_info('CLUSTER'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) - LEFT JOIN pg_database d ON ((s.datid = d.oid))); -pg_stat_progress_copy| SELECT s.pid, - s.datid, - d.datname, - s.relid, - CASE s.param5 - WHEN 1 THEN 'COPY FROM'::text - WHEN 2 THEN 'COPY TO'::text - ELSE NULL::text - END AS command, - CASE s.param6 - WHEN 1 THEN 'FILE'::text - WHEN 2 THEN 'PROGRAM'::text - WHEN 3 THEN 'PIPE'::text - WHEN 4 THEN 'CALLBACK'::text - ELSE NULL::text - END AS type, - s.param1 AS bytes_processed, - s.param2 AS bytes_total, - s.param3 AS tuples_processed, - s.param4 AS tuples_excluded, - s.param7 AS tuples_skipped - FROM (pg_stat_get_progress_info('COPY'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) - LEFT JOIN pg_database d ON ((s.datid = d.oid))); -pg_stat_progress_create_index| SELECT s.pid, - s.datid, - d.datname, - s.relid, - (s.param7)::oid AS index_relid, - CASE s.param1 - WHEN 1 THEN 'CREATE INDEX'::text - WHEN 2 THEN 'CREATE INDEX CONCURRENTLY'::text - WHEN 3 THEN 'REINDEX'::text - WHEN 4 THEN 'REINDEX CONCURRENTLY'::text - ELSE NULL::text - END AS command, - CASE s.param10 - WHEN 0 THEN 'initializing'::text - WHEN 1 THEN 'waiting for writers before build'::text - WHEN 2 THEN ('building index'::text || COALESCE((': '::text || pg_indexam_progress_phasename((s.param9)::oid, s.param11)), ''::text)) - WHEN 3 THEN 'waiting for writers before validation'::text - WHEN 4 THEN 'index validation: scanning index'::text - WHEN 5 THEN 'index validation: sorting tuples'::text - WHEN 6 THEN 'index validation: scanning table'::text - WHEN 7 THEN 'waiting for old snapshots'::text - WHEN 8 THEN 'waiting for readers before marking dead'::text - WHEN 9 THEN 'waiting for readers before dropping'::text - ELSE NULL::text - END AS phase, - s.param4 AS lockers_total, - s.param5 AS lockers_done, - s.param6 AS current_locker_pid, - 
s.param16 AS blocks_total, - s.param17 AS blocks_done, - s.param12 AS tuples_total, - s.param13 AS tuples_done, - s.param14 AS partitions_total, - s.param15 AS partitions_done - FROM (pg_stat_get_progress_info('CREATE INDEX'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) - LEFT JOIN pg_database d ON ((s.datid = d.oid))); -pg_stat_progress_vacuum| SELECT s.pid, - s.datid, - d.datname, - s.relid, - CASE s.param1 - WHEN 0 THEN 'initializing'::text - WHEN 1 THEN 'scanning heap'::text - WHEN 2 THEN 'vacuuming indexes'::text - WHEN 3 THEN 'vacuuming heap'::text - WHEN 4 THEN 'cleaning up indexes'::text - WHEN 5 THEN 'truncating heap'::text - WHEN 6 THEN 'performing final cleanup'::text - ELSE NULL::text - END AS phase, - s.param2 AS heap_blks_total, - s.param3 AS heap_blks_scanned, - s.param4 AS heap_blks_vacuumed, - s.param5 AS index_vacuum_count, - s.param6 AS max_dead_tuples, - s.param7 AS num_dead_tuples, - s.param8 AS indexes_total, - s.param9 AS indexes_processed - FROM (pg_stat_get_progress_info('VACUUM'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) - LEFT JOIN pg_database d ON ((s.datid = d.oid))); -pg_stat_recovery_prefetch| SELECT stats_reset, - prefetch, - hit, - skip_init, - skip_new, - skip_fpw, - skip_rep, - wal_distance, - block_distance, - io_depth - FROM pg_stat_get_recovery_prefetch() s(stats_reset, prefetch, hit, skip_init, skip_new, skip_fpw, skip_rep, wal_distance, block_distance, io_depth); -pg_stat_replication| SELECT s.pid, - s.usesysid, - u.rolname AS usename, - s.application_name, - s.client_addr, - s.client_hostname, - s.client_port, - s.backend_start, - s.backend_xmin, - w.state, - w.sent_lsn, - w.write_lsn, - w.flush_lsn, - w.replay_lsn, - w.write_lag, - w.flush_lag, - w.replay_lag, - w.sync_priority, - w.sync_state, - w.reply_time - FROM ((pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion, sslcipher, sslbits, ssl_client_dn, ssl_client_serial, ssl_issuer_dn, gss_auth, gss_princ, gss_enc, gss_delegation, leader_pid, query_id) - JOIN pg_stat_get_wal_senders() w(pid, state, sent_lsn, write_lsn, flush_lsn, replay_lsn, write_lag, flush_lag, replay_lag, sync_priority, sync_state, reply_time) ON ((s.pid = w.pid))) - LEFT JOIN pg_authid u ON ((s.usesysid = u.oid))); -pg_stat_replication_slots| SELECT s.slot_name, - s.spill_txns, - s.spill_count, - s.spill_bytes, - s.stream_txns, - s.stream_count, - s.stream_bytes, - s.total_txns, - s.total_bytes, - s.stats_reset - FROM pg_replication_slots r, - LATERAL pg_stat_get_replication_slot((r.slot_name)::text) s(slot_name, spill_txns, spill_count, spill_bytes, stream_txns, stream_count, stream_bytes, total_txns, total_bytes, stats_reset) - WHERE (r.datoid IS NOT NULL); -pg_stat_slru| SELECT name, - blks_zeroed, - blks_hit, - blks_read, - blks_written, - blks_exists, - flushes, - truncates, - stats_reset - FROM pg_stat_get_slru() s(name, blks_zeroed, blks_hit, blks_read, blks_written, blks_exists, flushes, truncates, stats_reset); -pg_stat_ssl| SELECT pid, - ssl, - sslversion AS version, - sslcipher AS cipher, - 
sslbits AS bits, - ssl_client_dn AS client_dn, - ssl_client_serial AS client_serial, - ssl_issuer_dn AS issuer_dn - FROM pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion, sslcipher, sslbits, ssl_client_dn, ssl_client_serial, ssl_issuer_dn, gss_auth, gss_princ, gss_enc, gss_delegation, leader_pid, query_id) - WHERE (client_port IS NOT NULL); -pg_stat_subscription| SELECT su.oid AS subid, - su.subname, - st.worker_type, - st.pid, - st.leader_pid, - st.relid, - st.received_lsn, - st.last_msg_send_time, - st.last_msg_receipt_time, - st.latest_end_lsn, - st.latest_end_time - FROM (pg_subscription su - LEFT JOIN pg_stat_get_subscription(NULL::oid) st(subid, relid, pid, leader_pid, received_lsn, last_msg_send_time, last_msg_receipt_time, latest_end_lsn, latest_end_time, worker_type) ON ((st.subid = su.oid))); -pg_stat_subscription_stats| SELECT ss.subid, - s.subname, - ss.apply_error_count, - ss.sync_error_count, - ss.stats_reset - FROM pg_subscription s, - LATERAL pg_stat_get_subscription_stats(s.oid) ss(subid, apply_error_count, sync_error_count, stats_reset); -pg_stat_sys_indexes| SELECT relid, - indexrelid, - schemaname, - relname, - indexrelname, - idx_scan, - last_idx_scan, - idx_tup_read, - idx_tup_fetch - FROM pg_stat_all_indexes - WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); -pg_stat_sys_tables| SELECT relid, - schemaname, - relname, - seq_scan, - last_seq_scan, - seq_tup_read, - idx_scan, - last_idx_scan, - idx_tup_fetch, - n_tup_ins, - n_tup_upd, - n_tup_del, - n_tup_hot_upd, - n_tup_newpage_upd, - n_live_tup, - n_dead_tup, - n_mod_since_analyze, - n_ins_since_vacuum, - last_vacuum, - last_autovacuum, - last_analyze, - last_autoanalyze, - vacuum_count, - autovacuum_count, - analyze_count, - autoanalyze_count - FROM pg_stat_all_tables - WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); -pg_stat_user_functions| SELECT p.oid AS funcid, - n.nspname AS schemaname, - p.proname AS funcname, - pg_stat_get_function_calls(p.oid) AS calls, - pg_stat_get_function_total_time(p.oid) AS total_time, - pg_stat_get_function_self_time(p.oid) AS self_time - FROM (pg_proc p - LEFT JOIN pg_namespace n ON ((n.oid = p.pronamespace))) - WHERE ((p.prolang <> (12)::oid) AND (pg_stat_get_function_calls(p.oid) IS NOT NULL)); -pg_stat_user_indexes| SELECT relid, - indexrelid, - schemaname, - relname, - indexrelname, - idx_scan, - last_idx_scan, - idx_tup_read, - idx_tup_fetch - FROM pg_stat_all_indexes - WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); -pg_stat_user_tables| SELECT relid, - schemaname, - relname, - seq_scan, - last_seq_scan, - seq_tup_read, - idx_scan, - last_idx_scan, - idx_tup_fetch, - n_tup_ins, - n_tup_upd, - n_tup_del, - n_tup_hot_upd, - n_tup_newpage_upd, - n_live_tup, - n_dead_tup, - n_mod_since_analyze, - n_ins_since_vacuum, - last_vacuum, - last_autovacuum, - last_analyze, - last_autoanalyze, - vacuum_count, - autovacuum_count, - analyze_count, - autoanalyze_count - FROM pg_stat_all_tables - WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); -pg_stat_wal| SELECT wal_records, - 
wal_fpi, - wal_bytes, - wal_buffers_full, - wal_write, - wal_sync, - wal_write_time, - wal_sync_time, - stats_reset - FROM pg_stat_get_wal() w(wal_records, wal_fpi, wal_bytes, wal_buffers_full, wal_write, wal_sync, wal_write_time, wal_sync_time, stats_reset); -pg_stat_wal_receiver| SELECT pid, - status, - receive_start_lsn, - receive_start_tli, - written_lsn, - flushed_lsn, - received_tli, - last_msg_send_time, - last_msg_receipt_time, - latest_end_lsn, - latest_end_time, - slot_name, - sender_host, - sender_port, - conninfo - FROM pg_stat_get_wal_receiver() s(pid, status, receive_start_lsn, receive_start_tli, written_lsn, flushed_lsn, received_tli, last_msg_send_time, last_msg_receipt_time, latest_end_lsn, latest_end_time, slot_name, sender_host, sender_port, conninfo) - WHERE (pid IS NOT NULL); -pg_stat_xact_all_tables| SELECT c.oid AS relid, - n.nspname AS schemaname, - c.relname, - pg_stat_get_xact_numscans(c.oid) AS seq_scan, - pg_stat_get_xact_tuples_returned(c.oid) AS seq_tup_read, - (sum(pg_stat_get_xact_numscans(i.indexrelid)))::bigint AS idx_scan, - ((sum(pg_stat_get_xact_tuples_fetched(i.indexrelid)))::bigint + pg_stat_get_xact_tuples_fetched(c.oid)) AS idx_tup_fetch, - pg_stat_get_xact_tuples_inserted(c.oid) AS n_tup_ins, - pg_stat_get_xact_tuples_updated(c.oid) AS n_tup_upd, - pg_stat_get_xact_tuples_deleted(c.oid) AS n_tup_del, - pg_stat_get_xact_tuples_hot_updated(c.oid) AS n_tup_hot_upd, - pg_stat_get_xact_tuples_newpage_updated(c.oid) AS n_tup_newpage_upd - FROM ((pg_class c - LEFT JOIN pg_index i ON ((c.oid = i.indrelid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char", 'p'::"char"])) - GROUP BY c.oid, n.nspname, c.relname; -pg_stat_xact_sys_tables| SELECT relid, - schemaname, - relname, - seq_scan, - seq_tup_read, - idx_scan, - idx_tup_fetch, - n_tup_ins, - n_tup_upd, - n_tup_del, - n_tup_hot_upd, - n_tup_newpage_upd - FROM pg_stat_xact_all_tables - WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); -pg_stat_xact_user_functions| SELECT p.oid AS funcid, - n.nspname AS schemaname, - p.proname AS funcname, - pg_stat_get_xact_function_calls(p.oid) AS calls, - pg_stat_get_xact_function_total_time(p.oid) AS total_time, - pg_stat_get_xact_function_self_time(p.oid) AS self_time - FROM (pg_proc p - LEFT JOIN pg_namespace n ON ((n.oid = p.pronamespace))) - WHERE ((p.prolang <> (12)::oid) AND (pg_stat_get_xact_function_calls(p.oid) IS NOT NULL)); -pg_stat_xact_user_tables| SELECT relid, - schemaname, - relname, - seq_scan, - seq_tup_read, - idx_scan, - idx_tup_fetch, - n_tup_ins, - n_tup_upd, - n_tup_del, - n_tup_hot_upd, - n_tup_newpage_upd - FROM pg_stat_xact_all_tables - WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); -pg_statio_all_indexes| SELECT c.oid AS relid, - i.oid AS indexrelid, - n.nspname AS schemaname, - c.relname, - i.relname AS indexrelname, - (pg_stat_get_blocks_fetched(i.oid) - pg_stat_get_blocks_hit(i.oid)) AS idx_blks_read, - pg_stat_get_blocks_hit(i.oid) AS idx_blks_hit - FROM (((pg_class c - JOIN pg_index x ON ((c.oid = x.indrelid))) - JOIN pg_class i ON ((i.oid = x.indexrelid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char"])); -pg_statio_all_sequences| SELECT c.oid AS relid, - n.nspname AS schemaname, - c.relname, - (pg_stat_get_blocks_fetched(c.oid) - 
pg_stat_get_blocks_hit(c.oid)) AS blks_read, - pg_stat_get_blocks_hit(c.oid) AS blks_hit - FROM (pg_class c - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE (c.relkind = 'S'::"char"); -pg_statio_all_tables| SELECT c.oid AS relid, - n.nspname AS schemaname, - c.relname, - (pg_stat_get_blocks_fetched(c.oid) - pg_stat_get_blocks_hit(c.oid)) AS heap_blks_read, - pg_stat_get_blocks_hit(c.oid) AS heap_blks_hit, - i.idx_blks_read, - i.idx_blks_hit, - (pg_stat_get_blocks_fetched(t.oid) - pg_stat_get_blocks_hit(t.oid)) AS toast_blks_read, - pg_stat_get_blocks_hit(t.oid) AS toast_blks_hit, - x.idx_blks_read AS tidx_blks_read, - x.idx_blks_hit AS tidx_blks_hit - FROM ((((pg_class c - LEFT JOIN pg_class t ON ((c.reltoastrelid = t.oid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - LEFT JOIN LATERAL ( SELECT (sum((pg_stat_get_blocks_fetched(pg_index.indexrelid) - pg_stat_get_blocks_hit(pg_index.indexrelid))))::bigint AS idx_blks_read, - (sum(pg_stat_get_blocks_hit(pg_index.indexrelid)))::bigint AS idx_blks_hit - FROM pg_index - WHERE (pg_index.indrelid = c.oid)) i ON (true)) - LEFT JOIN LATERAL ( SELECT (sum((pg_stat_get_blocks_fetched(pg_index.indexrelid) - pg_stat_get_blocks_hit(pg_index.indexrelid))))::bigint AS idx_blks_read, - (sum(pg_stat_get_blocks_hit(pg_index.indexrelid)))::bigint AS idx_blks_hit - FROM pg_index - WHERE (pg_index.indrelid = t.oid)) x ON (true)) - WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char"])); -pg_statio_sys_indexes| SELECT relid, - indexrelid, - schemaname, - relname, - indexrelname, - idx_blks_read, - idx_blks_hit - FROM pg_statio_all_indexes - WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); -pg_statio_sys_sequences| SELECT relid, - schemaname, - relname, - blks_read, - blks_hit - FROM pg_statio_all_sequences - WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); -pg_statio_sys_tables| SELECT relid, - schemaname, - relname, - heap_blks_read, - heap_blks_hit, - idx_blks_read, - idx_blks_hit, - toast_blks_read, - toast_blks_hit, - tidx_blks_read, - tidx_blks_hit - FROM pg_statio_all_tables - WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); -pg_statio_user_indexes| SELECT relid, - indexrelid, - schemaname, - relname, - indexrelname, - idx_blks_read, - idx_blks_hit - FROM pg_statio_all_indexes - WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); -pg_statio_user_sequences| SELECT relid, - schemaname, - relname, - blks_read, - blks_hit - FROM pg_statio_all_sequences - WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); -pg_statio_user_tables| SELECT relid, - schemaname, - relname, - heap_blks_read, - heap_blks_hit, - idx_blks_read, - idx_blks_hit, - toast_blks_read, - toast_blks_hit, - tidx_blks_read, - tidx_blks_hit - FROM pg_statio_all_tables - WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); -pg_stats| SELECT n.nspname AS schemaname, - c.relname AS tablename, - a.attname, - s.stainherit AS inherited, - s.stanullfrac AS null_frac, - s.stawidth AS avg_width, - s.stadistinct AS n_distinct, - CASE - WHEN (s.stakind1 = 1) THEN s.stavalues1 - WHEN (s.stakind2 = 1) THEN s.stavalues2 - WHEN (s.stakind3 = 1) THEN 
s.stavalues3 - WHEN (s.stakind4 = 1) THEN s.stavalues4 - WHEN (s.stakind5 = 1) THEN s.stavalues5 - ELSE NULL::anyarray - END AS most_common_vals, - CASE - WHEN (s.stakind1 = 1) THEN s.stanumbers1 - WHEN (s.stakind2 = 1) THEN s.stanumbers2 - WHEN (s.stakind3 = 1) THEN s.stanumbers3 - WHEN (s.stakind4 = 1) THEN s.stanumbers4 - WHEN (s.stakind5 = 1) THEN s.stanumbers5 - ELSE NULL::real[] - END AS most_common_freqs, - CASE - WHEN (s.stakind1 = 2) THEN s.stavalues1 - WHEN (s.stakind2 = 2) THEN s.stavalues2 - WHEN (s.stakind3 = 2) THEN s.stavalues3 - WHEN (s.stakind4 = 2) THEN s.stavalues4 - WHEN (s.stakind5 = 2) THEN s.stavalues5 - ELSE NULL::anyarray - END AS histogram_bounds, - CASE - WHEN (s.stakind1 = 3) THEN s.stanumbers1[1] - WHEN (s.stakind2 = 3) THEN s.stanumbers2[1] - WHEN (s.stakind3 = 3) THEN s.stanumbers3[1] - WHEN (s.stakind4 = 3) THEN s.stanumbers4[1] - WHEN (s.stakind5 = 3) THEN s.stanumbers5[1] - ELSE NULL::real - END AS correlation, - CASE - WHEN (s.stakind1 = 4) THEN s.stavalues1 - WHEN (s.stakind2 = 4) THEN s.stavalues2 - WHEN (s.stakind3 = 4) THEN s.stavalues3 - WHEN (s.stakind4 = 4) THEN s.stavalues4 - WHEN (s.stakind5 = 4) THEN s.stavalues5 - ELSE NULL::anyarray - END AS most_common_elems, - CASE - WHEN (s.stakind1 = 4) THEN s.stanumbers1 - WHEN (s.stakind2 = 4) THEN s.stanumbers2 - WHEN (s.stakind3 = 4) THEN s.stanumbers3 - WHEN (s.stakind4 = 4) THEN s.stanumbers4 - WHEN (s.stakind5 = 4) THEN s.stanumbers5 - ELSE NULL::real[] - END AS most_common_elem_freqs, - CASE - WHEN (s.stakind1 = 5) THEN s.stanumbers1 - WHEN (s.stakind2 = 5) THEN s.stanumbers2 - WHEN (s.stakind3 = 5) THEN s.stanumbers3 - WHEN (s.stakind4 = 5) THEN s.stanumbers4 - WHEN (s.stakind5 = 5) THEN s.stanumbers5 - ELSE NULL::real[] - END AS elem_count_histogram, - CASE - WHEN (s.stakind1 = 6) THEN s.stavalues1 - WHEN (s.stakind2 = 6) THEN s.stavalues2 - WHEN (s.stakind3 = 6) THEN s.stavalues3 - WHEN (s.stakind4 = 6) THEN s.stavalues4 - WHEN (s.stakind5 = 6) THEN s.stavalues5 - ELSE NULL::anyarray - END AS range_length_histogram, - CASE - WHEN (s.stakind1 = 6) THEN s.stanumbers1[1] - WHEN (s.stakind2 = 6) THEN s.stanumbers2[1] - WHEN (s.stakind3 = 6) THEN s.stanumbers3[1] - WHEN (s.stakind4 = 6) THEN s.stanumbers4[1] - WHEN (s.stakind5 = 6) THEN s.stanumbers5[1] - ELSE NULL::real - END AS range_empty_frac, - CASE - WHEN (s.stakind1 = 7) THEN s.stavalues1 - WHEN (s.stakind2 = 7) THEN s.stavalues2 - WHEN (s.stakind3 = 7) THEN s.stavalues3 - WHEN (s.stakind4 = 7) THEN s.stavalues4 - WHEN (s.stakind5 = 7) THEN s.stavalues5 - ELSE NULL::anyarray - END AS range_bounds_histogram - FROM (((pg_statistic s - JOIN pg_class c ON ((c.oid = s.starelid))) - JOIN pg_attribute a ON (((c.oid = a.attrelid) AND (a.attnum = s.staattnum)))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE ((NOT a.attisdropped) AND has_column_privilege(c.oid, a.attnum, 'select'::text) AND ((c.relrowsecurity = false) OR (NOT row_security_active(c.oid)))); -pg_stats_ext| SELECT cn.nspname AS schemaname, - c.relname AS tablename, - sn.nspname AS statistics_schemaname, - s.stxname AS statistics_name, - pg_get_userbyid(s.stxowner) AS statistics_owner, - ( SELECT array_agg(a.attname ORDER BY a.attnum) AS array_agg - FROM (unnest(s.stxkeys) k(k) - JOIN pg_attribute a ON (((a.attrelid = s.stxrelid) AND (a.attnum = k.k))))) AS attnames, - pg_get_statisticsobjdef_expressions(s.oid) AS exprs, - s.stxkind AS kinds, - sd.stxdinherit AS inherited, - sd.stxdndistinct AS n_distinct, - sd.stxddependencies AS dependencies, - m.most_common_vals, - 
m.most_common_val_nulls, - m.most_common_freqs, - m.most_common_base_freqs - FROM (((((pg_statistic_ext s - JOIN pg_class c ON ((c.oid = s.stxrelid))) - JOIN pg_statistic_ext_data sd ON ((s.oid = sd.stxoid))) - LEFT JOIN pg_namespace cn ON ((cn.oid = c.relnamespace))) - LEFT JOIN pg_namespace sn ON ((sn.oid = s.stxnamespace))) - LEFT JOIN LATERAL ( SELECT array_agg(pg_mcv_list_items."values") AS most_common_vals, - array_agg(pg_mcv_list_items.nulls) AS most_common_val_nulls, - array_agg(pg_mcv_list_items.frequency) AS most_common_freqs, - array_agg(pg_mcv_list_items.base_frequency) AS most_common_base_freqs - FROM pg_mcv_list_items(sd.stxdmcv) pg_mcv_list_items(index, "values", nulls, frequency, base_frequency)) m ON ((sd.stxdmcv IS NOT NULL))) - WHERE ((NOT (EXISTS ( SELECT 1 - FROM (unnest(s.stxkeys) k(k) - JOIN pg_attribute a ON (((a.attrelid = s.stxrelid) AND (a.attnum = k.k)))) - WHERE (NOT has_column_privilege(c.oid, a.attnum, 'select'::text))))) AND ((c.relrowsecurity = false) OR (NOT row_security_active(c.oid)))); -pg_stats_ext_exprs| SELECT cn.nspname AS schemaname, - c.relname AS tablename, - sn.nspname AS statistics_schemaname, - s.stxname AS statistics_name, - pg_get_userbyid(s.stxowner) AS statistics_owner, - stat.expr, - sd.stxdinherit AS inherited, - (stat.a).stanullfrac AS null_frac, - (stat.a).stawidth AS avg_width, - (stat.a).stadistinct AS n_distinct, - CASE - WHEN ((stat.a).stakind1 = 1) THEN (stat.a).stavalues1 - WHEN ((stat.a).stakind2 = 1) THEN (stat.a).stavalues2 - WHEN ((stat.a).stakind3 = 1) THEN (stat.a).stavalues3 - WHEN ((stat.a).stakind4 = 1) THEN (stat.a).stavalues4 - WHEN ((stat.a).stakind5 = 1) THEN (stat.a).stavalues5 - ELSE NULL::anyarray - END AS most_common_vals, - CASE - WHEN ((stat.a).stakind1 = 1) THEN (stat.a).stanumbers1 - WHEN ((stat.a).stakind2 = 1) THEN (stat.a).stanumbers2 - WHEN ((stat.a).stakind3 = 1) THEN (stat.a).stanumbers3 - WHEN ((stat.a).stakind4 = 1) THEN (stat.a).stanumbers4 - WHEN ((stat.a).stakind5 = 1) THEN (stat.a).stanumbers5 - ELSE NULL::real[] - END AS most_common_freqs, - CASE - WHEN ((stat.a).stakind1 = 2) THEN (stat.a).stavalues1 - WHEN ((stat.a).stakind2 = 2) THEN (stat.a).stavalues2 - WHEN ((stat.a).stakind3 = 2) THEN (stat.a).stavalues3 - WHEN ((stat.a).stakind4 = 2) THEN (stat.a).stavalues4 - WHEN ((stat.a).stakind5 = 2) THEN (stat.a).stavalues5 - ELSE NULL::anyarray - END AS histogram_bounds, - CASE - WHEN ((stat.a).stakind1 = 3) THEN (stat.a).stanumbers1[1] - WHEN ((stat.a).stakind2 = 3) THEN (stat.a).stanumbers2[1] - WHEN ((stat.a).stakind3 = 3) THEN (stat.a).stanumbers3[1] - WHEN ((stat.a).stakind4 = 3) THEN (stat.a).stanumbers4[1] - WHEN ((stat.a).stakind5 = 3) THEN (stat.a).stanumbers5[1] - ELSE NULL::real - END AS correlation, - CASE - WHEN ((stat.a).stakind1 = 4) THEN (stat.a).stavalues1 - WHEN ((stat.a).stakind2 = 4) THEN (stat.a).stavalues2 - WHEN ((stat.a).stakind3 = 4) THEN (stat.a).stavalues3 - WHEN ((stat.a).stakind4 = 4) THEN (stat.a).stavalues4 - WHEN ((stat.a).stakind5 = 4) THEN (stat.a).stavalues5 - ELSE NULL::anyarray - END AS most_common_elems, - CASE - WHEN ((stat.a).stakind1 = 4) THEN (stat.a).stanumbers1 - WHEN ((stat.a).stakind2 = 4) THEN (stat.a).stanumbers2 - WHEN ((stat.a).stakind3 = 4) THEN (stat.a).stanumbers3 - WHEN ((stat.a).stakind4 = 4) THEN (stat.a).stanumbers4 - WHEN ((stat.a).stakind5 = 4) THEN (stat.a).stanumbers5 - ELSE NULL::real[] - END AS most_common_elem_freqs, - CASE - WHEN ((stat.a).stakind1 = 5) THEN (stat.a).stanumbers1 - WHEN ((stat.a).stakind2 = 5) THEN (stat.a).stanumbers2 
- WHEN ((stat.a).stakind3 = 5) THEN (stat.a).stanumbers3 - WHEN ((stat.a).stakind4 = 5) THEN (stat.a).stanumbers4 - WHEN ((stat.a).stakind5 = 5) THEN (stat.a).stanumbers5 - ELSE NULL::real[] - END AS elem_count_histogram - FROM (((((pg_statistic_ext s - JOIN pg_class c ON ((c.oid = s.stxrelid))) - LEFT JOIN pg_statistic_ext_data sd ON ((s.oid = sd.stxoid))) - LEFT JOIN pg_namespace cn ON ((cn.oid = c.relnamespace))) - LEFT JOIN pg_namespace sn ON ((sn.oid = s.stxnamespace))) - JOIN LATERAL ( SELECT unnest(pg_get_statisticsobjdef_expressions(s.oid)) AS expr, - unnest(sd.stxdexpr) AS a) stat ON ((stat.expr IS NOT NULL))); -pg_tables| SELECT n.nspname AS schemaname, - c.relname AS tablename, - pg_get_userbyid(c.relowner) AS tableowner, - t.spcname AS tablespace, - c.relhasindex AS hasindexes, - c.relhasrules AS hasrules, - c.relhastriggers AS hastriggers, - c.relrowsecurity AS rowsecurity - FROM ((pg_class c - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - LEFT JOIN pg_tablespace t ON ((t.oid = c.reltablespace))) - WHERE (c.relkind = ANY (ARRAY['r'::"char", 'p'::"char"])); -pg_timezone_abbrevs| SELECT abbrev, - utc_offset, - is_dst - FROM pg_timezone_abbrevs() pg_timezone_abbrevs(abbrev, utc_offset, is_dst); -pg_timezone_names| SELECT name, - abbrev, - utc_offset, - is_dst - FROM pg_timezone_names() pg_timezone_names(name, abbrev, utc_offset, is_dst); -pg_user| SELECT usename, - usesysid, - usecreatedb, - usesuper, - userepl, - usebypassrls, - '********'::text AS passwd, - valuntil, - useconfig - FROM pg_shadow; -pg_user_mappings| SELECT u.oid AS umid, - s.oid AS srvid, - s.srvname, - u.umuser, - CASE - WHEN (u.umuser = (0)::oid) THEN 'public'::name - ELSE a.rolname - END AS usename, - CASE - WHEN (((u.umuser <> (0)::oid) AND (a.rolname = CURRENT_USER) AND (pg_has_role(s.srvowner, 'USAGE'::text) OR has_server_privilege(s.oid, 'USAGE'::text))) OR ((u.umuser = (0)::oid) AND pg_has_role(s.srvowner, 'USAGE'::text)) OR ( SELECT pg_authid.rolsuper - FROM pg_authid - WHERE (pg_authid.rolname = CURRENT_USER))) THEN u.umoptions - ELSE NULL::text[] - END AS umoptions - FROM ((pg_user_mapping u - JOIN pg_foreign_server s ON ((u.umserver = s.oid))) - LEFT JOIN pg_authid a ON ((a.oid = u.umuser))); -pg_views| SELECT n.nspname AS schemaname, - c.relname AS viewname, - pg_get_userbyid(c.relowner) AS viewowner, - pg_get_viewdef(c.oid) AS definition - FROM (pg_class c - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE (c.relkind = 'v'::"char"); -pg_wait_events| SELECT type, - name, - description - FROM pg_get_wait_events() pg_get_wait_events(type, name, description); -SELECT tablename, rulename, definition FROM pg_rules -WHERE schemaname = 'pg_catalog' -ORDER BY tablename, rulename; -pg_settings|pg_settings_n|CREATE RULE pg_settings_n AS - ON UPDATE TO pg_catalog.pg_settings DO INSTEAD NOTHING; -pg_settings|pg_settings_u|CREATE RULE pg_settings_u AS - ON UPDATE TO pg_catalog.pg_settings - WHERE (new.name = old.name) DO SELECT set_config(old.name, new.setting, false) AS set_config; --- restore normal output mode -\a\t --- --- CREATE OR REPLACE RULE --- -CREATE TABLE ruletest_tbl (a int, b int); -CREATE TABLE ruletest_tbl2 (a int, b int); -CREATE OR REPLACE RULE myrule AS ON INSERT TO ruletest_tbl - DO INSTEAD INSERT INTO ruletest_tbl2 VALUES (10, 10); -INSERT INTO ruletest_tbl VALUES (99, 99); -CREATE OR REPLACE RULE myrule AS ON INSERT TO ruletest_tbl - DO INSTEAD INSERT INTO ruletest_tbl2 VALUES (1000, 1000); -INSERT INTO ruletest_tbl VALUES (99, 99); -SELECT * FROM ruletest_tbl2; - 
a | b -------+------ - 10 | 10 - 1000 | 1000 -(2 rows) - --- Check that rewrite rules splitting one INSERT into multiple --- conditional statements does not disable FK checking. -create table rule_and_refint_t1 ( - id1a integer, - id1b integer, - primary key (id1a, id1b) -); -create table rule_and_refint_t2 ( - id2a integer, - id2c integer, - primary key (id2a, id2c) -); -create table rule_and_refint_t3 ( - id3a integer, - id3b integer, - id3c integer, - data text, - primary key (id3a, id3b, id3c), - foreign key (id3a, id3b) references rule_and_refint_t1 (id1a, id1b), - foreign key (id3a, id3c) references rule_and_refint_t2 (id2a, id2c) -); -insert into rule_and_refint_t1 values (1, 11); -insert into rule_and_refint_t1 values (1, 12); -insert into rule_and_refint_t1 values (2, 21); -insert into rule_and_refint_t1 values (2, 22); -insert into rule_and_refint_t2 values (1, 11); -insert into rule_and_refint_t2 values (1, 12); -insert into rule_and_refint_t2 values (2, 21); -insert into rule_and_refint_t2 values (2, 22); -insert into rule_and_refint_t3 values (1, 11, 11, 'row1'); -insert into rule_and_refint_t3 values (1, 11, 12, 'row2'); -insert into rule_and_refint_t3 values (1, 12, 11, 'row3'); -insert into rule_and_refint_t3 values (1, 12, 12, 'row4'); -insert into rule_and_refint_t3 values (1, 11, 13, 'row5'); -ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3c_fkey" -DETAIL: Key (id3a, id3c)=(1, 13) is not present in table "rule_and_refint_t2". -insert into rule_and_refint_t3 values (1, 13, 11, 'row6'); -ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3b_fkey" -DETAIL: Key (id3a, id3b)=(1, 13) is not present in table "rule_and_refint_t1". --- Ordinary table -insert into rule_and_refint_t3 values (1, 13, 11, 'row6') - on conflict do nothing; -ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3b_fkey" -DETAIL: Key (id3a, id3b)=(1, 13) is not present in table "rule_and_refint_t1". --- rule not fired, so fk violation -insert into rule_and_refint_t3 values (1, 13, 11, 'row6') - on conflict (id3a, id3b, id3c) do update - set id3b = excluded.id3b; -ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3b_fkey" -DETAIL: Key (id3a, id3b)=(1, 13) is not present in table "rule_and_refint_t1". --- rule fired, so unsupported -insert into shoelace values ('sl9', 0, 'pink', 35.0, 'inch', 0.0) - on conflict (sl_name) do update - set sl_avail = excluded.sl_avail; -ERROR: INSERT with ON CONFLICT clause cannot be used with table that has INSERT or UPDATE rules -create rule rule_and_refint_t3_ins as on insert to rule_and_refint_t3 - where (exists (select 1 from rule_and_refint_t3 - where (((rule_and_refint_t3.id3a = new.id3a) - and (rule_and_refint_t3.id3b = new.id3b)) - and (rule_and_refint_t3.id3c = new.id3c)))) - do instead update rule_and_refint_t3 set data = new.data - where (((rule_and_refint_t3.id3a = new.id3a) - and (rule_and_refint_t3.id3b = new.id3b)) - and (rule_and_refint_t3.id3c = new.id3c)); -insert into rule_and_refint_t3 values (1, 11, 13, 'row7'); -ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3c_fkey" -DETAIL: Key (id3a, id3c)=(1, 13) is not present in table "rule_and_refint_t2". 
-insert into rule_and_refint_t3 values (1, 13, 11, 'row8'); -ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3b_fkey" -DETAIL: Key (id3a, id3b)=(1, 13) is not present in table "rule_and_refint_t1". --- --- disallow dropping a view's rule (bug #5072) --- -create view rules_fooview as select 'rules_foo'::text; -drop rule "_RETURN" on rules_fooview; -ERROR: cannot drop rule _RETURN on view rules_fooview because view rules_fooview requires it -HINT: You can drop view rules_fooview instead. -drop view rules_fooview; --- --- We used to allow converting a table to a view by creating a "_RETURN" --- rule for it, but no more. --- -create table rules_fooview (x int, y text); -create rule "_RETURN" as on select to rules_fooview do instead - select 1 as x, 'aaa'::text as y; -ERROR: relation "rules_fooview" cannot have ON SELECT rules -DETAIL: This operation is not supported for tables. -drop table rules_fooview; --- likewise, converting a partitioned table or partition to view is not allowed -create table rules_fooview (x int, y text) partition by list (x); -create rule "_RETURN" as on select to rules_fooview do instead - select 1 as x, 'aaa'::text as y; -ERROR: relation "rules_fooview" cannot have ON SELECT rules -DETAIL: This operation is not supported for partitioned tables. -create table rules_fooview_part partition of rules_fooview for values in (1); -create rule "_RETURN" as on select to rules_fooview_part do instead - select 1 as x, 'aaa'::text as y; -ERROR: relation "rules_fooview_part" cannot have ON SELECT rules -DETAIL: This operation is not supported for tables. -drop table rules_fooview; --- --- check for planner problems with complex inherited UPDATES --- -create table id (id serial primary key, name text); --- currently, must respecify PKEY for each inherited subtable -create table test_1 (id integer primary key) inherits (id); -NOTICE: merging column "id" with inherited definition -create table test_2 (id integer primary key) inherits (id); -NOTICE: merging column "id" with inherited definition -create table test_3 (id integer primary key) inherits (id); -NOTICE: merging column "id" with inherited definition -insert into test_1 (name) values ('Test 1'); -insert into test_1 (name) values ('Test 2'); -insert into test_2 (name) values ('Test 3'); -insert into test_2 (name) values ('Test 4'); -insert into test_3 (name) values ('Test 5'); -insert into test_3 (name) values ('Test 6'); -create view id_ordered as select * from id order by id; -create rule update_id_ordered as on update to id_ordered - do instead update id set name = new.name where id = old.id; -select * from id_ordered; - id | name -----+-------- - 1 | Test 1 - 2 | Test 2 - 3 | Test 3 - 4 | Test 4 - 5 | Test 5 - 6 | Test 6 -(6 rows) - -update id_ordered set name = 'update 2' where id = 2; -update id_ordered set name = 'update 4' where id = 4; -update id_ordered set name = 'update 5' where id = 5; -select * from id_ordered; - id | name -----+---------- - 1 | Test 1 - 2 | update 2 - 3 | Test 3 - 4 | update 4 - 5 | update 5 - 6 | Test 6 -(6 rows) - -drop table id cascade; -NOTICE: drop cascades to 4 other objects -DETAIL: drop cascades to table test_1 -drop cascades to table test_2 -drop cascades to table test_3 -drop cascades to view id_ordered --- --- check corner case where an entirely-dummy subplan is created by --- constraint exclusion --- -create temp table t1 (a integer primary key); -create temp table t1_1 (check (a >= 0 and a < 10)) inherits (t1); -create 
temp table t1_2 (check (a >= 10 and a < 20)) inherits (t1); -create rule t1_ins_1 as on insert to t1 - where new.a >= 0 and new.a < 10 - do instead - insert into t1_1 values (new.a); -create rule t1_ins_2 as on insert to t1 - where new.a >= 10 and new.a < 20 - do instead - insert into t1_2 values (new.a); -create rule t1_upd_1 as on update to t1 - where old.a >= 0 and old.a < 10 - do instead - update t1_1 set a = new.a where a = old.a; -create rule t1_upd_2 as on update to t1 - where old.a >= 10 and old.a < 20 - do instead - update t1_2 set a = new.a where a = old.a; -set constraint_exclusion = on; -insert into t1 select * from generate_series(5,19,1) g; -update t1 set a = 4 where a = 5; -select * from only t1; - a ---- -(0 rows) - -select * from only t1_1; - a ---- - 6 - 7 - 8 - 9 - 4 -(5 rows) - -select * from only t1_2; - a ----- - 10 - 11 - 12 - 13 - 14 - 15 - 16 - 17 - 18 - 19 -(10 rows) - -reset constraint_exclusion; --- test FOR UPDATE in rules -create table rules_base(f1 int, f2 int); -insert into rules_base values(1,2), (11,12); -create rule r1 as on update to rules_base do instead - select * from rules_base where f1 = 1 for update; -update rules_base set f2 = f2 + 1; - f1 | f2 -----+---- - 1 | 2 -(1 row) - -create or replace rule r1 as on update to rules_base do instead - select * from rules_base where f1 = 11 for update of rules_base; -update rules_base set f2 = f2 + 1; - f1 | f2 -----+---- - 11 | 12 -(1 row) - -create or replace rule r1 as on update to rules_base do instead - select * from rules_base where f1 = 11 for update of old; -- error -ERROR: relation "old" in FOR UPDATE clause not found in FROM clause -LINE 2: select * from rules_base where f1 = 11 for update of old; - ^ -drop table rules_base; --- test various flavors of pg_get_viewdef() -select pg_get_viewdef('shoe'::regclass) as unpretty; - unpretty ------------------------------------------------- - SELECT sh.shoename, + - sh.sh_avail, + - sh.slcolor, + - sh.slminlen, + - (sh.slminlen * un.un_fact) AS slminlen_cm,+ - sh.slmaxlen, + - (sh.slmaxlen * un.un_fact) AS slmaxlen_cm,+ - sh.slunit + - FROM shoe_data sh, + - unit un + - WHERE (sh.slunit = un.un_name); -(1 row) - -select pg_get_viewdef('shoe'::regclass,true) as pretty; - pretty ----------------------------------------------- - SELECT sh.shoename, + - sh.sh_avail, + - sh.slcolor, + - sh.slminlen, + - sh.slminlen * un.un_fact AS slminlen_cm,+ - sh.slmaxlen, + - sh.slmaxlen * un.un_fact AS slmaxlen_cm,+ - sh.slunit + - FROM shoe_data sh, + - unit un + - WHERE sh.slunit = un.un_name; -(1 row) - -select pg_get_viewdef('shoe'::regclass,0) as prettier; - prettier ----------------------------------------------- - SELECT sh.shoename, + - sh.sh_avail, + - sh.slcolor, + - sh.slminlen, + - sh.slminlen * un.un_fact AS slminlen_cm,+ - sh.slmaxlen, + - sh.slmaxlen * un.un_fact AS slmaxlen_cm,+ - sh.slunit + - FROM shoe_data sh, + - unit un + - WHERE sh.slunit = un.un_name; -(1 row) - --- --- check multi-row VALUES in rules --- -create table rules_src(f1 int, f2 int default 0); -create table rules_log(f1 int, f2 int, tag text, id serial); -insert into rules_src values(1,2), (11,12); -create rule r1 as on update to rules_src do also - insert into rules_log values(old.*, 'old', default), (new.*, 'new', default); -update rules_src set f2 = f2 + 1; -update rules_src set f2 = f2 * 10; -select * from rules_src; - f1 | f2 -----+----- - 1 | 30 - 11 | 130 -(2 rows) - -select * from rules_log; - f1 | f2 | tag | id -----+-----+-----+---- - 1 | 2 | old | 1 - 1 | 3 | new | 2 - 11 | 12 | 
old | 3 - 11 | 13 | new | 4 - 1 | 3 | old | 5 - 1 | 30 | new | 6 - 11 | 13 | old | 7 - 11 | 130 | new | 8 -(8 rows) - -create rule r2 as on update to rules_src do also - values(old.*, 'old'), (new.*, 'new'); -update rules_src set f2 = f2 / 10; - column1 | column2 | column3 ----------+---------+--------- - 1 | 30 | old - 1 | 3 | new - 11 | 130 | old - 11 | 13 | new -(4 rows) - -create rule r3 as on insert to rules_src do also - insert into rules_log values(null, null, '-', default), (new.*, 'new', default); -insert into rules_src values(22,23), (33,default); -select * from rules_src; - f1 | f2 -----+---- - 1 | 3 - 11 | 13 - 22 | 23 - 33 | 0 -(4 rows) - -select * from rules_log; - f1 | f2 | tag | id -----+-----+-----+---- - 1 | 2 | old | 1 - 1 | 3 | new | 2 - 11 | 12 | old | 3 - 11 | 13 | new | 4 - 1 | 3 | old | 5 - 1 | 30 | new | 6 - 11 | 13 | old | 7 - 11 | 130 | new | 8 - 1 | 30 | old | 9 - 1 | 3 | new | 10 - 11 | 130 | old | 11 - 11 | 13 | new | 12 - | | - | 13 - 22 | 23 | new | 14 - | | - | 15 - 33 | 0 | new | 16 -(16 rows) - -create rule r4 as on delete to rules_src do notify rules_src_deletion; --- --- Ensure an aliased target relation for insert is correctly deparsed. --- -create rule r5 as on insert to rules_src do instead insert into rules_log AS trgt SELECT NEW.* RETURNING trgt.f1, trgt.f2; -create rule r6 as on update to rules_src do instead UPDATE rules_log AS trgt SET tag = 'updated' WHERE trgt.f1 = new.f1; --- --- Check deparse disambiguation of INSERT/UPDATE/DELETE targets. --- -create rule r7 as on delete to rules_src do instead - with wins as (insert into int4_tbl as trgt values (0) returning *), - wupd as (update int4_tbl trgt set f1 = f1+1 returning *), - wdel as (delete from int4_tbl trgt where f1 = 0 returning *) - insert into rules_log AS trgt select old.* from wins, wupd, wdel - returning trgt.f1, trgt.f2; --- check display of all rules added above -\d+ rules_src - Table "public.rules_src" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - f1 | integer | | | | plain | | - f2 | integer | | | 0 | plain | | -Rules: - r1 AS - ON UPDATE TO rules_src DO INSERT INTO rules_log (f1, f2, tag, id) VALUES (old.f1,old.f2,'old'::text,DEFAULT), (new.f1,new.f2,'new'::text,DEFAULT) - r2 AS - ON UPDATE TO rules_src DO VALUES (old.f1,old.f2,'old'::text), (new.f1,new.f2,'new'::text) - r3 AS - ON INSERT TO rules_src DO INSERT INTO rules_log (f1, f2, tag, id) VALUES (NULL::integer,NULL::integer,'-'::text,DEFAULT), (new.f1,new.f2,'new'::text,DEFAULT) - r4 AS - ON DELETE TO rules_src DO - NOTIFY rules_src_deletion - r5 AS - ON INSERT TO rules_src DO INSTEAD INSERT INTO rules_log AS trgt (f1, f2) SELECT new.f1, - new.f2 - RETURNING trgt.f1, - trgt.f2 - r6 AS - ON UPDATE TO rules_src DO INSTEAD UPDATE rules_log trgt SET tag = 'updated'::text - WHERE trgt.f1 = new.f1 - r7 AS - ON DELETE TO rules_src DO INSTEAD WITH wins AS ( - INSERT INTO int4_tbl AS trgt_1 (f1) - VALUES (0) - RETURNING trgt_1.f1 - ), wupd AS ( - UPDATE int4_tbl trgt_1 SET f1 = trgt_1.f1 + 1 - RETURNING trgt_1.f1 - ), wdel AS ( - DELETE FROM int4_tbl trgt_1 - WHERE trgt_1.f1 = 0 - RETURNING trgt_1.f1 - ) - INSERT INTO rules_log AS trgt (f1, f2) SELECT old.f1, - old.f2 - FROM wins, - wupd, - wdel - RETURNING trgt.f1, - trgt.f2 - --- --- Also check multiassignment deparsing. 
--- -create table rule_t1(f1 int, f2 int); -create table rule_dest(f1 int, f2 int[], tag text); -create rule rr as on update to rule_t1 do instead UPDATE rule_dest trgt - SET (f2[1], f1, tag) = (SELECT new.f2, new.f1, 'updated'::varchar) - WHERE trgt.f1 = new.f1 RETURNING new.*; -\d+ rule_t1 - Table "public.rule_t1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - f1 | integer | | | | plain | | - f2 | integer | | | | plain | | -Rules: - rr AS - ON UPDATE TO rule_t1 DO INSTEAD UPDATE rule_dest trgt SET (f2[1], f1, tag) = ( SELECT new.f2, - new.f1, - 'updated'::character varying AS "varchar") - WHERE trgt.f1 = new.f1 - RETURNING new.f1, - new.f2 - -drop table rule_t1, rule_dest; --- --- Test implicit LATERAL references to old/new in rules --- -CREATE TABLE rule_t1(a int, b text DEFAULT 'xxx', c int); -CREATE VIEW rule_v1 AS SELECT * FROM rule_t1; -CREATE RULE v1_ins AS ON INSERT TO rule_v1 - DO ALSO INSERT INTO rule_t1 - SELECT * FROM (SELECT a + 10 FROM rule_t1 WHERE a = NEW.a) tt; -CREATE RULE v1_upd AS ON UPDATE TO rule_v1 - DO ALSO UPDATE rule_t1 t - SET c = tt.a * 10 - FROM (SELECT a FROM rule_t1 WHERE a = OLD.a) tt WHERE t.a = tt.a; -INSERT INTO rule_v1 VALUES (1, 'a'), (2, 'b'); -UPDATE rule_v1 SET b = upper(b); -SELECT * FROM rule_t1; - a | b | c -----+-----+----- - 1 | A | 10 - 2 | B | 20 - 11 | XXX | 110 - 12 | XXX | 120 -(4 rows) - -DROP TABLE rule_t1 CASCADE; -NOTICE: drop cascades to view rule_v1 --- --- check alter rename rule --- -CREATE TABLE rule_t1 (a INT); -CREATE VIEW rule_v1 AS SELECT * FROM rule_t1; -CREATE RULE InsertRule AS - ON INSERT TO rule_v1 - DO INSTEAD - INSERT INTO rule_t1 VALUES(new.a); -ALTER RULE InsertRule ON rule_v1 RENAME to NewInsertRule; -INSERT INTO rule_v1 VALUES(1); -SELECT * FROM rule_v1; - a ---- - 1 -(1 row) - -\d+ rule_v1 - View "public.rule_v1" - Column | Type | Collation | Nullable | Default | Storage | Description ---------+---------+-----------+----------+---------+---------+------------- - a | integer | | | | plain | -View definition: - SELECT a - FROM rule_t1; -Rules: - newinsertrule AS - ON INSERT TO rule_v1 DO INSTEAD INSERT INTO rule_t1 (a) - VALUES (new.a) - --- --- error conditions for alter rename rule --- -ALTER RULE InsertRule ON rule_v1 RENAME TO NewInsertRule; -- doesn't exist -ERROR: rule "insertrule" for relation "rule_v1" does not exist -ALTER RULE NewInsertRule ON rule_v1 RENAME TO "_RETURN"; -- already exists -ERROR: rule "_RETURN" for relation "rule_v1" already exists -ALTER RULE "_RETURN" ON rule_v1 RENAME TO abc; -- ON SELECT rule cannot be renamed -ERROR: renaming an ON SELECT rule is not allowed -DROP VIEW rule_v1; -DROP TABLE rule_t1; --- --- check display of VALUES in view definitions --- -create view rule_v1 as values(1,2); -\d+ rule_v1 - View "public.rule_v1" - Column | Type | Collation | Nullable | Default | Storage | Description ----------+---------+-----------+----------+---------+---------+------------- - column1 | integer | | | | plain | - column2 | integer | | | | plain | -View definition: - VALUES (1,2); - -alter table rule_v1 rename column column2 to q2; -\d+ rule_v1 - View "public.rule_v1" - Column | Type | Collation | Nullable | Default | Storage | Description ----------+---------+-----------+----------+---------+---------+------------- - column1 | integer | | | | plain | - q2 | integer | | | | plain | -View definition: - SELECT column1, - column2 AS q2 - FROM (VALUES (1,2)) 
"*VALUES*"; - -drop view rule_v1; -create view rule_v1(x) as values(1,2); -\d+ rule_v1 - View "public.rule_v1" - Column | Type | Collation | Nullable | Default | Storage | Description ----------+---------+-----------+----------+---------+---------+------------- - x | integer | | | | plain | - column2 | integer | | | | plain | -View definition: - SELECT column1 AS x, - column2 - FROM (VALUES (1,2)) "*VALUES*"; - -drop view rule_v1; -create view rule_v1(x) as select * from (values(1,2)) v; -\d+ rule_v1 - View "public.rule_v1" - Column | Type | Collation | Nullable | Default | Storage | Description ----------+---------+-----------+----------+---------+---------+------------- - x | integer | | | | plain | - column2 | integer | | | | plain | -View definition: - SELECT column1 AS x, - column2 - FROM ( VALUES (1,2)) v; - -drop view rule_v1; -create view rule_v1(x) as select * from (values(1,2)) v(q,w); -\d+ rule_v1 - View "public.rule_v1" - Column | Type | Collation | Nullable | Default | Storage | Description ---------+---------+-----------+----------+---------+---------+------------- - x | integer | | | | plain | - w | integer | | | | plain | -View definition: - SELECT q AS x, - w - FROM ( VALUES (1,2)) v(q, w); - -drop view rule_v1; --- --- Check DO INSTEAD rules with ON CONFLICT --- -CREATE TABLE hats ( - hat_name char(10) primary key, - hat_color char(10) -- hat color -); -CREATE TABLE hat_data ( - hat_name char(10), - hat_color char(10) -- hat color -); -create unique index hat_data_unique_idx - on hat_data (hat_name COLLATE "C" bpchar_pattern_ops); --- DO NOTHING with ON CONFLICT -CREATE RULE hat_nosert AS ON INSERT TO hats - DO INSTEAD - INSERT INTO hat_data VALUES ( - NEW.hat_name, - NEW.hat_color) - ON CONFLICT (hat_name COLLATE "C" bpchar_pattern_ops) WHERE hat_color = 'green' - DO NOTHING - RETURNING *; -SELECT definition FROM pg_rules WHERE tablename = 'hats' ORDER BY rulename; - definition ---------------------------------------------------------------------------------------------- - CREATE RULE hat_nosert AS + - ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + - VALUES (new.hat_name, new.hat_color) ON CONFLICT(hat_name COLLATE "C" bpchar_pattern_ops)+ - WHERE (hat_color = 'green'::bpchar) DO NOTHING + - RETURNING hat_data.hat_name, + - hat_data.hat_color; -(1 row) - --- Works (projects row) -INSERT INTO hats VALUES ('h7', 'black') RETURNING *; - hat_name | hat_color -------------+------------ - h7 | black -(1 row) - --- Works (does nothing) -INSERT INTO hats VALUES ('h7', 'black') RETURNING *; - hat_name | hat_color -----------+----------- -(0 rows) - -SELECT tablename, rulename, definition FROM pg_rules - WHERE tablename = 'hats'; - tablename | rulename | definition ------------+------------+--------------------------------------------------------------------------------------------- - hats | hat_nosert | CREATE RULE hat_nosert AS + - | | ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + - | | VALUES (new.hat_name, new.hat_color) ON CONFLICT(hat_name COLLATE "C" bpchar_pattern_ops)+ - | | WHERE (hat_color = 'green'::bpchar) DO NOTHING + - | | RETURNING hat_data.hat_name, + - | | hat_data.hat_color; -(1 row) - -DROP RULE hat_nosert ON hats; --- DO NOTHING without ON CONFLICT -CREATE RULE hat_nosert_all AS ON INSERT TO hats - DO INSTEAD - INSERT INTO hat_data VALUES ( - NEW.hat_name, - NEW.hat_color) - ON CONFLICT - DO NOTHING - RETURNING *; -SELECT definition FROM pg_rules WHERE tablename = 'hats' ORDER BY rulename; - 
definition -------------------------------------------------------------------------------------- - CREATE RULE hat_nosert_all AS + - ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color)+ - VALUES (new.hat_name, new.hat_color) ON CONFLICT DO NOTHING + - RETURNING hat_data.hat_name, + - hat_data.hat_color; -(1 row) - -DROP RULE hat_nosert_all ON hats; --- Works (does nothing) -INSERT INTO hats VALUES ('h7', 'black') RETURNING *; - hat_name | hat_color -------------+------------ - h7 | black -(1 row) - --- DO UPDATE with a WHERE clause -CREATE RULE hat_upsert AS ON INSERT TO hats - DO INSTEAD - INSERT INTO hat_data VALUES ( - NEW.hat_name, - NEW.hat_color) - ON CONFLICT (hat_name) - DO UPDATE - SET hat_name = hat_data.hat_name, hat_color = excluded.hat_color - WHERE excluded.hat_color <> 'forbidden' AND hat_data.* != excluded.* - RETURNING *; -SELECT definition FROM pg_rules WHERE tablename = 'hats' ORDER BY rulename; - definition ------------------------------------------------------------------------------------------------------------------------------------------ - CREATE RULE hat_upsert AS + - ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + - VALUES (new.hat_name, new.hat_color) ON CONFLICT(hat_name) DO UPDATE SET hat_name = hat_data.hat_name, hat_color = excluded.hat_color+ - WHERE ((excluded.hat_color <> 'forbidden'::bpchar) AND (hat_data.* <> excluded.*)) + - RETURNING hat_data.hat_name, + - hat_data.hat_color; -(1 row) - --- Works (does upsert) -INSERT INTO hats VALUES ('h8', 'black') RETURNING *; - hat_name | hat_color -------------+------------ - h8 | black -(1 row) - -SELECT * FROM hat_data WHERE hat_name = 'h8'; - hat_name | hat_color -------------+------------ - h8 | black -(1 row) - -INSERT INTO hats VALUES ('h8', 'white') RETURNING *; - hat_name | hat_color -------------+------------ - h8 | white -(1 row) - -SELECT * FROM hat_data WHERE hat_name = 'h8'; - hat_name | hat_color -------------+------------ - h8 | white -(1 row) - -INSERT INTO hats VALUES ('h8', 'forbidden') RETURNING *; - hat_name | hat_color -----------+----------- -(0 rows) - -SELECT * FROM hat_data WHERE hat_name = 'h8'; - hat_name | hat_color -------------+------------ - h8 | white -(1 row) - -SELECT tablename, rulename, definition FROM pg_rules - WHERE tablename = 'hats'; - tablename | rulename | definition ------------+------------+----------------------------------------------------------------------------------------------------------------------------------------- - hats | hat_upsert | CREATE RULE hat_upsert AS + - | | ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + - | | VALUES (new.hat_name, new.hat_color) ON CONFLICT(hat_name) DO UPDATE SET hat_name = hat_data.hat_name, hat_color = excluded.hat_color+ - | | WHERE ((excluded.hat_color <> 'forbidden'::bpchar) AND (hat_data.* <> excluded.*)) + - | | RETURNING hat_data.hat_name, + - | | hat_data.hat_color; -(1 row) - --- ensure explain works for on insert conflict rules -explain (costs off) INSERT INTO hats VALUES ('h8', 'forbidden') RETURNING *; - QUERY PLAN -------------------------------------------------------------------------------------------------- - Insert on hat_data - Conflict Resolution: UPDATE - Conflict Arbiter Indexes: hat_data_unique_idx - Conflict Filter: ((excluded.hat_color <> 'forbidden'::bpchar) AND (hat_data.* <> excluded.*)) - -> Result -(5 rows) - --- ensure upserting into a rule, with a CTE (different offsets!) 
works -WITH data(hat_name, hat_color) AS MATERIALIZED ( - VALUES ('h8', 'green'), - ('h9', 'blue'), - ('h7', 'forbidden') -) -INSERT INTO hats - SELECT * FROM data -RETURNING *; - hat_name | hat_color -------------+------------ - h8 | green - h9 | blue -(2 rows) - -EXPLAIN (costs off) -WITH data(hat_name, hat_color) AS MATERIALIZED ( - VALUES ('h8', 'green'), - ('h9', 'blue'), - ('h7', 'forbidden') -) -INSERT INTO hats - SELECT * FROM data -RETURNING *; - QUERY PLAN -------------------------------------------------------------------------------------------------- - Insert on hat_data - Conflict Resolution: UPDATE - Conflict Arbiter Indexes: hat_data_unique_idx - Conflict Filter: ((excluded.hat_color <> 'forbidden'::bpchar) AND (hat_data.* <> excluded.*)) - CTE data - -> Values Scan on "*VALUES*" - -> CTE Scan on data -(7 rows) - -SELECT * FROM hat_data WHERE hat_name IN ('h8', 'h9', 'h7') ORDER BY hat_name; - hat_name | hat_color -------------+------------ - h7 | black - h8 | green - h9 | blue -(3 rows) - -DROP RULE hat_upsert ON hats; -drop table hats; -drop table hat_data; --- test for pg_get_functiondef properly regurgitating SET parameters --- Note that the function is kept around to stress pg_dump. -CREATE FUNCTION func_with_set_params() RETURNS integer - AS 'select 1;' - LANGUAGE SQL - SET search_path TO PG_CATALOG - SET extra_float_digits TO 2 - SET work_mem TO '4MB' - SET datestyle to iso, mdy - SET local_preload_libraries TO "Mixed/Case", 'c:/''a"/path', '', '0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789' - IMMUTABLE STRICT; -SELECT pg_get_functiondef('func_with_set_params()'::regprocedure); - pg_get_functiondef --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - CREATE OR REPLACE FUNCTION public.func_with_set_params() + - RETURNS integer + - LANGUAGE sql + - IMMUTABLE STRICT + - SET search_path TO 'pg_catalog' + - SET extra_float_digits TO '2' + - SET work_mem TO '4MB' + - SET "DateStyle" TO 'iso, mdy' + - SET local_preload_libraries TO 'Mixed/Case', 'c:/''a"/path', '', '0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789'+ - AS $function$select 1;$function$ + - -(1 row) - --- tests for pg_get_*def with invalid objects -SELECT pg_get_constraintdef(0); - pg_get_constraintdef ----------------------- - -(1 row) - -SELECT pg_get_functiondef(0); - pg_get_functiondef --------------------- - -(1 row) - -SELECT pg_get_indexdef(0); - pg_get_indexdef ------------------ - -(1 row) - -SELECT pg_get_ruledef(0); - pg_get_ruledef ----------------- - -(1 row) - -SELECT pg_get_statisticsobjdef(0); - pg_get_statisticsobjdef -------------------------- - -(1 row) - -SELECT pg_get_triggerdef(0); - pg_get_triggerdef -------------------- - -(1 row) - -SELECT pg_get_viewdef(0); - pg_get_viewdef ----------------- - -(1 row) - -SELECT pg_get_function_arguments(0); - pg_get_function_arguments ---------------------------- - -(1 row) - -SELECT pg_get_function_identity_arguments(0); - pg_get_function_identity_arguments ------------------------------------- - -(1 row) - -SELECT pg_get_function_result(0); - pg_get_function_result ------------------------- - -(1 row) - -SELECT pg_get_function_arg_default(0, 0); - pg_get_function_arg_default ------------------------------ - -(1 row) - -SELECT pg_get_function_arg_default('pg_class'::regclass, 0); - pg_get_function_arg_default 
------------------------------ - -(1 row) - -SELECT pg_get_partkeydef(0); - pg_get_partkeydef -------------------- - -(1 row) - --- test rename for a rule defined on a partitioned table -CREATE TABLE rules_parted_table (a int) PARTITION BY LIST (a); -CREATE TABLE rules_parted_table_1 PARTITION OF rules_parted_table FOR VALUES IN (1); -CREATE RULE rules_parted_table_insert AS ON INSERT to rules_parted_table - DO INSTEAD INSERT INTO rules_parted_table_1 VALUES (NEW.*); -ALTER RULE rules_parted_table_insert ON rules_parted_table RENAME TO rules_parted_table_insert_redirect; -DROP TABLE rules_parted_table; --- --- test MERGE --- -CREATE TABLE rule_merge1 (a int, b text); -CREATE TABLE rule_merge2 (a int, b text); -CREATE RULE rule1 AS ON INSERT TO rule_merge1 - DO INSTEAD INSERT INTO rule_merge2 VALUES (NEW.*); -CREATE RULE rule2 AS ON UPDATE TO rule_merge1 - DO INSTEAD UPDATE rule_merge2 SET a = NEW.a, b = NEW.b - WHERE a = OLD.a; -CREATE RULE rule3 AS ON DELETE TO rule_merge1 - DO INSTEAD DELETE FROM rule_merge2 WHERE a = OLD.a; --- MERGE not supported for table with rules -MERGE INTO rule_merge1 t USING (SELECT 1 AS a) s - ON t.a = s.a - WHEN MATCHED AND t.a < 2 THEN - UPDATE SET b = b || ' updated by merge' - WHEN MATCHED AND t.a > 2 THEN - DELETE - WHEN NOT MATCHED THEN - INSERT VALUES (s.a, ''); -ERROR: cannot execute MERGE on relation "rule_merge1" -DETAIL: MERGE is not supported for relations with rules. --- should be ok with the other table though -MERGE INTO rule_merge2 t USING (SELECT 1 AS a) s - ON t.a = s.a - WHEN MATCHED AND t.a < 2 THEN - UPDATE SET b = b || ' updated by merge' - WHEN MATCHED AND t.a > 2 THEN - DELETE - WHEN NOT MATCHED THEN - INSERT VALUES (s.a, ''); --- also ok if the rules are disabled -ALTER TABLE rule_merge1 DISABLE RULE rule1; -ALTER TABLE rule_merge1 DISABLE RULE rule2; -ALTER TABLE rule_merge1 DISABLE RULE rule3; -MERGE INTO rule_merge1 t USING (SELECT 1 AS a) s - ON t.a = s.a - WHEN MATCHED AND t.a < 2 THEN - UPDATE SET b = b || ' updated by merge' - WHEN MATCHED AND t.a > 2 THEN - DELETE - WHEN NOT MATCHED THEN - INSERT VALUES (s.a, ''); --- test deparsing -CREATE TABLE sf_target(id int, data text, filling int[]); -CREATE FUNCTION merge_sf_test() - RETURNS void - LANGUAGE sql -BEGIN ATOMIC - MERGE INTO sf_target t - USING rule_merge1 s - ON (s.a = t.id) -WHEN MATCHED - AND (s.a + t.id) = 42 - THEN UPDATE SET data = repeat(t.data, s.a) || s.b, id = length(s.b) -WHEN NOT MATCHED - AND (s.b IS NOT NULL) - THEN INSERT (data, id) - VALUES (s.b, s.a) -WHEN MATCHED - AND length(s.b || t.data) > 10 - THEN UPDATE SET data = s.b -WHEN MATCHED - AND s.a > 200 - THEN UPDATE SET filling[s.a] = t.id -WHEN MATCHED - AND s.a > 100 - THEN DELETE -WHEN MATCHED - THEN DO NOTHING -WHEN NOT MATCHED - AND s.a > 200 - THEN INSERT DEFAULT VALUES -WHEN NOT MATCHED - AND s.a > 100 - THEN INSERT (id, data) OVERRIDING USER VALUE - VALUES (s.a, DEFAULT) -WHEN NOT MATCHED - AND s.a > 0 - THEN INSERT - VALUES (s.a, s.b, DEFAULT) -WHEN NOT MATCHED - THEN INSERT (filling[1], id) - VALUES (s.a, s.a); -END; -\sf merge_sf_test -CREATE OR REPLACE FUNCTION public.merge_sf_test() - RETURNS void - LANGUAGE sql -BEGIN ATOMIC - MERGE INTO sf_target t - USING rule_merge1 s - ON (s.a = t.id) - WHEN MATCHED - AND ((s.a + t.id) = 42) - THEN UPDATE SET data = (repeat(t.data, s.a) || s.b), id = length(s.b) - WHEN NOT MATCHED - AND (s.b IS NOT NULL) - THEN INSERT (data, id) - VALUES (s.b, s.a) - WHEN MATCHED - AND (length((s.b || t.data)) > 10) - THEN UPDATE SET data = s.b - WHEN MATCHED - AND (s.a > 
200) - THEN UPDATE SET filling[s.a] = t.id - WHEN MATCHED - AND (s.a > 100) - THEN DELETE - WHEN MATCHED - THEN DO NOTHING - WHEN NOT MATCHED - AND (s.a > 200) - THEN INSERT DEFAULT VALUES - WHEN NOT MATCHED - AND (s.a > 100) - THEN INSERT (id, data) OVERRIDING USER VALUE - VALUES (s.a, DEFAULT) - WHEN NOT MATCHED - AND (s.a > 0) - THEN INSERT (id, data, filling) - VALUES (s.a, s.b, DEFAULT) - WHEN NOT MATCHED - THEN INSERT (filling[1], id) - VALUES (s.a, s.a); -END -DROP FUNCTION merge_sf_test; -DROP TABLE sf_target; --- --- Test enabling/disabling --- -CREATE TABLE ruletest1 (a int); -CREATE TABLE ruletest2 (b int); -CREATE RULE rule1 AS ON INSERT TO ruletest1 - DO INSTEAD INSERT INTO ruletest2 VALUES (NEW.*); -INSERT INTO ruletest1 VALUES (1); -ALTER TABLE ruletest1 DISABLE RULE rule1; -INSERT INTO ruletest1 VALUES (2); -ALTER TABLE ruletest1 ENABLE RULE rule1; -SET session_replication_role = replica; -INSERT INTO ruletest1 VALUES (3); -ALTER TABLE ruletest1 ENABLE REPLICA RULE rule1; -INSERT INTO ruletest1 VALUES (4); -RESET session_replication_role; -INSERT INTO ruletest1 VALUES (5); -SELECT * FROM ruletest1; - a ---- - 2 - 3 - 5 -(3 rows) - -SELECT * FROM ruletest2; - b ---- - 1 - 4 -(2 rows) - -DROP TABLE ruletest1; -DROP TABLE ruletest2; --- --- Test non-SELECT rule on security invoker view. --- Should use view owner's permissions. --- -CREATE USER regress_rule_user1; -CREATE TABLE ruletest_t1 (x int); -CREATE TABLE ruletest_t2 (x int); -CREATE VIEW ruletest_v1 WITH (security_invoker=true) AS - SELECT * FROM ruletest_t1; -GRANT INSERT ON ruletest_v1 TO regress_rule_user1; -CREATE RULE rule1 AS ON INSERT TO ruletest_v1 - DO INSTEAD INSERT INTO ruletest_t2 VALUES (NEW.*); -SET SESSION AUTHORIZATION regress_rule_user1; -INSERT INTO ruletest_v1 VALUES (1); -RESET SESSION AUTHORIZATION; --- Test that main query's relation's permissions are checked before --- the rule action's relation's. -CREATE TABLE ruletest_t3 (x int); -CREATE RULE rule2 AS ON UPDATE TO ruletest_t1 - DO INSTEAD INSERT INTO ruletest_t2 VALUES (OLD.*); -REVOKE ALL ON ruletest_t2 FROM regress_rule_user1; -REVOKE ALL ON ruletest_t3 FROM regress_rule_user1; -ALTER TABLE ruletest_t1 OWNER TO regress_rule_user1; -SET SESSION AUTHORIZATION regress_rule_user1; -UPDATE ruletest_t1 t1 SET x = 0 FROM ruletest_t3 t3 WHERE t1.x = t3.x; -ERROR: permission denied for table ruletest_t3 -RESET SESSION AUTHORIZATION; -SELECT * FROM ruletest_t1; - x ---- -(0 rows) - -SELECT * FROM ruletest_t2; - x ---- - 1 -(1 row) - -DROP VIEW ruletest_v1; -DROP RULE rule2 ON ruletest_t1; -DROP TABLE ruletest_t3; -DROP TABLE ruletest_t2; -DROP TABLE ruletest_t1; -DROP USER regress_rule_user1; +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/psql.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/psql.out --- /tmp/cirrus-ci-build/src/test/regress/expected/psql.out 2024-03-13 23:12:37.626519000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/psql.out 2024-03-13 23:14:29.840344000 +0000 @@ -1,6725 +1,2 @@ --- --- Tests for psql features that aren't closely connected to any --- specific server features --- --- \set --- fail: invalid name -\set invalid/name foo -invalid variable name: "invalid/name" --- fail: invalid value for special variable -\set AUTOCOMMIT foo -unrecognized value "foo" for "AUTOCOMMIT": Boolean expected -\set FETCH_COUNT foo -invalid value "foo" for "FETCH_COUNT": integer expected --- check handling of built-in boolean variable -\echo :ON_ERROR_ROLLBACK -off -\set ON_ERROR_ROLLBACK -\echo :ON_ERROR_ROLLBACK -on -\set ON_ERROR_ROLLBACK foo -unrecognized value "foo" for "ON_ERROR_ROLLBACK" -Available values are: on, off, interactive. -\echo :ON_ERROR_ROLLBACK -on -\set ON_ERROR_ROLLBACK on -\echo :ON_ERROR_ROLLBACK -on -\unset ON_ERROR_ROLLBACK -\echo :ON_ERROR_ROLLBACK -off --- \g and \gx -SELECT 1 as one, 2 as two \g - one | two ------+----- - 1 | 2 -(1 row) - -\gx --[ RECORD 1 ] -one | 1 -two | 2 - -SELECT 3 as three, 4 as four \gx --[ RECORD 1 ] -three | 3 -four | 4 - -\g - three | four --------+------ - 3 | 4 -(1 row) - --- \gx should work in FETCH_COUNT mode too -\set FETCH_COUNT 1 -SELECT 1 as one, 2 as two \g - one | two ------+----- - 1 | 2 -(1 row) - -\gx --[ RECORD 1 ] -one | 1 -two | 2 - -SELECT 3 as three, 4 as four \gx --[ RECORD 1 ] -three | 3 -four | 4 - -\g - three | four --------+------ - 3 | 4 -(1 row) - -\unset FETCH_COUNT --- \g/\gx with pset options -SELECT 1 as one, 2 as two \g (format=csv csv_fieldsep='\t') -one two -1 2 -\g - one | two ------+----- - 1 | 2 -(1 row) - -SELECT 1 as one, 2 as two \gx (title='foo bar') -foo bar --[ RECORD 1 ] -one | 1 -two | 2 - -\g - one | two ------+----- - 1 | 2 -(1 row) - --- \bind (extended query protocol) -SELECT 1 \bind \g - ?column? ----------- - 1 -(1 row) - -SELECT $1 \bind 'foo' \g - ?column? ----------- - foo -(1 row) - -SELECT $1, $2 \bind 'foo' 'bar' \g - ?column? | ?column? 
-----------+---------- - foo | bar -(1 row) - --- errors --- parse error -SELECT foo \bind \g -ERROR: column "foo" does not exist -LINE 1: SELECT foo - ^ --- tcop error -SELECT 1 \; SELECT 2 \bind \g -ERROR: cannot insert multiple commands into a prepared statement --- bind error -SELECT $1, $2 \bind 'foo' \g -ERROR: bind message supplies 1 parameters, but prepared statement "" requires 2 --- \gset -select 10 as test01, 20 as test02, 'Hello' as test03 \gset pref01_ -\echo :pref01_test01 :pref01_test02 :pref01_test03 -10 20 Hello --- should fail: bad variable name -select 10 as "bad name" -\gset -invalid variable name: "bad name" -select 97 as "EOF", 'ok' as _foo \gset IGNORE -attempt to \gset into specially treated variable "IGNOREEOF" ignored -\echo :IGNORE_foo :IGNOREEOF -ok 0 --- multiple backslash commands in one line -select 1 as x, 2 as y \gset pref01_ \\ \echo :pref01_x -1 -select 3 as x, 4 as y \gset pref01_ \echo :pref01_x \echo :pref01_y -3 -4 -select 5 as x, 6 as y \gset pref01_ \\ \g \echo :pref01_x :pref01_y - x | y ----+--- - 5 | 6 -(1 row) - -5 6 -select 7 as x, 8 as y \g \gset pref01_ \echo :pref01_x :pref01_y - x | y ----+--- - 7 | 8 -(1 row) - -7 8 --- NULL should unset the variable -\set var2 xyz -select 1 as var1, NULL as var2, 3 as var3 \gset -\echo :var1 :var2 :var3 -1 :var2 3 --- \gset requires just one tuple -select 10 as test01, 20 as test02 from generate_series(1,3) \gset -more than one row returned for \gset -select 10 as test01, 20 as test02 from generate_series(1,0) \gset -no rows returned for \gset --- \gset returns no tuples -select a from generate_series(1, 10) as a where a = 11 \gset -no rows returned for \gset -\echo :ROW_COUNT -0 --- \gset should work in FETCH_COUNT mode too -\set FETCH_COUNT 1 -select 1 as x, 2 as y \gset pref01_ \\ \echo :pref01_x -1 -select 3 as x, 4 as y \gset pref01_ \echo :pref01_x \echo :pref01_y -3 -4 -select 10 as test01, 20 as test02 from generate_series(1,3) \gset -more than one row returned for \gset -select 10 as test01, 20 as test02 from generate_series(1,0) \gset -no rows returned for \gset -\unset FETCH_COUNT --- \gdesc -SELECT - NULL AS zero, - 1 AS one, - 2.0 AS two, - 'three' AS three, - $1 AS four, - sin($2) as five, - 'foo'::varchar(4) as six, - CURRENT_DATE AS now -\gdesc - Column | Type ---------+---------------------- - zero | text - one | integer - two | numeric - three | text - four | text - five | double precision - six | character varying(4) - now | date -(8 rows) - --- should work with tuple-returning utilities, such as EXECUTE -PREPARE test AS SELECT 1 AS first, 2 AS second; -EXECUTE test \gdesc - Column | Type ---------+--------- - first | integer - second | integer -(2 rows) - -EXPLAIN EXECUTE test \gdesc - Column | Type -------------+------ - QUERY PLAN | text -(1 row) - --- should fail cleanly - syntax error -SELECT 1 + \gdesc -ERROR: syntax error at end of input -LINE 1: SELECT 1 + - ^ --- check behavior with empty results -SELECT \gdesc -The command has no result, or the result has no columns. -CREATE TABLE bububu(a int) \gdesc -The command has no result, or the result has no columns. --- subject command should not have executed -TABLE bububu; -- fail -ERROR: relation "bububu" does not exist -LINE 1: TABLE bububu; - ^ --- query buffer should remain unchanged -SELECT 1 AS x, 'Hello', 2 AS y, true AS "dirty\name" -\gdesc - Column | Type -------------+--------- - x | integer - ?column? | text - y | integer - dirty\name | boolean -(4 rows) - -\g - x | ?column? 
| y | dirty\name ----+----------+---+------------ - 1 | Hello | 2 | t -(1 row) - --- all on one line -SELECT 3 AS x, 'Hello', 4 AS y, true AS "dirty\name" \gdesc \g - Column | Type -------------+--------- - x | integer - ?column? | text - y | integer - dirty\name | boolean -(4 rows) - - x | ?column? | y | dirty\name ----+----------+---+------------ - 3 | Hello | 4 | t -(1 row) - --- test for server bug #17983 with empty statement in aborted transaction -set search_path = default; -begin; -bogus; -ERROR: syntax error at or near "bogus" -LINE 1: bogus; - ^ -; -\gdesc -The command has no result, or the result has no columns. -rollback; --- \gexec -create temporary table gexec_test(a int, b text, c date, d float); -select format('create index on gexec_test(%I)', attname) -from pg_attribute -where attrelid = 'gexec_test'::regclass and attnum > 0 -order by attnum -\gexec -create index on gexec_test(a) -create index on gexec_test(b) -create index on gexec_test(c) -create index on gexec_test(d) --- \gexec should work in FETCH_COUNT mode too --- (though the fetch limit applies to the executed queries not the meta query) -\set FETCH_COUNT 1 -select 'select 1 as ones', 'select x.y, x.y*2 as double from generate_series(1,4) as x(y)' -union all -select 'drop table gexec_test', NULL -union all -select 'drop table gexec_test', 'select ''2000-01-01''::date as party_over' -\gexec -select 1 as ones - ones ------- - 1 -(1 row) - -select x.y, x.y*2 as double from generate_series(1,4) as x(y) - y | double ----+-------- - 1 | 2 - 2 | 4 - 3 | 6 - 4 | 8 -(4 rows) - -drop table gexec_test -drop table gexec_test -ERROR: table "gexec_test" does not exist -select '2000-01-01'::date as party_over - party_over ------------- - 01-01-2000 -(1 row) - -\unset FETCH_COUNT --- \setenv, \getenv --- ensure MYVAR isn't set -\setenv MYVAR --- in which case, reading it doesn't change the target -\getenv res MYVAR -\echo :res -:res --- now set it -\setenv MYVAR 'environment value' -\getenv res MYVAR -\echo :res -environment value --- show all pset options -\pset -border 1 -columns 0 -csv_fieldsep ',' -expanded off -fieldsep '|' -fieldsep_zero off -footer on -format aligned -linestyle ascii -null '' -numericlocale off -pager 1 -pager_min_lines 0 -recordsep '\n' -recordsep_zero off -tableattr -title -tuples_only off -unicode_border_linestyle single -unicode_column_linestyle single -unicode_header_linestyle single -xheader_width full --- test multi-line headers, wrapping, and newline indicators --- in aligned, unaligned, and wrapped formats -prepare q as select array_to_string(array_agg(repeat('x',2*n)),E'\n') as "ab - -c", array_to_string(array_agg(repeat('y',20-2*n)),E'\n') as "a -bc" from generate_series(1,10) as n(n) group by n>1 order by n>1; -\pset linestyle ascii -\pset expanded off -\pset columns 40 -\pset border 0 -\pset format unaligned -execute q; -ab - -c|a -bc -xx|yyyyyyyyyyyyyyyyyy -xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -(2 rows) -\pset format aligned -execute q; - ab + a + - + bc - c --------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx +yyyyyyyyyyyyyyyy + -xxxxxx +yyyyyyyyyyyyyy + -xxxxxxxx +yyyyyyyyyyyy + -xxxxxxxxxx +yyyyyyyyyy + -xxxxxxxxxxxx +yyyyyyyy + -xxxxxxxxxxxxxx +yyyyyy + -xxxxxxxxxxxxxxxx +yyyy + -xxxxxxxxxxxxxxxxxx +yy + -xxxxxxxxxxxxxxxxxxxx -(2 rows) - -\pset format wrapped -execute q; - ab + a + - + bc - c 
--------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx +yyyyyyyyyyyyyyyy + -xxxxxx +yyyyyyyyyyyyyy + -xxxxxxxx +yyyyyyyyyyyy + -xxxxxxxxxx +yyyyyyyyyy + -xxxxxxxxxxxx +yyyyyyyy + -xxxxxxxxxxxxxx +yyyyyy + -xxxxxxxxxxxxxxxx +yyyy + -xxxxxxxxxxxxxxxxxx +yy + -xxxxxxxxxxxxxxxxxxxx -(2 rows) - -\pset border 1 -\pset format unaligned -execute q; -ab - -c|a -bc -xx|yyyyyyyyyyyyyyyyyy -xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -(2 rows) -\pset format aligned -execute q; - ab +| a + - +| bc - c | -----------------------+-------------------- - xx | yyyyyyyyyyyyyyyyyy - xxxx +| yyyyyyyyyyyyyyyy + - xxxxxx +| yyyyyyyyyyyyyy + - xxxxxxxx +| yyyyyyyyyyyy + - xxxxxxxxxx +| yyyyyyyyyy + - xxxxxxxxxxxx +| yyyyyyyy + - xxxxxxxxxxxxxx +| yyyyyy + - xxxxxxxxxxxxxxxx +| yyyy + - xxxxxxxxxxxxxxxxxx +| yy + - xxxxxxxxxxxxxxxxxxxx | -(2 rows) - -\pset format wrapped -execute q; - ab +| a + - +| bc - c | --------------------+-------------------- - xx | yyyyyyyyyyyyyyyyyy - xxxx +| yyyyyyyyyyyyyyyy + - xxxxxx +| yyyyyyyyyyyyyy + - xxxxxxxx +| yyyyyyyyyyyy + - xxxxxxxxxx +| yyyyyyyyyy + - xxxxxxxxxxxx +| yyyyyyyy + - xxxxxxxxxxxxxx +| yyyyyy + - xxxxxxxxxxxxxxxx +| yyyy + - xxxxxxxxxxxxxxxxx.| yy + -.x +| - xxxxxxxxxxxxxxxxx.| -.xxx | -(2 rows) - -\pset border 2 -\pset format unaligned -execute q; -ab - -c|a -bc -xx|yyyyyyyyyyyyyyyyyy -xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -(2 rows) -\pset format aligned -execute q; -+----------------------+--------------------+ -| ab +| a +| -| +| bc | -| c | | -+----------------------+--------------------+ -| xx | yyyyyyyyyyyyyyyyyy | -| xxxx +| yyyyyyyyyyyyyyyy +| -| xxxxxx +| yyyyyyyyyyyyyy +| -| xxxxxxxx +| yyyyyyyyyyyy +| -| xxxxxxxxxx +| yyyyyyyyyy +| -| xxxxxxxxxxxx +| yyyyyyyy +| -| xxxxxxxxxxxxxx +| yyyyyy +| -| xxxxxxxxxxxxxxxx +| yyyy +| -| xxxxxxxxxxxxxxxxxx +| yy +| -| xxxxxxxxxxxxxxxxxxxx | | -+----------------------+--------------------+ -(2 rows) - -\pset format wrapped -execute q; -+-----------------+--------------------+ -| ab +| a +| -| +| bc | -| c | | -+-----------------+--------------------+ -| xx | yyyyyyyyyyyyyyyyyy | -| xxxx +| yyyyyyyyyyyyyyyy +| -| xxxxxx +| yyyyyyyyyyyyyy +| -| xxxxxxxx +| yyyyyyyyyyyy +| -| xxxxxxxxxx +| yyyyyyyyyy +| -| xxxxxxxxxxxx +| yyyyyyyy +| -| xxxxxxxxxxxxxx +| yyyyyy +| -| xxxxxxxxxxxxxxx.| yyyy +| -|.x +| yy +| -| xxxxxxxxxxxxxxx.| | -|.xxx +| | -| xxxxxxxxxxxxxxx.| | -|.xxxxx | | -+-----------------+--------------------+ -(2 rows) - -\pset expanded on -\pset columns 20 -\pset border 0 -\pset format unaligned -execute q; -ab - -c|xx -a -bc|yyyyyyyyyyyyyyyyyy - -ab - -c|xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx -a -bc|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -\pset format aligned -execute q; -* Record 1 -ab+ xx - + -c -a + yyyyyyyyyyyyyyyyyy -bc -* Record 2 -ab+ xxxx + - + xxxxxx + -c xxxxxxxx + - xxxxxxxxxx + - xxxxxxxxxxxx + - xxxxxxxxxxxxxx + - xxxxxxxxxxxxxxxx + - xxxxxxxxxxxxxxxxxx + - xxxxxxxxxxxxxxxxxxxx -a + yyyyyyyyyyyyyyyy + -bc yyyyyyyyyyyyyy + - yyyyyyyyyyyy + - yyyyyyyyyy + - yyyyyyyy + - yyyyyy + - yyyy + - yy + - - -\pset 
format wrapped -execute q; -* Record 1 -ab+ xx - + -c -a + yyyyyyyyyyyyyyy. -bc .yyy -* Record 2 -ab+ xxxx + - + xxxxxx + -c xxxxxxxx + - xxxxxxxxxx + - xxxxxxxxxxxx + - xxxxxxxxxxxxxx + - xxxxxxxxxxxxxxx. - .x + - xxxxxxxxxxxxxxx. - .xxx + - xxxxxxxxxxxxxxx. - .xxxxx -a + yyyyyyyyyyyyyyy. -bc .y + - yyyyyyyyyyyyyy + - yyyyyyyyyyyy + - yyyyyyyyyy + - yyyyyyyy + - yyyyyy + - yyyy + - yy + - - -\pset border 1 -\pset format unaligned -execute q; -ab - -c|xx -a -bc|yyyyyyyyyyyyyyyyyy - -ab - -c|xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx -a -bc|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -\pset format aligned -execute q; --[ RECORD 1 ]------------ -ab+| xx - +| -c | -a +| yyyyyyyyyyyyyyyyyy -bc | --[ RECORD 2 ]------------ -ab+| xxxx + - +| xxxxxx + -c | xxxxxxxx + - | xxxxxxxxxx + - | xxxxxxxxxxxx + - | xxxxxxxxxxxxxx + - | xxxxxxxxxxxxxxxx + - | xxxxxxxxxxxxxxxxxx + - | xxxxxxxxxxxxxxxxxxxx -a +| yyyyyyyyyyyyyyyy + -bc | yyyyyyyyyyyyyy + - | yyyyyyyyyyyy + - | yyyyyyyyyy + - | yyyyyyyy + - | yyyyyy + - | yyyy + - | yy + - | - -\pset format wrapped -execute q; --[ RECORD 1 ]------ -ab+| xx - +| -c | -a +| yyyyyyyyyyyyyy. -bc |.yyyy --[ RECORD 2 ]------ -ab+| xxxx + - +| xxxxxx + -c | xxxxxxxx + - | xxxxxxxxxx + - | xxxxxxxxxxxx + - | xxxxxxxxxxxxxx+ - | xxxxxxxxxxxxxx. - |.xx + - | xxxxxxxxxxxxxx. - |.xxxx + - | xxxxxxxxxxxxxx. - |.xxxxxx -a +| yyyyyyyyyyyyyy. -bc |.yy + - | yyyyyyyyyyyyyy+ - | yyyyyyyyyyyy + - | yyyyyyyyyy + - | yyyyyyyy + - | yyyyyy + - | yyyy + - | yy + - | - -\pset border 2 -\pset format unaligned -execute q; -ab - -c|xx -a -bc|yyyyyyyyyyyyyyyyyy - -ab - -c|xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx -a -bc|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -\pset format aligned -execute q; -+-[ RECORD 1 ]--------------+ -| ab+| xx | -| +| | -| c | | -| a +| yyyyyyyyyyyyyyyyyy | -| bc | | -+-[ RECORD 2 ]--------------+ -| ab+| xxxx +| -| +| xxxxxx +| -| c | xxxxxxxx +| -| | xxxxxxxxxx +| -| | xxxxxxxxxxxx +| -| | xxxxxxxxxxxxxx +| -| | xxxxxxxxxxxxxxxx +| -| | xxxxxxxxxxxxxxxxxx +| -| | xxxxxxxxxxxxxxxxxxxx | -| a +| yyyyyyyyyyyyyyyy +| -| bc | yyyyyyyyyyyyyy +| -| | yyyyyyyyyyyy +| -| | yyyyyyyyyy +| -| | yyyyyyyy +| -| | yyyyyy +| -| | yyyy +| -| | yy +| -| | | -+----+----------------------+ - -\pset format wrapped -execute q; -+-[ RECORD 1 ]-----+ -| ab+| xx | -| +| | -| c | | -| a +| yyyyyyyyyyy.| -| bc |.yyyyyyy | -+-[ RECORD 2 ]-----+ -| ab+| xxxx +| -| +| xxxxxx +| -| c | xxxxxxxx +| -| | xxxxxxxxxx +| -| | xxxxxxxxxxx.| -| |.x +| -| | xxxxxxxxxxx.| -| |.xxx +| -| | xxxxxxxxxxx.| -| |.xxxxx +| -| | xxxxxxxxxxx.| -| |.xxxxxxx +| -| | xxxxxxxxxxx.| -| |.xxxxxxxxx | -| a +| yyyyyyyyyyy.| -| bc |.yyyyy +| -| | yyyyyyyyyyy.| -| |.yyy +| -| | yyyyyyyyyyy.| -| |.y +| -| | yyyyyyyyyy +| -| | yyyyyyyy +| -| | yyyyyy +| -| | yyyy +| -| | yy +| -| | | -+----+-------------+ - -\pset linestyle old-ascii -\pset expanded off -\pset columns 40 -\pset border 0 -\pset format unaligned -execute q; -ab - -c|a -bc -xx|yyyyyyyyyyyyyyyyyy -xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -(2 rows) -\pset format aligned -execute q; - ab a - + bc - c + --------------------- 
------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx yyyyyyyyyyyyyyyy -xxxxxx yyyyyyyyyyyyyy -xxxxxxxx yyyyyyyyyyyy -xxxxxxxxxx yyyyyyyyyy -xxxxxxxxxxxx yyyyyyyy -xxxxxxxxxxxxxx yyyyyy -xxxxxxxxxxxxxxxx yyyy -xxxxxxxxxxxxxxxxxx yy -xxxxxxxxxxxxxxxxxxxx -(2 rows) - -\pset format wrapped -execute q; - ab a - + bc - c + --------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx yyyyyyyyyyyyyyyy -xxxxxx yyyyyyyyyyyyyy -xxxxxxxx yyyyyyyyyyyy -xxxxxxxxxx yyyyyyyyyy -xxxxxxxxxxxx yyyyyyyy -xxxxxxxxxxxxxx yyyyyy -xxxxxxxxxxxxxxxx yyyy -xxxxxxxxxxxxxxxxxx yy -xxxxxxxxxxxxxxxxxxxx -(2 rows) - -\pset border 1 -\pset format unaligned -execute q; -ab - -c|a -bc -xx|yyyyyyyyyyyyyyyyyy -xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -(2 rows) -\pset format aligned -execute q; - ab | a -+ |+ bc -+ c |+ -----------------------+-------------------- - xx | yyyyyyyyyyyyyyyyyy - xxxx | yyyyyyyyyyyyyyyy - xxxxxx : yyyyyyyyyyyyyy - xxxxxxxx : yyyyyyyyyyyy - xxxxxxxxxx : yyyyyyyyyy - xxxxxxxxxxxx : yyyyyyyy - xxxxxxxxxxxxxx : yyyyyy - xxxxxxxxxxxxxxxx : yyyy - xxxxxxxxxxxxxxxxxx : yy - xxxxxxxxxxxxxxxxxxxx : -(2 rows) - -\pset format wrapped -execute q; - ab | a -+ |+ bc -+ c |+ --------------------+-------------------- - xx | yyyyyyyyyyyyyyyyyy - xxxx | yyyyyyyyyyyyyyyy - xxxxxx : yyyyyyyyyyyyyy - xxxxxxxx : yyyyyyyyyyyy - xxxxxxxxxx : yyyyyyyyyy - xxxxxxxxxxxx : yyyyyyyy - xxxxxxxxxxxxxx : yyyyyy - xxxxxxxxxxxxxxxx : yyyy - xxxxxxxxxxxxxxxxx : yy - x : - xxxxxxxxxxxxxxxxx - xxx -(2 rows) - -\pset border 2 -\pset format unaligned -execute q; -ab - -c|a -bc -xx|yyyyyyyyyyyyyyyyyy -xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -(2 rows) -\pset format aligned -execute q; -+----------------------+--------------------+ -| ab | a | -|+ |+ bc | -|+ c |+ | -+----------------------+--------------------+ -| xx | yyyyyyyyyyyyyyyyyy | -| xxxx | yyyyyyyyyyyyyyyy | -| xxxxxx : yyyyyyyyyyyyyy | -| xxxxxxxx : yyyyyyyyyyyy | -| xxxxxxxxxx : yyyyyyyyyy | -| xxxxxxxxxxxx : yyyyyyyy | -| xxxxxxxxxxxxxx : yyyyyy | -| xxxxxxxxxxxxxxxx : yyyy | -| xxxxxxxxxxxxxxxxxx : yy | -| xxxxxxxxxxxxxxxxxxxx : | -+----------------------+--------------------+ -(2 rows) - -\pset format wrapped -execute q; -+-----------------+--------------------+ -| ab | a | -|+ |+ bc | -|+ c |+ | -+-----------------+--------------------+ -| xx | yyyyyyyyyyyyyyyyyy | -| xxxx | yyyyyyyyyyyyyyyy | -| xxxxxx : yyyyyyyyyyyyyy | -| xxxxxxxx : yyyyyyyyyyyy | -| xxxxxxxxxx : yyyyyyyyyy | -| xxxxxxxxxxxx : yyyyyyyy | -| xxxxxxxxxxxxxx : yyyyyy | -| xxxxxxxxxxxxxxx : yyyy | -| x : yy | -| xxxxxxxxxxxxxxx : | -| xxx | -| xxxxxxxxxxxxxxx | -| xxxxx | -+-----------------+--------------------+ -(2 rows) - -\pset expanded on -\pset columns 20 -\pset border 0 -\pset format unaligned -execute q; -ab - -c|xx -a -bc|yyyyyyyyyyyyyyyyyy - -ab - -c|xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx -a -bc|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -\pset format aligned -execute q; -* Record 1 - ab xx -+ -+c - a yyyyyyyyyyyyyyyyyy -+bc -* Record 2 - ab xxxx -+ xxxxxx -+c xxxxxxxx - xxxxxxxxxx - xxxxxxxxxxxx - xxxxxxxxxxxxxx - 
xxxxxxxxxxxxxxxx - xxxxxxxxxxxxxxxxxx - xxxxxxxxxxxxxxxxxxxx - a yyyyyyyyyyyyyyyy -+bc yyyyyyyyyyyyyy - yyyyyyyyyyyy - yyyyyyyyyy - yyyyyyyy - yyyyyy - yyyy - yy - - -\pset format wrapped -execute q; -* Record 1 - ab xx -+ -+c - a yyyyyyyyyyyyyyyy -+bc yy -* Record 2 - ab xxxx -+ xxxxxx -+c xxxxxxxx - xxxxxxxxxx - xxxxxxxxxxxx - xxxxxxxxxxxxxx - xxxxxxxxxxxxxxxx - xxxxxxxxxxxxxxxx - xx - xxxxxxxxxxxxxxxx - xxxx - a yyyyyyyyyyyyyyyy -+bc yyyyyyyyyyyyyy - yyyyyyyyyyyy - yyyyyyyyyy - yyyyyyyy - yyyyyy - yyyy - yy - - -\pset border 1 -\pset format unaligned -execute q; -ab - -c|xx -a -bc|yyyyyyyyyyyyyyyyyy - -ab - -c|xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx -a -bc|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -\pset format aligned -execute q; --[ RECORD 1 ]------------- - ab | xx -+ ; -+c ; - a | yyyyyyyyyyyyyyyyyy -+bc ; --[ RECORD 2 ]------------- - ab | xxxx -+ : xxxxxx -+c : xxxxxxxx - : xxxxxxxxxx - : xxxxxxxxxxxx - : xxxxxxxxxxxxxx - : xxxxxxxxxxxxxxxx - : xxxxxxxxxxxxxxxxxx - : xxxxxxxxxxxxxxxxxxxx - a | yyyyyyyyyyyyyyyy -+bc : yyyyyyyyyyyyyy - : yyyyyyyyyyyy - : yyyyyyyyyy - : yyyyyyyy - : yyyyyy - : yyyy - : yy - : - -\pset format wrapped -execute q; --[ RECORD 1 ]------- - ab | xx -+ ; -+c ; - a | yyyyyyyyyyyyyy -+bc ; yyyy --[ RECORD 2 ]------- - ab | xxxx -+ : xxxxxx -+c : xxxxxxxx - : xxxxxxxxxx - : xxxxxxxxxxxx - : xxxxxxxxxxxxxx - : xxxxxxxxxxxxxx - ; xx - : xxxxxxxxxxxxxx - ; xxxx - : xxxxxxxxxxxxxx - ; xxxxxx - a | yyyyyyyyyyyyyy -+bc ; yy - : yyyyyyyyyyyyyy - : yyyyyyyyyyyy - : yyyyyyyyyy - : yyyyyyyy - : yyyyyy - : yyyy - : yy - : - -\pset border 2 -\pset format unaligned -execute q; -ab - -c|xx -a -bc|yyyyyyyyyyyyyyyyyy - -ab - -c|xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx -a -bc|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -\pset format aligned -execute q; -+-[ RECORD 1 ]--------------+ -| ab | xx | -|+ ; | -|+c ; | -| a | yyyyyyyyyyyyyyyyyy | -|+bc ; | -+-[ RECORD 2 ]--------------+ -| ab | xxxx | -|+ : xxxxxx | -|+c : xxxxxxxx | -| : xxxxxxxxxx | -| : xxxxxxxxxxxx | -| : xxxxxxxxxxxxxx | -| : xxxxxxxxxxxxxxxx | -| : xxxxxxxxxxxxxxxxxx | -| : xxxxxxxxxxxxxxxxxxxx | -| a | yyyyyyyyyyyyyyyy | -|+bc : yyyyyyyyyyyyyy | -| : yyyyyyyyyyyy | -| : yyyyyyyyyy | -| : yyyyyyyy | -| : yyyyyy | -| : yyyy | -| : yy | -| : | -+----+----------------------+ - -\pset format wrapped -execute q; -+-[ RECORD 1 ]-----+ -| ab | xx | -|+ ; | -|+c ; | -| a | yyyyyyyyyyy | -|+bc ; yyyyyyy | -+-[ RECORD 2 ]-----+ -| ab | xxxx | -|+ : xxxxxx | -|+c : xxxxxxxx | -| : xxxxxxxxxx | -| : xxxxxxxxxxx | -| ; x | -| : xxxxxxxxxxx | -| ; xxx | -| : xxxxxxxxxxx | -| ; xxxxx | -| : xxxxxxxxxxx | -| ; xxxxxxx | -| : xxxxxxxxxxx | -| ; xxxxxxxxx | -| a | yyyyyyyyyyy | -|+bc ; yyyyy | -| : yyyyyyyyyyy | -| ; yyy | -| : yyyyyyyyyyy | -| ; y | -| : yyyyyyyyyy | -| : yyyyyyyy | -| : yyyyyy | -| : yyyy | -| : yy | -| : | -+----+-------------+ - -deallocate q; --- test single-line header and data -prepare q as select repeat('x',2*n) as "0123456789abcdef", repeat('y',20-2*n) as "0123456789" from generate_series(1,10) as n; -\pset linestyle ascii -\pset expanded off -\pset columns 40 -\pset border 0 -\pset format unaligned -execute q; -0123456789abcdef|0123456789 -xx|yyyyyyyyyyyyyyyyyy -xxxx|yyyyyyyyyyyyyyyy -xxxxxx|yyyyyyyyyyyyyy -xxxxxxxx|yyyyyyyyyyyy 
-xxxxxxxxxx|yyyyyyyyyy -xxxxxxxxxxxx|yyyyyyyy -xxxxxxxxxxxxxx|yyyyyy -xxxxxxxxxxxxxxxx|yyyy -xxxxxxxxxxxxxxxxxx|yy -xxxxxxxxxxxxxxxxxxxx| -(10 rows) -\pset format aligned -execute q; - 0123456789abcdef 0123456789 --------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx yyyyyyyyyyyyyyyy -xxxxxx yyyyyyyyyyyyyy -xxxxxxxx yyyyyyyyyyyy -xxxxxxxxxx yyyyyyyyyy -xxxxxxxxxxxx yyyyyyyy -xxxxxxxxxxxxxx yyyyyy -xxxxxxxxxxxxxxxx yyyy -xxxxxxxxxxxxxxxxxx yy -xxxxxxxxxxxxxxxxxxxx -(10 rows) - -\pset format wrapped -execute q; - 0123456789abcdef 0123456789 --------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx yyyyyyyyyyyyyyyy -xxxxxx yyyyyyyyyyyyyy -xxxxxxxx yyyyyyyyyyyy -xxxxxxxxxx yyyyyyyyyy -xxxxxxxxxxxx yyyyyyyy -xxxxxxxxxxxxxx yyyyyy -xxxxxxxxxxxxxxxx yyyy -xxxxxxxxxxxxxxxxxx yy -xxxxxxxxxxxxxxxxxxxx -(10 rows) - -\pset border 1 -\pset format unaligned -execute q; -0123456789abcdef|0123456789 -xx|yyyyyyyyyyyyyyyyyy -xxxx|yyyyyyyyyyyyyyyy -xxxxxx|yyyyyyyyyyyyyy -xxxxxxxx|yyyyyyyyyyyy -xxxxxxxxxx|yyyyyyyyyy -xxxxxxxxxxxx|yyyyyyyy -xxxxxxxxxxxxxx|yyyyyy -xxxxxxxxxxxxxxxx|yyyy -xxxxxxxxxxxxxxxxxx|yy -xxxxxxxxxxxxxxxxxxxx| -(10 rows) -\pset format aligned -execute q; - 0123456789abcdef | 0123456789 -----------------------+-------------------- - xx | yyyyyyyyyyyyyyyyyy - xxxx | yyyyyyyyyyyyyyyy - xxxxxx | yyyyyyyyyyyyyy - xxxxxxxx | yyyyyyyyyyyy - xxxxxxxxxx | yyyyyyyyyy - xxxxxxxxxxxx | yyyyyyyy - xxxxxxxxxxxxxx | yyyyyy - xxxxxxxxxxxxxxxx | yyyy - xxxxxxxxxxxxxxxxxx | yy - xxxxxxxxxxxxxxxxxxxx | -(10 rows) - -\pset format wrapped -execute q; - 0123456789abcdef | 0123456789 ----------------------+------------------ - xx | yyyyyyyyyyyyyyyy. - |.yy - xxxx | yyyyyyyyyyyyyyyy - xxxxxx | yyyyyyyyyyyyyy - xxxxxxxx | yyyyyyyyyyyy - xxxxxxxxxx | yyyyyyyyyy - xxxxxxxxxxxx | yyyyyyyy - xxxxxxxxxxxxxx | yyyyyy - xxxxxxxxxxxxxxxx | yyyy - xxxxxxxxxxxxxxxxxx | yy - xxxxxxxxxxxxxxxxxxx.| -.x | -(10 rows) - -\pset border 2 -\pset format unaligned -execute q; -0123456789abcdef|0123456789 -xx|yyyyyyyyyyyyyyyyyy -xxxx|yyyyyyyyyyyyyyyy -xxxxxx|yyyyyyyyyyyyyy -xxxxxxxx|yyyyyyyyyyyy -xxxxxxxxxx|yyyyyyyyyy -xxxxxxxxxxxx|yyyyyyyy -xxxxxxxxxxxxxx|yyyyyy -xxxxxxxxxxxxxxxx|yyyy -xxxxxxxxxxxxxxxxxx|yy -xxxxxxxxxxxxxxxxxxxx| -(10 rows) -\pset format aligned -execute q; -+----------------------+--------------------+ -| 0123456789abcdef | 0123456789 | -+----------------------+--------------------+ -| xx | yyyyyyyyyyyyyyyyyy | -| xxxx | yyyyyyyyyyyyyyyy | -| xxxxxx | yyyyyyyyyyyyyy | -| xxxxxxxx | yyyyyyyyyyyy | -| xxxxxxxxxx | yyyyyyyyyy | -| xxxxxxxxxxxx | yyyyyyyy | -| xxxxxxxxxxxxxx | yyyyyy | -| xxxxxxxxxxxxxxxx | yyyy | -| xxxxxxxxxxxxxxxxxx | yy | -| xxxxxxxxxxxxxxxxxxxx | | -+----------------------+--------------------+ -(10 rows) - -\pset format wrapped -execute q; -+--------------------+-----------------+ -| 0123456789abcdef | 0123456789 | -+--------------------+-----------------+ -| xx | yyyyyyyyyyyyyyy.| -| |.yyy | -| xxxx | yyyyyyyyyyyyyyy.| -| |.y | -| xxxxxx | yyyyyyyyyyyyyy | -| xxxxxxxx | yyyyyyyyyyyy | -| xxxxxxxxxx | yyyyyyyyyy | -| xxxxxxxxxxxx | yyyyyyyy | -| xxxxxxxxxxxxxx | yyyyyy | -| xxxxxxxxxxxxxxxx | yyyy | -| xxxxxxxxxxxxxxxxxx | yy | -| xxxxxxxxxxxxxxxxxx.| | -|.xx | | -+--------------------+-----------------+ -(10 rows) - -\pset expanded on -\pset columns 30 -\pset border 0 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - 
-0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; -* Record 1 -0123456789abcdef xx -0123456789 yyyyyyyyyyyyyyyyyy -* Record 2 -0123456789abcdef xxxx -0123456789 yyyyyyyyyyyyyyyy -* Record 3 -0123456789abcdef xxxxxx -0123456789 yyyyyyyyyyyyyy -* Record 4 -0123456789abcdef xxxxxxxx -0123456789 yyyyyyyyyyyy -* Record 5 -0123456789abcdef xxxxxxxxxx -0123456789 yyyyyyyyyy -* Record 6 -0123456789abcdef xxxxxxxxxxxx -0123456789 yyyyyyyy -* Record 7 -0123456789abcdef xxxxxxxxxxxxxx -0123456789 yyyyyy -* Record 8 -0123456789abcdef xxxxxxxxxxxxxxxx -0123456789 yyyy -* Record 9 -0123456789abcdef xxxxxxxxxxxxxxxxxx -0123456789 yy -* Record 10 -0123456789abcdef xxxxxxxxxxxxxxxxxxxx -0123456789 - -\pset format wrapped -execute q; -* Record 1 -0123456789abcdef xx -0123456789 yyyyyyyyyyyy. - .yyyyyy -* Record 2 -0123456789abcdef xxxx -0123456789 yyyyyyyyyyyy. - .yyyy -* Record 3 -0123456789abcdef xxxxxx -0123456789 yyyyyyyyyyyy. - .yy -* Record 4 -0123456789abcdef xxxxxxxx -0123456789 yyyyyyyyyyyy -* Record 5 -0123456789abcdef xxxxxxxxxx -0123456789 yyyyyyyyyy -* Record 6 -0123456789abcdef xxxxxxxxxxxx -0123456789 yyyyyyyy -* Record 7 -0123456789abcdef xxxxxxxxxxxx. - .xx -0123456789 yyyyyy -* Record 8 -0123456789abcdef xxxxxxxxxxxx. - .xxxx -0123456789 yyyy -* Record 9 -0123456789abcdef xxxxxxxxxxxx. - .xxxxxx -0123456789 yy -* Record 10 -0123456789abcdef xxxxxxxxxxxx. - .xxxxxxxx -0123456789 - -\pset border 1 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; --[ RECORD 1 ]----+--------------------- -0123456789abcdef | xx -0123456789 | yyyyyyyyyyyyyyyyyy --[ RECORD 2 ]----+--------------------- -0123456789abcdef | xxxx -0123456789 | yyyyyyyyyyyyyyyy --[ RECORD 3 ]----+--------------------- -0123456789abcdef | xxxxxx -0123456789 | yyyyyyyyyyyyyy --[ RECORD 4 ]----+--------------------- -0123456789abcdef | xxxxxxxx -0123456789 | yyyyyyyyyyyy --[ RECORD 5 ]----+--------------------- -0123456789abcdef | xxxxxxxxxx -0123456789 | yyyyyyyyyy --[ RECORD 6 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxx -0123456789 | yyyyyyyy --[ RECORD 7 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxx -0123456789 | yyyyyy --[ RECORD 8 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxx -0123456789 | yyyy --[ RECORD 9 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxx -0123456789 | yy --[ RECORD 10 ]---+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxxxx -0123456789 | - -\pset format wrapped -execute q; --[ RECORD 1 ]----+----------- -0123456789abcdef | xx -0123456789 | yyyyyyyyyy. 
- |.yyyyyyyy --[ RECORD 2 ]----+----------- -0123456789abcdef | xxxx -0123456789 | yyyyyyyyyy. - |.yyyyyy --[ RECORD 3 ]----+----------- -0123456789abcdef | xxxxxx -0123456789 | yyyyyyyyyy. - |.yyyy --[ RECORD 4 ]----+----------- -0123456789abcdef | xxxxxxxx -0123456789 | yyyyyyyyyy. - |.yy --[ RECORD 5 ]----+----------- -0123456789abcdef | xxxxxxxxxx -0123456789 | yyyyyyyyyy --[ RECORD 6 ]----+----------- -0123456789abcdef | xxxxxxxxxx. - |.xx -0123456789 | yyyyyyyy --[ RECORD 7 ]----+----------- -0123456789abcdef | xxxxxxxxxx. - |.xxxx -0123456789 | yyyyyy --[ RECORD 8 ]----+----------- -0123456789abcdef | xxxxxxxxxx. - |.xxxxxx -0123456789 | yyyy --[ RECORD 9 ]----+----------- -0123456789abcdef | xxxxxxxxxx. - |.xxxxxxxx -0123456789 | yy --[ RECORD 10 ]---+----------- -0123456789abcdef | xxxxxxxxxx. - |.xxxxxxxxxx -0123456789 | - -\pset border 2 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; -+-[ RECORD 1 ]-----+----------------------+ -| 0123456789abcdef | xx | -| 0123456789 | yyyyyyyyyyyyyyyyyy | -+-[ RECORD 2 ]-----+----------------------+ -| 0123456789abcdef | xxxx | -| 0123456789 | yyyyyyyyyyyyyyyy | -+-[ RECORD 3 ]-----+----------------------+ -| 0123456789abcdef | xxxxxx | -| 0123456789 | yyyyyyyyyyyyyy | -+-[ RECORD 4 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxx | -| 0123456789 | yyyyyyyyyyyy | -+-[ RECORD 5 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxx | -| 0123456789 | yyyyyyyyyy | -+-[ RECORD 6 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxx | -| 0123456789 | yyyyyyyy | -+-[ RECORD 7 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxx | -| 0123456789 | yyyyyy | -+-[ RECORD 8 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxx | -| 0123456789 | yyyy | -+-[ RECORD 9 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxxx | -| 0123456789 | yy | -+-[ RECORD 10 ]----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxxxxx | -| 0123456789 | | -+------------------+----------------------+ - -\pset format wrapped -execute q; -+-[ RECORD 1 ]-----+---------+ -| 0123456789abcdef | xx | -| 0123456789 | yyyyyyy.| -| |.yyyyyyy.| -| |.yyyy | -+-[ RECORD 2 ]-----+---------+ -| 0123456789abcdef | xxxx | -| 0123456789 | yyyyyyy.| -| |.yyyyyyy.| -| |.yy | -+-[ RECORD 3 ]-----+---------+ -| 0123456789abcdef | xxxxxx | -| 0123456789 | yyyyyyy.| -| |.yyyyyyy | -+-[ RECORD 4 ]-----+---------+ -| 0123456789abcdef | xxxxxxx.| -| |.x | -| 0123456789 | yyyyyyy.| -| |.yyyyy | -+-[ RECORD 5 ]-----+---------+ -| 0123456789abcdef | xxxxxxx.| -| |.xxx | -| 0123456789 | yyyyyyy.| -| |.yyy | -+-[ RECORD 6 ]-----+---------+ -| 0123456789abcdef | xxxxxxx.| -| |.xxxxx | -| 0123456789 | yyyyyyy.| -| |.y | -+-[ RECORD 7 ]-----+---------+ -| 0123456789abcdef | xxxxxxx.| -| |.xxxxxxx | -| 0123456789 | yyyyyy | -+-[ RECORD 8 ]-----+---------+ -| 0123456789abcdef | xxxxxxx.| -| |.xxxxxxx.| -| |.xx | -| 0123456789 | yyyy | -+-[ RECORD 9 ]-----+---------+ -| 
0123456789abcdef | xxxxxxx.| -| |.xxxxxxx.| -| |.xxxx | -| 0123456789 | yy | -+-[ RECORD 10 ]----+---------+ -| 0123456789abcdef | xxxxxxx.| -| |.xxxxxxx.| -| |.xxxxxx | -| 0123456789 | | -+------------------+---------+ - -\pset expanded on -\pset columns 20 -\pset border 0 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; -* Record 1 -0123456789abcdef xx -0123456789 yyyyyyyyyyyyyyyyyy -* Record 2 -0123456789abcdef xxxx -0123456789 yyyyyyyyyyyyyyyy -* Record 3 -0123456789abcdef xxxxxx -0123456789 yyyyyyyyyyyyyy -* Record 4 -0123456789abcdef xxxxxxxx -0123456789 yyyyyyyyyyyy -* Record 5 -0123456789abcdef xxxxxxxxxx -0123456789 yyyyyyyyyy -* Record 6 -0123456789abcdef xxxxxxxxxxxx -0123456789 yyyyyyyy -* Record 7 -0123456789abcdef xxxxxxxxxxxxxx -0123456789 yyyyyy -* Record 8 -0123456789abcdef xxxxxxxxxxxxxxxx -0123456789 yyyy -* Record 9 -0123456789abcdef xxxxxxxxxxxxxxxxxx -0123456789 yy -* Record 10 -0123456789abcdef xxxxxxxxxxxxxxxxxxxx -0123456789 - -\pset format wrapped -execute q; -* Record 1 -0123456789abcdef xx -0123456789 yyy. - .yyy. - .yyy. - .yyy. - .yyy. - .yyy -* Record 2 -0123456789abcdef xxx. - .x -0123456789 yyy. - .yyy. - .yyy. - .yyy. - .yyy. - .y -* Record 3 -0123456789abcdef xxx. - .xxx -0123456789 yyy. - .yyy. - .yyy. - .yyy. - .yy -* Record 4 -0123456789abcdef xxx. - .xxx. - .xx -0123456789 yyy. - .yyy. - .yyy. - .yyy -* Record 5 -0123456789abcdef xxx. - .xxx. - .xxx. - .x -0123456789 yyy. - .yyy. - .yyy. - .y -* Record 6 -0123456789abcdef xxx. - .xxx. - .xxx. - .xxx -0123456789 yyy. - .yyy. - .yy -* Record 7 -0123456789abcdef xxx. - .xxx. - .xxx. - .xxx. - .xx -0123456789 yyy. - .yyy -* Record 8 -0123456789abcdef xxx. - .xxx. - .xxx. - .xxx. - .xxx. - .x -0123456789 yyy. - .y -* Record 9 -0123456789abcdef xxx. - .xxx. - .xxx. - .xxx. - .xxx. - .xxx -0123456789 yy -* Record 10 -0123456789abcdef xxx. - .xxx. - .xxx. - .xxx. - .xxx. - .xxx. 
- .xx -0123456789 - -\pset border 1 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; --[ RECORD 1 ]----+--------------------- -0123456789abcdef | xx -0123456789 | yyyyyyyyyyyyyyyyyy --[ RECORD 2 ]----+--------------------- -0123456789abcdef | xxxx -0123456789 | yyyyyyyyyyyyyyyy --[ RECORD 3 ]----+--------------------- -0123456789abcdef | xxxxxx -0123456789 | yyyyyyyyyyyyyy --[ RECORD 4 ]----+--------------------- -0123456789abcdef | xxxxxxxx -0123456789 | yyyyyyyyyyyy --[ RECORD 5 ]----+--------------------- -0123456789abcdef | xxxxxxxxxx -0123456789 | yyyyyyyyyy --[ RECORD 6 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxx -0123456789 | yyyyyyyy --[ RECORD 7 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxx -0123456789 | yyyyyy --[ RECORD 8 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxx -0123456789 | yyyy --[ RECORD 9 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxx -0123456789 | yy --[ RECORD 10 ]---+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxxxx -0123456789 | - -\pset format wrapped -execute q; --[ RECORD 1 ]----+---- -0123456789abcdef | xx -0123456789 | yyy. - |.yyy. - |.yyy. - |.yyy. - |.yyy. - |.yyy --[ RECORD 2 ]----+---- -0123456789abcdef | xxx. - |.x -0123456789 | yyy. - |.yyy. - |.yyy. - |.yyy. - |.yyy. - |.y --[ RECORD 3 ]----+---- -0123456789abcdef | xxx. - |.xxx -0123456789 | yyy. - |.yyy. - |.yyy. - |.yyy. - |.yy --[ RECORD 4 ]----+---- -0123456789abcdef | xxx. - |.xxx. - |.xx -0123456789 | yyy. - |.yyy. - |.yyy. - |.yyy --[ RECORD 5 ]----+---- -0123456789abcdef | xxx. - |.xxx. - |.xxx. - |.x -0123456789 | yyy. - |.yyy. - |.yyy. - |.y --[ RECORD 6 ]----+---- -0123456789abcdef | xxx. - |.xxx. - |.xxx. - |.xxx -0123456789 | yyy. - |.yyy. - |.yy --[ RECORD 7 ]----+---- -0123456789abcdef | xxx. - |.xxx. - |.xxx. - |.xxx. - |.xx -0123456789 | yyy. - |.yyy --[ RECORD 8 ]----+---- -0123456789abcdef | xxx. - |.xxx. - |.xxx. - |.xxx. - |.xxx. - |.x -0123456789 | yyy. - |.y --[ RECORD 9 ]----+---- -0123456789abcdef | xxx. - |.xxx. - |.xxx. - |.xxx. - |.xxx. - |.xxx -0123456789 | yy --[ RECORD 10 ]---+---- -0123456789abcdef | xxx. - |.xxx. - |.xxx. - |.xxx. - |.xxx. - |.xxx. 
- |.xx -0123456789 | - -\pset border 2 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; -+-[ RECORD 1 ]-----+----------------------+ -| 0123456789abcdef | xx | -| 0123456789 | yyyyyyyyyyyyyyyyyy | -+-[ RECORD 2 ]-----+----------------------+ -| 0123456789abcdef | xxxx | -| 0123456789 | yyyyyyyyyyyyyyyy | -+-[ RECORD 3 ]-----+----------------------+ -| 0123456789abcdef | xxxxxx | -| 0123456789 | yyyyyyyyyyyyyy | -+-[ RECORD 4 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxx | -| 0123456789 | yyyyyyyyyyyy | -+-[ RECORD 5 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxx | -| 0123456789 | yyyyyyyyyy | -+-[ RECORD 6 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxx | -| 0123456789 | yyyyyyyy | -+-[ RECORD 7 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxx | -| 0123456789 | yyyyyy | -+-[ RECORD 8 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxx | -| 0123456789 | yyyy | -+-[ RECORD 9 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxxx | -| 0123456789 | yy | -+-[ RECORD 10 ]----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxxxxx | -| 0123456789 | | -+------------------+----------------------+ - -\pset format wrapped -execute q; -+-[ RECORD 1 ]-----+-----+ -| 0123456789abcdef | xx | -| 0123456789 | yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy | -+-[ RECORD 2 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.x | -| 0123456789 | yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy.| -| |.y | -+-[ RECORD 3 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx | -| 0123456789 | yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy.| -| |.yy | -+-[ RECORD 4 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xx | -| 0123456789 | yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy | -+-[ RECORD 5 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xxx.| -| |.x | -| 0123456789 | yyy.| -| |.yyy.| -| |.yyy.| -| |.y | -+-[ RECORD 6 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx | -| 0123456789 | yyy.| -| |.yyy.| -| |.yy | -+-[ RECORD 7 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xx | -| 0123456789 | yyy.| -| |.yyy | -+-[ RECORD 8 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.x | -| 0123456789 | yyy.| -| |.y | -+-[ RECORD 9 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx | -| 0123456789 | yy | -+-[ RECORD 10 ]----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xx | -| 0123456789 | | -+------------------+-----+ - -\pset linestyle old-ascii -\pset expanded off -\pset columns 40 -\pset border 0 -\pset format unaligned -execute q; -0123456789abcdef|0123456789 -xx|yyyyyyyyyyyyyyyyyy -xxxx|yyyyyyyyyyyyyyyy -xxxxxx|yyyyyyyyyyyyyy -xxxxxxxx|yyyyyyyyyyyy -xxxxxxxxxx|yyyyyyyyyy -xxxxxxxxxxxx|yyyyyyyy -xxxxxxxxxxxxxx|yyyyyy -xxxxxxxxxxxxxxxx|yyyy -xxxxxxxxxxxxxxxxxx|yy 
-xxxxxxxxxxxxxxxxxxxx| -(10 rows) -\pset format aligned -execute q; - 0123456789abcdef 0123456789 --------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx yyyyyyyyyyyyyyyy -xxxxxx yyyyyyyyyyyyyy -xxxxxxxx yyyyyyyyyyyy -xxxxxxxxxx yyyyyyyyyy -xxxxxxxxxxxx yyyyyyyy -xxxxxxxxxxxxxx yyyyyy -xxxxxxxxxxxxxxxx yyyy -xxxxxxxxxxxxxxxxxx yy -xxxxxxxxxxxxxxxxxxxx -(10 rows) - -\pset format wrapped -execute q; - 0123456789abcdef 0123456789 --------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx yyyyyyyyyyyyyyyy -xxxxxx yyyyyyyyyyyyyy -xxxxxxxx yyyyyyyyyyyy -xxxxxxxxxx yyyyyyyyyy -xxxxxxxxxxxx yyyyyyyy -xxxxxxxxxxxxxx yyyyyy -xxxxxxxxxxxxxxxx yyyy -xxxxxxxxxxxxxxxxxx yy -xxxxxxxxxxxxxxxxxxxx -(10 rows) - -\pset border 1 -\pset format unaligned -execute q; -0123456789abcdef|0123456789 -xx|yyyyyyyyyyyyyyyyyy -xxxx|yyyyyyyyyyyyyyyy -xxxxxx|yyyyyyyyyyyyyy -xxxxxxxx|yyyyyyyyyyyy -xxxxxxxxxx|yyyyyyyyyy -xxxxxxxxxxxx|yyyyyyyy -xxxxxxxxxxxxxx|yyyyyy -xxxxxxxxxxxxxxxx|yyyy -xxxxxxxxxxxxxxxxxx|yy -xxxxxxxxxxxxxxxxxxxx| -(10 rows) -\pset format aligned -execute q; - 0123456789abcdef | 0123456789 -----------------------+-------------------- - xx | yyyyyyyyyyyyyyyyyy - xxxx | yyyyyyyyyyyyyyyy - xxxxxx | yyyyyyyyyyyyyy - xxxxxxxx | yyyyyyyyyyyy - xxxxxxxxxx | yyyyyyyyyy - xxxxxxxxxxxx | yyyyyyyy - xxxxxxxxxxxxxx | yyyyyy - xxxxxxxxxxxxxxxx | yyyy - xxxxxxxxxxxxxxxxxx | yy - xxxxxxxxxxxxxxxxxxxx | -(10 rows) - -\pset format wrapped -execute q; - 0123456789abcdef | 0123456789 ----------------------+------------------ - xx | yyyyyyyyyyyyyyyy - ; yy - xxxx | yyyyyyyyyyyyyyyy - xxxxxx | yyyyyyyyyyyyyy - xxxxxxxx | yyyyyyyyyyyy - xxxxxxxxxx | yyyyyyyyyy - xxxxxxxxxxxx | yyyyyyyy - xxxxxxxxxxxxxx | yyyyyy - xxxxxxxxxxxxxxxx | yyyy - xxxxxxxxxxxxxxxxxx | yy - xxxxxxxxxxxxxxxxxxx | - x -(10 rows) - -\pset border 2 -\pset format unaligned -execute q; -0123456789abcdef|0123456789 -xx|yyyyyyyyyyyyyyyyyy -xxxx|yyyyyyyyyyyyyyyy -xxxxxx|yyyyyyyyyyyyyy -xxxxxxxx|yyyyyyyyyyyy -xxxxxxxxxx|yyyyyyyyyy -xxxxxxxxxxxx|yyyyyyyy -xxxxxxxxxxxxxx|yyyyyy -xxxxxxxxxxxxxxxx|yyyy -xxxxxxxxxxxxxxxxxx|yy -xxxxxxxxxxxxxxxxxxxx| -(10 rows) -\pset format aligned -execute q; -+----------------------+--------------------+ -| 0123456789abcdef | 0123456789 | -+----------------------+--------------------+ -| xx | yyyyyyyyyyyyyyyyyy | -| xxxx | yyyyyyyyyyyyyyyy | -| xxxxxx | yyyyyyyyyyyyyy | -| xxxxxxxx | yyyyyyyyyyyy | -| xxxxxxxxxx | yyyyyyyyyy | -| xxxxxxxxxxxx | yyyyyyyy | -| xxxxxxxxxxxxxx | yyyyyy | -| xxxxxxxxxxxxxxxx | yyyy | -| xxxxxxxxxxxxxxxxxx | yy | -| xxxxxxxxxxxxxxxxxxxx | | -+----------------------+--------------------+ -(10 rows) - -\pset format wrapped -execute q; -+--------------------+-----------------+ -| 0123456789abcdef | 0123456789 | -+--------------------+-----------------+ -| xx | yyyyyyyyyyyyyyy | -| ; yyy | -| xxxx | yyyyyyyyyyyyyyy | -| ; y | -| xxxxxx | yyyyyyyyyyyyyy | -| xxxxxxxx | yyyyyyyyyyyy | -| xxxxxxxxxx | yyyyyyyyyy | -| xxxxxxxxxxxx | yyyyyyyy | -| xxxxxxxxxxxxxx | yyyyyy | -| xxxxxxxxxxxxxxxx | yyyy | -| xxxxxxxxxxxxxxxxxx | yy | -| xxxxxxxxxxxxxxxxxx | | -| xx | -+--------------------+-----------------+ -(10 rows) - -\pset expanded on -\pset border 0 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx 
-0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; -* Record 1 -0123456789abcdef xx -0123456789 yyyyyyyyyyyyyyyyyy -* Record 2 -0123456789abcdef xxxx -0123456789 yyyyyyyyyyyyyyyy -* Record 3 -0123456789abcdef xxxxxx -0123456789 yyyyyyyyyyyyyy -* Record 4 -0123456789abcdef xxxxxxxx -0123456789 yyyyyyyyyyyy -* Record 5 -0123456789abcdef xxxxxxxxxx -0123456789 yyyyyyyyyy -* Record 6 -0123456789abcdef xxxxxxxxxxxx -0123456789 yyyyyyyy -* Record 7 -0123456789abcdef xxxxxxxxxxxxxx -0123456789 yyyyyy -* Record 8 -0123456789abcdef xxxxxxxxxxxxxxxx -0123456789 yyyy -* Record 9 -0123456789abcdef xxxxxxxxxxxxxxxxxx -0123456789 yy -* Record 10 -0123456789abcdef xxxxxxxxxxxxxxxxxxxx -0123456789 - -\pset format wrapped -execute q; -* Record 1 -0123456789abcdef xx -0123456789 yyyyyyyyyyyyyyyyyy -* Record 2 -0123456789abcdef xxxx -0123456789 yyyyyyyyyyyyyyyy -* Record 3 -0123456789abcdef xxxxxx -0123456789 yyyyyyyyyyyyyy -* Record 4 -0123456789abcdef xxxxxxxx -0123456789 yyyyyyyyyyyy -* Record 5 -0123456789abcdef xxxxxxxxxx -0123456789 yyyyyyyyyy -* Record 6 -0123456789abcdef xxxxxxxxxxxx -0123456789 yyyyyyyy -* Record 7 -0123456789abcdef xxxxxxxxxxxxxx -0123456789 yyyyyy -* Record 8 -0123456789abcdef xxxxxxxxxxxxxxxx -0123456789 yyyy -* Record 9 -0123456789abcdef xxxxxxxxxxxxxxxxxx -0123456789 yy -* Record 10 -0123456789abcdef xxxxxxxxxxxxxxxxxxxx -0123456789 - -\pset border 1 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; --[ RECORD 1 ]----+--------------------- -0123456789abcdef | xx -0123456789 | yyyyyyyyyyyyyyyyyy --[ RECORD 2 ]----+--------------------- -0123456789abcdef | xxxx -0123456789 | yyyyyyyyyyyyyyyy --[ RECORD 3 ]----+--------------------- -0123456789abcdef | xxxxxx -0123456789 | yyyyyyyyyyyyyy --[ RECORD 4 ]----+--------------------- -0123456789abcdef | xxxxxxxx -0123456789 | yyyyyyyyyyyy --[ RECORD 5 ]----+--------------------- -0123456789abcdef | xxxxxxxxxx -0123456789 | yyyyyyyyyy --[ RECORD 6 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxx -0123456789 | yyyyyyyy --[ RECORD 7 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxx -0123456789 | yyyyyy --[ RECORD 8 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxx -0123456789 | yyyy --[ RECORD 9 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxx -0123456789 | yy --[ RECORD 10 ]---+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxxxx -0123456789 | - -\pset format wrapped -execute q; --[ RECORD 1 ]----+--------------------- -0123456789abcdef | xx -0123456789 | yyyyyyyyyyyyyyyyyy --[ RECORD 2 ]----+--------------------- -0123456789abcdef | xxxx -0123456789 | yyyyyyyyyyyyyyyy --[ RECORD 3 ]----+--------------------- -0123456789abcdef | xxxxxx -0123456789 | yyyyyyyyyyyyyy --[ RECORD 4 ]----+--------------------- 
-0123456789abcdef | xxxxxxxx -0123456789 | yyyyyyyyyyyy --[ RECORD 5 ]----+--------------------- -0123456789abcdef | xxxxxxxxxx -0123456789 | yyyyyyyyyy --[ RECORD 6 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxx -0123456789 | yyyyyyyy --[ RECORD 7 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxx -0123456789 | yyyyyy --[ RECORD 8 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxx -0123456789 | yyyy --[ RECORD 9 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxx -0123456789 | yy --[ RECORD 10 ]---+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxxxx -0123456789 | - -\pset border 2 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; -+-[ RECORD 1 ]-----+----------------------+ -| 0123456789abcdef | xx | -| 0123456789 | yyyyyyyyyyyyyyyyyy | -+-[ RECORD 2 ]-----+----------------------+ -| 0123456789abcdef | xxxx | -| 0123456789 | yyyyyyyyyyyyyyyy | -+-[ RECORD 3 ]-----+----------------------+ -| 0123456789abcdef | xxxxxx | -| 0123456789 | yyyyyyyyyyyyyy | -+-[ RECORD 4 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxx | -| 0123456789 | yyyyyyyyyyyy | -+-[ RECORD 5 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxx | -| 0123456789 | yyyyyyyyyy | -+-[ RECORD 6 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxx | -| 0123456789 | yyyyyyyy | -+-[ RECORD 7 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxx | -| 0123456789 | yyyyyy | -+-[ RECORD 8 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxx | -| 0123456789 | yyyy | -+-[ RECORD 9 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxxx | -| 0123456789 | yy | -+-[ RECORD 10 ]----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxxxxx | -| 0123456789 | | -+------------------+----------------------+ - -\pset format wrapped -execute q; -+-[ RECORD 1 ]-----+-------------------+ -| 0123456789abcdef | xx | -| 0123456789 | yyyyyyyyyyyyyyyyy | -| ; y | -+-[ RECORD 2 ]-----+-------------------+ -| 0123456789abcdef | xxxx | -| 0123456789 | yyyyyyyyyyyyyyyy | -+-[ RECORD 3 ]-----+-------------------+ -| 0123456789abcdef | xxxxxx | -| 0123456789 | yyyyyyyyyyyyyy | -+-[ RECORD 4 ]-----+-------------------+ -| 0123456789abcdef | xxxxxxxx | -| 0123456789 | yyyyyyyyyyyy | -+-[ RECORD 5 ]-----+-------------------+ -| 0123456789abcdef | xxxxxxxxxx | -| 0123456789 | yyyyyyyyyy | -+-[ RECORD 6 ]-----+-------------------+ -| 0123456789abcdef | xxxxxxxxxxxx | -| 0123456789 | yyyyyyyy | -+-[ RECORD 7 ]-----+-------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxx | -| 0123456789 | yyyyyy | -+-[ RECORD 8 ]-----+-------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxx | -| 0123456789 | yyyy | -+-[ RECORD 9 ]-----+-------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxx | -| ; x | -| 0123456789 | yy | -+-[ RECORD 10 ]----+-------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxx | -| ; xxx | -| 0123456789 | | 
-+------------------+-------------------+ - -deallocate q; -\pset linestyle ascii -\pset border 1 --- support table for output-format tests (useful to create a footer) -create table psql_serial_tab (id serial); --- test header/footer/tuples_only behavior in aligned/unaligned/wrapped cases -\pset format aligned -\pset expanded off -\d psql_serial_tab_id_seq - Sequence "public.psql_serial_tab_id_seq" - Type | Start | Minimum | Maximum | Increment | Cycles? | Cache ----------+-------+---------+------------+-----------+---------+------- - integer | 1 | 1 | 2147483647 | 1 | no | 1 -Owned by: public.psql_serial_tab.id - -\pset tuples_only true -\df exp - pg_catalog | exp | double precision | double precision | func - pg_catalog | exp | numeric | numeric | func - -\pset tuples_only false -\pset expanded on -\d psql_serial_tab_id_seq -Sequence "public.psql_serial_tab_id_seq" --[ RECORD 1 ]--------- -Type | integer -Start | 1 -Minimum | 1 -Maximum | 2147483647 -Increment | 1 -Cycles? | no -Cache | 1 - -Owned by: public.psql_serial_tab.id - -\pset tuples_only true -\df exp -Schema | pg_catalog -Name | exp -Result data type | double precision -Argument data types | double precision -Type | func ---------------------+----------------- -Schema | pg_catalog -Name | exp -Result data type | numeric -Argument data types | numeric -Type | func - -\pset tuples_only false --- empty table is a special case for this format -select 1 where false; -(0 rows) - -\pset format unaligned -\pset expanded off -\d psql_serial_tab_id_seq -Sequence "public.psql_serial_tab_id_seq" -Type|Start|Minimum|Maximum|Increment|Cycles?|Cache -integer|1|1|2147483647|1|no|1 -Owned by: public.psql_serial_tab.id -\pset tuples_only true -\df exp -pg_catalog|exp|double precision|double precision|func -pg_catalog|exp|numeric|numeric|func -\pset tuples_only false -\pset expanded on -\d psql_serial_tab_id_seq -Sequence "public.psql_serial_tab_id_seq" - -Type|integer -Start|1 -Minimum|1 -Maximum|2147483647 -Increment|1 -Cycles?|no -Cache|1 - -Owned by: public.psql_serial_tab.id -\pset tuples_only true -\df exp -Schema|pg_catalog -Name|exp -Result data type|double precision -Argument data types|double precision -Type|func - -Schema|pg_catalog -Name|exp -Result data type|numeric -Argument data types|numeric -Type|func -\pset tuples_only false -\pset format wrapped -\pset expanded off -\d psql_serial_tab_id_seq - Sequence "public.psql_serial_tab_id_seq" - Type | Start | Minimum | Maximum | Increment | Cycles? | Cache ----------+-------+---------+------------+-----------+---------+------- - integer | 1 | 1 | 2147483647 | 1 | no | 1 -Owned by: public.psql_serial_tab.id - -\pset tuples_only true -\df exp - pg_catalog | exp | double precision | double precision | func - pg_catalog | exp | numeric | numeric | func - -\pset tuples_only false -\pset expanded on -\d psql_serial_tab_id_seq -Sequence "public.psql_serial_tab_id_seq" --[ RECORD 1 ]--------- -Type | integer -Start | 1 -Minimum | 1 -Maximum | 2147483647 -Increment | 1 -Cycles? 
| no -Cache | 1 - -Owned by: public.psql_serial_tab.id - -\pset tuples_only true -\df exp -Schema | pg_catalog -Name | exp -Result data type | double precision -Argument data types | double precision -Type | func ---------------------+----------------- -Schema | pg_catalog -Name | exp -Result data type | numeric -Argument data types | numeric -Type | func - -\pset tuples_only false --- check conditional am display -\pset expanded off -CREATE SCHEMA tableam_display; -CREATE ROLE regress_display_role; -ALTER SCHEMA tableam_display OWNER TO regress_display_role; -SET search_path TO tableam_display; -CREATE ACCESS METHOD heap_psql TYPE TABLE HANDLER heap_tableam_handler; -SET ROLE TO regress_display_role; --- Use only relations with a physical size of zero. -CREATE TABLE tbl_heap_psql(f1 int, f2 char(100)) using heap_psql; -CREATE TABLE tbl_heap(f1 int, f2 char(100)) using heap; -CREATE VIEW view_heap_psql AS SELECT f1 from tbl_heap_psql; -CREATE MATERIALIZED VIEW mat_view_heap_psql USING heap_psql AS SELECT f1 from tbl_heap_psql; -\d+ tbl_heap_psql - Table "tableam_display.tbl_heap_psql" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+----------------+-----------+----------+---------+----------+--------------+------------- - f1 | integer | | | | plain | | - f2 | character(100) | | | | extended | | - -\d+ tbl_heap - Table "tableam_display.tbl_heap" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+----------------+-----------+----------+---------+----------+--------------+------------- - f1 | integer | | | | plain | | - f2 | character(100) | | | | extended | | - -\set HIDE_TABLEAM off -\d+ tbl_heap_psql - Table "tableam_display.tbl_heap_psql" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+----------------+-----------+----------+---------+----------+--------------+------------- - f1 | integer | | | | plain | | - f2 | character(100) | | | | extended | | -Access method: heap_psql - -\d+ tbl_heap - Table "tableam_display.tbl_heap" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+----------------+-----------+----------+---------+----------+--------------+------------- - f1 | integer | | | | plain | | - f2 | character(100) | | | | extended | | -Access method: heap - --- AM is displayed for tables, indexes and materialized views. 
-\d+ - List of relations - Schema | Name | Type | Owner | Persistence | Access method | Size | Description ------------------+--------------------+-------------------+----------------------+-------------+---------------+---------+------------- - tableam_display | mat_view_heap_psql | materialized view | regress_display_role | permanent | heap_psql | 0 bytes | - tableam_display | tbl_heap | table | regress_display_role | permanent | heap | 0 bytes | - tableam_display | tbl_heap_psql | table | regress_display_role | permanent | heap_psql | 0 bytes | - tableam_display | view_heap_psql | view | regress_display_role | permanent | | 0 bytes | -(4 rows) - -\dt+ - List of relations - Schema | Name | Type | Owner | Persistence | Access method | Size | Description ------------------+---------------+-------+----------------------+-------------+---------------+---------+------------- - tableam_display | tbl_heap | table | regress_display_role | permanent | heap | 0 bytes | - tableam_display | tbl_heap_psql | table | regress_display_role | permanent | heap_psql | 0 bytes | -(2 rows) - -\dm+ - List of relations - Schema | Name | Type | Owner | Persistence | Access method | Size | Description ------------------+--------------------+-------------------+----------------------+-------------+---------------+---------+------------- - tableam_display | mat_view_heap_psql | materialized view | regress_display_role | permanent | heap_psql | 0 bytes | -(1 row) - --- But not for views and sequences. -\dv+ - List of relations - Schema | Name | Type | Owner | Persistence | Size | Description ------------------+----------------+------+----------------------+-------------+---------+------------- - tableam_display | view_heap_psql | view | regress_display_role | permanent | 0 bytes | -(1 row) - -\set HIDE_TABLEAM on -\d+ - List of relations - Schema | Name | Type | Owner | Persistence | Size | Description ------------------+--------------------+-------------------+----------------------+-------------+---------+------------- - tableam_display | mat_view_heap_psql | materialized view | regress_display_role | permanent | 0 bytes | - tableam_display | tbl_heap | table | regress_display_role | permanent | 0 bytes | - tableam_display | tbl_heap_psql | table | regress_display_role | permanent | 0 bytes | - tableam_display | view_heap_psql | view | regress_display_role | permanent | 0 bytes | -(4 rows) - -RESET ROLE; -RESET search_path; -DROP SCHEMA tableam_display CASCADE; -NOTICE: drop cascades to 4 other objects -DETAIL: drop cascades to table tableam_display.tbl_heap_psql -drop cascades to table tableam_display.tbl_heap -drop cascades to view tableam_display.view_heap_psql -drop cascades to materialized view tableam_display.mat_view_heap_psql -DROP ACCESS METHOD heap_psql; -DROP ROLE regress_display_role; --- test numericlocale (as best we can without control of psql's locale) -\pset format aligned -\pset expanded off -\pset numericlocale true -select n, -n as m, n * 111 as x, '1e90'::float8 as f -from generate_series(0,3) n; - n | m | x | f ----+----+-----+------- - 0 | 0 | 0 | 1e+90 - 1 | -1 | 111 | 1e+90 - 2 | -2 | 222 | 1e+90 - 3 | -3 | 333 | 1e+90 -(4 rows) - -\pset numericlocale false --- test asciidoc output format -\pset format asciidoc -\pset border 1 -\pset expanded off -\d psql_serial_tab_id_seq - -.Sequence "public.psql_serial_tab_id_seq" -[options="header",cols="l,>l,>l,>l,l",frame="none"] -|==== -^l|Type ^l|Start ^l|Minimum ^l|Maximum ^l|Increment ^l|Cycles? 
^l|Cache -|integer |1 |1 |2147483647 |1 |no |1 -|==== - -.... -Owned by: public.psql_serial_tab.id -.... -\pset tuples_only true -\df exp - -[cols="l|1 -l|1 -l|2147483647 -l|1 -l|1 -|==== - -.... -Owned by: public.psql_serial_tab.id -.... -\pset tuples_only true -\df exp - -[cols="h,l",frame="none"] -|==== -2+| -l|1 -2+^|Record 2 -l|2 -|==== -\pset border 1 -execute q; - -[cols="h,l",frame="none"] -|==== -2+^|Record 1 -l|1 -2+^|Record 2 -l|2 -|==== -\pset border 2 -execute q; - -[cols="h,l",frame="all",grid="all"] -|==== -2+^|Record 1 -l|1 -2+^|Record 2 -l|2 -|==== -deallocate q; --- test csv output format -\pset format csv -\pset border 1 -\pset expanded off -\d psql_serial_tab_id_seq -Type,Start,Minimum,Maximum,Increment,Cycles?,Cache -integer,1,1,2147483647,1,no,1 -\pset tuples_only true -\df exp -pg_catalog,exp,double precision,double precision,func -pg_catalog,exp,numeric,numeric,func -\pset tuples_only false -\pset expanded on -\d psql_serial_tab_id_seq -Type,integer -Start,1 -Minimum,1 -Maximum,2147483647 -Increment,1 -Cycles?,no -Cache,1 -\pset tuples_only true -\df exp -Schema,pg_catalog -Name,exp -Result data type,double precision -Argument data types,double precision -Type,func -Schema,pg_catalog -Name,exp -Result data type,numeric -Argument data types,numeric -Type,func -\pset tuples_only false -prepare q as - select 'some"text' as "a""title", E' \n' as "junk", - ' ' as "empty", n as int - from generate_series(1,2) as n; -\pset expanded off -execute q; -"a""title",junk,empty,int -"some""text"," -", ,1 -"some""text"," -", ,2 -\pset expanded on -execute q; -"a""title","some""text" -junk," -" -empty, -int,1 -"a""title","some""text" -junk," -" -empty, -int,2 -deallocate q; --- special cases -\pset expanded off -select 'comma,comma' as comma, 'semi;semi' as semi; -comma,semi -"comma,comma",semi;semi -\pset csv_fieldsep ';' -select 'comma,comma' as comma, 'semi;semi' as semi; -comma;semi -comma,comma;"semi;semi" -select '\.' as data; -data -"\." -\pset csv_fieldsep '.' -select '\' as d1, '' as d2; -"d1"."d2" -"\"."" --- illegal csv separators -\pset csv_fieldsep '' -\pset: csv_fieldsep must be a single one-byte character -\pset csv_fieldsep '\0' -\pset: csv_fieldsep must be a single one-byte character -\pset csv_fieldsep '\n' -\pset: csv_fieldsep cannot be a double quote, a newline, or a carriage return -\pset csv_fieldsep '\r' -\pset: csv_fieldsep cannot be a double quote, a newline, or a carriage return -\pset csv_fieldsep '"' -\pset: csv_fieldsep cannot be a double quote, a newline, or a carriage return -\pset csv_fieldsep ',,' -\pset: csv_fieldsep must be a single one-byte character -\pset csv_fieldsep ',' --- test html output format -\pset format html -\pset border 1 -\pset expanded off -\d psql_serial_tab_id_seq - - - - - - - - - - - - - - - - - - - - -
[HTML output-format test expectations elided: the diff's <table> markup was stripped during extraction, leaving only concatenated cell text. The block exercised \d psql_serial_tab_id_seq and \df exp (with \pset tuples_only and \pset expanded toggled) plus a prepared query "q" rendered with \pset border 0/1, \pset tableattr foobar, and \pset expanded on/off.]
- -\pset tableattr -deallocate q; --- test latex output format -\pset format latex -\pset border 1 -\pset expanded off -\d psql_serial_tab_id_seq -\begin{center} -Sequence "public.psql\_serial\_tab\_id\_seq" -\end{center} - -\begin{tabular}{l | r | r | r | r | l | r} -\textit{Type} & \textit{Start} & \textit{Minimum} & \textit{Maximum} & \textit{Increment} & \textit{Cycles?} & \textit{Cache} \\ -\hline -integer & 1 & 1 & 2147483647 & 1 & no & 1 \\ -\end{tabular} - -\noindent Owned by: public.psql\_serial\_tab.id \\ - -\pset tuples_only true -\df exp -\begin{tabular}{l | l | l | l | l} -pg\_catalog & exp & double precision & double precision & func \\ -pg\_catalog & exp & numeric & numeric & func \\ -\end{tabular} - -\noindent -\pset tuples_only false -\pset expanded on -\d psql_serial_tab_id_seq -\begin{center} -Sequence "public.psql\_serial\_tab\_id\_seq" -\end{center} - -\begin{tabular}{c|l} -\multicolumn{2}{c}{\textit{Record 1}} \\ -\hline -Type & integer \\ -Start & 1 \\ -Minimum & 1 \\ -Maximum & 2147483647 \\ -Increment & 1 \\ -Cycles? & no \\ -Cache & 1 \\ -\end{tabular} - -\noindent Owned by: public.psql\_serial\_tab.id \\ - -\pset tuples_only true -\df exp -\begin{tabular}{c|l} -\hline -Schema & pg\_catalog \\ -Name & exp \\ -Result data type & double precision \\ -Argument data types & double precision \\ -Type & func \\ -\hline -Schema & pg\_catalog \\ -Name & exp \\ -Result data type & numeric \\ -Argument data types & numeric \\ -Type & func \\ -\end{tabular} - -\noindent -\pset tuples_only false -prepare q as - select 'some\more_text' as "a$title", E' #%&^~|\n{bar}' as "junk", - ' ' as "empty", n as int - from generate_series(1,2) as n; -\pset expanded off -\pset border 0 -execute q; -\begin{tabular}{lllr} -\textit{a\$title} & \textit{junk} & \textit{empty} & \textit{int} \\ -\hline -some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 1 \\ -some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 2 \\ -\end{tabular} - -\noindent (2 rows) \\ - -\pset border 1 -execute q; -\begin{tabular}{l | l | l | r} -\textit{a\$title} & \textit{junk} & \textit{empty} & \textit{int} \\ -\hline -some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 1 \\ -some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 2 \\ -\end{tabular} - -\noindent (2 rows) \\ - -\pset border 2 -execute q; -\begin{tabular}{| l | l | l | r |} -\hline -\textit{a\$title} & \textit{junk} & \textit{empty} & \textit{int} \\ -\hline -some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 1 \\ -some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 2 \\ -\hline -\end{tabular} - -\noindent (2 rows) \\ - -\pset border 3 -execute q; -\begin{tabular}{| l | l | l | r |} -\hline -\textit{a\$title} & \textit{junk} & \textit{empty} & \textit{int} \\ -\hline -some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 1 \\ -\hline -some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 2 \\ -\hline -\end{tabular} - -\noindent (2 rows) \\ - -\pset expanded on -\pset border 0 -execute q; -\begin{tabular}{cl} -\multicolumn{2}{c}{\textit{Record 1}} \\ -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ 
-\multicolumn{2}{c}{\textit{Record 2}} \\ -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\end{tabular} - -\noindent -\pset border 1 -execute q; -\begin{tabular}{c|l} -\multicolumn{2}{c}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\multicolumn{2}{c}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\end{tabular} - -\noindent -\pset border 2 -execute q; -\begin{tabular}{|c|l|} -\hline -\multicolumn{2}{|c|}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\hline -\multicolumn{2}{|c|}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\hline -\end{tabular} - -\noindent -\pset border 3 -execute q; -\begin{tabular}{|c|l|} -\hline -\multicolumn{2}{|c|}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\hline -\multicolumn{2}{|c|}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\hline -\end{tabular} - -\noindent -deallocate q; --- test latex-longtable output format -\pset format latex-longtable -\pset border 1 -\pset expanded off -\d psql_serial_tab_id_seq -\begin{longtable}{l | r | r | r | r | l | r} -\small\textbf{\textit{Type}} & \small\textbf{\textit{Start}} & \small\textbf{\textit{Minimum}} & \small\textbf{\textit{Maximum}} & \small\textbf{\textit{Increment}} & \small\textbf{\textit{Cycles?}} & \small\textbf{\textit{Cache}} \\ -\midrule -\endfirsthead -\small\textbf{\textit{Type}} & \small\textbf{\textit{Start}} & \small\textbf{\textit{Minimum}} & \small\textbf{\textit{Maximum}} & \small\textbf{\textit{Increment}} & \small\textbf{\textit{Cycles?}} & \small\textbf{\textit{Cache}} \\ -\midrule -\endhead -\caption[Sequence "public.psql\_serial\_tab\_id\_seq" (Continued)]{Sequence "public.psql\_serial\_tab\_id\_seq"} -\endfoot -\caption[Sequence "public.psql\_serial\_tab\_id\_seq"]{Sequence "public.psql\_serial\_tab\_id\_seq"} -\endlastfoot -\raggedright{integer} -& -\raggedright{1} -& -\raggedright{1} -& -\raggedright{2147483647} -& -\raggedright{1} -& -\raggedright{no} -& -\raggedright{1} \tabularnewline -\end{longtable} -\pset tuples_only true -\df exp -\begin{longtable}{l | l | l | l | l} -\raggedright{pg\_catalog} -& -\raggedright{exp} -& -\raggedright{double precision} -& -\raggedright{double precision} -& -\raggedright{func} \tabularnewline -\raggedright{pg\_catalog} -& -\raggedright{exp} -& -\raggedright{numeric} -& -\raggedright{numeric} -& -\raggedright{func} \tabularnewline -\end{longtable} -\pset tuples_only false -\pset expanded on -\d psql_serial_tab_id_seq -\begin{center} -Sequence "public.psql\_serial\_tab\_id\_seq" -\end{center} - -\begin{tabular}{c|l} -\multicolumn{2}{c}{\textit{Record 1}} \\ -\hline -Type & integer \\ -Start & 1 \\ -Minimum & 1 \\ -Maximum & 2147483647 \\ -Increment & 1 \\ -Cycles? 
& no \\ -Cache & 1 \\ -\end{tabular} - -\noindent Owned by: public.psql\_serial\_tab.id \\ - -\pset tuples_only true -\df exp -\begin{tabular}{c|l} -\hline -Schema & pg\_catalog \\ -Name & exp \\ -Result data type & double precision \\ -Argument data types & double precision \\ -Type & func \\ -\hline -Schema & pg\_catalog \\ -Name & exp \\ -Result data type & numeric \\ -Argument data types & numeric \\ -Type & func \\ -\end{tabular} - -\noindent -\pset tuples_only false -prepare q as - select 'some\more_text' as "a$title", E' #%&^~|\n{bar}' as "junk", - ' ' as "empty", n as int - from generate_series(1,2) as n; -\pset expanded off -\pset border 0 -execute q; -\begin{longtable}{lllr} -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endfirsthead -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endhead -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{1} \tabularnewline -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{2} \tabularnewline -\end{longtable} -\pset border 1 -execute q; -\begin{longtable}{l | l | l | r} -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endfirsthead -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endhead -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{1} \tabularnewline -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{2} \tabularnewline -\end{longtable} -\pset border 2 -execute q; -\begin{longtable}{| l | l | l | r |} -\toprule -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endfirsthead -\toprule -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endhead -\bottomrule -\endfoot -\bottomrule -\endlastfoot -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{1} \tabularnewline -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{2} \tabularnewline -\end{longtable} -\pset border 3 -execute q; -\begin{longtable}{| l | l | l | r |} -\toprule -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endfirsthead -\toprule -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\endhead -\bottomrule -\endfoot -\bottomrule -\endlastfoot -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{1} 
\tabularnewline - \hline -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{2} \tabularnewline - \hline -\end{longtable} -\pset tableattr lr -execute q; -\begin{longtable}{| p{lr\textwidth} | p{lr\textwidth} | p{lr\textwidth} | r |} -\toprule -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endfirsthead -\toprule -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\endhead -\bottomrule -\endfoot -\bottomrule -\endlastfoot -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{1} \tabularnewline - \hline -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{2} \tabularnewline - \hline -\end{longtable} -\pset tableattr -\pset expanded on -\pset border 0 -execute q; -\begin{tabular}{cl} -\multicolumn{2}{c}{\textit{Record 1}} \\ -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\multicolumn{2}{c}{\textit{Record 2}} \\ -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\end{tabular} - -\noindent -\pset border 1 -execute q; -\begin{tabular}{c|l} -\multicolumn{2}{c}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\multicolumn{2}{c}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\end{tabular} - -\noindent -\pset border 2 -execute q; -\begin{tabular}{|c|l|} -\hline -\multicolumn{2}{|c|}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\hline -\multicolumn{2}{|c|}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\hline -\end{tabular} - -\noindent -\pset border 3 -execute q; -\begin{tabular}{|c|l|} -\hline -\multicolumn{2}{|c|}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\hline -\multicolumn{2}{|c|}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\hline -\end{tabular} - -\noindent -\pset tableattr lr -execute q; -\begin{tabular}{|c|l|} -\hline -\multicolumn{2}{|c|}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\hline -\multicolumn{2}{|c|}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\hline -\end{tabular} - -\noindent -\pset 
tableattr -deallocate q; --- test troff-ms output format -\pset format troff-ms -\pset border 1 -\pset expanded off -\d psql_serial_tab_id_seq -.LP -.DS C -Sequence "public.psql_serial_tab_id_seq" -.DE -.LP -.TS -center; -l | r | r | r | r | l | r. -\fIType\fP \fIStart\fP \fIMinimum\fP \fIMaximum\fP \fIIncrement\fP \fICycles?\fP \fICache\fP -_ -integer 1 1 2147483647 1 no 1 -.TE -.DS L -Owned by: public.psql_serial_tab.id -.DE -\pset tuples_only true -\df exp -.LP -.TS -center; -l | l | l | l | l. -pg_catalog exp double precision double precision func -pg_catalog exp numeric numeric func -.TE -.DS L -.DE -\pset tuples_only false -\pset expanded on -\d psql_serial_tab_id_seq -.LP -.DS C -Sequence "public.psql_serial_tab_id_seq" -.DE -.LP -.TS -center; -c s. -\fIRecord 1\fP -_ -.T& -c | l. -Type integer -Start 1 -Minimum 1 -Maximum 2147483647 -Increment 1 -Cycles? no -Cache 1 -.TE -.DS L -Owned by: public.psql_serial_tab.id -.DE -\pset tuples_only true -\df exp -.LP -.TS -center; -c l; -_ -Schema pg_catalog -Name exp -Result data type double precision -Argument data types double precision -Type func -_ -Schema pg_catalog -Name exp -Result data type numeric -Argument data types numeric -Type func -.TE -.DS L -.DE -\pset tuples_only false -prepare q as - select 'some\text' as "a\title", E' \n' as "junk", - ' ' as "empty", n as int - from generate_series(1,2) as n; -\pset expanded off -\pset border 0 -execute q; -.LP -.TS -center; -lllr. -\fIa\(rstitle\fP \fIjunk\fP \fIempty\fP \fIint\fP -_ -some\(rstext - 1 -some\(rstext - 2 -.TE -.DS L -(2 rows) -.DE -\pset border 1 -execute q; -.LP -.TS -center; -l | l | l | r. -\fIa\(rstitle\fP \fIjunk\fP \fIempty\fP \fIint\fP -_ -some\(rstext - 1 -some\(rstext - 2 -.TE -.DS L -(2 rows) -.DE -\pset border 2 -execute q; -.LP -.TS -center box; -l | l | l | r. -\fIa\(rstitle\fP \fIjunk\fP \fIempty\fP \fIint\fP -_ -some\(rstext - 1 -some\(rstext - 2 -.TE -.DS L -(2 rows) -.DE -\pset expanded on -\pset border 0 -execute q; -.LP -.TS -center; -c s. -\fIRecord 1\fP -.T& -c l. -a\(rstitle some\(rstext -junk - -empty -int 1 -.T& -c s. -\fIRecord 2\fP -.T& -c l. -a\(rstitle some\(rstext -junk - -empty -int 2 -.TE -.DS L -.DE -\pset border 1 -execute q; -.LP -.TS -center; -c s. -\fIRecord 1\fP -_ -.T& -c | l. -a\(rstitle some\(rstext -junk - -empty -int 1 -.T& -c s. -\fIRecord 2\fP -_ -.T& -c | l. -a\(rstitle some\(rstext -junk - -empty -int 2 -.TE -.DS L -.DE -\pset border 2 -execute q; -.LP -.TS -center box; -c s. -\fIRecord 1\fP -_ -.T& -c l. -a\(rstitle some\(rstext -junk - -empty -int 1 -_ -.T& -c s. -\fIRecord 2\fP -_ -.T& -c l. -a\(rstitle some\(rstext -junk - -empty -int 2 -.TE -.DS L -.DE -deallocate q; --- check ambiguous format requests -\pset format a -\pset: ambiguous abbreviation "a" matches both "aligned" and "asciidoc" -\pset format l --- clean up after output format tests -drop table psql_serial_tab; -\pset format aligned -\pset expanded off -\pset border 1 --- \echo and allied features -\echo this is a test -this is a test -\echo -n without newline -without newline\echo with -n newline -with -n newline -\echo '-n' with newline --n with newline -\set foo bar -\echo foo = :foo -foo = bar -\qecho this is a test -this is a test -\qecho foo = :foo -foo = bar -\warn this is a test -this is a test -\warn foo = :foo -foo = bar --- tests for \if ... \endif -\if true - select 'okay'; - ?column? ----------- - okay -(1 row) - - select 'still okay'; - ?column? 
------------- - still okay -(1 row) - -\else - not okay; - still not okay -\endif --- at this point query buffer should still have last valid line -\g - ?column? ------------- - still okay -(1 row) - --- \if should work okay on part of a query -select - \if true - 42 - \else - (bogus - \endif - forty_two; - forty_two ------------ - 42 -(1 row) - -select \if false \\ (bogus \else \\ 42 \endif \\ forty_two; - forty_two ------------ - 42 -(1 row) - --- test a large nested if using a variety of true-equivalents -\if true - \if 1 - \if yes - \if on - \echo 'all true' -all true - \else - \echo 'should not print #1-1' - \endif - \else - \echo 'should not print #1-2' - \endif - \else - \echo 'should not print #1-3' - \endif -\else - \echo 'should not print #1-4' -\endif --- test a variety of false-equivalents in an if/elif/else structure -\if false - \echo 'should not print #2-1' -\elif 0 - \echo 'should not print #2-2' -\elif no - \echo 'should not print #2-3' -\elif off - \echo 'should not print #2-4' -\else - \echo 'all false' -all false -\endif --- test true-false elif after initial true branch -\if true - \echo 'should print #2-5' -should print #2-5 -\elif true - \echo 'should not print #2-6' -\elif false - \echo 'should not print #2-7' -\else - \echo 'should not print #2-8' -\endif --- test simple true-then-else -\if true - \echo 'first thing true' -first thing true -\else - \echo 'should not print #3-1' -\endif --- test simple false-true-else -\if false - \echo 'should not print #4-1' -\elif true - \echo 'second thing true' -second thing true -\else - \echo 'should not print #5-1' -\endif --- invalid boolean expressions are false -\if invalid boolean expression -unrecognized value "invalid boolean expression" for "\if expression": Boolean expected - \echo 'will not print #6-1' -\else - \echo 'will print anyway #6-2' -will print anyway #6-2 -\endif --- test un-matched endif -\endif -\endif: no matching \if --- test un-matched else -\else -\else: no matching \if --- test un-matched elif -\elif -\elif: no matching \if --- test double-else error -\if true -\else -\else -\else: cannot occur after \else -\endif --- test elif out-of-order -\if false -\else -\elif -\elif: cannot occur after \else -\endif --- test if-endif matching in a false branch -\if false - \if false - \echo 'should not print #7-1' - \else - \echo 'should not print #7-2' - \endif - \echo 'should not print #7-3' -\else - \echo 'should print #7-4' -should print #7-4 -\endif --- show that vars and backticks are not expanded when ignoring extra args -\set foo bar -\echo :foo :'foo' :"foo" -bar 'bar' "bar" -\pset fieldsep | `nosuchcommand` :foo :'foo' :"foo" -\pset: extra argument "nosuchcommand" ignored -\pset: extra argument ":foo" ignored -\pset: extra argument ":'foo'" ignored -\pset: extra argument ":"foo"" ignored --- show that vars and backticks are not expanded and commands are ignored --- when in a false if-branch -\set try_to_quit '\\q' -\if false - :try_to_quit - \echo `nosuchcommand` :foo :'foo' :"foo" - \pset fieldsep | `nosuchcommand` :foo :'foo' :"foo" - \a - SELECT $1 \bind 1 \g - \C arg1 - \c arg1 arg2 arg3 arg4 - \cd arg1 - \conninfo - \copy arg1 arg2 arg3 arg4 arg5 arg6 - \copyright - SELECT 1 as one, 2, 3 \crosstabview - \dt arg1 - \e arg1 arg2 - \ef whole_line - \ev whole_line - \echo arg1 arg2 arg3 arg4 arg5 - \echo arg1 - \encoding arg1 - \errverbose - \f arg1 - \g arg1 - \gx arg1 - \gexec - SELECT 1 AS one \gset - \h - \? 
- \html - \i arg1 - \ir arg1 - \l arg1 - \lo arg1 arg2 -invalid command \lo - \lo_list - \o arg1 - \p - \password arg1 - \prompt arg1 arg2 - \pset arg1 arg2 - \q - \reset - \s arg1 - \set arg1 arg2 arg3 arg4 arg5 arg6 arg7 - \setenv arg1 arg2 - \sf whole_line - \sv whole_line - \t arg1 - \T arg1 - \timing arg1 - \unset arg1 - \w arg1 - \watch arg1 arg2 - \x arg1 - -- \else here is eaten as part of OT_FILEPIPE argument - \w |/no/such/file \else - -- \endif here is eaten as part of whole-line argument - \! whole_line \endif - \z -\else - \echo 'should print #8-1' -should print #8-1 -\endif --- :{?...} defined variable test -\set i 1 -\if :{?i} - \echo '#9-1 ok, variable i is defined' -#9-1 ok, variable i is defined -\else - \echo 'should not print #9-2' -\endif -\if :{?no_such_variable} - \echo 'should not print #10-1' -\else - \echo '#10-2 ok, variable no_such_variable is not defined' -#10-2 ok, variable no_such_variable is not defined -\endif -SELECT :{?i} AS i_is_defined; - i_is_defined --------------- - t -(1 row) - -SELECT NOT :{?no_such_var} AS no_such_var_is_not_defined; - no_such_var_is_not_defined ----------------------------- - t -(1 row) - --- SHOW_CONTEXT -\set SHOW_CONTEXT never -do $$ -begin - raise notice 'foo'; - raise exception 'bar'; -end $$; -NOTICE: foo -ERROR: bar -\set SHOW_CONTEXT errors -do $$ -begin - raise notice 'foo'; - raise exception 'bar'; -end $$; -NOTICE: foo -ERROR: bar -CONTEXT: PL/pgSQL function inline_code_block line 4 at RAISE -\set SHOW_CONTEXT always -do $$ -begin - raise notice 'foo'; - raise exception 'bar'; -end $$; -NOTICE: foo -CONTEXT: PL/pgSQL function inline_code_block line 3 at RAISE -ERROR: bar -CONTEXT: PL/pgSQL function inline_code_block line 4 at RAISE --- test printing and clearing the query buffer -SELECT 1; - ?column? ----------- - 1 -(1 row) - -\p -SELECT 1; -SELECT 2 \r -\p -SELECT 1; -SELECT 3 \p -SELECT 3 -UNION SELECT 4 \p -SELECT 3 -UNION SELECT 4 -UNION SELECT 5 -ORDER BY 1; - ?column? 
----------- - 3 - 4 - 5 -(3 rows) - -\r -\p -SELECT 3 -UNION SELECT 4 -UNION SELECT 5 -ORDER BY 1; --- tests for special result variables --- working query, 2 rows selected -SELECT 1 AS stuff UNION SELECT 2; - stuff -------- - 1 - 2 -(2 rows) - -\echo 'error:' :ERROR -error: false -\echo 'error code:' :SQLSTATE -error code: 00000 -\echo 'number of rows:' :ROW_COUNT -number of rows: 2 --- syntax error -SELECT 1 UNION; -ERROR: syntax error at or near ";" -LINE 1: SELECT 1 UNION; - ^ -\echo 'error:' :ERROR -error: true -\echo 'error code:' :SQLSTATE -error code: 42601 -\echo 'number of rows:' :ROW_COUNT -number of rows: 0 -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: syntax error at or near ";" -\echo 'last error code:' :LAST_ERROR_SQLSTATE -last error code: 42601 --- empty query -; -\echo 'error:' :ERROR -error: false -\echo 'error code:' :SQLSTATE -error code: 00000 -\echo 'number of rows:' :ROW_COUNT -number of rows: 0 --- must have kept previous values -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: syntax error at or near ";" -\echo 'last error code:' :LAST_ERROR_SQLSTATE -last error code: 42601 --- other query error -DROP TABLE this_table_does_not_exist; -ERROR: table "this_table_does_not_exist" does not exist -\echo 'error:' :ERROR -error: true -\echo 'error code:' :SQLSTATE -error code: 42P01 -\echo 'number of rows:' :ROW_COUNT -number of rows: 0 -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: table "this_table_does_not_exist" does not exist -\echo 'last error code:' :LAST_ERROR_SQLSTATE -last error code: 42P01 --- nondefault verbosity error settings (except verbose, which is too unstable) -\set VERBOSITY terse -SELECT 1 UNION; -ERROR: syntax error at or near ";" at character 15 -\echo 'error:' :ERROR -error: true -\echo 'error code:' :SQLSTATE -error code: 42601 -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: syntax error at or near ";" -\set VERBOSITY sqlstate -SELECT 1/0; -ERROR: 22012 -\echo 'error:' :ERROR -error: true -\echo 'error code:' :SQLSTATE -error code: 22012 -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: division by zero -\set VERBOSITY default --- working \gdesc -SELECT 3 AS three, 4 AS four \gdesc - Column | Type ---------+--------- - three | integer - four | integer -(2 rows) - -\echo 'error:' :ERROR -error: false -\echo 'error code:' :SQLSTATE -error code: 00000 -\echo 'number of rows:' :ROW_COUNT -number of rows: 2 --- \gdesc with an error -SELECT 4 AS \gdesc -ERROR: syntax error at end of input -LINE 1: SELECT 4 AS - ^ -\echo 'error:' :ERROR -error: true -\echo 'error code:' :SQLSTATE -error code: 42601 -\echo 'number of rows:' :ROW_COUNT -number of rows: 0 -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: syntax error at end of input -\echo 'last error code:' :LAST_ERROR_SQLSTATE -last error code: 42601 --- check row count for a cursor-fetched query -\set FETCH_COUNT 10 -select unique2 from tenk1 order by unique2 limit 19; - unique2 ---------- - 0 - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 - 11 - 12 - 13 - 14 - 15 - 16 - 17 - 18 -(19 rows) - -\echo 'error:' :ERROR -error: false -\echo 'error code:' :SQLSTATE -error code: 00000 -\echo 'number of rows:' :ROW_COUNT -number of rows: 19 --- cursor-fetched query with an error after the first group -select 1/(15-unique2) from tenk1 order by unique2 limit 19; - ?column? 
----------- - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 -ERROR: division by zero -\echo 'error:' :ERROR -error: true -\echo 'error code:' :SQLSTATE -error code: 22012 -\echo 'number of rows:' :ROW_COUNT -number of rows: 0 -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: division by zero -\echo 'last error code:' :LAST_ERROR_SQLSTATE -last error code: 22012 -\unset FETCH_COUNT -create schema testpart; -create role regress_partitioning_role; -alter schema testpart owner to regress_partitioning_role; -set role to regress_partitioning_role; --- run test inside own schema and hide other partitions -set search_path to testpart; -create table testtable_apple(logdate date); -create table testtable_orange(logdate date); -create index testtable_apple_index on testtable_apple(logdate); -create index testtable_orange_index on testtable_orange(logdate); -create table testpart_apple(logdate date) partition by range(logdate); -create table testpart_orange(logdate date) partition by range(logdate); -create index testpart_apple_index on testpart_apple(logdate); -create index testpart_orange_index on testpart_orange(logdate); --- only partition related object should be displayed -\dP test*apple* - List of partitioned relations - Schema | Name | Owner | Type | Parent name | Table -----------+----------------------+---------------------------+-------------------+-------------+---------------- - testpart | testpart_apple | regress_partitioning_role | partitioned table | | - testpart | testpart_apple_index | regress_partitioning_role | partitioned index | | testpart_apple -(2 rows) - -\dPt test*apple* - List of partitioned tables - Schema | Name | Owner | Parent name -----------+----------------+---------------------------+------------- - testpart | testpart_apple | regress_partitioning_role | -(1 row) - -\dPi test*apple* - List of partitioned indexes - Schema | Name | Owner | Parent name | Table -----------+----------------------+---------------------------+-------------+---------------- - testpart | testpart_apple_index | regress_partitioning_role | | testpart_apple -(1 row) - -drop table testtable_apple; -drop table testtable_orange; -drop table testpart_apple; -drop table testpart_orange; -create table parent_tab (id int) partition by range (id); -create index parent_index on parent_tab (id); -create table child_0_10 partition of parent_tab - for values from (0) to (10); -create table child_10_20 partition of parent_tab - for values from (10) to (20); -create table child_20_30 partition of parent_tab - for values from (20) to (30); -insert into parent_tab values (generate_series(0,29)); -create table child_30_40 partition of parent_tab -for values from (30) to (40) - partition by range(id); -create table child_30_35 partition of child_30_40 - for values from (30) to (35); -create table child_35_40 partition of child_30_40 - for values from (35) to (40); -insert into parent_tab values (generate_series(30,39)); -\dPt - List of partitioned tables - Schema | Name | Owner -----------+------------+--------------------------- - testpart | parent_tab | regress_partitioning_role -(1 row) - -\dPi - List of partitioned indexes - Schema | Name | Owner | Table -----------+--------------+---------------------------+------------ - testpart | parent_index | regress_partitioning_role | parent_tab -(1 row) - -\dP testpart.* - List of partitioned relations - Schema | Name | Owner | Type | Parent name | Table 
-----------+--------------------+---------------------------+-------------------+--------------+------------- - testpart | parent_tab | regress_partitioning_role | partitioned table | | - testpart | child_30_40 | regress_partitioning_role | partitioned table | parent_tab | - testpart | parent_index | regress_partitioning_role | partitioned index | | parent_tab - testpart | child_30_40_id_idx | regress_partitioning_role | partitioned index | parent_index | child_30_40 -(4 rows) - -\dP - List of partitioned relations - Schema | Name | Owner | Type | Table -----------+--------------+---------------------------+-------------------+------------ - testpart | parent_tab | regress_partitioning_role | partitioned table | - testpart | parent_index | regress_partitioning_role | partitioned index | parent_tab -(2 rows) - -\dPtn - List of partitioned tables - Schema | Name | Owner | Parent name -----------+-------------+---------------------------+------------- - testpart | parent_tab | regress_partitioning_role | - testpart | child_30_40 | regress_partitioning_role | parent_tab -(2 rows) - -\dPin - List of partitioned indexes - Schema | Name | Owner | Parent name | Table -----------+--------------------+---------------------------+--------------+------------- - testpart | parent_index | regress_partitioning_role | | parent_tab - testpart | child_30_40_id_idx | regress_partitioning_role | parent_index | child_30_40 -(2 rows) - -\dPn - List of partitioned relations - Schema | Name | Owner | Type | Parent name | Table -----------+--------------------+---------------------------+-------------------+--------------+------------- - testpart | parent_tab | regress_partitioning_role | partitioned table | | - testpart | child_30_40 | regress_partitioning_role | partitioned table | parent_tab | - testpart | parent_index | regress_partitioning_role | partitioned index | | parent_tab - testpart | child_30_40_id_idx | regress_partitioning_role | partitioned index | parent_index | child_30_40 -(4 rows) - -\dPn testpart.* - List of partitioned relations - Schema | Name | Owner | Type | Parent name | Table -----------+--------------------+---------------------------+-------------------+--------------+------------- - testpart | parent_tab | regress_partitioning_role | partitioned table | | - testpart | child_30_40 | regress_partitioning_role | partitioned table | parent_tab | - testpart | parent_index | regress_partitioning_role | partitioned index | | parent_tab - testpart | child_30_40_id_idx | regress_partitioning_role | partitioned index | parent_index | child_30_40 -(4 rows) - -drop table parent_tab cascade; -drop schema testpart; -set search_path to default; -set role to default; -drop role regress_partitioning_role; --- \d on toast table (use pg_statistic's toast table, which has a known name) -\d pg_toast.pg_toast_2619 -TOAST table "pg_toast.pg_toast_2619" - Column | Type -------------+--------- - chunk_id | oid - chunk_seq | integer - chunk_data | bytea -Owning table: "pg_catalog.pg_statistic" -Indexes: - "pg_toast_2619_index" PRIMARY KEY, btree (chunk_id, chunk_seq) - --- check printing info about access methods -\dA -List of access methods - Name | Type ---------+------- - brin | Index - btree | Index - gin | Index - gist | Index - hash | Index - heap | Table - heap2 | Table - spgist | Index -(8 rows) - -\dA * -List of access methods - Name | Type ---------+------- - brin | Index - btree | Index - gin | Index - gist | Index - hash | Index - heap | Table - heap2 | Table - spgist | Index -(8 rows) - -\dA h* 
-List of access methods - Name | Type --------+------- - hash | Index - heap | Table - heap2 | Table -(3 rows) - -\dA foo -List of access methods - Name | Type -------+------ -(0 rows) - -\dA foo bar -List of access methods - Name | Type -------+------ -(0 rows) - -\dA: extra argument "bar" ignored -\dA+ - List of access methods - Name | Type | Handler | Description ---------+-------+----------------------+---------------------------------------- - brin | Index | brinhandler | block range index (BRIN) access method - btree | Index | bthandler | b-tree index access method - gin | Index | ginhandler | GIN index access method - gist | Index | gisthandler | GiST index access method - hash | Index | hashhandler | hash index access method - heap | Table | heap_tableam_handler | heap table access method - heap2 | Table | heap_tableam_handler | - spgist | Index | spghandler | SP-GiST index access method -(8 rows) - -\dA+ * - List of access methods - Name | Type | Handler | Description ---------+-------+----------------------+---------------------------------------- - brin | Index | brinhandler | block range index (BRIN) access method - btree | Index | bthandler | b-tree index access method - gin | Index | ginhandler | GIN index access method - gist | Index | gisthandler | GiST index access method - hash | Index | hashhandler | hash index access method - heap | Table | heap_tableam_handler | heap table access method - heap2 | Table | heap_tableam_handler | - spgist | Index | spghandler | SP-GiST index access method -(8 rows) - -\dA+ h* - List of access methods - Name | Type | Handler | Description --------+-------+----------------------+-------------------------- - hash | Index | hashhandler | hash index access method - heap | Table | heap_tableam_handler | heap table access method - heap2 | Table | heap_tableam_handler | -(3 rows) - -\dA+ foo - List of access methods - Name | Type | Handler | Description -------+------+---------+------------- -(0 rows) - -\dAc brin pg*.oid* - List of operator classes - AM | Input type | Storage type | Operator class | Default? 
-------+------------+--------------+----------------------+---------- - brin | oid | | oid_bloom_ops | no - brin | oid | | oid_minmax_multi_ops | no - brin | oid | | oid_minmax_ops | yes -(3 rows) - -\dAf spgist - List of operator families - AM | Operator family | Applicable types ---------+-----------------+------------------ - spgist | box_ops | box - spgist | kd_point_ops | point - spgist | network_ops | inet - spgist | poly_ops | polygon - spgist | quad_point_ops | point - spgist | range_ops | anyrange - spgist | text_ops | text -(7 rows) - -\dAf btree int4 - List of operator families - AM | Operator family | Applicable types --------+-----------------+--------------------------- - btree | integer_ops | smallint, integer, bigint -(1 row) - -\dAo+ btree float_ops - List of operators of operator families - AM | Operator family | Operator | Strategy | Purpose | Sort opfamily --------+-----------------+---------------------------------------+----------+---------+--------------- - btree | float_ops | <(double precision,double precision) | 1 | search | - btree | float_ops | <=(double precision,double precision) | 2 | search | - btree | float_ops | =(double precision,double precision) | 3 | search | - btree | float_ops | >=(double precision,double precision) | 4 | search | - btree | float_ops | >(double precision,double precision) | 5 | search | - btree | float_ops | <(real,real) | 1 | search | - btree | float_ops | <=(real,real) | 2 | search | - btree | float_ops | =(real,real) | 3 | search | - btree | float_ops | >=(real,real) | 4 | search | - btree | float_ops | >(real,real) | 5 | search | - btree | float_ops | <(double precision,real) | 1 | search | - btree | float_ops | <=(double precision,real) | 2 | search | - btree | float_ops | =(double precision,real) | 3 | search | - btree | float_ops | >=(double precision,real) | 4 | search | - btree | float_ops | >(double precision,real) | 5 | search | - btree | float_ops | <(real,double precision) | 1 | search | - btree | float_ops | <=(real,double precision) | 2 | search | - btree | float_ops | =(real,double precision) | 3 | search | - btree | float_ops | >=(real,double precision) | 4 | search | - btree | float_ops | >(real,double precision) | 5 | search | -(20 rows) - -\dAo * pg_catalog.jsonb_path_ops - List of operators of operator families - AM | Operator family | Operator | Strategy | Purpose ------+-----------------+--------------------+----------+--------- - gin | jsonb_path_ops | @>(jsonb,jsonb) | 7 | search - gin | jsonb_path_ops | @?(jsonb,jsonpath) | 15 | search - gin | jsonb_path_ops | @@(jsonb,jsonpath) | 16 | search -(3 rows) - -\dAp+ btree float_ops - List of support functions of operator families - AM | Operator family | Registered left type | Registered right type | Number | Function --------+-----------------+----------------------+-----------------------+--------+------------------------------------------------------------------------------ - btree | float_ops | double precision | double precision | 1 | btfloat8cmp(double precision,double precision) - btree | float_ops | double precision | double precision | 2 | btfloat8sortsupport(internal) - btree | float_ops | double precision | double precision | 3 | in_range(double precision,double precision,double precision,boolean,boolean) - btree | float_ops | real | real | 1 | btfloat4cmp(real,real) - btree | float_ops | real | real | 2 | btfloat4sortsupport(internal) - btree | float_ops | double precision | real | 1 | btfloat84cmp(double precision,real) - btree | float_ops | real | 
double precision | 1 | btfloat48cmp(real,double precision) - btree | float_ops | real | double precision | 3 | in_range(real,real,double precision,boolean,boolean) -(8 rows) - -\dAp * pg_catalog.uuid_ops - List of support functions of operator families - AM | Operator family | Registered left type | Registered right type | Number | Function --------+-----------------+----------------------+-----------------------+--------+-------------------- - btree | uuid_ops | uuid | uuid | 1 | uuid_cmp - btree | uuid_ops | uuid | uuid | 2 | uuid_sortsupport - btree | uuid_ops | uuid | uuid | 4 | btequalimage - hash | uuid_ops | uuid | uuid | 1 | uuid_hash - hash | uuid_ops | uuid | uuid | 2 | uuid_hash_extended -(5 rows) - --- check \dconfig -set work_mem = 10240; -\dconfig work_mem -List of configuration parameters - Parameter | Value ------------+------- - work_mem | 10MB -(1 row) - -\dconfig+ work* - List of configuration parameters - Parameter | Value | Type | Context | Access privileges ------------+-------+---------+---------+------------------- - work_mem | 10MB | integer | user | -(1 row) - -reset work_mem; --- check \df, \do with argument specifications -\df *sqrt - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+--------------+------------------+---------------------+------ - pg_catalog | dsqrt | double precision | double precision | func - pg_catalog | numeric_sqrt | numeric | numeric | func - pg_catalog | sqrt | double precision | double precision | func - pg_catalog | sqrt | numeric | numeric | func -(4 rows) - -\df *sqrt num* - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+--------------+------------------+---------------------+------ - pg_catalog | numeric_sqrt | numeric | numeric | func - pg_catalog | sqrt | numeric | numeric | func -(2 rows) - -\df int*pl - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+-------------+------------------+---------------------+------ - pg_catalog | int24pl | integer | smallint, integer | func - pg_catalog | int28pl | bigint | smallint, bigint | func - pg_catalog | int2pl | smallint | smallint, smallint | func - pg_catalog | int42pl | integer | integer, smallint | func - pg_catalog | int48pl | bigint | integer, bigint | func - pg_catalog | int4pl | integer | integer, integer | func - pg_catalog | int82pl | bigint | bigint, smallint | func - pg_catalog | int84pl | bigint | bigint, integer | func - pg_catalog | int8pl | bigint | bigint, bigint | func - pg_catalog | interval_pl | interval | interval, interval | func -(10 rows) - -\df int*pl int4 - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+---------+------------------+---------------------+------ - pg_catalog | int42pl | integer | integer, smallint | func - pg_catalog | int48pl | bigint | integer, bigint | func - pg_catalog | int4pl | integer | integer, integer | func -(3 rows) - -\df int*pl * pg_catalog.int8 - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+---------+------------------+---------------------+------ - pg_catalog | int28pl | bigint | smallint, bigint | func - pg_catalog | int48pl | bigint | integer, bigint | func - pg_catalog | int8pl | bigint | bigint, bigint | func -(3 rows) - -\df acl* aclitem[] - List of functions - Schema | Name | Result data type | Argument data types | Type 
-------------+-------------+------------------+----------------------------------------------------------------------------------------------------+------ - pg_catalog | aclcontains | boolean | aclitem[], aclitem | func - pg_catalog | aclexplode | SETOF record | acl aclitem[], OUT grantor oid, OUT grantee oid, OUT privilege_type text, OUT is_grantable boolean | func - pg_catalog | aclinsert | aclitem[] | aclitem[], aclitem | func - pg_catalog | aclremove | aclitem[] | aclitem[], aclitem | func -(4 rows) - -\df has_database_privilege oid text - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+------------------------+------------------+---------------------+------ - pg_catalog | has_database_privilege | boolean | oid, text | func - pg_catalog | has_database_privilege | boolean | oid, text, text | func -(2 rows) - -\df has_database_privilege oid text - - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+------------------------+------------------+---------------------+------ - pg_catalog | has_database_privilege | boolean | oid, text | func -(1 row) - -\dfa bit* small* - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+---------+------------------+---------------------+------ - pg_catalog | bit_and | smallint | smallint | agg - pg_catalog | bit_or | smallint | smallint | agg - pg_catalog | bit_xor | smallint | smallint | agg -(3 rows) - -\df *._pg_expandarray - List of functions - Schema | Name | Result data type | Argument data types | Type ---------------------+-----------------+------------------+-------------------------------------------+------ - information_schema | _pg_expandarray | SETOF record | anyarray, OUT x anyelement, OUT n integer | func -(1 row) - -\do - pg_catalog.int4 - List of operators - Schema | Name | Left arg type | Right arg type | Result type | Description -------------+------+---------------+----------------+-------------+------------- - pg_catalog | - | | integer | integer | negate -(1 row) - -\do && anyarray * - List of operators - Schema | Name | Left arg type | Right arg type | Result type | Description -------------+------+---------------+----------------+-------------+------------- - pg_catalog | && | anyarray | anyarray | boolean | overlaps -(1 row) - --- check \df+ --- we have to use functions with a predictable owner name, so make a role -create role regress_psql_user superuser; -begin; -set session authorization regress_psql_user; -create function psql_df_internal (float8) - returns float8 - language internal immutable parallel safe strict - as 'dsin'; -create function psql_df_sql (x integer) - returns integer - security definer - begin atomic select x + 1; end; -create function psql_df_plpgsql () - returns void - language plpgsql - as $$ begin return; end; $$; -comment on function psql_df_plpgsql () is 'some comment'; -\df+ psql_df_* - List of functions - Schema | Name | Result data type | Argument data types | Type | Volatility | Parallel | Owner | Security | Access privileges | Language | Internal name | Description ---------+------------------+------------------+---------------------+------+------------+----------+-------------------+----------+-------------------+----------+---------------+-------------- - public | psql_df_internal | double precision | double precision | func | immutable | safe | regress_psql_user | invoker | | internal | dsin | - public | psql_df_plpgsql | void | | func | volatile | unsafe | 
regress_psql_user | invoker | | plpgsql | | some comment - public | psql_df_sql | integer | x integer | func | volatile | unsafe | regress_psql_user | definer | | sql | | -(3 rows) - -rollback; -drop role regress_psql_user; --- check \sf -\sf information_schema._pg_index_position -CREATE OR REPLACE FUNCTION information_schema._pg_index_position(oid, smallint) - RETURNS integer - LANGUAGE sql - STABLE STRICT -BEGIN ATOMIC - SELECT (ss.a).n AS n - FROM ( SELECT information_schema._pg_expandarray(pg_index.indkey) AS a - FROM pg_index - WHERE (pg_index.indexrelid = $1)) ss - WHERE ((ss.a).x = $2); -END -\sf+ information_schema._pg_index_position - CREATE OR REPLACE FUNCTION information_schema._pg_index_position(oid, smallint) - RETURNS integer - LANGUAGE sql - STABLE STRICT -1 BEGIN ATOMIC -2 SELECT (ss.a).n AS n -3 FROM ( SELECT information_schema._pg_expandarray(pg_index.indkey) AS a -4 FROM pg_index -5 WHERE (pg_index.indexrelid = $1)) ss -6 WHERE ((ss.a).x = $2); -7 END -\sf+ interval_pl_time - CREATE OR REPLACE FUNCTION pg_catalog.interval_pl_time(interval, time without time zone) - RETURNS time without time zone - LANGUAGE sql - IMMUTABLE PARALLEL SAFE STRICT COST 1 -1 RETURN ($2 + $1) -\sf ts_debug(text); -CREATE OR REPLACE FUNCTION pg_catalog.ts_debug(document text, OUT alias text, OUT description text, OUT token text, OUT dictionaries regdictionary[], OUT dictionary regdictionary, OUT lexemes text[]) - RETURNS SETOF record - LANGUAGE sql - STABLE PARALLEL SAFE STRICT -BEGIN ATOMIC - SELECT ts_debug.alias, - ts_debug.description, - ts_debug.token, - ts_debug.dictionaries, - ts_debug.dictionary, - ts_debug.lexemes - FROM ts_debug(get_current_ts_config(), ts_debug.document) ts_debug(alias, description, token, dictionaries, dictionary, lexemes); -END -\sf+ ts_debug(text) - CREATE OR REPLACE FUNCTION pg_catalog.ts_debug(document text, OUT alias text, OUT description text, OUT token text, OUT dictionaries regdictionary[], OUT dictionary regdictionary, OUT lexemes text[]) - RETURNS SETOF record - LANGUAGE sql - STABLE PARALLEL SAFE STRICT -1 BEGIN ATOMIC -2 SELECT ts_debug.alias, -3 ts_debug.description, -4 ts_debug.token, -5 ts_debug.dictionaries, -6 ts_debug.dictionary, -7 ts_debug.lexemes -8 FROM ts_debug(get_current_ts_config(), ts_debug.document) ts_debug(alias, description, token, dictionaries, dictionary, lexemes); -9 END --- AUTOCOMMIT -CREATE TABLE ac_test (a int); -\set AUTOCOMMIT off -INSERT INTO ac_test VALUES (1); -COMMIT; -SELECT * FROM ac_test; - a ---- - 1 -(1 row) - -COMMIT; -INSERT INTO ac_test VALUES (2); -ROLLBACK; -SELECT * FROM ac_test; - a ---- - 1 -(1 row) - -COMMIT; -BEGIN; -INSERT INTO ac_test VALUES (3); -COMMIT; -SELECT * FROM ac_test; - a ---- - 1 - 3 -(2 rows) - -COMMIT; -BEGIN; -INSERT INTO ac_test VALUES (4); -ROLLBACK; -SELECT * FROM ac_test; - a ---- - 1 - 3 -(2 rows) - -COMMIT; -\set AUTOCOMMIT on -DROP TABLE ac_test; -SELECT * FROM ac_test; -- should be gone now -ERROR: relation "ac_test" does not exist -LINE 1: SELECT * FROM ac_test; - ^ --- ON_ERROR_ROLLBACK -\set ON_ERROR_ROLLBACK on -CREATE TABLE oer_test (a int); -BEGIN; -INSERT INTO oer_test VALUES (1); -INSERT INTO oer_test VALUES ('foo'); -ERROR: invalid input syntax for type integer: "foo" -LINE 1: INSERT INTO oer_test VALUES ('foo'); - ^ -INSERT INTO oer_test VALUES (3); -COMMIT; -SELECT * FROM oer_test; - a ---- - 1 - 3 -(2 rows) - -BEGIN; -INSERT INTO oer_test VALUES (4); -ROLLBACK; -SELECT * FROM oer_test; - a ---- - 1 - 3 -(2 rows) - -BEGIN; -INSERT INTO oer_test VALUES (5); -COMMIT AND 
CHAIN; -INSERT INTO oer_test VALUES (6); -COMMIT; -SELECT * FROM oer_test; - a ---- - 1 - 3 - 5 - 6 -(4 rows) - -DROP TABLE oer_test; -\set ON_ERROR_ROLLBACK off --- ECHO errors -\set ECHO errors -ERROR: relation "notexists" does not exist -LINE 1: SELECT * FROM notexists; - ^ -STATEMENT: SELECT * FROM notexists; --- --- combined queries --- -CREATE FUNCTION warn(msg TEXT) RETURNS BOOLEAN LANGUAGE plpgsql -AS $$ - BEGIN RAISE NOTICE 'warn %', msg ; RETURN TRUE ; END -$$; --- show both -SELECT 1 AS one \; SELECT warn('1.5') \; SELECT 2 AS two ; -NOTICE: warn 1.5 -CONTEXT: PL/pgSQL function warn(text) line 2 at RAISE - one ------ - 1 -(1 row) - - warn ------- - t -(1 row) - - two ------ - 2 -(1 row) - --- \gset applies to last query only -SELECT 3 AS three \; SELECT warn('3.5') \; SELECT 4 AS four \gset -NOTICE: warn 3.5 -CONTEXT: PL/pgSQL function warn(text) line 2 at RAISE - three -------- - 3 -(1 row) - - warn ------- - t -(1 row) - -\echo :three :four -:three 4 --- syntax error stops all processing -SELECT 5 \; SELECT 6 + \; SELECT warn('6.5') \; SELECT 7 ; -ERROR: syntax error at or near ";" -LINE 1: SELECT 5 ; SELECT 6 + ; SELECT warn('6.5') ; SELECT 7 ; - ^ --- with aborted transaction, stop on first error -BEGIN \; SELECT 8 AS eight \; SELECT 9/0 AS nine \; ROLLBACK \; SELECT 10 AS ten ; - eight -------- - 8 -(1 row) - -ERROR: division by zero --- close previously aborted transaction -ROLLBACK; --- miscellaneous SQL commands --- (non SELECT output is sent to stderr, thus is not shown in expected results) -SELECT 'ok' AS "begin" \; -CREATE TABLE psql_comics(s TEXT) \; -INSERT INTO psql_comics VALUES ('Calvin'), ('hobbes') \; -COPY psql_comics FROM STDIN \; -UPDATE psql_comics SET s = 'Hobbes' WHERE s = 'hobbes' \; -DELETE FROM psql_comics WHERE s = 'Moe' \; -COPY psql_comics TO STDOUT \; -TRUNCATE psql_comics \; -DROP TABLE psql_comics \; -SELECT 'ok' AS "done" ; - begin -------- - ok -(1 row) - -Calvin -Susie -Hobbes - done ------- - ok -(1 row) - -\set SHOW_ALL_RESULTS off -SELECT 1 AS one \; SELECT warn('1.5') \; SELECT 2 AS two ; -NOTICE: warn 1.5 -CONTEXT: PL/pgSQL function warn(text) line 2 at RAISE - two ------ - 2 -(1 row) - -\set SHOW_ALL_RESULTS on -DROP FUNCTION warn(TEXT); --- --- \g with file --- -\getenv abs_builddir PG_ABS_BUILDDIR -\set g_out_file :abs_builddir '/results/psql-output1' -CREATE TEMPORARY TABLE reload_output( - lineno int NOT NULL GENERATED ALWAYS AS IDENTITY, - line text -); -SELECT 1 AS a \g :g_out_file -COPY reload_output(line) FROM :'g_out_file'; -SELECT 2 AS b\; SELECT 3 AS c\; SELECT 4 AS d \g :g_out_file -COPY reload_output(line) FROM :'g_out_file'; -COPY (SELECT 'foo') TO STDOUT \; COPY (SELECT 'bar') TO STDOUT \g :g_out_file -COPY reload_output(line) FROM :'g_out_file'; -SELECT line FROM reload_output ORDER BY lineno; - line ---------- - a - --- - 1 - (1 row) - - b - --- - 2 - (1 row) - - c - --- - 3 - (1 row) - - d - --- - 4 - (1 row) - - foo - bar -(22 rows) - -TRUNCATE TABLE reload_output; --- --- \o with file --- -\set o_out_file :abs_builddir '/results/psql-output2' -\o :o_out_file -SELECT max(unique1) FROM onek; -SELECT 1 AS a\; SELECT 2 AS b\; SELECT 3 AS c; --- COPY TO file --- The data goes to :g_out_file and the status to :o_out_file -\set QUIET false -COPY (SELECT unique1 FROM onek ORDER BY unique1 LIMIT 10) TO :'g_out_file'; --- DML command status -UPDATE onek SET unique1 = unique1 WHERE false; -\set QUIET true -\o --- Check the contents of the files generated. 
-COPY reload_output(line) FROM :'g_out_file'; -SELECT line FROM reload_output ORDER BY lineno; - line ------- - 0 - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 -(10 rows) - -TRUNCATE TABLE reload_output; -COPY reload_output(line) FROM :'o_out_file'; -SELECT line FROM reload_output ORDER BY lineno; - line ----------- - max - ----- - 999 - (1 row) - - a - --- - 1 - (1 row) - - b - --- - 2 - (1 row) - - c - --- - 3 - (1 row) - - COPY 10 - UPDATE 0 -(22 rows) - -TRUNCATE TABLE reload_output; --- Multiple COPY TO STDOUT with output file -\o :o_out_file --- The data goes to :o_out_file with no status generated. -COPY (SELECT 'foo1') TO STDOUT \; COPY (SELECT 'bar1') TO STDOUT; --- Combination of \o and \g file with multiple COPY queries. -COPY (SELECT 'foo2') TO STDOUT \; COPY (SELECT 'bar2') TO STDOUT \g :g_out_file -\o --- Check the contents of the files generated. -COPY reload_output(line) FROM :'g_out_file'; -SELECT line FROM reload_output ORDER BY lineno; - line ------- - foo2 - bar2 -(2 rows) - -TRUNCATE TABLE reload_output; -COPY reload_output(line) FROM :'o_out_file'; -SELECT line FROM reload_output ORDER BY lineno; - line ------- - foo1 - bar1 -(2 rows) - -DROP TABLE reload_output; --- --- AUTOCOMMIT and combined queries --- -\set AUTOCOMMIT off -\echo '# AUTOCOMMIT:' :AUTOCOMMIT -# AUTOCOMMIT: off --- BEGIN is now implicit -CREATE TABLE foo(s TEXT) \; -ROLLBACK; -CREATE TABLE foo(s TEXT) \; -INSERT INTO foo(s) VALUES ('hello'), ('world') \; -COMMIT; -DROP TABLE foo \; -ROLLBACK; --- table foo is still there -SELECT * FROM foo ORDER BY 1 \; -DROP TABLE foo \; -COMMIT; - s -------- - hello - world -(2 rows) - -\set AUTOCOMMIT on -\echo '# AUTOCOMMIT:' :AUTOCOMMIT -# AUTOCOMMIT: on --- BEGIN now explicit for multi-statement transactions -BEGIN \; -CREATE TABLE foo(s TEXT) \; -INSERT INTO foo(s) VALUES ('hello'), ('world') \; -COMMIT; -BEGIN \; -DROP TABLE foo \; -ROLLBACK \; --- implicit transactions -SELECT * FROM foo ORDER BY 1 \; -DROP TABLE foo; - s -------- - hello - world -(2 rows) - --- --- test ON_ERROR_ROLLBACK and combined queries --- -CREATE FUNCTION psql_error(msg TEXT) RETURNS BOOLEAN AS $$ - BEGIN - RAISE EXCEPTION 'error %', msg; - END; -$$ LANGUAGE plpgsql; -\set ON_ERROR_ROLLBACK on -\echo '# ON_ERROR_ROLLBACK:' :ON_ERROR_ROLLBACK -# ON_ERROR_ROLLBACK: on -\echo '# AUTOCOMMIT:' :AUTOCOMMIT -# AUTOCOMMIT: on -BEGIN; -CREATE TABLE bla(s NO_SUCH_TYPE); -- fails -ERROR: type "no_such_type" does not exist -LINE 1: CREATE TABLE bla(s NO_SUCH_TYPE); - ^ -CREATE TABLE bla(s TEXT); -- succeeds -SELECT psql_error('oops!'); -- fails -ERROR: error oops! -CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE -INSERT INTO bla VALUES ('Calvin'), ('Hobbes'); -COMMIT; -SELECT * FROM bla ORDER BY 1; - s --------- - Calvin - Hobbes -(2 rows) - -BEGIN; -INSERT INTO bla VALUES ('Susie'); -- succeeds --- now with combined queries -INSERT INTO bla VALUES ('Rosalyn') \; -- will rollback -SELECT 'before error' AS show \; -- will show nevertheless! - SELECT psql_error('boum!') \; -- failure - SELECT 'after error' AS noshow; -- hidden by preceding error - show --------------- - before error -(1 row) - -ERROR: error boum! -CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE -INSERT INTO bla(s) VALUES ('Moe') \; -- will rollback - SELECT psql_error('bam!'); -ERROR: error bam! 
-CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE -INSERT INTO bla VALUES ('Miss Wormwood'); -- succeeds -COMMIT; -SELECT * FROM bla ORDER BY 1; - s ---------------- - Calvin - Hobbes - Miss Wormwood - Susie -(4 rows) - --- some with autocommit off -\set AUTOCOMMIT off -\echo '# AUTOCOMMIT:' :AUTOCOMMIT -# AUTOCOMMIT: off --- implicit BEGIN -INSERT INTO bla VALUES ('Dad'); -- succeeds -SELECT psql_error('bad!'); -- implicit partial rollback -ERROR: error bad! -CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE -INSERT INTO bla VALUES ('Mum') \; -- will rollback -SELECT COUNT(*) AS "#mum" -FROM bla WHERE s = 'Mum' \; -- but be counted here -SELECT psql_error('bad!'); -- implicit partial rollback - #mum ------- - 1 -(1 row) - -ERROR: error bad! -CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE -COMMIT; -SELECT COUNT(*) AS "#mum" -FROM bla WHERE s = 'Mum' \; -- no mum here -SELECT * FROM bla ORDER BY 1; - #mum ------- - 0 -(1 row) - - s ---------------- - Calvin - Dad - Hobbes - Miss Wormwood - Susie -(5 rows) - -COMMIT; --- reset all -\set AUTOCOMMIT on -\set ON_ERROR_ROLLBACK off -\echo '# final ON_ERROR_ROLLBACK:' :ON_ERROR_ROLLBACK -# final ON_ERROR_ROLLBACK: off -DROP TABLE bla; -DROP FUNCTION psql_error; --- check describing invalid multipart names -\dA regression.heap -improper qualified name (too many dotted names): regression.heap -\dA nonesuch.heap -improper qualified name (too many dotted names): nonesuch.heap -\dt host.regression.pg_catalog.pg_class -improper qualified name (too many dotted names): host.regression.pg_catalog.pg_class -\dt |.pg_catalog.pg_class -cross-database references are not implemented: |.pg_catalog.pg_class -\dt nonesuch.pg_catalog.pg_class -cross-database references are not implemented: nonesuch.pg_catalog.pg_class -\da host.regression.pg_catalog.sum -improper qualified name (too many dotted names): host.regression.pg_catalog.sum -\da +.pg_catalog.sum -cross-database references are not implemented: +.pg_catalog.sum -\da nonesuch.pg_catalog.sum -cross-database references are not implemented: nonesuch.pg_catalog.sum -\dAc nonesuch.brin -improper qualified name (too many dotted names): nonesuch.brin -\dAc regression.brin -improper qualified name (too many dotted names): regression.brin -\dAf nonesuch.brin -improper qualified name (too many dotted names): nonesuch.brin -\dAf regression.brin -improper qualified name (too many dotted names): regression.brin -\dAo nonesuch.brin -improper qualified name (too many dotted names): nonesuch.brin -\dAo regression.brin -improper qualified name (too many dotted names): regression.brin -\dAp nonesuch.brin -improper qualified name (too many dotted names): nonesuch.brin -\dAp regression.brin -improper qualified name (too many dotted names): regression.brin -\db nonesuch.pg_default -improper qualified name (too many dotted names): nonesuch.pg_default -\db regression.pg_default -improper qualified name (too many dotted names): regression.pg_default -\dc host.regression.public.conversion -improper qualified name (too many dotted names): host.regression.public.conversion -\dc (.public.conversion -cross-database references are not implemented: (.public.conversion -\dc nonesuch.public.conversion -cross-database references are not implemented: nonesuch.public.conversion -\dC host.regression.pg_catalog.int8 -improper qualified name (too many dotted names): host.regression.pg_catalog.int8 -\dC ).pg_catalog.int8 -cross-database references are not implemented: ).pg_catalog.int8 -\dC 
nonesuch.pg_catalog.int8 -cross-database references are not implemented: nonesuch.pg_catalog.int8 -\dd host.regression.pg_catalog.pg_class -improper qualified name (too many dotted names): host.regression.pg_catalog.pg_class -\dd [.pg_catalog.pg_class -cross-database references are not implemented: [.pg_catalog.pg_class -\dd nonesuch.pg_catalog.pg_class -cross-database references are not implemented: nonesuch.pg_catalog.pg_class -\dD host.regression.public.gtestdomain1 -improper qualified name (too many dotted names): host.regression.public.gtestdomain1 -\dD ].public.gtestdomain1 -cross-database references are not implemented: ].public.gtestdomain1 -\dD nonesuch.public.gtestdomain1 -cross-database references are not implemented: nonesuch.public.gtestdomain1 -\ddp host.regression.pg_catalog.pg_class -improper qualified name (too many dotted names): host.regression.pg_catalog.pg_class -\ddp {.pg_catalog.pg_class -cross-database references are not implemented: {.pg_catalog.pg_class -\ddp nonesuch.pg_catalog.pg_class -cross-database references are not implemented: nonesuch.pg_catalog.pg_class -\dE host.regression.public.ft -improper qualified name (too many dotted names): host.regression.public.ft -\dE }.public.ft -cross-database references are not implemented: }.public.ft -\dE nonesuch.public.ft -cross-database references are not implemented: nonesuch.public.ft -\di host.regression.public.tenk1_hundred -improper qualified name (too many dotted names): host.regression.public.tenk1_hundred -\di ..public.tenk1_hundred -improper qualified name (too many dotted names): ..public.tenk1_hundred -\di nonesuch.public.tenk1_hundred -cross-database references are not implemented: nonesuch.public.tenk1_hundred -\dm host.regression.public.mvtest_bb -improper qualified name (too many dotted names): host.regression.public.mvtest_bb -\dm ^.public.mvtest_bb -cross-database references are not implemented: ^.public.mvtest_bb -\dm nonesuch.public.mvtest_bb -cross-database references are not implemented: nonesuch.public.mvtest_bb -\ds host.regression.public.check_seq -improper qualified name (too many dotted names): host.regression.public.check_seq -\ds regression|mydb.public.check_seq -cross-database references are not implemented: regression|mydb.public.check_seq -\ds nonesuch.public.check_seq -cross-database references are not implemented: nonesuch.public.check_seq -\dt host.regression.public.b_star -improper qualified name (too many dotted names): host.regression.public.b_star -\dt regres+ion.public.b_star -cross-database references are not implemented: regres+ion.public.b_star -\dt nonesuch.public.b_star -cross-database references are not implemented: nonesuch.public.b_star -\dv host.regression.public.shoe -improper qualified name (too many dotted names): host.regression.public.shoe -\dv regress(ion).public.shoe -cross-database references are not implemented: regress(ion).public.shoe -\dv nonesuch.public.shoe -cross-database references are not implemented: nonesuch.public.shoe -\des nonesuch.server -improper qualified name (too many dotted names): nonesuch.server -\des regression.server -improper qualified name (too many dotted names): regression.server -\des nonesuch.server -improper qualified name (too many dotted names): nonesuch.server -\des regression.server -improper qualified name (too many dotted names): regression.server -\des nonesuch.username -improper qualified name (too many dotted names): nonesuch.username -\des regression.username -improper qualified name (too many dotted names): 
regression.username -\dew nonesuch.fdw -improper qualified name (too many dotted names): nonesuch.fdw -\dew regression.fdw -improper qualified name (too many dotted names): regression.fdw -\df host.regression.public.namelen -improper qualified name (too many dotted names): host.regression.public.namelen -\df regres[qrstuv]ion.public.namelen -cross-database references are not implemented: regres[qrstuv]ion.public.namelen -\df nonesuch.public.namelen -cross-database references are not implemented: nonesuch.public.namelen -\dF host.regression.pg_catalog.arabic -improper qualified name (too many dotted names): host.regression.pg_catalog.arabic -\dF regres{1,2}ion.pg_catalog.arabic -cross-database references are not implemented: regres{1,2}ion.pg_catalog.arabic -\dF nonesuch.pg_catalog.arabic -cross-database references are not implemented: nonesuch.pg_catalog.arabic -\dFd host.regression.pg_catalog.arabic_stem -improper qualified name (too many dotted names): host.regression.pg_catalog.arabic_stem -\dFd regres?ion.pg_catalog.arabic_stem -cross-database references are not implemented: regres?ion.pg_catalog.arabic_stem -\dFd nonesuch.pg_catalog.arabic_stem -cross-database references are not implemented: nonesuch.pg_catalog.arabic_stem -\dFp host.regression.pg_catalog.default -improper qualified name (too many dotted names): host.regression.pg_catalog.default -\dFp ^regression.pg_catalog.default -cross-database references are not implemented: ^regression.pg_catalog.default -\dFp nonesuch.pg_catalog.default -cross-database references are not implemented: nonesuch.pg_catalog.default -\dFt host.regression.pg_catalog.ispell -improper qualified name (too many dotted names): host.regression.pg_catalog.ispell -\dFt regression$.pg_catalog.ispell -cross-database references are not implemented: regression$.pg_catalog.ispell -\dFt nonesuch.pg_catalog.ispell -cross-database references are not implemented: nonesuch.pg_catalog.ispell -\dg nonesuch.pg_database_owner -improper qualified name (too many dotted names): nonesuch.pg_database_owner -\dg regression.pg_database_owner -improper qualified name (too many dotted names): regression.pg_database_owner -\dL host.regression.plpgsql -improper qualified name (too many dotted names): host.regression.plpgsql -\dL *.plpgsql -cross-database references are not implemented: *.plpgsql -\dL nonesuch.plpgsql -cross-database references are not implemented: nonesuch.plpgsql -\dn host.regression.public -improper qualified name (too many dotted names): host.regression.public -\dn """".public -cross-database references are not implemented: """".public -\dn nonesuch.public -cross-database references are not implemented: nonesuch.public -\do host.regression.public.!=- -improper qualified name (too many dotted names): host.regression.public.!=- -\do "regression|mydb".public.!=- -cross-database references are not implemented: "regression|mydb".public.!=- -\do nonesuch.public.!=- -cross-database references are not implemented: nonesuch.public.!=- -\dO host.regression.pg_catalog.POSIX -improper qualified name (too many dotted names): host.regression.pg_catalog.POSIX -\dO .pg_catalog.POSIX -cross-database references are not implemented: .pg_catalog.POSIX -\dO nonesuch.pg_catalog.POSIX -cross-database references are not implemented: nonesuch.pg_catalog.POSIX -\dp host.regression.public.a_star -improper qualified name (too many dotted names): host.regression.public.a_star -\dp "regres+ion".public.a_star -cross-database references are not implemented: "regres+ion".public.a_star -\dp 
nonesuch.public.a_star -cross-database references are not implemented: nonesuch.public.a_star -\dP host.regression.public.mlparted -improper qualified name (too many dotted names): host.regression.public.mlparted -\dP "regres(sion)".public.mlparted -cross-database references are not implemented: "regres(sion)".public.mlparted -\dP nonesuch.public.mlparted -cross-database references are not implemented: nonesuch.public.mlparted -\drds nonesuch.lc_messages -improper qualified name (too many dotted names): nonesuch.lc_messages -\drds regression.lc_messages -improper qualified name (too many dotted names): regression.lc_messages -\dRp public.mypub -improper qualified name (too many dotted names): public.mypub -\dRp regression.mypub -improper qualified name (too many dotted names): regression.mypub -\dRs public.mysub -improper qualified name (too many dotted names): public.mysub -\dRs regression.mysub -improper qualified name (too many dotted names): regression.mysub -\dT host.regression.public.widget -improper qualified name (too many dotted names): host.regression.public.widget -\dT "regression{1,2}".public.widget -cross-database references are not implemented: "regression{1,2}".public.widget -\dT nonesuch.public.widget -cross-database references are not implemented: nonesuch.public.widget -\dx regression.plpgsql -improper qualified name (too many dotted names): regression.plpgsql -\dx nonesuch.plpgsql -improper qualified name (too many dotted names): nonesuch.plpgsql -\dX host.regression.public.func_deps_stat -improper qualified name (too many dotted names): host.regression.public.func_deps_stat -\dX "^regression$".public.func_deps_stat -cross-database references are not implemented: "^regression$".public.func_deps_stat -\dX nonesuch.public.func_deps_stat -cross-database references are not implemented: nonesuch.public.func_deps_stat -\dy regression.myevt -improper qualified name (too many dotted names): regression.myevt -\dy nonesuch.myevt -improper qualified name (too many dotted names): nonesuch.myevt --- check that dots within quoted name segments are not counted -\dA "no.such.access.method" -List of access methods - Name | Type -------+------ -(0 rows) - -\dt "no.such.table.relation" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\da "no.such.aggregate.function" - List of aggregate functions - Schema | Name | Result data type | Argument data types | Description ---------+------+------------------+---------------------+------------- -(0 rows) - -\dAc "no.such.operator.class" - List of operator classes - AM | Input type | Storage type | Operator class | Default? 
-----+------------+--------------+----------------+---------- -(0 rows) - -\dAf "no.such.operator.family" - List of operator families - AM | Operator family | Applicable types -----+-----------------+------------------ -(0 rows) - -\dAo "no.such.operator.of.operator.family" - List of operators of operator families - AM | Operator family | Operator | Strategy | Purpose -----+-----------------+----------+----------+--------- -(0 rows) - -\dAp "no.such.operator.support.function.of.operator.family" - List of support functions of operator families - AM | Operator family | Registered left type | Registered right type | Number | Function -----+-----------------+----------------------+-----------------------+--------+---------- -(0 rows) - -\db "no.such.tablespace" - List of tablespaces - Name | Owner | Location -------+-------+---------- -(0 rows) - -\dc "no.such.conversion" - List of conversions - Schema | Name | Source | Destination | Default? ---------+------+--------+-------------+---------- -(0 rows) - -\dC "no.such.cast" - List of casts - Source type | Target type | Function | Implicit? --------------+-------------+----------+----------- -(0 rows) - -\dd "no.such.object.description" - Object descriptions - Schema | Name | Object | Description ---------+------+--------+------------- -(0 rows) - -\dD "no.such.domain" - List of domains - Schema | Name | Type | Collation | Nullable | Default | Check ---------+------+------+-----------+----------+---------+------- -(0 rows) - -\ddp "no.such.default.access.privilege" - Default access privileges - Owner | Schema | Type | Access privileges --------+--------+------+------------------- -(0 rows) - -\di "no.such.index.relation" - List of relations - Schema | Name | Type | Owner | Table ---------+------+------+-------+------- -(0 rows) - -\dm "no.such.materialized.view" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\ds "no.such.relation" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\dt "no.such.relation" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\dv "no.such.relation" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\des "no.such.foreign.server" - List of foreign servers - Name | Owner | Foreign-data wrapper -------+-------+---------------------- -(0 rows) - -\dew "no.such.foreign.data.wrapper" - List of foreign-data wrappers - Name | Owner | Handler | Validator -------+-------+---------+----------- -(0 rows) - -\df "no.such.function" - List of functions - Schema | Name | Result data type | Argument data types | Type ---------+------+------------------+---------------------+------ -(0 rows) - -\dF "no.such.text.search.configuration" -List of text search configurations - Schema | Name | Description ---------+------+------------- -(0 rows) - -\dFd "no.such.text.search.dictionary" -List of text search dictionaries - Schema | Name | Description ---------+------+------------- -(0 rows) - -\dFp "no.such.text.search.parser" - List of text search parsers - Schema | Name | Description ---------+------+------------- -(0 rows) - -\dFt "no.such.text.search.template" -List of text search templates - Schema | Name | Description ---------+------+------------- -(0 rows) - -\dg "no.such.role" - List of roles - Role name | Attributes ------------+------------ - -\dL "no.such.language" - List of languages - Name | Owner | Trusted | Description 
-------+-------+---------+------------- -(0 rows) - -\dn "no.such.schema" -List of schemas - Name | Owner -------+------- -(0 rows) - -\do "no.such.operator" - List of operators - Schema | Name | Left arg type | Right arg type | Result type | Description ---------+------+---------------+----------------+-------------+------------- -(0 rows) - -\dO "no.such.collation" - List of collations - Schema | Name | Provider | Collate | Ctype | Locale | ICU Rules | Deterministic? ---------+------+----------+---------+-------+--------+-----------+---------------- -(0 rows) - -\dp "no.such.access.privilege" - Access privileges - Schema | Name | Type | Access privileges | Column privileges | Policies ---------+------+------+-------------------+-------------------+---------- -(0 rows) - -\dP "no.such.partitioned.relation" - List of partitioned relations - Schema | Name | Owner | Type | Parent name | Table ---------+------+-------+------+-------------+------- -(0 rows) - -\drds "no.such.setting" - List of settings - Role | Database | Settings -------+----------+---------- -(0 rows) - -\dRp "no.such.publication" - List of publications - Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root -------+-------+------------+---------+---------+---------+-----------+---------- -(0 rows) - -\dRs "no.such.subscription" - List of subscriptions - Name | Owner | Enabled | Publication -------+-------+---------+------------- -(0 rows) - -\dT "no.such.data.type" - List of data types - Schema | Name | Description ---------+------+------------- -(0 rows) - -\dx "no.such.installed.extension" - List of installed extensions - Name | Version | Schema | Description -------+---------+--------+------------- -(0 rows) - -\dX "no.such.extended.statistics" - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+------+------------+-----------+--------------+----- -(0 rows) - -\dy "no.such.event.trigger" - List of event triggers - Name | Event | Owner | Enabled | Function | Tags -------+-------+-------+---------+----------+------ -(0 rows) - --- again, but with dotted schema qualifications. 
-\dA "no.such.schema"."no.such.access.method" -improper qualified name (too many dotted names): "no.such.schema"."no.such.access.method" -\dt "no.such.schema"."no.such.table.relation" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\da "no.such.schema"."no.such.aggregate.function" - List of aggregate functions - Schema | Name | Result data type | Argument data types | Description ---------+------+------------------+---------------------+------------- -(0 rows) - -\dAc "no.such.schema"."no.such.operator.class" -improper qualified name (too many dotted names): "no.such.schema"."no.such.operator.class" -\dAf "no.such.schema"."no.such.operator.family" -improper qualified name (too many dotted names): "no.such.schema"."no.such.operator.family" -\dAo "no.such.schema"."no.such.operator.of.operator.family" -improper qualified name (too many dotted names): "no.such.schema"."no.such.operator.of.operator.family" -\dAp "no.such.schema"."no.such.operator.support.function.of.operator.family" -improper qualified name (too many dotted names): "no.such.schema"."no.such.operator.support.function.of.operator.family" -\db "no.such.schema"."no.such.tablespace" -improper qualified name (too many dotted names): "no.such.schema"."no.such.tablespace" -\dc "no.such.schema"."no.such.conversion" - List of conversions - Schema | Name | Source | Destination | Default? ---------+------+--------+-------------+---------- -(0 rows) - -\dC "no.such.schema"."no.such.cast" - List of casts - Source type | Target type | Function | Implicit? --------------+-------------+----------+----------- -(0 rows) - -\dd "no.such.schema"."no.such.object.description" - Object descriptions - Schema | Name | Object | Description ---------+------+--------+------------- -(0 rows) - -\dD "no.such.schema"."no.such.domain" - List of domains - Schema | Name | Type | Collation | Nullable | Default | Check ---------+------+------+-----------+----------+---------+------- -(0 rows) - -\ddp "no.such.schema"."no.such.default.access.privilege" - Default access privileges - Owner | Schema | Type | Access privileges --------+--------+------+------------------- -(0 rows) - -\di "no.such.schema"."no.such.index.relation" - List of relations - Schema | Name | Type | Owner | Table ---------+------+------+-------+------- -(0 rows) - -\dm "no.such.schema"."no.such.materialized.view" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\ds "no.such.schema"."no.such.relation" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\dt "no.such.schema"."no.such.relation" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\dv "no.such.schema"."no.such.relation" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\des "no.such.schema"."no.such.foreign.server" -improper qualified name (too many dotted names): "no.such.schema"."no.such.foreign.server" -\dew "no.such.schema"."no.such.foreign.data.wrapper" -improper qualified name (too many dotted names): "no.such.schema"."no.such.foreign.data.wrapper" -\df "no.such.schema"."no.such.function" - List of functions - Schema | Name | Result data type | Argument data types | Type ---------+------+------------------+---------------------+------ -(0 rows) - -\dF "no.such.schema"."no.such.text.search.configuration" -List of text search configurations - Schema | Name | Description ---------+------+------------- -(0 
rows) - -\dFd "no.such.schema"."no.such.text.search.dictionary" -List of text search dictionaries - Schema | Name | Description ---------+------+------------- -(0 rows) - -\dFp "no.such.schema"."no.such.text.search.parser" - List of text search parsers - Schema | Name | Description ---------+------+------------- -(0 rows) - -\dFt "no.such.schema"."no.such.text.search.template" -List of text search templates - Schema | Name | Description ---------+------+------------- -(0 rows) - -\dg "no.such.schema"."no.such.role" -improper qualified name (too many dotted names): "no.such.schema"."no.such.role" -\dL "no.such.schema"."no.such.language" -cross-database references are not implemented: "no.such.schema"."no.such.language" -\do "no.such.schema"."no.such.operator" - List of operators - Schema | Name | Left arg type | Right arg type | Result type | Description ---------+------+---------------+----------------+-------------+------------- -(0 rows) - -\dO "no.such.schema"."no.such.collation" - List of collations - Schema | Name | Provider | Collate | Ctype | Locale | ICU Rules | Deterministic? ---------+------+----------+---------+-------+--------+-----------+---------------- -(0 rows) - -\dp "no.such.schema"."no.such.access.privilege" - Access privileges - Schema | Name | Type | Access privileges | Column privileges | Policies ---------+------+------+-------------------+-------------------+---------- -(0 rows) - -\dP "no.such.schema"."no.such.partitioned.relation" - List of partitioned relations - Schema | Name | Owner | Type | Parent name | Table ---------+------+-------+------+-------------+------- -(0 rows) - -\drds "no.such.schema"."no.such.setting" -improper qualified name (too many dotted names): "no.such.schema"."no.such.setting" -\dRp "no.such.schema"."no.such.publication" -improper qualified name (too many dotted names): "no.such.schema"."no.such.publication" -\dRs "no.such.schema"."no.such.subscription" -improper qualified name (too many dotted names): "no.such.schema"."no.such.subscription" -\dT "no.such.schema"."no.such.data.type" - List of data types - Schema | Name | Description ---------+------+------------- -(0 rows) - -\dx "no.such.schema"."no.such.installed.extension" -improper qualified name (too many dotted names): "no.such.schema"."no.such.installed.extension" -\dX "no.such.schema"."no.such.extended.statistics" - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+------+------------+-----------+--------------+----- -(0 rows) - -\dy "no.such.schema"."no.such.event.trigger" -improper qualified name (too many dotted names): "no.such.schema"."no.such.event.trigger" --- again, but with current database and dotted schema qualifications. -\dt regression."no.such.schema"."no.such.table.relation" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\da regression."no.such.schema"."no.such.aggregate.function" - List of aggregate functions - Schema | Name | Result data type | Argument data types | Description ---------+------+------------------+---------------------+------------- -(0 rows) - -\dc regression."no.such.schema"."no.such.conversion" - List of conversions - Schema | Name | Source | Destination | Default? ---------+------+--------+-------------+---------- -(0 rows) - -\dC regression."no.such.schema"."no.such.cast" - List of casts - Source type | Target type | Function | Implicit? 
--------------+-------------+----------+----------- -(0 rows) - -\dd regression."no.such.schema"."no.such.object.description" - Object descriptions - Schema | Name | Object | Description ---------+------+--------+------------- -(0 rows) - -\dD regression."no.such.schema"."no.such.domain" - List of domains - Schema | Name | Type | Collation | Nullable | Default | Check ---------+------+------+-----------+----------+---------+------- -(0 rows) - -\di regression."no.such.schema"."no.such.index.relation" - List of relations - Schema | Name | Type | Owner | Table ---------+------+------+-------+------- -(0 rows) - -\dm regression."no.such.schema"."no.such.materialized.view" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\ds regression."no.such.schema"."no.such.relation" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\dt regression."no.such.schema"."no.such.relation" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\dv regression."no.such.schema"."no.such.relation" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\df regression."no.such.schema"."no.such.function" - List of functions - Schema | Name | Result data type | Argument data types | Type ---------+------+------------------+---------------------+------ -(0 rows) - -\dF regression."no.such.schema"."no.such.text.search.configuration" -List of text search configurations - Schema | Name | Description ---------+------+------------- -(0 rows) - -\dFd regression."no.such.schema"."no.such.text.search.dictionary" -List of text search dictionaries - Schema | Name | Description ---------+------+------------- -(0 rows) - -\dFp regression."no.such.schema"."no.such.text.search.parser" - List of text search parsers - Schema | Name | Description ---------+------+------------- -(0 rows) - -\dFt regression."no.such.schema"."no.such.text.search.template" -List of text search templates - Schema | Name | Description ---------+------+------------- -(0 rows) - -\do regression."no.such.schema"."no.such.operator" - List of operators - Schema | Name | Left arg type | Right arg type | Result type | Description ---------+------+---------------+----------------+-------------+------------- -(0 rows) - -\dO regression."no.such.schema"."no.such.collation" - List of collations - Schema | Name | Provider | Collate | Ctype | Locale | ICU Rules | Deterministic? ---------+------+----------+---------+-------+--------+-----------+---------------- -(0 rows) - -\dp regression."no.such.schema"."no.such.access.privilege" - Access privileges - Schema | Name | Type | Access privileges | Column privileges | Policies ---------+------+------+-------------------+-------------------+---------- -(0 rows) - -\dP regression."no.such.schema"."no.such.partitioned.relation" - List of partitioned relations - Schema | Name | Owner | Type | Parent name | Table ---------+------+-------+------+-------------+------- -(0 rows) - -\dT regression."no.such.schema"."no.such.data.type" - List of data types - Schema | Name | Description ---------+------+------------- -(0 rows) - -\dX regression."no.such.schema"."no.such.extended.statistics" - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+------+------------+-----------+--------------+----- -(0 rows) - --- again, but with dotted database and dotted schema qualifications. 
-\dt "no.such.database"."no.such.schema"."no.such.table.relation" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.table.relation" -\da "no.such.database"."no.such.schema"."no.such.aggregate.function" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.aggregate.function" -\dc "no.such.database"."no.such.schema"."no.such.conversion" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.conversion" -\dC "no.such.database"."no.such.schema"."no.such.cast" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.cast" -\dd "no.such.database"."no.such.schema"."no.such.object.description" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.object.description" -\dD "no.such.database"."no.such.schema"."no.such.domain" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.domain" -\ddp "no.such.database"."no.such.schema"."no.such.default.access.privilege" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.default.access.privilege" -\di "no.such.database"."no.such.schema"."no.such.index.relation" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.index.relation" -\dm "no.such.database"."no.such.schema"."no.such.materialized.view" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.materialized.view" -\ds "no.such.database"."no.such.schema"."no.such.relation" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.relation" -\dt "no.such.database"."no.such.schema"."no.such.relation" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.relation" -\dv "no.such.database"."no.such.schema"."no.such.relation" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.relation" -\df "no.such.database"."no.such.schema"."no.such.function" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.function" -\dF "no.such.database"."no.such.schema"."no.such.text.search.configuration" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.text.search.configuration" -\dFd "no.such.database"."no.such.schema"."no.such.text.search.dictionary" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.text.search.dictionary" -\dFp "no.such.database"."no.such.schema"."no.such.text.search.parser" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.text.search.parser" -\dFt "no.such.database"."no.such.schema"."no.such.text.search.template" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.text.search.template" -\do "no.such.database"."no.such.schema"."no.such.operator" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.operator" -\dO "no.such.database"."no.such.schema"."no.such.collation" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.collation" -\dp "no.such.database"."no.such.schema"."no.such.access.privilege" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.access.privilege" -\dP 
"no.such.database"."no.such.schema"."no.such.partitioned.relation" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.partitioned.relation" -\dT "no.such.database"."no.such.schema"."no.such.data.type" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.data.type" -\dX "no.such.database"."no.such.schema"."no.such.extended.statistics" -cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.extended.statistics" --- check \drg and \du -CREATE ROLE regress_du_role0; -CREATE ROLE regress_du_role1; -CREATE ROLE regress_du_role2; -CREATE ROLE regress_du_admin; -GRANT regress_du_role0 TO regress_du_admin WITH ADMIN TRUE; -GRANT regress_du_role1 TO regress_du_admin WITH ADMIN TRUE; -GRANT regress_du_role2 TO regress_du_admin WITH ADMIN TRUE; -GRANT regress_du_role0 TO regress_du_role1 WITH ADMIN TRUE, INHERIT TRUE, SET TRUE GRANTED BY regress_du_admin; -GRANT regress_du_role0 TO regress_du_role2 WITH ADMIN TRUE, INHERIT FALSE, SET FALSE GRANTED BY regress_du_admin; -GRANT regress_du_role1 TO regress_du_role2 WITH ADMIN TRUE , INHERIT FALSE, SET TRUE GRANTED BY regress_du_admin; -GRANT regress_du_role0 TO regress_du_role1 WITH ADMIN FALSE, INHERIT TRUE, SET FALSE GRANTED BY regress_du_role1; -GRANT regress_du_role0 TO regress_du_role2 WITH ADMIN FALSE, INHERIT TRUE , SET TRUE GRANTED BY regress_du_role1; -GRANT regress_du_role0 TO regress_du_role1 WITH ADMIN FALSE, INHERIT FALSE, SET TRUE GRANTED BY regress_du_role2; -GRANT regress_du_role0 TO regress_du_role2 WITH ADMIN FALSE, INHERIT FALSE, SET FALSE GRANTED BY regress_du_role2; -\drg regress_du_role* - List of role grants - Role name | Member of | Options | Grantor -------------------+------------------+---------------------+------------------ - regress_du_role1 | regress_du_role0 | ADMIN, INHERIT, SET | regress_du_admin - regress_du_role1 | regress_du_role0 | INHERIT | regress_du_role1 - regress_du_role1 | regress_du_role0 | SET | regress_du_role2 - regress_du_role2 | regress_du_role0 | ADMIN | regress_du_admin - regress_du_role2 | regress_du_role0 | INHERIT, SET | regress_du_role1 - regress_du_role2 | regress_du_role0 | | regress_du_role2 - regress_du_role2 | regress_du_role1 | ADMIN, SET | regress_du_admin -(7 rows) - -\du regress_du_role* - List of roles - Role name | Attributes -------------------+-------------- - regress_du_role0 | Cannot login - regress_du_role1 | Cannot login - regress_du_role2 | Cannot login - -DROP ROLE regress_du_role0; -DROP ROLE regress_du_role1; -DROP ROLE regress_du_role2; -DROP ROLE regress_du_admin; --- Test display of empty privileges. -BEGIN; --- Create an owner for tested objects because output contains owner name. 
-CREATE ROLE regress_zeropriv_owner; -SET LOCAL ROLE regress_zeropriv_owner; -CREATE DOMAIN regress_zeropriv_domain AS int; -REVOKE ALL ON DOMAIN regress_zeropriv_domain FROM CURRENT_USER, PUBLIC; -\dD+ regress_zeropriv_domain - List of domains - Schema | Name | Type | Collation | Nullable | Default | Check | Access privileges | Description ---------+-------------------------+---------+-----------+----------+---------+-------+-------------------+------------- - public | regress_zeropriv_domain | integer | | | | | (none) | -(1 row) - -CREATE PROCEDURE regress_zeropriv_proc() LANGUAGE sql AS ''; -REVOKE ALL ON PROCEDURE regress_zeropriv_proc() FROM CURRENT_USER, PUBLIC; -\df+ regress_zeropriv_proc - List of functions - Schema | Name | Result data type | Argument data types | Type | Volatility | Parallel | Owner | Security | Access privileges | Language | Internal name | Description ---------+-----------------------+------------------+---------------------+------+------------+----------+------------------------+----------+-------------------+----------+---------------+------------- - public | regress_zeropriv_proc | | | proc | volatile | unsafe | regress_zeropriv_owner | invoker | (none) | sql | | -(1 row) - -CREATE TABLE regress_zeropriv_tbl (a int); -REVOKE ALL ON TABLE regress_zeropriv_tbl FROM CURRENT_USER; -\dp regress_zeropriv_tbl - Access privileges - Schema | Name | Type | Access privileges | Column privileges | Policies ---------+----------------------+-------+-------------------+-------------------+---------- - public | regress_zeropriv_tbl | table | (none) | | -(1 row) - -CREATE TYPE regress_zeropriv_type AS (a int); -REVOKE ALL ON TYPE regress_zeropriv_type FROM CURRENT_USER, PUBLIC; -\dT+ regress_zeropriv_type - List of data types - Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description ---------+-----------------------+-----------------------+-------+----------+------------------------+-------------------+------------- - public | regress_zeropriv_type | regress_zeropriv_type | tuple | | regress_zeropriv_owner | (none) | -(1 row) - -ROLLBACK; --- Test display of default privileges with \pset null. -CREATE TABLE defprivs (a int); -\pset null '(default)' -\z defprivs - Access privileges - Schema | Name | Type | Access privileges | Column privileges | Policies ---------+----------+-------+-------------------+-------------------+---------- - public | defprivs | table | (default) | | -(1 row) - -\pset null '' -DROP TABLE defprivs; +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/psql_crosstab.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/psql_crosstab.out --- /tmp/cirrus-ci-build/src/test/regress/expected/psql_crosstab.out 2024-03-13 23:12:37.626533000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/psql_crosstab.out 2024-03-13 23:14:29.834517000 +0000 @@ -1,216 +1,2 @@ --- --- \crosstabview --- -CREATE TABLE ctv_data (v, h, c, i, d) AS -VALUES - ('v1','h2','foo', 3, '2015-04-01'::date), - ('v2','h1','bar', 3, '2015-01-02'), - ('v1','h0','baz', NULL, '2015-07-12'), - ('v0','h4','qux', 4, '2015-07-15'), - ('v0','h4','dbl', -3, '2014-12-15'), - ('v0',NULL,'qux', 5, '2014-07-15'), - ('v1','h2','quux',7, '2015-04-04'); --- make plans more stable -ANALYZE ctv_data; --- running \crosstabview after query uses query in buffer -SELECT v, EXTRACT(year FROM d), count(*) - FROM ctv_data - GROUP BY 1, 2 - ORDER BY 1, 2; - v | extract | count -----+---------+------- - v0 | 2014 | 2 - v0 | 2015 | 1 - v1 | 2015 | 3 - v2 | 2015 | 1 -(4 rows) - --- basic usage with 3 columns - \crosstabview - v | 2014 | 2015 -----+------+------ - v0 | 2 | 1 - v1 | | 3 - v2 | | 1 -(3 rows) - --- ordered months in horizontal header, quoted column name -SELECT v, to_char(d, 'Mon') AS "month name", EXTRACT(month FROM d) AS num, - count(*) FROM ctv_data GROUP BY 1,2,3 ORDER BY 1 - \crosstabview v "month name" 4 num - v | Jan | Apr | Jul | Dec -----+-----+-----+-----+----- - v0 | | | 2 | 1 - v1 | | 2 | 1 | - v2 | 1 | | | -(3 rows) - --- ordered months in vertical header, ordered years in horizontal header -SELECT EXTRACT(year FROM d) AS year, to_char(d,'Mon') AS """month"" name", - EXTRACT(month FROM d) AS month, - format('sum=%s avg=%s', sum(i), avg(i)::numeric(2,1)) - FROM ctv_data - GROUP BY EXTRACT(year FROM d), to_char(d,'Mon'), EXTRACT(month FROM d) -ORDER BY month -\crosstabview """month"" name" year format year - "month" name | 2014 | 2015 ---------------+-----------------+---------------- - Jan | | sum=3 avg=3.0 - Apr | | sum=10 avg=5.0 - Jul | sum=5 avg=5.0 | sum=4 avg=4.0 - Dec | sum=-3 avg=-3.0 | -(4 rows) - --- combine contents vertically into the same cell (V/H duplicates) -SELECT v, h, string_agg(c, E'\n') FROM ctv_data GROUP BY v, h ORDER BY 1,2,3 - \crosstabview 1 2 3 - v | h4 | | h0 | h2 | h1 -----+-----+-----+-----+------+----- - v0 | qux+| qux | | | - | dbl | | | | - v1 | | | baz | foo +| - | | | | quux | - v2 | | | | | bar -(3 rows) - --- horizontal ASC order from window function -SELECT v,h, string_agg(c, E'\n') AS c, row_number() OVER(ORDER BY h) AS r -FROM ctv_data GROUP BY v, h ORDER BY 1,3,2 - \crosstabview v h c r - v | h0 | h1 | h2 | h4 | -----+-----+-----+------+-----+----- - v0 | | | | qux+| qux - | | | | dbl | - v1 | baz | | foo +| | - | | | quux | | - v2 | | bar | | | -(3 rows) - --- horizontal DESC order from window function -SELECT v, h, string_agg(c, E'\n') AS c, row_number() OVER(ORDER BY h DESC) AS r -FROM ctv_data GROUP BY v, h ORDER BY 1,3,2 - \crosstabview v h c r - v | | h4 | h2 | h1 | h0 -----+-----+-----+------+-----+----- - v0 | qux | qux+| | | - | | dbl | | | - v1 | | | foo +| | baz - | | | quux | | - v2 | | | | bar | -(3 rows) - --- horizontal ASC order from window function, NULLs pushed rightmost -SELECT v,h, string_agg(c, E'\n') AS c, row_number() OVER(ORDER BY h NULLS LAST) AS r -FROM ctv_data GROUP BY v, h ORDER BY 1,3,2 - \crosstabview v h c r - v | h0 | h1 | h2 | h4 | -----+-----+-----+------+-----+----- - v0 | | | | qux+| qux - | | | | dbl | - v1 | baz | | foo 
+| | - | | | quux | | - v2 | | bar | | | -(3 rows) - --- only null, no column name, 2 columns: error -SELECT null,null \crosstabview -\crosstabview: query must return at least three columns --- only null, no column name, 3 columns: works -SELECT null,null,null \crosstabview - ?column? | -----------+-- - | -(1 row) - --- null display -\pset null '#null#' -SELECT v,h, string_agg(i::text, E'\n') AS i FROM ctv_data -GROUP BY v, h ORDER BY h,v - \crosstabview v h i - v | h0 | h1 | h2 | h4 | #null# -----+--------+----+----+----+-------- - v1 | #null# | | 3 +| | - | | | 7 | | - v2 | | 3 | | | - v0 | | | | 4 +| 5 - | | | | -3 | -(3 rows) - -\pset null '' --- refer to columns by position -SELECT v,h,string_agg(i::text, E'\n'), string_agg(c, E'\n') -FROM ctv_data GROUP BY v, h ORDER BY h,v - \crosstabview 2 1 4 - h | v1 | v2 | v0 -----+------+-----+----- - h0 | baz | | - h1 | | bar | - h2 | foo +| | - | quux | | - h4 | | | qux+ - | | | dbl - | | | qux -(5 rows) - --- refer to columns by positions and names mixed -SELECT v,h, string_agg(i::text, E'\n') AS i, string_agg(c, E'\n') AS c -FROM ctv_data GROUP BY v, h ORDER BY h,v - \crosstabview 1 "h" 4 - v | h0 | h1 | h2 | h4 | -----+-----+-----+------+-----+----- - v1 | baz | | foo +| | - | | | quux | | - v2 | | bar | | | - v0 | | | | qux+| qux - | | | | dbl | -(3 rows) - --- refer to columns by quoted names, check downcasing of unquoted name -SELECT 1 as "22", 2 as b, 3 as "Foo" - \crosstabview "22" B "Foo" - 22 | 2 -----+--- - 1 | 3 -(1 row) - --- error: bad column name -SELECT v,h,c,i FROM ctv_data - \crosstabview v h j -\crosstabview: column name not found: "j" --- error: need to quote name -SELECT 1 as "22", 2 as b, 3 as "Foo" - \crosstabview 1 2 Foo -\crosstabview: column name not found: "foo" --- error: need to not quote name -SELECT 1 as "22", 2 as b, 3 as "Foo" - \crosstabview 1 "B" "Foo" -\crosstabview: column name not found: "B" --- error: bad column number -SELECT v,h,i,c FROM ctv_data - \crosstabview 2 1 5 -\crosstabview: column number 5 is out of range 1..4 --- error: same H and V columns -SELECT v,h,i,c FROM ctv_data - \crosstabview 2 h 4 -\crosstabview: vertical and horizontal headers must be different columns --- error: too many columns -SELECT a,a,1 FROM generate_series(1,3000) AS a - \crosstabview -\crosstabview: maximum number of columns (1600) exceeded --- error: only one column -SELECT 1 \crosstabview -\crosstabview: query must return at least three columns -DROP TABLE ctv_data; --- check error reporting (bug #14476) -CREATE TABLE ctv_data (x int, y int, v text); -INSERT INTO ctv_data SELECT 1, x, '*' || x FROM generate_series(1,10) x; -SELECT * FROM ctv_data \crosstabview - x | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 ----+----+----+----+----+----+----+----+----+----+----- - 1 | *1 | *2 | *3 | *4 | *5 | *6 | *7 | *8 | *9 | *10 -(1 row) - -INSERT INTO ctv_data VALUES (1, 10, '*'); -- duplicate data to cause error -SELECT * FROM ctv_data \crosstabview -\crosstabview: query result contains multiple data values for row "1", column "10" -DROP TABLE ctv_data; +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/amutils.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/amutils.out --- /tmp/cirrus-ci-build/src/test/regress/expected/amutils.out 2024-03-13 23:12:37.622405000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/amutils.out 2024-03-13 23:14:29.818786000 +0000 @@ -1,254 +1,2 @@ --- --- Test index AM property-reporting functions --- -select prop, - pg_indexam_has_property(a.oid, prop) as "AM", - pg_index_has_property('onek_hundred'::regclass, prop) as "Index", - pg_index_column_has_property('onek_hundred'::regclass, 1, prop) as "Column" - from pg_am a, - unnest(array['asc', 'desc', 'nulls_first', 'nulls_last', - 'orderable', 'distance_orderable', 'returnable', - 'search_array', 'search_nulls', - 'clusterable', 'index_scan', 'bitmap_scan', - 'backward_scan', - 'can_order', 'can_unique', 'can_multi_col', - 'can_exclude', 'can_include', - 'bogus']::text[]) - with ordinality as u(prop,ord) - where a.amname = 'btree' - order by ord; - prop | AM | Index | Column ---------------------+----+-------+-------- - asc | | | t - desc | | | f - nulls_first | | | f - nulls_last | | | t - orderable | | | t - distance_orderable | | | f - returnable | | | t - search_array | | | t - search_nulls | | | t - clusterable | | t | - index_scan | | t | - bitmap_scan | | t | - backward_scan | | t | - can_order | t | | - can_unique | t | | - can_multi_col | t | | - can_exclude | t | | - can_include | t | | - bogus | | | -(19 rows) - -select prop, - pg_indexam_has_property(a.oid, prop) as "AM", - pg_index_has_property('gcircleind'::regclass, prop) as "Index", - pg_index_column_has_property('gcircleind'::regclass, 1, prop) as "Column" - from pg_am a, - unnest(array['asc', 'desc', 'nulls_first', 'nulls_last', - 'orderable', 'distance_orderable', 'returnable', - 'search_array', 'search_nulls', - 'clusterable', 'index_scan', 'bitmap_scan', - 'backward_scan', - 'can_order', 'can_unique', 'can_multi_col', - 'can_exclude', 'can_include', - 'bogus']::text[]) - with ordinality as u(prop,ord) - where a.amname = 'gist' - order by ord; - prop | AM | Index | Column ---------------------+----+-------+-------- - asc | | | f - desc | | | f - nulls_first | | | f - nulls_last | | | f - orderable | | | f - distance_orderable | | | t - returnable | | | f - search_array | | | f - search_nulls | | | t - clusterable | | t | - index_scan | | t | - bitmap_scan | | t | - backward_scan | | f | - can_order | f | | - can_unique | f | | - can_multi_col | t | | - can_exclude | t | | - can_include | t | | - bogus | | | -(19 rows) - -select prop, - pg_index_column_has_property('onek_hundred'::regclass, 1, prop) as btree, - pg_index_column_has_property('hash_i4_index'::regclass, 1, prop) as hash, - pg_index_column_has_property('gcircleind'::regclass, 1, prop) as gist, - pg_index_column_has_property('sp_radix_ind'::regclass, 1, prop) as spgist_radix, - pg_index_column_has_property('sp_quad_ind'::regclass, 1, prop) as spgist_quad, - pg_index_column_has_property('botharrayidx'::regclass, 1, prop) as gin, - pg_index_column_has_property('brinidx'::regclass, 1, prop) as brin - from unnest(array['asc', 'desc', 'nulls_first', 'nulls_last', - 'orderable', 'distance_orderable', 'returnable', - 'search_array', 'search_nulls', - 'bogus']::text[]) - with ordinality as u(prop,ord) - order by ord; - prop | btree | hash | gist | spgist_radix | spgist_quad | gin | brin ---------------------+-------+------+------+--------------+-------------+-----+------ - asc | t | f | f | f | f | f 
| f - desc | f | f | f | f | f | f | f - nulls_first | f | f | f | f | f | f | f - nulls_last | t | f | f | f | f | f | f - orderable | t | f | f | f | f | f | f - distance_orderable | f | f | t | f | t | f | f - returnable | t | f | f | t | t | f | f - search_array | t | f | f | f | f | f | f - search_nulls | t | f | t | t | t | f | t - bogus | | | | | | | -(10 rows) - -select prop, - pg_index_has_property('onek_hundred'::regclass, prop) as btree, - pg_index_has_property('hash_i4_index'::regclass, prop) as hash, - pg_index_has_property('gcircleind'::regclass, prop) as gist, - pg_index_has_property('sp_radix_ind'::regclass, prop) as spgist, - pg_index_has_property('botharrayidx'::regclass, prop) as gin, - pg_index_has_property('brinidx'::regclass, prop) as brin - from unnest(array['clusterable', 'index_scan', 'bitmap_scan', - 'backward_scan', - 'bogus']::text[]) - with ordinality as u(prop,ord) - order by ord; - prop | btree | hash | gist | spgist | gin | brin ----------------+-------+------+------+--------+-----+------ - clusterable | t | f | t | f | f | f - index_scan | t | t | t | t | f | f - bitmap_scan | t | t | t | t | t | t - backward_scan | t | t | f | f | f | f - bogus | | | | | | -(5 rows) - -select amname, prop, pg_indexam_has_property(a.oid, prop) as p - from pg_am a, - unnest(array['can_order', 'can_unique', 'can_multi_col', - 'can_exclude', 'can_include', 'bogus']::text[]) - with ordinality as u(prop,ord) - where amtype = 'i' - order by amname, ord; - amname | prop | p ---------+---------------+--- - brin | can_order | f - brin | can_unique | f - brin | can_multi_col | t - brin | can_exclude | f - brin | can_include | f - brin | bogus | - btree | can_order | t - btree | can_unique | t - btree | can_multi_col | t - btree | can_exclude | t - btree | can_include | t - btree | bogus | - gin | can_order | f - gin | can_unique | f - gin | can_multi_col | t - gin | can_exclude | f - gin | can_include | f - gin | bogus | - gist | can_order | f - gist | can_unique | f - gist | can_multi_col | t - gist | can_exclude | t - gist | can_include | t - gist | bogus | - hash | can_order | f - hash | can_unique | f - hash | can_multi_col | f - hash | can_exclude | t - hash | can_include | f - hash | bogus | - spgist | can_order | f - spgist | can_unique | f - spgist | can_multi_col | f - spgist | can_exclude | t - spgist | can_include | t - spgist | bogus | -(36 rows) - --- --- additional checks for pg_index_column_has_property --- -CREATE TEMP TABLE foo (f1 int, f2 int, f3 int, f4 int); -CREATE INDEX fooindex ON foo (f1 desc, f2 asc, f3 nulls first, f4 nulls last); -select col, prop, pg_index_column_has_property(o, col, prop) - from (values ('fooindex'::regclass)) v1(o), - (values (1,'orderable'),(2,'asc'),(3,'desc'), - (4,'nulls_first'),(5,'nulls_last'), - (6, 'bogus')) v2(idx,prop), - generate_series(1,4) col - order by col, idx; - col | prop | pg_index_column_has_property ------+-------------+------------------------------ - 1 | orderable | t - 1 | asc | f - 1 | desc | t - 1 | nulls_first | t - 1 | nulls_last | f - 1 | bogus | - 2 | orderable | t - 2 | asc | t - 2 | desc | f - 2 | nulls_first | f - 2 | nulls_last | t - 2 | bogus | - 3 | orderable | t - 3 | asc | t - 3 | desc | f - 3 | nulls_first | t - 3 | nulls_last | f - 3 | bogus | - 4 | orderable | t - 4 | asc | t - 4 | desc | f - 4 | nulls_first | f - 4 | nulls_last | t - 4 | bogus | -(24 rows) - -CREATE INDEX foocover ON foo (f1) INCLUDE (f2,f3); -select col, prop, pg_index_column_has_property(o, col, prop) - from (values 
('foocover'::regclass)) v1(o), - (values (1,'orderable'),(2,'asc'),(3,'desc'), - (4,'nulls_first'),(5,'nulls_last'), - (6,'distance_orderable'),(7,'returnable'), - (8, 'bogus')) v2(idx,prop), - generate_series(1,3) col - order by col, idx; - col | prop | pg_index_column_has_property ------+--------------------+------------------------------ - 1 | orderable | t - 1 | asc | t - 1 | desc | f - 1 | nulls_first | f - 1 | nulls_last | t - 1 | distance_orderable | f - 1 | returnable | t - 1 | bogus | - 2 | orderable | f - 2 | asc | - 2 | desc | - 2 | nulls_first | - 2 | nulls_last | - 2 | distance_orderable | f - 2 | returnable | t - 2 | bogus | - 3 | orderable | f - 3 | asc | - 3 | desc | - 3 | nulls_first | - 3 | nulls_last | - 3 | distance_orderable | f - 3 | returnable | t - 3 | bogus | -(24 rows) - +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/stats_ext.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/stats_ext.out --- /tmp/cirrus-ci-build/src/test/regress/expected/stats_ext.out 2024-03-13 23:12:37.627474000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/stats_ext.out 2024-03-13 23:14:29.840791000 +0000 @@ -1,3292 +1,2 @@ --- Generic extended statistics support --- --- Note: tables for which we check estimated row counts should be created --- with autovacuum_enabled = off, so that we don't have unstable results --- from auto-analyze happening when we didn't expect it. --- --- check the number of estimated/actual rows in the top node -create function check_estimated_rows(text) returns table (estimated int, actual int) -language plpgsql as -$$ -declare - ln text; - tmp text[]; - first_row bool := true; -begin - for ln in - execute format('explain analyze %s', $1) - loop - if first_row then - first_row := false; - tmp := regexp_match(ln, 'rows=(\d*) .* rows=(\d*)'); - return query select tmp[1]::int, tmp[2]::int; - end if; - end loop; -end; -$$; --- Verify failures -CREATE TABLE ext_stats_test (x text, y int, z int); -CREATE STATISTICS tst; -ERROR: syntax error at or near ";" -LINE 1: CREATE STATISTICS tst; - ^ -CREATE STATISTICS tst ON a, b; -ERROR: syntax error at or near ";" -LINE 1: CREATE STATISTICS tst ON a, b; - ^ -CREATE STATISTICS tst FROM sometab; -ERROR: syntax error at or near "FROM" -LINE 1: CREATE STATISTICS tst FROM sometab; - ^ -CREATE STATISTICS tst ON a, b FROM nonexistent; -ERROR: relation "nonexistent" does not exist -CREATE STATISTICS tst ON a, b FROM ext_stats_test; -ERROR: column "a" does not exist -CREATE STATISTICS tst ON x, x, y FROM ext_stats_test; -ERROR: duplicate column name in statistics definition -CREATE STATISTICS tst ON x, x, y, x, x, y, x, x, y FROM ext_stats_test; -ERROR: cannot have more than 8 columns in statistics -CREATE STATISTICS tst ON x, x, y, x, x, (x || 'x'), (y + 1), (x || 'x'), (x || 'x'), (y + 1) FROM ext_stats_test; -ERROR: cannot have more than 8 columns in statistics -CREATE STATISTICS tst ON (x || 'x'), (x || 'x'), (y + 1), (x || 'x'), (x || 'x'), (y + 1), (x || 'x'), (x || 'x'), (y + 1) FROM ext_stats_test; -ERROR: cannot have more than 8 columns in statistics -CREATE STATISTICS tst ON (x || 'x'), (x || 'x'), y FROM ext_stats_test; -ERROR: duplicate expression in statistics definition -CREATE STATISTICS tst (unrecognized) ON x, y FROM ext_stats_test; -ERROR: unrecognized statistics 
kind "unrecognized" --- incorrect expressions -CREATE STATISTICS tst ON (y) FROM ext_stats_test; -- single column reference -ERROR: extended statistics require at least 2 columns -CREATE STATISTICS tst ON y + z FROM ext_stats_test; -- missing parentheses -ERROR: syntax error at or near "+" -LINE 1: CREATE STATISTICS tst ON y + z FROM ext_stats_test; - ^ -CREATE STATISTICS tst ON (x, y) FROM ext_stats_test; -- tuple expression -ERROR: syntax error at or near "," -LINE 1: CREATE STATISTICS tst ON (x, y) FROM ext_stats_test; - ^ -DROP TABLE ext_stats_test; --- Ensure stats are dropped sanely, and test IF NOT EXISTS while at it -CREATE TABLE ab1 (a INTEGER, b INTEGER, c INTEGER); -CREATE STATISTICS IF NOT EXISTS ab1_a_b_stats ON a, b FROM ab1; -COMMENT ON STATISTICS ab1_a_b_stats IS 'new comment'; -CREATE ROLE regress_stats_ext; -SET SESSION AUTHORIZATION regress_stats_ext; -COMMENT ON STATISTICS ab1_a_b_stats IS 'changed comment'; -ERROR: must be owner of statistics object ab1_a_b_stats -DROP STATISTICS ab1_a_b_stats; -ERROR: must be owner of statistics object ab1_a_b_stats -ALTER STATISTICS ab1_a_b_stats RENAME TO ab1_a_b_stats_new; -ERROR: must be owner of statistics object ab1_a_b_stats -RESET SESSION AUTHORIZATION; -DROP ROLE regress_stats_ext; -CREATE STATISTICS IF NOT EXISTS ab1_a_b_stats ON a, b FROM ab1; -NOTICE: statistics object "ab1_a_b_stats" already exists, skipping -DROP STATISTICS ab1_a_b_stats; -CREATE SCHEMA regress_schema_2; -CREATE STATISTICS regress_schema_2.ab1_a_b_stats ON a, b FROM ab1; --- Let's also verify the pg_get_statisticsobjdef output looks sane. -SELECT pg_get_statisticsobjdef(oid) FROM pg_statistic_ext WHERE stxname = 'ab1_a_b_stats'; - pg_get_statisticsobjdef -------------------------------------------------------------------- - CREATE STATISTICS regress_schema_2.ab1_a_b_stats ON a, b FROM ab1 -(1 row) - -DROP STATISTICS regress_schema_2.ab1_a_b_stats; --- Ensure statistics are dropped when columns are -CREATE STATISTICS ab1_b_c_stats ON b, c FROM ab1; -CREATE STATISTICS ab1_a_b_c_stats ON a, b, c FROM ab1; -CREATE STATISTICS ab1_b_a_stats ON b, a FROM ab1; -ALTER TABLE ab1 DROP COLUMN a; -\d ab1 - Table "public.ab1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - b | integer | | | - c | integer | | | -Statistics objects: - "public.ab1_b_c_stats" ON b, c FROM ab1 - --- Ensure statistics are dropped when table is -SELECT stxname FROM pg_statistic_ext WHERE stxname LIKE 'ab1%'; - stxname ---------------- - ab1_b_c_stats -(1 row) - -DROP TABLE ab1; -SELECT stxname FROM pg_statistic_ext WHERE stxname LIKE 'ab1%'; - stxname ---------- -(0 rows) - --- Ensure things work sanely with SET STATISTICS 0 -CREATE TABLE ab1 (a INTEGER, b INTEGER); -ALTER TABLE ab1 ALTER a SET STATISTICS 0; -INSERT INTO ab1 SELECT a, a%23 FROM generate_series(1, 1000) a; -CREATE STATISTICS ab1_a_b_stats ON a, b FROM ab1; -ANALYZE ab1; -WARNING: statistics object "public.ab1_a_b_stats" could not be computed for relation "public.ab1" -ALTER TABLE ab1 ALTER a SET STATISTICS -1; --- setting statistics target 0 skips the statistics, without printing any message, so check catalog -ALTER STATISTICS ab1_a_b_stats SET STATISTICS 0; -\d ab1 - Table "public.ab1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Statistics objects: - "public.ab1_a_b_stats" ON a, b FROM ab1; STATISTICS 0 - -ANALYZE ab1; -SELECT stxname, stxdndistinct, stxddependencies, 
stxdmcv, stxdinherit - FROM pg_statistic_ext s LEFT JOIN pg_statistic_ext_data d ON (d.stxoid = s.oid) - WHERE s.stxname = 'ab1_a_b_stats'; - stxname | stxdndistinct | stxddependencies | stxdmcv | stxdinherit ----------------+---------------+------------------+---------+------------- - ab1_a_b_stats | | | | -(1 row) - -ALTER STATISTICS ab1_a_b_stats SET STATISTICS -1; -\d+ ab1 - Table "public.ab1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - a | integer | | | | plain | | - b | integer | | | | plain | | -Statistics objects: - "public.ab1_a_b_stats" ON a, b FROM ab1 - --- partial analyze doesn't build stats either -ANALYZE ab1 (a); -WARNING: statistics object "public.ab1_a_b_stats" could not be computed for relation "public.ab1" -ANALYZE ab1; -DROP TABLE ab1; -ALTER STATISTICS ab1_a_b_stats SET STATISTICS 0; -ERROR: statistics object "ab1_a_b_stats" does not exist -ALTER STATISTICS IF EXISTS ab1_a_b_stats SET STATISTICS 0; -NOTICE: statistics object "ab1_a_b_stats" does not exist, skipping --- Ensure we can build statistics for tables with inheritance. -CREATE TABLE ab1 (a INTEGER, b INTEGER); -CREATE TABLE ab1c () INHERITS (ab1); -INSERT INTO ab1 VALUES (1,1); -CREATE STATISTICS ab1_a_b_stats ON a, b FROM ab1; -ANALYZE ab1; -DROP TABLE ab1 CASCADE; -NOTICE: drop cascades to table ab1c --- Tests for stats with inheritance -CREATE TABLE stxdinh(a int, b int); -CREATE TABLE stxdinh1() INHERITS(stxdinh); -CREATE TABLE stxdinh2() INHERITS(stxdinh); -INSERT INTO stxdinh SELECT mod(a,50), mod(a,100) FROM generate_series(0, 1999) a; -INSERT INTO stxdinh1 SELECT mod(a,100), mod(a,100) FROM generate_series(0, 999) a; -INSERT INTO stxdinh2 SELECT mod(a,100), mod(a,100) FROM generate_series(0, 999) a; -VACUUM ANALYZE stxdinh, stxdinh1, stxdinh2; --- Ensure non-inherited stats are not applied to inherited query --- Without stats object, it looks like this -SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* GROUP BY 1, 2'); - estimated | actual ------------+-------- - 400 | 150 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* WHERE a = 0 AND b = 0'); - estimated | actual ------------+-------- - 3 | 40 -(1 row) - -CREATE STATISTICS stxdinh ON a, b FROM stxdinh; -VACUUM ANALYZE stxdinh, stxdinh1, stxdinh2; --- See if the extended stats affect the estimates -SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* GROUP BY 1, 2'); - estimated | actual ------------+-------- - 150 | 150 -(1 row) - --- Dependencies are applied at individual relations (within append), so --- this estimate changes a bit because we improve estimates for the parent -SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* WHERE a = 0 AND b = 0'); - estimated | actual ------------+-------- - 22 | 40 -(1 row) - --- Ensure correct (non-inherited) stats are applied to inherited query -SELECT * FROM check_estimated_rows('SELECT a, b FROM ONLY stxdinh GROUP BY 1, 2'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT a, b FROM ONLY stxdinh WHERE a = 0 AND b = 0'); - estimated | actual ------------+-------- - 20 | 20 -(1 row) - -DROP TABLE stxdinh, stxdinh1, stxdinh2; --- Ensure inherited stats ARE applied to inherited query in partitioned table -CREATE TABLE stxdinp(i int, a int, b int) PARTITION BY RANGE (i); -CREATE TABLE stxdinp1 PARTITION OF stxdinp FOR VALUES FROM (1) TO (100); -INSERT INTO 
stxdinp SELECT 1, a/100, a/100 FROM generate_series(1, 999) a; -CREATE STATISTICS stxdinp ON (a + 1), a, b FROM stxdinp; -VACUUM ANALYZE stxdinp; -- partitions are processed recursively -SELECT 1 FROM pg_statistic_ext WHERE stxrelid = 'stxdinp'::regclass; - ?column? ----------- - 1 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinp GROUP BY 1, 2'); - estimated | actual ------------+-------- - 10 | 10 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT a + 1, b FROM ONLY stxdinp GROUP BY 1, 2'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -DROP TABLE stxdinp; --- basic test for statistics on expressions -CREATE TABLE ab1 (a INTEGER, b INTEGER, c TIMESTAMP, d TIMESTAMPTZ); --- expression stats may be built on a single expression column -CREATE STATISTICS ab1_exprstat_1 ON (a+b) FROM ab1; --- with a single expression, we only enable expression statistics -CREATE STATISTICS ab1_exprstat_2 ON (a+b) FROM ab1; -SELECT stxkind FROM pg_statistic_ext WHERE stxname = 'ab1_exprstat_2'; - stxkind ---------- - {e} -(1 row) - --- adding anything to the expression builds all statistics kinds -CREATE STATISTICS ab1_exprstat_3 ON (a+b), a FROM ab1; -SELECT stxkind FROM pg_statistic_ext WHERE stxname = 'ab1_exprstat_3'; - stxkind ------------ - {d,f,m,e} -(1 row) - --- date_trunc on timestamptz is not immutable, but that should not matter -CREATE STATISTICS ab1_exprstat_4 ON date_trunc('day', d) FROM ab1; --- date_trunc on timestamp is immutable -CREATE STATISTICS ab1_exprstat_5 ON date_trunc('day', c) FROM ab1; --- check use of a boolean-returning expression -CREATE STATISTICS ab1_exprstat_6 ON - (case a when 1 then true else false end), b FROM ab1; --- insert some data and run analyze, to test that these cases build properly -INSERT INTO ab1 -SELECT x / 10, x / 3, - '2020-10-01'::timestamp + x * interval '1 day', - '2020-10-01'::timestamptz + x * interval '1 day' -FROM generate_series(1, 100) x; -ANALYZE ab1; --- apply some stats -SELECT * FROM check_estimated_rows('SELECT * FROM ab1 WHERE (case a when 1 then true else false end) AND b=2'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -DROP TABLE ab1; --- Verify supported object types for extended statistics -CREATE schema tststats; -CREATE TABLE tststats.t (a int, b int, c text); -CREATE INDEX ti ON tststats.t (a, b); -CREATE SEQUENCE tststats.s; -CREATE VIEW tststats.v AS SELECT * FROM tststats.t; -CREATE MATERIALIZED VIEW tststats.mv AS SELECT * FROM tststats.t; -CREATE TYPE tststats.ty AS (a int, b int, c text); -CREATE FOREIGN DATA WRAPPER extstats_dummy_fdw; -CREATE SERVER extstats_dummy_srv FOREIGN DATA WRAPPER extstats_dummy_fdw; -CREATE FOREIGN TABLE tststats.f (a int, b int, c text) SERVER extstats_dummy_srv; -CREATE TABLE tststats.pt (a int, b int, c text) PARTITION BY RANGE (a, b); -CREATE TABLE tststats.pt1 PARTITION OF tststats.pt FOR VALUES FROM (-10, -10) TO (10, 10); -CREATE STATISTICS tststats.s1 ON a, b FROM tststats.t; -CREATE STATISTICS tststats.s2 ON a, b FROM tststats.ti; -ERROR: cannot define statistics for relation "ti" -DETAIL: This operation is not supported for indexes. -CREATE STATISTICS tststats.s3 ON a, b FROM tststats.s; -ERROR: cannot define statistics for relation "s" -DETAIL: This operation is not supported for sequences. -CREATE STATISTICS tststats.s4 ON a, b FROM tststats.v; -ERROR: cannot define statistics for relation "v" -DETAIL: This operation is not supported for views. 
-CREATE STATISTICS tststats.s5 ON a, b FROM tststats.mv; -CREATE STATISTICS tststats.s6 ON a, b FROM tststats.ty; -ERROR: cannot define statistics for relation "ty" -DETAIL: This operation is not supported for composite types. -CREATE STATISTICS tststats.s7 ON a, b FROM tststats.f; -CREATE STATISTICS tststats.s8 ON a, b FROM tststats.pt; -CREATE STATISTICS tststats.s9 ON a, b FROM tststats.pt1; -DO $$ -DECLARE - relname text := reltoastrelid::regclass FROM pg_class WHERE oid = 'tststats.t'::regclass; -BEGIN - EXECUTE 'CREATE STATISTICS tststats.s10 ON a, b FROM ' || relname; -EXCEPTION WHEN wrong_object_type THEN - RAISE NOTICE 'stats on toast table not created'; -END; -$$; -NOTICE: stats on toast table not created -DROP SCHEMA tststats CASCADE; -NOTICE: drop cascades to 7 other objects -DETAIL: drop cascades to table tststats.t -drop cascades to sequence tststats.s -drop cascades to view tststats.v -drop cascades to materialized view tststats.mv -drop cascades to type tststats.ty -drop cascades to foreign table tststats.f -drop cascades to table tststats.pt -DROP FOREIGN DATA WRAPPER extstats_dummy_fdw CASCADE; -NOTICE: drop cascades to server extstats_dummy_srv --- n-distinct tests -CREATE TABLE ndistinct ( - filler1 TEXT, - filler2 NUMERIC, - a INT, - b INT, - filler3 DATE, - c INT, - d INT -) -WITH (autovacuum_enabled = off); --- over-estimates when using only per-column statistics -INSERT INTO ndistinct (a, b, c, filler1) - SELECT i/100, i/100, i/100, (i/100) || ' dollars and zero cents' - FROM generate_series(1,1000) s(i); -ANALYZE ndistinct; --- Group Aggregate, due to over-estimate of the number of groups -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 100 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c'); - estimated | actual ------------+-------- - 100 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c'); - estimated | actual ------------+-------- - 100 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d'); - estimated | actual ------------+-------- - 200 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d'); - estimated | actual ------------+-------- - 200 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (a+1)'); - estimated | actual ------------+-------- - 100 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)'); - estimated | actual ------------+-------- - 100 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)'); - estimated | actual ------------+-------- - 100 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)'); - estimated | actual ------------+-------- - 100 | 11 -(1 row) - --- correct command -CREATE STATISTICS s10 ON a, b, c FROM ndistinct; -ANALYZE ndistinct; -SELECT s.stxkind, d.stxdndistinct - FROM pg_statistic_ext s, pg_statistic_ext_data d - WHERE s.stxrelid = 'ndistinct'::regclass - AND d.stxoid = s.oid; - stxkind | stxdndistinct ----------+----------------------------------------------------- - {d,f,m} | {"3, 4": 11, "3, 6": 11, "4, 6": 11, "3, 4, 6": 11} -(1 row) - --- minor improvement, make sure the ctid does not break the matching -SELECT * FROM 
check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY ctid, a, b'); - estimated | actual ------------+-------- - 1000 | 1000 -(1 row) - --- Hash Aggregate, thanks to estimates improved by the statistic -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 11 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c'); - estimated | actual ------------+-------- - 11 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c'); - estimated | actual ------------+-------- - 11 | 11 -(1 row) - --- partial improvement (match on attributes) -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (a+1)'); - estimated | actual ------------+-------- - 11 | 11 -(1 row) - --- expressions - no improvement -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)'); - estimated | actual ------------+-------- - 11 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)'); - estimated | actual ------------+-------- - 11 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)'); - estimated | actual ------------+-------- - 11 | 11 -(1 row) - --- last two plans keep using Group Aggregate, because 'd' is not covered --- by the statistic and while it's NULL-only we assume 200 values for it -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d'); - estimated | actual ------------+-------- - 200 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d'); - estimated | actual ------------+-------- - 200 | 11 -(1 row) - -TRUNCATE TABLE ndistinct; --- under-estimates when using only per-column statistics -INSERT INTO ndistinct (a, b, c, filler1) - SELECT mod(i,13), mod(i,17), mod(i,19), - mod(i,23) || ' dollars and zero cents' - FROM generate_series(1,1000) s(i); -ANALYZE ndistinct; -SELECT s.stxkind, d.stxdndistinct - FROM pg_statistic_ext s, pg_statistic_ext_data d - WHERE s.stxrelid = 'ndistinct'::regclass - AND d.stxoid = s.oid; - stxkind | stxdndistinct ----------+---------------------------------------------------------- - {d,f,m} | {"3, 4": 221, "3, 6": 247, "4, 6": 323, "3, 4, 6": 1000} -(1 row) - --- correct estimates -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 221 | 221 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c'); - estimated | actual ------------+-------- - 1000 | 1000 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d'); - estimated | actual ------------+-------- - 1000 | 1000 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d'); - estimated | actual ------------+-------- - 323 | 323 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, d'); - estimated | actual ------------+-------- - 200 | 13 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (a+1)'); - estimated | actual ------------+-------- - 221 | 221 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)'); - estimated | actual ------------+-------- - 221 | 221 -(1 row) - -SELECT 
* FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)'); - estimated | actual ------------+-------- - 1000 | 1000 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)'); - estimated | actual ------------+-------- - 221 | 221 -(1 row) - -DROP STATISTICS s10; -SELECT s.stxkind, d.stxdndistinct - FROM pg_statistic_ext s, pg_statistic_ext_data d - WHERE s.stxrelid = 'ndistinct'::regclass - AND d.stxoid = s.oid; - stxkind | stxdndistinct ----------+--------------- -(0 rows) - --- dropping the statistics results in under-estimates -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 100 | 221 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c'); - estimated | actual ------------+-------- - 100 | 1000 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d'); - estimated | actual ------------+-------- - 200 | 1000 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d'); - estimated | actual ------------+-------- - 200 | 323 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, d'); - estimated | actual ------------+-------- - 200 | 13 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (a+1)'); - estimated | actual ------------+-------- - 100 | 221 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)'); - estimated | actual ------------+-------- - 100 | 221 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)'); - estimated | actual ------------+-------- - 100 | 1000 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)'); - estimated | actual ------------+-------- - 100 | 221 -(1 row) - --- ndistinct estimates with statistics on expressions -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)'); - estimated | actual ------------+-------- - 100 | 221 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)'); - estimated | actual ------------+-------- - 100 | 1000 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)'); - estimated | actual ------------+-------- - 100 | 221 -(1 row) - -CREATE STATISTICS s10 (ndistinct) ON (a+1), (b+100), (2*c) FROM ndistinct; -ANALYZE ndistinct; -SELECT s.stxkind, d.stxdndistinct - FROM pg_statistic_ext s, pg_statistic_ext_data d - WHERE s.stxrelid = 'ndistinct'::regclass - AND d.stxoid = s.oid; - stxkind | stxdndistinct ----------+------------------------------------------------------------------- - {d,e} | {"-1, -2": 221, "-1, -3": 247, "-2, -3": 323, "-1, -2, -3": 1000} -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)'); - estimated | actual ------------+-------- - 221 | 221 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)'); - estimated | actual ------------+-------- - 1000 | 1000 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)'); - estimated | actual ------------+-------- - 221 | 221 -(1 row) - -DROP STATISTICS s10; --- a 
mix of attributes and expressions -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 100 | 221 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (2*c)'); - estimated | actual ------------+-------- - 100 | 247 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (2*c)'); - estimated | actual ------------+-------- - 100 | 1000 -(1 row) - -CREATE STATISTICS s10 (ndistinct) ON a, b, (2*c) FROM ndistinct; -ANALYZE ndistinct; -SELECT s.stxkind, d.stxdndistinct - FROM pg_statistic_ext s, pg_statistic_ext_data d - WHERE s.stxrelid = 'ndistinct'::regclass - AND d.stxoid = s.oid; - stxkind | stxdndistinct ----------+------------------------------------------------------------- - {d,e} | {"3, 4": 221, "3, -1": 247, "4, -1": 323, "3, 4, -1": 1000} -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 221 | 221 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (2*c)'); - estimated | actual ------------+-------- - 247 | 247 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (2*c)'); - estimated | actual ------------+-------- - 1000 | 1000 -(1 row) - -DROP STATISTICS s10; --- combination of multiple ndistinct statistics, with/without expressions -TRUNCATE ndistinct; --- two mostly independent groups of columns -INSERT INTO ndistinct (a, b, c, d) - SELECT mod(i,3), mod(i,9), mod(i,5), mod(i,20) - FROM generate_series(1,1000) s(i); -ANALYZE ndistinct; -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 27 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)'); - estimated | actual ------------+-------- - 27 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b'); - estimated | actual ------------+-------- - 27 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)'); - estimated | actual ------------+-------- - 27 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c'); - estimated | actual ------------+-------- - 100 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)'); - estimated | actual ------------+-------- - 100 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)'); - estimated | actual ------------+-------- - 100 | 180 -(1 row) - --- basic statistics on both attributes (no expressions) -CREATE STATISTICS s11 (ndistinct) ON a, b FROM ndistinct; -CREATE STATISTICS s12 (ndistinct) ON c, d FROM ndistinct; -ANALYZE ndistinct; -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)'); - estimated | actual 
------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c'); - estimated | actual ------------+-------- - 45 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)'); - estimated | actual ------------+-------- - 45 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)'); - estimated | actual ------------+-------- - 100 | 180 -(1 row) - --- replace the second statistics by statistics on expressions -DROP STATISTICS s12; -CREATE STATISTICS s12 (ndistinct) ON (c * 10), (d - 1) FROM ndistinct; -ANALYZE ndistinct; -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c'); - estimated | actual ------------+-------- - 45 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)'); - estimated | actual ------------+-------- - 45 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)'); - estimated | actual ------------+-------- - 100 | 180 -(1 row) - --- replace the second statistics by statistics on both attributes and expressions -DROP STATISTICS s12; -CREATE STATISTICS s12 (ndistinct) ON c, d, (c * 10), (d - 1) FROM ndistinct; -ANALYZE ndistinct; -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c'); - estimated | actual ------------+-------- - 45 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)'); - estimated | actual ------------+-------- - 45 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)'); - estimated | actual ------------+-------- - 100 | 180 -(1 row) - --- replace the other statistics by statistics on both attributes and expressions -DROP STATISTICS s11; -CREATE STATISTICS s11 (ndistinct) ON a, b, (a*5), (b+1) FROM ndistinct; -ANALYZE ndistinct; -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)'); - estimated | actual 
------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c'); - estimated | actual ------------+-------- - 45 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)'); - estimated | actual ------------+-------- - 45 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)'); - estimated | actual ------------+-------- - 100 | 180 -(1 row) - --- replace statistics by somewhat overlapping ones (this expected to get worse estimate --- because the first statistics shall be applied to 3 columns, and the second one can't --- be really applied) -DROP STATISTICS s11; -DROP STATISTICS s12; -CREATE STATISTICS s11 (ndistinct) ON a, b, (a*5), (b+1) FROM ndistinct; -CREATE STATISTICS s12 (ndistinct) ON a, (b+1), (c * 10) FROM ndistinct; -ANALYZE ndistinct; -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c'); - estimated | actual ------------+-------- - 45 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)'); - estimated | actual ------------+-------- - 100 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)'); - estimated | actual ------------+-------- - 100 | 180 -(1 row) - -DROP STATISTICS s11; -DROP STATISTICS s12; --- functional dependencies tests -CREATE TABLE functional_dependencies ( - filler1 TEXT, - filler2 NUMERIC, - a INT, - b TEXT, - filler3 DATE, - c INT, - d TEXT -) -WITH (autovacuum_enabled = off); -CREATE INDEX fdeps_ab_idx ON functional_dependencies (a, b); -CREATE INDEX fdeps_abc_idx ON functional_dependencies (a, b, c); --- random data (no functional dependencies) -INSERT INTO functional_dependencies (a, b, c, filler1) - SELECT mod(i, 5), mod(i, 7), mod(i, 11), i FROM generate_series(1,1000) s(i); -ANALYZE functional_dependencies; -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'''); - estimated | actual ------------+-------- - 29 | 29 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1'); - estimated | actual ------------+-------- - 3 | 3 -(1 row) - --- create statistics -CREATE STATISTICS func_deps_stat (dependencies) ON a, b, c FROM functional_dependencies; -ANALYZE functional_dependencies; -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'''); - estimated | actual ------------+-------- 
- 29 | 29 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1'); - estimated | actual ------------+-------- - 3 | 3 -(1 row) - --- a => b, a => c, b => c -TRUNCATE functional_dependencies; -DROP STATISTICS func_deps_stat; --- now do the same thing, but with expressions -INSERT INTO functional_dependencies (a, b, c, filler1) - SELECT i, i, i, i FROM generate_series(1,5000) s(i); -ANALYZE functional_dependencies; -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE mod(a, 11) = 1 AND mod(b::int, 13) = 1'); - estimated | actual ------------+-------- - 1 | 35 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE mod(a, 11) = 1 AND mod(b::int, 13) = 1 AND mod(c, 7) = 1'); - estimated | actual ------------+-------- - 1 | 5 -(1 row) - --- create statistics -CREATE STATISTICS func_deps_stat (dependencies) ON (mod(a,11)), (mod(b::int, 13)), (mod(c, 7)) FROM functional_dependencies; -ANALYZE functional_dependencies; -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE mod(a, 11) = 1 AND mod(b::int, 13) = 1'); - estimated | actual ------------+-------- - 35 | 35 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE mod(a, 11) = 1 AND mod(b::int, 13) = 1 AND mod(c, 7) = 1'); - estimated | actual ------------+-------- - 5 | 5 -(1 row) - --- a => b, a => c, b => c -TRUNCATE functional_dependencies; -DROP STATISTICS func_deps_stat; -INSERT INTO functional_dependencies (a, b, c, filler1) - SELECT mod(i,100), mod(i,50), mod(i,25), i FROM generate_series(1,5000) s(i); -ANALYZE functional_dependencies; -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'''); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - --- IN -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ''1'''); - estimated | actual ------------+-------- - 2 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b IN (''1'', ''2'')'); - estimated | actual ------------+-------- - 4 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b IN (''1'', ''2'')'); - estimated | actual ------------+-------- - 8 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b = ''1'''); - estimated | actual ------------+-------- - 4 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 26, 51, 76) AND b IN (''1'', ''26'') AND c = 1'); - estimated | actual ------------+-------- - 1 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 26, 51, 76) AND b IN (''1'', ''26'') AND c IN (1)'); - estimated | actual ------------+-------- - 1 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 26, 27, 51, 52, 76, 77) AND b IN (''1'', ''2'', ''26'', ''27'') AND c IN (1, 2)'); - estimated | actual ------------+-------- - 3 | 400 -(1 row) - --- OR clauses referencing the same attribute -SELECT * FROM check_estimated_rows('SELECT * FROM 
functional_dependencies WHERE (a = 1 OR a = 51) AND b = ''1'''); - estimated | actual ------------+-------- - 2 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 51) AND (b = ''1'' OR b = ''2'')'); - estimated | actual ------------+-------- - 4 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 2 OR a = 51 OR a = 52) AND (b = ''1'' OR b = ''2'')'); - estimated | actual ------------+-------- - 8 | 200 -(1 row) - --- OR clauses referencing different attributes -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR b = ''1'') AND b = ''1'''); - estimated | actual ------------+-------- - 3 | 100 -(1 row) - --- ANY -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 51]) AND b = ''1'''); - estimated | actual ------------+-------- - 2 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 51]) AND b = ANY (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 4 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 8 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 26, 51, 76]) AND b = ANY (ARRAY[''1'', ''26'']) AND c = 1'); - estimated | actual ------------+-------- - 1 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 26, 51, 76]) AND b = ANY (ARRAY[''1'', ''26'']) AND c = ANY (ARRAY[1])'); - estimated | actual ------------+-------- - 1 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 2, 26, 27, 51, 52, 76, 77]) AND b = ANY (ARRAY[''1'', ''2'', ''26'', ''27'']) AND c = ANY (ARRAY[1, 2])'); - estimated | actual ------------+-------- - 3 | 400 -(1 row) - --- ANY with inequalities should not benefit from functional dependencies -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a < ANY (ARRAY[1, 51]) AND b > ''1'''); - estimated | actual ------------+-------- - 2472 | 2400 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a >= ANY (ARRAY[1, 51]) AND b <= ANY (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 1441 | 1250 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a <= ANY (ARRAY[1, 2, 51, 52]) AND b >= ANY (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 3909 | 2550 -(1 row) - --- ALL (should not benefit from functional dependencies) -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ALL (ARRAY[''1''])'); - estimated | actual ------------+-------- - 2 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ALL (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b = ALL (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - --- create statistics -CREATE STATISTICS func_deps_stat (dependencies) ON a, b, c FROM functional_dependencies; 
-ANALYZE functional_dependencies; --- print the detected dependencies -SELECT dependencies FROM pg_stats_ext WHERE statistics_name = 'func_deps_stat'; - dependencies ------------------------------------------------------------------------------------------------------------- - {"3 => 4": 1.000000, "3 => 6": 1.000000, "4 => 6": 1.000000, "3, 4 => 6": 1.000000, "3, 6 => 4": 1.000000} -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'''); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - --- IN -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ''1'''); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b IN (''1'', ''2'')'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b IN (''1'', ''2'')'); - estimated | actual ------------+-------- - 200 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b = ''1'''); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 26, 51, 76) AND b IN (''1'', ''26'') AND c = 1'); - estimated | actual ------------+-------- - 200 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 26, 51, 76) AND b IN (''1'', ''26'') AND c IN (1)'); - estimated | actual ------------+-------- - 200 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 26, 27, 51, 52, 76, 77) AND b IN (''1'', ''2'', ''26'', ''27'') AND c IN (1, 2)'); - estimated | actual ------------+-------- - 400 | 400 -(1 row) - --- OR clauses referencing the same attribute -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 51) AND b = ''1'''); - estimated | actual ------------+-------- - 99 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 51) AND (b = ''1'' OR b = ''2'')'); - estimated | actual ------------+-------- - 99 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 2 OR a = 51 OR a = 52) AND (b = ''1'' OR b = ''2'')'); - estimated | actual ------------+-------- - 197 | 200 -(1 row) - --- OR clauses referencing different attributes are incompatible -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR b = ''1'') AND b = ''1'''); - estimated | actual ------------+-------- - 3 | 100 -(1 row) - --- ANY -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 51]) AND b = ''1'''); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 51]) AND b = ANY (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 2, 51, 52]) AND b 
= ANY (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 200 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 26, 51, 76]) AND b = ANY (ARRAY[''1'', ''26'']) AND c = 1'); - estimated | actual ------------+-------- - 200 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 26, 51, 76]) AND b = ANY (ARRAY[''1'', ''26'']) AND c = ANY (ARRAY[1])'); - estimated | actual ------------+-------- - 200 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 2, 26, 27, 51, 52, 76, 77]) AND b = ANY (ARRAY[''1'', ''2'', ''26'', ''27'']) AND c = ANY (ARRAY[1, 2])'); - estimated | actual ------------+-------- - 400 | 400 -(1 row) - --- ANY with inequalities should not benefit from functional dependencies -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a < ANY (ARRAY[1, 51]) AND b > ''1'''); - estimated | actual ------------+-------- - 2472 | 2400 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a >= ANY (ARRAY[1, 51]) AND b <= ANY (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 1441 | 1250 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a <= ANY (ARRAY[1, 2, 51, 52]) AND b >= ANY (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 3909 | 2550 -(1 row) - --- ALL (should not benefit from functional dependencies) -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ALL (ARRAY[''1''])'); - estimated | actual ------------+-------- - 2 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ALL (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b = ALL (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - --- changing the type of column c causes all its stats to be dropped, reverting --- to default estimates without any statistics, i.e. 
0.5% selectivity for each --- condition -ALTER TABLE functional_dependencies ALTER COLUMN c TYPE numeric; -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -ANALYZE functional_dependencies; -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -DROP STATISTICS func_deps_stat; --- now try functional dependencies with expressions -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1'''); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1'' AND (c + 1) = 2'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - --- IN -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ''1'''); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) IN (''1'', ''2'')'); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) IN (''1'', ''2'')'); - estimated | actual ------------+-------- - 1 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) = ''1'''); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 52, 102, 152) AND upper(b) IN (''1'', ''26'') AND (c + 1) = 2'); - estimated | actual ------------+-------- - 1 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 52, 102, 152) AND upper(b) IN (''1'', ''26'') AND (c + 1) IN (2)'); - estimated | actual ------------+-------- - 1 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 52, 54, 102, 104, 152, 154) AND upper(b) IN (''1'', ''2'', ''26'', ''27'') AND (c + 1) IN (2, 3)'); - estimated | actual ------------+-------- - 1 | 400 -(1 row) - --- OR clauses referencing the same attribute -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 102) AND upper(b) = ''1'''); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 102) AND (upper(b) = ''1'' OR upper(b) = ''2'')'); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 4 OR (a * 2) = 102 OR (a * 2) = 104) AND (upper(b) = ''1'' OR upper(b) = ''2'')'); - estimated | actual ------------+-------- - 1 | 200 -(1 row) - --- OR clauses referencing different attributes -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR upper(b) = ''1'') AND upper(b) = ''1'''); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - --- ANY -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY 
(ARRAY[2, 102]) AND upper(b) = ''1'''); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 102]) AND upper(b) = ANY (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 4, 102, 104]) AND upper(b) = ANY (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 1 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 52, 102, 152]) AND upper(b) = ANY (ARRAY[''1'', ''26'']) AND (c + 1) = 2'); - estimated | actual ------------+-------- - 1 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 52, 102, 152]) AND upper(b) = ANY (ARRAY[''1'', ''26'']) AND (c + 1) = ANY (ARRAY[2])'); - estimated | actual ------------+-------- - 1 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 4, 52, 54, 102, 104, 152, 154]) AND upper(b) = ANY (ARRAY[''1'', ''2'', ''26'', ''27'']) AND (c + 1) = ANY (ARRAY[2, 3])'); - estimated | actual ------------+-------- - 1 | 400 -(1 row) - --- ANY with inequalities should not benefit from functional dependencies --- the estimates however improve thanks to having expression statistics -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) < ANY (ARRAY[2, 102]) AND upper(b) > ''1'''); - estimated | actual ------------+-------- - 926 | 2400 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) >= ANY (ARRAY[2, 102]) AND upper(b) <= ANY (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 1543 | 1250 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) <= ANY (ARRAY[2, 4, 102, 104]) AND upper(b) >= ANY (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 2229 | 2550 -(1 row) - --- ALL (should not benefit from functional dependencies) -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ALL (ARRAY[''1''])'); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ALL (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) = ALL (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - --- create statistics on expressions -CREATE STATISTICS func_deps_stat (dependencies) ON (a * 2), upper(b), (c + 1) FROM functional_dependencies; -ANALYZE functional_dependencies; --- print the detected dependencies -SELECT dependencies FROM pg_stats_ext WHERE statistics_name = 'func_deps_stat'; - dependencies ------------------------------------------------------------------------------------------------------------------------- - {"-1 => -2": 1.000000, "-1 => -3": 1.000000, "-2 => -3": 1.000000, "-1, -2 => -3": 1.000000, "-1, -3 => -2": 1.000000} -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1'''); - estimated | actual 
------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1'' AND (c + 1) = 2'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - --- IN -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ''1'''); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) IN (''1'', ''2'')'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) IN (''1'', ''2'')'); - estimated | actual ------------+-------- - 200 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) = ''1'''); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 52, 102, 152) AND upper(b) IN (''1'', ''26'') AND (c + 1) = 2'); - estimated | actual ------------+-------- - 200 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 52, 102, 152) AND upper(b) IN (''1'', ''26'') AND (c + 1) IN (2)'); - estimated | actual ------------+-------- - 200 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 52, 54, 102, 104, 152, 154) AND upper(b) IN (''1'', ''2'', ''26'', ''27'') AND (c + 1) IN (2, 3)'); - estimated | actual ------------+-------- - 400 | 400 -(1 row) - --- OR clauses referencing the same attribute -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 102) AND upper(b) = ''1'''); - estimated | actual ------------+-------- - 99 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 102) AND (upper(b) = ''1'' OR upper(b) = ''2'')'); - estimated | actual ------------+-------- - 99 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 4 OR (a * 2) = 102 OR (a * 2) = 104) AND (upper(b) = ''1'' OR upper(b) = ''2'')'); - estimated | actual ------------+-------- - 197 | 200 -(1 row) - --- OR clauses referencing different attributes -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR upper(b) = ''1'') AND upper(b) = ''1'''); - estimated | actual ------------+-------- - 3 | 100 -(1 row) - --- ANY -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 102]) AND upper(b) = ''1'''); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 102]) AND upper(b) = ANY (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 4, 102, 104]) AND upper(b) = ANY (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 200 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 52, 102, 152]) AND upper(b) = ANY 
(ARRAY[''1'', ''26'']) AND (c + 1) = 2'); - estimated | actual ------------+-------- - 200 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 52, 102, 152]) AND upper(b) = ANY (ARRAY[''1'', ''26'']) AND (c + 1) = ANY (ARRAY[2])'); - estimated | actual ------------+-------- - 200 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 4, 52, 54, 102, 104, 152, 154]) AND upper(b) = ANY (ARRAY[''1'', ''2'', ''26'', ''27'']) AND (c + 1) = ANY (ARRAY[2, 3])'); - estimated | actual ------------+-------- - 400 | 400 -(1 row) - --- ANY with inequalities should not benefit from functional dependencies --- the estimates however improve thanks to having expression statistics -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) < ANY (ARRAY[2, 102]) AND upper(b) > ''1'''); - estimated | actual ------------+-------- - 2472 | 2400 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) >= ANY (ARRAY[2, 102]) AND upper(b) <= ANY (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 1441 | 1250 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) <= ANY (ARRAY[2, 4, 102, 104]) AND upper(b) >= ANY (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 3909 | 2550 -(1 row) - --- ALL (should not benefit from functional dependencies) -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ALL (ARRAY[''1''])'); - estimated | actual ------------+-------- - 2 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ALL (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) = ALL (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - --- check the ability to use multiple functional dependencies -CREATE TABLE functional_dependencies_multi ( - a INTEGER, - b INTEGER, - c INTEGER, - d INTEGER -) -WITH (autovacuum_enabled = off); -INSERT INTO functional_dependencies_multi (a, b, c, d) - SELECT - mod(i,7), - mod(i,7), - mod(i,11), - mod(i,11) - FROM generate_series(1,5000) s(i); -ANALYZE functional_dependencies_multi; --- estimates without any functional dependencies -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE a = 0 AND b = 0'); - estimated | actual ------------+-------- - 102 | 714 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE 0 = a AND 0 = b'); - estimated | actual ------------+-------- - 102 | 714 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE c = 0 AND d = 0'); - estimated | actual ------------+-------- - 41 | 454 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE a = 0 AND b = 0 AND c = 0 AND d = 0'); - estimated | actual ------------+-------- - 1 | 64 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE 0 = a AND b = 0 AND 0 = c AND d = 0'); - estimated | actual ------------+-------- - 1 | 64 -(1 row) - --- create separate functional dependencies -CREATE STATISTICS 
functional_dependencies_multi_1 (dependencies) ON a, b FROM functional_dependencies_multi; -CREATE STATISTICS functional_dependencies_multi_2 (dependencies) ON c, d FROM functional_dependencies_multi; -ANALYZE functional_dependencies_multi; -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE a = 0 AND b = 0'); - estimated | actual ------------+-------- - 714 | 714 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE 0 = a AND 0 = b'); - estimated | actual ------------+-------- - 714 | 714 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE c = 0 AND d = 0'); - estimated | actual ------------+-------- - 454 | 454 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE a = 0 AND b = 0 AND c = 0 AND d = 0'); - estimated | actual ------------+-------- - 65 | 64 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE 0 = a AND b = 0 AND 0 = c AND d = 0'); - estimated | actual ------------+-------- - 65 | 64 -(1 row) - -DROP TABLE functional_dependencies_multi; --- MCV lists -CREATE TABLE mcv_lists ( - filler1 TEXT, - filler2 NUMERIC, - a INT, - b VARCHAR, - filler3 DATE, - c INT, - d TEXT, - ia INT[] -) -WITH (autovacuum_enabled = off); --- random data (no MCV list) -INSERT INTO mcv_lists (a, b, c, filler1) - SELECT mod(i,37), mod(i,41), mod(i,43), mod(i,47) FROM generate_series(1,5000) s(i); -ANALYZE mcv_lists; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'''); - estimated | actual ------------+-------- - 3 | 4 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'' AND c = 1'); - estimated | actual ------------+-------- - 1 | 1 -(1 row) - --- create statistics -CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c FROM mcv_lists; -ANALYZE mcv_lists; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'''); - estimated | actual ------------+-------- - 3 | 4 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'' AND c = 1'); - estimated | actual ------------+-------- - 1 | 1 -(1 row) - -TRUNCATE mcv_lists; -DROP STATISTICS mcv_lists_stats; --- random data (no MCV list), but with expression -INSERT INTO mcv_lists (a, b, c, filler1) - SELECT i, i, i, i FROM generate_series(1,1000) s(i); -ANALYZE mcv_lists; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 AND mod(b::int,11) = 1'); - estimated | actual ------------+-------- - 1 | 13 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 AND mod(b::int,11) = 1 AND mod(c,13) = 1'); - estimated | actual ------------+-------- - 1 | 1 -(1 row) - --- create statistics -CREATE STATISTICS mcv_lists_stats (mcv) ON (mod(a,7)), (mod(b::int,11)), (mod(c,13)) FROM mcv_lists; -ANALYZE mcv_lists; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 AND mod(b::int,11) = 1'); - estimated | actual ------------+-------- - 13 | 13 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 AND mod(b::int,11) = 1 AND mod(c,13) = 1'); - estimated | actual ------------+-------- - 1 | 1 -(1 row) - --- 100 distinct combinations, all in the MCV list -TRUNCATE mcv_lists; -DROP STATISTICS mcv_lists_stats; -INSERT INTO mcv_lists (a, b, c, ia, filler1) - SELECT mod(i,100), mod(i,50), mod(i,25), 
array[mod(i,25)], i - FROM generate_series(1,5000) s(i); -ANALYZE mcv_lists; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'''); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = a AND ''1'' = b'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 1 AND b < ''1'''); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > a AND ''1'' > b'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= 0 AND b <= ''0'''); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 0 >= a AND ''0'' >= b'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'' AND c = 1'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 5 AND b < ''1'' AND c < 5'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 5 AND ''1'' > b AND 5 > c'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= 4 AND b <= ''0'' AND c <= 4'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 4 >= a AND ''0'' >= b AND 4 >= c'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1'); - estimated | actual ------------+-------- - 343 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1 OR d IS NOT NULL'); - estimated | actual ------------+-------- - 343 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (1, 2, 51, 52) AND b IN ( ''1'', ''2'')'); - estimated | actual ------------+-------- - 8 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (1, 2, 51, 52, NULL) AND b IN ( ''1'', ''2'', NULL)'); - estimated | actual ------------+-------- - 8 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 8 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[NULL, 1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2'', NULL])'); - estimated | actual ------------+-------- - 8 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= ANY (ARRAY[1, 2, 3]) AND b IN (''1'', ''2'', ''3'')'); - estimated | actual ------------+-------- - 26 | 150 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= ANY (ARRAY[1, NULL, 2, 3]) AND b IN (''1'', ''2'', NULL, ''3'')'); - estimated | actual ------------+-------- - 26 | 150 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND c > ANY (ARRAY[1, 2, 3])'); - estimated | actual ------------+-------- - 10 | 100 -(1 row) - -SELECT * FROM 
check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND c > ANY (ARRAY[1, 2, 3, NULL])'); - estimated | actual ------------+-------- - 10 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', ''3'') AND c > ANY (ARRAY[1, 2, 3])'); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', NULL, ''3'') AND c > ANY (ARRAY[1, 2, NULL, 3])'); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[4,5]) AND 4 = ANY(ia)'); - estimated | actual ------------+-------- - 4 | 50 -(1 row) - --- create statistics -CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c, ia FROM mcv_lists; -ANALYZE mcv_lists; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'''); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = a AND ''1'' = b'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 1 AND b < ''1'''); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > a AND ''1'' > b'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= 0 AND b <= ''0'''); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 0 >= a AND ''0'' >= b'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'' AND c = 1'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 5 AND b < ''1'' AND c < 5'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 5 AND ''1'' > b AND 5 > c'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= 4 AND b <= ''0'' AND c <= 4'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 4 >= a AND ''0'' >= b AND 4 >= c'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1'); - estimated | actual ------------+-------- - 200 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1 OR d IS NOT NULL'); - estimated | actual ------------+-------- - 200 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1 OR d IS NOT NULL'); - estimated | actual ------------+-------- - 200 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (1, 2, 51, 52) AND b IN ( ''1'', ''2'')'); - estimated | actual ------------+-------- - 200 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (1, 2, 51, 52, NULL) AND b IN ( ''1'', ''2'', NULL)'); - estimated | actual ------------+-------- - 200 | 200 -(1 
row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2''])'); - estimated | actual ------------+-------- - 200 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[NULL, 1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2'', NULL])'); - estimated | actual ------------+-------- - 200 | 200 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= ANY (ARRAY[1, 2, 3]) AND b IN (''1'', ''2'', ''3'')'); - estimated | actual ------------+-------- - 150 | 150 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= ANY (ARRAY[1, NULL, 2, 3]) AND b IN (''1'', ''2'', NULL, ''3'')'); - estimated | actual ------------+-------- - 150 | 150 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND c > ANY (ARRAY[1, 2, 3])'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND c > ANY (ARRAY[1, 2, 3, NULL])'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', ''3'') AND c > ANY (ARRAY[1, 2, 3])'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', NULL, ''3'') AND c > ANY (ARRAY[1, 2, NULL, 3])'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[4,5]) AND 4 = ANY(ia)'); - estimated | actual ------------+-------- - 4 | 50 -(1 row) - --- check change of unrelated column type does not reset the MCV statistics -ALTER TABLE mcv_lists ALTER COLUMN d TYPE VARCHAR(64); -SELECT d.stxdmcv IS NOT NULL - FROM pg_statistic_ext s, pg_statistic_ext_data d - WHERE s.stxname = 'mcv_lists_stats' - AND d.stxoid = s.oid; - ?column? 
----------- - t -(1 row) - --- check change of column type resets the MCV statistics -ALTER TABLE mcv_lists ALTER COLUMN c TYPE numeric; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'''); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -ANALYZE mcv_lists; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'''); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - --- 100 distinct combinations, all in the MCV list, but with expressions -TRUNCATE mcv_lists; -DROP STATISTICS mcv_lists_stats; -INSERT INTO mcv_lists (a, b, c, filler1) - SELECT i, i, i, i FROM generate_series(1,1000) s(i); -ANALYZE mcv_lists; --- without any stats on the expressions, we have to use default selectivities, which --- is why the estimates here are different from the pre-computed case above -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = mod(a,20) AND 1 = mod(b::int,10)'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < 1 AND mod(b::int,10) < 1'); - estimated | actual ------------+-------- - 111 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > mod(a,20) AND 1 > mod(b::int,10)'); - estimated | actual ------------+-------- - 111 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1 AND mod(c,5) = 1'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 OR mod(b::int,10) = 1 OR mod(c,25) = 1 OR d IS NOT NULL'); - estimated | actual ------------+-------- - 15 | 120 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) IN (1, 2, 51, 52, NULL) AND mod(b::int,10) IN ( 1, 2, NULL)'); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = ANY (ARRAY[1, 2, 51, 52]) AND mod(b::int,10) = ANY (ARRAY[1, 2])'); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) <= ANY (ARRAY[1, NULL, 2, 3]) AND mod(b::int,10) IN (1, 2, NULL, 3)'); - estimated | actual ------------+-------- - 11 | 150 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < ALL (ARRAY[4, 5]) AND mod(b::int,10) IN (1, 2, 3) AND mod(c,5) > ANY (ARRAY[1, 2, 3])'); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - --- create statistics with expressions only (we create three separate stats, in order not to build more complex extended stats) -CREATE STATISTICS mcv_lists_stats_1 ON (mod(a,20)) FROM mcv_lists; -CREATE STATISTICS mcv_lists_stats_2 ON (mod(b::int,10)) FROM mcv_lists; -CREATE STATISTICS mcv_lists_stats_3 ON (mod(c,5)) FROM mcv_lists; -ANALYZE mcv_lists; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1'); - estimated | actual ------------+-------- - 5 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = mod(a,20) AND 1 = mod(b::int,10)'); - estimated | actual ------------+-------- - 5 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM 
mcv_lists WHERE mod(a,20) < 1 AND mod(b::int,10) < 1'); - estimated | actual ------------+-------- - 5 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > mod(a,20) AND 1 > mod(b::int,10)'); - estimated | actual ------------+-------- - 5 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1 AND mod(c,5) = 1'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 OR mod(b::int,10) = 1 OR mod(c,25) = 1 OR d IS NOT NULL'); - estimated | actual ------------+-------- - 149 | 120 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) IN (1, 2, 51, 52, NULL) AND mod(b::int,10) IN ( 1, 2, NULL)'); - estimated | actual ------------+-------- - 20 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = ANY (ARRAY[1, 2, 51, 52]) AND mod(b::int,10) = ANY (ARRAY[1, 2])'); - estimated | actual ------------+-------- - 20 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) <= ANY (ARRAY[1, NULL, 2, 3]) AND mod(b::int,10) IN (1, 2, NULL, 3)'); - estimated | actual ------------+-------- - 116 | 150 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < ALL (ARRAY[4, 5]) AND mod(b::int,10) IN (1, 2, 3) AND mod(c,5) > ANY (ARRAY[1, 2, 3])'); - estimated | actual ------------+-------- - 12 | 100 -(1 row) - -DROP STATISTICS mcv_lists_stats_1; -DROP STATISTICS mcv_lists_stats_2; -DROP STATISTICS mcv_lists_stats_3; --- create statistics with both MCV and expressions -CREATE STATISTICS mcv_lists_stats (mcv) ON (mod(a,20)), (mod(b::int,10)), (mod(c,5)) FROM mcv_lists; -ANALYZE mcv_lists; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = mod(a,20) AND 1 = mod(b::int,10)'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < 1 AND mod(b::int,10) < 1'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > mod(a,20) AND 1 > mod(b::int,10)'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1 AND mod(c,5) = 1'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 OR mod(b::int,10) = 1 OR mod(c,25) = 1 OR d IS NOT NULL'); - estimated | actual ------------+-------- - 105 | 120 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) IN (1, 2, 51, 52, NULL) AND mod(b::int,10) IN ( 1, 2, NULL)'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = ANY (ARRAY[1, 2, 51, 52]) AND mod(b::int,10) = ANY (ARRAY[1, 2])'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) <= ANY (ARRAY[1, NULL, 2, 3]) AND mod(b::int,10) IN (1, 2, NULL, 3)'); - estimated | actual ------------+-------- - 150 | 150 -(1 row) - -SELECT * FROM 
check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < ALL (ARRAY[4, 5]) AND mod(b::int,10) IN (1, 2, 3) AND mod(c,5) > ANY (ARRAY[1, 2, 3])'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - --- we can't use the statistic for OR clauses that are not fully covered (missing 'd' attribute) -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 OR mod(b::int,10) = 1 OR mod(c,5) = 1 OR d IS NOT NULL'); - estimated | actual ------------+-------- - 200 | 200 -(1 row) - --- 100 distinct combinations with NULL values, all in the MCV list -TRUNCATE mcv_lists; -DROP STATISTICS mcv_lists_stats; -INSERT INTO mcv_lists (a, b, c, filler1) - SELECT - (CASE WHEN mod(i,100) = 1 THEN NULL ELSE mod(i,100) END), - (CASE WHEN mod(i,50) = 1 THEN NULL ELSE mod(i,50) END), - (CASE WHEN mod(i,25) = 1 THEN NULL ELSE mod(i,25) END), - i - FROM generate_series(1,5000) s(i); -ANALYZE mcv_lists; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NULL'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NULL AND c IS NULL'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NOT NULL'); - estimated | actual ------------+-------- - 49 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NOT NULL AND b IS NULL AND c IS NOT NULL'); - estimated | actual ------------+-------- - 95 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (0, 1) AND b IN (''0'', ''1'')'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - --- create statistics -CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c FROM mcv_lists; -ANALYZE mcv_lists; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NULL'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NULL AND c IS NULL'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NOT NULL'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NOT NULL AND b IS NULL AND c IS NOT NULL'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (0, 1) AND b IN (''0'', ''1'')'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - --- test pg_mcv_list_items with a very simple (single item) MCV list -TRUNCATE mcv_lists; -INSERT INTO mcv_lists (a, b, c) SELECT 1, 2, 3 FROM generate_series(1,1000) s(i); -ANALYZE mcv_lists; -SELECT m.* - FROM pg_statistic_ext s, pg_statistic_ext_data d, - pg_mcv_list_items(d.stxdmcv) m - WHERE s.stxname = 'mcv_lists_stats' - AND d.stxoid = s.oid; - index | values | nulls | frequency | base_frequency --------+---------+---------+-----------+---------------- - 0 | {1,2,3} | {f,f,f} | 1 | 1 -(1 row) - --- 2 distinct combinations with NULL values, all in the MCV list -TRUNCATE mcv_lists; -DROP STATISTICS mcv_lists_stats; -INSERT INTO mcv_lists (a, b, c, d) - SELECT - NULL, -- always NULL - (CASE WHEN mod(i,2) = 0 THEN NULL ELSE 'x' END), - (CASE WHEN mod(i,2) = 0 THEN NULL ELSE 0 END), - (CASE WHEN mod(i,2) = 0 THEN NULL ELSE 
'x' END) - FROM generate_series(1,5000) s(i); -ANALYZE mcv_lists; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE b = ''x'' OR d = ''x'''); - estimated | actual ------------+-------- - 3750 | 2500 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''x'' OR d = ''x'''); - estimated | actual ------------+-------- - 3750 | 2500 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND (b = ''x'' OR d = ''x'')'); - estimated | actual ------------+-------- - 3750 | 2500 -(1 row) - --- create statistics -CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, d FROM mcv_lists; -ANALYZE mcv_lists; --- test pg_mcv_list_items with MCV list containing variable-length data and NULLs -SELECT m.* - FROM pg_statistic_ext s, pg_statistic_ext_data d, - pg_mcv_list_items(d.stxdmcv) m - WHERE s.stxname = 'mcv_lists_stats' - AND d.stxoid = s.oid; - index | values | nulls | frequency | base_frequency --------+------------------+---------+-----------+---------------- - 0 | {NULL,x,x} | {t,f,f} | 0.5 | 0.25 - 1 | {NULL,NULL,NULL} | {t,t,t} | 0.5 | 0.25 -(2 rows) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE b = ''x'' OR d = ''x'''); - estimated | actual ------------+-------- - 2500 | 2500 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''x'' OR d = ''x'''); - estimated | actual ------------+-------- - 2500 | 2500 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND (b = ''x'' OR d = ''x'')'); - estimated | actual ------------+-------- - 2500 | 2500 -(1 row) - --- mcv with pass-by-ref fixlen types, e.g. uuid -CREATE TABLE mcv_lists_uuid ( - a UUID, - b UUID, - c UUID -) -WITH (autovacuum_enabled = off); -INSERT INTO mcv_lists_uuid (a, b, c) - SELECT - fipshash(mod(i,100)::text)::uuid, - fipshash(mod(i,50)::text)::uuid, - fipshash(mod(i,25)::text)::uuid - FROM generate_series(1,5000) s(i); -ANALYZE mcv_lists_uuid; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'''); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND c = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'''); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -CREATE STATISTICS mcv_lists_uuid_stats (mcv) ON a, b, c - FROM mcv_lists_uuid; -ANALYZE mcv_lists_uuid; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'''); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND c = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'''); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -DROP TABLE mcv_lists_uuid; --- mcv with arrays -CREATE TABLE mcv_lists_arrays ( - a TEXT[], - b NUMERIC[], - c INT[] -) -WITH (autovacuum_enabled = off); -INSERT INTO mcv_lists_arrays (a, b, c) - SELECT - ARRAY[fipshash((i/100)::text), fipshash((i/100-1)::text), fipshash((i/100+1)::text)], - ARRAY[(i/100-1)::numeric/1000, (i/100)::numeric/1000, (i/100+1)::numeric/1000], - ARRAY[(i/100-1), i/100, 
(i/100+1)] - FROM generate_series(1,5000) s(i); -CREATE STATISTICS mcv_lists_arrays_stats (mcv) ON a, b, c - FROM mcv_lists_arrays; -ANALYZE mcv_lists_arrays; --- mcv with bool -CREATE TABLE mcv_lists_bool ( - a BOOL, - b BOOL, - c BOOL -) -WITH (autovacuum_enabled = off); -INSERT INTO mcv_lists_bool (a, b, c) - SELECT - (mod(i,2) = 0), (mod(i,4) = 0), (mod(i,8) = 0) - FROM generate_series(1,10000) s(i); -ANALYZE mcv_lists_bool; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE a AND b AND c'); - estimated | actual ------------+-------- - 156 | 1250 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND b AND c'); - estimated | actual ------------+-------- - 156 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND NOT b AND c'); - estimated | actual ------------+-------- - 469 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND b AND NOT c'); - estimated | actual ------------+-------- - 1094 | 0 -(1 row) - -CREATE STATISTICS mcv_lists_bool_stats (mcv) ON a, b, c - FROM mcv_lists_bool; -ANALYZE mcv_lists_bool; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE a AND b AND c'); - estimated | actual ------------+-------- - 1250 | 1250 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND b AND c'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND NOT b AND c'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND b AND NOT c'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - --- mcv covering just a small fraction of data -CREATE TABLE mcv_lists_partial ( - a INT, - b INT, - c INT -); --- 10 frequent groups, each with 100 elements -INSERT INTO mcv_lists_partial (a, b, c) - SELECT - mod(i,10), - mod(i,10), - mod(i,10) - FROM generate_series(0,999) s(i); --- 100 groups that will make it to the MCV list (includes the 10 frequent ones) -INSERT INTO mcv_lists_partial (a, b, c) - SELECT - i, - i, - i - FROM generate_series(0,99) s(i); --- 4000 groups in total, most of which won't make it (just a single item) -INSERT INTO mcv_lists_partial (a, b, c) - SELECT - i, - i, - i - FROM generate_series(0,3999) s(i); -ANALYZE mcv_lists_partial; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 AND b = 0 AND c = 0'); - estimated | actual ------------+-------- - 1 | 102 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 OR b = 0 OR c = 0'); - estimated | actual ------------+-------- - 300 | 102 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 10 AND b = 10 AND c = 10'); - estimated | actual ------------+-------- - 1 | 2 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 10 OR b = 10 OR c = 10'); - estimated | actual ------------+-------- - 6 | 2 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 AND b = 0 AND c = 10'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 OR b = 0 OR c = 10'); - estimated | actual ------------+-------- - 204 | 104 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE 
(a = 0 AND b = 0 AND c = 0) OR (a = 1 AND b = 1 AND c = 1) OR (a = 2 AND b = 2 AND c = 2)'); - estimated | actual ------------+-------- - 1 | 306 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE (a = 0 AND b = 0) OR (a = 0 AND c = 0) OR (b = 0 AND c = 0)'); - estimated | actual ------------+-------- - 6 | 102 -(1 row) - -CREATE STATISTICS mcv_lists_partial_stats (mcv) ON a, b, c - FROM mcv_lists_partial; -ANALYZE mcv_lists_partial; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 AND b = 0 AND c = 0'); - estimated | actual ------------+-------- - 102 | 102 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 OR b = 0 OR c = 0'); - estimated | actual ------------+-------- - 96 | 102 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 10 AND b = 10 AND c = 10'); - estimated | actual ------------+-------- - 2 | 2 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 10 OR b = 10 OR c = 10'); - estimated | actual ------------+-------- - 2 | 2 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 AND b = 0 AND c = 10'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 OR b = 0 OR c = 10'); - estimated | actual ------------+-------- - 102 | 104 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE (a = 0 AND b = 0 AND c = 0) OR (a = 1 AND b = 1 AND c = 1) OR (a = 2 AND b = 2 AND c = 2)'); - estimated | actual ------------+-------- - 306 | 306 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE (a = 0 AND b = 0) OR (a = 0 AND c = 0) OR (b = 0 AND c = 0)'); - estimated | actual ------------+-------- - 108 | 102 -(1 row) - -DROP TABLE mcv_lists_partial; --- check the ability to use multiple MCV lists -CREATE TABLE mcv_lists_multi ( - a INTEGER, - b INTEGER, - c INTEGER, - d INTEGER -) -WITH (autovacuum_enabled = off); -INSERT INTO mcv_lists_multi (a, b, c, d) - SELECT - mod(i,5), - mod(i,5), - mod(i,7), - mod(i,7) - FROM generate_series(1,5000) s(i); -ANALYZE mcv_lists_multi; --- estimates without any mcv statistics -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 AND b = 0'); - estimated | actual ------------+-------- - 200 | 1000 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE c = 0 AND d = 0'); - estimated | actual ------------+-------- - 102 | 714 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE b = 0 AND c = 0'); - estimated | actual ------------+-------- - 143 | 142 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE b = 0 OR c = 0'); - estimated | actual ------------+-------- - 1571 | 1572 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 AND b = 0 AND c = 0 AND d = 0'); - estimated | actual ------------+-------- - 4 | 142 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE (a = 0 AND b = 0) OR (c = 0 AND d = 0)'); - estimated | actual ------------+-------- - 298 | 1572 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 OR b = 0 OR c = 0 OR d = 0'); - estimated | actual ------------+-------- - 2649 | 1572 -(1 row) - --- create separate MCV statistics -CREATE STATISTICS 
mcv_lists_multi_1 (mcv) ON a, b FROM mcv_lists_multi; -CREATE STATISTICS mcv_lists_multi_2 (mcv) ON c, d FROM mcv_lists_multi; -ANALYZE mcv_lists_multi; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 AND b = 0'); - estimated | actual ------------+-------- - 1000 | 1000 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE c = 0 AND d = 0'); - estimated | actual ------------+-------- - 714 | 714 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE b = 0 AND c = 0'); - estimated | actual ------------+-------- - 143 | 142 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE b = 0 OR c = 0'); - estimated | actual ------------+-------- - 1571 | 1572 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 AND b = 0 AND c = 0 AND d = 0'); - estimated | actual ------------+-------- - 143 | 142 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE (a = 0 AND b = 0) OR (c = 0 AND d = 0)'); - estimated | actual ------------+-------- - 1571 | 1572 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 OR b = 0 OR c = 0 OR d = 0'); - estimated | actual ------------+-------- - 1571 | 1572 -(1 row) - -DROP TABLE mcv_lists_multi; --- statistics on integer expressions -CREATE TABLE expr_stats (a int, b int, c int); -INSERT INTO expr_stats SELECT mod(i,10), mod(i,10), mod(i,10) FROM generate_series(1,1000) s(i); -ANALYZE expr_stats; -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE (2*a) = 0 AND (3*b) = 0'); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE (a+b) = 0 AND (a-b) = 0'); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -CREATE STATISTICS expr_stats_1 (mcv) ON (a+b), (a-b), (2*a), (3*b) FROM expr_stats; -ANALYZE expr_stats; -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE (2*a) = 0 AND (3*b) = 0'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE (a+b) = 0 AND (a-b) = 0'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -DROP STATISTICS expr_stats_1; -DROP TABLE expr_stats; --- statistics on a mix columns and expressions -CREATE TABLE expr_stats (a int, b int, c int); -INSERT INTO expr_stats SELECT mod(i,10), mod(i,10), mod(i,10) FROM generate_series(1,1000) s(i); -ANALYZE expr_stats; -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (2*a) = 0 AND (3*b) = 0'); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 3 AND b = 3 AND (a-b) = 0'); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND b = 1 AND (a-b) = 0'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -CREATE STATISTICS expr_stats_1 (mcv) ON a, b, (2*a), (3*b), (a+b), (a-b) FROM expr_stats; -ANALYZE expr_stats; -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (2*a) = 0 AND (3*b) = 0'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 3 AND b = 3 AND (a-b) = 0'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM 
check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND b = 1 AND (a-b) = 0'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -DROP TABLE expr_stats; --- statistics on expressions with different data types -CREATE TABLE expr_stats (a int, b name, c text); -INSERT INTO expr_stats SELECT mod(i,10), fipshash(mod(i,10)::text), fipshash(mod(i,10)::text) FROM generate_series(1,1000) s(i); -ANALYZE expr_stats; -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (b || c) <= ''z'' AND (c || b) >= ''0'''); - estimated | actual ------------+-------- - 11 | 100 -(1 row) - -CREATE STATISTICS expr_stats_1 (mcv) ON a, b, (b || c), (c || b) FROM expr_stats; -ANALYZE expr_stats; -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (b || c) <= ''z'' AND (c || b) >= ''0'''); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -DROP TABLE expr_stats; --- test handling of a mix of compatible and incompatible expressions -CREATE TABLE expr_stats_incompatible_test ( - c0 double precision, - c1 boolean NOT NULL -); -CREATE STATISTICS expr_stat_comp_1 ON c0, c1 FROM expr_stats_incompatible_test; -INSERT INTO expr_stats_incompatible_test VALUES (1234,false), (5678,true); -ANALYZE expr_stats_incompatible_test; -SELECT c0 FROM ONLY expr_stats_incompatible_test WHERE -( - upper('x') LIKE ('x'||('[0,1]'::int4range)) - AND - (c0 IN (0, 1) OR c1) -); - c0 ----- -(0 rows) - -DROP TABLE expr_stats_incompatible_test; --- Permission tests. Users should not be able to see specific data values in --- the extended statistics, if they lack permission to see those values in --- the underlying table. --- --- Currently this is only relevant for MCV stats. -CREATE SCHEMA tststats; -CREATE TABLE tststats.priv_test_tbl ( - a int, - b int -); -INSERT INTO tststats.priv_test_tbl - SELECT mod(i,5), mod(i,10) FROM generate_series(1,100) s(i); -CREATE STATISTICS tststats.priv_test_stats (mcv) ON a, b - FROM tststats.priv_test_tbl; -ANALYZE tststats.priv_test_tbl; --- Check printing info about extended statistics by \dX -create table stts_t1 (a int, b int); -create statistics (ndistinct) on a, b from stts_t1; -create statistics (ndistinct, dependencies) on a, b from stts_t1; -create statistics (ndistinct, dependencies, mcv) on a, b from stts_t1; -create table stts_t2 (a int, b int, c int); -create statistics on b, c from stts_t2; -create table stts_t3 (col1 int, col2 int, col3 int); -create statistics stts_hoge on col1, col2, col3 from stts_t3; -create schema stts_s1; -create schema stts_s2; -create statistics stts_s1.stts_foo on col1, col2 from stts_t3; -create statistics stts_s2.stts_yama (dependencies, mcv) on col1, col3 from stts_t3; -insert into stts_t1 select i,i from generate_series(1,100) i; -analyze stts_t1; -set search_path to public, stts_s1, stts_s2, tststats; -\dX - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV -----------+------------------------+------------------------------------------------------------------+-----------+--------------+--------- - public | func_deps_stat | (a * 2), upper(b), (c + 1::numeric) FROM functional_dependencies | | defined | - public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined - public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined - public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined - public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined - public | stts_t1_a_b_stat | a, b FROM 
stts_t1 | defined | | - public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | - public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined - public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined - stts_s1 | stts_foo | col1, col2 FROM stts_t3 | defined | defined | defined - stts_s2 | stts_yama | col1, col3 FROM stts_t3 | | defined | defined - tststats | priv_test_stats | a, b FROM priv_test_tbl | | | defined -(12 rows) - -\dX stts_t* - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+-------------------+-------------------+-----------+--------------+--------- - public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | | - public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | - public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined - public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined -(4 rows) - -\dX *stts_hoge - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+-----------+-------------------------------+-----------+--------------+--------- - public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined -(1 row) - -\dX+ - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV -----------+------------------------+------------------------------------------------------------------+-----------+--------------+--------- - public | func_deps_stat | (a * 2), upper(b), (c + 1::numeric) FROM functional_dependencies | | defined | - public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined - public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined - public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined - public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined - public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | | - public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | - public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined - public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined - stts_s1 | stts_foo | col1, col2 FROM stts_t3 | defined | defined | defined - stts_s2 | stts_yama | col1, col3 FROM stts_t3 | | defined | defined - tststats | priv_test_stats | a, b FROM priv_test_tbl | | | defined -(12 rows) - -\dX+ stts_t* - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+-------------------+-------------------+-----------+--------------+--------- - public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | | - public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | - public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined - public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined -(4 rows) - -\dX+ *stts_hoge - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+-----------+-------------------------------+-----------+--------------+--------- - public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined -(1 row) - -\dX+ stts_s2.stts_yama - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ----------+-----------+-------------------------+-----------+--------------+--------- - stts_s2 | stts_yama | col1, col3 FROM stts_t3 | | defined | defined -(1 row) - -create statistics (mcv) ON a, b, (a+b), (a-b) 
FROM stts_t1; -create statistics (mcv) ON a, b, (a+b), (a-b) FROM stts_t1; -create statistics (mcv) ON (a+b), (a-b) FROM stts_t1; -\dX stts_t*expr* - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+-----------------------------+-------------------------------------+-----------+--------------+--------- - public | stts_t1_a_b_expr_expr_stat | a, b, (a + b), (a - b) FROM stts_t1 | | | defined - public | stts_t1_a_b_expr_expr_stat1 | a, b, (a + b), (a - b) FROM stts_t1 | | | defined - public | stts_t1_expr_expr_stat | (a + b), (a - b) FROM stts_t1 | | | defined -(3 rows) - -drop statistics stts_t1_a_b_expr_expr_stat; -drop statistics stts_t1_a_b_expr_expr_stat1; -drop statistics stts_t1_expr_expr_stat; -set search_path to public, stts_s1; -\dX - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ----------+------------------------+------------------------------------------------------------------+-----------+--------------+--------- - public | func_deps_stat | (a * 2), upper(b), (c + 1::numeric) FROM functional_dependencies | | defined | - public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined - public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined - public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined - public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined - public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | | - public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | - public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined - public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined - stts_s1 | stts_foo | col1, col2 FROM stts_t3 | defined | defined | defined -(10 rows) - -create role regress_stats_ext nosuperuser; -set role regress_stats_ext; -\dX - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+------------------------+------------------------------------------------------------------+-----------+--------------+--------- - public | func_deps_stat | (a * 2), upper(b), (c + 1::numeric) FROM functional_dependencies | | defined | - public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined - public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined - public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined - public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined - public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | | - public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | - public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined - public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined -(9 rows) - -reset role; -drop table stts_t1, stts_t2, stts_t3; -drop schema stts_s1, stts_s2 cascade; -drop user regress_stats_ext; -reset search_path; --- User with no access -CREATE USER regress_stats_user1; -GRANT USAGE ON SCHEMA tststats TO regress_stats_user1; -SET SESSION AUTHORIZATION regress_stats_user1; -SELECT * FROM tststats.priv_test_tbl; -- Permission denied -ERROR: permission denied for table priv_test_tbl --- Check individual columns if we don't have table privilege -SELECT * FROM tststats.priv_test_tbl - WHERE a = 1 and tststats.priv_test_tbl.* > (1, 1) is not null; -ERROR: permission denied for table priv_test_tbl --- Attempt to gain access using a leaky operator -CREATE FUNCTION 
op_leak(int, int) RETURNS bool - AS 'BEGIN RAISE NOTICE ''op_leak => %, %'', $1, $2; RETURN $1 < $2; END' - LANGUAGE plpgsql; -CREATE OPERATOR <<< (procedure = op_leak, leftarg = int, rightarg = int, - restrict = scalarltsel); -SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied -ERROR: permission denied for table priv_test_tbl -DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied -ERROR: permission denied for table priv_test_tbl --- Grant access via a security barrier view, but hide all data -RESET SESSION AUTHORIZATION; -CREATE VIEW tststats.priv_test_view WITH (security_barrier=true) - AS SELECT * FROM tststats.priv_test_tbl WHERE false; -GRANT SELECT, DELETE ON tststats.priv_test_view TO regress_stats_user1; --- Should now have access via the view, but see nothing and leak nothing -SET SESSION AUTHORIZATION regress_stats_user1; -SELECT * FROM tststats.priv_test_view WHERE a <<< 0 AND b <<< 0; -- Should not leak - a | b ----+--- -(0 rows) - -DELETE FROM tststats.priv_test_view WHERE a <<< 0 AND b <<< 0; -- Should not leak --- Grant table access, but hide all data with RLS -RESET SESSION AUTHORIZATION; -ALTER TABLE tststats.priv_test_tbl ENABLE ROW LEVEL SECURITY; -GRANT SELECT, DELETE ON tststats.priv_test_tbl TO regress_stats_user1; --- Should now have direct table access, but see nothing and leak nothing -SET SESSION AUTHORIZATION regress_stats_user1; -SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak - a | b ----+--- -(0 rows) - -DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak --- Tidy up -DROP OPERATOR <<< (int, int); -DROP FUNCTION op_leak(int, int); -RESET SESSION AUTHORIZATION; -DROP SCHEMA tststats CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table tststats.priv_test_tbl -drop cascades to view tststats.priv_test_view -DROP USER regress_stats_user1; +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/collate.linux.utf8_1.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/collate.linux.utf8.out --- /tmp/cirrus-ci-build/src/test/regress/expected/collate.linux.utf8_1.out 2024-03-13 23:12:37.622744000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/collate.linux.utf8.out 2024-03-13 23:14:29.809196000 +0000 @@ -1,11 +1,2 @@ -/* - * This test is for Linux/glibc systems and assumes that a full set of - * locales is installed. It must be run in a database with UTF-8 encoding, - * because other encodings don't support all the characters used. - */ -SELECT getdatabaseencoding() <> 'UTF8' OR - (SELECT count(*) FROM pg_collation WHERE collname IN ('de_DE', 'en_US', 'sv_SE', 'tr_TR') AND collencoding = pg_char_to_encoding('UTF8')) <> 4 OR - version() !~ 'linux-gnu' - AS skip_test \gset -\if :skip_test -\quit +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/collate.windows.win1252_1.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/collate.windows.win1252.out --- /tmp/cirrus-ci-build/src/test/regress/expected/collate.windows.win1252_1.out 2024-03-13 23:12:37.622787000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/collate.windows.win1252.out 2024-03-13 23:14:29.805472000 +0000 @@ -1,13 +1,2 @@ -/* - * This test is meant to run on Windows systems that has successfully - * run pg_import_system_collations(). Also, the database must have - * WIN1252 encoding, because of the locales' own encodings. Because - * of this, some test are lost from UTF-8 version, such as Turkish - * dotted and undotted 'i'. - */ -SELECT getdatabaseencoding() <> 'WIN1252' OR - (SELECT count(*) FROM pg_collation WHERE collname IN ('de_DE', 'en_US', 'sv_SE') AND collencoding = pg_char_to_encoding('WIN1252')) <> 3 OR - (version() !~ 'Visual C\+\+' AND version() !~ 'mingw32' AND version() !~ 'windows') - AS skip_test \gset -\if :skip_test -\quit +psql: error: connection to server on socket "/tmp/pg_regress-F0fRvK/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/partition_join.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/partition_join.out --- /tmp/cirrus-ci-build/src/test/regress/expected/partition_join.out 2024-03-13 23:12:37.626035000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/partition_join.out 2024-03-13 23:14:45.721416000 +0000 @@ -2691,2277 +2691,10 @@ -- anti join EXPLAIN (COSTS OFF) SELECT t1.* FROM prt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Anti Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Hash Right Anti Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Hash Right Anti Join - Hash Cond: (t2_3.b = t1_3.a) - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(21 rows) - -SELECT t1.* FROM prt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - a | b | c ------+---+------ - 150 | 0 | 0150 - 175 | 0 | 0175 - 300 | 0 | 0300 - 325 | 0 | 0325 -(4 rows) - --- anti join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt2_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt1_adv t2 WHERE t1.b = t2.a) AND t1.a = 0 ORDER BY t1.b; - QUERY PLAN ---------------------------------------------------------- - Sort - Sort Key: t1.b - -> Hash Right Anti Join - Hash Cond: (t2.a = t1.b) - -> Append - -> Seq Scan on prt1_adv_p1 t2_1 - -> Seq Scan on prt1_adv_p2 t2_2 - -> Seq Scan on prt1_adv_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on prt2_adv_p1 t1_1 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p2 t1_2 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p3 t1_3 - Filter: (a = 0) - -> Seq Scan on prt2_adv_extra t1_4 - Filter: (a = 0) -(18 rows) - --- full join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT 
t1.a, t1.c, t2.b, t2.c FROM (SELECT 175 phv, * FROM prt1_adv WHERE prt1_adv.b = 0) t1 FULL JOIN (SELECT 425 phv, * FROM prt2_adv WHERE prt2_adv.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b ORDER BY t1.a, t2.b; - QUERY PLAN ----------------------------------------------------------------- - Sort - Sort Key: prt1_adv.a, prt2_adv.b - -> Hash Full Join - Hash Cond: (prt2_adv.b = prt1_adv.a) - Filter: (((175) = prt1_adv.a) OR ((425) = prt2_adv.b)) - -> Append - -> Seq Scan on prt2_adv_p1 prt2_adv_1 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p2 prt2_adv_2 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p3 prt2_adv_3 - Filter: (a = 0) - -> Seq Scan on prt2_adv_extra prt2_adv_4 - Filter: (a = 0) - -> Hash - -> Append - -> Seq Scan on prt1_adv_p1 prt1_adv_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p2 prt1_adv_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 prt1_adv_3 - Filter: (b = 0) -(22 rows) - --- 3-way join where not every pair of relations can do partitioned join -EXPLAIN (COSTS OFF) -SELECT t1.b, t1.c, t2.a, t2.c, t3.a, t3.c FROM prt2_adv t1 LEFT JOIN prt1_adv t2 ON (t1.b = t2.a) INNER JOIN prt1_adv t3 ON (t1.b = t3.a) WHERE t1.a = 0 ORDER BY t1.b, t2.a, t3.a; - QUERY PLAN --------------------------------------------------------------------------------- - Sort - Sort Key: t1.b, t2.a - -> Append - -> Nested Loop Left Join - -> Nested Loop - -> Seq Scan on prt2_adv_p1 t1_1 - Filter: (a = 0) - -> Index Scan using prt1_adv_p1_a_idx on prt1_adv_p1 t3_1 - Index Cond: (a = t1_1.b) - -> Index Scan using prt1_adv_p1_a_idx on prt1_adv_p1 t2_1 - Index Cond: (a = t1_1.b) - -> Hash Right Join - Hash Cond: (t2_2.a = t1_2.b) - -> Seq Scan on prt1_adv_p2 t2_2 - -> Hash - -> Hash Join - Hash Cond: (t3_2.a = t1_2.b) - -> Seq Scan on prt1_adv_p2 t3_2 - -> Hash - -> Seq Scan on prt2_adv_p2 t1_2 - Filter: (a = 0) - -> Hash Right Join - Hash Cond: (t2_3.a = t1_3.b) - -> Seq Scan on prt1_adv_p3 t2_3 - -> Hash - -> Hash Join - Hash Cond: (t3_3.a = t1_3.b) - -> Seq Scan on prt1_adv_p3 t3_3 - -> Hash - -> Seq Scan on prt2_adv_p3 t1_3 - Filter: (a = 0) -(31 rows) - -SELECT t1.b, t1.c, t2.a, t2.c, t3.a, t3.c FROM prt2_adv t1 LEFT JOIN prt1_adv t2 ON (t1.b = t2.a) INNER JOIN prt1_adv t3 ON (t1.b = t3.a) WHERE t1.a = 0 ORDER BY t1.b, t2.a, t3.a; - b | c | a | c | a | c ------+------+-----+------+-----+------ - 100 | 0100 | 100 | 0100 | 100 | 0100 - 125 | 0125 | 125 | 0125 | 125 | 0125 - 200 | 0200 | 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 | 275 | 0275 - 350 | 0350 | 350 | 0350 | 350 | 0350 - 375 | 0375 | 375 | 0375 | 375 | 0375 -(8 rows) - -DROP TABLE prt2_adv_extra; --- Test cases where a partition on one side matches multiple partitions on --- the other side; we currently can't do partitioned join in such cases -ALTER TABLE prt2_adv DETACH PARTITION prt2_adv_p3; --- Split prt2_adv_p3 into two partitions so that prt1_adv_p3 matches both -CREATE TABLE prt2_adv_p3_1 PARTITION OF prt2_adv FOR VALUES FROM (350) TO (375); -CREATE TABLE prt2_adv_p3_2 PARTITION OF prt2_adv FOR VALUES FROM (375) TO (500); -INSERT INTO prt2_adv SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(350, 499) i; -ANALYZE prt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: (t2.b = t1.a) - -> Append - 
-> Seq Scan on prt2_adv_p1 t2_1 - -> Seq Scan on prt2_adv_p2 t2_2 - -> Seq Scan on prt2_adv_p3_1 t2_3 - -> Seq Scan on prt2_adv_p3_2 t2_4 - -> Hash - -> Append - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(17 rows) - --- semi join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt1_adv t1 WHERE EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Semi Join - Hash Cond: (t1.a = t2.b) - -> Append - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) - -> Hash - -> Append - -> Seq Scan on prt2_adv_p1 t2_1 - -> Seq Scan on prt2_adv_p2 t2_2 - -> Seq Scan on prt2_adv_p3_1 t2_3 - -> Seq Scan on prt2_adv_p3_2 t2_4 -(17 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a, t2.b - -> Hash Right Join - Hash Cond: (t2.b = t1.a) - -> Append - -> Seq Scan on prt2_adv_p1 t2_1 - -> Seq Scan on prt2_adv_p2 t2_2 - -> Seq Scan on prt2_adv_p3_1 t2_3 - -> Seq Scan on prt2_adv_p3_2 t2_4 - -> Hash - -> Append - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(17 rows) - --- anti join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Anti Join - Hash Cond: (t2.b = t1.a) - -> Append - -> Seq Scan on prt2_adv_p1 t2_1 - -> Seq Scan on prt2_adv_p2 t2_2 - -> Seq Scan on prt2_adv_p3_1 t2_3 - -> Seq Scan on prt2_adv_p3_2 t2_4 - -> Hash - -> Append - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(17 rows) - --- full join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 175 phv, * FROM prt1_adv WHERE prt1_adv.b = 0) t1 FULL JOIN (SELECT 425 phv, * FROM prt2_adv WHERE prt2_adv.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b ORDER BY t1.a, t2.b; - QUERY PLAN ----------------------------------------------------------------- - Sort - Sort Key: prt1_adv.a, prt2_adv.b - -> Hash Full Join - Hash Cond: (prt2_adv.b = prt1_adv.a) - Filter: (((175) = prt1_adv.a) OR ((425) = prt2_adv.b)) - -> Append - -> Seq Scan on prt2_adv_p1 prt2_adv_1 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p2 prt2_adv_2 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p3_1 prt2_adv_3 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p3_2 prt2_adv_4 - Filter: (a = 0) - -> Hash - -> Append - -> Seq Scan on prt1_adv_p1 prt1_adv_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p2 prt1_adv_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 prt1_adv_3 - Filter: (b = 0) -(22 rows) - -DROP TABLE prt2_adv_p3_1; -DROP TABLE prt2_adv_p3_2; -ANALYZE prt2_adv; --- Test default partitions -ALTER TABLE prt1_adv DETACH PARTITION prt1_adv_p1; --- Change prt1_adv_p1 to the default partition -ALTER TABLE prt1_adv ATTACH PARTITION prt1_adv_p1 DEFAULT; -ALTER TABLE prt1_adv DETACH PARTITION prt1_adv_p3; -ANALYZE 
prt1_adv; --- We can do partitioned join even if only one of relations has the default --- partition -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: (t2_1.b = t1_2.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_2 - Filter: (b = 0) - -> Hash Join - Hash Cond: (t2_2.b = t1_1.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_1 - Filter: (b = 0) -(15 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 100 | 0100 | 100 | 0100 - 125 | 0125 | 125 | 0125 - 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 -(6 rows) - --- Restore prt1_adv_p3 -ALTER TABLE prt1_adv ATTACH PARTITION prt1_adv_p3 FOR VALUES FROM (300) TO (400); -ANALYZE prt1_adv; --- Restore prt2_adv_p3 -ALTER TABLE prt2_adv ATTACH PARTITION prt2_adv_p3 FOR VALUES FROM (350) TO (500); -ANALYZE prt2_adv; --- Partitioned join can't be applied because the default partition of prt1_adv --- matches prt2_adv_p1 and prt2_adv_p3 -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: (t2.b = t1.a) - -> Append - -> Seq Scan on prt2_adv_p1 t2_1 - -> Seq Scan on prt2_adv_p2 t2_2 - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on prt1_adv_p2 t1_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 t1_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p1 t1_3 - Filter: (b = 0) -(16 rows) - -ALTER TABLE prt2_adv DETACH PARTITION prt2_adv_p3; --- Change prt2_adv_p3 to the default partition -ALTER TABLE prt2_adv ATTACH PARTITION prt2_adv_p3 DEFAULT; -ANALYZE prt2_adv; --- Partitioned join can't be applied because the default partition of prt1_adv --- matches prt2_adv_p1 and prt2_adv_p3 -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: (t2.b = t1.a) - -> Append - -> Seq Scan on prt2_adv_p1 t2_1 - -> Seq Scan on prt2_adv_p2 t2_2 - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on prt1_adv_p2 t1_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 t1_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p1 t1_3 - Filter: (b = 0) -(16 rows) - -DROP TABLE prt1_adv_p3; -ANALYZE prt1_adv; -DROP TABLE prt2_adv_p3; -ANALYZE prt2_adv; -CREATE TABLE prt3_adv (a int, b int, c varchar) PARTITION BY RANGE (a); -CREATE TABLE prt3_adv_p1 PARTITION OF prt3_adv FOR VALUES FROM (200) TO (300); -CREATE TABLE prt3_adv_p2 PARTITION OF prt3_adv FOR VALUES FROM (300) TO (400); -CREATE INDEX prt3_adv_a_idx ON prt3_adv (a); -INSERT INTO prt3_adv SELECT i, i % 25, to_char(i, 'FM0000') FROM generate_series(200, 399) i; -ANALYZE prt3_adv; --- 3-way join to test the default partition of a join relation -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c, t3.a, t3.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) LEFT JOIN prt3_adv t3 ON (t1.a = t3.a) WHERE t1.b = 0 ORDER BY 
t1.a, t2.b, t3.a; - QUERY PLAN ------------------------------------------------------------------- - Sort - Sort Key: t1.a, t2.b, t3.a - -> Append - -> Hash Right Join - Hash Cond: (t3_1.a = t1_1.a) - -> Seq Scan on prt3_adv_p1 t3_1 - -> Hash - -> Hash Right Join - Hash Cond: (t2_2.b = t1_1.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_1 - Filter: (b = 0) - -> Hash Right Join - Hash Cond: (t3_2.a = t1_2.a) - -> Seq Scan on prt3_adv_p2 t3_2 - -> Hash - -> Hash Right Join - Hash Cond: (t2_1.b = t1_2.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_2 - Filter: (b = 0) -(23 rows) - -SELECT t1.a, t1.c, t2.b, t2.c, t3.a, t3.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) LEFT JOIN prt3_adv t3 ON (t1.a = t3.a) WHERE t1.b = 0 ORDER BY t1.a, t2.b, t3.a; - a | c | b | c | a | c ------+------+-----+------+-----+------ - 100 | 0100 | 100 | 0100 | | - 125 | 0125 | 125 | 0125 | | - 150 | 0150 | | | | - 175 | 0175 | | | | - 200 | 0200 | 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 | 275 | 0275 -(8 rows) - -DROP TABLE prt1_adv; -DROP TABLE prt2_adv; -DROP TABLE prt3_adv; --- Test interaction of partitioned join with partition pruning -CREATE TABLE prt1_adv (a int, b int, c varchar) PARTITION BY RANGE (a); -CREATE TABLE prt1_adv_p1 PARTITION OF prt1_adv FOR VALUES FROM (100) TO (200); -CREATE TABLE prt1_adv_p2 PARTITION OF prt1_adv FOR VALUES FROM (200) TO (300); -CREATE TABLE prt1_adv_p3 PARTITION OF prt1_adv FOR VALUES FROM (300) TO (400); -CREATE INDEX prt1_adv_a_idx ON prt1_adv (a); -INSERT INTO prt1_adv SELECT i, i % 25, to_char(i, 'FM0000') FROM generate_series(100, 399) i; -ANALYZE prt1_adv; -CREATE TABLE prt2_adv (a int, b int, c varchar) PARTITION BY RANGE (b); -CREATE TABLE prt2_adv_p1 PARTITION OF prt2_adv FOR VALUES FROM (100) TO (200); -CREATE TABLE prt2_adv_p2 PARTITION OF prt2_adv FOR VALUES FROM (200) TO (400); -CREATE INDEX prt2_adv_b_idx ON prt2_adv (b); -INSERT INTO prt2_adv SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(100, 399) i; -ANALYZE prt2_adv; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.a < 300 AND t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------------ - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: ((a < 300) AND (b = 0)) - -> Hash Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: ((a < 300) AND (b = 0)) -(15 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.a < 300 AND t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 100 | 0100 | 100 | 0100 - 125 | 0125 | 125 | 0125 - 150 | 0150 | 150 | 0150 - 175 | 0175 | 175 | 0175 - 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 -(8 rows) - -DROP TABLE prt1_adv_p3; -CREATE TABLE prt1_adv_default PARTITION OF prt1_adv DEFAULT; -ANALYZE prt1_adv; -CREATE TABLE prt2_adv_default PARTITION OF prt2_adv DEFAULT; -ANALYZE prt2_adv; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.a >= 100 AND t1.a < 300 AND t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN 
--------------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: ((a >= 100) AND (a < 300) AND (b = 0)) - -> Hash Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: ((a >= 100) AND (a < 300) AND (b = 0)) -(15 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.a >= 100 AND t1.a < 300 AND t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 100 | 0100 | 100 | 0100 - 125 | 0125 | 125 | 0125 - 150 | 0150 | 150 | 0150 - 175 | 0175 | 175 | 0175 - 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 -(8 rows) - -DROP TABLE prt1_adv; -DROP TABLE prt2_adv; --- Tests for list-partitioned tables -CREATE TABLE plt1_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt1_adv_p1 PARTITION OF plt1_adv FOR VALUES IN ('0001', '0003'); -CREATE TABLE plt1_adv_p2 PARTITION OF plt1_adv FOR VALUES IN ('0004', '0006'); -CREATE TABLE plt1_adv_p3 PARTITION OF plt1_adv FOR VALUES IN ('0008', '0009'); -INSERT INTO plt1_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (1, 3, 4, 6, 8, 9); -ANALYZE plt1_adv; -CREATE TABLE plt2_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt2_adv_p1 PARTITION OF plt2_adv FOR VALUES IN ('0002', '0003'); -CREATE TABLE plt2_adv_p2 PARTITION OF plt2_adv FOR VALUES IN ('0004', '0006'); -CREATE TABLE plt2_adv_p3 PARTITION OF plt2_adv FOR VALUES IN ('0007', '0009'); -INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -ANALYZE plt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 9 | 0009 | 9 | 0009 -(4 rows) - --- semi join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN ----------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Nested Loop Semi Join - Join Filter: ((t1_1.a = t2_1.a) AND (t1_1.c = t2_1.c)) - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Nested Loop Semi Join - Join Filter: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 
10) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Nested Loop Semi Join - Join Filter: ((t1_3.a = t2_3.a) AND (t1_3.c = t2_3.c)) - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p3 t2_3 -(18 rows) - -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - a | b | c ----+---+------ - 3 | 3 | 0003 - 4 | 4 | 0004 - 6 | 6 | 0006 - 9 | 9 | 0009 -(4 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 -(6 rows) - --- anti join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Anti Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Anti Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Anti Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - a | b | c ----+---+------ - 1 | 1 | 0001 - 8 | 8 | 0008 -(2 rows) - --- full join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - QUERY PLAN ------------------------------------------------------------------------------------ - Sort - Sort Key: t1.a, t2.a - -> Append - -> Hash Full Join - Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.c = t2_1.c)) - Filter: ((COALESCE(t1_1.b, 0) < 10) AND (COALESCE(t2_1.b, 0) < 10)) - -> Seq Scan on plt1_adv_p1 t1_1 - -> Hash - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash Full Join - Hash Cond: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) - Filter: ((COALESCE(t1_2.b, 0) < 10) AND (COALESCE(t2_2.b, 0) < 10)) - -> Seq Scan on plt1_adv_p2 t1_2 - -> Hash - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash Full Join - Hash Cond: ((t1_3.a = t2_3.a) AND (t1_3.c = t2_3.c)) - Filter: ((COALESCE(t1_3.b, 0) < 10) AND 
(COALESCE(t2_3.b, 0) < 10)) - -> Seq Scan on plt1_adv_p3 t1_3 - -> Hash - -> Seq Scan on plt2_adv_p3 t2_3 -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - a | c | a | c ----+------+---+------ - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 - | | 2 | 0002 - | | 7 | 0007 -(8 rows) - --- Test cases where one side has an extra partition -CREATE TABLE plt2_adv_extra PARTITION OF plt2_adv FOR VALUES IN ('0000'); -INSERT INTO plt2_adv_extra VALUES (0, 0, '0000'); -ANALYZE plt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 9 | 0009 | 9 | 0009 -(4 rows) - --- semi join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN ----------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Nested Loop Semi Join - Join Filter: ((t1_1.a = t2_1.a) AND (t1_1.c = t2_1.c)) - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Nested Loop Semi Join - Join Filter: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Nested Loop Semi Join - Join Filter: ((t1_3.a = t2_3.a) AND (t1_3.c = t2_3.c)) - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p3 t2_3 -(18 rows) - -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - a | b | c ----+---+------ - 3 | 3 | 0003 - 4 | 4 | 0004 - 6 | 6 | 0006 - 9 | 9 | 0009 -(4 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on 
plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 -(6 rows) - --- left join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt2_adv t1 LEFT JOIN plt1_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ---------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt1_adv_p1 t2_1 - -> Seq Scan on plt1_adv_p2 t2_2 - -> Seq Scan on plt1_adv_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on plt2_adv_extra t1_1 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p1 t1_2 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p2 t1_3 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p3 t1_4 - Filter: (b < 10) -(18 rows) - --- anti join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Anti Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Anti Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Anti Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - a | b | c ----+---+------ - 1 | 1 | 0001 - 8 | 8 | 0008 -(2 rows) - --- anti join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt2_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt1_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN ---------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Anti Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt1_adv_p1 t2_1 - -> Seq Scan on plt1_adv_p2 t2_2 - -> Seq Scan on plt1_adv_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on plt2_adv_extra t1_1 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p1 t1_2 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p2 t1_3 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p3 t1_4 - Filter: (b < 10) -(18 rows) - --- full join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - QUERY PLAN -------------------------------------------------------------------------- - Sort - Sort Key: t1.a, t2.a - -> Hash Full Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - 
Filter: ((COALESCE(t1.b, 0) < 10) AND (COALESCE(t2.b, 0) < 10)) - -> Append - -> Seq Scan on plt2_adv_extra t2_1 - -> Seq Scan on plt2_adv_p1 t2_2 - -> Seq Scan on plt2_adv_p2 t2_3 - -> Seq Scan on plt2_adv_p3 t2_4 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - -> Seq Scan on plt1_adv_p2 t1_2 - -> Seq Scan on plt1_adv_p3 t1_3 -(15 rows) - -DROP TABLE plt2_adv_extra; --- Test cases where a partition on one side matches multiple partitions on --- the other side; we currently can't do partitioned join in such cases -ALTER TABLE plt2_adv DETACH PARTITION plt2_adv_p2; --- Split plt2_adv_p2 into two partitions so that plt1_adv_p2 matches both -CREATE TABLE plt2_adv_p2_1 PARTITION OF plt2_adv FOR VALUES IN ('0004'); -CREATE TABLE plt2_adv_p2_2 PARTITION OF plt2_adv FOR VALUES IN ('0006'); -INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (4, 6); -ANALYZE plt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_1 t2_2 - -> Seq Scan on plt2_adv_p2_2 t2_3 - -> Seq Scan on plt2_adv_p3 t2_4 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(17 rows) - --- semi join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Semi Join - Hash Cond: ((t1.a = t2.a) AND (t1.c = t2.c)) - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) - -> Hash - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_1 t2_2 - -> Seq Scan on plt2_adv_p2_2 t2_3 - -> Seq Scan on plt2_adv_p3 t2_4 -(17 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_1 t2_2 - -> Seq Scan on plt2_adv_p2_2 t2_3 - -> Seq Scan on plt2_adv_p3 t2_4 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(17 rows) - --- anti join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Anti Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_1 t2_2 - -> Seq Scan on plt2_adv_p2_2 t2_3 - -> Seq Scan on plt2_adv_p3 t2_4 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Seq Scan on 
plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(17 rows) - --- full join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - QUERY PLAN -------------------------------------------------------------------------- - Sort - Sort Key: t1.a, t2.a - -> Hash Full Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - Filter: ((COALESCE(t1.b, 0) < 10) AND (COALESCE(t2.b, 0) < 10)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_1 t2_2 - -> Seq Scan on plt2_adv_p2_2 t2_3 - -> Seq Scan on plt2_adv_p3 t2_4 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - -> Seq Scan on plt1_adv_p2 t1_2 - -> Seq Scan on plt1_adv_p3 t1_3 -(15 rows) - -DROP TABLE plt2_adv_p2_1; -DROP TABLE plt2_adv_p2_2; --- Restore plt2_adv_p2 -ALTER TABLE plt2_adv ATTACH PARTITION plt2_adv_p2 FOR VALUES IN ('0004', '0006'); --- Test NULL partitions -ALTER TABLE plt1_adv DETACH PARTITION plt1_adv_p1; --- Change plt1_adv_p1 to the NULL partition -CREATE TABLE plt1_adv_p1_null PARTITION OF plt1_adv FOR VALUES IN (NULL, '0001', '0003'); -INSERT INTO plt1_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (1, 3); -INSERT INTO plt1_adv VALUES (-1, -1, NULL); -ANALYZE plt1_adv; -ALTER TABLE plt2_adv DETACH PARTITION plt2_adv_p3; --- Change plt2_adv_p3 to the NULL partition -CREATE TABLE plt2_adv_p3_null PARTITION OF plt2_adv FOR VALUES IN (NULL, '0007', '0009'); -INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (7, 9); -INSERT INTO plt2_adv VALUES (-1, -1, NULL); -ANALYZE plt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1_null t1_1 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3_null t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 9 | 0009 | 9 | 0009 -(4 rows) - --- semi join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN ----------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Semi Join - Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.c = t2_1.c)) - -> Seq Scan on plt1_adv_p1_null t1_1 - Filter: (b < 10) - -> Hash - -> Seq Scan on plt2_adv_p1 t2_1 - -> Nested Loop Semi Join - Join Filter: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Nested Loop Semi Join - Join Filter: 
((t1_3.a = t2_3.a) AND (t1_3.c = t2_3.c)) - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p3_null t2_3 -(19 rows) - -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - a | b | c ----+---+------ - 3 | 3 | 0003 - 4 | 4 | 0004 - 6 | 6 | 0006 - 9 | 9 | 0009 -(4 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1_null t1_1 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3_null t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c -----+------+---+------ - -1 | | | - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 -(7 rows) - --- anti join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Anti Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1_null t1_1 - Filter: (b < 10) - -> Hash Right Anti Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Anti Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3_null t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - a | b | c -----+----+------ - -1 | -1 | - 1 | 1 | 0001 - 8 | 8 | 0008 -(3 rows) - --- full join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - QUERY PLAN ------------------------------------------------------------------------------------ - Sort - Sort Key: t1.a, t2.a - -> Append - -> Hash Full Join - Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.c = t2_1.c)) - Filter: ((COALESCE(t1_1.b, 0) < 10) AND (COALESCE(t2_1.b, 0) < 10)) - -> Seq Scan on plt1_adv_p1_null t1_1 - -> Hash - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash Full Join - Hash Cond: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) - Filter: ((COALESCE(t1_2.b, 0) < 10) AND (COALESCE(t2_2.b, 0) < 10)) - -> Seq Scan on plt1_adv_p2 t1_2 - -> Hash - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash Full Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - Filter: ((COALESCE(t1_3.b, 0) < 10) AND (COALESCE(t2_3.b, 0) < 10)) - -> 
Seq Scan on plt2_adv_p3_null t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - a | c | a | c -----+------+----+------ - -1 | | | - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 - | | -1 | - | | 2 | 0002 - | | 7 | 0007 -(10 rows) - -DROP TABLE plt1_adv_p1_null; --- Restore plt1_adv_p1 -ALTER TABLE plt1_adv ATTACH PARTITION plt1_adv_p1 FOR VALUES IN ('0001', '0003'); --- Add to plt1_adv the extra NULL partition containing only NULL values as the --- key values -CREATE TABLE plt1_adv_extra PARTITION OF plt1_adv FOR VALUES IN (NULL); -INSERT INTO plt1_adv VALUES (-1, -1, NULL); -ANALYZE plt1_adv; -DROP TABLE plt2_adv_p3_null; --- Restore plt2_adv_p3 -ALTER TABLE plt2_adv ATTACH PARTITION plt2_adv_p3 FOR VALUES IN ('0007', '0009'); -ANALYZE plt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 9 | 0009 | 9 | 0009 -(4 rows) - --- left join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ---------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2 t2_2 - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) - -> Seq Scan on plt1_adv_extra t1_4 - Filter: (b < 10) -(18 rows) - --- full join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - QUERY PLAN -------------------------------------------------------------------------- - Sort - Sort Key: t1.a, t2.a - -> Hash Full Join - Hash Cond: ((t1.a = t2.a) AND (t1.c = t2.c)) - Filter: ((COALESCE(t1.b, 0) < 10) AND (COALESCE(t2.b, 0) < 10)) - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - -> Seq Scan on plt1_adv_p2 t1_2 - -> Seq Scan 
on plt1_adv_p3 t1_3 - -> Seq Scan on plt1_adv_extra t1_4 - -> Hash - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2 t2_2 - -> Seq Scan on plt2_adv_p3 t2_3 -(15 rows) - --- Add to plt2_adv the extra NULL partition containing only NULL values as the --- key values -CREATE TABLE plt2_adv_extra PARTITION OF plt2_adv FOR VALUES IN (NULL); -INSERT INTO plt2_adv VALUES (-1, -1, NULL); -ANALYZE plt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 9 | 0009 | 9 | 0009 -(4 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ----------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) - -> Nested Loop Left Join - Join Filter: ((t1_4.a = t2_4.a) AND (t1_4.c = t2_4.c)) - -> Seq Scan on plt1_adv_extra t1_4 - Filter: (b < 10) - -> Seq Scan on plt2_adv_extra t2_4 -(26 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c -----+------+---+------ - -1 | | | - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 -(7 rows) - --- full join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - QUERY PLAN ------------------------------------------------------------------------------------ - Sort - Sort Key: t1.a, t2.a - -> Append - -> Hash Full Join - Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.c = t2_1.c)) - Filter: ((COALESCE(t1_1.b, 0) < 10) AND (COALESCE(t2_1.b, 0) < 10)) - -> Seq Scan on plt1_adv_p1 t1_1 - -> Hash - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash Full Join - Hash Cond: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) - Filter: ((COALESCE(t1_2.b, 0) < 10) AND (COALESCE(t2_2.b, 0) < 10)) - -> Seq Scan 
on plt1_adv_p2 t1_2 - -> Hash - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash Full Join - Hash Cond: ((t1_3.a = t2_3.a) AND (t1_3.c = t2_3.c)) - Filter: ((COALESCE(t1_3.b, 0) < 10) AND (COALESCE(t2_3.b, 0) < 10)) - -> Seq Scan on plt1_adv_p3 t1_3 - -> Hash - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash Full Join - Hash Cond: ((t1_4.a = t2_4.a) AND (t1_4.c = t2_4.c)) - Filter: ((COALESCE(t1_4.b, 0) < 10) AND (COALESCE(t2_4.b, 0) < 10)) - -> Seq Scan on plt1_adv_extra t1_4 - -> Hash - -> Seq Scan on plt2_adv_extra t2_4 -(27 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - a | c | a | c -----+------+----+------ - -1 | | | - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 - | | -1 | - | | 2 | 0002 - | | 7 | 0007 -(10 rows) - --- 3-way join to test the NULL partition of a join relation -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) LEFT JOIN plt1_adv t3 ON (t1.a = t3.a AND t1.c = t3.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Join - Hash Cond: ((t3_1.a = t1_1.a) AND (t3_1.c = t1_1.c)) - -> Seq Scan on plt1_adv_p1 t3_1 - -> Hash - -> Hash Right Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t3_2.a = t1_2.a) AND (t3_2.c = t1_2.c)) - -> Seq Scan on plt1_adv_p2 t3_2 - -> Hash - -> Hash Right Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t3_3.a = t1_3.a) AND (t3_3.c = t1_3.c)) - -> Seq Scan on plt1_adv_p3 t3_3 - -> Hash - -> Hash Right Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) - -> Nested Loop Left Join - Join Filter: ((t1_4.a = t3_4.a) AND (t1_4.c = t3_4.c)) - -> Nested Loop Left Join - Join Filter: ((t1_4.a = t2_4.a) AND (t1_4.c = t2_4.c)) - -> Seq Scan on plt1_adv_extra t1_4 - Filter: (b < 10) - -> Seq Scan on plt2_adv_extra t2_4 - -> Seq Scan on plt1_adv_extra t3_4 -(41 rows) - -SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) LEFT JOIN plt1_adv t3 ON (t1.a = t3.a AND t1.c = t3.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c | a | c -----+------+---+------+---+------ - -1 | | | | | - 1 | 0001 | | | 1 | 0001 - 3 | 0003 | 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 | 6 | 0006 - 8 | 0008 | | | 8 | 0008 - 9 | 0009 | 9 | 0009 | 9 | 0009 -(7 rows) - -DROP TABLE plt1_adv_extra; -DROP TABLE plt2_adv_extra; --- Test default partitions -ALTER TABLE plt1_adv DETACH PARTITION plt1_adv_p1; --- Change plt1_adv_p1 to the default partition -ALTER TABLE plt1_adv ATTACH PARTITION plt1_adv_p1 DEFAULT; -DROP TABLE plt1_adv_p3; -ANALYZE plt1_adv; -DROP TABLE plt2_adv_p3; -ANALYZE plt2_adv; --- We can do partitioned join even if only one of relations has the default --- partition -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = 
t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_2.a) AND (t2_1.c = t1_2.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_1.a) AND (t2_2.c = t1_1.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_1 - Filter: (b < 10) -(15 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 -(3 rows) - -ALTER TABLE plt2_adv DETACH PARTITION plt2_adv_p2; --- Change plt2_adv_p2 to contain '0005' in addition to '0004' and '0006' as --- the key values -CREATE TABLE plt2_adv_p2_ext PARTITION OF plt2_adv FOR VALUES IN ('0004', '0005', '0006'); -INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (4, 5, 6); -ANALYZE plt2_adv; --- Partitioned join can't be applied because the default partition of plt1_adv --- matches plt2_adv_p1 and plt2_adv_p2_ext -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_ext t2_2 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p2 t1_1 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p1 t1_2 - Filter: (b < 10) -(13 rows) - -ALTER TABLE plt2_adv DETACH PARTITION plt2_adv_p2_ext; --- Change plt2_adv_p2_ext to the default partition -ALTER TABLE plt2_adv ATTACH PARTITION plt2_adv_p2_ext DEFAULT; -ANALYZE plt2_adv; --- Partitioned join can't be applied because the default partition of plt1_adv --- matches plt2_adv_p1 and plt2_adv_p2_ext -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_ext t2_2 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p2 t1_1 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p1 t1_2 - Filter: (b < 10) -(13 rows) - -DROP TABLE plt2_adv_p2_ext; --- Restore plt2_adv_p2 -ALTER TABLE plt2_adv ATTACH PARTITION plt2_adv_p2 FOR VALUES IN ('0004', '0006'); -ANALYZE plt2_adv; -CREATE TABLE plt3_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt3_adv_p1 PARTITION OF plt3_adv FOR VALUES IN ('0004', '0006'); -CREATE TABLE plt3_adv_p2 PARTITION OF plt3_adv FOR VALUES IN ('0007', '0009'); -INSERT INTO plt3_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (4, 6, 7, 9); -ANALYZE plt3_adv; --- 3-way join to test the default partition of a join relation -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) LEFT JOIN plt3_adv t3 ON (t1.a = t3.a AND t1.c = t3.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------------------- - 
Sort - Sort Key: t1.a - -> Append - -> Hash Right Join - Hash Cond: ((t3_1.a = t1_1.a) AND (t3_1.c = t1_1.c)) - -> Seq Scan on plt3_adv_p1 t3_1 - -> Hash - -> Hash Right Join - Hash Cond: ((t2_2.a = t1_1.a) AND (t2_2.c = t1_1.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_1 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t3_2.a = t1_2.a) AND (t3_2.c = t1_2.c)) - -> Seq Scan on plt3_adv_p2 t3_2 - -> Hash - -> Hash Right Join - Hash Cond: ((t2_1.a = t1_2.a) AND (t2_1.c = t1_2.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_2 - Filter: (b < 10) -(23 rows) - -SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) LEFT JOIN plt3_adv t3 ON (t1.a = t3.a AND t1.c = t3.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c | a | c ----+------+---+------+---+------ - 1 | 0001 | | | | - 3 | 0003 | 3 | 0003 | | - 4 | 0004 | 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 | 6 | 0006 -(4 rows) - --- Test cases where one side has the default partition while the other side --- has the NULL partition -DROP TABLE plt2_adv_p1; --- Add the NULL partition to plt2_adv -CREATE TABLE plt2_adv_p1_null PARTITION OF plt2_adv FOR VALUES IN (NULL, '0001', '0003'); -INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (1, 3); -INSERT INTO plt2_adv VALUES (-1, -1, NULL); -ANALYZE plt2_adv; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_2.a) AND (t2_1.c = t1_2.c)) - -> Seq Scan on plt2_adv_p1_null t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_1.a) AND (t2_2.c = t1_1.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_1 - Filter: (b < 10) -(15 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 1 | 0001 | 1 | 0001 - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 -(4 rows) - -DROP TABLE plt2_adv_p1_null; --- Add the NULL partition that contains only NULL values as the key values -CREATE TABLE plt2_adv_p1_null PARTITION OF plt2_adv FOR VALUES IN (NULL); -INSERT INTO plt2_adv VALUES (-1, -1, NULL); -ANALYZE plt2_adv; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Seq Scan on plt2_adv_p2 t2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1 - Filter: (b < 10) -(8 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 -(2 rows) - -DROP TABLE plt1_adv; -DROP TABLE plt2_adv; -DROP TABLE plt3_adv; --- Test interaction of partitioned join with partition pruning -CREATE TABLE plt1_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt1_adv_p1 PARTITION OF plt1_adv FOR VALUES IN ('0001'); -CREATE TABLE plt1_adv_p2 PARTITION 
OF plt1_adv FOR VALUES IN ('0002'); -CREATE TABLE plt1_adv_p3 PARTITION OF plt1_adv FOR VALUES IN ('0003'); -CREATE TABLE plt1_adv_p4 PARTITION OF plt1_adv FOR VALUES IN (NULL, '0004', '0005'); -INSERT INTO plt1_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (1, 2, 3, 4, 5); -INSERT INTO plt1_adv VALUES (-1, -1, NULL); -ANALYZE plt1_adv; -CREATE TABLE plt2_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt2_adv_p1 PARTITION OF plt2_adv FOR VALUES IN ('0001', '0002'); -CREATE TABLE plt2_adv_p2 PARTITION OF plt2_adv FOR VALUES IN (NULL); -CREATE TABLE plt2_adv_p3 PARTITION OF plt2_adv FOR VALUES IN ('0003'); -CREATE TABLE plt2_adv_p4 PARTITION OF plt2_adv FOR VALUES IN ('0004', '0005'); -INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (1, 2, 3, 4, 5); -INSERT INTO plt2_adv VALUES (-1, -1, NULL); -ANALYZE plt2_adv; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IN ('0003', '0004', '0005') AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------------------------------------------ - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p3 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_1 - Filter: ((b < 10) AND (c = ANY ('{0003,0004,0005}'::text[]))) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p4 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p4 t1_2 - Filter: ((b < 10) AND (c = ANY ('{0003,0004,0005}'::text[]))) -(15 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IN ('0003', '0004', '0005') AND t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 5 | 0005 | 5 | 0005 -(3 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IS NULL AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Seq Scan on plt2_adv_p4 t2 - -> Hash - -> Seq Scan on plt1_adv_p4 t1 - Filter: ((c IS NULL) AND (b < 10)) -(8 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IS NULL AND t1.b < 10 ORDER BY t1.a; - a | c | a | c -----+---+---+--- - -1 | | | -(1 row) - -CREATE TABLE plt1_adv_default PARTITION OF plt1_adv DEFAULT; -ANALYZE plt1_adv; -CREATE TABLE plt2_adv_default PARTITION OF plt2_adv DEFAULT; -ANALYZE plt2_adv; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IN ('0003', '0004', '0005') AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------------------------------------------ - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p3 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_1 - Filter: ((b < 10) AND (c = ANY ('{0003,0004,0005}'::text[]))) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p4 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p4 t1_2 - Filter: ((b < 10) AND (c = ANY 
('{0003,0004,0005}'::text[]))) -(15 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IN ('0003', '0004', '0005') AND t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 5 | 0005 | 5 | 0005 -(3 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IS NULL AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Seq Scan on plt2_adv_p4 t2 - -> Hash - -> Seq Scan on plt1_adv_p4 t1 - Filter: ((c IS NULL) AND (b < 10)) -(8 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IS NULL AND t1.b < 10 ORDER BY t1.a; - a | c | a | c -----+---+---+--- - -1 | | | -(1 row) - -DROP TABLE plt1_adv; -DROP TABLE plt2_adv; --- Test the process_outer_partition() code path -CREATE TABLE plt1_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt1_adv_p1 PARTITION OF plt1_adv FOR VALUES IN ('0000', '0001', '0002'); -CREATE TABLE plt1_adv_p2 PARTITION OF plt1_adv FOR VALUES IN ('0003', '0004'); -INSERT INTO plt1_adv SELECT i, i, to_char(i % 5, 'FM0000') FROM generate_series(0, 24) i; -ANALYZE plt1_adv; -CREATE TABLE plt2_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt2_adv_p1 PARTITION OF plt2_adv FOR VALUES IN ('0002'); -CREATE TABLE plt2_adv_p2 PARTITION OF plt2_adv FOR VALUES IN ('0003', '0004'); -INSERT INTO plt2_adv SELECT i, i, to_char(i % 5, 'FM0000') FROM generate_series(0, 24) i WHERE i % 5 IN (2, 3, 4); -ANALYZE plt2_adv; -CREATE TABLE plt3_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt3_adv_p1 PARTITION OF plt3_adv FOR VALUES IN ('0001'); -CREATE TABLE plt3_adv_p2 PARTITION OF plt3_adv FOR VALUES IN ('0003', '0004'); -INSERT INTO plt3_adv SELECT i, i, to_char(i % 5, 'FM0000') FROM generate_series(0, 24) i WHERE i % 5 IN (1, 3, 4); -ANALYZE plt3_adv; --- This tests that when merging partitions from plt1_adv and plt2_adv in --- merge_list_bounds(), process_outer_partition() returns an already-assigned --- merged partition when re-called with plt1_adv_p1 for the second list value --- '0001' of that partition -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM (plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.c = t2.c)) FULL JOIN plt3_adv t3 ON (t1.c = t3.c) WHERE coalesce(t1.a, 0) % 5 != 3 AND coalesce(t1.a, 0) % 5 != 4 ORDER BY t1.c, t1.a, t2.a, t3.a; - QUERY PLAN ------------------------------------------------------------------------------------------------ - Sort - Sort Key: t1.c, t1.a, t2.a, t3.a - -> Append - -> Hash Full Join - Hash Cond: (t1_1.c = t3_1.c) - Filter: (((COALESCE(t1_1.a, 0) % 5) <> 3) AND ((COALESCE(t1_1.a, 0) % 5) <> 4)) - -> Hash Left Join - Hash Cond: (t1_1.c = t2_1.c) - -> Seq Scan on plt1_adv_p1 t1_1 - -> Hash - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt3_adv_p1 t3_1 - -> Hash Full Join - Hash Cond: (t1_2.c = t3_2.c) - Filter: (((COALESCE(t1_2.a, 0) % 5) <> 3) AND ((COALESCE(t1_2.a, 0) % 5) <> 4)) - -> Hash Left Join - Hash Cond: (t1_2.c = t2_2.c) - -> Seq Scan on plt1_adv_p2 t1_2 - -> Hash - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt3_adv_p2 t3_2 -(23 rows) - -SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM (plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.c = 
t2.c)) FULL JOIN plt3_adv t3 ON (t1.c = t3.c) WHERE coalesce(t1.a, 0) % 5 != 3 AND coalesce(t1.a, 0) % 5 != 4 ORDER BY t1.c, t1.a, t2.a, t3.a; - a | c | a | c | a | c -----+------+----+------+----+------ - 0 | 0000 | | | | - 5 | 0000 | | | | - 10 | 0000 | | | | - 15 | 0000 | | | | - 20 | 0000 | | | | - 1 | 0001 | | | 1 | 0001 - 1 | 0001 | | | 6 | 0001 - 1 | 0001 | | | 11 | 0001 - 1 | 0001 | | | 16 | 0001 - 1 | 0001 | | | 21 | 0001 - 6 | 0001 | | | 1 | 0001 - 6 | 0001 | | | 6 | 0001 - 6 | 0001 | | | 11 | 0001 - 6 | 0001 | | | 16 | 0001 - 6 | 0001 | | | 21 | 0001 - 11 | 0001 | | | 1 | 0001 - 11 | 0001 | | | 6 | 0001 - 11 | 0001 | | | 11 | 0001 - 11 | 0001 | | | 16 | 0001 - 11 | 0001 | | | 21 | 0001 - 16 | 0001 | | | 1 | 0001 - 16 | 0001 | | | 6 | 0001 - 16 | 0001 | | | 11 | 0001 - 16 | 0001 | | | 16 | 0001 - 16 | 0001 | | | 21 | 0001 - 21 | 0001 | | | 1 | 0001 - 21 | 0001 | | | 6 | 0001 - 21 | 0001 | | | 11 | 0001 - 21 | 0001 | | | 16 | 0001 - 21 | 0001 | | | 21 | 0001 - 2 | 0002 | 2 | 0002 | | - 2 | 0002 | 7 | 0002 | | - 2 | 0002 | 12 | 0002 | | - 2 | 0002 | 17 | 0002 | | - 2 | 0002 | 22 | 0002 | | - 7 | 0002 | 2 | 0002 | | - 7 | 0002 | 7 | 0002 | | - 7 | 0002 | 12 | 0002 | | - 7 | 0002 | 17 | 0002 | | - 7 | 0002 | 22 | 0002 | | - 12 | 0002 | 2 | 0002 | | - 12 | 0002 | 7 | 0002 | | - 12 | 0002 | 12 | 0002 | | - 12 | 0002 | 17 | 0002 | | - 12 | 0002 | 22 | 0002 | | - 17 | 0002 | 2 | 0002 | | - 17 | 0002 | 7 | 0002 | | - 17 | 0002 | 12 | 0002 | | - 17 | 0002 | 17 | 0002 | | - 17 | 0002 | 22 | 0002 | | - 22 | 0002 | 2 | 0002 | | - 22 | 0002 | 7 | 0002 | | - 22 | 0002 | 12 | 0002 | | - 22 | 0002 | 17 | 0002 | | - 22 | 0002 | 22 | 0002 | | -(55 rows) - -DROP TABLE plt1_adv; -DROP TABLE plt2_adv; -DROP TABLE plt3_adv; --- Tests for multi-level partitioned tables -CREATE TABLE alpha (a double precision, b int, c text) PARTITION BY RANGE (a); -CREATE TABLE alpha_neg PARTITION OF alpha FOR VALUES FROM ('-Infinity') TO (0) PARTITION BY RANGE (b); -CREATE TABLE alpha_pos PARTITION OF alpha FOR VALUES FROM (0) TO (10.0) PARTITION BY LIST (c); -CREATE TABLE alpha_neg_p1 PARTITION OF alpha_neg FOR VALUES FROM (100) TO (200); -CREATE TABLE alpha_neg_p2 PARTITION OF alpha_neg FOR VALUES FROM (200) TO (300); -CREATE TABLE alpha_neg_p3 PARTITION OF alpha_neg FOR VALUES FROM (300) TO (400); -CREATE TABLE alpha_pos_p1 PARTITION OF alpha_pos FOR VALUES IN ('0001', '0003'); -CREATE TABLE alpha_pos_p2 PARTITION OF alpha_pos FOR VALUES IN ('0004', '0006'); -CREATE TABLE alpha_pos_p3 PARTITION OF alpha_pos FOR VALUES IN ('0008', '0009'); -INSERT INTO alpha_neg SELECT -1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(100, 399) i WHERE i % 10 IN (1, 3, 4, 6, 8, 9); -INSERT INTO alpha_pos SELECT 1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(100, 399) i WHERE i % 10 IN (1, 3, 4, 6, 8, 9); -ANALYZE alpha; -CREATE TABLE beta (a double precision, b int, c text) PARTITION BY RANGE (a); -CREATE TABLE beta_neg PARTITION OF beta FOR VALUES FROM (-10.0) TO (0) PARTITION BY RANGE (b); -CREATE TABLE beta_pos PARTITION OF beta FOR VALUES FROM (0) TO ('Infinity') PARTITION BY LIST (c); -CREATE TABLE beta_neg_p1 PARTITION OF beta_neg FOR VALUES FROM (100) TO (150); -CREATE TABLE beta_neg_p2 PARTITION OF beta_neg FOR VALUES FROM (200) TO (300); -CREATE TABLE beta_neg_p3 PARTITION OF beta_neg FOR VALUES FROM (350) TO (500); -CREATE TABLE beta_pos_p1 PARTITION OF beta_pos FOR VALUES IN ('0002', '0003'); -CREATE TABLE beta_pos_p2 PARTITION OF beta_pos FOR VALUES IN ('0004', '0006'); -CREATE TABLE beta_pos_p3 PARTITION OF 
beta_pos FOR VALUES IN ('0007', '0009'); -INSERT INTO beta_neg SELECT -1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(100, 149) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -INSERT INTO beta_neg SELECT -1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(200, 299) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -INSERT INTO beta_neg SELECT -1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(350, 499) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -INSERT INTO beta_pos SELECT 1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(100, 149) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -INSERT INTO beta_pos SELECT 1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(200, 299) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -INSERT INTO beta_pos SELECT 1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(350, 499) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -ANALYZE beta; -EXPLAIN (COSTS OFF) -SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.b = t2.b) WHERE t1.b >= 125 AND t1.b < 225 ORDER BY t1.a, t1.b; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a, t1.b - -> Append - -> Hash Join - Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.b = t2_1.b)) - -> Seq Scan on alpha_neg_p1 t1_1 - Filter: ((b >= 125) AND (b < 225)) - -> Hash - -> Seq Scan on beta_neg_p1 t2_1 - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.b = t1_2.b)) - -> Seq Scan on beta_neg_p2 t2_2 - -> Hash - -> Seq Scan on alpha_neg_p2 t1_2 - Filter: ((b >= 125) AND (b < 225)) - -> Hash Join - Hash Cond: ((t2_4.a = t1_4.a) AND (t2_4.b = t1_4.b)) - -> Append - -> Seq Scan on beta_pos_p1 t2_4 - -> Seq Scan on beta_pos_p2 t2_5 - -> Seq Scan on beta_pos_p3 t2_6 - -> Hash - -> Append - -> Seq Scan on alpha_pos_p1 t1_4 - Filter: ((b >= 125) AND (b < 225)) - -> Seq Scan on alpha_pos_p2 t1_5 - Filter: ((b >= 125) AND (b < 225)) - -> Seq Scan on alpha_pos_p3 t1_6 - Filter: ((b >= 125) AND (b < 225)) -(29 rows) - -SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.b = t2.b) WHERE t1.b >= 125 AND t1.b < 225 ORDER BY t1.a, t1.b; - a | b | c | a | b | c -----+-----+------+----+-----+------ - -1 | 126 | 0006 | -1 | 126 | 0006 - -1 | 129 | 0009 | -1 | 129 | 0009 - -1 | 133 | 0003 | -1 | 133 | 0003 - -1 | 134 | 0004 | -1 | 134 | 0004 - -1 | 136 | 0006 | -1 | 136 | 0006 - -1 | 139 | 0009 | -1 | 139 | 0009 - -1 | 143 | 0003 | -1 | 143 | 0003 - -1 | 144 | 0004 | -1 | 144 | 0004 - -1 | 146 | 0006 | -1 | 146 | 0006 - -1 | 149 | 0009 | -1 | 149 | 0009 - -1 | 203 | 0003 | -1 | 203 | 0003 - -1 | 204 | 0004 | -1 | 204 | 0004 - -1 | 206 | 0006 | -1 | 206 | 0006 - -1 | 209 | 0009 | -1 | 209 | 0009 - -1 | 213 | 0003 | -1 | 213 | 0003 - -1 | 214 | 0004 | -1 | 214 | 0004 - -1 | 216 | 0006 | -1 | 216 | 0006 - -1 | 219 | 0009 | -1 | 219 | 0009 - -1 | 223 | 0003 | -1 | 223 | 0003 - -1 | 224 | 0004 | -1 | 224 | 0004 - 1 | 126 | 0006 | 1 | 126 | 0006 - 1 | 129 | 0009 | 1 | 129 | 0009 - 1 | 133 | 0003 | 1 | 133 | 0003 - 1 | 134 | 0004 | 1 | 134 | 0004 - 1 | 136 | 0006 | 1 | 136 | 0006 - 1 | 139 | 0009 | 1 | 139 | 0009 - 1 | 143 | 0003 | 1 | 143 | 0003 - 1 | 144 | 0004 | 1 | 144 | 0004 - 1 | 146 | 0006 | 1 | 146 | 0006 - 1 | 149 | 0009 | 1 | 149 | 0009 - 1 | 203 | 0003 | 1 | 203 | 0003 - 1 | 204 | 0004 | 1 | 204 | 0004 - 1 | 206 | 0006 | 1 | 206 | 0006 - 1 | 209 | 0009 | 1 | 209 | 0009 - 1 | 213 | 0003 | 1 | 213 | 0003 - 1 | 214 | 0004 | 1 | 214 | 0004 - 1 | 216 | 0006 | 1 | 216 | 0006 - 1 | 219 | 0009 | 1 | 219 | 0009 - 1 | 223 | 0003 | 1 | 223 | 0003 - 1 | 224 | 0004 | 1 | 224 | 0004 -(40 rows) 
- -EXPLAIN (COSTS OFF) -SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE ((t1.b >= 100 AND t1.b < 110) OR (t1.b >= 200 AND t1.b < 210)) AND ((t2.b >= 100 AND t2.b < 110) OR (t2.b >= 200 AND t2.b < 210)) AND t1.c IN ('0004', '0009') ORDER BY t1.a, t1.b, t2.b; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------- - Sort - Sort Key: t1.a, t1.b, t2.b - -> Append - -> Hash Join - Hash Cond: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) - -> Append - -> Seq Scan on alpha_neg_p1 t1_2 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Seq Scan on alpha_neg_p2 t1_3 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Hash - -> Append - -> Seq Scan on beta_neg_p1 t2_2 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) - -> Seq Scan on beta_neg_p2 t2_3 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) - -> Nested Loop - Join Filter: ((t1_4.a = t2_4.a) AND (t1_4.c = t2_4.c)) - -> Seq Scan on alpha_pos_p2 t1_4 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Seq Scan on beta_pos_p2 t2_4 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) - -> Nested Loop - Join Filter: ((t1_5.a = t2_5.a) AND (t1_5.c = t2_5.c)) - -> Seq Scan on alpha_pos_p3 t1_5 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Seq Scan on beta_pos_p3 t2_5 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) -(28 rows) - -SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE ((t1.b >= 100 AND t1.b < 110) OR (t1.b >= 200 AND t1.b < 210)) AND ((t2.b >= 100 AND t2.b < 110) OR (t2.b >= 200 AND t2.b < 210)) AND t1.c IN ('0004', '0009') ORDER BY t1.a, t1.b, t2.b; - a | b | c | a | b | c -----+-----+------+----+-----+------ - -1 | 104 | 0004 | -1 | 104 | 0004 - -1 | 104 | 0004 | -1 | 204 | 0004 - -1 | 109 | 0009 | -1 | 109 | 0009 - -1 | 109 | 0009 | -1 | 209 | 0009 - -1 | 204 | 0004 | -1 | 104 | 0004 - -1 | 204 | 0004 | -1 | 204 | 0004 - -1 | 209 | 0009 | -1 | 109 | 0009 - -1 | 209 | 0009 | -1 | 209 | 0009 - 1 | 104 | 0004 | 1 | 104 | 0004 - 1 | 104 | 0004 | 1 | 204 | 0004 - 1 | 109 | 0009 | 1 | 109 | 0009 - 1 | 109 | 0009 | 1 | 209 | 0009 - 1 | 204 | 0004 | 1 | 104 | 0004 - 1 | 204 | 0004 | 1 | 204 | 0004 - 1 | 209 | 0009 | 1 | 109 | 0009 - 1 | 209 | 0009 | 1 | 209 | 0009 -(16 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.b = t2.b AND t1.c = t2.c) WHERE ((t1.b >= 100 AND t1.b < 110) OR (t1.b >= 200 AND t1.b < 210)) AND ((t2.b >= 100 AND t2.b < 110) OR (t2.b >= 200 AND t2.b < 210)) AND t1.c IN ('0004', '0009') ORDER BY t1.a, t1.b; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------- - Sort - Sort Key: t1.a, t1.b - -> Append - -> Hash Join - Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.b = t2_1.b) AND (t1_1.c = t2_1.c)) - -> Seq Scan on alpha_neg_p1 t1_1 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Hash - -> Seq Scan on beta_neg_p1 t2_1 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) - -> Hash Join - Hash Cond: ((t1_2.a = t2_2.a) AND 
(t1_2.b = t2_2.b) AND (t1_2.c = t2_2.c)) - -> Seq Scan on alpha_neg_p2 t1_2 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Hash - -> Seq Scan on beta_neg_p2 t2_2 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) - -> Nested Loop - Join Filter: ((t1_3.a = t2_3.a) AND (t1_3.b = t2_3.b) AND (t1_3.c = t2_3.c)) - -> Seq Scan on alpha_pos_p2 t1_3 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Seq Scan on beta_pos_p2 t2_3 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) - -> Nested Loop - Join Filter: ((t1_4.a = t2_4.a) AND (t1_4.b = t2_4.b) AND (t1_4.c = t2_4.c)) - -> Seq Scan on alpha_pos_p3 t1_4 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Seq Scan on beta_pos_p3 t2_4 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) -(29 rows) - -SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.b = t2.b AND t1.c = t2.c) WHERE ((t1.b >= 100 AND t1.b < 110) OR (t1.b >= 200 AND t1.b < 210)) AND ((t2.b >= 100 AND t2.b < 110) OR (t2.b >= 200 AND t2.b < 210)) AND t1.c IN ('0004', '0009') ORDER BY t1.a, t1.b; - a | b | c | a | b | c -----+-----+------+----+-----+------ - -1 | 104 | 0004 | -1 | 104 | 0004 - -1 | 109 | 0009 | -1 | 109 | 0009 - -1 | 204 | 0004 | -1 | 204 | 0004 - -1 | 209 | 0009 | -1 | 209 | 0009 - 1 | 104 | 0004 | 1 | 104 | 0004 - 1 | 109 | 0009 | 1 | 109 | 0009 - 1 | 204 | 0004 | 1 | 204 | 0004 - 1 | 209 | 0009 | 1 | 209 | 0009 -(8 rows) - --- partitionwise join with fractional paths -CREATE TABLE fract_t (id BIGINT, PRIMARY KEY (id)) PARTITION BY RANGE (id); -CREATE TABLE fract_t0 PARTITION OF fract_t FOR VALUES FROM ('0') TO ('1000'); -CREATE TABLE fract_t1 PARTITION OF fract_t FOR VALUES FROM ('1000') TO ('2000'); --- insert data -INSERT INTO fract_t (id) (SELECT generate_series(0, 1999)); -ANALYZE fract_t; --- verify plan; nested index only scans -SET max_parallel_workers_per_gather = 0; -SET enable_partitionwise_join = on; -EXPLAIN (COSTS OFF) -SELECT x.id, y.id FROM fract_t x LEFT JOIN fract_t y USING (id) ORDER BY x.id ASC LIMIT 10; - QUERY PLAN ------------------------------------------------------------------------ - Limit - -> Merge Append - Sort Key: x.id - -> Merge Left Join - Merge Cond: (x_1.id = y_1.id) - -> Index Only Scan using fract_t0_pkey on fract_t0 x_1 - -> Index Only Scan using fract_t0_pkey on fract_t0 y_1 - -> Merge Left Join - Merge Cond: (x_2.id = y_2.id) - -> Index Only Scan using fract_t1_pkey on fract_t1 x_2 - -> Index Only Scan using fract_t1_pkey on fract_t1 y_2 -(11 rows) - -EXPLAIN (COSTS OFF) -SELECT x.id, y.id FROM fract_t x LEFT JOIN fract_t y USING (id) ORDER BY x.id DESC LIMIT 10; - QUERY PLAN --------------------------------------------------------------------------------- - Limit - -> Merge Append - Sort Key: x.id DESC - -> Nested Loop Left Join - -> Index Only Scan Backward using fract_t0_pkey on fract_t0 x_1 - -> Index Only Scan using fract_t0_pkey on fract_t0 y_1 - Index Cond: (id = x_1.id) - -> Nested Loop Left Join - -> Index Only Scan Backward using fract_t1_pkey on fract_t1 x_2 - -> Index Only Scan using fract_t1_pkey on fract_t1 y_2 - Index Cond: (id = x_2.id) -(11 rows) - --- cleanup -DROP TABLE fract_t; -RESET max_parallel_workers_per_gather; -RESET enable_partitionwise_join; +WARNING: terminating connection because of crash of another server process +DETAIL: The 
postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory. +HINT: In a moment you should be able to reconnect to the database and repeat your command. +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/partition_prune.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/partition_prune.out --- /tmp/cirrus-ci-build/src/test/regress/expected/partition_prune.out 2024-03-13 23:12:37.626104000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/partition_prune.out 2024-03-13 23:14:45.721587000 +0000 @@ -2550,1872 +2550,10 @@ (27 rows) select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(1, 0, 0)'); - explain_parallel_append --------------------------------------------------------------------------------------------------------- - Finalize Aggregate (actual rows=N loops=N) - -> Gather (actual rows=N loops=N) - Workers Planned: 1 - Workers Launched: N - -> Partial Aggregate (actual rows=N loops=N) - -> Nested Loop (actual rows=N loops=N) - -> Parallel Seq Scan on lprt_a a (actual rows=N loops=N) - Filter: (a = ANY ('{1,0,0}'::integer[])) - Rows Removed by Filter: N - -> Append (actual rows=N loops=N) - -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 ab_1 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 ab_2 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 ab_3 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 ab_4 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 ab_5 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 ab_6 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 ab_7 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 ab_8 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 ab_9 (never executed) - Index Cond: (a = a.a) -(28 rows) - -delete from lprt_a where a = 1; -select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(1, 0, 0)'); - explain_parallel_append -------------------------------------------------------------------------------------------------- - Finalize Aggregate (actual rows=N loops=N) - -> Gather (actual rows=N loops=N) - Workers Planned: 1 - Workers Launched: N - -> Partial Aggregate (actual rows=N loops=N) - -> Nested Loop (actual rows=N loops=N) - -> Parallel Seq Scan on lprt_a a (actual rows=N loops=N) - Filter: (a = ANY ('{1,0,0}'::integer[])) - Rows Removed by Filter: N - -> Append (actual rows=N loops=N) - -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 ab_1 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 ab_2 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 ab_3 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 ab_4 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 ab_5 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 
ab_6 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 ab_7 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 ab_8 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 ab_9 (never executed) - Index Cond: (a = a.a) -(28 rows) - -reset enable_hashjoin; -reset enable_mergejoin; -reset enable_memoize; -reset parallel_setup_cost; -reset parallel_tuple_cost; -reset min_parallel_table_scan_size; -reset max_parallel_workers_per_gather; --- Test run-time partition pruning with an initplan -explain (analyze, costs off, summary off, timing off) -select * from ab where a = (select max(a) from lprt_a) and b = (select max(a)-1 from lprt_a); - QUERY PLAN -------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - InitPlan 1 (returns $0) - -> Aggregate (actual rows=1 loops=1) - -> Seq Scan on lprt_a (actual rows=102 loops=1) - InitPlan 2 (returns $1) - -> Aggregate (actual rows=1 loops=1) - -> Seq Scan on lprt_a lprt_a_1 (actual rows=102 loops=1) - -> Bitmap Heap Scan on ab_a1_b1 ab_1 (never executed) - Recheck Cond: (a = $0) - Filter: (b = $1) - -> Bitmap Index Scan on ab_a1_b1_a_idx (never executed) - Index Cond: (a = $0) - -> Bitmap Heap Scan on ab_a1_b2 ab_2 (never executed) - Recheck Cond: (a = $0) - Filter: (b = $1) - -> Bitmap Index Scan on ab_a1_b2_a_idx (never executed) - Index Cond: (a = $0) - -> Bitmap Heap Scan on ab_a1_b3 ab_3 (never executed) - Recheck Cond: (a = $0) - Filter: (b = $1) - -> Bitmap Index Scan on ab_a1_b3_a_idx (never executed) - Index Cond: (a = $0) - -> Bitmap Heap Scan on ab_a2_b1 ab_4 (never executed) - Recheck Cond: (a = $0) - Filter: (b = $1) - -> Bitmap Index Scan on ab_a2_b1_a_idx (never executed) - Index Cond: (a = $0) - -> Bitmap Heap Scan on ab_a2_b2 ab_5 (never executed) - Recheck Cond: (a = $0) - Filter: (b = $1) - -> Bitmap Index Scan on ab_a2_b2_a_idx (never executed) - Index Cond: (a = $0) - -> Bitmap Heap Scan on ab_a2_b3 ab_6 (never executed) - Recheck Cond: (a = $0) - Filter: (b = $1) - -> Bitmap Index Scan on ab_a2_b3_a_idx (never executed) - Index Cond: (a = $0) - -> Bitmap Heap Scan on ab_a3_b1 ab_7 (never executed) - Recheck Cond: (a = $0) - Filter: (b = $1) - -> Bitmap Index Scan on ab_a3_b1_a_idx (never executed) - Index Cond: (a = $0) - -> Bitmap Heap Scan on ab_a3_b2 ab_8 (actual rows=0 loops=1) - Recheck Cond: (a = $0) - Filter: (b = $1) - -> Bitmap Index Scan on ab_a3_b2_a_idx (actual rows=0 loops=1) - Index Cond: (a = $0) - -> Bitmap Heap Scan on ab_a3_b3 ab_9 (never executed) - Recheck Cond: (a = $0) - Filter: (b = $1) - -> Bitmap Index Scan on ab_a3_b3_a_idx (never executed) - Index Cond: (a = $0) -(52 rows) - --- Test run-time partition pruning with UNION ALL parents -explain (analyze, costs off, summary off, timing off) -select * from (select * from ab where a = 1 union all select * from ab) ab where b = (select 1); - QUERY PLAN -------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - InitPlan 1 (returns $0) - -> Result (actual rows=1 loops=1) - -> Append (actual rows=0 loops=1) - -> Bitmap Heap Scan on ab_a1_b1 ab_11 (actual rows=0 loops=1) - Recheck Cond: (a = 1) - Filter: (b = $0) - -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b2 ab_12 (never executed) - Recheck Cond: (a = 1) - Filter: (b = $0) - -> Bitmap Index Scan on ab_a1_b2_a_idx (never 
executed) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b3 ab_13 (never executed) - Recheck Cond: (a = 1) - Filter: (b = $0) - -> Bitmap Index Scan on ab_a1_b3_a_idx (never executed) - Index Cond: (a = 1) - -> Seq Scan on ab_a1_b1 ab_1 (actual rows=0 loops=1) - Filter: (b = $0) - -> Seq Scan on ab_a1_b2 ab_2 (never executed) - Filter: (b = $0) - -> Seq Scan on ab_a1_b3 ab_3 (never executed) - Filter: (b = $0) - -> Seq Scan on ab_a2_b1 ab_4 (actual rows=0 loops=1) - Filter: (b = $0) - -> Seq Scan on ab_a2_b2 ab_5 (never executed) - Filter: (b = $0) - -> Seq Scan on ab_a2_b3 ab_6 (never executed) - Filter: (b = $0) - -> Seq Scan on ab_a3_b1 ab_7 (actual rows=0 loops=1) - Filter: (b = $0) - -> Seq Scan on ab_a3_b2 ab_8 (never executed) - Filter: (b = $0) - -> Seq Scan on ab_a3_b3 ab_9 (never executed) - Filter: (b = $0) -(37 rows) - --- A case containing a UNION ALL with a non-partitioned child. -explain (analyze, costs off, summary off, timing off) -select * from (select * from ab where a = 1 union all (values(10,5)) union all select * from ab) ab where b = (select 1); - QUERY PLAN -------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - InitPlan 1 (returns $0) - -> Result (actual rows=1 loops=1) - -> Append (actual rows=0 loops=1) - -> Bitmap Heap Scan on ab_a1_b1 ab_11 (actual rows=0 loops=1) - Recheck Cond: (a = 1) - Filter: (b = $0) - -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b2 ab_12 (never executed) - Recheck Cond: (a = 1) - Filter: (b = $0) - -> Bitmap Index Scan on ab_a1_b2_a_idx (never executed) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b3 ab_13 (never executed) - Recheck Cond: (a = 1) - Filter: (b = $0) - -> Bitmap Index Scan on ab_a1_b3_a_idx (never executed) - Index Cond: (a = 1) - -> Result (actual rows=0 loops=1) - One-Time Filter: (5 = $0) - -> Seq Scan on ab_a1_b1 ab_1 (actual rows=0 loops=1) - Filter: (b = $0) - -> Seq Scan on ab_a1_b2 ab_2 (never executed) - Filter: (b = $0) - -> Seq Scan on ab_a1_b3 ab_3 (never executed) - Filter: (b = $0) - -> Seq Scan on ab_a2_b1 ab_4 (actual rows=0 loops=1) - Filter: (b = $0) - -> Seq Scan on ab_a2_b2 ab_5 (never executed) - Filter: (b = $0) - -> Seq Scan on ab_a2_b3 ab_6 (never executed) - Filter: (b = $0) - -> Seq Scan on ab_a3_b1 ab_7 (actual rows=0 loops=1) - Filter: (b = $0) - -> Seq Scan on ab_a3_b2 ab_8 (never executed) - Filter: (b = $0) - -> Seq Scan on ab_a3_b3 ab_9 (never executed) - Filter: (b = $0) -(39 rows) - --- Another UNION ALL test, but containing a mix of exec init and exec run-time pruning. -create table xy_1 (x int, y int); -insert into xy_1 values(100,-10); -set enable_bitmapscan = 0; -set enable_indexscan = 0; -prepare ab_q6 as -select * from ( - select tableoid::regclass,a,b from ab -union all - select tableoid::regclass,x,y from xy_1 -union all - select tableoid::regclass,a,b from ab -) ab where a = $1 and b = (select -10); --- Ensure the xy_1 subplan is not pruned. 
-explain (analyze, costs off, summary off, timing off) execute ab_q6(1); - QUERY PLAN --------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 12 - InitPlan 1 (returns $0) - -> Result (actual rows=1 loops=1) - -> Seq Scan on ab_a1_b1 ab_1 (never executed) - Filter: ((a = $1) AND (b = $0)) - -> Seq Scan on ab_a1_b2 ab_2 (never executed) - Filter: ((a = $1) AND (b = $0)) - -> Seq Scan on ab_a1_b3 ab_3 (never executed) - Filter: ((a = $1) AND (b = $0)) - -> Seq Scan on xy_1 (actual rows=0 loops=1) - Filter: ((x = $1) AND (y = $0)) - Rows Removed by Filter: 1 - -> Seq Scan on ab_a1_b1 ab_4 (never executed) - Filter: ((a = $1) AND (b = $0)) - -> Seq Scan on ab_a1_b2 ab_5 (never executed) - Filter: ((a = $1) AND (b = $0)) - -> Seq Scan on ab_a1_b3 ab_6 (never executed) - Filter: ((a = $1) AND (b = $0)) -(19 rows) - --- Ensure we see just the xy_1 row. -execute ab_q6(100); - tableoid | a | b -----------+-----+----- - xy_1 | 100 | -10 -(1 row) - -reset enable_bitmapscan; -reset enable_indexscan; -deallocate ab_q1; -deallocate ab_q2; -deallocate ab_q3; -deallocate ab_q4; -deallocate ab_q5; -deallocate ab_q6; --- UPDATE on a partition subtree has been seen to have problems. -insert into ab values (1,2); -explain (analyze, costs off, summary off, timing off) -update ab_a1 set b = 3 from ab where ab.a = 1 and ab.a = ab_a1.a; - QUERY PLAN -------------------------------------------------------------------------------------------- - Update on ab_a1 (actual rows=0 loops=1) - Update on ab_a1_b1 ab_a1_1 - Update on ab_a1_b2 ab_a1_2 - Update on ab_a1_b3 ab_a1_3 - -> Nested Loop (actual rows=1 loops=1) - -> Append (actual rows=1 loops=1) - -> Bitmap Heap Scan on ab_a1_b1 ab_a1_1 (actual rows=0 loops=1) - Recheck Cond: (a = 1) - -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b2 ab_a1_2 (actual rows=1 loops=1) - Recheck Cond: (a = 1) - Heap Blocks: exact=1 - -> Bitmap Index Scan on ab_a1_b2_a_idx (actual rows=1 loops=1) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b3 ab_a1_3 (actual rows=0 loops=1) - Recheck Cond: (a = 1) - -> Bitmap Index Scan on ab_a1_b3_a_idx (actual rows=1 loops=1) - Index Cond: (a = 1) - -> Materialize (actual rows=1 loops=1) - -> Append (actual rows=1 loops=1) - -> Bitmap Heap Scan on ab_a1_b1 ab_1 (actual rows=0 loops=1) - Recheck Cond: (a = 1) - -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b2 ab_2 (actual rows=1 loops=1) - Recheck Cond: (a = 1) - Heap Blocks: exact=1 - -> Bitmap Index Scan on ab_a1_b2_a_idx (actual rows=1 loops=1) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b3 ab_3 (actual rows=0 loops=1) - Recheck Cond: (a = 1) - -> Bitmap Index Scan on ab_a1_b3_a_idx (actual rows=1 loops=1) - Index Cond: (a = 1) -(34 rows) - -table ab; - a | b ----+--- - 1 | 3 -(1 row) - --- Test UPDATE where source relation has run-time pruning enabled -truncate ab; -insert into ab values (1, 1), (1, 2), (1, 3), (2, 1); -explain (analyze, costs off, summary off, timing off) -update ab_a1 set b = 3 from ab_a2 where ab_a2.b = (select 1); - QUERY PLAN ------------------------------------------------------------------------------- - Update on ab_a1 (actual rows=0 loops=1) - Update on ab_a1_b1 ab_a1_1 - Update on ab_a1_b2 ab_a1_2 - Update on ab_a1_b3 ab_a1_3 - InitPlan 1 (returns $0) - -> Result (actual rows=1 loops=1) - -> Nested Loop (actual rows=3 loops=1) - -> Append (actual rows=3 loops=1) - -> Seq 
Scan on ab_a1_b1 ab_a1_1 (actual rows=1 loops=1) - -> Seq Scan on ab_a1_b2 ab_a1_2 (actual rows=1 loops=1) - -> Seq Scan on ab_a1_b3 ab_a1_3 (actual rows=1 loops=1) - -> Materialize (actual rows=1 loops=3) - -> Append (actual rows=1 loops=1) - -> Seq Scan on ab_a2_b1 ab_a2_1 (actual rows=1 loops=1) - Filter: (b = $0) - -> Seq Scan on ab_a2_b2 ab_a2_2 (never executed) - Filter: (b = $0) - -> Seq Scan on ab_a2_b3 ab_a2_3 (never executed) - Filter: (b = $0) -(19 rows) - -select tableoid::regclass, * from ab; - tableoid | a | b -----------+---+--- - ab_a1_b3 | 1 | 3 - ab_a1_b3 | 1 | 3 - ab_a1_b3 | 1 | 3 - ab_a2_b1 | 2 | 1 -(4 rows) - -drop table ab, lprt_a; --- Join -create table tbl1(col1 int); -insert into tbl1 values (501), (505); --- Basic table -create table tprt (col1 int) partition by range (col1); -create table tprt_1 partition of tprt for values from (1) to (501); -create table tprt_2 partition of tprt for values from (501) to (1001); -create table tprt_3 partition of tprt for values from (1001) to (2001); -create table tprt_4 partition of tprt for values from (2001) to (3001); -create table tprt_5 partition of tprt for values from (3001) to (4001); -create table tprt_6 partition of tprt for values from (4001) to (5001); -create index tprt1_idx on tprt_1 (col1); -create index tprt2_idx on tprt_2 (col1); -create index tprt3_idx on tprt_3 (col1); -create index tprt4_idx on tprt_4 (col1); -create index tprt5_idx on tprt_5 (col1); -create index tprt6_idx on tprt_6 (col1); -insert into tprt values (10), (20), (501), (502), (505), (1001), (4500); -set enable_hashjoin = off; -set enable_mergejoin = off; -explain (analyze, costs off, summary off, timing off) -select * from tbl1 join tprt on tbl1.col1 > tprt.col1; - QUERY PLAN --------------------------------------------------------------------------- - Nested Loop (actual rows=6 loops=1) - -> Seq Scan on tbl1 (actual rows=2 loops=1) - -> Append (actual rows=3 loops=2) - -> Index Scan using tprt1_idx on tprt_1 (actual rows=2 loops=2) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt2_idx on tprt_2 (actual rows=2 loops=1) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt3_idx on tprt_3 (never executed) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt4_idx on tprt_4 (never executed) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt5_idx on tprt_5 (never executed) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt6_idx on tprt_6 (never executed) - Index Cond: (col1 < tbl1.col1) -(15 rows) - -explain (analyze, costs off, summary off, timing off) -select * from tbl1 join tprt on tbl1.col1 = tprt.col1; - QUERY PLAN --------------------------------------------------------------------------- - Nested Loop (actual rows=2 loops=1) - -> Seq Scan on tbl1 (actual rows=2 loops=1) - -> Append (actual rows=1 loops=2) - -> Index Scan using tprt1_idx on tprt_1 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt2_idx on tprt_2 (actual rows=1 loops=2) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt3_idx on tprt_3 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt4_idx on tprt_4 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt5_idx on tprt_5 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt6_idx on tprt_6 (never executed) - Index Cond: (col1 = tbl1.col1) -(15 rows) - -select tbl1.col1, tprt.col1 from tbl1 -inner join tprt on tbl1.col1 > tprt.col1 -order by tbl1.col1, tprt.col1; - col1 | col1 
-------+------ - 501 | 10 - 501 | 20 - 505 | 10 - 505 | 20 - 505 | 501 - 505 | 502 -(6 rows) - -select tbl1.col1, tprt.col1 from tbl1 -inner join tprt on tbl1.col1 = tprt.col1 -order by tbl1.col1, tprt.col1; - col1 | col1 -------+------ - 501 | 501 - 505 | 505 -(2 rows) - --- Multiple partitions -insert into tbl1 values (1001), (1010), (1011); -explain (analyze, costs off, summary off, timing off) -select * from tbl1 inner join tprt on tbl1.col1 > tprt.col1; - QUERY PLAN --------------------------------------------------------------------------- - Nested Loop (actual rows=23 loops=1) - -> Seq Scan on tbl1 (actual rows=5 loops=1) - -> Append (actual rows=5 loops=5) - -> Index Scan using tprt1_idx on tprt_1 (actual rows=2 loops=5) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt2_idx on tprt_2 (actual rows=3 loops=4) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt3_idx on tprt_3 (actual rows=1 loops=2) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt4_idx on tprt_4 (never executed) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt5_idx on tprt_5 (never executed) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt6_idx on tprt_6 (never executed) - Index Cond: (col1 < tbl1.col1) -(15 rows) - -explain (analyze, costs off, summary off, timing off) -select * from tbl1 inner join tprt on tbl1.col1 = tprt.col1; - QUERY PLAN --------------------------------------------------------------------------- - Nested Loop (actual rows=3 loops=1) - -> Seq Scan on tbl1 (actual rows=5 loops=1) - -> Append (actual rows=1 loops=5) - -> Index Scan using tprt1_idx on tprt_1 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt2_idx on tprt_2 (actual rows=1 loops=2) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt3_idx on tprt_3 (actual rows=0 loops=3) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt4_idx on tprt_4 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt5_idx on tprt_5 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt6_idx on tprt_6 (never executed) - Index Cond: (col1 = tbl1.col1) -(15 rows) - -select tbl1.col1, tprt.col1 from tbl1 -inner join tprt on tbl1.col1 > tprt.col1 -order by tbl1.col1, tprt.col1; - col1 | col1 -------+------ - 501 | 10 - 501 | 20 - 505 | 10 - 505 | 20 - 505 | 501 - 505 | 502 - 1001 | 10 - 1001 | 20 - 1001 | 501 - 1001 | 502 - 1001 | 505 - 1010 | 10 - 1010 | 20 - 1010 | 501 - 1010 | 502 - 1010 | 505 - 1010 | 1001 - 1011 | 10 - 1011 | 20 - 1011 | 501 - 1011 | 502 - 1011 | 505 - 1011 | 1001 -(23 rows) - -select tbl1.col1, tprt.col1 from tbl1 -inner join tprt on tbl1.col1 = tprt.col1 -order by tbl1.col1, tprt.col1; - col1 | col1 -------+------ - 501 | 501 - 505 | 505 - 1001 | 1001 -(3 rows) - --- Last partition -delete from tbl1; -insert into tbl1 values (4400); -explain (analyze, costs off, summary off, timing off) -select * from tbl1 join tprt on tbl1.col1 < tprt.col1; - QUERY PLAN --------------------------------------------------------------------------- - Nested Loop (actual rows=1 loops=1) - -> Seq Scan on tbl1 (actual rows=1 loops=1) - -> Append (actual rows=1 loops=1) - -> Index Scan using tprt1_idx on tprt_1 (never executed) - Index Cond: (col1 > tbl1.col1) - -> Index Scan using tprt2_idx on tprt_2 (never executed) - Index Cond: (col1 > tbl1.col1) - -> Index Scan using tprt3_idx on tprt_3 (never executed) - Index Cond: (col1 > tbl1.col1) - -> Index Scan using tprt4_idx on tprt_4 (never executed) - Index Cond: (col1 > tbl1.col1) 
- -> Index Scan using tprt5_idx on tprt_5 (never executed) - Index Cond: (col1 > tbl1.col1) - -> Index Scan using tprt6_idx on tprt_6 (actual rows=1 loops=1) - Index Cond: (col1 > tbl1.col1) -(15 rows) - -select tbl1.col1, tprt.col1 from tbl1 -inner join tprt on tbl1.col1 < tprt.col1 -order by tbl1.col1, tprt.col1; - col1 | col1 -------+------ - 4400 | 4500 -(1 row) - --- No matching partition -delete from tbl1; -insert into tbl1 values (10000); -explain (analyze, costs off, summary off, timing off) -select * from tbl1 join tprt on tbl1.col1 = tprt.col1; - QUERY PLAN -------------------------------------------------------------------- - Nested Loop (actual rows=0 loops=1) - -> Seq Scan on tbl1 (actual rows=1 loops=1) - -> Append (actual rows=0 loops=1) - -> Index Scan using tprt1_idx on tprt_1 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt2_idx on tprt_2 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt3_idx on tprt_3 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt4_idx on tprt_4 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt5_idx on tprt_5 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt6_idx on tprt_6 (never executed) - Index Cond: (col1 = tbl1.col1) -(15 rows) - -select tbl1.col1, tprt.col1 from tbl1 -inner join tprt on tbl1.col1 = tprt.col1 -order by tbl1.col1, tprt.col1; - col1 | col1 -------+------ -(0 rows) - -drop table tbl1, tprt; --- Test with columns defined in varying orders between each level -create table part_abc (a int not null, b int not null, c int not null) partition by list (a); -create table part_bac (b int not null, a int not null, c int not null) partition by list (b); -create table part_cab (c int not null, a int not null, b int not null) partition by list (c); -create table part_abc_p1 (a int not null, b int not null, c int not null); -alter table part_abc attach partition part_bac for values in(1); -alter table part_bac attach partition part_cab for values in(2); -alter table part_cab attach partition part_abc_p1 for values in(3); -prepare part_abc_q1 (int, int, int) as -select * from part_abc where a = $1 and b = $2 and c = $3; --- Single partition should be scanned. -explain (analyze, costs off, summary off, timing off) execute part_abc_q1 (1, 2, 3); - QUERY PLAN ----------------------------------------------------------- - Seq Scan on part_abc_p1 part_abc (actual rows=0 loops=1) - Filter: ((a = $1) AND (b = $2) AND (c = $3)) -(2 rows) - -deallocate part_abc_q1; -drop table part_abc; --- Ensure that an Append node properly handles a sub-partitioned table --- matching without any of its leaf partitions matching the clause. -create table listp (a int, b int) partition by list (a); -create table listp_1 partition of listp for values in(1) partition by list (b); -create table listp_1_1 partition of listp_1 for values in(1); -create table listp_2 partition of listp for values in(2) partition by list (b); -create table listp_2_1 partition of listp_2 for values in(2); -select * from listp where b = 1; - a | b ----+--- -(0 rows) - --- Ensure that an Append node properly can handle selection of all first level --- partitions before finally detecting the correct set of 2nd level partitions --- which match the given parameter. 
-prepare q1 (int,int) as select * from listp where b in ($1,$2); -explain (analyze, costs off, summary off, timing off) execute q1 (1,1); - QUERY PLAN -------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 1 - -> Seq Scan on listp_1_1 listp_1 (actual rows=0 loops=1) - Filter: (b = ANY (ARRAY[$1, $2])) -(4 rows) - -explain (analyze, costs off, summary off, timing off) execute q1 (2,2); - QUERY PLAN -------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 1 - -> Seq Scan on listp_2_1 listp_1 (actual rows=0 loops=1) - Filter: (b = ANY (ARRAY[$1, $2])) -(4 rows) - --- Try with no matching partitions. -explain (analyze, costs off, summary off, timing off) execute q1 (0,0); - QUERY PLAN --------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 2 -(2 rows) - -deallocate q1; --- Test more complex cases where a not-equal condition further eliminates partitions. -prepare q1 (int,int,int,int) as select * from listp where b in($1,$2) and $3 <> b and $4 <> b; --- Both partitions allowed by IN clause, but one disallowed by <> clause -explain (analyze, costs off, summary off, timing off) execute q1 (1,2,2,0); - QUERY PLAN -------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 1 - -> Seq Scan on listp_1_1 listp_1 (actual rows=0 loops=1) - Filter: ((b = ANY (ARRAY[$1, $2])) AND ($3 <> b) AND ($4 <> b)) -(4 rows) - --- Both partitions allowed by IN clause, then both excluded again by <> clauses. -explain (analyze, costs off, summary off, timing off) execute q1 (1,2,2,1); - QUERY PLAN --------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 2 -(2 rows) - --- Ensure Params that evaluate to NULL properly prune away all partitions -explain (analyze, costs off, summary off, timing off) -select * from listp where a = (select null::int); - QUERY PLAN ------------------------------------------------------- - Append (actual rows=0 loops=1) - InitPlan 1 (returns $0) - -> Result (actual rows=1 loops=1) - -> Seq Scan on listp_1_1 listp_1 (never executed) - Filter: (a = $0) - -> Seq Scan on listp_2_1 listp_2 (never executed) - Filter: (a = $0) -(7 rows) - -drop table listp; --- --- check that stable query clauses are only used in run-time pruning --- -create table stable_qual_pruning (a timestamp) partition by range (a); -create table stable_qual_pruning1 partition of stable_qual_pruning - for values from ('2000-01-01') to ('2000-02-01'); -create table stable_qual_pruning2 partition of stable_qual_pruning - for values from ('2000-02-01') to ('2000-03-01'); -create table stable_qual_pruning3 partition of stable_qual_pruning - for values from ('3000-02-01') to ('3000-03-01'); --- comparison against a stable value requires run-time pruning -explain (analyze, costs off, summary off, timing off) -select * from stable_qual_pruning where a < localtimestamp; - QUERY PLAN --------------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 1 - -> Seq Scan on stable_qual_pruning1 stable_qual_pruning_1 (actual rows=0 loops=1) - Filter: (a < LOCALTIMESTAMP) - -> Seq Scan on stable_qual_pruning2 stable_qual_pruning_2 (actual rows=0 loops=1) - Filter: (a < LOCALTIMESTAMP) -(6 rows) - --- timestamp < timestamptz comparison is only stable, not immutable -explain (analyze, costs off, summary off, timing off) -select * 
from stable_qual_pruning where a < '2000-02-01'::timestamptz; - QUERY PLAN --------------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 2 - -> Seq Scan on stable_qual_pruning1 stable_qual_pruning_1 (actual rows=0 loops=1) - Filter: (a < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) -(4 rows) - --- check ScalarArrayOp cases -explain (analyze, costs off, summary off, timing off) -select * from stable_qual_pruning - where a = any(array['2010-02-01', '2020-01-01']::timestamp[]); - QUERY PLAN --------------------------------- - Result (actual rows=0 loops=1) - One-Time Filter: false -(2 rows) - -explain (analyze, costs off, summary off, timing off) -select * from stable_qual_pruning - where a = any(array['2000-02-01', '2010-01-01']::timestamp[]); - QUERY PLAN ----------------------------------------------------------------------------------------------------------------- - Seq Scan on stable_qual_pruning2 stable_qual_pruning (actual rows=0 loops=1) - Filter: (a = ANY ('{"Tue Feb 01 00:00:00 2000","Fri Jan 01 00:00:00 2010"}'::timestamp without time zone[])) -(2 rows) - -explain (analyze, costs off, summary off, timing off) -select * from stable_qual_pruning - where a = any(array['2000-02-01', localtimestamp]::timestamp[]); - QUERY PLAN ------------------------------------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 2 - -> Seq Scan on stable_qual_pruning2 stable_qual_pruning_1 (actual rows=0 loops=1) - Filter: (a = ANY (ARRAY['Tue Feb 01 00:00:00 2000'::timestamp without time zone, LOCALTIMESTAMP])) -(4 rows) - -explain (analyze, costs off, summary off, timing off) -select * from stable_qual_pruning - where a = any(array['2010-02-01', '2020-01-01']::timestamptz[]); - QUERY PLAN --------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 3 -(2 rows) - -explain (analyze, costs off, summary off, timing off) -select * from stable_qual_pruning - where a = any(array['2000-02-01', '2010-01-01']::timestamptz[]); - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 2 - -> Seq Scan on stable_qual_pruning2 stable_qual_pruning_1 (actual rows=0 loops=1) - Filter: (a = ANY ('{"Tue Feb 01 00:00:00 2000 PST","Fri Jan 01 00:00:00 2010 PST"}'::timestamp with time zone[])) -(4 rows) - -explain (analyze, costs off, summary off, timing off) -select * from stable_qual_pruning - where a = any(null::timestamptz[]); - QUERY PLAN --------------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - -> Seq Scan on stable_qual_pruning1 stable_qual_pruning_1 (actual rows=0 loops=1) - Filter: (a = ANY (NULL::timestamp with time zone[])) - -> Seq Scan on stable_qual_pruning2 stable_qual_pruning_2 (actual rows=0 loops=1) - Filter: (a = ANY (NULL::timestamp with time zone[])) - -> Seq Scan on stable_qual_pruning3 stable_qual_pruning_3 (actual rows=0 loops=1) - Filter: (a = ANY (NULL::timestamp with time zone[])) -(7 rows) - -drop table stable_qual_pruning; --- --- Check that pruning with composite range partitioning works correctly when --- it must ignore clauses for trailing keys once it has seen a clause with --- non-inclusive operator for an earlier key --- -create table mc3p (a int, b int, c int) partition by range (a, abs(b), c); -create 
table mc3p0 partition of mc3p - for values from (0, 0, 0) to (0, maxvalue, maxvalue); -create table mc3p1 partition of mc3p - for values from (1, 1, 1) to (2, minvalue, minvalue); -create table mc3p2 partition of mc3p - for values from (2, minvalue, minvalue) to (3, maxvalue, maxvalue); -insert into mc3p values (0, 1, 1), (1, 1, 1), (2, 1, 1); -explain (analyze, costs off, summary off, timing off) -select * from mc3p where a < 3 and abs(b) = 1; - QUERY PLAN --------------------------------------------------------- - Append (actual rows=3 loops=1) - -> Seq Scan on mc3p0 mc3p_1 (actual rows=1 loops=1) - Filter: ((a < 3) AND (abs(b) = 1)) - -> Seq Scan on mc3p1 mc3p_2 (actual rows=1 loops=1) - Filter: ((a < 3) AND (abs(b) = 1)) - -> Seq Scan on mc3p2 mc3p_3 (actual rows=1 loops=1) - Filter: ((a < 3) AND (abs(b) = 1)) -(7 rows) - --- --- Check that pruning with composite range partitioning works correctly when --- a combination of runtime parameters is specified, not all of whose values --- are available at the same time --- -prepare ps1 as - select * from mc3p where a = $1 and abs(b) < (select 3); -explain (analyze, costs off, summary off, timing off) -execute ps1(1); - QUERY PLAN --------------------------------------------------------- - Append (actual rows=1 loops=1) - Subplans Removed: 2 - InitPlan 1 (returns $0) - -> Result (actual rows=1 loops=1) - -> Seq Scan on mc3p1 mc3p_1 (actual rows=1 loops=1) - Filter: ((a = $1) AND (abs(b) < $0)) -(6 rows) - -deallocate ps1; -prepare ps2 as - select * from mc3p where a <= $1 and abs(b) < (select 3); -explain (analyze, costs off, summary off, timing off) -execute ps2(1); - QUERY PLAN --------------------------------------------------------- - Append (actual rows=2 loops=1) - Subplans Removed: 1 - InitPlan 1 (returns $0) - -> Result (actual rows=1 loops=1) - -> Seq Scan on mc3p0 mc3p_1 (actual rows=1 loops=1) - Filter: ((a <= $1) AND (abs(b) < $0)) - -> Seq Scan on mc3p1 mc3p_2 (actual rows=1 loops=1) - Filter: ((a <= $1) AND (abs(b) < $0)) -(8 rows) - -deallocate ps2; -drop table mc3p; --- Ensure runtime pruning works with initplans params with boolean types -create table boolvalues (value bool not null); -insert into boolvalues values('t'),('f'); -create table boolp (a bool) partition by list (a); -create table boolp_t partition of boolp for values in('t'); -create table boolp_f partition of boolp for values in('f'); -explain (analyze, costs off, summary off, timing off) -select * from boolp where a = (select value from boolvalues where value); - QUERY PLAN ------------------------------------------------------------ - Append (actual rows=0 loops=1) - InitPlan 1 (returns $0) - -> Seq Scan on boolvalues (actual rows=1 loops=1) - Filter: value - Rows Removed by Filter: 1 - -> Seq Scan on boolp_f boolp_1 (never executed) - Filter: (a = $0) - -> Seq Scan on boolp_t boolp_2 (actual rows=0 loops=1) - Filter: (a = $0) -(9 rows) - -explain (analyze, costs off, summary off, timing off) -select * from boolp where a = (select value from boolvalues where not value); - QUERY PLAN ------------------------------------------------------------ - Append (actual rows=0 loops=1) - InitPlan 1 (returns $0) - -> Seq Scan on boolvalues (actual rows=1 loops=1) - Filter: (NOT value) - Rows Removed by Filter: 1 - -> Seq Scan on boolp_f boolp_1 (actual rows=0 loops=1) - Filter: (a = $0) - -> Seq Scan on boolp_t boolp_2 (never executed) - Filter: (a = $0) -(9 rows) - -drop table boolp; --- --- Test run-time pruning of MergeAppend subnodes --- -set enable_seqscan = off; -set 
enable_sort = off; -create table ma_test (a int, b int) partition by range (a); -create table ma_test_p1 partition of ma_test for values from (0) to (10); -create table ma_test_p2 partition of ma_test for values from (10) to (20); -create table ma_test_p3 partition of ma_test for values from (20) to (30); -insert into ma_test select x,x from generate_series(0,29) t(x); -create index on ma_test (b); -analyze ma_test; -prepare mt_q1 (int) as select a from ma_test where a >= $1 and a % 10 = 5 order by b; -explain (analyze, costs off, summary off, timing off) execute mt_q1(15); - QUERY PLAN ------------------------------------------------------------------------------------------ - Merge Append (actual rows=2 loops=1) - Sort Key: ma_test.b - Subplans Removed: 1 - -> Index Scan using ma_test_p2_b_idx on ma_test_p2 ma_test_1 (actual rows=1 loops=1) - Filter: ((a >= $1) AND ((a % 10) = 5)) - Rows Removed by Filter: 9 - -> Index Scan using ma_test_p3_b_idx on ma_test_p3 ma_test_2 (actual rows=1 loops=1) - Filter: ((a >= $1) AND ((a % 10) = 5)) - Rows Removed by Filter: 9 -(9 rows) - -execute mt_q1(15); - a ----- - 15 - 25 -(2 rows) - -explain (analyze, costs off, summary off, timing off) execute mt_q1(25); - QUERY PLAN ------------------------------------------------------------------------------------------ - Merge Append (actual rows=1 loops=1) - Sort Key: ma_test.b - Subplans Removed: 2 - -> Index Scan using ma_test_p3_b_idx on ma_test_p3 ma_test_1 (actual rows=1 loops=1) - Filter: ((a >= $1) AND ((a % 10) = 5)) - Rows Removed by Filter: 9 -(6 rows) - -execute mt_q1(25); - a ----- - 25 -(1 row) - --- Ensure MergeAppend behaves correctly when no subplans match -explain (analyze, costs off, summary off, timing off) execute mt_q1(35); - QUERY PLAN --------------------------------------- - Merge Append (actual rows=0 loops=1) - Sort Key: ma_test.b - Subplans Removed: 3 -(3 rows) - -execute mt_q1(35); - a ---- -(0 rows) - -deallocate mt_q1; -prepare mt_q2 (int) as select * from ma_test where a >= $1 order by b limit 1; --- Ensure output list looks sane when the MergeAppend has no subplans. 
-explain (analyze, verbose, costs off, summary off, timing off) execute mt_q2 (35); - QUERY PLAN --------------------------------------------- - Limit (actual rows=0 loops=1) - Output: ma_test.a, ma_test.b - -> Merge Append (actual rows=0 loops=1) - Sort Key: ma_test.b - Subplans Removed: 3 -(5 rows) - -deallocate mt_q2; --- ensure initplan params properly prune partitions -explain (analyze, costs off, summary off, timing off) select * from ma_test where a >= (select min(b) from ma_test_p2) order by b; - QUERY PLAN ------------------------------------------------------------------------------------------------ - Merge Append (actual rows=20 loops=1) - Sort Key: ma_test.b - InitPlan 2 (returns $1) - -> Result (actual rows=1 loops=1) - InitPlan 1 (returns $0) - -> Limit (actual rows=1 loops=1) - -> Index Scan using ma_test_p2_b_idx on ma_test_p2 (actual rows=1 loops=1) - Index Cond: (b IS NOT NULL) - -> Index Scan using ma_test_p1_b_idx on ma_test_p1 ma_test_1 (never executed) - Filter: (a >= $1) - -> Index Scan using ma_test_p2_b_idx on ma_test_p2 ma_test_2 (actual rows=10 loops=1) - Filter: (a >= $1) - -> Index Scan using ma_test_p3_b_idx on ma_test_p3 ma_test_3 (actual rows=10 loops=1) - Filter: (a >= $1) -(14 rows) - -reset enable_seqscan; -reset enable_sort; -drop table ma_test; -reset enable_indexonlyscan; --- --- check that pruning works properly when the partition key is of a --- pseudotype --- --- array type list partition key -create table pp_arrpart (a int[]) partition by list (a); -create table pp_arrpart1 partition of pp_arrpart for values in ('{1}'); -create table pp_arrpart2 partition of pp_arrpart for values in ('{2, 3}', '{4, 5}'); -explain (costs off) select * from pp_arrpart where a = '{1}'; - QUERY PLAN ------------------------------------- - Seq Scan on pp_arrpart1 pp_arrpart - Filter: (a = '{1}'::integer[]) -(2 rows) - -explain (costs off) select * from pp_arrpart where a = '{1, 2}'; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -explain (costs off) select * from pp_arrpart where a in ('{4, 5}', '{1}'); - QUERY PLAN ----------------------------------------------------------------------- - Append - -> Seq Scan on pp_arrpart1 pp_arrpart_1 - Filter: ((a = '{4,5}'::integer[]) OR (a = '{1}'::integer[])) - -> Seq Scan on pp_arrpart2 pp_arrpart_2 - Filter: ((a = '{4,5}'::integer[]) OR (a = '{1}'::integer[])) -(5 rows) - -explain (costs off) update pp_arrpart set a = a where a = '{1}'; - QUERY PLAN --------------------------------------------- - Update on pp_arrpart - Update on pp_arrpart1 pp_arrpart_1 - -> Seq Scan on pp_arrpart1 pp_arrpart_1 - Filter: (a = '{1}'::integer[]) -(4 rows) - -explain (costs off) delete from pp_arrpart where a = '{1}'; - QUERY PLAN --------------------------------------------- - Delete on pp_arrpart - Delete on pp_arrpart1 pp_arrpart_1 - -> Seq Scan on pp_arrpart1 pp_arrpart_1 - Filter: (a = '{1}'::integer[]) -(4 rows) - -drop table pp_arrpart; --- array type hash partition key -create table pph_arrpart (a int[]) partition by hash (a); -create table pph_arrpart1 partition of pph_arrpart for values with (modulus 2, remainder 0); -create table pph_arrpart2 partition of pph_arrpart for values with (modulus 2, remainder 1); -insert into pph_arrpart values ('{1}'), ('{1, 2}'), ('{4, 5}'); -select tableoid::regclass, * from pph_arrpart order by 1; - tableoid | a ---------------+------- - pph_arrpart1 | {1,2} - pph_arrpart1 | {4,5} - pph_arrpart2 | {1} -(3 rows) - -explain (costs off) select * from pph_arrpart where 
a = '{1}'; - QUERY PLAN --------------------------------------- - Seq Scan on pph_arrpart2 pph_arrpart - Filter: (a = '{1}'::integer[]) -(2 rows) - -explain (costs off) select * from pph_arrpart where a = '{1, 2}'; - QUERY PLAN --------------------------------------- - Seq Scan on pph_arrpart1 pph_arrpart - Filter: (a = '{1,2}'::integer[]) -(2 rows) - -explain (costs off) select * from pph_arrpart where a in ('{4, 5}', '{1}'); - QUERY PLAN ----------------------------------------------------------------------- - Append - -> Seq Scan on pph_arrpart1 pph_arrpart_1 - Filter: ((a = '{4,5}'::integer[]) OR (a = '{1}'::integer[])) - -> Seq Scan on pph_arrpart2 pph_arrpart_2 - Filter: ((a = '{4,5}'::integer[]) OR (a = '{1}'::integer[])) -(5 rows) - -drop table pph_arrpart; --- enum type list partition key -create type pp_colors as enum ('green', 'blue', 'black'); -create table pp_enumpart (a pp_colors) partition by list (a); -create table pp_enumpart_green partition of pp_enumpart for values in ('green'); -create table pp_enumpart_blue partition of pp_enumpart for values in ('blue'); -explain (costs off) select * from pp_enumpart where a = 'blue'; - QUERY PLAN ------------------------------------------- - Seq Scan on pp_enumpart_blue pp_enumpart - Filter: (a = 'blue'::pp_colors) -(2 rows) - -explain (costs off) select * from pp_enumpart where a = 'black'; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -drop table pp_enumpart; -drop type pp_colors; --- record type as partition key -create type pp_rectype as (a int, b int); -create table pp_recpart (a pp_rectype) partition by list (a); -create table pp_recpart_11 partition of pp_recpart for values in ('(1,1)'); -create table pp_recpart_23 partition of pp_recpart for values in ('(2,3)'); -explain (costs off) select * from pp_recpart where a = '(1,1)'::pp_rectype; - QUERY PLAN --------------------------------------- - Seq Scan on pp_recpart_11 pp_recpart - Filter: (a = '(1,1)'::pp_rectype) -(2 rows) - -explain (costs off) select * from pp_recpart where a = '(1,2)'::pp_rectype; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -drop table pp_recpart; -drop type pp_rectype; --- range type partition key -create table pp_intrangepart (a int4range) partition by list (a); -create table pp_intrangepart12 partition of pp_intrangepart for values in ('[1,2]'); -create table pp_intrangepart2inf partition of pp_intrangepart for values in ('[2,)'); -explain (costs off) select * from pp_intrangepart where a = '[1,2]'::int4range; - QUERY PLAN ------------------------------------------------ - Seq Scan on pp_intrangepart12 pp_intrangepart - Filter: (a = '[1,3)'::int4range) -(2 rows) - -explain (costs off) select * from pp_intrangepart where a = '(1,2)'::int4range; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -drop table pp_intrangepart; --- --- Ensure the enable_partition_prune GUC properly disables partition pruning. 
--- -create table pp_lp (a int, value int) partition by list (a); -create table pp_lp1 partition of pp_lp for values in(1); -create table pp_lp2 partition of pp_lp for values in(2); -explain (costs off) select * from pp_lp where a = 1; - QUERY PLAN --------------------------- - Seq Scan on pp_lp1 pp_lp - Filter: (a = 1) -(2 rows) - -explain (costs off) update pp_lp set value = 10 where a = 1; - QUERY PLAN ----------------------------------- - Update on pp_lp - Update on pp_lp1 pp_lp_1 - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) -(4 rows) - -explain (costs off) delete from pp_lp where a = 1; - QUERY PLAN ----------------------------------- - Delete on pp_lp - Delete on pp_lp1 pp_lp_1 - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) -(4 rows) - -set enable_partition_pruning = off; -set constraint_exclusion = 'partition'; -- this should not affect the result. -explain (costs off) select * from pp_lp where a = 1; - QUERY PLAN ----------------------------------- - Append - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) - -> Seq Scan on pp_lp2 pp_lp_2 - Filter: (a = 1) -(5 rows) - -explain (costs off) update pp_lp set value = 10 where a = 1; - QUERY PLAN ----------------------------------------- - Update on pp_lp - Update on pp_lp1 pp_lp_1 - Update on pp_lp2 pp_lp_2 - -> Append - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) - -> Seq Scan on pp_lp2 pp_lp_2 - Filter: (a = 1) -(8 rows) - -explain (costs off) delete from pp_lp where a = 1; - QUERY PLAN ----------------------------------------- - Delete on pp_lp - Delete on pp_lp1 pp_lp_1 - Delete on pp_lp2 pp_lp_2 - -> Append - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) - -> Seq Scan on pp_lp2 pp_lp_2 - Filter: (a = 1) -(8 rows) - -set constraint_exclusion = 'off'; -- this should not affect the result. -explain (costs off) select * from pp_lp where a = 1; - QUERY PLAN ----------------------------------- - Append - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) - -> Seq Scan on pp_lp2 pp_lp_2 - Filter: (a = 1) -(5 rows) - -explain (costs off) update pp_lp set value = 10 where a = 1; - QUERY PLAN ----------------------------------------- - Update on pp_lp - Update on pp_lp1 pp_lp_1 - Update on pp_lp2 pp_lp_2 - -> Append - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) - -> Seq Scan on pp_lp2 pp_lp_2 - Filter: (a = 1) -(8 rows) - -explain (costs off) delete from pp_lp where a = 1; - QUERY PLAN ----------------------------------------- - Delete on pp_lp - Delete on pp_lp1 pp_lp_1 - Delete on pp_lp2 pp_lp_2 - -> Append - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) - -> Seq Scan on pp_lp2 pp_lp_2 - Filter: (a = 1) -(8 rows) - -drop table pp_lp; --- Ensure enable_partition_prune does not affect non-partitioned tables. -create table inh_lp (a int, value int); -create table inh_lp1 (a int, value int, check(a = 1)) inherits (inh_lp); -NOTICE: merging column "a" with inherited definition -NOTICE: merging column "value" with inherited definition -create table inh_lp2 (a int, value int, check(a = 2)) inherits (inh_lp); -NOTICE: merging column "a" with inherited definition -NOTICE: merging column "value" with inherited definition -set constraint_exclusion = 'partition'; --- inh_lp2 should be removed in the following 3 cases. 
-explain (costs off) select * from inh_lp where a = 1; - QUERY PLAN ------------------------------------- - Append - -> Seq Scan on inh_lp inh_lp_1 - Filter: (a = 1) - -> Seq Scan on inh_lp1 inh_lp_2 - Filter: (a = 1) -(5 rows) - -explain (costs off) update inh_lp set value = 10 where a = 1; - QUERY PLAN ------------------------------------------------- - Update on inh_lp - Update on inh_lp inh_lp_1 - Update on inh_lp1 inh_lp_2 - -> Result - -> Append - -> Seq Scan on inh_lp inh_lp_1 - Filter: (a = 1) - -> Seq Scan on inh_lp1 inh_lp_2 - Filter: (a = 1) -(9 rows) - -explain (costs off) delete from inh_lp where a = 1; - QUERY PLAN ------------------------------------------- - Delete on inh_lp - Delete on inh_lp inh_lp_1 - Delete on inh_lp1 inh_lp_2 - -> Append - -> Seq Scan on inh_lp inh_lp_1 - Filter: (a = 1) - -> Seq Scan on inh_lp1 inh_lp_2 - Filter: (a = 1) -(8 rows) - --- Ensure we don't exclude normal relations when we only expect to exclude --- inheritance children -explain (costs off) update inh_lp1 set value = 10 where a = 2; - QUERY PLAN ---------------------------- - Update on inh_lp1 - -> Seq Scan on inh_lp1 - Filter: (a = 2) -(3 rows) - -drop table inh_lp cascade; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table inh_lp1 -drop cascades to table inh_lp2 -reset enable_partition_pruning; -reset constraint_exclusion; --- Check pruning for a partition tree containing only temporary relations -create temp table pp_temp_parent (a int) partition by list (a); -create temp table pp_temp_part_1 partition of pp_temp_parent for values in (1); -create temp table pp_temp_part_def partition of pp_temp_parent default; -explain (costs off) select * from pp_temp_parent where true; - QUERY PLAN ------------------------------------------------------ - Append - -> Seq Scan on pp_temp_part_1 pp_temp_parent_1 - -> Seq Scan on pp_temp_part_def pp_temp_parent_2 -(3 rows) - -explain (costs off) select * from pp_temp_parent where a = 2; - QUERY PLAN ---------------------------------------------- - Seq Scan on pp_temp_part_def pp_temp_parent - Filter: (a = 2) -(2 rows) - -drop table pp_temp_parent; --- Stress run-time partition pruning a bit more, per bug reports -create temp table p (a int, b int, c int) partition by list (a); -create temp table p1 partition of p for values in (1); -create temp table p2 partition of p for values in (2); -create temp table q (a int, b int, c int) partition by list (a); -create temp table q1 partition of q for values in (1) partition by list (b); -create temp table q11 partition of q1 for values in (1) partition by list (c); -create temp table q111 partition of q11 for values in (1); -create temp table q2 partition of q for values in (2) partition by list (b); -create temp table q21 partition of q2 for values in (1); -create temp table q22 partition of q2 for values in (2); -insert into q22 values (2, 2, 3); -explain (costs off) -select * -from ( - select * from p - union all - select * from q1 - union all - select 1, 1, 1 - ) s(a, b, c) -where s.a = 1 and s.b = 1 and s.c = (select 1); - QUERY PLAN ----------------------------------------------------- - Append - InitPlan 1 (returns $0) - -> Result - -> Seq Scan on p1 p - Filter: ((a = 1) AND (b = 1) AND (c = $0)) - -> Seq Scan on q111 q1 - Filter: ((a = 1) AND (b = 1) AND (c = $0)) - -> Result - One-Time Filter: (1 = $0) -(9 rows) - -select * -from ( - select * from p - union all - select * from q1 - union all - select 1, 1, 1 - ) s(a, b, c) -where s.a = 1 and s.b = 1 and s.c = (select 1); - a | b | c 
----+---+--- - 1 | 1 | 1 -(1 row) - -prepare q (int, int) as -select * -from ( - select * from p - union all - select * from q1 - union all - select 1, 1, 1 - ) s(a, b, c) -where s.a = $1 and s.b = $2 and s.c = (select 1); -explain (costs off) execute q (1, 1); - QUERY PLAN ---------------------------------------------------------------- - Append - Subplans Removed: 1 - InitPlan 1 (returns $0) - -> Result - -> Seq Scan on p1 p - Filter: ((a = $1) AND (b = $2) AND (c = $0)) - -> Seq Scan on q111 q1 - Filter: ((a = $1) AND (b = $2) AND (c = $0)) - -> Result - One-Time Filter: ((1 = $1) AND (1 = $2) AND (1 = $0)) -(10 rows) - -execute q (1, 1); - a | b | c ----+---+--- - 1 | 1 | 1 -(1 row) - -drop table p, q; --- Ensure run-time pruning works correctly when we match a partitioned table --- on the first level but find no matching partitions on the second level. -create table listp (a int, b int) partition by list (a); -create table listp1 partition of listp for values in(1); -create table listp2 partition of listp for values in(2) partition by list(b); -create table listp2_10 partition of listp2 for values in (10); -explain (analyze, costs off, summary off, timing off) -select * from listp where a = (select 2) and b <> 10; - QUERY PLAN --------------------------------------------------- - Seq Scan on listp1 listp (actual rows=0 loops=1) - Filter: ((b <> 10) AND (a = $0)) - InitPlan 1 (returns $0) - -> Result (never executed) -(4 rows) - --- --- check that a partition directly accessed in a query is excluded with --- constraint_exclusion = on --- --- turn off partition pruning, so that it doesn't interfere -set enable_partition_pruning to off; --- setting constraint_exclusion to 'partition' disables exclusion -set constraint_exclusion to 'partition'; -explain (costs off) select * from listp1 where a = 2; - QUERY PLAN --------------------- - Seq Scan on listp1 - Filter: (a = 2) -(2 rows) - -explain (costs off) update listp1 set a = 1 where a = 2; - QUERY PLAN --------------------------- - Update on listp1 - -> Seq Scan on listp1 - Filter: (a = 2) -(3 rows) - --- constraint exclusion enabled -set constraint_exclusion to 'on'; -explain (costs off) select * from listp1 where a = 2; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -explain (costs off) update listp1 set a = 1 where a = 2; - QUERY PLAN --------------------------------- - Update on listp1 - -> Result - One-Time Filter: false -(3 rows) - -reset constraint_exclusion; -reset enable_partition_pruning; -drop table listp; --- Ensure run-time pruning works correctly for nested Append nodes -set parallel_setup_cost to 0; -set parallel_tuple_cost to 0; -create table listp (a int) partition by list(a); -create table listp_12 partition of listp for values in(1,2) partition by list(a); -create table listp_12_1 partition of listp_12 for values in(1); -create table listp_12_2 partition of listp_12 for values in(2); --- Force the 2nd subnode of the Append to be non-parallel. This results in --- a nested Append node because the mixed parallel / non-parallel paths cannot --- be pulled into the top-level Append. -alter table listp_12_1 set (parallel_workers = 0); --- Ensure that listp_12_2 is not scanned. (The nested Append is not seen in --- the plan as it's pulled in setref.c due to having just a single subnode). 
-select explain_parallel_append('select * from listp where a = (select 1);'); - explain_parallel_append ----------------------------------------------------------------------- - Gather (actual rows=N loops=N) - Workers Planned: 2 - Params Evaluated: $0 - Workers Launched: N - InitPlan 1 (returns $0) - -> Result (actual rows=N loops=N) - -> Parallel Append (actual rows=N loops=N) - -> Seq Scan on listp_12_1 listp_1 (actual rows=N loops=N) - Filter: (a = $0) - -> Parallel Seq Scan on listp_12_2 listp_2 (never executed) - Filter: (a = $0) -(11 rows) - --- Like the above but throw some more complexity at the planner by adding --- a UNION ALL. We expect both sides of the union not to scan the --- non-required partitions. -select explain_parallel_append( -'select * from listp where a = (select 1) - union all -select * from listp where a = (select 2);'); - explain_parallel_append ------------------------------------------------------------------------------------ - Gather (actual rows=N loops=N) - Workers Planned: 2 - Workers Launched: N - -> Parallel Append (actual rows=N loops=N) - -> Parallel Append (actual rows=N loops=N) - InitPlan 2 (returns $1) - -> Result (actual rows=N loops=N) - -> Seq Scan on listp_12_1 listp_1 (never executed) - Filter: (a = $1) - -> Parallel Seq Scan on listp_12_2 listp_2 (actual rows=N loops=N) - Filter: (a = $1) - -> Parallel Append (actual rows=N loops=N) - InitPlan 1 (returns $0) - -> Result (actual rows=N loops=N) - -> Seq Scan on listp_12_1 listp_4 (actual rows=N loops=N) - Filter: (a = $0) - -> Parallel Seq Scan on listp_12_2 listp_5 (never executed) - Filter: (a = $0) -(18 rows) - -drop table listp; -reset parallel_tuple_cost; -reset parallel_setup_cost; --- Test case for run-time pruning with a nested Merge Append -set enable_sort to 0; -create table rangep (a int, b int) partition by range (a); -create table rangep_0_to_100 partition of rangep for values from (0) to (100) partition by list (b); --- We need 3 sub-partitions. 1 to validate pruning worked and another two --- because a single remaining partition would be pulled up to the main Append. 
-create table rangep_0_to_100_1 partition of rangep_0_to_100 for values in(1); -create table rangep_0_to_100_2 partition of rangep_0_to_100 for values in(2); -create table rangep_0_to_100_3 partition of rangep_0_to_100 for values in(3); -create table rangep_100_to_200 partition of rangep for values from (100) to (200); -create index on rangep (a); --- Ensure run-time pruning works on the nested Merge Append -explain (analyze on, costs off, timing off, summary off) -select * from rangep where b IN((select 1),(select 2)) order by a; - QUERY PLAN ------------------------------------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - InitPlan 1 (returns $0) - -> Result (actual rows=1 loops=1) - InitPlan 2 (returns $1) - -> Result (actual rows=1 loops=1) - -> Merge Append (actual rows=0 loops=1) - Sort Key: rangep_2.a - -> Index Scan using rangep_0_to_100_1_a_idx on rangep_0_to_100_1 rangep_2 (actual rows=0 loops=1) - Filter: (b = ANY (ARRAY[$0, $1])) - -> Index Scan using rangep_0_to_100_2_a_idx on rangep_0_to_100_2 rangep_3 (actual rows=0 loops=1) - Filter: (b = ANY (ARRAY[$0, $1])) - -> Index Scan using rangep_0_to_100_3_a_idx on rangep_0_to_100_3 rangep_4 (never executed) - Filter: (b = ANY (ARRAY[$0, $1])) - -> Index Scan using rangep_100_to_200_a_idx on rangep_100_to_200 rangep_5 (actual rows=0 loops=1) - Filter: (b = ANY (ARRAY[$0, $1])) -(15 rows) - -reset enable_sort; -drop table rangep; --- --- Check that gen_prune_steps_from_opexps() works well for various cases of --- clauses for different partition keys --- -create table rp_prefix_test1 (a int, b varchar) partition by range(a, b); -create table rp_prefix_test1_p1 partition of rp_prefix_test1 for values from (1, 'a') to (1, 'b'); -create table rp_prefix_test1_p2 partition of rp_prefix_test1 for values from (2, 'a') to (2, 'b'); --- Don't call get_steps_using_prefix() with the last partition key b plus --- an empty prefix -explain (costs off) select * from rp_prefix_test1 where a <= 1 and b = 'a'; - QUERY PLAN --------------------------------------------------- - Seq Scan on rp_prefix_test1_p1 rp_prefix_test1 - Filter: ((a <= 1) AND ((b)::text = 'a'::text)) -(2 rows) - -create table rp_prefix_test2 (a int, b int, c int) partition by range(a, b, c); -create table rp_prefix_test2_p1 partition of rp_prefix_test2 for values from (1, 1, 0) to (1, 1, 10); -create table rp_prefix_test2_p2 partition of rp_prefix_test2 for values from (2, 2, 0) to (2, 2, 10); --- Don't call get_steps_using_prefix() with the last partition key c plus --- an invalid prefix (ie, b = 1) -explain (costs off) select * from rp_prefix_test2 where a <= 1 and b = 1 and c >= 0; - QUERY PLAN ------------------------------------------------- - Seq Scan on rp_prefix_test2_p1 rp_prefix_test2 - Filter: ((a <= 1) AND (c >= 0) AND (b = 1)) -(2 rows) - -create table rp_prefix_test3 (a int, b int, c int, d int) partition by range(a, b, c, d); -create table rp_prefix_test3_p1 partition of rp_prefix_test3 for values from (1, 1, 1, 0) to (1, 1, 1, 10); -create table rp_prefix_test3_p2 partition of rp_prefix_test3 for values from (2, 2, 2, 0) to (2, 2, 2, 10); --- Test that get_steps_using_prefix() handles a prefix that contains multiple --- clauses for the partition key b (ie, b >= 1 and b >= 2) -explain (costs off) select * from rp_prefix_test3 where a >= 1 and b >= 1 and b >= 2 and c >= 2 and d >= 0; - QUERY PLAN --------------------------------------------------------------------------- - Seq Scan on rp_prefix_test3_p2 
rp_prefix_test3 - Filter: ((a >= 1) AND (b >= 1) AND (b >= 2) AND (c >= 2) AND (d >= 0)) -(2 rows) - --- Test that get_steps_using_prefix() handles a prefix that contains multiple --- clauses for the partition key b (ie, b >= 1 and b = 2) (This also tests --- that the caller arranges clauses in that prefix in the required order) -explain (costs off) select * from rp_prefix_test3 where a >= 1 and b >= 1 and b = 2 and c = 2 and d >= 0; - QUERY PLAN ------------------------------------------------------------------------- - Seq Scan on rp_prefix_test3_p2 rp_prefix_test3 - Filter: ((a >= 1) AND (b >= 1) AND (d >= 0) AND (b = 2) AND (c = 2)) -(2 rows) - -drop table rp_prefix_test1; -drop table rp_prefix_test2; -drop table rp_prefix_test3; --- --- Test that get_steps_using_prefix() handles IS NULL clauses correctly --- -create table hp_prefix_test (a int, b int, c int, d int) - partition by hash (a part_test_int4_ops, b part_test_int4_ops, c part_test_int4_ops, d part_test_int4_ops); --- create 8 partitions -select 'create table hp_prefix_test_p' || x::text || ' partition of hp_prefix_test for values with (modulus 8, remainder ' || x::text || ');' -from generate_Series(0,7) x; - ?column? ------------------------------------------------------------------------------------------------------- - create table hp_prefix_test_p0 partition of hp_prefix_test for values with (modulus 8, remainder 0); - create table hp_prefix_test_p1 partition of hp_prefix_test for values with (modulus 8, remainder 1); - create table hp_prefix_test_p2 partition of hp_prefix_test for values with (modulus 8, remainder 2); - create table hp_prefix_test_p3 partition of hp_prefix_test for values with (modulus 8, remainder 3); - create table hp_prefix_test_p4 partition of hp_prefix_test for values with (modulus 8, remainder 4); - create table hp_prefix_test_p5 partition of hp_prefix_test for values with (modulus 8, remainder 5); - create table hp_prefix_test_p6 partition of hp_prefix_test for values with (modulus 8, remainder 6); - create table hp_prefix_test_p7 partition of hp_prefix_test for values with (modulus 8, remainder 7); -(8 rows) - -\gexec -create table hp_prefix_test_p0 partition of hp_prefix_test for values with (modulus 8, remainder 0); -create table hp_prefix_test_p1 partition of hp_prefix_test for values with (modulus 8, remainder 1); -create table hp_prefix_test_p2 partition of hp_prefix_test for values with (modulus 8, remainder 2); -create table hp_prefix_test_p3 partition of hp_prefix_test for values with (modulus 8, remainder 3); -create table hp_prefix_test_p4 partition of hp_prefix_test for values with (modulus 8, remainder 4); -create table hp_prefix_test_p5 partition of hp_prefix_test for values with (modulus 8, remainder 5); -create table hp_prefix_test_p6 partition of hp_prefix_test for values with (modulus 8, remainder 6); -create table hp_prefix_test_p7 partition of hp_prefix_test for values with (modulus 8, remainder 7); --- insert 16 rows, one row for each test to perform. -insert into hp_prefix_test -select - case a when 0 then null else 1 end, - case b when 0 then null else 2 end, - case c when 0 then null else 3 end, - case d when 0 then null else 4 end -from - generate_series(0,1) a, - generate_series(0,1) b, - generate_Series(0,1) c, - generate_Series(0,1) d; --- Ensure partition pruning works correctly for each combination of IS NULL --- and equality quals. This may seem a little excessive, but there have been --- a number of bugs in this area over the years. 
We make use of row only --- output to reduce the size of the expected results. -\t on -select - 'explain (costs off) select tableoid::regclass,* from hp_prefix_test where ' || - string_agg(c.colname || case when g.s & (1 << c.colpos) = 0 then ' is null' else ' = ' || (colpos+1)::text end, ' and ' order by c.colpos) -from (values('a',0),('b',1),('c',2),('d',3)) c(colname, colpos), generate_Series(0,15) g(s) -group by g.s -order by g.s; - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d = 4 - -\gexec -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d is null - Seq Scan on hp_prefix_test_p0 hp_prefix_test - Filter: ((a IS NULL) AND (b IS NULL) AND (c IS NULL) AND (d IS NULL)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d is null - Seq Scan on hp_prefix_test_p1 hp_prefix_test - Filter: ((b IS NULL) AND (c IS NULL) AND (d IS NULL) AND (a = 1)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d is null - Seq Scan on hp_prefix_test_p2 hp_prefix_test - Filter: ((a IS NULL) AND (c IS NULL) AND (d IS NULL) AND (b = 2)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d is null - Seq Scan on hp_prefix_test_p4 hp_prefix_test - Filter: ((c IS NULL) AND (d IS NULL) AND (a = 1) AND (b = 2)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d is null - Seq Scan on hp_prefix_test_p3 hp_prefix_test - Filter: ((a IS NULL) AND (b IS NULL) AND (d IS NULL) 
AND (c = 3)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d is null - Seq Scan on hp_prefix_test_p7 hp_prefix_test - Filter: ((b IS NULL) AND (d IS NULL) AND (a = 1) AND (c = 3)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d is null - Seq Scan on hp_prefix_test_p4 hp_prefix_test - Filter: ((a IS NULL) AND (d IS NULL) AND (b = 2) AND (c = 3)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d is null - Seq Scan on hp_prefix_test_p5 hp_prefix_test - Filter: ((d IS NULL) AND (a = 1) AND (b = 2) AND (c = 3)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d = 4 - Seq Scan on hp_prefix_test_p4 hp_prefix_test - Filter: ((a IS NULL) AND (b IS NULL) AND (c IS NULL) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d = 4 - Seq Scan on hp_prefix_test_p6 hp_prefix_test - Filter: ((b IS NULL) AND (c IS NULL) AND (a = 1) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d = 4 - Seq Scan on hp_prefix_test_p5 hp_prefix_test - Filter: ((a IS NULL) AND (c IS NULL) AND (b = 2) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d = 4 - Seq Scan on hp_prefix_test_p6 hp_prefix_test - Filter: ((c IS NULL) AND (a = 1) AND (b = 2) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d = 4 - Seq Scan on hp_prefix_test_p4 hp_prefix_test - Filter: ((a IS NULL) AND (b IS NULL) AND (c = 3) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d = 4 - Seq Scan on hp_prefix_test_p5 hp_prefix_test - Filter: ((b IS NULL) AND (a = 1) AND (c = 3) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d = 4 - Seq Scan on hp_prefix_test_p6 hp_prefix_test - Filter: ((a IS NULL) AND (b = 2) AND (c = 3) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d = 4 - Seq Scan on hp_prefix_test_p4 hp_prefix_test - Filter: ((a = 1) AND (b = 2) AND (c = 3) AND (d = 4)) - --- And ensure we get exactly 1 row from each. Again, all 16 possible combinations. 
-select - 'select tableoid::regclass,* from hp_prefix_test where ' || - string_agg(c.colname || case when g.s & (1 << c.colpos) = 0 then ' is null' else ' = ' || (colpos+1)::text end, ' and ' order by c.colpos) -from (values('a',0),('b',1),('c',2),('d',3)) c(colname, colpos), generate_Series(0,15) g(s) -group by g.s -order by g.s; - select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d is null - select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d is null - select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d is null - select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d is null - select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d is null - select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d is null - select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d is null - select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d is null - select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d = 4 - select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d = 4 - select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d = 4 - select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d = 4 - select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d = 4 - select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d = 4 - select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d = 4 - select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d = 4 - -\gexec -select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d is null - hp_prefix_test_p0 | | | | - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d is null - hp_prefix_test_p1 | 1 | | | - -select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d is null - hp_prefix_test_p2 | | 2 | | - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d is null - hp_prefix_test_p4 | 1 | 2 | | - -select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d is null - hp_prefix_test_p3 | | | 3 | - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d is null - hp_prefix_test_p7 | 1 | | 3 | - -select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d is null - hp_prefix_test_p4 | | 2 | 3 | - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d is null - hp_prefix_test_p5 | 1 | 2 | 3 | - -select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d = 4 - hp_prefix_test_p4 | | | | 4 - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d = 4 - hp_prefix_test_p6 | 1 | | | 4 - -select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d = 4 - hp_prefix_test_p5 | | 2 | | 4 - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d = 4 - hp_prefix_test_p6 | 1 | 2 | | 4 - -select 
tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d = 4 - hp_prefix_test_p4 | | | 3 | 4 - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d = 4 - hp_prefix_test_p5 | 1 | | 3 | 4 - -select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d = 4 - hp_prefix_test_p6 | | 2 | 3 | 4 - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d = 4 - hp_prefix_test_p4 | 1 | 2 | 3 | 4 - -\t off -drop table hp_prefix_test; --- --- Check that gen_partprune_steps() detects self-contradiction from clauses --- regardless of the order of the clauses (Here we use a custom operator to --- prevent the equivclass.c machinery from reordering the clauses) --- -create operator === ( - leftarg = int4, - rightarg = int4, - procedure = int4eq, - commutator = ===, - hashes -); -create operator class part_test_int4_ops2 -for type int4 -using hash as -operator 1 ===, -function 2 part_hashint4_noop(int4, int8); -create table hp_contradict_test (a int, b int) partition by hash (a part_test_int4_ops2, b part_test_int4_ops2); -create table hp_contradict_test_p1 partition of hp_contradict_test for values with (modulus 2, remainder 0); -create table hp_contradict_test_p2 partition of hp_contradict_test for values with (modulus 2, remainder 1); -explain (costs off) select * from hp_contradict_test where a is null and a === 1 and b === 1; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -explain (costs off) select * from hp_contradict_test where a === 1 and b === 1 and a is null; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -drop table hp_contradict_test; -drop operator class part_test_int4_ops2 using hash; -drop operator ===(int4, int4); +WARNING: terminating connection because of crash of another server process +DETAIL: The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory. +HINT: In a moment you should be able to reconnect to the database and repeat your command. +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. 
+connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/indexing.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/indexing.out --- /tmp/cirrus-ci-build/src/test/regress/expected/indexing.out 2024-03-13 23:12:37.624498000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/indexing.out 2024-03-13 23:14:45.725403000 +0000 @@ -611,1042 +611,7 @@ alter table idxpart detach partition idxpart1; alter table idxpart detach partition idxpart2; alter table idxpart detach partition idxpart3; -drop index idxpart_a_idx; -select relname, relkind from pg_class where relname like 'idxpart%' order by relname; - relname | relkind -----------------+--------- - idxpart | p - idxpart1 | r - idxpart1_a_idx | i - idxpart2 | r - idxpart2_a_idx | i - idxpart3 | r - idxpart3_a_idx | i -(7 rows) - -drop table idxpart, idxpart1, idxpart2, idxpart3; -select relname, relkind from pg_class where relname like 'idxpart%' order by relname; - relname | relkind ----------+--------- -(0 rows) - -create table idxpart (a int, b int, c int) partition by range(a); -create index on idxpart(c); -create table idxpart1 partition of idxpart for values from (0) to (250); -create table idxpart2 partition of idxpart for values from (250) to (500); -alter table idxpart detach partition idxpart2; -\d idxpart2 - Table "public.idxpart2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | - c | integer | | | -Indexes: - "idxpart2_c_idx" btree (c) - -alter table idxpart2 drop column c; -\d idxpart2 - Table "public.idxpart2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | - -drop table idxpart, idxpart2; --- Verify that expression indexes inherit correctly -create table idxpart (a int, b int) partition by range (a); -create table idxpart1 (like idxpart); -create index on idxpart1 ((a + b)); -create index on idxpart ((a + b)); -create table idxpart2 (like idxpart); -alter table idxpart attach partition idxpart1 for values from (0000) to (1000); -alter table idxpart attach partition idxpart2 for values from (1000) to (2000); -create table idxpart3 partition of idxpart for values from (2000) to (3000); -select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef - from pg_class join pg_inherits on inhrelid = oid, - lateral pg_get_indexdef(pg_class.oid) - where relkind in ('i', 'I') and relname like 'idxpart%' order by relname; - child | parent | childdef --------------------+------------------+--------------------------------------------------------------------------- - idxpart1_expr_idx | idxpart_expr_idx | CREATE INDEX idxpart1_expr_idx ON public.idxpart1 USING btree (((a + b))) - idxpart2_expr_idx | idxpart_expr_idx | CREATE INDEX idxpart2_expr_idx ON public.idxpart2 USING btree (((a + b))) - idxpart3_expr_idx | idxpart_expr_idx | CREATE INDEX idxpart3_expr_idx ON public.idxpart3 USING btree (((a + b))) -(3 rows) - -drop table idxpart; --- Verify behavior for collation (mis)matches -create table idxpart (a text) partition by range (a); -create table idxpart1 (like idxpart); -create table idxpart2 (like idxpart); -create index on idxpart2 (a collate "POSIX"); -create index on idxpart2 (a); -create index on idxpart2 (a collate "C"); -alter table idxpart attach partition idxpart1 for values from ('aaa') to ('bbb'); -alter table idxpart attach partition idxpart2 for values from 
('bbb') to ('ccc'); -create table idxpart3 partition of idxpart for values from ('ccc') to ('ddd'); -create index on idxpart (a collate "C"); -create table idxpart4 partition of idxpart for values from ('ddd') to ('eee'); -select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef - from pg_class left join pg_inherits on inhrelid = oid, - lateral pg_get_indexdef(pg_class.oid) - where relkind in ('i', 'I') and relname like 'idxpart%' order by relname; - child | parent | childdef ------------------+---------------+-------------------------------------------------------------------------------- - idxpart1_a_idx | idxpart_a_idx | CREATE INDEX idxpart1_a_idx ON public.idxpart1 USING btree (a COLLATE "C") - idxpart2_a_idx | | CREATE INDEX idxpart2_a_idx ON public.idxpart2 USING btree (a COLLATE "POSIX") - idxpart2_a_idx1 | | CREATE INDEX idxpart2_a_idx1 ON public.idxpart2 USING btree (a) - idxpart2_a_idx2 | idxpart_a_idx | CREATE INDEX idxpart2_a_idx2 ON public.idxpart2 USING btree (a COLLATE "C") - idxpart3_a_idx | idxpart_a_idx | CREATE INDEX idxpart3_a_idx ON public.idxpart3 USING btree (a COLLATE "C") - idxpart4_a_idx | idxpart_a_idx | CREATE INDEX idxpart4_a_idx ON public.idxpart4 USING btree (a COLLATE "C") - idxpart_a_idx | | CREATE INDEX idxpart_a_idx ON ONLY public.idxpart USING btree (a COLLATE "C") -(7 rows) - -drop table idxpart; --- Verify behavior for opclass (mis)matches -create table idxpart (a text) partition by range (a); -create table idxpart1 (like idxpart); -create table idxpart2 (like idxpart); -create index on idxpart2 (a); -alter table idxpart attach partition idxpart1 for values from ('aaa') to ('bbb'); -alter table idxpart attach partition idxpart2 for values from ('bbb') to ('ccc'); -create table idxpart3 partition of idxpart for values from ('ccc') to ('ddd'); -create index on idxpart (a text_pattern_ops); -create table idxpart4 partition of idxpart for values from ('ddd') to ('eee'); --- must *not* have attached the index we created on idxpart2 -select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef - from pg_class left join pg_inherits on inhrelid = oid, - lateral pg_get_indexdef(pg_class.oid) - where relkind in ('i', 'I') and relname like 'idxpart%' order by relname; - child | parent | childdef ------------------+---------------+------------------------------------------------------------------------------------ - idxpart1_a_idx | idxpart_a_idx | CREATE INDEX idxpart1_a_idx ON public.idxpart1 USING btree (a text_pattern_ops) - idxpart2_a_idx | | CREATE INDEX idxpart2_a_idx ON public.idxpart2 USING btree (a) - idxpart2_a_idx1 | idxpart_a_idx | CREATE INDEX idxpart2_a_idx1 ON public.idxpart2 USING btree (a text_pattern_ops) - idxpart3_a_idx | idxpart_a_idx | CREATE INDEX idxpart3_a_idx ON public.idxpart3 USING btree (a text_pattern_ops) - idxpart4_a_idx | idxpart_a_idx | CREATE INDEX idxpart4_a_idx ON public.idxpart4 USING btree (a text_pattern_ops) - idxpart_a_idx | | CREATE INDEX idxpart_a_idx ON ONLY public.idxpart USING btree (a text_pattern_ops) -(6 rows) - -drop index idxpart_a_idx; -create index on only idxpart (a text_pattern_ops); --- must reject -alter index idxpart_a_idx attach partition idxpart2_a_idx; -ERROR: cannot attach index "idxpart2_a_idx" as a partition of index "idxpart_a_idx" -DETAIL: The index definitions do not match. 
-drop table idxpart; --- Verify that attaching indexes maps attribute numbers correctly -create table idxpart (col1 int, a int, col2 int, b int) partition by range (a); -create table idxpart1 (b int, col1 int, col2 int, col3 int, a int); -alter table idxpart drop column col1, drop column col2; -alter table idxpart1 drop column col1, drop column col2, drop column col3; -alter table idxpart attach partition idxpart1 for values from (0) to (1000); -create index idxpart_1_idx on only idxpart (b, a); -create index idxpart1_1_idx on idxpart1 (b, a); -create index idxpart1_1b_idx on idxpart1 (b); --- test expressions and partial-index predicate, too -create index idxpart_2_idx on only idxpart ((b + a)) where a > 1; -create index idxpart1_2_idx on idxpart1 ((b + a)) where a > 1; -create index idxpart1_2b_idx on idxpart1 ((a + b)) where a > 1; -create index idxpart1_2c_idx on idxpart1 ((b + a)) where b > 1; -alter index idxpart_1_idx attach partition idxpart1_1b_idx; -- fail -ERROR: cannot attach index "idxpart1_1b_idx" as a partition of index "idxpart_1_idx" -DETAIL: The index definitions do not match. -alter index idxpart_1_idx attach partition idxpart1_1_idx; -alter index idxpart_2_idx attach partition idxpart1_2b_idx; -- fail -ERROR: cannot attach index "idxpart1_2b_idx" as a partition of index "idxpart_2_idx" -DETAIL: The index definitions do not match. -alter index idxpart_2_idx attach partition idxpart1_2c_idx; -- fail -ERROR: cannot attach index "idxpart1_2c_idx" as a partition of index "idxpart_2_idx" -DETAIL: The index definitions do not match. -alter index idxpart_2_idx attach partition idxpart1_2_idx; -- ok -select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef - from pg_class left join pg_inherits on inhrelid = oid, - lateral pg_get_indexdef(pg_class.oid) - where relkind in ('i', 'I') and relname like 'idxpart%' order by relname; - child | parent | childdef ------------------+---------------+----------------------------------------------------------------------------------------- - idxpart1_1_idx | idxpart_1_idx | CREATE INDEX idxpart1_1_idx ON public.idxpart1 USING btree (b, a) - idxpart1_1b_idx | | CREATE INDEX idxpart1_1b_idx ON public.idxpart1 USING btree (b) - idxpart1_2_idx | idxpart_2_idx | CREATE INDEX idxpart1_2_idx ON public.idxpart1 USING btree (((b + a))) WHERE (a > 1) - idxpart1_2b_idx | | CREATE INDEX idxpart1_2b_idx ON public.idxpart1 USING btree (((a + b))) WHERE (a > 1) - idxpart1_2c_idx | | CREATE INDEX idxpart1_2c_idx ON public.idxpart1 USING btree (((b + a))) WHERE (b > 1) - idxpart_1_idx | | CREATE INDEX idxpart_1_idx ON ONLY public.idxpart USING btree (b, a) - idxpart_2_idx | | CREATE INDEX idxpart_2_idx ON ONLY public.idxpart USING btree (((b + a))) WHERE (a > 1) -(7 rows) - -drop table idxpart; --- Make sure the partition columns are mapped correctly -create table idxpart (a int, b int, c text) partition by range (a); -create index idxparti on idxpart (a); -create index idxparti2 on idxpart (c, b); -create table idxpart1 (c text, a int, b int); -alter table idxpart attach partition idxpart1 for values from (0) to (10); -create table idxpart2 (c text, a int, b int); -create index on idxpart2 (a); -create index on idxpart2 (c, b); -alter table idxpart attach partition idxpart2 for values from (10) to (20); -select c.relname, pg_get_indexdef(indexrelid) - from pg_class c join pg_index i on c.oid = i.indexrelid - where indrelid::regclass::text like 'idxpart%' - order by indexrelid::regclass::text collate "C"; - relname | pg_get_indexdef 
-------------------+--------------------------------------------------------------------- - idxpart1_a_idx | CREATE INDEX idxpart1_a_idx ON public.idxpart1 USING btree (a) - idxpart1_c_b_idx | CREATE INDEX idxpart1_c_b_idx ON public.idxpart1 USING btree (c, b) - idxpart2_a_idx | CREATE INDEX idxpart2_a_idx ON public.idxpart2 USING btree (a) - idxpart2_c_b_idx | CREATE INDEX idxpart2_c_b_idx ON public.idxpart2 USING btree (c, b) - idxparti | CREATE INDEX idxparti ON ONLY public.idxpart USING btree (a) - idxparti2 | CREATE INDEX idxparti2 ON ONLY public.idxpart USING btree (c, b) -(6 rows) - -drop table idxpart; --- Verify that columns are mapped correctly in expression indexes -create table idxpart (col1 int, col2 int, a int, b int) partition by range (a); -create table idxpart1 (col2 int, b int, col1 int, a int); -create table idxpart2 (col1 int, col2 int, b int, a int); -alter table idxpart drop column col1, drop column col2; -alter table idxpart1 drop column col1, drop column col2; -alter table idxpart2 drop column col1, drop column col2; -create index on idxpart2 (abs(b)); -alter table idxpart attach partition idxpart2 for values from (0) to (1); -create index on idxpart (abs(b)); -create index on idxpart ((b + 1)); -alter table idxpart attach partition idxpart1 for values from (1) to (2); -select c.relname, pg_get_indexdef(indexrelid) - from pg_class c join pg_index i on c.oid = i.indexrelid - where indrelid::regclass::text like 'idxpart%' - order by indexrelid::regclass::text collate "C"; - relname | pg_get_indexdef --------------------+------------------------------------------------------------------------------ - idxpart1_abs_idx | CREATE INDEX idxpart1_abs_idx ON public.idxpart1 USING btree (abs(b)) - idxpart1_expr_idx | CREATE INDEX idxpart1_expr_idx ON public.idxpart1 USING btree (((b + 1))) - idxpart2_abs_idx | CREATE INDEX idxpart2_abs_idx ON public.idxpart2 USING btree (abs(b)) - idxpart2_expr_idx | CREATE INDEX idxpart2_expr_idx ON public.idxpart2 USING btree (((b + 1))) - idxpart_abs_idx | CREATE INDEX idxpart_abs_idx ON ONLY public.idxpart USING btree (abs(b)) - idxpart_expr_idx | CREATE INDEX idxpart_expr_idx ON ONLY public.idxpart USING btree (((b + 1))) -(6 rows) - -drop table idxpart; --- Verify that columns are mapped correctly for WHERE in a partial index -create table idxpart (col1 int, a int, col3 int, b int) partition by range (a); -alter table idxpart drop column col1, drop column col3; -create table idxpart1 (col1 int, col2 int, col3 int, col4 int, b int, a int); -alter table idxpart1 drop column col1, drop column col2, drop column col3, drop column col4; -alter table idxpart attach partition idxpart1 for values from (0) to (1000); -create table idxpart2 (col1 int, col2 int, b int, a int); -create index on idxpart2 (a) where b > 1000; -alter table idxpart2 drop column col1, drop column col2; -alter table idxpart attach partition idxpart2 for values from (1000) to (2000); -create index on idxpart (a) where b > 1000; -select c.relname, pg_get_indexdef(indexrelid) - from pg_class c join pg_index i on c.oid = i.indexrelid - where indrelid::regclass::text like 'idxpart%' - order by indexrelid::regclass::text collate "C"; - relname | pg_get_indexdef -----------------+------------------------------------------------------------------------------------ - idxpart1_a_idx | CREATE INDEX idxpart1_a_idx ON public.idxpart1 USING btree (a) WHERE (b > 1000) - idxpart2_a_idx | CREATE INDEX idxpart2_a_idx ON public.idxpart2 USING btree (a) WHERE (b > 1000) - idxpart_a_idx | 
CREATE INDEX idxpart_a_idx ON ONLY public.idxpart USING btree (a) WHERE (b > 1000) -(3 rows) - -drop table idxpart; --- Column number mapping: dropped columns in the partition -create table idxpart1 (drop_1 int, drop_2 int, col_keep int, drop_3 int); -alter table idxpart1 drop column drop_1; -alter table idxpart1 drop column drop_2; -alter table idxpart1 drop column drop_3; -create index on idxpart1 (col_keep); -create table idxpart (col_keep int) partition by range (col_keep); -create index on idxpart (col_keep); -alter table idxpart attach partition idxpart1 for values from (0) to (1000); -\d idxpart - Partitioned table "public.idxpart" - Column | Type | Collation | Nullable | Default -----------+---------+-----------+----------+--------- - col_keep | integer | | | -Partition key: RANGE (col_keep) -Indexes: - "idxpart_col_keep_idx" btree (col_keep) -Number of partitions: 1 (Use \d+ to list them.) - -\d idxpart1 - Table "public.idxpart1" - Column | Type | Collation | Nullable | Default -----------+---------+-----------+----------+--------- - col_keep | integer | | | -Partition of: idxpart FOR VALUES FROM (0) TO (1000) -Indexes: - "idxpart1_col_keep_idx" btree (col_keep) - -select attrelid::regclass, attname, attnum from pg_attribute - where attrelid::regclass::text like 'idxpart%' and attnum > 0 - order by attrelid::regclass, attnum; - attrelid | attname | attnum ------------------------+------------------------------+-------- - idxpart1 | ........pg.dropped.1........ | 1 - idxpart1 | ........pg.dropped.2........ | 2 - idxpart1 | col_keep | 3 - idxpart1 | ........pg.dropped.4........ | 4 - idxpart1_col_keep_idx | col_keep | 1 - idxpart | col_keep | 1 - idxpart_col_keep_idx | col_keep | 1 -(7 rows) - -drop table idxpart; --- Column number mapping: dropped columns in the parent table -create table idxpart(drop_1 int, drop_2 int, col_keep int, drop_3 int) partition by range (col_keep); -alter table idxpart drop column drop_1; -alter table idxpart drop column drop_2; -alter table idxpart drop column drop_3; -create table idxpart1 (col_keep int); -create index on idxpart1 (col_keep); -create index on idxpart (col_keep); -alter table idxpart attach partition idxpart1 for values from (0) to (1000); -\d idxpart - Partitioned table "public.idxpart" - Column | Type | Collation | Nullable | Default -----------+---------+-----------+----------+--------- - col_keep | integer | | | -Partition key: RANGE (col_keep) -Indexes: - "idxpart_col_keep_idx" btree (col_keep) -Number of partitions: 1 (Use \d+ to list them.) - -\d idxpart1 - Table "public.idxpart1" - Column | Type | Collation | Nullable | Default -----------+---------+-----------+----------+--------- - col_keep | integer | | | -Partition of: idxpart FOR VALUES FROM (0) TO (1000) -Indexes: - "idxpart1_col_keep_idx" btree (col_keep) - -select attrelid::regclass, attname, attnum from pg_attribute - where attrelid::regclass::text like 'idxpart%' and attnum > 0 - order by attrelid::regclass, attnum; - attrelid | attname | attnum ------------------------+------------------------------+-------- - idxpart | ........pg.dropped.1........ | 1 - idxpart | ........pg.dropped.2........ | 2 - idxpart | col_keep | 3 - idxpart | ........pg.dropped.4........ 
| 4 - idxpart1 | col_keep | 1 - idxpart1_col_keep_idx | col_keep | 1 - idxpart_col_keep_idx | col_keep | 1 -(7 rows) - -drop table idxpart; --- --- Constraint-related indexes --- --- Verify that it works to add primary key / unique to partitioned tables -create table idxpart (a int primary key, b int) partition by range (a); -\d idxpart - Partitioned table "public.idxpart" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | not null | - b | integer | | | -Partition key: RANGE (a) -Indexes: - "idxpart_pkey" PRIMARY KEY, btree (a) -Number of partitions: 0 - --- multiple primary key on child should fail -create table failpart partition of idxpart (b primary key) for values from (0) to (100); -ERROR: multiple primary keys for table "failpart" are not allowed -drop table idxpart; --- primary key on child is okay if there's no PK in the parent, though -create table idxpart (a int) partition by range (a); -create table idxpart1pk partition of idxpart (a primary key) for values from (0) to (100); -\d idxpart1pk - Table "public.idxpart1pk" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | not null | -Partition of: idxpart FOR VALUES FROM (0) TO (100) -Indexes: - "idxpart1pk_pkey" PRIMARY KEY, btree (a) - -drop table idxpart; --- Failing to use the full partition key is not allowed -create table idxpart (a int unique, b int) partition by range (a, b); -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: UNIQUE constraint on table "idxpart" lacks column "b" which is part of the partition key. -create table idxpart (a int, b int unique) partition by range (a, b); -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: UNIQUE constraint on table "idxpart" lacks column "a" which is part of the partition key. -create table idxpart (a int primary key, b int) partition by range (b, a); -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: PRIMARY KEY constraint on table "idxpart" lacks column "b" which is part of the partition key. -create table idxpart (a int, b int primary key) partition by range (b, a); -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: PRIMARY KEY constraint on table "idxpart" lacks column "a" which is part of the partition key. 
--- OK if you use them in some other order -create table idxpart (a int, b int, c text, primary key (a, b, c)) partition by range (b, c, a); -drop table idxpart; --- OK to add an exclusion constraint if partitioning by its equal column -create table idxpart (a int4range, exclude USING GIST (a with = )) partition by range (a); -drop table idxpart; --- OK more than one equal column -create table idxpart (a int4range, b int4range, exclude USING GIST (a with =, b with =)) partition by range (a, b); -drop table idxpart; --- OK with more than one equal column: constraint is a proper superset of partition key -create table idxpart (a int4range, b int4range, exclude USING GIST (a with =, b with =)) partition by range (a); -drop table idxpart; --- Not OK more than one equal column: partition keys are a proper superset of constraint -create table idxpart (a int4range, b int4range, exclude USING GIST (a with = )) partition by range (a, b); -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: EXCLUDE constraint on table "idxpart" lacks column "b" which is part of the partition key. --- Not OK with just -|- -create table idxpart (a int4range, exclude USING GIST (a with -|- )) partition by range (a); -ERROR: cannot match partition key to index on column "a" using non-equal operator "-|-" --- OK with equals and &&, and equals is the partition key -create table idxpart (a int4range, b int4range, exclude USING GIST (a with =, b with &&)) partition by range (a); -drop table idxpart; --- Not OK with equals and &&, and equals is not the partition key -create table idxpart (a int4range, b int4range, c int4range, exclude USING GIST (b with =, c with &&)) partition by range (a); -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: EXCLUDE constraint on table "idxpart" lacks column "a" which is part of the partition key. --- OK more than one equal column and a && column -create table idxpart (a int4range, b int4range, c int4range, exclude USING GIST (a with =, b with =, c with &&)) partition by range (a, b); -drop table idxpart; --- no expressions in partition key for PK/UNIQUE -create table idxpart (a int primary key, b int) partition by range ((b + a)); -ERROR: unsupported PRIMARY KEY constraint with partition key definition -DETAIL: PRIMARY KEY constraints cannot be used when partition keys include expressions. -create table idxpart (a int unique, b int) partition by range ((b + a)); -ERROR: unsupported UNIQUE constraint with partition key definition -DETAIL: UNIQUE constraints cannot be used when partition keys include expressions. --- use ALTER TABLE to add a primary key -create table idxpart (a int, b int, c text) partition by range (a, b); -alter table idxpart add primary key (a); -- not an incomplete one though -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: PRIMARY KEY constraint on table "idxpart" lacks column "b" which is part of the partition key. 
-alter table idxpart add primary key (a, b); -- this works -\d idxpart - Partitioned table "public.idxpart" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | not null | - b | integer | | not null | - c | text | | | -Partition key: RANGE (a, b) -Indexes: - "idxpart_pkey" PRIMARY KEY, btree (a, b) -Number of partitions: 0 - -create table idxpart1 partition of idxpart for values from (0, 0) to (1000, 1000); -\d idxpart1 - Table "public.idxpart1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | not null | - b | integer | | not null | - c | text | | | -Partition of: idxpart FOR VALUES FROM (0, 0) TO (1000, 1000) -Indexes: - "idxpart1_pkey" PRIMARY KEY, btree (a, b) - -drop table idxpart; --- use ALTER TABLE to add a unique constraint -create table idxpart (a int, b int) partition by range (a, b); -alter table idxpart add unique (a); -- not an incomplete one though -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: UNIQUE constraint on table "idxpart" lacks column "b" which is part of the partition key. -alter table idxpart add unique (b, a); -- this works -\d idxpart - Partitioned table "public.idxpart" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Partition key: RANGE (a, b) -Indexes: - "idxpart_b_a_key" UNIQUE CONSTRAINT, btree (b, a) -Number of partitions: 0 - -drop table idxpart; --- Exclusion constraints can be added if partitioning by their equal column -create table idxpart (a int4range, b int4range) partition by range (a); -alter table idxpart add exclude USING GIST (a with =); -drop table idxpart; --- OK more than one equal column -create table idxpart (a int4range, b int4range) partition by range (a, b); -alter table idxpart add exclude USING GIST (a with =, b with =); -drop table idxpart; --- OK with more than one equal column: constraint is a proper superset of partition key -create table idxpart (a int4range, b int4range) partition by range (a); -alter table idxpart add exclude USING GIST (a with =, b with =); -drop table idxpart; --- Not OK more than one equal column: partition keys are a proper superset of constraint -create table idxpart (a int4range, b int4range) partition by range (a, b); -alter table idxpart add exclude USING GIST (a with =); -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: EXCLUDE constraint on table "idxpart" lacks column "b" which is part of the partition key. 
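The exclusion-constraint variants follow the same principle: every partition key column must appear in the constraint and be compared with the equality operator, while additional columns may use any operator the index method supports. A small sketch under those assumptions (excl_demo is a made-up name, not from the test):

-- hypothetical illustration, not taken from the test file
CREATE TABLE excl_demo (a int4range, b int4range) PARTITION BY RANGE (a);
ALTER TABLE excl_demo ADD EXCLUDE USING gist (a WITH =, b WITH &&);  -- ok: key column "a" uses =
DROP TABLE excl_demo;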
-drop table idxpart; --- Not OK with just -|- -create table idxpart (a int4range, b int4range) partition by range (a, b); -alter table idxpart add exclude USING GIST (a with -|-); -ERROR: cannot match partition key to index on column "a" using non-equal operator "-|-" -drop table idxpart; --- OK with equals and &&, and equals is the partition key -create table idxpart (a int4range, b int4range) partition by range (a); -alter table idxpart add exclude USING GIST (a with =, b with &&); -drop table idxpart; --- Not OK with equals and &&, and equals is not the partition key -create table idxpart (a int4range, b int4range, c int4range) partition by range (a); -alter table idxpart add exclude USING GIST (b with =, c with &&); -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: EXCLUDE constraint on table "idxpart" lacks column "a" which is part of the partition key. -drop table idxpart; --- OK more than one equal column and a && column -create table idxpart (a int4range, b int4range, c int4range) partition by range (a, b); -alter table idxpart add exclude USING GIST (a with =, b with =, c with &&); -drop table idxpart; --- When (sub)partitions are created, they also contain the constraint -create table idxpart (a int, b int, primary key (a, b)) partition by range (a, b); -create table idxpart1 partition of idxpart for values from (1, 1) to (10, 10); -create table idxpart2 partition of idxpart for values from (10, 10) to (20, 20) - partition by range (b); -create table idxpart21 partition of idxpart2 for values from (10) to (15); -create table idxpart22 partition of idxpart2 for values from (15) to (20); -create table idxpart3 (b int not null, a int not null); -alter table idxpart attach partition idxpart3 for values from (20, 20) to (30, 30); -select conname, contype, conrelid::regclass, conindid::regclass, conkey - from pg_constraint where conrelid::regclass::text like 'idxpart%' - order by conrelid::regclass::text, conname; - conname | contype | conrelid | conindid | conkey ----------------------+---------+-----------+----------------+-------- - idxpart_pkey | p | idxpart | idxpart_pkey | {1,2} - idxpart1_pkey | p | idxpart1 | idxpart1_pkey | {1,2} - idxpart2_pkey | p | idxpart2 | idxpart2_pkey | {1,2} - idxpart21_pkey | p | idxpart21 | idxpart21_pkey | {1,2} - idxpart22_pkey | p | idxpart22 | idxpart22_pkey | {1,2} - idxpart3_a_not_null | n | idxpart3 | - | {2} - idxpart3_b_not_null | n | idxpart3 | - | {1} - idxpart3_pkey | p | idxpart3 | idxpart3_pkey | {2,1} -(8 rows) - -drop table idxpart; --- Verify that multi-layer partitioning honors the requirement that all --- columns in the partition key must appear in primary/unique key -create table idxpart (a int, b int, primary key (a)) partition by range (a); -create table idxpart2 partition of idxpart -for values from (0) to (1000) partition by range (b); -- fail -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: PRIMARY KEY constraint on table "idxpart2" lacks column "b" which is part of the partition key. 
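The multi-level case above shows that the rule is re-checked at every level of the partition tree: a child that is itself partitioned must have its own partition key covered by the constraint it inherits from the parent. A brief sketch of the working shape (ml_demo is illustrative only):

-- hypothetical illustration, not taken from the test file
CREATE TABLE ml_demo (a int, b int, PRIMARY KEY (a, b)) PARTITION BY RANGE (a);
CREATE TABLE ml_demo_1 PARTITION OF ml_demo
  FOR VALUES FROM (0) TO (10) PARTITION BY RANGE (b);  -- ok: inherited PK (a, b) covers key (b)
DROP TABLE ml_demo;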
-drop table idxpart; --- Ditto for the ATTACH PARTITION case -create table idxpart (a int unique, b int) partition by range (a); -create table idxpart1 (a int not null, b int, unique (a, b)) - partition by range (a, b); -alter table idxpart attach partition idxpart1 for values from (1) to (1000); -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: UNIQUE constraint on table "idxpart1" lacks column "b" which is part of the partition key. -DROP TABLE idxpart, idxpart1; --- Multi-layer partitioning works correctly in this case: -create table idxpart (a int, b int, primary key (a, b)) partition by range (a); -create table idxpart2 partition of idxpart for values from (0) to (1000) partition by range (b); -create table idxpart21 partition of idxpart2 for values from (0) to (1000); -select conname, contype, conrelid::regclass, conindid::regclass, conkey - from pg_constraint where conrelid::regclass::text like 'idxpart%' - order by conname; - conname | contype | conrelid | conindid | conkey -----------------+---------+-----------+----------------+-------- - idxpart21_pkey | p | idxpart21 | idxpart21_pkey | {1,2} - idxpart2_pkey | p | idxpart2 | idxpart2_pkey | {1,2} - idxpart_pkey | p | idxpart | idxpart_pkey | {1,2} -(3 rows) - -drop table idxpart; --- If a partitioned table has a unique/PK constraint, then it's not possible --- to drop the corresponding constraint in the children; nor it's possible --- to drop the indexes individually. Dropping the constraint in the parent --- gets rid of the lot. -create table idxpart (i int) partition by hash (i); -create table idxpart0 partition of idxpart (i) for values with (modulus 2, remainder 0); -create table idxpart1 partition of idxpart (i) for values with (modulus 2, remainder 1); -alter table idxpart0 add primary key(i); -alter table idxpart add primary key(i); -select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, - conname, conislocal, coninhcount, connoinherit, convalidated - from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) - left join pg_constraint con on (idx.indexrelid = con.conindid) - where indrelid::regclass::text like 'idxpart%' - order by indexrelid::regclass::text collate "C"; - indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated -----------+---------------+--------------+------------+---------------+------------+-------------+--------------+-------------- - idxpart0 | idxpart0_pkey | idxpart_pkey | t | idxpart0_pkey | f | 1 | t | t - idxpart1 | idxpart1_pkey | idxpart_pkey | t | idxpart1_pkey | f | 1 | f | t - idxpart | idxpart_pkey | | t | idxpart_pkey | t | 0 | t | t -(3 rows) - -drop index idxpart0_pkey; -- fail -ERROR: cannot drop index idxpart0_pkey because index idxpart_pkey requires it -HINT: You can drop index idxpart_pkey instead. -drop index idxpart1_pkey; -- fail -ERROR: cannot drop index idxpart1_pkey because index idxpart_pkey requires it -HINT: You can drop index idxpart_pkey instead. 
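As the hints say, a partition's copy of an inherited index or constraint cannot be dropped on its own; it becomes independently droppable only after the partition is detached, or it disappears when the parent's constraint is dropped (both paths are exercised below and later in this file). A sketch of the detach path (detach_demo is a made-up name):

-- hypothetical illustration, not taken from the test file
CREATE TABLE detach_demo (a int PRIMARY KEY) PARTITION BY LIST (a);
CREATE TABLE detach_demo_1 PARTITION OF detach_demo FOR VALUES IN (1);
ALTER TABLE detach_demo_1 DROP CONSTRAINT detach_demo_1_pkey;  -- rejected while still attached
ALTER TABLE detach_demo DETACH PARTITION detach_demo_1;
ALTER TABLE detach_demo_1 DROP CONSTRAINT detach_demo_1_pkey;  -- allowed after detach
DROP TABLE detach_demo, detach_demo_1;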
-alter table idxpart0 drop constraint idxpart0_pkey; -- fail -ERROR: cannot drop inherited constraint "idxpart0_pkey" of relation "idxpart0" -alter table idxpart1 drop constraint idxpart1_pkey; -- fail -ERROR: cannot drop inherited constraint "idxpart1_pkey" of relation "idxpart1" -alter table idxpart drop constraint idxpart_pkey; -- ok -select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, - conname, conislocal, coninhcount, connoinherit, convalidated - from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) - left join pg_constraint con on (idx.indexrelid = con.conindid) - where indrelid::regclass::text like 'idxpart%' - order by indexrelid::regclass::text collate "C"; - indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated -----------+------------+-----------+------------+---------+------------+-------------+--------------+-------------- -(0 rows) - -drop table idxpart; --- If the partition to be attached already has a primary key, fail if --- it doesn't match the parent's PK. -CREATE TABLE idxpart (c1 INT PRIMARY KEY, c2 INT, c3 VARCHAR(10)) PARTITION BY RANGE(c1); -CREATE TABLE idxpart1 (LIKE idxpart); -ALTER TABLE idxpart1 ADD PRIMARY KEY (c1, c2); -ALTER TABLE idxpart ATTACH PARTITION idxpart1 FOR VALUES FROM (100) TO (200); -ERROR: multiple primary keys for table "idxpart1" are not allowed -DROP TABLE idxpart, idxpart1; --- Ditto if there is some distance between the PKs (subpartitioning) -create table idxpart (a int, b int, primary key (a)) partition by range (a); -create table idxpart1 (a int not null, b int) partition by range (a); -create table idxpart11 (a int not null, b int primary key); -alter table idxpart1 attach partition idxpart11 for values from (0) to (1000); -alter table idxpart attach partition idxpart1 for values from (0) to (10000); -ERROR: multiple primary keys for table "idxpart11" are not allowed -drop table idxpart, idxpart1, idxpart11; --- If a partitioned table has a constraint whose index is not valid, --- attaching a missing partition makes it valid. 
-create table idxpart (a int) partition by range (a); -create table idxpart0 (like idxpart); -alter table idxpart0 add primary key (a); -alter table idxpart attach partition idxpart0 for values from (0) to (1000); -alter table only idxpart add primary key (a); -select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, - conname, conislocal, coninhcount, connoinherit, convalidated - from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) - left join pg_constraint con on (idx.indexrelid = con.conindid) - where indrelid::regclass::text like 'idxpart%' - order by indexrelid::regclass::text collate "C"; - indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated -----------+---------------+-----------+------------+---------------+------------+-------------+--------------+-------------- - idxpart0 | idxpart0_pkey | | t | idxpart0_pkey | t | 0 | t | t - idxpart | idxpart_pkey | | f | idxpart_pkey | t | 0 | t | t -(2 rows) - -alter index idxpart_pkey attach partition idxpart0_pkey; -select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, - conname, conislocal, coninhcount, connoinherit, convalidated - from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) - left join pg_constraint con on (idx.indexrelid = con.conindid) - where indrelid::regclass::text like 'idxpart%' - order by indexrelid::regclass::text collate "C"; - indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated -----------+---------------+--------------+------------+---------------+------------+-------------+--------------+-------------- - idxpart0 | idxpart0_pkey | idxpart_pkey | t | idxpart0_pkey | f | 1 | t | t - idxpart | idxpart_pkey | | t | idxpart_pkey | t | 0 | t | t -(2 rows) - -drop table idxpart; --- Related to the above scenario: ADD PRIMARY KEY on the parent mustn't --- automatically propagate NOT NULL to child columns. -create table idxpart (a int) partition by range (a); -create table idxpart0 (like idxpart); -alter table idxpart0 add unique (a); -alter table idxpart attach partition idxpart0 default; -alter table only idxpart add primary key (a); -- works, but idxpart0.a is nullable -\d idxpart0 - Table "public.idxpart0" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | -Partition of: idxpart DEFAULT -Indexes: - "idxpart0_a_key" UNIQUE CONSTRAINT, btree (a) - -alter index idxpart_pkey attach partition idxpart0_a_key; -- fails, lacks NOT NULL -ERROR: invalid primary key definition -DETAIL: Column "a" of relation "idxpart0" is not marked NOT NULL. -alter table idxpart0 alter column a set not null; -alter index idxpart_pkey attach partition idxpart0_a_key; -alter table idxpart0 alter column a drop not null; -- fail, pkey needs it -ERROR: column "a" is marked NOT NULL in parent table -drop table idxpart; --- if a partition has a unique index without a constraint, does not attach --- automatically; creates a new index instead. 
-create table idxpart (a int, b int) partition by range (a); -create table idxpart1 (a int not null, b int); -create unique index on idxpart1 (a); -alter table idxpart add primary key (a); -alter table idxpart attach partition idxpart1 for values from (1) to (1000); -select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, - conname, conislocal, coninhcount, connoinherit, convalidated - from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) - left join pg_constraint con on (idx.indexrelid = con.conindid) - where indrelid::regclass::text like 'idxpart%' - order by indexrelid::regclass::text collate "C"; - indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated -----------+----------------+--------------+------------+---------------+------------+-------------+--------------+-------------- - idxpart1 | idxpart1_a_idx | | t | | | | | - idxpart1 | idxpart1_pkey | idxpart_pkey | t | idxpart1_pkey | f | 1 | f | t - idxpart | idxpart_pkey | | t | idxpart_pkey | t | 0 | t | t -(3 rows) - -drop table idxpart; --- Can't attach an index without a corresponding constraint -create table idxpart (a int, b int) partition by range (a); -create table idxpart1 (a int not null, b int); -create unique index on idxpart1 (a); -alter table idxpart attach partition idxpart1 for values from (1) to (1000); -alter table only idxpart add primary key (a); -alter index idxpart_pkey attach partition idxpart1_a_idx; -- fail -ERROR: cannot attach index "idxpart1_a_idx" as a partition of index "idxpart_pkey" -DETAIL: The index "idxpart_pkey" belongs to a constraint in table "idxpart" but no constraint exists for index "idxpart1_a_idx". -drop table idxpart; --- Test that unique constraints are working -create table idxpart (a int, b text, primary key (a, b)) partition by range (a); -create table idxpart1 partition of idxpart for values from (0) to (100000); -create table idxpart2 (c int, like idxpart); -insert into idxpart2 (c, a, b) values (42, 572814, 'inserted first'); -alter table idxpart2 drop column c; -create unique index on idxpart (a); -alter table idxpart attach partition idxpart2 for values from (100000) to (1000000); -insert into idxpart values (0, 'zero'), (42, 'life'), (2^16, 'sixteen'); -insert into idxpart select 2^g, format('two to power of %s', g) from generate_series(15, 17) g; -ERROR: duplicate key value violates unique constraint "idxpart1_a_idx" -DETAIL: Key (a)=(65536) already exists. -insert into idxpart values (16, 'sixteen'); -insert into idxpart (b, a) values ('one', 142857), ('two', 285714); -insert into idxpart select a * 2, b || b from idxpart where a between 2^16 and 2^19; -ERROR: duplicate key value violates unique constraint "idxpart2_a_idx" -DETAIL: Key (a)=(285714) already exists. -insert into idxpart values (572814, 'five'); -ERROR: duplicate key value violates unique constraint "idxpart2_a_idx" -DETAIL: Key (a)=(572814) already exists. 
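The duplicate-key errors above are raised by the individual partitions' indexes (idxpart1_a_idx, idxpart2_a_idx): because a unique key on a partitioned table must contain the partition key, two equal keys always route to the same partition, so per-partition indexes are enough to enforce table-wide uniqueness. A compact sketch of that routing (uniq_demo is illustrative, not from the test):

-- hypothetical illustration, not taken from the test file
CREATE TABLE uniq_demo (a int UNIQUE) PARTITION BY RANGE (a);
CREATE TABLE uniq_demo_1 PARTITION OF uniq_demo FOR VALUES FROM (0) TO (100);
INSERT INTO uniq_demo VALUES (42);
INSERT INTO uniq_demo VALUES (42);  -- duplicate-key error from uniq_demo_1's unique index
DROP TABLE uniq_demo;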
-insert into idxpart values (857142, 'six'); -select tableoid::regclass, * from idxpart order by a; - tableoid | a | b -----------+--------+---------------- - idxpart1 | 0 | zero - idxpart1 | 16 | sixteen - idxpart1 | 42 | life - idxpart1 | 65536 | sixteen - idxpart2 | 142857 | one - idxpart2 | 285714 | two - idxpart2 | 572814 | inserted first - idxpart2 | 857142 | six -(8 rows) - -drop table idxpart; --- Test some other non-btree index types -create table idxpart (a int, b text, c int[]) partition by range (a); -create table idxpart1 partition of idxpart for values from (0) to (100000); -set enable_seqscan to off; -create index idxpart_brin on idxpart using brin(b); -explain (costs off) select * from idxpart where b = 'abcd'; - QUERY PLAN -------------------------------------------- - Bitmap Heap Scan on idxpart1 idxpart - Recheck Cond: (b = 'abcd'::text) - -> Bitmap Index Scan on idxpart1_b_idx - Index Cond: (b = 'abcd'::text) -(4 rows) - -drop index idxpart_brin; -create index idxpart_spgist on idxpart using spgist(b); -explain (costs off) select * from idxpart where b = 'abcd'; - QUERY PLAN -------------------------------------------- - Bitmap Heap Scan on idxpart1 idxpart - Recheck Cond: (b = 'abcd'::text) - -> Bitmap Index Scan on idxpart1_b_idx - Index Cond: (b = 'abcd'::text) -(4 rows) - -drop index idxpart_spgist; -create index idxpart_gin on idxpart using gin(c); -explain (costs off) select * from idxpart where c @> array[42]; - QUERY PLAN ----------------------------------------------- - Bitmap Heap Scan on idxpart1 idxpart - Recheck Cond: (c @> '{42}'::integer[]) - -> Bitmap Index Scan on idxpart1_c_idx - Index Cond: (c @> '{42}'::integer[]) -(4 rows) - -drop index idxpart_gin; -reset enable_seqscan; -drop table idxpart; --- intentionally leave some objects around -create table idxpart (a int) partition by range (a); -create table idxpart1 partition of idxpart for values from (0) to (100); -create table idxpart2 partition of idxpart for values from (100) to (1000) - partition by range (a); -create table idxpart21 partition of idxpart2 for values from (100) to (200); -create table idxpart22 partition of idxpart2 for values from (200) to (300); -create index on idxpart22 (a); -create index on only idxpart2 (a); -alter index idxpart2_a_idx attach partition idxpart22_a_idx; -create index on idxpart (a); -create table idxpart_another (a int, b int, primary key (a, b)) partition by range (a); -create table idxpart_another_1 partition of idxpart_another for values from (0) to (100); -create table idxpart3 (c int, b int, a int) partition by range (a); -alter table idxpart3 drop column b, drop column c; -create table idxpart31 partition of idxpart3 for values from (1000) to (1200); -create table idxpart32 partition of idxpart3 for values from (1200) to (1400); -alter table idxpart attach partition idxpart3 for values from (1000) to (2000); --- More objects intentionally left behind, to verify some pg_dump/pg_upgrade --- behavior; see https://postgr.es/m/20190321204928.GA17535@alvherre.pgsql -create schema regress_indexing; -set search_path to regress_indexing; -create table pk (a int primary key) partition by range (a); -create table pk1 partition of pk for values from (0) to (1000); -create table pk2 (b int, a int); -alter table pk2 drop column b; -alter table pk2 alter a set not null; -alter table pk attach partition pk2 for values from (1000) to (2000); -create table pk3 partition of pk for values from (2000) to (3000); -create table pk4 (like pk); -alter table pk attach partition 
pk4 for values from (3000) to (4000); -create table pk5 (like pk) partition by range (a); -create table pk51 partition of pk5 for values from (4000) to (4500); -create table pk52 partition of pk5 for values from (4500) to (5000); -alter table pk attach partition pk5 for values from (4000) to (5000); -reset search_path; --- Test that covering partitioned indexes work in various cases -create table covidxpart (a int, b int) partition by list (a); -create unique index on covidxpart (a) include (b); -create table covidxpart1 partition of covidxpart for values in (1); -create table covidxpart2 partition of covidxpart for values in (2); -insert into covidxpart values (1, 1); -insert into covidxpart values (1, 1); -ERROR: duplicate key value violates unique constraint "covidxpart1_a_b_idx" -DETAIL: Key (a)=(1) already exists. -create table covidxpart3 (b int, c int, a int); -alter table covidxpart3 drop c; -alter table covidxpart attach partition covidxpart3 for values in (3); -insert into covidxpart values (3, 1); -insert into covidxpart values (3, 1); -ERROR: duplicate key value violates unique constraint "covidxpart3_a_b_idx" -DETAIL: Key (a)=(3) already exists. -create table covidxpart4 (b int, a int); -create unique index on covidxpart4 (a) include (b); -create unique index on covidxpart4 (a); -alter table covidxpart attach partition covidxpart4 for values in (4); -insert into covidxpart values (4, 1); -insert into covidxpart values (4, 1); -ERROR: duplicate key value violates unique constraint "covidxpart4_a_b_idx" -DETAIL: Key (a)=(4) already exists. -create unique index on covidxpart (b) include (a); -- should fail -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: UNIQUE constraint on table "covidxpart" lacks column "a" which is part of the partition key. --- check that detaching a partition also detaches the primary key constraint -create table parted_pk_detach_test (a int primary key) partition by list (a); -create table parted_pk_detach_test1 partition of parted_pk_detach_test for values in (1); -alter table parted_pk_detach_test1 drop constraint parted_pk_detach_test1_pkey; -- should fail -ERROR: cannot drop inherited constraint "parted_pk_detach_test1_pkey" of relation "parted_pk_detach_test1" -alter table parted_pk_detach_test detach partition parted_pk_detach_test1; -alter table parted_pk_detach_test1 drop constraint parted_pk_detach_test1_pkey; -drop table parted_pk_detach_test, parted_pk_detach_test1; -create table parted_uniq_detach_test (a int unique) partition by list (a); -create table parted_uniq_detach_test1 partition of parted_uniq_detach_test for values in (1); -alter table parted_uniq_detach_test1 drop constraint parted_uniq_detach_test1_a_key; -- should fail -ERROR: cannot drop inherited constraint "parted_uniq_detach_test1_a_key" of relation "parted_uniq_detach_test1" -alter table parted_uniq_detach_test detach partition parted_uniq_detach_test1; -alter table parted_uniq_detach_test1 drop constraint parted_uniq_detach_test1_a_key; -drop table parted_uniq_detach_test, parted_uniq_detach_test1; --- check that dropping a column takes with it any partitioned indexes --- depending on it. -create table parted_index_col_drop(a int, b int, c int) - partition by list (a); -create table parted_index_col_drop1 partition of parted_index_col_drop - for values in (1) partition by list (a); --- leave this partition without children. 
-create table parted_index_col_drop2 partition of parted_index_col_drop - for values in (2) partition by list (a); -create table parted_index_col_drop11 partition of parted_index_col_drop1 - for values in (1); -create index on parted_index_col_drop (b); -create index on parted_index_col_drop (c); -create index on parted_index_col_drop (b, c); -alter table parted_index_col_drop drop column c; -\d parted_index_col_drop - Partitioned table "public.parted_index_col_drop" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Partition key: LIST (a) -Indexes: - "parted_index_col_drop_b_idx" btree (b) -Number of partitions: 2 (Use \d+ to list them.) - -\d parted_index_col_drop1 - Partitioned table "public.parted_index_col_drop1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Partition of: parted_index_col_drop FOR VALUES IN (1) -Partition key: LIST (a) -Indexes: - "parted_index_col_drop1_b_idx" btree (b) -Number of partitions: 1 (Use \d+ to list them.) - -\d parted_index_col_drop2 - Partitioned table "public.parted_index_col_drop2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Partition of: parted_index_col_drop FOR VALUES IN (2) -Partition key: LIST (a) -Indexes: - "parted_index_col_drop2_b_idx" btree (b) -Number of partitions: 0 - -\d parted_index_col_drop11 - Table "public.parted_index_col_drop11" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Partition of: parted_index_col_drop1 FOR VALUES IN (1) -Indexes: - "parted_index_col_drop11_b_idx" btree (b) - -drop table parted_index_col_drop; --- Check that invalid indexes are not selected when attaching a partition. -create table parted_inval_tab (a int) partition by range (a); -create index parted_inval_idx on parted_inval_tab (a); -create table parted_inval_tab_1 (a int) partition by range (a); -create table parted_inval_tab_1_1 partition of parted_inval_tab_1 - for values from (0) to (10); -create table parted_inval_tab_1_2 partition of parted_inval_tab_1 - for values from (10) to (20); --- this creates an invalid index. -create index parted_inval_ixd_1 on only parted_inval_tab_1 (a); --- this creates new indexes for all the partitions of parted_inval_tab_1, --- discarding the invalid index created previously as what is chosen. 
-alter table parted_inval_tab attach partition parted_inval_tab_1 - for values from (1) to (100); -select indexrelid::regclass, indisvalid, - indrelid::regclass, inhparent::regclass - from pg_index idx left join - pg_inherits inh on (idx.indexrelid = inh.inhrelid) - where indexrelid::regclass::text like 'parted_inval%' - order by indexrelid::regclass::text collate "C"; - indexrelid | indisvalid | indrelid | inhparent -----------------------------+------------+----------------------+-------------------------- - parted_inval_idx | t | parted_inval_tab | - parted_inval_ixd_1 | f | parted_inval_tab_1 | - parted_inval_tab_1_1_a_idx | t | parted_inval_tab_1_1 | parted_inval_tab_1_a_idx - parted_inval_tab_1_2_a_idx | t | parted_inval_tab_1_2 | parted_inval_tab_1_a_idx - parted_inval_tab_1_a_idx | t | parted_inval_tab_1 | parted_inval_idx -(5 rows) - -drop table parted_inval_tab; --- Check setup of indisvalid across a complex partition tree on index --- creation. If one index in a partition index is invalid, so should its --- partitioned index. -create table parted_isvalid_tab (a int, b int) partition by range (a); -create table parted_isvalid_tab_1 partition of parted_isvalid_tab - for values from (1) to (10) partition by range (a); -create table parted_isvalid_tab_2 partition of parted_isvalid_tab - for values from (10) to (20) partition by range (a); -create table parted_isvalid_tab_11 partition of parted_isvalid_tab_1 - for values from (1) to (5); -create table parted_isvalid_tab_12 partition of parted_isvalid_tab_1 - for values from (5) to (10); --- create an invalid index on one of the partitions. -insert into parted_isvalid_tab_11 values (1, 0); -create index concurrently parted_isvalid_idx_11 on parted_isvalid_tab_11 ((a/b)); -ERROR: division by zero --- The previous invalid index is selected, invalidating all the indexes up to --- the top-most parent. -create index parted_isvalid_idx on parted_isvalid_tab ((a/b)); -select indexrelid::regclass, indisvalid, - indrelid::regclass, inhparent::regclass - from pg_index idx left join - pg_inherits inh on (idx.indexrelid = inh.inhrelid) - where indexrelid::regclass::text like 'parted_isvalid%' - order by indexrelid::regclass::text collate "C"; - indexrelid | indisvalid | indrelid | inhparent ---------------------------------+------------+-----------------------+------------------------------- - parted_isvalid_idx | f | parted_isvalid_tab | - parted_isvalid_idx_11 | f | parted_isvalid_tab_11 | parted_isvalid_tab_1_expr_idx - parted_isvalid_tab_12_expr_idx | t | parted_isvalid_tab_12 | parted_isvalid_tab_1_expr_idx - parted_isvalid_tab_1_expr_idx | f | parted_isvalid_tab_1 | parted_isvalid_idx - parted_isvalid_tab_2_expr_idx | t | parted_isvalid_tab_2 | parted_isvalid_idx -(5 rows) - -drop table parted_isvalid_tab; --- Check state of replica indexes when attaching a partition. -begin; -create table parted_replica_tab (id int not null) partition by range (id); -create table parted_replica_tab_1 partition of parted_replica_tab - for values from (1) to (10) partition by range (id); -create table parted_replica_tab_11 partition of parted_replica_tab_1 - for values from (1) to (5); -create unique index parted_replica_idx - on only parted_replica_tab using btree (id); -create unique index parted_replica_idx_1 - on only parted_replica_tab_1 using btree (id); --- This triggers an update of pg_index.indisreplident for parted_replica_idx. 
-alter table only parted_replica_tab_1 replica identity - using index parted_replica_idx_1; -create unique index parted_replica_idx_11 on parted_replica_tab_11 USING btree (id); -select indexrelid::regclass, indisvalid, indisreplident, - indrelid::regclass, inhparent::regclass - from pg_index idx left join - pg_inherits inh on (idx.indexrelid = inh.inhrelid) - where indexrelid::regclass::text like 'parted_replica%' - order by indexrelid::regclass::text collate "C"; - indexrelid | indisvalid | indisreplident | indrelid | inhparent ------------------------+------------+----------------+-----------------------+----------- - parted_replica_idx | f | f | parted_replica_tab | - parted_replica_idx_1 | f | t | parted_replica_tab_1 | - parted_replica_idx_11 | t | f | parted_replica_tab_11 | -(3 rows) - --- parted_replica_idx is not valid yet here, because parted_replica_idx_1 --- is not valid. -alter index parted_replica_idx ATTACH PARTITION parted_replica_idx_1; -select indexrelid::regclass, indisvalid, indisreplident, - indrelid::regclass, inhparent::regclass - from pg_index idx left join - pg_inherits inh on (idx.indexrelid = inh.inhrelid) - where indexrelid::regclass::text like 'parted_replica%' - order by indexrelid::regclass::text collate "C"; - indexrelid | indisvalid | indisreplident | indrelid | inhparent ------------------------+------------+----------------+-----------------------+-------------------- - parted_replica_idx | f | f | parted_replica_tab | - parted_replica_idx_1 | f | t | parted_replica_tab_1 | parted_replica_idx - parted_replica_idx_11 | t | f | parted_replica_tab_11 | -(3 rows) - --- parted_replica_idx becomes valid here. -alter index parted_replica_idx_1 ATTACH PARTITION parted_replica_idx_11; -alter table only parted_replica_tab_1 replica identity - using index parted_replica_idx_1; -commit; -select indexrelid::regclass, indisvalid, indisreplident, - indrelid::regclass, inhparent::regclass - from pg_index idx left join - pg_inherits inh on (idx.indexrelid = inh.inhrelid) - where indexrelid::regclass::text like 'parted_replica%' - order by indexrelid::regclass::text collate "C"; - indexrelid | indisvalid | indisreplident | indrelid | inhparent ------------------------+------------+----------------+-----------------------+---------------------- - parted_replica_idx | t | f | parted_replica_tab | - parted_replica_idx_1 | t | t | parted_replica_tab_1 | parted_replica_idx - parted_replica_idx_11 | t | f | parted_replica_tab_11 | parted_replica_idx_1 -(3 rows) - -drop table parted_replica_tab; +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. 
+connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/partition_aggregate.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/partition_aggregate.out --- /tmp/cirrus-ci-build/src/test/regress/expected/partition_aggregate.out 2024-03-13 23:12:37.625928000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/partition_aggregate.out 2024-03-13 23:14:45.710783000 +0000 @@ -339,1182 +339,7 @@ -- ROLLUP, partitionwise aggregation does not apply EXPLAIN (COSTS OFF) SELECT c, sum(a) FROM pagg_tab GROUP BY rollup(c) ORDER BY 1, 2; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: pagg_tab.c, (sum(pagg_tab.a)) - -> MixedAggregate - Hash Key: pagg_tab.c - Group Key: () - -> Append - -> Seq Scan on pagg_tab_p1 pagg_tab_1 - -> Seq Scan on pagg_tab_p2 pagg_tab_2 - -> Seq Scan on pagg_tab_p3 pagg_tab_3 -(9 rows) - --- ORDERED SET within the aggregate. --- Full aggregation; since all the rows that belong to the same group come --- from the same partition, having an ORDER BY within the aggregate doesn't --- make any difference. -EXPLAIN (COSTS OFF) -SELECT c, sum(b order by a) FROM pagg_tab GROUP BY c ORDER BY 1, 2; - QUERY PLAN ---------------------------------------------------------------- - Sort - Sort Key: pagg_tab.c, (sum(pagg_tab.b ORDER BY pagg_tab.a)) - -> Append - -> GroupAggregate - Group Key: pagg_tab.c - -> Sort - Sort Key: pagg_tab.c, pagg_tab.a - -> Seq Scan on pagg_tab_p1 pagg_tab - -> GroupAggregate - Group Key: pagg_tab_1.c - -> Sort - Sort Key: pagg_tab_1.c, pagg_tab_1.a - -> Seq Scan on pagg_tab_p2 pagg_tab_1 - -> GroupAggregate - Group Key: pagg_tab_2.c - -> Sort - Sort Key: pagg_tab_2.c, pagg_tab_2.a - -> Seq Scan on pagg_tab_p3 pagg_tab_2 -(18 rows) - --- Since GROUP BY clause does not match with PARTITION KEY; we need to do --- partial aggregation. However, ORDERED SET are not partial safe and thus --- partitionwise aggregation plan is not generated. -EXPLAIN (COSTS OFF) -SELECT a, sum(b order by a) FROM pagg_tab GROUP BY a ORDER BY 1, 2; - QUERY PLAN ---------------------------------------------------------------- - Sort - Sort Key: pagg_tab.a, (sum(pagg_tab.b ORDER BY pagg_tab.a)) - -> GroupAggregate - Group Key: pagg_tab.a - -> Sort - Sort Key: pagg_tab.a - -> Append - -> Seq Scan on pagg_tab_p1 pagg_tab_1 - -> Seq Scan on pagg_tab_p2 pagg_tab_2 - -> Seq Scan on pagg_tab_p3 pagg_tab_3 -(10 rows) - --- JOIN query -CREATE TABLE pagg_tab1(x int, y int) PARTITION BY RANGE(x); -CREATE TABLE pagg_tab1_p1 PARTITION OF pagg_tab1 FOR VALUES FROM (0) TO (10); -CREATE TABLE pagg_tab1_p2 PARTITION OF pagg_tab1 FOR VALUES FROM (10) TO (20); -CREATE TABLE pagg_tab1_p3 PARTITION OF pagg_tab1 FOR VALUES FROM (20) TO (30); -CREATE TABLE pagg_tab2(x int, y int) PARTITION BY RANGE(y); -CREATE TABLE pagg_tab2_p1 PARTITION OF pagg_tab2 FOR VALUES FROM (0) TO (10); -CREATE TABLE pagg_tab2_p2 PARTITION OF pagg_tab2 FOR VALUES FROM (10) TO (20); -CREATE TABLE pagg_tab2_p3 PARTITION OF pagg_tab2 FOR VALUES FROM (20) TO (30); -INSERT INTO pagg_tab1 SELECT i % 30, i % 20 FROM generate_series(0, 299, 2) i; -INSERT INTO pagg_tab2 SELECT i % 20, i % 30 FROM generate_series(0, 299, 3) i; -ANALYZE pagg_tab1; -ANALYZE pagg_tab2; --- When GROUP BY clause matches; full aggregation is performed for each partition. 
-EXPLAIN (COSTS OFF) -SELECT t1.x, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------- - Sort - Sort Key: t1.x, (sum(t1.y)), (count(*)) - -> Append - -> HashAggregate - Group Key: t1.x - -> Hash Join - Hash Cond: (t1.x = t2.y) - -> Seq Scan on pagg_tab1_p1 t1 - -> Hash - -> Seq Scan on pagg_tab2_p1 t2 - -> HashAggregate - Group Key: t1_1.x - -> Hash Join - Hash Cond: (t1_1.x = t2_1.y) - -> Seq Scan on pagg_tab1_p2 t1_1 - -> Hash - -> Seq Scan on pagg_tab2_p2 t2_1 - -> HashAggregate - Group Key: t1_2.x - -> Hash Join - Hash Cond: (t2_2.y = t1_2.x) - -> Seq Scan on pagg_tab2_p3 t2_2 - -> Hash - -> Seq Scan on pagg_tab1_p3 t1_2 -(24 rows) - -SELECT t1.x, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; - x | sum | count -----+------+------- - 0 | 500 | 100 - 6 | 1100 | 100 - 12 | 700 | 100 - 18 | 1300 | 100 - 24 | 900 | 100 -(5 rows) - --- Check with whole-row reference; partitionwise aggregation does not apply -EXPLAIN (COSTS OFF) -SELECT t1.x, sum(t1.y), count(t1) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------- - Sort - Sort Key: t1.x, (sum(t1.y)), (count(((t1.*)::pagg_tab1))) - -> HashAggregate - Group Key: t1.x - -> Hash Join - Hash Cond: (t1.x = t2.y) - -> Append - -> Seq Scan on pagg_tab1_p1 t1_1 - -> Seq Scan on pagg_tab1_p2 t1_2 - -> Seq Scan on pagg_tab1_p3 t1_3 - -> Hash - -> Append - -> Seq Scan on pagg_tab2_p1 t2_1 - -> Seq Scan on pagg_tab2_p2 t2_2 - -> Seq Scan on pagg_tab2_p3 t2_3 -(15 rows) - -SELECT t1.x, sum(t1.y), count(t1) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; - x | sum | count -----+------+------- - 0 | 500 | 100 - 6 | 1100 | 100 - 12 | 700 | 100 - 18 | 1300 | 100 - 24 | 900 | 100 -(5 rows) - --- GROUP BY having other matching key -EXPLAIN (COSTS OFF) -SELECT t2.y, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t2.y ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------- - Sort - Sort Key: t2.y, (sum(t1.y)), (count(*)) - -> Append - -> HashAggregate - Group Key: t2.y - -> Hash Join - Hash Cond: (t1.x = t2.y) - -> Seq Scan on pagg_tab1_p1 t1 - -> Hash - -> Seq Scan on pagg_tab2_p1 t2 - -> HashAggregate - Group Key: t2_1.y - -> Hash Join - Hash Cond: (t1_1.x = t2_1.y) - -> Seq Scan on pagg_tab1_p2 t1_1 - -> Hash - -> Seq Scan on pagg_tab2_p2 t2_1 - -> HashAggregate - Group Key: t2_2.y - -> Hash Join - Hash Cond: (t2_2.y = t1_2.x) - -> Seq Scan on pagg_tab2_p3 t2_2 - -> Hash - -> Seq Scan on pagg_tab1_p3 t1_2 -(24 rows) - --- When GROUP BY clause does not match; partial aggregation is performed for each partition. --- Also test GroupAggregate paths by disabling hash aggregates. 
-SET enable_hashagg TO false; -EXPLAIN (COSTS OFF) -SELECT t1.y, sum(t1.x), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.y HAVING avg(t1.x) > 10 ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------------------- - Sort - Sort Key: t1.y, (sum(t1.x)), (count(*)) - -> Finalize GroupAggregate - Group Key: t1.y - Filter: (avg(t1.x) > '10'::numeric) - -> Merge Append - Sort Key: t1.y - -> Partial GroupAggregate - Group Key: t1.y - -> Sort - Sort Key: t1.y - -> Hash Join - Hash Cond: (t1.x = t2.y) - -> Seq Scan on pagg_tab1_p1 t1 - -> Hash - -> Seq Scan on pagg_tab2_p1 t2 - -> Partial GroupAggregate - Group Key: t1_1.y - -> Sort - Sort Key: t1_1.y - -> Hash Join - Hash Cond: (t1_1.x = t2_1.y) - -> Seq Scan on pagg_tab1_p2 t1_1 - -> Hash - -> Seq Scan on pagg_tab2_p2 t2_1 - -> Partial GroupAggregate - Group Key: t1_2.y - -> Sort - Sort Key: t1_2.y - -> Hash Join - Hash Cond: (t2_2.y = t1_2.x) - -> Seq Scan on pagg_tab2_p3 t2_2 - -> Hash - -> Seq Scan on pagg_tab1_p3 t1_2 -(34 rows) - -SELECT t1.y, sum(t1.x), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.y HAVING avg(t1.x) > 10 ORDER BY 1, 2, 3; - y | sum | count -----+------+------- - 2 | 600 | 50 - 4 | 1200 | 50 - 8 | 900 | 50 - 12 | 600 | 50 - 14 | 1200 | 50 - 18 | 900 | 50 -(6 rows) - -RESET enable_hashagg; --- Check with LEFT/RIGHT/FULL OUTER JOINs which produces NULL values for --- aggregation --- LEFT JOIN, should produce partial partitionwise aggregation plan as --- GROUP BY is on nullable column -EXPLAIN (COSTS OFF) -SELECT b.y, sum(a.y) FROM pagg_tab1 a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; - QUERY PLAN ------------------------------------------------------------------- - Finalize GroupAggregate - Group Key: b.y - -> Sort - Sort Key: b.y - -> Append - -> Partial HashAggregate - Group Key: b.y - -> Hash Left Join - Hash Cond: (a.x = b.y) - -> Seq Scan on pagg_tab1_p1 a - -> Hash - -> Seq Scan on pagg_tab2_p1 b - -> Partial HashAggregate - Group Key: b_1.y - -> Hash Left Join - Hash Cond: (a_1.x = b_1.y) - -> Seq Scan on pagg_tab1_p2 a_1 - -> Hash - -> Seq Scan on pagg_tab2_p2 b_1 - -> Partial HashAggregate - Group Key: b_2.y - -> Hash Right Join - Hash Cond: (b_2.y = a_2.x) - -> Seq Scan on pagg_tab2_p3 b_2 - -> Hash - -> Seq Scan on pagg_tab1_p3 a_2 -(26 rows) - -SELECT b.y, sum(a.y) FROM pagg_tab1 a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; - y | sum -----+------ - 0 | 500 - 6 | 1100 - 12 | 700 - 18 | 1300 - 24 | 900 - | 900 -(6 rows) - --- RIGHT JOIN, should produce full partitionwise aggregation plan as --- GROUP BY is on non-nullable column -EXPLAIN (COSTS OFF) -SELECT b.y, sum(a.y) FROM pagg_tab1 a RIGHT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; - QUERY PLAN ------------------------------------------------------------- - Sort - Sort Key: b.y - -> Append - -> HashAggregate - Group Key: b.y - -> Hash Right Join - Hash Cond: (a.x = b.y) - -> Seq Scan on pagg_tab1_p1 a - -> Hash - -> Seq Scan on pagg_tab2_p1 b - -> HashAggregate - Group Key: b_1.y - -> Hash Right Join - Hash Cond: (a_1.x = b_1.y) - -> Seq Scan on pagg_tab1_p2 a_1 - -> Hash - -> Seq Scan on pagg_tab2_p2 b_1 - -> HashAggregate - Group Key: b_2.y - -> Hash Left Join - Hash Cond: (b_2.y = a_2.x) - -> Seq Scan on pagg_tab2_p3 b_2 - -> Hash - -> Seq Scan on pagg_tab1_p3 a_2 -(24 rows) - -SELECT b.y, sum(a.y) FROM pagg_tab1 a RIGHT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; - y | 
sum -----+------ - 0 | 500 - 3 | - 6 | 1100 - 9 | - 12 | 700 - 15 | - 18 | 1300 - 21 | - 24 | 900 - 27 | -(10 rows) - --- FULL JOIN, should produce partial partitionwise aggregation plan as --- GROUP BY is on nullable column -EXPLAIN (COSTS OFF) -SELECT a.x, sum(b.x) FROM pagg_tab1 a FULL OUTER JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x ORDER BY 1 NULLS LAST; - QUERY PLAN ------------------------------------------------------------------- - Finalize GroupAggregate - Group Key: a.x - -> Sort - Sort Key: a.x - -> Append - -> Partial HashAggregate - Group Key: a.x - -> Hash Full Join - Hash Cond: (a.x = b.y) - -> Seq Scan on pagg_tab1_p1 a - -> Hash - -> Seq Scan on pagg_tab2_p1 b - -> Partial HashAggregate - Group Key: a_1.x - -> Hash Full Join - Hash Cond: (a_1.x = b_1.y) - -> Seq Scan on pagg_tab1_p2 a_1 - -> Hash - -> Seq Scan on pagg_tab2_p2 b_1 - -> Partial HashAggregate - Group Key: a_2.x - -> Hash Full Join - Hash Cond: (b_2.y = a_2.x) - -> Seq Scan on pagg_tab2_p3 b_2 - -> Hash - -> Seq Scan on pagg_tab1_p3 a_2 -(26 rows) - -SELECT a.x, sum(b.x) FROM pagg_tab1 a FULL OUTER JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x ORDER BY 1 NULLS LAST; - x | sum -----+------ - 0 | 500 - 2 | - 4 | - 6 | 1100 - 8 | - 10 | - 12 | 700 - 14 | - 16 | - 18 | 1300 - 20 | - 22 | - 24 | 900 - 26 | - 28 | - | 500 -(16 rows) - --- LEFT JOIN, with dummy relation on right side, ideally --- should produce full partitionwise aggregation plan as GROUP BY is on --- non-nullable columns. --- But right now we are unable to do partitionwise join in this case. -EXPLAIN (COSTS OFF) -SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a LEFT JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: pagg_tab1.x, pagg_tab2.y - -> HashAggregate - Group Key: pagg_tab1.x, pagg_tab2.y - -> Hash Left Join - Hash Cond: (pagg_tab1.x = pagg_tab2.y) - Filter: ((pagg_tab1.x > 5) OR (pagg_tab2.y < 20)) - -> Append - -> Seq Scan on pagg_tab1_p1 pagg_tab1_1 - Filter: (x < 20) - -> Seq Scan on pagg_tab1_p2 pagg_tab1_2 - Filter: (x < 20) - -> Hash - -> Append - -> Seq Scan on pagg_tab2_p2 pagg_tab2_1 - Filter: (y > 10) - -> Seq Scan on pagg_tab2_p3 pagg_tab2_2 - Filter: (y > 10) -(18 rows) - -SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a LEFT JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; - x | y | count -----+----+------- - 6 | | 10 - 8 | | 10 - 10 | | 10 - 12 | 12 | 100 - 14 | | 10 - 16 | | 10 - 18 | 18 | 100 -(7 rows) - --- FULL JOIN, with dummy relations on both sides, ideally --- should produce partial partitionwise aggregation plan as GROUP BY is on --- nullable columns. --- But right now we are unable to do partitionwise join in this case. 
-EXPLAIN (COSTS OFF) -SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a FULL JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: pagg_tab1.x, pagg_tab2.y - -> HashAggregate - Group Key: pagg_tab1.x, pagg_tab2.y - -> Hash Full Join - Hash Cond: (pagg_tab1.x = pagg_tab2.y) - Filter: ((pagg_tab1.x > 5) OR (pagg_tab2.y < 20)) - -> Append - -> Seq Scan on pagg_tab1_p1 pagg_tab1_1 - Filter: (x < 20) - -> Seq Scan on pagg_tab1_p2 pagg_tab1_2 - Filter: (x < 20) - -> Hash - -> Append - -> Seq Scan on pagg_tab2_p2 pagg_tab2_1 - Filter: (y > 10) - -> Seq Scan on pagg_tab2_p3 pagg_tab2_2 - Filter: (y > 10) -(18 rows) - -SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a FULL JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; - x | y | count -----+----+------- - 6 | | 10 - 8 | | 10 - 10 | | 10 - 12 | 12 | 100 - 14 | | 10 - 16 | | 10 - 18 | 18 | 100 - | 15 | 10 -(8 rows) - --- Empty join relation because of empty outer side, no partitionwise agg plan -EXPLAIN (COSTS OFF) -SELECT a.x, a.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x = 1 AND x = 2) a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x, a.y ORDER BY 1, 2; - QUERY PLAN --------------------------------------- - GroupAggregate - Group Key: pagg_tab1.y - -> Sort - Sort Key: pagg_tab1.y - -> Result - One-Time Filter: false -(6 rows) - -SELECT a.x, a.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x = 1 AND x = 2) a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x, a.y ORDER BY 1, 2; - x | y | count ----+---+------- -(0 rows) - --- Partition by multiple columns -CREATE TABLE pagg_tab_m (a int, b int, c int) PARTITION BY RANGE(a, ((a+b)/2)); -CREATE TABLE pagg_tab_m_p1 PARTITION OF pagg_tab_m FOR VALUES FROM (0, 0) TO (12, 12); -CREATE TABLE pagg_tab_m_p2 PARTITION OF pagg_tab_m FOR VALUES FROM (12, 12) TO (22, 22); -CREATE TABLE pagg_tab_m_p3 PARTITION OF pagg_tab_m FOR VALUES FROM (22, 22) TO (30, 30); -INSERT INTO pagg_tab_m SELECT i % 30, i % 40, i % 50 FROM generate_series(0, 2999) i; -ANALYZE pagg_tab_m; --- Partial aggregation as GROUP BY clause does not match with PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a HAVING avg(c) < 22 ORDER BY 1, 2, 3; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_m.a, (sum(pagg_tab_m.b)), (avg(pagg_tab_m.c)) - -> Finalize HashAggregate - Group Key: pagg_tab_m.a - Filter: (avg(pagg_tab_m.c) < '22'::numeric) - -> Append - -> Partial HashAggregate - Group Key: pagg_tab_m.a - -> Seq Scan on pagg_tab_m_p1 pagg_tab_m - -> Partial HashAggregate - Group Key: pagg_tab_m_1.a - -> Seq Scan on pagg_tab_m_p2 pagg_tab_m_1 - -> Partial HashAggregate - Group Key: pagg_tab_m_2.a - -> Seq Scan on pagg_tab_m_p3 pagg_tab_m_2 -(15 rows) - -SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a HAVING avg(c) < 22 ORDER BY 1, 2, 3; - a | sum | avg | count -----+------+---------------------+------- - 0 | 1500 | 20.0000000000000000 | 100 - 1 | 1600 | 21.0000000000000000 | 100 - 10 | 1500 | 20.0000000000000000 | 100 - 11 | 1600 | 21.0000000000000000 | 100 - 20 | 1500 | 20.0000000000000000 | 100 - 21 | 1600 | 21.0000000000000000 | 100 -(6 rows) - --- Full aggregation as GROUP BY clause matches with PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT a, 
sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a, (a+b)/2 HAVING sum(b) < 50 ORDER BY 1, 2, 3; - QUERY PLAN ----------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_m.a, (sum(pagg_tab_m.b)), (avg(pagg_tab_m.c)) - -> Append - -> HashAggregate - Group Key: pagg_tab_m.a, ((pagg_tab_m.a + pagg_tab_m.b) / 2) - Filter: (sum(pagg_tab_m.b) < 50) - -> Seq Scan on pagg_tab_m_p1 pagg_tab_m - -> HashAggregate - Group Key: pagg_tab_m_1.a, ((pagg_tab_m_1.a + pagg_tab_m_1.b) / 2) - Filter: (sum(pagg_tab_m_1.b) < 50) - -> Seq Scan on pagg_tab_m_p2 pagg_tab_m_1 - -> HashAggregate - Group Key: pagg_tab_m_2.a, ((pagg_tab_m_2.a + pagg_tab_m_2.b) / 2) - Filter: (sum(pagg_tab_m_2.b) < 50) - -> Seq Scan on pagg_tab_m_p3 pagg_tab_m_2 -(15 rows) - -SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a, (a+b)/2 HAVING sum(b) < 50 ORDER BY 1, 2, 3; - a | sum | avg | count -----+-----+---------------------+------- - 0 | 0 | 20.0000000000000000 | 25 - 1 | 25 | 21.0000000000000000 | 25 - 10 | 0 | 20.0000000000000000 | 25 - 11 | 25 | 21.0000000000000000 | 25 - 20 | 0 | 20.0000000000000000 | 25 - 21 | 25 | 21.0000000000000000 | 25 -(6 rows) - --- Full aggregation as PARTITION KEY is part of GROUP BY clause -EXPLAIN (COSTS OFF) -SELECT a, c, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY (a+b)/2, 2, 1 HAVING sum(b) = 50 AND avg(c) > 25 ORDER BY 1, 2, 3; - QUERY PLAN --------------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_m.a, pagg_tab_m.c, (sum(pagg_tab_m.b)) - -> Append - -> HashAggregate - Group Key: ((pagg_tab_m.a + pagg_tab_m.b) / 2), pagg_tab_m.c, pagg_tab_m.a - Filter: ((sum(pagg_tab_m.b) = 50) AND (avg(pagg_tab_m.c) > '25'::numeric)) - -> Seq Scan on pagg_tab_m_p1 pagg_tab_m - -> HashAggregate - Group Key: ((pagg_tab_m_1.a + pagg_tab_m_1.b) / 2), pagg_tab_m_1.c, pagg_tab_m_1.a - Filter: ((sum(pagg_tab_m_1.b) = 50) AND (avg(pagg_tab_m_1.c) > '25'::numeric)) - -> Seq Scan on pagg_tab_m_p2 pagg_tab_m_1 - -> HashAggregate - Group Key: ((pagg_tab_m_2.a + pagg_tab_m_2.b) / 2), pagg_tab_m_2.c, pagg_tab_m_2.a - Filter: ((sum(pagg_tab_m_2.b) = 50) AND (avg(pagg_tab_m_2.c) > '25'::numeric)) - -> Seq Scan on pagg_tab_m_p3 pagg_tab_m_2 -(15 rows) - -SELECT a, c, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY (a+b)/2, 2, 1 HAVING sum(b) = 50 AND avg(c) > 25 ORDER BY 1, 2, 3; - a | c | sum | avg | count -----+----+-----+---------------------+------- - 0 | 30 | 50 | 30.0000000000000000 | 5 - 0 | 40 | 50 | 40.0000000000000000 | 5 - 10 | 30 | 50 | 30.0000000000000000 | 5 - 10 | 40 | 50 | 40.0000000000000000 | 5 - 20 | 30 | 50 | 30.0000000000000000 | 5 - 20 | 40 | 50 | 40.0000000000000000 | 5 -(6 rows) - --- Test with multi-level partitioning scheme -CREATE TABLE pagg_tab_ml (a int, b int, c text) PARTITION BY RANGE(a); -CREATE TABLE pagg_tab_ml_p1 PARTITION OF pagg_tab_ml FOR VALUES FROM (0) TO (12); -CREATE TABLE pagg_tab_ml_p2 PARTITION OF pagg_tab_ml FOR VALUES FROM (12) TO (20) PARTITION BY LIST (c); -CREATE TABLE pagg_tab_ml_p2_s1 PARTITION OF pagg_tab_ml_p2 FOR VALUES IN ('0000', '0001', '0002'); -CREATE TABLE pagg_tab_ml_p2_s2 PARTITION OF pagg_tab_ml_p2 FOR VALUES IN ('0003'); --- This level of partitioning has different column positions than the parent -CREATE TABLE pagg_tab_ml_p3(b int, c text, a int) PARTITION BY RANGE (b); -CREATE TABLE pagg_tab_ml_p3_s1(c text, a int, b int); -CREATE TABLE pagg_tab_ml_p3_s2 PARTITION OF pagg_tab_ml_p3 FOR VALUES FROM (7) TO (10); -ALTER TABLE 
pagg_tab_ml_p3 ATTACH PARTITION pagg_tab_ml_p3_s1 FOR VALUES FROM (0) TO (7); -ALTER TABLE pagg_tab_ml ATTACH PARTITION pagg_tab_ml_p3 FOR VALUES FROM (20) TO (30); -INSERT INTO pagg_tab_ml SELECT i % 30, i % 10, to_char(i % 4, 'FM0000') FROM generate_series(0, 29999) i; -ANALYZE pagg_tab_ml; --- For Parallel Append -SET max_parallel_workers_per_gather TO 2; -SET parallel_setup_cost = 0; --- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY --- for level 1 only. For subpartitions, GROUP BY clause does not match with --- PARTITION KEY, but still we do not see a partial aggregation as array_agg() --- is not partial agg safe. -EXPLAIN (COSTS OFF) -SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - QUERY PLAN --------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (array_agg(DISTINCT pagg_tab_ml.c)) - -> Gather - Workers Planned: 2 - -> Parallel Append - -> GroupAggregate - Group Key: pagg_tab_ml.a - Filter: (avg(pagg_tab_ml.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml.a, pagg_tab_ml.c - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> GroupAggregate - Group Key: pagg_tab_ml_5.a - Filter: (avg(pagg_tab_ml_5.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_5.a, pagg_tab_ml_5.c - -> Append - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5 - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6 - -> GroupAggregate - Group Key: pagg_tab_ml_2.a - Filter: (avg(pagg_tab_ml_2.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_2.a, pagg_tab_ml_2.c - -> Append - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2 - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3 -(27 rows) - -SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - a | sum | array_agg | count -----+------+-------------+------- - 0 | 0 | {0000,0002} | 1000 - 1 | 1000 | {0001,0003} | 1000 - 2 | 2000 | {0000,0002} | 1000 - 10 | 0 | {0000,0002} | 1000 - 11 | 1000 | {0001,0003} | 1000 - 12 | 2000 | {0000,0002} | 1000 - 20 | 0 | {0000,0002} | 1000 - 21 | 1000 | {0001,0003} | 1000 - 22 | 2000 | {0000,0002} | 1000 -(9 rows) - --- Without ORDER BY clause, to test Gather at top-most path -EXPLAIN (COSTS OFF) -SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3; - QUERY PLAN ---------------------------------------------------------------------------- - Gather - Workers Planned: 2 - -> Parallel Append - -> GroupAggregate - Group Key: pagg_tab_ml.a - Filter: (avg(pagg_tab_ml.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml.a, pagg_tab_ml.c - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> GroupAggregate - Group Key: pagg_tab_ml_5.a - Filter: (avg(pagg_tab_ml_5.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_5.a, pagg_tab_ml_5.c - -> Append - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5 - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6 - -> GroupAggregate - Group Key: pagg_tab_ml_2.a - Filter: (avg(pagg_tab_ml_2.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_2.a, pagg_tab_ml_2.c - -> Append - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2 - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3 -(25 rows) - -RESET parallel_setup_cost; --- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY --- for level 1 only. For subpartitions, GROUP BY clause does not match with --- PARTITION KEY, thus we will have a partial aggregation for them. 
-EXPLAIN (COSTS OFF) -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - QUERY PLAN ---------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (count(*)) - -> Append - -> HashAggregate - Group Key: pagg_tab_ml.a - Filter: (avg(pagg_tab_ml.b) < '3'::numeric) - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> Finalize GroupAggregate - Group Key: pagg_tab_ml_2.a - Filter: (avg(pagg_tab_ml_2.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_2.a - -> Append - -> Partial HashAggregate - Group Key: pagg_tab_ml_2.a - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2 - -> Partial HashAggregate - Group Key: pagg_tab_ml_3.a - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3 - -> Finalize GroupAggregate - Group Key: pagg_tab_ml_5.a - Filter: (avg(pagg_tab_ml_5.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_5.a - -> Append - -> Partial HashAggregate - Group Key: pagg_tab_ml_5.a - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5 - -> Partial HashAggregate - Group Key: pagg_tab_ml_6.a - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6 -(31 rows) - -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - a | sum | count -----+------+------- - 0 | 0 | 1000 - 1 | 1000 | 1000 - 2 | 2000 | 1000 - 10 | 0 | 1000 - 11 | 1000 | 1000 - 12 | 2000 | 1000 - 20 | 0 | 1000 - 21 | 1000 | 1000 - 22 | 2000 | 1000 -(9 rows) - --- Partial aggregation at all levels as GROUP BY clause does not match with --- PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b ORDER BY 1, 2, 3; - QUERY PLAN ---------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.b, (sum(pagg_tab_ml.a)), (count(*)) - -> Finalize GroupAggregate - Group Key: pagg_tab_ml.b - -> Sort - Sort Key: pagg_tab_ml.b - -> Append - -> Partial HashAggregate - Group Key: pagg_tab_ml.b - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> Partial HashAggregate - Group Key: pagg_tab_ml_1.b - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1 - -> Partial HashAggregate - Group Key: pagg_tab_ml_2.b - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2 - -> Partial HashAggregate - Group Key: pagg_tab_ml_3.b - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3 - -> Partial HashAggregate - Group Key: pagg_tab_ml_4.b - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4 -(22 rows) - -SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b HAVING avg(a) < 15 ORDER BY 1, 2, 3; - b | sum | count ----+-------+------- - 0 | 30000 | 3000 - 1 | 33000 | 3000 - 2 | 36000 | 3000 - 3 | 39000 | 3000 - 4 | 42000 | 3000 -(5 rows) - --- Full aggregation at all levels as GROUP BY clause matches with PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; - QUERY PLAN ----------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (count(*)) - -> Append - -> HashAggregate - Group Key: pagg_tab_ml.a, pagg_tab_ml.b, pagg_tab_ml.c - Filter: (avg(pagg_tab_ml.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> HashAggregate - Group Key: pagg_tab_ml_1.a, pagg_tab_ml_1.b, pagg_tab_ml_1.c - Filter: (avg(pagg_tab_ml_1.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1 - -> HashAggregate - Group Key: pagg_tab_ml_2.a, pagg_tab_ml_2.b, pagg_tab_ml_2.c - Filter: (avg(pagg_tab_ml_2.b) > '7'::numeric) - -> Seq Scan on 
pagg_tab_ml_p2_s2 pagg_tab_ml_2 - -> HashAggregate - Group Key: pagg_tab_ml_3.a, pagg_tab_ml_3.b, pagg_tab_ml_3.c - Filter: (avg(pagg_tab_ml_3.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3 - -> HashAggregate - Group Key: pagg_tab_ml_4.a, pagg_tab_ml_4.b, pagg_tab_ml_4.c - Filter: (avg(pagg_tab_ml_4.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4 -(23 rows) - -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; - a | sum | count -----+------+------- - 8 | 4000 | 500 - 8 | 4000 | 500 - 9 | 4500 | 500 - 9 | 4500 | 500 - 18 | 4000 | 500 - 18 | 4000 | 500 - 19 | 4500 | 500 - 19 | 4500 | 500 - 28 | 4000 | 500 - 28 | 4000 | 500 - 29 | 4500 | 500 - 29 | 4500 | 500 -(12 rows) - --- Parallelism within partitionwise aggregates -SET min_parallel_table_scan_size TO '8kB'; -SET parallel_setup_cost TO 0; --- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY --- for level 1 only. For subpartitions, GROUP BY clause does not match with --- PARTITION KEY, thus we will have a partial aggregation for them. -EXPLAIN (COSTS OFF) -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - QUERY PLAN ------------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (count(*)) - -> Append - -> Finalize GroupAggregate - Group Key: pagg_tab_ml.a - Filter: (avg(pagg_tab_ml.b) < '3'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_ml.a - -> Partial HashAggregate - Group Key: pagg_tab_ml.a - -> Parallel Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> Finalize GroupAggregate - Group Key: pagg_tab_ml_2.a - Filter: (avg(pagg_tab_ml_2.b) < '3'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_ml_2.a - -> Parallel Append - -> Partial HashAggregate - Group Key: pagg_tab_ml_2.a - -> Parallel Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2 - -> Partial HashAggregate - Group Key: pagg_tab_ml_3.a - -> Parallel Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3 - -> Finalize GroupAggregate - Group Key: pagg_tab_ml_5.a - Filter: (avg(pagg_tab_ml_5.b) < '3'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_ml_5.a - -> Parallel Append - -> Partial HashAggregate - Group Key: pagg_tab_ml_5.a - -> Parallel Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5 - -> Partial HashAggregate - Group Key: pagg_tab_ml_6.a - -> Parallel Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6 -(41 rows) - -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - a | sum | count -----+------+------- - 0 | 0 | 1000 - 1 | 1000 | 1000 - 2 | 2000 | 1000 - 10 | 0 | 1000 - 11 | 1000 | 1000 - 12 | 2000 | 1000 - 20 | 0 | 1000 - 21 | 1000 | 1000 - 22 | 2000 | 1000 -(9 rows) - --- Partial aggregation at all levels as GROUP BY clause does not match with --- PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b ORDER BY 1, 2, 3; - QUERY PLAN ------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.b, (sum(pagg_tab_ml.a)), (count(*)) - -> Finalize GroupAggregate - Group Key: pagg_tab_ml.b - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_ml.b - -> Parallel Append - -> Partial HashAggregate - Group Key: pagg_tab_ml.b - -> Parallel Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> Partial HashAggregate - Group Key: pagg_tab_ml_3.b 
- -> Parallel Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3 - -> Partial HashAggregate - Group Key: pagg_tab_ml_1.b - -> Parallel Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1 - -> Partial HashAggregate - Group Key: pagg_tab_ml_4.b - -> Parallel Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4 - -> Partial HashAggregate - Group Key: pagg_tab_ml_2.b - -> Parallel Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2 -(24 rows) - -SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b HAVING avg(a) < 15 ORDER BY 1, 2, 3; - b | sum | count ----+-------+------- - 0 | 30000 | 3000 - 1 | 33000 | 3000 - 2 | 36000 | 3000 - 3 | 39000 | 3000 - 4 | 42000 | 3000 -(5 rows) - --- Full aggregation at all levels as GROUP BY clause matches with PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; - QUERY PLAN ----------------------------------------------------------------------------------- - Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (count(*)) - -> Parallel Append - -> HashAggregate - Group Key: pagg_tab_ml.a, pagg_tab_ml.b, pagg_tab_ml.c - Filter: (avg(pagg_tab_ml.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> HashAggregate - Group Key: pagg_tab_ml_3.a, pagg_tab_ml_3.b, pagg_tab_ml_3.c - Filter: (avg(pagg_tab_ml_3.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3 - -> HashAggregate - Group Key: pagg_tab_ml_1.a, pagg_tab_ml_1.b, pagg_tab_ml_1.c - Filter: (avg(pagg_tab_ml_1.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1 - -> HashAggregate - Group Key: pagg_tab_ml_4.a, pagg_tab_ml_4.b, pagg_tab_ml_4.c - Filter: (avg(pagg_tab_ml_4.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4 - -> HashAggregate - Group Key: pagg_tab_ml_2.a, pagg_tab_ml_2.b, pagg_tab_ml_2.c - Filter: (avg(pagg_tab_ml_2.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2 -(25 rows) - -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; - a | sum | count -----+------+------- - 8 | 4000 | 500 - 8 | 4000 | 500 - 9 | 4500 | 500 - 9 | 4500 | 500 - 18 | 4000 | 500 - 18 | 4000 | 500 - 19 | 4500 | 500 - 19 | 4500 | 500 - 28 | 4000 | 500 - 28 | 4000 | 500 - 29 | 4500 | 500 - 29 | 4500 | 500 -(12 rows) - --- Parallelism within partitionwise aggregates (single level) --- Add few parallel setup cost, so that we will see a plan which gathers --- partially created paths even for full aggregation and sticks a single Gather --- followed by finalization step. --- Without this, the cost of doing partial aggregation + Gather + finalization --- for each partition and then Append over it turns out to be same and this --- wins as we add it first. This parallel_setup_cost plays a vital role in --- costing such plans. -SET parallel_setup_cost TO 10; -CREATE TABLE pagg_tab_para(x int, y int) PARTITION BY RANGE(x); -CREATE TABLE pagg_tab_para_p1 PARTITION OF pagg_tab_para FOR VALUES FROM (0) TO (12); -CREATE TABLE pagg_tab_para_p2 PARTITION OF pagg_tab_para FOR VALUES FROM (12) TO (22); -CREATE TABLE pagg_tab_para_p3 PARTITION OF pagg_tab_para FOR VALUES FROM (22) TO (30); -INSERT INTO pagg_tab_para SELECT i % 30, i % 20 FROM generate_series(0, 29999) i; -ANALYZE pagg_tab_para; --- When GROUP BY clause matches; full aggregation is performed for each partition. 
-EXPLAIN (COSTS OFF) -SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_para.x, (sum(pagg_tab_para.y)), (avg(pagg_tab_para.y)) - -> Finalize GroupAggregate - Group Key: pagg_tab_para.x - Filter: (avg(pagg_tab_para.y) < '7'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_para.x - -> Parallel Append - -> Partial HashAggregate - Group Key: pagg_tab_para.x - -> Parallel Seq Scan on pagg_tab_para_p1 pagg_tab_para - -> Partial HashAggregate - Group Key: pagg_tab_para_1.x - -> Parallel Seq Scan on pagg_tab_para_p2 pagg_tab_para_1 - -> Partial HashAggregate - Group Key: pagg_tab_para_2.x - -> Parallel Seq Scan on pagg_tab_para_p3 pagg_tab_para_2 -(19 rows) - -SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - x | sum | avg | count -----+------+--------------------+------- - 0 | 5000 | 5.0000000000000000 | 1000 - 1 | 6000 | 6.0000000000000000 | 1000 - 10 | 5000 | 5.0000000000000000 | 1000 - 11 | 6000 | 6.0000000000000000 | 1000 - 20 | 5000 | 5.0000000000000000 | 1000 - 21 | 6000 | 6.0000000000000000 | 1000 -(6 rows) - --- When GROUP BY clause does not match; partial aggregation is performed for each partition. -EXPLAIN (COSTS OFF) -SELECT y, sum(x), avg(x), count(*) FROM pagg_tab_para GROUP BY y HAVING avg(x) < 12 ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_para.y, (sum(pagg_tab_para.x)), (avg(pagg_tab_para.x)) - -> Finalize GroupAggregate - Group Key: pagg_tab_para.y - Filter: (avg(pagg_tab_para.x) < '12'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_para.y - -> Parallel Append - -> Partial HashAggregate - Group Key: pagg_tab_para.y - -> Parallel Seq Scan on pagg_tab_para_p1 pagg_tab_para - -> Partial HashAggregate - Group Key: pagg_tab_para_1.y - -> Parallel Seq Scan on pagg_tab_para_p2 pagg_tab_para_1 - -> Partial HashAggregate - Group Key: pagg_tab_para_2.y - -> Parallel Seq Scan on pagg_tab_para_p3 pagg_tab_para_2 -(19 rows) - -SELECT y, sum(x), avg(x), count(*) FROM pagg_tab_para GROUP BY y HAVING avg(x) < 12 ORDER BY 1, 2, 3; - y | sum | avg | count -----+-------+---------------------+------- - 0 | 15000 | 10.0000000000000000 | 1500 - 1 | 16500 | 11.0000000000000000 | 1500 - 10 | 15000 | 10.0000000000000000 | 1500 - 11 | 16500 | 11.0000000000000000 | 1500 -(4 rows) - --- Test when parent can produce parallel paths but not any (or some) of its children --- (Use one more aggregate to tilt the cost estimates for the plan we want) -ALTER TABLE pagg_tab_para_p1 SET (parallel_workers = 0); -ALTER TABLE pagg_tab_para_p3 SET (parallel_workers = 0); -ANALYZE pagg_tab_para; -EXPLAIN (COSTS OFF) -SELECT x, sum(y), avg(y), sum(x+y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_para.x, (sum(pagg_tab_para.y)), (avg(pagg_tab_para.y)) - -> Finalize GroupAggregate - Group Key: pagg_tab_para.x - Filter: (avg(pagg_tab_para.y) < '7'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_para.x - -> Partial HashAggregate - Group Key: pagg_tab_para.x - -> Parallel Append - -> Seq Scan on pagg_tab_para_p1 pagg_tab_para_1 - -> 
Seq Scan on pagg_tab_para_p3 pagg_tab_para_3 - -> Parallel Seq Scan on pagg_tab_para_p2 pagg_tab_para_2 -(15 rows) - -SELECT x, sum(y), avg(y), sum(x+y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - x | sum | avg | sum | count -----+------+--------------------+-------+------- - 0 | 5000 | 5.0000000000000000 | 5000 | 1000 - 1 | 6000 | 6.0000000000000000 | 7000 | 1000 - 10 | 5000 | 5.0000000000000000 | 15000 | 1000 - 11 | 6000 | 6.0000000000000000 | 17000 | 1000 - 20 | 5000 | 5.0000000000000000 | 25000 | 1000 - 21 | 6000 | 6.0000000000000000 | 27000 | 1000 -(6 rows) - -ALTER TABLE pagg_tab_para_p2 SET (parallel_workers = 0); -ANALYZE pagg_tab_para; -EXPLAIN (COSTS OFF) -SELECT x, sum(y), avg(y), sum(x+y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - QUERY PLAN ----------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_para.x, (sum(pagg_tab_para.y)), (avg(pagg_tab_para.y)) - -> Finalize GroupAggregate - Group Key: pagg_tab_para.x - Filter: (avg(pagg_tab_para.y) < '7'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_para.x - -> Partial HashAggregate - Group Key: pagg_tab_para.x - -> Parallel Append - -> Seq Scan on pagg_tab_para_p1 pagg_tab_para_1 - -> Seq Scan on pagg_tab_para_p2 pagg_tab_para_2 - -> Seq Scan on pagg_tab_para_p3 pagg_tab_para_3 -(15 rows) - -SELECT x, sum(y), avg(y), sum(x+y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - x | sum | avg | sum | count -----+------+--------------------+-------+------- - 0 | 5000 | 5.0000000000000000 | 5000 | 1000 - 1 | 6000 | 6.0000000000000000 | 7000 | 1000 - 10 | 5000 | 5.0000000000000000 | 15000 | 1000 - 11 | 6000 | 6.0000000000000000 | 17000 | 1000 - 20 | 5000 | 5.0000000000000000 | 25000 | 1000 - 21 | 6000 | 6.0000000000000000 | 27000 | 1000 -(6 rows) - --- Reset parallelism parameters to get partitionwise aggregation plan. -RESET min_parallel_table_scan_size; -RESET parallel_setup_cost; -EXPLAIN (COSTS OFF) -SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - QUERY PLAN ------------------------------------------------------------------------------ - Sort - Sort Key: pagg_tab_para.x, (sum(pagg_tab_para.y)), (avg(pagg_tab_para.y)) - -> Append - -> HashAggregate - Group Key: pagg_tab_para.x - Filter: (avg(pagg_tab_para.y) < '7'::numeric) - -> Seq Scan on pagg_tab_para_p1 pagg_tab_para - -> HashAggregate - Group Key: pagg_tab_para_1.x - Filter: (avg(pagg_tab_para_1.y) < '7'::numeric) - -> Seq Scan on pagg_tab_para_p2 pagg_tab_para_1 - -> HashAggregate - Group Key: pagg_tab_para_2.x - Filter: (avg(pagg_tab_para_2.y) < '7'::numeric) - -> Seq Scan on pagg_tab_para_p3 pagg_tab_para_2 -(15 rows) - -SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - x | sum | avg | count -----+------+--------------------+------- - 0 | 5000 | 5.0000000000000000 | 1000 - 1 | 6000 | 6.0000000000000000 | 1000 - 10 | 5000 | 5.0000000000000000 | 1000 - 11 | 6000 | 6.0000000000000000 | 1000 - 20 | 5000 | 5.0000000000000000 | 1000 - 21 | 6000 | 6.0000000000000000 | 1000 -(6 rows) - +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. 
+connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/tuplesort.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tuplesort.out --- /tmp/cirrus-ci-build/src/test/regress/expected/tuplesort.out 2024-03-13 23:12:37.628138000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tuplesort.out 2024-03-13 23:14:45.721199000 +0000 @@ -564,129 +564,10 @@ SELECT * FROM abbrev_abort_uuids UNION ALL SELECT NULL, NULL, NULL, NULL, NULL) s; - array_agg | array_agg | array_agg | percentile_disc | percentile_disc | percentile_disc | percentile_disc | rank ---------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+-----------------+-----------------+--------------------------------------+-----------------+------ - {NULL,20010,20009,20008,20007} | {00000000-0000-0000-0000-000000020000,00000000-0000-0000-0000-000000020000,00000000-0000-0000-0000-000000019999,00000000-0000-0000-0000-000000019998,00000000-0000-0000-0000-000000019997} | {9999,9998,9997,9996,9995} | 19810 | 200 | 00000000-0000-0000-0000-000000016003 | 136 | 2 -(1 row) - --- disk based (see also above) -BEGIN; -SET LOCAL work_mem = '100kB'; -SELECT - (array_agg(id ORDER BY id DESC NULLS FIRST))[0:5], - (array_agg(abort_increasing ORDER BY abort_increasing DESC NULLS LAST))[0:5], - (array_agg(id::text ORDER BY id::text DESC NULLS LAST))[0:5], - percentile_disc(0.99) WITHIN GROUP (ORDER BY id), - percentile_disc(0.01) WITHIN GROUP (ORDER BY id), - percentile_disc(0.8) WITHIN GROUP (ORDER BY abort_increasing), - percentile_disc(0.2) WITHIN GROUP (ORDER BY id::text), - rank('00000000-0000-0000-0000-000000000000', '2', '2') WITHIN GROUP (ORDER BY noabort_increasing, id, id::text) -FROM ( - SELECT * FROM abbrev_abort_uuids - UNION ALL - SELECT NULL, NULL, NULL, NULL, NULL) s; - array_agg | array_agg | array_agg | percentile_disc | percentile_disc | percentile_disc | percentile_disc | rank ---------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+-----------------+-----------------+--------------------------------------+-----------------+------ - {NULL,20010,20009,20008,20007} | {00000000-0000-0000-0000-000000020000,00000000-0000-0000-0000-000000020000,00000000-0000-0000-0000-000000019999,00000000-0000-0000-0000-000000019998,00000000-0000-0000-0000-000000019997} | {9999,9998,9997,9996,9995} | 19810 | 200 | 00000000-0000-0000-0000-000000016003 | 136 | 2 -(1 row) - -ROLLBACK; ----- --- test tuplesort mark/restore ---- -CREATE TEMP TABLE test_mark_restore(col1 int, col2 int, col12 int); --- need a few duplicates for mark/restore to matter -INSERT INTO test_mark_restore(col1, col2, col12) - SELECT a.i, b.i, a.i * b.i FROM generate_series(1, 500) a(i), generate_series(1, 5) b(i); -BEGIN; -SET LOCAL enable_nestloop = off; -SET LOCAL enable_hashjoin = off; -SET LOCAL enable_material = off; --- set query into variable once, to avoid repetition of the fairly long query -SELECT $$ - SELECT col12, count(distinct a.col1), count(distinct a.col2), count(distinct b.col1), count(distinct b.col2), count(*) - FROM test_mark_restore a - JOIN test_mark_restore b USING(col12) - GROUP BY 1 - HAVING count(*) > 1 - ORDER BY 2 DESC, 1 
DESC, 3 DESC, 4 DESC, 5 DESC, 6 DESC - LIMIT 10 -$$ AS qry \gset --- test mark/restore with in-memory sorts -EXPLAIN (COSTS OFF) :qry; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - Limit - -> Sort - Sort Key: (count(DISTINCT a.col1)) DESC, a.col12 DESC, (count(DISTINCT a.col2)) DESC, (count(DISTINCT b.col1)) DESC, (count(DISTINCT b.col2)) DESC, (count(*)) DESC - -> GroupAggregate - Group Key: a.col12 - Filter: (count(*) > 1) - -> Incremental Sort - Sort Key: a.col12 DESC, a.col1 - Presorted Key: a.col12 - -> Merge Join - Merge Cond: (a.col12 = b.col12) - -> Sort - Sort Key: a.col12 DESC - -> Seq Scan on test_mark_restore a - -> Sort - Sort Key: b.col12 DESC - -> Seq Scan on test_mark_restore b -(17 rows) - -:qry; - col12 | count | count | count | count | count --------+-------+-------+-------+-------+------- - 480 | 5 | 5 | 5 | 5 | 25 - 420 | 5 | 5 | 5 | 5 | 25 - 360 | 5 | 5 | 5 | 5 | 25 - 300 | 5 | 5 | 5 | 5 | 25 - 240 | 5 | 5 | 5 | 5 | 25 - 180 | 5 | 5 | 5 | 5 | 25 - 120 | 5 | 5 | 5 | 5 | 25 - 60 | 5 | 5 | 5 | 5 | 25 - 960 | 4 | 4 | 4 | 4 | 16 - 900 | 4 | 4 | 4 | 4 | 16 -(10 rows) - --- test mark/restore with on-disk sorts -SET LOCAL work_mem = '100kB'; -EXPLAIN (COSTS OFF) :qry; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - Limit - -> Sort - Sort Key: (count(DISTINCT a.col1)) DESC, a.col12 DESC, (count(DISTINCT a.col2)) DESC, (count(DISTINCT b.col1)) DESC, (count(DISTINCT b.col2)) DESC, (count(*)) DESC - -> GroupAggregate - Group Key: a.col12 - Filter: (count(*) > 1) - -> Incremental Sort - Sort Key: a.col12 DESC, a.col1 - Presorted Key: a.col12 - -> Merge Join - Merge Cond: (a.col12 = b.col12) - -> Sort - Sort Key: a.col12 DESC - -> Seq Scan on test_mark_restore a - -> Sort - Sort Key: b.col12 DESC - -> Seq Scan on test_mark_restore b -(17 rows) - -:qry; - col12 | count | count | count | count | count --------+-------+-------+-------+-------+------- - 480 | 5 | 5 | 5 | 5 | 25 - 420 | 5 | 5 | 5 | 5 | 25 - 360 | 5 | 5 | 5 | 5 | 25 - 300 | 5 | 5 | 5 | 5 | 25 - 240 | 5 | 5 | 5 | 5 | 25 - 180 | 5 | 5 | 5 | 5 | 25 - 120 | 5 | 5 | 5 | 5 | 25 - 60 | 5 | 5 | 5 | 5 | 25 - 960 | 4 | 4 | 4 | 4 | 16 - 900 | 4 | 4 | 4 | 4 | 16 -(10 rows) - -COMMIT; +WARNING: terminating connection because of crash of another server process +DETAIL: The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory. +HINT: In a moment you should be able to reconnect to the database and repeat your command. +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. 
+connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/stats.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/stats.out --- /tmp/cirrus-ci-build/src/test/regress/expected/stats.out 2024-03-13 23:12:37.627427000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/stats.out 2024-03-13 23:14:45.713643000 +0000 @@ -1203,447 +1203,10 @@ (1 row) REINDEX index CONCURRENTLY stats_test_idx1; --- false for previous oid -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - f -(1 row) - --- true for new oid -SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid \gset -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - t -(1 row) - --- pg_stat_have_stats returns true for a rolled back drop index with stats -BEGIN; -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - t -(1 row) - -DROP index stats_test_idx1; -ROLLBACK; -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - t -(1 row) - --- put enable_seqscan back to on -SET enable_seqscan TO on; --- ensure that stats accessors handle NULL input correctly -SELECT pg_stat_get_replication_slot(NULL); - pg_stat_get_replication_slot ------------------------------- - -(1 row) - -SELECT pg_stat_get_subscription_stats(NULL); - pg_stat_get_subscription_stats --------------------------------- - -(1 row) - --- Test that the following operations are tracked in pg_stat_io: --- - reads of target blocks into shared buffers --- - writes of shared buffers to permanent storage --- - extends of relations using shared buffers --- - fsyncs done to ensure the durability of data dirtying shared buffers --- - shared buffer hits --- There is no test for blocks evicted from shared buffers, because we cannot --- be sure of the state of shared buffers at the point the test is run. --- Create a regular table and insert some data to generate IOCONTEXT_NORMAL --- extends. -SELECT sum(extends) AS io_sum_shared_before_extends - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset -SELECT sum(writes) AS writes, sum(fsyncs) AS fsyncs - FROM pg_stat_io - WHERE object = 'relation' \gset io_sum_shared_before_ -CREATE TABLE test_io_shared(a int); -INSERT INTO test_io_shared SELECT i FROM generate_series(1,100)i; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(extends) AS io_sum_shared_after_extends - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset -SELECT :io_sum_shared_after_extends > :io_sum_shared_before_extends; - ?column? ----------- - t -(1 row) - --- After a checkpoint, there should be some additional IOCONTEXT_NORMAL writes --- and fsyncs. --- See comment above for rationale for two explicit CHECKPOINTs. -CHECKPOINT; -CHECKPOINT; -SELECT sum(writes) AS writes, sum(fsyncs) AS fsyncs - FROM pg_stat_io - WHERE object = 'relation' \gset io_sum_shared_after_ -SELECT :io_sum_shared_after_writes > :io_sum_shared_before_writes; - ?column? ----------- - t -(1 row) - -SELECT current_setting('fsync') = 'off' - OR :io_sum_shared_after_fsyncs > :io_sum_shared_before_fsyncs; - ?column? ----------- - t -(1 row) - --- Change the tablespace so that the table is rewritten directly, then SELECT --- from it to cause it to be read back into shared buffers. 
-SELECT sum(reads) AS io_sum_shared_before_reads - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset --- Do this in a transaction to prevent spurious failures due to concurrent accesses to our newly --- rewritten table, e.g. by autovacuum. -BEGIN; -ALTER TABLE test_io_shared SET TABLESPACE regress_tblspace; --- SELECT from the table so that the data is read into shared buffers and --- context 'normal', object 'relation' reads are counted. -SELECT COUNT(*) FROM test_io_shared; - count -------- - 100 -(1 row) - -COMMIT; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(reads) AS io_sum_shared_after_reads - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset -SELECT :io_sum_shared_after_reads > :io_sum_shared_before_reads; - ?column? ----------- - t -(1 row) - -SELECT sum(hits) AS io_sum_shared_before_hits - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset --- Select from the table again to count hits. --- Ensure we generate hits by forcing a nested loop self-join with no --- materialize node. The outer side's buffer will stay pinned, preventing its --- eviction, while we loop through the inner side and generate hits. -BEGIN; -SET LOCAL enable_nestloop TO on; SET LOCAL enable_mergejoin TO off; -SET LOCAL enable_hashjoin TO off; SET LOCAL enable_material TO off; --- ensure plan stays as we expect it to -EXPLAIN (COSTS OFF) SELECT COUNT(*) FROM test_io_shared t1 INNER JOIN test_io_shared t2 USING (a); - QUERY PLAN -------------------------------------------- - Aggregate - -> Nested Loop - Join Filter: (t1.a = t2.a) - -> Seq Scan on test_io_shared t1 - -> Seq Scan on test_io_shared t2 -(5 rows) - -SELECT COUNT(*) FROM test_io_shared t1 INNER JOIN test_io_shared t2 USING (a); - count -------- - 100 -(1 row) - -COMMIT; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(hits) AS io_sum_shared_after_hits - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset -SELECT :io_sum_shared_after_hits > :io_sum_shared_before_hits; - ?column? ----------- - t -(1 row) - -DROP TABLE test_io_shared; --- Test that the follow IOCONTEXT_LOCAL IOOps are tracked in pg_stat_io: --- - eviction of local buffers in order to reuse them --- - reads of temporary table blocks into local buffers --- - writes of local buffers to permanent storage --- - extends of temporary tables --- Set temp_buffers to its minimum so that we can trigger writes with fewer --- inserted tuples. Do so in a new session in case temporary tables have been --- accessed by previous tests in this session. -\c -SET temp_buffers TO 100; -CREATE TEMPORARY TABLE test_io_local(a int, b TEXT); -SELECT sum(extends) AS extends, sum(evictions) AS evictions, sum(writes) AS writes - FROM pg_stat_io - WHERE context = 'normal' AND object = 'temp relation' \gset io_sum_local_before_ --- Insert tuples into the temporary table, generating extends in the stats. --- Insert enough values that we need to reuse and write out dirty local --- buffers, generating evictions and writes. -INSERT INTO test_io_local SELECT generate_series(1, 5000) as id, repeat('a', 200); --- Ensure the table is large enough to exceed our temp_buffers setting. -SELECT pg_relation_size('test_io_local') / current_setting('block_size')::int8 > 100; - ?column? 
----------- - t -(1 row) - -SELECT sum(reads) AS io_sum_local_before_reads - FROM pg_stat_io WHERE context = 'normal' AND object = 'temp relation' \gset --- Read in evicted buffers, generating reads. -SELECT COUNT(*) FROM test_io_local; - count -------- - 5000 -(1 row) - -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(evictions) AS evictions, - sum(reads) AS reads, - sum(writes) AS writes, - sum(extends) AS extends - FROM pg_stat_io - WHERE context = 'normal' AND object = 'temp relation' \gset io_sum_local_after_ -SELECT :io_sum_local_after_evictions > :io_sum_local_before_evictions, - :io_sum_local_after_reads > :io_sum_local_before_reads, - :io_sum_local_after_writes > :io_sum_local_before_writes, - :io_sum_local_after_extends > :io_sum_local_before_extends; - ?column? | ?column? | ?column? | ?column? -----------+----------+----------+---------- - t | t | t | t -(1 row) - --- Change the tablespaces so that the temporary table is rewritten to other --- local buffers, exercising a different codepath than standard local buffer --- writes. -ALTER TABLE test_io_local SET TABLESPACE regress_tblspace; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(writes) AS io_sum_local_new_tblspc_writes - FROM pg_stat_io WHERE context = 'normal' AND object = 'temp relation' \gset -SELECT :io_sum_local_new_tblspc_writes > :io_sum_local_after_writes; - ?column? ----------- - t -(1 row) - -RESET temp_buffers; --- Test that reuse of strategy buffers and reads of blocks into these reused --- buffers while VACUUMing are tracked in pg_stat_io. If there is sufficient --- demand for shared buffers from concurrent queries, some buffers may be --- pinned by other backends before they can be reused. In such cases, the --- backend will evict a buffer from outside the ring and add it to the --- ring. This is considered an eviction and not a reuse. --- Set wal_skip_threshold smaller than the expected size of --- test_io_vac_strategy so that, even if wal_level is minimal, VACUUM FULL will --- fsync the newly rewritten test_io_vac_strategy instead of writing it to WAL. --- Writing it to WAL will result in the newly written relation pages being in --- shared buffers -- preventing us from testing BAS_VACUUM BufferAccessStrategy --- reads. -SET wal_skip_threshold = '1 kB'; -SELECT sum(reuses) AS reuses, sum(reads) AS reads, sum(evictions) AS evictions - FROM pg_stat_io WHERE context = 'vacuum' \gset io_sum_vac_strategy_before_ -CREATE TABLE test_io_vac_strategy(a int, b int) WITH (autovacuum_enabled = 'false'); -INSERT INTO test_io_vac_strategy SELECT i, i from generate_series(1, 4500)i; --- Ensure that the next VACUUM will need to perform IO by rewriting the table --- first with VACUUM (FULL). -VACUUM (FULL) test_io_vac_strategy; --- Use the minimum BUFFER_USAGE_LIMIT to cause reuses or evictions with the --- smallest table possible. -VACUUM (PARALLEL 0, BUFFER_USAGE_LIMIT 128) test_io_vac_strategy; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(reuses) AS reuses, sum(reads) AS reads, sum(evictions) AS evictions - FROM pg_stat_io WHERE context = 'vacuum' \gset io_sum_vac_strategy_after_ -SELECT :io_sum_vac_strategy_after_reads > :io_sum_vac_strategy_before_reads; - ?column? 
----------- - t -(1 row) - -SELECT (:io_sum_vac_strategy_after_reuses + :io_sum_vac_strategy_after_evictions) > - (:io_sum_vac_strategy_before_reuses + :io_sum_vac_strategy_before_evictions); - ?column? ----------- - t -(1 row) - -RESET wal_skip_threshold; --- Test that extends done by a CTAS, which uses a BAS_BULKWRITE --- BufferAccessStrategy, are tracked in pg_stat_io. -SELECT sum(extends) AS io_sum_bulkwrite_strategy_extends_before - FROM pg_stat_io WHERE context = 'bulkwrite' \gset -CREATE TABLE test_io_bulkwrite_strategy AS SELECT i FROM generate_series(1,100)i; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(extends) AS io_sum_bulkwrite_strategy_extends_after - FROM pg_stat_io WHERE context = 'bulkwrite' \gset -SELECT :io_sum_bulkwrite_strategy_extends_after > :io_sum_bulkwrite_strategy_extends_before; - ?column? ----------- - t -(1 row) - --- Test IO stats reset -SELECT pg_stat_have_stats('io', 0, 0); - pg_stat_have_stats --------------------- - t -(1 row) - -SELECT sum(evictions) + sum(reuses) + sum(extends) + sum(fsyncs) + sum(reads) + sum(writes) + sum(writebacks) + sum(hits) AS io_stats_pre_reset - FROM pg_stat_io \gset -SELECT pg_stat_reset_shared('io'); - pg_stat_reset_shared ----------------------- - -(1 row) - -SELECT sum(evictions) + sum(reuses) + sum(extends) + sum(fsyncs) + sum(reads) + sum(writes) + sum(writebacks) + sum(hits) AS io_stats_post_reset - FROM pg_stat_io \gset -SELECT :io_stats_post_reset < :io_stats_pre_reset; - ?column? ----------- - t -(1 row) - --- test BRIN index doesn't block HOT update -CREATE TABLE brin_hot ( - id integer PRIMARY KEY, - val integer NOT NULL -) WITH (autovacuum_enabled = off, fillfactor = 70); -INSERT INTO brin_hot SELECT *, 0 FROM generate_series(1, 235); -CREATE INDEX val_brin ON brin_hot using brin(val); -CREATE FUNCTION wait_for_hot_stats() RETURNS void AS $$ -DECLARE - start_time timestamptz := clock_timestamp(); - updated bool; -BEGIN - -- we don't want to wait forever; loop will exit after 30 seconds - FOR i IN 1 .. 300 LOOP - SELECT (pg_stat_get_tuples_hot_updated('brin_hot'::regclass::oid) > 0) INTO updated; - EXIT WHEN updated; - - -- wait a little - PERFORM pg_sleep_for('100 milliseconds'); - -- reset stats snapshot so we can test again - PERFORM pg_stat_clear_snapshot(); - END LOOP; - -- report time waited in postmaster log (where it won't change test output) - RAISE log 'wait_for_hot_stats delayed % seconds', - EXTRACT(epoch FROM clock_timestamp() - start_time); -END -$$ LANGUAGE plpgsql; -UPDATE brin_hot SET val = -3 WHERE id = 42; --- We can't just call wait_for_hot_stats() at this point, because we only --- transmit stats when the session goes idle, and we probably didn't --- transmit the last couple of counts yet thanks to the rate-limiting logic --- in pgstat_report_stat(). But instead of waiting for the rate limiter's --- timeout to elapse, let's just start a new session. The old one will --- then send its stats before dying. -\c - -SELECT wait_for_hot_stats(); - wait_for_hot_stats --------------------- - -(1 row) - -SELECT pg_stat_get_tuples_hot_updated('brin_hot'::regclass::oid); - pg_stat_get_tuples_hot_updated --------------------------------- - 1 -(1 row) - -DROP TABLE brin_hot; -DROP FUNCTION wait_for_hot_stats(); --- Test handling of index predicates - updating attributes in precicates --- should not block HOT when summarizing indexes are involved. 
We update --- a row that was not indexed due to the index predicate, and becomes --- indexable - the HOT-updated tuple is forwarded to the BRIN index. -CREATE TABLE brin_hot_2 (a int, b int); -INSERT INTO brin_hot_2 VALUES (1, 100); -CREATE INDEX ON brin_hot_2 USING brin (b) WHERE a = 2; -UPDATE brin_hot_2 SET a = 2; -EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_2 WHERE a = 2 AND b = 100; - QUERY PLAN ------------------------------------ - Seq Scan on brin_hot_2 - Filter: ((a = 2) AND (b = 100)) -(2 rows) - -SELECT COUNT(*) FROM brin_hot_2 WHERE a = 2 AND b = 100; - count -------- - 1 -(1 row) - -SET enable_seqscan = off; -EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_2 WHERE a = 2 AND b = 100; - QUERY PLAN ---------------------------------------------- - Bitmap Heap Scan on brin_hot_2 - Recheck Cond: ((b = 100) AND (a = 2)) - -> Bitmap Index Scan on brin_hot_2_b_idx - Index Cond: (b = 100) -(4 rows) - -SELECT COUNT(*) FROM brin_hot_2 WHERE a = 2 AND b = 100; - count -------- - 1 -(1 row) - -DROP TABLE brin_hot_2; --- Test that updates to indexed columns are still propagated to the --- BRIN column. --- https://postgr.es/m/05ebcb44-f383-86e3-4f31-0a97a55634cf@enterprisedb.com -CREATE TABLE brin_hot_3 (a int, filler text) WITH (fillfactor = 10); -INSERT INTO brin_hot_3 SELECT 1, repeat(' ', 500) FROM generate_series(1, 20); -CREATE INDEX ON brin_hot_3 USING brin (a) WITH (pages_per_range = 1); -UPDATE brin_hot_3 SET a = 2; -EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_3 WHERE a = 2; - QUERY PLAN ---------------------------------------------- - Bitmap Heap Scan on brin_hot_3 - Recheck Cond: (a = 2) - -> Bitmap Index Scan on brin_hot_3_a_idx - Index Cond: (a = 2) -(4 rows) - -SELECT COUNT(*) FROM brin_hot_3 WHERE a = 2; - count -------- - 20 -(1 row) - -DROP TABLE brin_hot_3; -SET enable_seqscan = on; --- End of Stats Test +WARNING: terminating connection because of crash of another server process +DETAIL: The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory. +HINT: In a moment you should be able to reconnect to the database and repeat your command. +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection to server was lost