diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/brin.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/brin.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/brin.out	2024-03-07 14:25:00.329438000 +0000
+++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/brin.out	2024-03-07 14:27:17.003986000 +0000
@@ -539,42 +539,10 @@
 -- vacuum actually removes the TOAST rows. Creating an index concurrently
 -- is a one way to achieve that, because it does exactly such wait.
 CREATE INDEX CONCURRENTLY brin_test_temp_idx ON brintest_3(a);
-DROP INDEX brin_test_temp_idx;
--- vacuum the table, to discard TOAST data
-VACUUM brintest_3;
--- retry insert with a different random-looking (but deterministic) value
--- the value is different, and so should replace either min or max in the
--- brin summary
-WITH rand_value AS (SELECT string_agg(fipshash((-i)::text),'') AS val FROM generate_series(1,60) s(i))
-INSERT INTO brintest_3
-SELECT val, val, val, val FROM rand_value;
--- now try some queries, accessing the brin index
-SET enable_seqscan = off;
-EXPLAIN (COSTS OFF)
-SELECT * FROM brintest_3 WHERE b < '0';
-                   QUERY PLAN                   
-------------------------------------------------
- Bitmap Heap Scan on brintest_3
-   Recheck Cond: (b < '0'::text)
-   ->  Bitmap Index Scan on brin_test_toast_idx
-         Index Cond: (b < '0'::text)
-(4 rows)
-
-SELECT * FROM brintest_3 WHERE b < '0';
- a | b | c | d 
----+---+---+---
-(0 rows)
-
-DROP TABLE brintest_3;
-RESET enable_seqscan;
--- test an unlogged table, mostly to get coverage of brinbuildempty
-CREATE UNLOGGED TABLE brintest_unlogged (n numrange);
-CREATE INDEX brinidx_unlogged ON brintest_unlogged USING brin (n);
-INSERT INTO brintest_unlogged VALUES (numrange(0, 2^1000::numeric));
-DROP TABLE brintest_unlogged;
--- test that the insert optimization works if no rows end up inserted
-CREATE TABLE brin_insert_optimization (a int);
-INSERT INTO brin_insert_optimization VALUES (1);
-CREATE INDEX ON brin_insert_optimization USING brin (a);
-UPDATE brin_insert_optimization SET a = a;
-DROP TABLE brin_insert_optimization;
+WARNING:  terminating connection because of crash of another server process
+DETAIL:  The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory.
+HINT:  In a moment you should be able to reconnect to the database and repeat your command.
+server closed the connection unexpectedly
+	This probably means the server terminated abnormally
+	before or while processing the request.
+connection to server was lost
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/privileges.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/privileges.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/privileges.out	2024-03-07 14:25:00.333041000 +0000
+++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/privileges.out	2024-03-07 14:27:17.002940000 +0000
@@ -1731,1185 +1731,10 @@
 -- Do the same concurrently
 CREATE INDEX CONCURRENTLY sro_idx ON sro_tab ((sro_ifun(a) + sro_ifun(0)))
 	WHERE sro_ifun(a + 10) > sro_ifun(10);
--- REINDEX
-REINDEX TABLE sro_tab;
-REINDEX INDEX sro_idx;
-REINDEX TABLE CONCURRENTLY sro_tab;
-DROP INDEX sro_idx;
--- CLUSTER
-CREATE INDEX sro_cluster_idx ON sro_tab ((sro_ifun(a) + sro_ifun(0)));
-CLUSTER sro_tab USING sro_cluster_idx;
-DROP INDEX sro_cluster_idx;
--- BRIN index
-CREATE INDEX sro_brin ON sro_tab USING brin ((sro_ifun(a) + sro_ifun(0)));
-SELECT brin_desummarize_range('sro_brin', 0);
- brin_desummarize_range 
-------------------------
- 
-(1 row)
-
-SELECT brin_summarize_range('sro_brin', 0);
- brin_summarize_range 
----------------------
-                    1
-(1 row)
-
-DROP TABLE sro_tab;
--- Check with a partitioned table
-CREATE TABLE sro_ptab (a int) PARTITION BY RANGE (a);
-ALTER TABLE sro_ptab OWNER TO regress_sro_user;
-CREATE TABLE sro_part PARTITION OF sro_ptab FOR VALUES FROM (1) TO (10);
-ALTER TABLE sro_part OWNER TO regress_sro_user;
-INSERT INTO sro_ptab VALUES (1), (2), (3);
-CREATE INDEX sro_pidx ON sro_ptab ((sro_ifun(a) + sro_ifun(0)))
-  WHERE sro_ifun(a + 10) > sro_ifun(10);
-REINDEX TABLE sro_ptab;
-REINDEX INDEX CONCURRENTLY sro_pidx;
-SET SESSION AUTHORIZATION regress_sro_user;
-CREATE FUNCTION unwanted_grant() RETURNS void LANGUAGE sql AS
-  'GRANT regress_priv_group2 TO regress_sro_user';
-CREATE FUNCTION mv_action() RETURNS bool LANGUAGE sql AS
-  'DECLARE c CURSOR WITH HOLD FOR SELECT public.unwanted_grant(); SELECT true';
--- REFRESH of this MV will queue a GRANT at end of transaction
-CREATE MATERIALIZED VIEW sro_mv AS SELECT mv_action() WITH NO DATA;
-REFRESH MATERIALIZED VIEW sro_mv;
-ERROR:  cannot create a cursor WITH HOLD within security-restricted operation
-CONTEXT:  SQL function "mv_action" statement 1
-\c -
-REFRESH MATERIALIZED VIEW sro_mv;
-ERROR:  cannot create a cursor WITH HOLD within security-restricted operation
-CONTEXT:  SQL function "mv_action" statement 1
-SET SESSION AUTHORIZATION regress_sro_user;
--- INSERT to this table will queue a GRANT at end of transaction
-CREATE TABLE sro_trojan_table ();
-CREATE FUNCTION sro_trojan() RETURNS trigger LANGUAGE plpgsql AS
-  'BEGIN PERFORM public.unwanted_grant(); RETURN NULL; END';
-CREATE CONSTRAINT TRIGGER t AFTER INSERT ON sro_trojan_table
-  INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE sro_trojan();
--- Now, REFRESH will issue such an INSERT, queueing the GRANT
-CREATE OR REPLACE FUNCTION mv_action() RETURNS bool LANGUAGE sql AS
-  'INSERT INTO public.sro_trojan_table DEFAULT VALUES; SELECT true';
-REFRESH MATERIALIZED VIEW sro_mv;
-ERROR:  cannot fire deferred trigger within security-restricted operation
-CONTEXT:  SQL function "mv_action" statement 1
-\c -
-REFRESH MATERIALIZED VIEW sro_mv;
-ERROR:  cannot fire deferred trigger within security-restricted operation
-CONTEXT:  SQL function "mv_action" statement 1
-BEGIN; SET CONSTRAINTS ALL IMMEDIATE; REFRESH MATERIALIZED VIEW sro_mv; COMMIT;
-ERROR:  permission denied to grant role "regress_priv_group2"
-DETAIL:  Only roles with the ADMIN option on role "regress_priv_group2" may grant this role.
-CONTEXT:  SQL function "unwanted_grant" statement 1
-SQL statement "SELECT public.unwanted_grant()"
-PL/pgSQL function public.sro_trojan() line 1 at PERFORM
-SQL function "mv_action" statement 1
--- REFRESH MATERIALIZED VIEW CONCURRENTLY use of eval_const_expressions()
-SET SESSION AUTHORIZATION regress_sro_user;
-CREATE FUNCTION unwanted_grant_nofail(int) RETURNS int
-  IMMUTABLE LANGUAGE plpgsql AS $$
-BEGIN
-  PERFORM public.unwanted_grant();
-  RAISE WARNING 'owned';
-  RETURN 1;
-EXCEPTION WHEN OTHERS THEN
-  RETURN 2;
-END$$;
-CREATE MATERIALIZED VIEW sro_index_mv AS SELECT 1 AS c;
-CREATE UNIQUE INDEX ON sro_index_mv (c) WHERE unwanted_grant_nofail(1) > 0;
-\c -
-REFRESH MATERIALIZED VIEW CONCURRENTLY sro_index_mv;
-REFRESH MATERIALIZED VIEW sro_index_mv;
-DROP OWNED BY regress_sro_user;
-DROP ROLE regress_sro_user;
--- Admin options
-SET SESSION AUTHORIZATION regress_priv_user4;
-CREATE FUNCTION dogrant_ok() RETURNS void LANGUAGE sql SECURITY DEFINER AS
-  'GRANT regress_priv_group2 TO regress_priv_user5';
-GRANT regress_priv_group2 TO regress_priv_user5; -- ok: had ADMIN OPTION
-SET ROLE regress_priv_group2;
-GRANT regress_priv_group2 TO regress_priv_user5; -- fails: SET ROLE suspended privilege
-ERROR:  permission denied to grant role "regress_priv_group2"
-DETAIL:  Only roles with the ADMIN option on role "regress_priv_group2" may grant this role.
-SET SESSION AUTHORIZATION regress_priv_user1;
-GRANT regress_priv_group2 TO regress_priv_user5; -- fails: no ADMIN OPTION
-ERROR:  permission denied to grant role "regress_priv_group2"
-DETAIL:  Only roles with the ADMIN option on role "regress_priv_group2" may grant this role.
-SELECT dogrant_ok(); -- ok: SECURITY DEFINER conveys ADMIN
-NOTICE:  role "regress_priv_user5" has already been granted membership in role "regress_priv_group2" by role "regress_priv_user4"
- dogrant_ok 
-------------
- 
-(1 row)
-
-SET ROLE regress_priv_group2;
-GRANT regress_priv_group2 TO regress_priv_user5; -- fails: SET ROLE did not help
-ERROR:  permission denied to grant role "regress_priv_group2"
-DETAIL:  Only roles with the ADMIN option on role "regress_priv_group2" may grant this role.
-SET SESSION AUTHORIZATION regress_priv_group2;
-GRANT regress_priv_group2 TO regress_priv_user5; -- fails: no self-admin
-ERROR:  permission denied to grant role "regress_priv_group2"
-DETAIL:  Only roles with the ADMIN option on role "regress_priv_group2" may grant this role.
-SET SESSION AUTHORIZATION regress_priv_user4;
-DROP FUNCTION dogrant_ok();
-REVOKE regress_priv_group2 FROM regress_priv_user5;
--- has_sequence_privilege tests
-\c -
-CREATE SEQUENCE x_seq;
-GRANT USAGE on x_seq to regress_priv_user2;
-SELECT has_sequence_privilege('regress_priv_user1', 'atest1', 'SELECT');
-ERROR:  "atest1" is not a sequence
-SELECT has_sequence_privilege('regress_priv_user1', 'x_seq', 'INSERT');
-ERROR:  unrecognized privilege type: "INSERT"
-SELECT has_sequence_privilege('regress_priv_user1', 'x_seq', 'SELECT');
- has_sequence_privilege 
-------------------------
- f
-(1 row)
-
-SET SESSION AUTHORIZATION regress_priv_user2;
-SELECT has_sequence_privilege('x_seq', 'USAGE');
- has_sequence_privilege 
-------------------------
- t
-(1 row)
-
--- largeobject privilege tests
-\c -
-SET SESSION AUTHORIZATION regress_priv_user1;
-SELECT lo_create(1001);
- lo_create 
------------
-      1001
-(1 row)
-
-SELECT lo_create(1002);
- lo_create 
------------
-      1002
-(1 row)
-
-SELECT lo_create(1003);
- lo_create 
------------
-      1003
-(1 row)
-
-SELECT lo_create(1004);
- lo_create 
------------
-      1004
-(1 row)
-
-SELECT lo_create(1005);
- lo_create 
------------
-      1005
-(1 row)
-
-GRANT ALL ON LARGE OBJECT 1001 TO PUBLIC;
-GRANT SELECT ON LARGE OBJECT 1003 TO regress_priv_user2;
-GRANT SELECT,UPDATE ON LARGE OBJECT 1004 TO regress_priv_user2;
-GRANT ALL ON LARGE OBJECT 1005 TO regress_priv_user2;
-GRANT SELECT ON LARGE OBJECT 1005 TO regress_priv_user2 WITH GRANT OPTION;
-GRANT SELECT, INSERT ON LARGE OBJECT 1001 TO PUBLIC; -- to be failed
-ERROR:  invalid privilege type INSERT for large object
-GRANT SELECT, UPDATE ON LARGE OBJECT 1001 TO nosuchuser; -- to be failed
-ERROR:  role "nosuchuser" does not exist
-GRANT SELECT, UPDATE ON LARGE OBJECT 999 TO PUBLIC; -- to be failed
-ERROR:  large object 999 does not exist
-\c -
-SET SESSION AUTHORIZATION regress_priv_user2;
-SELECT lo_create(2001);
- lo_create 
------------
-      2001
-(1 row)
-
-SELECT lo_create(2002);
- lo_create 
------------
-      2002
-(1 row)
-
-SELECT loread(lo_open(1001, x'20000'::int), 32); -- allowed, for now
- loread 
---------
- \x
-(1 row)
-
-SELECT lowrite(lo_open(1001, x'40000'::int), 'abcd'); -- fail, wrong mode
-ERROR:  large object descriptor 0 was not opened for writing
-SELECT loread(lo_open(1001, x'40000'::int), 32);
- loread 
---------
- \x
-(1 row)
-
-SELECT loread(lo_open(1002, x'40000'::int), 32); -- to be denied
-ERROR:  permission denied for large object 1002
-SELECT loread(lo_open(1003, x'40000'::int), 32);
- loread 
---------
- \x
-(1 row)
-
-SELECT loread(lo_open(1004, x'40000'::int), 32);
- loread 
---------
- \x
-(1 row)
-
-SELECT lowrite(lo_open(1001, x'20000'::int), 'abcd');
- lowrite 
----------
-       4
-(1 row)
-
-SELECT lowrite(lo_open(1002, x'20000'::int), 'abcd'); -- to be denied
-ERROR:  permission denied for large object 1002
-SELECT lowrite(lo_open(1003, x'20000'::int), 'abcd'); -- to be denied
-ERROR:  permission denied for large object 1003
-SELECT lowrite(lo_open(1004, x'20000'::int), 'abcd');
- lowrite 
----------
-       4
-(1 row)
-
-GRANT SELECT ON LARGE OBJECT 1005 TO regress_priv_user3;
-GRANT UPDATE ON LARGE OBJECT 1006 TO regress_priv_user3; -- to be denied
-ERROR:  large object 1006 does not exist
-REVOKE ALL ON LARGE OBJECT 2001, 2002 FROM PUBLIC;
-GRANT ALL ON LARGE OBJECT 2001 TO regress_priv_user3;
-SELECT lo_unlink(1001); -- to be denied
-ERROR:  must be owner of large object 1001
-SELECT lo_unlink(2002);
- lo_unlink 
------------
-         1
-(1 row)
-
-\c -
--- confirm ACL setting
-SELECT oid, pg_get_userbyid(lomowner) ownername, lomacl FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
- oid  |     ownername      |                                                            lomacl                                                            
-------+--------------------+------------------------------------------------------------------------------------------------------------------------------
- 1001 | regress_priv_user1 | {regress_priv_user1=rw/regress_priv_user1,=rw/regress_priv_user1}
- 1002 | regress_priv_user1 | 
- 1003 | regress_priv_user1 | {regress_priv_user1=rw/regress_priv_user1,regress_priv_user2=r/regress_priv_user1}
- 1004 | regress_priv_user1 | {regress_priv_user1=rw/regress_priv_user1,regress_priv_user2=rw/regress_priv_user1}
- 1005 | regress_priv_user1 | {regress_priv_user1=rw/regress_priv_user1,regress_priv_user2=r*w/regress_priv_user1,regress_priv_user3=r/regress_priv_user2}
- 2001 | regress_priv_user2 | {regress_priv_user2=rw/regress_priv_user2,regress_priv_user3=rw/regress_priv_user2}
-(6 rows)
-
-SET SESSION AUTHORIZATION regress_priv_user3;
-SELECT loread(lo_open(1001, x'40000'::int), 32);
-   loread   
-------------
- \x61626364
-(1 row)
-
-SELECT loread(lo_open(1003, x'40000'::int), 32); -- to be denied
-ERROR:  permission denied for large object 1003
-SELECT loread(lo_open(1005, x'40000'::int), 32);
- loread 
---------
- \x
-(1 row)
-
-SELECT lo_truncate(lo_open(1005, x'20000'::int), 10); -- to be denied
-ERROR:  permission denied for large object 1005
-SELECT lo_truncate(lo_open(2001, x'20000'::int), 10);
- lo_truncate 
--------------
-           0
-(1 row)
-
--- compatibility mode in largeobject permission
-\c -
-SET lo_compat_privileges = false; -- default setting
-SET SESSION AUTHORIZATION regress_priv_user4;
-SELECT loread(lo_open(1002, x'40000'::int), 32); -- to be denied
-ERROR:  permission denied for large object 1002
-SELECT lowrite(lo_open(1002, x'20000'::int), 'abcd'); -- to be denied
-ERROR:  permission denied for large object 1002
-SELECT lo_truncate(lo_open(1002, x'20000'::int), 10); -- to be denied
-ERROR:  permission denied for large object 1002
-SELECT lo_put(1002, 1, 'abcd'); -- to be denied
-ERROR:  permission denied for large object 1002
-SELECT lo_unlink(1002); -- to be denied
-ERROR:  must be owner of large object 1002
-SELECT lo_export(1001, '/dev/null'); -- to be denied
-ERROR:  permission denied for function lo_export
-SELECT lo_import('/dev/null'); -- to be denied
-ERROR:  permission denied for function lo_import
-SELECT lo_import('/dev/null', 2003); -- to be denied
-ERROR:  permission denied for function lo_import
-\c -
-SET lo_compat_privileges = true; -- compatibility mode
-SET SESSION AUTHORIZATION regress_priv_user4;
-SELECT loread(lo_open(1002, x'40000'::int), 32);
- loread 
---------
- \x
-(1 row)
-
-SELECT lowrite(lo_open(1002, x'20000'::int), 'abcd');
- lowrite 
----------
-       4
-(1 row)
-
-SELECT lo_truncate(lo_open(1002, x'20000'::int), 10);
- lo_truncate 
--------------
-           0
-(1 row)
-
-SELECT lo_unlink(1002);
- lo_unlink 
------------
-         1
-(1 row)
-
-SELECT lo_export(1001, '/dev/null'); -- to be denied
-ERROR:  permission denied for function lo_export
--- don't allow unpriv users to access pg_largeobject contents
-\c -
-SELECT * FROM pg_largeobject LIMIT 0;
- loid | pageno | data 
-------+--------+------
-(0 rows)
-
-SET SESSION AUTHORIZATION regress_priv_user1;
-SELECT * FROM pg_largeobject LIMIT 0; -- to be denied
-ERROR:  permission denied for table pg_largeobject
--- pg_signal_backend can't signal superusers
-RESET SESSION AUTHORIZATION;
-BEGIN;
-CREATE OR REPLACE FUNCTION terminate_nothrow(pid int) RETURNS bool
-  LANGUAGE plpgsql SECURITY DEFINER SET client_min_messages = error AS $$
-BEGIN
-  RETURN pg_terminate_backend($1);
-EXCEPTION WHEN OTHERS THEN
-  RETURN false;
-END$$;
-ALTER FUNCTION terminate_nothrow OWNER TO pg_signal_backend;
-SELECT backend_type FROM pg_stat_activity
-WHERE CASE WHEN COALESCE(usesysid, 10) = 10 THEN terminate_nothrow(pid) END;
- backend_type 
---------------
-(0 rows)
-
-ROLLBACK;
--- test pg_database_owner
-RESET SESSION AUTHORIZATION;
-GRANT pg_database_owner TO regress_priv_user1;
-ERROR:  role "pg_database_owner" cannot have explicit members
-GRANT regress_priv_user1 TO pg_database_owner;
-ERROR:  role "pg_database_owner" cannot be a member of any role
-CREATE TABLE datdba_only ();
-ALTER TABLE datdba_only OWNER TO pg_database_owner;
-REVOKE DELETE ON datdba_only FROM pg_database_owner;
-SELECT
-  pg_has_role('regress_priv_user1', 'pg_database_owner', 'USAGE') as priv,
-  pg_has_role('regress_priv_user1', 'pg_database_owner', 'MEMBER') as mem,
-  pg_has_role('regress_priv_user1', 'pg_database_owner',
-              'MEMBER WITH ADMIN OPTION') as admin;
- priv | mem | admin 
-------+-----+-------
- f    | f   | f
-(1 row)
-
-BEGIN;
-DO $$BEGIN EXECUTE format(
-  'ALTER DATABASE %I OWNER TO regress_priv_group2', current_catalog); END$$;
-SELECT
-  pg_has_role('regress_priv_user1', 'pg_database_owner', 'USAGE') as priv,
-  pg_has_role('regress_priv_user1', 'pg_database_owner', 'MEMBER') as mem,
-  pg_has_role('regress_priv_user1', 'pg_database_owner',
-              'MEMBER WITH ADMIN OPTION') as admin;
- priv | mem | admin 
-------+-----+-------
- t    | t   | f
-(1 row)
-
-SET SESSION AUTHORIZATION regress_priv_user1;
-TABLE information_schema.enabled_roles ORDER BY role_name COLLATE "C";
-      role_name      
---------------------
- pg_database_owner
- regress_priv_group2
- regress_priv_user1
-(3 rows)
-
-TABLE information_schema.applicable_roles ORDER BY role_name COLLATE "C";
-       grantee       |      role_name      | is_grantable 
---------------------+---------------------+--------------
- regress_priv_group2 | pg_database_owner   | NO
- regress_priv_user1  | regress_priv_group2 | NO
-(2 rows)
-
-INSERT INTO datdba_only DEFAULT VALUES;
-SAVEPOINT q; DELETE FROM datdba_only; ROLLBACK TO q;
-ERROR:  permission denied for table datdba_only
-SET SESSION AUTHORIZATION regress_priv_user2;
-TABLE information_schema.enabled_roles;
-     role_name      
--------------------
- regress_priv_user2
-(1 row)
-
-INSERT INTO datdba_only DEFAULT VALUES;
-ERROR:  permission denied for table datdba_only
-ROLLBACK;
--- test default ACLs
-\c -
-CREATE SCHEMA testns;
-GRANT ALL ON SCHEMA testns TO regress_priv_user1;
-CREATE TABLE testns.acltest1 (x int);
-SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- no
- has_table_privilege 
---------------------
- f
-(1 row)
-
-SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no
- has_table_privilege 
---------------------
- f
-(1 row)
-
--- placeholder for test with duplicated schema and role names
-ALTER DEFAULT PRIVILEGES IN SCHEMA testns,testns GRANT SELECT ON TABLES TO public,public;
-SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- no
- has_table_privilege 
---------------------
- f
-(1 row)
-
-SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no
- has_table_privilege 
---------------------
- f
-(1 row)
-
-DROP TABLE testns.acltest1;
-CREATE TABLE testns.acltest1 (x int);
-SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- yes
- has_table_privilege 
---------------------
- t
-(1 row)
-
-SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no
- has_table_privilege 
---------------------
- f
-(1 row)
-
-ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT INSERT ON TABLES TO regress_priv_user1;
-DROP TABLE testns.acltest1;
-CREATE TABLE testns.acltest1 (x int);
-SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- yes
- has_table_privilege 
---------------------
- t
-(1 row)
-
-SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- yes
- has_table_privilege 
---------------------
- t
-(1 row)
-
-ALTER DEFAULT PRIVILEGES IN SCHEMA testns REVOKE INSERT ON TABLES FROM regress_priv_user1;
-DROP TABLE testns.acltest1;
-CREATE TABLE testns.acltest1 (x int);
-SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- yes
- has_table_privilege 
---------------------
- t
-(1 row)
-
-SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no
- has_table_privilege 
---------------------
- f
-(1 row)
-
-ALTER DEFAULT PRIVILEGES FOR ROLE regress_priv_user1 REVOKE EXECUTE ON FUNCTIONS FROM public;
-ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT USAGE ON SCHEMAS TO regress_priv_user2; -- error
-ERROR:  cannot use IN SCHEMA clause when using GRANT/REVOKE ON SCHEMAS
--- Test makeaclitem()
-SELECT makeaclitem('regress_priv_user1'::regrole, 'regress_priv_user2'::regrole,
-                   'SELECT', TRUE); -- single privilege
-                makeaclitem                
------------------------------------------
- regress_priv_user1=r*/regress_priv_user2
-(1 row)
-
-SELECT makeaclitem('regress_priv_user1'::regrole, 'regress_priv_user2'::regrole,
-                   'SELECT, INSERT, UPDATE , DELETE  ', FALSE); -- multiple privileges
-                 makeaclitem                 
--------------------------------------------
- regress_priv_user1=arwd/regress_priv_user2
-(1 row)
-
-SELECT makeaclitem('regress_priv_user1'::regrole, 'regress_priv_user2'::regrole,
-                   'SELECT, fake_privilege', FALSE); -- error
-ERROR:  unrecognized privilege type: "fake_privilege"
--- Test non-throwing aclitem I/O
-SELECT pg_input_is_valid('regress_priv_user1=r/regress_priv_user2', 'aclitem');
- pg_input_is_valid 
-------------------
- t
-(1 row)
-
-SELECT pg_input_is_valid('regress_priv_user1=r/', 'aclitem');
- pg_input_is_valid 
-------------------
- f
-(1 row)
-
-SELECT * FROM pg_input_error_info('regress_priv_user1=r/', 'aclitem');
-             message             | detail | hint | sql_error_code 
---------------------------------+--------+------+----------------
- a name must follow the "/" sign |        |      | 22P02
-(1 row)
-
-SELECT pg_input_is_valid('regress_priv_user1=r/regress_no_such_user', 'aclitem');
- pg_input_is_valid 
-------------------
- f
-(1 row)
-
-SELECT * FROM pg_input_error_info('regress_priv_user1=r/regress_no_such_user', 'aclitem');
-                  message                   | detail | hint | sql_error_code 
--------------------------------------------+--------+------+----------------
- role "regress_no_such_user" does not exist |        |      | 42704
-(1 row)
-
-SELECT pg_input_is_valid('regress_priv_user1=rY', 'aclitem');
- pg_input_is_valid 
-------------------
- f
-(1 row)
-
-SELECT * FROM pg_input_error_info('regress_priv_user1=rY', 'aclitem');
-                         message                          | detail | hint | sql_error_code 
----------------------------------------------------------+--------+------+----------------
- invalid mode character: must be one of "arwdDxtXUCTcsA"  |        |      | 22P02
-(1 row)
-
---
--- Testing blanket default grants is very hazardous since it might change
--- the privileges attached to objects created by concurrent regression tests.
--- To avoid that, be sure to revoke the privileges again before committing.
---
-BEGIN;
-ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO regress_priv_user2;
-CREATE SCHEMA testns2;
-SELECT has_schema_privilege('regress_priv_user2', 'testns2', 'USAGE'); -- yes
- has_schema_privilege 
----------------------
- t
-(1 row)
-
-SELECT has_schema_privilege('regress_priv_user6', 'testns2', 'USAGE'); -- yes
- has_schema_privilege 
----------------------
- t
-(1 row)
-
-SELECT has_schema_privilege('regress_priv_user2', 'testns2', 'CREATE'); -- no
- has_schema_privilege 
----------------------
- f
-(1 row)
-
-ALTER DEFAULT PRIVILEGES REVOKE USAGE ON SCHEMAS FROM regress_priv_user2;
-CREATE SCHEMA testns3;
-SELECT has_schema_privilege('regress_priv_user2', 'testns3', 'USAGE'); -- no
- has_schema_privilege 
----------------------
- f
-(1 row)
-
-SELECT has_schema_privilege('regress_priv_user2', 'testns3', 'CREATE'); -- no
- has_schema_privilege 
----------------------
- f
-(1 row)
-
-ALTER DEFAULT PRIVILEGES GRANT ALL ON SCHEMAS TO regress_priv_user2;
-CREATE SCHEMA testns4;
-SELECT has_schema_privilege('regress_priv_user2', 'testns4', 'USAGE'); -- yes
- has_schema_privilege 
----------------------
- t
-(1 row)
-
-SELECT has_schema_privilege('regress_priv_user2', 'testns4', 'CREATE'); -- yes
- has_schema_privilege 
----------------------
- t
-(1 row)
-
-ALTER DEFAULT PRIVILEGES REVOKE ALL ON SCHEMAS FROM regress_priv_user2;
-COMMIT;
--- Test for DROP OWNED BY with shared dependencies.  This is done in a
--- separate, rollbacked, transaction to avoid any trouble with other
--- regression sessions.
-BEGIN;
-ALTER DEFAULT PRIVILEGES GRANT ALL ON FUNCTIONS TO regress_priv_user2;
-ALTER DEFAULT PRIVILEGES GRANT ALL ON SCHEMAS TO regress_priv_user2;
-ALTER DEFAULT PRIVILEGES GRANT ALL ON SEQUENCES TO regress_priv_user2;
-ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO regress_priv_user2;
-ALTER DEFAULT PRIVILEGES GRANT ALL ON TYPES TO regress_priv_user2;
-SELECT count(*) FROM pg_shdepend
-  WHERE deptype = 'a' AND
-        refobjid = 'regress_priv_user2'::regrole AND
-        classid = 'pg_default_acl'::regclass;
- count 
-------
-     5
-(1 row)
-
-DROP OWNED BY regress_priv_user2, regress_priv_user2;
-SELECT count(*) FROM pg_shdepend
-  WHERE deptype = 'a' AND
-        refobjid = 'regress_priv_user2'::regrole AND
-        classid = 'pg_default_acl'::regclass;
- count 
-------
-     0
-(1 row)
-
-ROLLBACK;
-CREATE SCHEMA testns5;
-SELECT has_schema_privilege('regress_priv_user2', 'testns5', 'USAGE'); -- no
- has_schema_privilege 
----------------------
- f
-(1 row)
-
-SELECT has_schema_privilege('regress_priv_user2', 'testns5', 'CREATE'); -- no
- has_schema_privilege 
----------------------
- f
-(1 row)
-
-SET ROLE regress_priv_user1;
-CREATE FUNCTION testns.foo() RETURNS int AS 'select 1' LANGUAGE sql;
-CREATE AGGREGATE testns.agg1(int) (sfunc = int4pl, stype = int4);
-CREATE PROCEDURE testns.bar() AS 'select 1' LANGUAGE sql;
-SELECT has_function_privilege('regress_priv_user2', 'testns.foo()', 'EXECUTE'); -- no
- has_function_privilege 
------------------------
- f
-(1 row)
-
-SELECT has_function_privilege('regress_priv_user2', 'testns.agg1(int)', 'EXECUTE'); -- no
- has_function_privilege 
------------------------
- f
-(1 row)
-
-SELECT has_function_privilege('regress_priv_user2', 'testns.bar()', 'EXECUTE'); -- no
- has_function_privilege 
------------------------
- f
-(1 row)
-
-ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT EXECUTE ON ROUTINES to public;
-DROP FUNCTION testns.foo();
-CREATE FUNCTION testns.foo() RETURNS int AS 'select 1' LANGUAGE sql;
-DROP AGGREGATE testns.agg1(int);
-CREATE AGGREGATE testns.agg1(int) (sfunc = int4pl, stype = int4);
-DROP PROCEDURE testns.bar();
-CREATE PROCEDURE testns.bar() AS 'select 1' LANGUAGE sql;
-SELECT has_function_privilege('regress_priv_user2', 'testns.foo()', 'EXECUTE'); -- yes
- has_function_privilege 
------------------------
- t
-(1 row)
-
-SELECT has_function_privilege('regress_priv_user2', 'testns.agg1(int)', 'EXECUTE'); -- yes
- has_function_privilege 
------------------------
- t
-(1 row)
-
-SELECT has_function_privilege('regress_priv_user2', 'testns.bar()', 'EXECUTE'); -- yes (counts as function here)
- has_function_privilege 
------------------------
- t
-(1 row)
-
-DROP FUNCTION testns.foo();
-DROP AGGREGATE testns.agg1(int);
-DROP PROCEDURE testns.bar();
-ALTER DEFAULT PRIVILEGES FOR ROLE regress_priv_user1 REVOKE USAGE ON TYPES FROM public;
-CREATE DOMAIN testns.priv_testdomain1 AS int;
-SELECT has_type_privilege('regress_priv_user2', 'testns.priv_testdomain1', 'USAGE'); -- no
- has_type_privilege 
--------------------
- f
-(1 row)
-
-ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT USAGE ON TYPES to public;
-DROP DOMAIN testns.priv_testdomain1;
-CREATE DOMAIN testns.priv_testdomain1 AS int;
-SELECT has_type_privilege('regress_priv_user2', 'testns.priv_testdomain1', 'USAGE'); -- yes
- has_type_privilege 
--------------------
- t
-(1 row)
-
-DROP DOMAIN testns.priv_testdomain1;
-RESET ROLE;
-SELECT count(*)
-  FROM pg_default_acl d LEFT JOIN pg_namespace n ON defaclnamespace = n.oid
-  WHERE nspname = 'testns';
- count 
-------
-     3
-(1 row)
-
-DROP SCHEMA testns CASCADE;
-NOTICE:  drop cascades to table testns.acltest1
-DROP SCHEMA testns2 CASCADE;
-DROP SCHEMA testns3 CASCADE;
-DROP SCHEMA testns4 CASCADE;
-DROP SCHEMA testns5 CASCADE;
-SELECT d.*     -- check that entries went away
-  FROM pg_default_acl d LEFT JOIN pg_namespace n ON defaclnamespace = n.oid
-  WHERE nspname IS NULL AND defaclnamespace != 0;
- oid | defaclrole | defaclnamespace | defaclobjtype | defaclacl 
------+------------+-----------------+---------------+-----------
-(0 rows)
-
--- Grant on all objects of given type in a schema
-\c -
-CREATE SCHEMA testns;
-CREATE TABLE testns.t1 (f1 int);
-CREATE TABLE testns.t2 (f1 int);
-SELECT has_table_privilege('regress_priv_user1', 'testns.t1', 'SELECT'); -- false
- has_table_privilege 
---------------------
- f
-(1 row)
-
-GRANT ALL ON ALL TABLES IN SCHEMA testns TO regress_priv_user1;
-SELECT has_table_privilege('regress_priv_user1', 'testns.t1', 'SELECT'); -- true
- has_table_privilege 
---------------------
- t
-(1 row)
-
-SELECT has_table_privilege('regress_priv_user1', 'testns.t2', 'SELECT'); -- true
- has_table_privilege 
---------------------
- t
-(1 row)
-
-REVOKE ALL ON ALL TABLES IN SCHEMA testns FROM regress_priv_user1;
-SELECT has_table_privilege('regress_priv_user1', 'testns.t1', 'SELECT'); -- false
- has_table_privilege 
---------------------
- f
-(1 row)
-
-SELECT has_table_privilege('regress_priv_user1', 'testns.t2', 'SELECT'); -- false
- has_table_privilege 
---------------------
- f
-(1 row)
-
-CREATE FUNCTION testns.priv_testfunc(int) RETURNS int AS 'select 3 * $1;' LANGUAGE sql;
-CREATE AGGREGATE testns.priv_testagg(int) (sfunc = int4pl, stype = int4);
-CREATE PROCEDURE testns.priv_testproc(int) AS 'select 3' LANGUAGE sql;
-SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- true by default
- has_function_privilege 
------------------------
- t
-(1 row)
-
-SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testagg(int)', 'EXECUTE'); -- true by default
- has_function_privilege 
------------------------
- t
-(1 row)
-
-SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- true by default
- has_function_privilege 
------------------------
- t
-(1 row)
-
-REVOKE ALL ON ALL FUNCTIONS IN SCHEMA testns FROM PUBLIC;
-SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- false
- has_function_privilege 
------------------------
- f
-(1 row)
-
-SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testagg(int)', 'EXECUTE'); -- false
- has_function_privilege 
------------------------
- f
-(1 row)
-
-SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- still true, not a function
- has_function_privilege 
------------------------
- t
-(1 row)
-
-REVOKE ALL ON ALL PROCEDURES IN SCHEMA testns FROM PUBLIC;
-SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- now false
- has_function_privilege 
------------------------
- f
-(1 row)
-
-GRANT ALL ON ALL ROUTINES IN SCHEMA testns TO PUBLIC;
-SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- true
- has_function_privilege 
------------------------
- t
-(1 row)
-
-SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testagg(int)', 'EXECUTE'); -- true
- has_function_privilege 
------------------------
- t
-(1 row)
-
-SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- true
- has_function_privilege 
------------------------
- t
-(1 row)
-
-DROP SCHEMA testns CASCADE;
-NOTICE:  drop cascades to 5 other objects
-DETAIL:  drop cascades to table testns.t1
-drop cascades to table testns.t2
-drop cascades to function testns.priv_testfunc(integer)
-drop cascades to function testns.priv_testagg(integer)
-drop cascades to function testns.priv_testproc(integer)
--- Change owner of the schema & and rename of new schema owner
-\c -
-CREATE ROLE regress_schemauser1 superuser login;
-CREATE ROLE regress_schemauser2 superuser login;
-SET SESSION ROLE regress_schemauser1;
-CREATE SCHEMA testns;
-SELECT nspname, rolname FROM pg_namespace, pg_roles WHERE pg_namespace.nspname = 'testns' AND pg_namespace.nspowner = pg_roles.oid;
- nspname |       rolname       
----------+---------------------
- testns  | regress_schemauser1
-(1 row)
-
-ALTER SCHEMA testns OWNER TO regress_schemauser2;
-ALTER ROLE regress_schemauser2 RENAME TO regress_schemauser_renamed;
-SELECT nspname, rolname FROM pg_namespace, pg_roles WHERE pg_namespace.nspname = 'testns' AND pg_namespace.nspowner = pg_roles.oid;
- nspname |          rolname           
----------+----------------------------
- testns  | regress_schemauser_renamed
-(1 row)
-
-set session role regress_schemauser_renamed;
-DROP SCHEMA testns CASCADE;
--- clean up
-\c -
-DROP ROLE regress_schemauser1;
-DROP ROLE regress_schemauser_renamed;
--- test that dependent privileges are revoked (or not) properly
-\c -
-set session role regress_priv_user1;
-create table dep_priv_test (a int);
-grant select on dep_priv_test to regress_priv_user2 with grant option;
-grant select on dep_priv_test to regress_priv_user3 with grant option;
-set session role regress_priv_user2;
-grant select on dep_priv_test to regress_priv_user4 with grant option;
-set session role regress_priv_user3;
-grant select on dep_priv_test to regress_priv_user4 with grant option;
-set session role regress_priv_user4;
-grant select on dep_priv_test to regress_priv_user5;
-\dp dep_priv_test
-                                         Access privileges
- Schema |     Name      | Type  |               Access privileges               | Column privileges | Policies 
---------+---------------+-------+-----------------------------------------------+-------------------+----------
- public | dep_priv_test | table | regress_priv_user1=arwdDxt/regress_priv_user1+|                   | 
-        |               |       | regress_priv_user2=r*/regress_priv_user1     +|                   | 
-        |               |       | regress_priv_user3=r*/regress_priv_user1     +|                   | 
-        |               |       | regress_priv_user4=r*/regress_priv_user2     +|                   | 
-        |               |       | regress_priv_user4=r*/regress_priv_user3     +|                   | 
-        |               |       | regress_priv_user5=r/regress_priv_user4       |                   | 
-(1 row)
-
-set session role regress_priv_user2;
-revoke select on dep_priv_test from regress_priv_user4 cascade;
-\dp dep_priv_test
-                                         Access privileges
- Schema |     Name      | Type  |               Access privileges               | Column privileges | Policies 
---------+---------------+-------+-----------------------------------------------+-------------------+----------
- public | dep_priv_test | table | regress_priv_user1=arwdDxt/regress_priv_user1+|                   | 
-        |               |       | regress_priv_user2=r*/regress_priv_user1     +|                   | 
-        |               |       | regress_priv_user3=r*/regress_priv_user1     +|                   | 
-        |               |       | regress_priv_user4=r*/regress_priv_user3     +|                   | 
-        |               |       | regress_priv_user5=r/regress_priv_user4       |                   | 
-(1 row)
-
-set session role regress_priv_user3;
-revoke select on dep_priv_test from regress_priv_user4 cascade;
-\dp dep_priv_test
-                                         Access privileges
- Schema |     Name      | Type  |               Access privileges               | Column privileges | Policies 
---------+---------------+-------+-----------------------------------------------+-------------------+----------
- public | dep_priv_test | table | regress_priv_user1=arwdDxt/regress_priv_user1+|                   | 
-        |               |       | regress_priv_user2=r*/regress_priv_user1     +|                   | 
-        |               |       | regress_priv_user3=r*/regress_priv_user1      |                   | 
-(1 row)
-
-set session role regress_priv_user1;
-drop table dep_priv_test;
--- clean up
-\c
-drop sequence x_seq;
-DROP AGGREGATE priv_testagg1(int);
-DROP FUNCTION priv_testfunc2(int);
-DROP FUNCTION priv_testfunc4(boolean);
-DROP PROCEDURE priv_testproc1(int);
-DROP VIEW atestv0;
-DROP VIEW atestv1;
-DROP VIEW atestv2;
--- this should cascade to drop atestv4
-DROP VIEW atestv3 CASCADE;
-NOTICE:  drop cascades to view atestv4
--- this should complain "does not exist"
-DROP VIEW atestv4;
-ERROR:  view "atestv4" does not exist
-DROP TABLE atest1;
-DROP TABLE atest2;
-DROP TABLE atest3;
-DROP TABLE atest4;
-DROP TABLE atest5;
-DROP TABLE atest6;
-DROP TABLE atestc;
-DROP TABLE atestp1;
-DROP TABLE atestp2;
-SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
- lo_unlink 
------------
-         1
-         1
-         1
-         1
-         1
-(5 rows)
-
-DROP GROUP regress_priv_group1;
-DROP GROUP regress_priv_group2;
--- these are needed to clean up permissions
-REVOKE USAGE ON LANGUAGE sql FROM regress_priv_user1;
-DROP OWNED BY regress_priv_user1;
-DROP USER regress_priv_user1;
-DROP USER regress_priv_user2;
-DROP USER regress_priv_user3;
-DROP USER regress_priv_user4;
-DROP USER regress_priv_user5;
-DROP USER regress_priv_user6;
-DROP USER regress_priv_user7;
-DROP USER regress_priv_user8; -- does not exist
-ERROR:  role "regress_priv_user8" does not exist
--- permissions with LOCK TABLE
-CREATE USER regress_locktable_user;
-CREATE TABLE lock_table (a int);
--- LOCK TABLE and SELECT permission
-GRANT SELECT ON lock_table TO regress_locktable_user;
-SET SESSION AUTHORIZATION regress_locktable_user;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass
-COMMIT;
-BEGIN;
-LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should fail
-ERROR:  permission denied for table lock_table
-ROLLBACK;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should fail
-ERROR:  permission denied for table lock_table
-ROLLBACK;
-\c
-REVOKE SELECT ON lock_table FROM regress_locktable_user;
--- LOCK TABLE and INSERT permission
-GRANT INSERT ON lock_table TO regress_locktable_user;
-SET SESSION AUTHORIZATION regress_locktable_user;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass
-ROLLBACK;
-BEGIN;
-LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass
-COMMIT;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should fail
-ERROR:  permission denied for table lock_table
-ROLLBACK;
-\c
-REVOKE INSERT ON lock_table FROM regress_locktable_user;
--- LOCK TABLE and UPDATE permission
-GRANT UPDATE ON lock_table TO regress_locktable_user;
-SET SESSION AUTHORIZATION regress_locktable_user;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass
-ROLLBACK;
-BEGIN;
-LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass
-COMMIT;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass
-COMMIT;
-\c
-REVOKE UPDATE ON lock_table FROM regress_locktable_user;
--- LOCK TABLE and DELETE permission
-GRANT DELETE ON lock_table TO regress_locktable_user;
-SET SESSION AUTHORIZATION regress_locktable_user;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass
-ROLLBACK;
-BEGIN;
-LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass
-COMMIT;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass
-COMMIT;
-\c
-REVOKE DELETE ON lock_table FROM regress_locktable_user;
--- LOCK TABLE and TRUNCATE permission
-GRANT TRUNCATE ON lock_table TO regress_locktable_user;
-SET SESSION AUTHORIZATION regress_locktable_user;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass
-ROLLBACK;
-BEGIN;
-LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass
-COMMIT;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass
-COMMIT;
-\c
-REVOKE TRUNCATE ON lock_table FROM regress_locktable_user;
--- clean up
-DROP TABLE lock_table;
-DROP USER regress_locktable_user;
--- test to check privileges of system views pg_shmem_allocations and
--- pg_backend_memory_contexts.
--- switch to superuser
-\c -
-CREATE ROLE regress_readallstats;
-SELECT has_table_privilege('regress_readallstats','pg_backend_memory_contexts','SELECT'); -- no
- has_table_privilege 
---------------------
- f
-(1 row)
-
-SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations','SELECT'); -- no
- has_table_privilege 
---------------------
- f
-(1 row)
-
-GRANT pg_read_all_stats TO regress_readallstats;
-SELECT has_table_privilege('regress_readallstats','pg_backend_memory_contexts','SELECT'); -- yes
- has_table_privilege 
---------------------
- t
-(1 row)
-
-SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations','SELECT'); -- yes
- has_table_privilege 
---------------------
- t
-(1 row)
-
--- run query to ensure that functions within views can be executed
-SET ROLE regress_readallstats;
-SELECT COUNT(*) >= 0 AS ok FROM pg_backend_memory_contexts;
- ok 
----
- t
-(1 row)
-
-SELECT COUNT(*) >= 0 AS ok FROM pg_shmem_allocations;
- ok 
----
- t
-(1 row)
-
-RESET ROLE;
--- clean up
-DROP ROLE regress_readallstats;
--- test role grantor machinery
-CREATE ROLE regress_group;
-CREATE ROLE regress_group_direct_manager;
-CREATE ROLE regress_group_indirect_manager;
-CREATE ROLE regress_group_member;
-GRANT regress_group TO regress_group_direct_manager WITH INHERIT FALSE, ADMIN TRUE;
-GRANT regress_group_direct_manager TO regress_group_indirect_manager;
-SET SESSION AUTHORIZATION regress_group_direct_manager;
-GRANT regress_group TO regress_group_member;
-SELECT member::regrole::text, CASE WHEN grantor = 10 THEN 'BOOTSTRAP SUPERUSER' ELSE grantor::regrole::text END FROM pg_auth_members WHERE roleid = 'regress_group'::regrole ORDER BY 1, 2;
-            member             |            grantor            
-------------------------------+------------------------------
- regress_group_direct_manager | BOOTSTRAP SUPERUSER
- regress_group_member          | regress_group_direct_manager
-(2 rows)
-
-REVOKE regress_group FROM regress_group_member;
-SET SESSION AUTHORIZATION regress_group_indirect_manager;
-GRANT regress_group TO regress_group_member;
-SELECT member::regrole::text, CASE WHEN grantor = 10 THEN 'BOOTSTRAP SUPERUSER' ELSE grantor::regrole::text END FROM pg_auth_members WHERE roleid = 'regress_group'::regrole ORDER BY 1, 2;
-            member             |            grantor            
-------------------------------+------------------------------
- regress_group_direct_manager | BOOTSTRAP SUPERUSER
- regress_group_member          | regress_group_direct_manager
-(2 rows)
-
-REVOKE regress_group FROM regress_group_member;
-RESET SESSION AUTHORIZATION;
-DROP ROLE regress_group;
-DROP ROLE regress_group_direct_manager;
-DROP ROLE regress_group_indirect_manager;
-DROP ROLE regress_group_member;
--- test SET and INHERIT options with object ownership changes
-CREATE ROLE regress_roleoption_protagonist;
-CREATE ROLE regress_roleoption_donor;
-CREATE ROLE regress_roleoption_recipient;
-CREATE SCHEMA regress_roleoption;
-GRANT CREATE, USAGE ON SCHEMA regress_roleoption TO PUBLIC;
-GRANT regress_roleoption_donor TO regress_roleoption_protagonist WITH INHERIT TRUE, SET FALSE;
-GRANT regress_roleoption_recipient TO regress_roleoption_protagonist WITH INHERIT FALSE, SET TRUE;
-SET SESSION AUTHORIZATION regress_roleoption_protagonist;
-CREATE TABLE regress_roleoption.t1 (a int);
-CREATE TABLE regress_roleoption.t2 (a int);
-SET SESSION AUTHORIZATION regress_roleoption_donor;
-CREATE TABLE regress_roleoption.t3 (a int);
-SET SESSION AUTHORIZATION regress_roleoption_recipient;
-CREATE TABLE regress_roleoption.t4 (a int);
-SET SESSION AUTHORIZATION regress_roleoption_protagonist;
-ALTER TABLE regress_roleoption.t1 OWNER TO regress_roleoption_donor; -- fails, can't be come donor
-ERROR:  must be able to SET ROLE "regress_roleoption_donor"
-ALTER TABLE regress_roleoption.t2 OWNER TO regress_roleoption_recipient; -- works
-ALTER TABLE regress_roleoption.t3 OWNER TO regress_roleoption_protagonist; -- works
-ALTER TABLE regress_roleoption.t4 OWNER TO regress_roleoption_protagonist; -- fails, we don't inherit from recipient
-ERROR:  must be owner of table t4
-RESET SESSION AUTHORIZATION;
-DROP TABLE regress_roleoption.t1;
-DROP TABLE regress_roleoption.t2;
-DROP TABLE regress_roleoption.t3;
-DROP TABLE regress_roleoption.t4;
-DROP SCHEMA regress_roleoption;
-DROP ROLE regress_roleoption_protagonist;
-DROP ROLE regress_roleoption_donor;
-DROP ROLE regress_roleoption_recipient;
+WARNING:  terminating connection because of crash of another server process
+DETAIL:  The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory.
+HINT:  In a moment you should be able to reconnect to the database and repeat your command.
+server closed the connection unexpectedly
+	This probably means the server terminated abnormally
+	before or while processing the request.
+connection to server was lost
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/rowsecurity.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/rowsecurity.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/rowsecurity.out	2024-03-07 14:25:00.333400000 +0000
+++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/rowsecurity.out	2024-03-07 14:27:17.003071000 +0000
@@ -2862,1688 +2862,10 @@
 -- Query as role that is not the owner of the table or view without permissions.
 SET SESSION AUTHORIZATION regress_rls_carol;
 SELECT * FROM rls_view; --fail - permission denied.
-ERROR:  permission denied for table z1_blacklist
-EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied.
-ERROR:  permission denied for table z1_blacklist
--- Query as role that is not owner of table but is owner of view with permissions.
-SET SESSION AUTHORIZATION regress_rls_alice;
-GRANT SELECT ON z1_blacklist TO regress_rls_bob;
-SET SESSION AUTHORIZATION regress_rls_bob;
-SELECT * FROM rls_view;
-NOTICE:  f_leak => bbb
- a |  b  
----+-----
- 2 | bbb
-(1 row)
-
-EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
-                              QUERY PLAN                               
-----------------------------------------------------------------------
- Seq Scan on z1
-   Filter: ((NOT (hashed SubPlan 1)) AND ((a % 2) = 0) AND f_leak(b))
-   SubPlan 1
-     ->  Seq Scan on z1_blacklist
-(4 rows)
-
--- Query as role that is not the owner of the table or view without permissions.
-SET SESSION AUTHORIZATION regress_rls_carol;
-SELECT * FROM rls_view; --fail - permission denied.
-ERROR:  permission denied for table z1_blacklist
-EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied.
-ERROR:  permission denied for table z1_blacklist
--- Query as role that is not the owner of the table or view with permissions.
-SET SESSION AUTHORIZATION regress_rls_alice;
-GRANT SELECT ON z1_blacklist TO regress_rls_carol;
-SET SESSION AUTHORIZATION regress_rls_carol;
-SELECT * FROM rls_view;
-NOTICE:  f_leak => aba
- a |  b  
----+-----
- 1 | aba
-(1 row)
-
-EXPLAIN (COSTS OFF) SELECT * FROM rls_view;
-                              QUERY PLAN                               
-----------------------------------------------------------------------
- Seq Scan on z1
-   Filter: ((NOT (hashed SubPlan 1)) AND ((a % 2) = 1) AND f_leak(b))
-   SubPlan 1
-     ->  Seq Scan on z1_blacklist
-(4 rows)
-
-SET SESSION AUTHORIZATION regress_rls_bob;
-DROP VIEW rls_view;
---
--- Command specific
---
-SET SESSION AUTHORIZATION regress_rls_alice;
-CREATE TABLE x1 (a int, b text, c text);
-GRANT ALL ON x1 TO PUBLIC;
-INSERT INTO x1 VALUES
-    (1, 'abc', 'regress_rls_bob'),
-    (2, 'bcd', 'regress_rls_bob'),
-    (3, 'cde', 'regress_rls_carol'),
-    (4, 'def', 'regress_rls_carol'),
-    (5, 'efg', 'regress_rls_bob'),
-    (6, 'fgh', 'regress_rls_bob'),
-    (7, 'fgh', 'regress_rls_carol'),
-    (8, 'fgh', 'regress_rls_carol');
-CREATE POLICY p0 ON x1 FOR ALL USING (c = current_user);
-CREATE POLICY p1 ON x1 FOR SELECT USING (a % 2 = 0);
-CREATE POLICY p2 ON x1 FOR INSERT WITH CHECK (a % 2 = 1);
-CREATE POLICY p3 ON x1 FOR UPDATE USING (a % 2 = 0);
-CREATE POLICY p4 ON x1 FOR DELETE USING (a < 8);
-ALTER TABLE x1 ENABLE ROW LEVEL SECURITY;
-SET SESSION AUTHORIZATION regress_rls_bob;
-SELECT * FROM x1 WHERE f_leak(b) ORDER BY a ASC;
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => bcd
-NOTICE:  f_leak => def
-NOTICE:  f_leak => efg
-NOTICE:  f_leak => fgh
-NOTICE:  f_leak => fgh
- a |  b  |         c         
----+-----+-------------------
- 1 | abc | regress_rls_bob
- 2 | bcd | regress_rls_bob
- 4 | def | regress_rls_carol
- 5 | efg | regress_rls_bob
- 6 | fgh | regress_rls_bob
- 8 | fgh | regress_rls_carol
-(6 rows)
-
-UPDATE x1 SET b = b || '_updt' WHERE f_leak(b) RETURNING *;
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => bcd
-NOTICE:  f_leak => def
-NOTICE:  f_leak => efg
-NOTICE:  f_leak => fgh
-NOTICE:  f_leak => fgh
- a |    b     |         c         
----+----------+-------------------
- 1 | abc_updt | regress_rls_bob
- 2 | bcd_updt | regress_rls_bob
- 4 | def_updt | regress_rls_carol
- 5 | efg_updt | regress_rls_bob
- 6 | fgh_updt | regress_rls_bob
- 8 | fgh_updt | regress_rls_carol
-(6 rows)
-
-SET SESSION AUTHORIZATION regress_rls_carol;
-SELECT * FROM x1 WHERE f_leak(b) ORDER BY a ASC;
-NOTICE:  f_leak => cde
-NOTICE:  f_leak => fgh
-NOTICE:  f_leak => bcd_updt
-NOTICE:  f_leak => def_updt
-NOTICE:  f_leak => fgh_updt
-NOTICE:  f_leak => fgh_updt
- a |    b     |         c         
----+----------+-------------------
- 2 | bcd_updt | regress_rls_bob
- 3 | cde      | regress_rls_carol
- 4 | def_updt | regress_rls_carol
- 6 | fgh_updt | regress_rls_bob
- 7 | fgh      | regress_rls_carol
- 8 | fgh_updt | regress_rls_carol
-(6 rows)
-
-UPDATE x1 SET b = b || '_updt' WHERE f_leak(b) RETURNING *;
-NOTICE:  f_leak => cde
-NOTICE:  f_leak => fgh
-NOTICE:  f_leak => bcd_updt
-NOTICE:  f_leak => def_updt
-NOTICE:  f_leak => fgh_updt
-NOTICE:  f_leak => fgh_updt
- a |       b       |         c         
----+---------------+-------------------
- 3 | cde_updt      | regress_rls_carol
- 7 | fgh_updt      | regress_rls_carol
- 2 | bcd_updt_updt | regress_rls_bob
- 4 | def_updt_updt | regress_rls_carol
- 6 | fgh_updt_updt | regress_rls_bob
- 8 | fgh_updt_updt | regress_rls_carol
-(6 rows)
-
-DELETE FROM x1 WHERE f_leak(b) RETURNING *;
-NOTICE:  f_leak => cde_updt
-NOTICE:  f_leak => fgh_updt
-NOTICE:  f_leak => bcd_updt_updt
-NOTICE:  f_leak => def_updt_updt
-NOTICE:  f_leak => fgh_updt_updt
-NOTICE:  f_leak => fgh_updt_updt
- a |       b       |         c         
----+---------------+-------------------
- 3 | cde_updt      | regress_rls_carol
- 7 | fgh_updt      | regress_rls_carol
- 2 | bcd_updt_updt | regress_rls_bob
- 4 | def_updt_updt | regress_rls_carol
- 6 | fgh_updt_updt | regress_rls_bob
- 8 | fgh_updt_updt | regress_rls_carol
-(6 rows)
-
---
--- Duplicate Policy Names
---
-SET SESSION AUTHORIZATION regress_rls_alice;
-CREATE TABLE y1 (a int, b text);
-CREATE TABLE y2 (a int, b text);
-GRANT ALL ON y1, y2 TO regress_rls_bob;
-CREATE POLICY p1 ON y1 FOR ALL USING (a % 2 = 0);
-CREATE POLICY p2 ON y1 FOR SELECT USING (a > 2);
-CREATE POLICY p1 ON y1 FOR SELECT USING (a % 2 = 1); --fail
-ERROR:  policy "p1" for table "y1" already exists
-CREATE POLICY p1 ON y2 FOR ALL USING (a % 2 = 0); --OK
-ALTER TABLE y1 ENABLE ROW LEVEL SECURITY;
-ALTER TABLE y2 ENABLE ROW LEVEL SECURITY;
---
--- Expression structure with SBV
---
--- Create view as table owner.  RLS should NOT be applied.
-SET SESSION AUTHORIZATION regress_rls_alice;
-CREATE VIEW rls_sbv WITH (security_barrier) AS
-    SELECT * FROM y1 WHERE f_leak(b);
-EXPLAIN (COSTS OFF) SELECT * FROM rls_sbv WHERE (a = 1);
-            QUERY PLAN             
-----------------------------------
- Seq Scan on y1
-   Filter: (f_leak(b) AND (a = 1))
-(2 rows)
-
-DROP VIEW rls_sbv;
--- Create view as role that does not own table.  RLS should be applied.
-SET SESSION AUTHORIZATION regress_rls_bob;
-CREATE VIEW rls_sbv WITH (security_barrier) AS
-    SELECT * FROM y1 WHERE f_leak(b);
-EXPLAIN (COSTS OFF) SELECT * FROM rls_sbv WHERE (a = 1);
-                            QUERY PLAN                             
-------------------------------------------------------------------
- Seq Scan on y1
-   Filter: ((a = 1) AND ((a > 2) OR ((a % 2) = 0)) AND f_leak(b))
-(2 rows)
-
-DROP VIEW rls_sbv;
---
--- Expression structure
---
-SET SESSION AUTHORIZATION regress_rls_alice;
-INSERT INTO y2 (SELECT x, public.fipshash(x::text) FROM generate_series(0,20) x);
-CREATE POLICY p2 ON y2 USING (a % 3 = 0);
-CREATE POLICY p3 ON y2 USING (a % 4 = 0);
-SET SESSION AUTHORIZATION regress_rls_bob;
-SELECT * FROM y2 WHERE f_leak(b);
-NOTICE:  f_leak => 5feceb66ffc86f38d952786c6d696c79
-NOTICE:  f_leak => d4735e3a265e16eee03f59718b9b5d03
-NOTICE:  f_leak => 4e07408562bedb8b60ce05c1decfe3ad
-NOTICE:  f_leak => 4b227777d4dd1fc61c6f884f48641d02
-NOTICE:  f_leak => e7f6c011776e8db7cd330b54174fd76f
-NOTICE:  f_leak => 2c624232cdd221771294dfbb310aca00
-NOTICE:  f_leak => 19581e27de7ced00ff1ce50b2047e7a5
-NOTICE:  f_leak => 4a44dc15364204a80fe80e9039455cc1
-NOTICE:  f_leak => 6b51d431df5d7f141cbececcf79edf3d
-NOTICE:  f_leak => 8527a891e224136950ff32ca212b45bc
-NOTICE:  f_leak => e629fa6598d732768f7c726b4b621285
-NOTICE:  f_leak => b17ef6d19c7a5b1ee83b907c595526dc
-NOTICE:  f_leak => 4ec9599fc203d176a301536c2e091a19
-NOTICE:  f_leak => f5ca38f748a1d6eaf726b8a42fb575c3
- a  |                b                 
-----+----------------------------------
-  0 | 5feceb66ffc86f38d952786c6d696c79
-  2 | d4735e3a265e16eee03f59718b9b5d03
-  3 | 4e07408562bedb8b60ce05c1decfe3ad
-  4 | 4b227777d4dd1fc61c6f884f48641d02
-  6 | e7f6c011776e8db7cd330b54174fd76f
-  8 | 2c624232cdd221771294dfbb310aca00
-  9 | 19581e27de7ced00ff1ce50b2047e7a5
- 10 | 4a44dc15364204a80fe80e9039455cc1
- 12 | 6b51d431df5d7f141cbececcf79edf3d
- 14 | 8527a891e224136950ff32ca212b45bc
- 15 | e629fa6598d732768f7c726b4b621285
- 16 | b17ef6d19c7a5b1ee83b907c595526dc
- 18 | 4ec9599fc203d176a301536c2e091a19
- 20 | f5ca38f748a1d6eaf726b8a42fb575c3
-(14 rows)
-
-EXPLAIN (COSTS OFF) SELECT * FROM y2 WHERE f_leak(b);
-                                 QUERY PLAN                                  
-----------------------------------------------------------------------------
- Seq Scan on y2
-   Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b))
-(2 rows)
-
---
--- Qual push-down of leaky functions, when not referring to table
---
-SELECT * FROM y2 WHERE f_leak('abc');
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => abc
- a  |                b                 
-----+----------------------------------
-  0 | 5feceb66ffc86f38d952786c6d696c79
-  2 | d4735e3a265e16eee03f59718b9b5d03
-  3 | 4e07408562bedb8b60ce05c1decfe3ad
-  4 | 4b227777d4dd1fc61c6f884f48641d02
-  6 | e7f6c011776e8db7cd330b54174fd76f
-  8 | 2c624232cdd221771294dfbb310aca00
-  9 | 19581e27de7ced00ff1ce50b2047e7a5
- 10 | 4a44dc15364204a80fe80e9039455cc1
- 12 | 6b51d431df5d7f141cbececcf79edf3d
- 14 | 8527a891e224136950ff32ca212b45bc
- 15 | e629fa6598d732768f7c726b4b621285
- 16 | b17ef6d19c7a5b1ee83b907c595526dc
- 18 | 4ec9599fc203d176a301536c2e091a19
- 20 | f5ca38f748a1d6eaf726b8a42fb575c3
-(14 rows)
-
-EXPLAIN (COSTS OFF) SELECT * FROM y2 WHERE f_leak('abc');
-                                      QUERY PLAN                                       
---------------------------------------------------------------------------------------
- Seq Scan on y2
-   Filter: (f_leak('abc'::text) AND (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)))
-(2 rows)
-
-CREATE TABLE test_qual_pushdown (
-    abc text
-);
-INSERT INTO test_qual_pushdown VALUES ('abc'),('def');
-SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE f_leak(abc);
-NOTICE:  f_leak => abc
-NOTICE:  f_leak => def
- a | b | abc 
----+---+-----
-(0 rows)
-
-EXPLAIN (COSTS OFF) SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE f_leak(abc);
-                               QUERY PLAN                                
-------------------------------------------------------------------------
- Hash Join
-   Hash Cond: (test_qual_pushdown.abc = y2.b)
-   ->  Seq Scan on test_qual_pushdown
-         Filter: f_leak(abc)
-   ->  Hash
-         ->  Seq Scan on y2
-               Filter: (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0))
-(7 rows)
-
-SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE f_leak(b);
-NOTICE:  f_leak => 5feceb66ffc86f38d952786c6d696c79
-NOTICE:  f_leak => d4735e3a265e16eee03f59718b9b5d03
-NOTICE:  f_leak => 4e07408562bedb8b60ce05c1decfe3ad
-NOTICE:  f_leak => 4b227777d4dd1fc61c6f884f48641d02
-NOTICE:  f_leak => e7f6c011776e8db7cd330b54174fd76f
-NOTICE:  f_leak => 2c624232cdd221771294dfbb310aca00
-NOTICE:  f_leak => 19581e27de7ced00ff1ce50b2047e7a5
-NOTICE:  f_leak => 4a44dc15364204a80fe80e9039455cc1
-NOTICE:  f_leak => 6b51d431df5d7f141cbececcf79edf3d
-NOTICE:  f_leak => 8527a891e224136950ff32ca212b45bc
-NOTICE:  f_leak => e629fa6598d732768f7c726b4b621285
-NOTICE:  f_leak => b17ef6d19c7a5b1ee83b907c595526dc
-NOTICE:  f_leak => 4ec9599fc203d176a301536c2e091a19
-NOTICE:  f_leak => f5ca38f748a1d6eaf726b8a42fb575c3
- a | b | abc 
----+---+-----
-(0 rows)
-
-EXPLAIN (COSTS OFF) SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE f_leak(b);
-                                        QUERY PLAN                                         
--------------------------------------------------------------------------------------------
- Hash Join
-   Hash Cond: (test_qual_pushdown.abc = y2.b)
-   ->  Seq Scan on test_qual_pushdown
-   ->  Hash
-         ->  Seq Scan on y2
-               Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b))
-(6 rows)
-
-DROP TABLE test_qual_pushdown;
---
--- Plancache invalidate on user change.
---
-RESET SESSION AUTHORIZATION;
-DROP TABLE t1 CASCADE;
-NOTICE:  drop cascades to 2 other objects
-DETAIL:  drop cascades to table t2
-drop cascades to table t3
-CREATE TABLE t1 (a integer);
-GRANT SELECT ON t1 TO regress_rls_bob, regress_rls_carol;
-CREATE POLICY p1 ON t1 TO regress_rls_bob USING ((a % 2) = 0);
-CREATE POLICY p2 ON t1 TO regress_rls_carol USING ((a % 4) = 0);
-ALTER TABLE t1 ENABLE ROW LEVEL SECURITY;
--- Prepare as regress_rls_bob
-SET ROLE regress_rls_bob;
-PREPARE role_inval AS SELECT * FROM t1;
--- Check plan
-EXPLAIN (COSTS OFF) EXECUTE role_inval;
-       QUERY PLAN        
-------------------------
- Seq Scan on t1
-   Filter: ((a % 2) = 0)
-(2 rows)
-
--- Change to regress_rls_carol
-SET ROLE regress_rls_carol;
--- Check plan- should be different
-EXPLAIN (COSTS OFF) EXECUTE role_inval;
-       QUERY PLAN        
-------------------------
- Seq Scan on t1
-   Filter: ((a % 4) = 0)
-(2 rows)
-
--- Change back to regress_rls_bob
-SET ROLE regress_rls_bob;
--- Check plan- should be back to original
-EXPLAIN (COSTS OFF) EXECUTE role_inval;
-       QUERY PLAN        
-------------------------
- Seq Scan on t1
-   Filter: ((a % 2) = 0)
-(2 rows)
-
---
--- CTE and RLS
---
-RESET SESSION AUTHORIZATION;
-DROP TABLE t1 CASCADE;
-CREATE TABLE t1 (a integer, b text);
-CREATE POLICY p1 ON t1 USING (a % 2 = 0);
-ALTER TABLE t1 ENABLE ROW LEVEL SECURITY;
-GRANT ALL ON t1 TO regress_rls_bob;
-INSERT INTO t1 (SELECT x, public.fipshash(x::text) FROM generate_series(0,20) x);
-SET SESSION AUTHORIZATION regress_rls_bob;
-WITH cte1 AS MATERIALIZED (SELECT * FROM t1 WHERE f_leak(b)) SELECT * FROM cte1;
-NOTICE:  f_leak => 5feceb66ffc86f38d952786c6d696c79
-NOTICE:  f_leak => d4735e3a265e16eee03f59718b9b5d03
-NOTICE:  f_leak => 4b227777d4dd1fc61c6f884f48641d02
-NOTICE:  f_leak => e7f6c011776e8db7cd330b54174fd76f
-NOTICE:  f_leak => 2c624232cdd221771294dfbb310aca00
-NOTICE:  f_leak => 4a44dc15364204a80fe80e9039455cc1
-NOTICE:  f_leak => 6b51d431df5d7f141cbececcf79edf3d
-NOTICE:  f_leak => 8527a891e224136950ff32ca212b45bc
-NOTICE:  f_leak => b17ef6d19c7a5b1ee83b907c595526dc
-NOTICE:  f_leak => 4ec9599fc203d176a301536c2e091a19
-NOTICE:  f_leak => f5ca38f748a1d6eaf726b8a42fb575c3
- a  |                b                 
-----+----------------------------------
-  0 | 5feceb66ffc86f38d952786c6d696c79
-  2 | d4735e3a265e16eee03f59718b9b5d03
-  4 | 4b227777d4dd1fc61c6f884f48641d02
-  6 | e7f6c011776e8db7cd330b54174fd76f
-  8 | 2c624232cdd221771294dfbb310aca00
- 10 | 4a44dc15364204a80fe80e9039455cc1
- 12 | 6b51d431df5d7f141cbececcf79edf3d
- 14 | 8527a891e224136950ff32ca212b45bc
- 16 | b17ef6d19c7a5b1ee83b907c595526dc
- 18 | 4ec9599fc203d176a301536c2e091a19
- 20 | f5ca38f748a1d6eaf726b8a42fb575c3
-(11 rows)
-
-EXPLAIN (COSTS OFF)
-WITH cte1 AS MATERIALIZED (SELECT * FROM t1 WHERE f_leak(b)) SELECT * FROM cte1;
-                   QUERY PLAN                    
-------------------------------------------------
- CTE Scan on cte1
-   CTE cte1
-     ->  Seq Scan on t1
-           Filter: (((a % 2) = 0) AND f_leak(b))
-(4 rows)
-
-WITH cte1 AS (UPDATE t1 SET a = a + 1 RETURNING *) SELECT * FROM cte1; --fail
-ERROR:  new row violates row-level security policy for table "t1"
-WITH cte1 AS (UPDATE t1 SET a = a RETURNING *) SELECT * FROM cte1; --ok
- a  |                b                 
-----+----------------------------------
-  0 | 5feceb66ffc86f38d952786c6d696c79
-  2 | d4735e3a265e16eee03f59718b9b5d03
-  4 | 4b227777d4dd1fc61c6f884f48641d02
-  6 | e7f6c011776e8db7cd330b54174fd76f
-  8 | 2c624232cdd221771294dfbb310aca00
- 10 | 4a44dc15364204a80fe80e9039455cc1
- 12 | 6b51d431df5d7f141cbececcf79edf3d
- 14 | 8527a891e224136950ff32ca212b45bc
- 16 | b17ef6d19c7a5b1ee83b907c595526dc
- 18 | 4ec9599fc203d176a301536c2e091a19
- 20 | f5ca38f748a1d6eaf726b8a42fb575c3
-(11 rows)
-
-WITH cte1 AS (INSERT INTO t1 VALUES (21, 'Fail') RETURNING *) SELECT * FROM cte1; --fail
-ERROR:  new row violates row-level security policy for table "t1"
-WITH cte1 AS (INSERT INTO t1 VALUES (20, 'Success') RETURNING *) SELECT * FROM cte1; --ok
- a  |    b    
-----+---------
- 20 | Success
-(1 row)
-
---
--- Rename Policy
---
-RESET SESSION AUTHORIZATION;
-ALTER POLICY p1 ON t1 RENAME TO p1; --fail
-ERROR:  policy "p1" for table "t1" already exists
-SELECT polname, relname
-    FROM pg_policy pol
-    JOIN pg_class pc ON (pc.oid = pol.polrelid)
-    WHERE relname = 't1';
- polname | relname 
----------+---------
- p1      | t1
-(1 row)
-
-ALTER POLICY p1 ON t1 RENAME TO p2; --ok
-SELECT polname, relname
-    FROM pg_policy pol
-    JOIN pg_class pc ON (pc.oid = pol.polrelid)
-    WHERE relname = 't1';
- polname | relname 
----------+---------
- p2      | t1
-(1 row)
-
---
--- Check INSERT SELECT
---
-SET SESSION AUTHORIZATION regress_rls_bob;
-CREATE TABLE t2 (a integer, b text);
-INSERT INTO t2 (SELECT * FROM t1);
-EXPLAIN (COSTS OFF) INSERT INTO t2 (SELECT * FROM t1);
-          QUERY PLAN          
------------------------------
- Insert on t2
-   ->  Seq Scan on t1
-         Filter: ((a % 2) = 0)
-(3 rows)
-
-SELECT * FROM t2;
- a  |                b                 
-----+----------------------------------
-  0 | 5feceb66ffc86f38d952786c6d696c79
-  2 | d4735e3a265e16eee03f59718b9b5d03
-  4 | 4b227777d4dd1fc61c6f884f48641d02
-  6 | e7f6c011776e8db7cd330b54174fd76f
-  8 | 2c624232cdd221771294dfbb310aca00
- 10 | 4a44dc15364204a80fe80e9039455cc1
- 12 | 6b51d431df5d7f141cbececcf79edf3d
- 14 | 8527a891e224136950ff32ca212b45bc
- 16 | b17ef6d19c7a5b1ee83b907c595526dc
- 18 | 4ec9599fc203d176a301536c2e091a19
- 20 | f5ca38f748a1d6eaf726b8a42fb575c3
- 20 | Success
-(12 rows)
-
-EXPLAIN (COSTS OFF) SELECT * FROM t2;
-   QUERY PLAN   
----------------
- Seq Scan on t2
-(1 row)
-
-CREATE TABLE t3 AS SELECT * FROM t1;
-SELECT * FROM t3;
- a  |                b                 
-----+----------------------------------
-  0 | 5feceb66ffc86f38d952786c6d696c79
-  2 | d4735e3a265e16eee03f59718b9b5d03
-  4 | 4b227777d4dd1fc61c6f884f48641d02
-  6 | e7f6c011776e8db7cd330b54174fd76f
-  8 | 2c624232cdd221771294dfbb310aca00
- 10 | 4a44dc15364204a80fe80e9039455cc1
- 12 | 6b51d431df5d7f141cbececcf79edf3d
- 14 | 8527a891e224136950ff32ca212b45bc
- 16 | b17ef6d19c7a5b1ee83b907c595526dc
- 18 | 4ec9599fc203d176a301536c2e091a19
- 20 | f5ca38f748a1d6eaf726b8a42fb575c3
- 20 | Success
-(12 rows)
-
-SELECT * INTO t4 FROM t1;
-SELECT * FROM t4;
- a  |                b                 
-----+----------------------------------
-  0 | 5feceb66ffc86f38d952786c6d696c79
-  2 | d4735e3a265e16eee03f59718b9b5d03
-  4 | 4b227777d4dd1fc61c6f884f48641d02
-  6 | e7f6c011776e8db7cd330b54174fd76f
-  8 | 2c624232cdd221771294dfbb310aca00
- 10 | 4a44dc15364204a80fe80e9039455cc1
- 12 | 6b51d431df5d7f141cbececcf79edf3d
- 14 | 8527a891e224136950ff32ca212b45bc
- 16 | b17ef6d19c7a5b1ee83b907c595526dc
- 18 | 4ec9599fc203d176a301536c2e091a19
- 20 | f5ca38f748a1d6eaf726b8a42fb575c3
- 20 | Success
-(12 rows)
-
---
--- RLS with JOIN
---
-SET SESSION AUTHORIZATION regress_rls_alice;
-CREATE TABLE blog (id integer, author text, post text);
-CREATE TABLE comment (blog_id integer, message text);
-GRANT ALL ON blog, comment TO regress_rls_bob;
-CREATE POLICY blog_1 ON blog USING (id % 2 = 0);
-ALTER TABLE blog ENABLE ROW LEVEL SECURITY;
-INSERT INTO blog VALUES
-    (1, 'alice', 'blog #1'),
-    (2, 'bob', 'blog #1'),
-    (3, 'alice', 'blog #2'),
-    (4, 'alice', 'blog #3'),
-    (5, 'john', 'blog #1');
-INSERT INTO comment VALUES
-    (1, 'cool blog'),
-    (1, 'fun
blog'), - (3, 'crazy blog'), - (5, 'what?'), - (4, 'insane!'), - (2, 'who did it?'); -SET SESSION AUTHORIZATION regress_rls_bob; --- Check RLS JOIN with Non-RLS. -SELECT id, author, message FROM blog JOIN comment ON id = blog_id; - id | author | message -----+--------+------------- - 4 | alice | insane! - 2 | bob | who did it? -(2 rows) - --- Check Non-RLS JOIN with RLS. -SELECT id, author, message FROM comment JOIN blog ON id = blog_id; - id | author | message -----+--------+------------- - 4 | alice | insane! - 2 | bob | who did it? -(2 rows) - -SET SESSION AUTHORIZATION regress_rls_alice; -CREATE POLICY comment_1 ON comment USING (blog_id < 4); -ALTER TABLE comment ENABLE ROW LEVEL SECURITY; -SET SESSION AUTHORIZATION regress_rls_bob; --- Check RLS JOIN RLS -SELECT id, author, message FROM blog JOIN comment ON id = blog_id; - id | author | message -----+--------+------------- - 2 | bob | who did it? -(1 row) - -SELECT id, author, message FROM comment JOIN blog ON id = blog_id; - id | author | message -----+--------+------------- - 2 | bob | who did it? -(1 row) - -SET SESSION AUTHORIZATION regress_rls_alice; -DROP TABLE blog, comment; --- --- Default Deny Policy --- -RESET SESSION AUTHORIZATION; -DROP POLICY p2 ON t1; -ALTER TABLE t1 OWNER TO regress_rls_alice; --- Check that default deny does not apply to superuser. -RESET SESSION AUTHORIZATION; -SELECT * FROM t1; - a | b -----+---------------------------------- - 1 | 6b86b273ff34fce19d6b804eff5a3f57 - 3 | 4e07408562bedb8b60ce05c1decfe3ad - 5 | ef2d127de37b942baad06145e54b0c61 - 7 | 7902699be42c8a8e46fbbb4501726517 - 9 | 19581e27de7ced00ff1ce50b2047e7a5 - 11 | 4fc82b26aecb47d2868c4efbe3581732 - 13 | 3fdba35f04dc8c462986c992bcf87554 - 15 | e629fa6598d732768f7c726b4b621285 - 17 | 4523540f1504cd17100c4835e85b7eef - 19 | 9400f1b21cb527d7fa3d3eabba93557a - 0 | 5feceb66ffc86f38d952786c6d696c79 - 2 | d4735e3a265e16eee03f59718b9b5d03 - 4 | 4b227777d4dd1fc61c6f884f48641d02 - 6 | e7f6c011776e8db7cd330b54174fd76f - 8 | 2c624232cdd221771294dfbb310aca00 - 10 | 4a44dc15364204a80fe80e9039455cc1 - 12 | 6b51d431df5d7f141cbececcf79edf3d - 14 | 8527a891e224136950ff32ca212b45bc - 16 | b17ef6d19c7a5b1ee83b907c595526dc - 18 | 4ec9599fc203d176a301536c2e091a19 - 20 | f5ca38f748a1d6eaf726b8a42fb575c3 - 20 | Success -(22 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM t1; - QUERY PLAN ----------------- - Seq Scan on t1 -(1 row) - --- Check that default deny does not apply to table owner. 
-SET SESSION AUTHORIZATION regress_rls_alice; -SELECT * FROM t1; - a | b -----+---------------------------------- - 1 | 6b86b273ff34fce19d6b804eff5a3f57 - 3 | 4e07408562bedb8b60ce05c1decfe3ad - 5 | ef2d127de37b942baad06145e54b0c61 - 7 | 7902699be42c8a8e46fbbb4501726517 - 9 | 19581e27de7ced00ff1ce50b2047e7a5 - 11 | 4fc82b26aecb47d2868c4efbe3581732 - 13 | 3fdba35f04dc8c462986c992bcf87554 - 15 | e629fa6598d732768f7c726b4b621285 - 17 | 4523540f1504cd17100c4835e85b7eef - 19 | 9400f1b21cb527d7fa3d3eabba93557a - 0 | 5feceb66ffc86f38d952786c6d696c79 - 2 | d4735e3a265e16eee03f59718b9b5d03 - 4 | 4b227777d4dd1fc61c6f884f48641d02 - 6 | e7f6c011776e8db7cd330b54174fd76f - 8 | 2c624232cdd221771294dfbb310aca00 - 10 | 4a44dc15364204a80fe80e9039455cc1 - 12 | 6b51d431df5d7f141cbececcf79edf3d - 14 | 8527a891e224136950ff32ca212b45bc - 16 | b17ef6d19c7a5b1ee83b907c595526dc - 18 | 4ec9599fc203d176a301536c2e091a19 - 20 | f5ca38f748a1d6eaf726b8a42fb575c3 - 20 | Success -(22 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM t1; - QUERY PLAN ----------------- - Seq Scan on t1 -(1 row) - --- Check that default deny applies to non-owner/non-superuser when RLS on. -SET SESSION AUTHORIZATION regress_rls_bob; -SET row_security TO ON; -SELECT * FROM t1; - a | b ----+--- -(0 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM t1; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -SET SESSION AUTHORIZATION regress_rls_bob; -SELECT * FROM t1; - a | b ----+--- -(0 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM t1; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - --- --- COPY TO/FROM --- -RESET SESSION AUTHORIZATION; -DROP TABLE copy_t CASCADE; -ERROR: table "copy_t" does not exist -CREATE TABLE copy_t (a integer, b text); -CREATE POLICY p1 ON copy_t USING (a % 2 = 0); -ALTER TABLE copy_t ENABLE ROW LEVEL SECURITY; -GRANT ALL ON copy_t TO regress_rls_bob, regress_rls_exempt_user; -INSERT INTO copy_t (SELECT x, public.fipshash(x::text) FROM generate_series(0,10) x); --- Check COPY TO as Superuser/owner. -RESET SESSION AUTHORIZATION; -SET row_security TO OFF; -COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; -0,5feceb66ffc86f38d952786c6d696c79 -1,6b86b273ff34fce19d6b804eff5a3f57 -2,d4735e3a265e16eee03f59718b9b5d03 -3,4e07408562bedb8b60ce05c1decfe3ad -4,4b227777d4dd1fc61c6f884f48641d02 -5,ef2d127de37b942baad06145e54b0c61 -6,e7f6c011776e8db7cd330b54174fd76f -7,7902699be42c8a8e46fbbb4501726517 -8,2c624232cdd221771294dfbb310aca00 -9,19581e27de7ced00ff1ce50b2047e7a5 -10,4a44dc15364204a80fe80e9039455cc1 -SET row_security TO ON; -COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; -0,5feceb66ffc86f38d952786c6d696c79 -1,6b86b273ff34fce19d6b804eff5a3f57 -2,d4735e3a265e16eee03f59718b9b5d03 -3,4e07408562bedb8b60ce05c1decfe3ad -4,4b227777d4dd1fc61c6f884f48641d02 -5,ef2d127de37b942baad06145e54b0c61 -6,e7f6c011776e8db7cd330b54174fd76f -7,7902699be42c8a8e46fbbb4501726517 -8,2c624232cdd221771294dfbb310aca00 -9,19581e27de7ced00ff1ce50b2047e7a5 -10,4a44dc15364204a80fe80e9039455cc1 --- Check COPY TO as user with permissions. 
-SET SESSION AUTHORIZATION regress_rls_bob; -SET row_security TO OFF; -COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS -ERROR: query would be affected by row-level security policy for table "copy_t" -SET row_security TO ON; -COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok -0,5feceb66ffc86f38d952786c6d696c79 -2,d4735e3a265e16eee03f59718b9b5d03 -4,4b227777d4dd1fc61c6f884f48641d02 -6,e7f6c011776e8db7cd330b54174fd76f -8,2c624232cdd221771294dfbb310aca00 -10,4a44dc15364204a80fe80e9039455cc1 --- Check COPY TO as user with permissions and BYPASSRLS -SET SESSION AUTHORIZATION regress_rls_exempt_user; -SET row_security TO OFF; -COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok -0,5feceb66ffc86f38d952786c6d696c79 -1,6b86b273ff34fce19d6b804eff5a3f57 -2,d4735e3a265e16eee03f59718b9b5d03 -3,4e07408562bedb8b60ce05c1decfe3ad -4,4b227777d4dd1fc61c6f884f48641d02 -5,ef2d127de37b942baad06145e54b0c61 -6,e7f6c011776e8db7cd330b54174fd76f -7,7902699be42c8a8e46fbbb4501726517 -8,2c624232cdd221771294dfbb310aca00 -9,19581e27de7ced00ff1ce50b2047e7a5 -10,4a44dc15364204a80fe80e9039455cc1 -SET row_security TO ON; -COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok -0,5feceb66ffc86f38d952786c6d696c79 -1,6b86b273ff34fce19d6b804eff5a3f57 -2,d4735e3a265e16eee03f59718b9b5d03 -3,4e07408562bedb8b60ce05c1decfe3ad -4,4b227777d4dd1fc61c6f884f48641d02 -5,ef2d127de37b942baad06145e54b0c61 -6,e7f6c011776e8db7cd330b54174fd76f -7,7902699be42c8a8e46fbbb4501726517 -8,2c624232cdd221771294dfbb310aca00 -9,19581e27de7ced00ff1ce50b2047e7a5 -10,4a44dc15364204a80fe80e9039455cc1 --- Check COPY TO as user without permissions. SET row_security TO OFF; -SET SESSION AUTHORIZATION regress_rls_carol; -SET row_security TO OFF; -COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS -ERROR: query would be affected by row-level security policy for table "copy_t" -SET row_security TO ON; -COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - permission denied -ERROR: permission denied for table copy_t --- Check COPY relation TO; keep it just one row to avoid reordering issues -RESET SESSION AUTHORIZATION; -SET row_security TO ON; -CREATE TABLE copy_rel_to (a integer, b text); -CREATE POLICY p1 ON copy_rel_to USING (a % 2 = 0); -ALTER TABLE copy_rel_to ENABLE ROW LEVEL SECURITY; -GRANT ALL ON copy_rel_to TO regress_rls_bob, regress_rls_exempt_user; -INSERT INTO copy_rel_to VALUES (1, public.fipshash('1')); --- Check COPY TO as Superuser/owner. -RESET SESSION AUTHORIZATION; -SET row_security TO OFF; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; -1,6b86b273ff34fce19d6b804eff5a3f57 -SET row_security TO ON; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; -1,6b86b273ff34fce19d6b804eff5a3f57 --- Check COPY TO as user with permissions. 
-SET SESSION AUTHORIZATION regress_rls_bob; -SET row_security TO OFF; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS -ERROR: query would be affected by row-level security policy for table "copy_rel_to" -SET row_security TO ON; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok --- Check COPY TO as user with permissions and BYPASSRLS -SET SESSION AUTHORIZATION regress_rls_exempt_user; -SET row_security TO OFF; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok -1,6b86b273ff34fce19d6b804eff5a3f57 -SET row_security TO ON; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok -1,6b86b273ff34fce19d6b804eff5a3f57 --- Check COPY TO as user without permissions. SET row_security TO OFF; -SET SESSION AUTHORIZATION regress_rls_carol; -SET row_security TO OFF; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied -ERROR: permission denied for table copy_rel_to -SET row_security TO ON; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied -ERROR: permission denied for table copy_rel_to --- Check behavior with a child table. -RESET SESSION AUTHORIZATION; -SET row_security TO ON; -CREATE TABLE copy_rel_to_child () INHERITS (copy_rel_to); -INSERT INTO copy_rel_to_child VALUES (1, 'one'), (2, 'two'); --- Check COPY TO as Superuser/owner. -RESET SESSION AUTHORIZATION; -SET row_security TO OFF; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; -1,6b86b273ff34fce19d6b804eff5a3f57 -SET row_security TO ON; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; -1,6b86b273ff34fce19d6b804eff5a3f57 --- Check COPY TO as user with permissions. -SET SESSION AUTHORIZATION regress_rls_bob; -SET row_security TO OFF; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS -ERROR: query would be affected by row-level security policy for table "copy_rel_to" -SET row_security TO ON; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok --- Check COPY TO as user with permissions and BYPASSRLS -SET SESSION AUTHORIZATION regress_rls_exempt_user; -SET row_security TO OFF; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok -1,6b86b273ff34fce19d6b804eff5a3f57 -SET row_security TO ON; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok -1,6b86b273ff34fce19d6b804eff5a3f57 --- Check COPY TO as user without permissions. SET row_security TO OFF; -SET SESSION AUTHORIZATION regress_rls_carol; -SET row_security TO OFF; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied -ERROR: permission denied for table copy_rel_to -SET row_security TO ON; -COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied -ERROR: permission denied for table copy_rel_to --- Check COPY FROM as Superuser/owner. -RESET SESSION AUTHORIZATION; -SET row_security TO OFF; -COPY copy_t FROM STDIN; --ok -SET row_security TO ON; -COPY copy_t FROM STDIN; --ok --- Check COPY FROM as user with permissions. -SET SESSION AUTHORIZATION regress_rls_bob; -SET row_security TO OFF; -COPY copy_t FROM STDIN; --fail - would be affected by RLS. -ERROR: query would be affected by row-level security policy for table "copy_t" -SET row_security TO ON; -COPY copy_t FROM STDIN; --fail - COPY FROM not supported by RLS. -ERROR: COPY FROM not supported with row-level security -HINT: Use INSERT statements instead. --- Check COPY FROM as user with permissions and BYPASSRLS -SET SESSION AUTHORIZATION regress_rls_exempt_user; -SET row_security TO ON; -COPY copy_t FROM STDIN; --ok --- Check COPY FROM as user without permissions. 
-SET SESSION AUTHORIZATION regress_rls_carol; -SET row_security TO OFF; -COPY copy_t FROM STDIN; --fail - permission denied. -ERROR: permission denied for table copy_t -SET row_security TO ON; -COPY copy_t FROM STDIN; --fail - permission denied. -ERROR: permission denied for table copy_t -RESET SESSION AUTHORIZATION; -DROP TABLE copy_t; -DROP TABLE copy_rel_to CASCADE; -NOTICE: drop cascades to table copy_rel_to_child --- Check WHERE CURRENT OF -SET SESSION AUTHORIZATION regress_rls_alice; -CREATE TABLE current_check (currentid int, payload text, rlsuser text); -GRANT ALL ON current_check TO PUBLIC; -INSERT INTO current_check VALUES - (1, 'abc', 'regress_rls_bob'), - (2, 'bcd', 'regress_rls_bob'), - (3, 'cde', 'regress_rls_bob'), - (4, 'def', 'regress_rls_bob'); -CREATE POLICY p1 ON current_check FOR SELECT USING (currentid % 2 = 0); -CREATE POLICY p2 ON current_check FOR DELETE USING (currentid = 4 AND rlsuser = current_user); -CREATE POLICY p3 ON current_check FOR UPDATE USING (currentid = 4) WITH CHECK (rlsuser = current_user); -ALTER TABLE current_check ENABLE ROW LEVEL SECURITY; -SET SESSION AUTHORIZATION regress_rls_bob; --- Can SELECT even rows -SELECT * FROM current_check; - currentid | payload | rlsuser ------------+---------+----------------- - 2 | bcd | regress_rls_bob - 4 | def | regress_rls_bob -(2 rows) - --- Cannot UPDATE row 2 -UPDATE current_check SET payload = payload || '_new' WHERE currentid = 2 RETURNING *; - currentid | payload | rlsuser ------------+---------+--------- -(0 rows) - -BEGIN; -DECLARE current_check_cursor SCROLL CURSOR FOR SELECT * FROM current_check; --- Returns rows that can be seen according to SELECT policy, like plain SELECT --- above (even rows) -FETCH ABSOLUTE 1 FROM current_check_cursor; - currentid | payload | rlsuser ------------+---------+----------------- - 2 | bcd | regress_rls_bob -(1 row) - --- Still cannot UPDATE row 2 through cursor -UPDATE current_check SET payload = payload || '_new' WHERE CURRENT OF current_check_cursor RETURNING *; - currentid | payload | rlsuser ------------+---------+--------- -(0 rows) - --- Can update row 4 through cursor, which is the next visible row -FETCH RELATIVE 1 FROM current_check_cursor; - currentid | payload | rlsuser ------------+---------+----------------- - 4 | def | regress_rls_bob -(1 row) - -UPDATE current_check SET payload = payload || '_new' WHERE CURRENT OF current_check_cursor RETURNING *; - currentid | payload | rlsuser ------------+---------+----------------- - 4 | def_new | regress_rls_bob -(1 row) - -SELECT * FROM current_check; - currentid | payload | rlsuser ------------+---------+----------------- - 2 | bcd | regress_rls_bob - 4 | def_new | regress_rls_bob -(2 rows) - --- Plan should be a subquery TID scan -EXPLAIN (COSTS OFF) UPDATE current_check SET payload = payload WHERE CURRENT OF current_check_cursor; - QUERY PLAN -------------------------------------------------------------- - Update on current_check - -> Tid Scan on current_check - TID Cond: CURRENT OF current_check_cursor - Filter: ((currentid = 4) AND ((currentid % 2) = 0)) -(4 rows) - --- Similarly can only delete row 4 -FETCH ABSOLUTE 1 FROM current_check_cursor; - currentid | payload | rlsuser ------------+---------+----------------- - 2 | bcd | regress_rls_bob -(1 row) - -DELETE FROM current_check WHERE CURRENT OF current_check_cursor RETURNING *; - currentid | payload | rlsuser ------------+---------+--------- -(0 rows) - -FETCH RELATIVE 1 FROM current_check_cursor; - currentid | payload | rlsuser 
------------+---------+----------------- - 4 | def | regress_rls_bob -(1 row) - -DELETE FROM current_check WHERE CURRENT OF current_check_cursor RETURNING *; - currentid | payload | rlsuser ------------+---------+----------------- - 4 | def_new | regress_rls_bob -(1 row) - -SELECT * FROM current_check; - currentid | payload | rlsuser ------------+---------+----------------- - 2 | bcd | regress_rls_bob -(1 row) - -COMMIT; --- --- check pg_stats view filtering --- -SET row_security TO ON; -SET SESSION AUTHORIZATION regress_rls_alice; -ANALYZE current_check; --- Stats visible -SELECT row_security_active('current_check'); - row_security_active ---------------------- - f -(1 row) - -SELECT attname, most_common_vals FROM pg_stats - WHERE tablename = 'current_check' - ORDER BY 1; - attname | most_common_vals ------------+------------------- - currentid | - payload | - rlsuser | {regress_rls_bob} -(3 rows) - -SET SESSION AUTHORIZATION regress_rls_bob; --- Stats not visible -SELECT row_security_active('current_check'); - row_security_active ---------------------- - t -(1 row) - -SELECT attname, most_common_vals FROM pg_stats - WHERE tablename = 'current_check' - ORDER BY 1; - attname | most_common_vals ----------+------------------ -(0 rows) - --- --- Collation support --- -BEGIN; -CREATE TABLE coll_t (c) AS VALUES ('bar'::text); -CREATE POLICY coll_p ON coll_t USING (c < ('foo'::text COLLATE "C")); -ALTER TABLE coll_t ENABLE ROW LEVEL SECURITY; -GRANT SELECT ON coll_t TO regress_rls_alice; -SELECT (string_to_array(polqual, ':'))[7] AS inputcollid FROM pg_policy WHERE polrelid = 'coll_t'::regclass; - inputcollid ------------------- - inputcollid 950 -(1 row) - -SET SESSION AUTHORIZATION regress_rls_alice; -SELECT * FROM coll_t; - c ------ - bar -(1 row) - -ROLLBACK; --- --- Shared Object Dependencies --- -RESET SESSION AUTHORIZATION; -BEGIN; -CREATE ROLE regress_rls_eve; -CREATE ROLE regress_rls_frank; -CREATE TABLE tbl1 (c) AS VALUES ('bar'::text); -GRANT SELECT ON TABLE tbl1 TO regress_rls_eve; -CREATE POLICY P ON tbl1 TO regress_rls_eve, regress_rls_frank USING (true); -SELECT refclassid::regclass, deptype - FROM pg_depend - WHERE classid = 'pg_policy'::regclass - AND refobjid = 'tbl1'::regclass; - refclassid | deptype -------------+--------- - pg_class | a -(1 row) - -SELECT refclassid::regclass, deptype - FROM pg_shdepend - WHERE classid = 'pg_policy'::regclass - AND refobjid IN ('regress_rls_eve'::regrole, 'regress_rls_frank'::regrole); - refclassid | deptype -------------+--------- - pg_authid | r - pg_authid | r -(2 rows) - -SAVEPOINT q; -DROP ROLE regress_rls_eve; --fails due to dependency on POLICY p -ERROR: role "regress_rls_eve" cannot be dropped because some objects depend on it -DETAIL: privileges for table tbl1 -target of policy p on table tbl1 -ROLLBACK TO q; -ALTER POLICY p ON tbl1 TO regress_rls_frank USING (true); -SAVEPOINT q; -DROP ROLE regress_rls_eve; --fails due to dependency on GRANT SELECT -ERROR: role "regress_rls_eve" cannot be dropped because some objects depend on it -DETAIL: privileges for table tbl1 -ROLLBACK TO q; -REVOKE ALL ON TABLE tbl1 FROM regress_rls_eve; -SAVEPOINT q; -DROP ROLE regress_rls_eve; --succeeds -ROLLBACK TO q; -SAVEPOINT q; -DROP ROLE regress_rls_frank; --fails due to dependency on POLICY p -ERROR: role "regress_rls_frank" cannot be dropped because some objects depend on it -DETAIL: target of policy p on table tbl1 -ROLLBACK TO q; -DROP POLICY p ON tbl1; -SAVEPOINT q; -DROP ROLE regress_rls_frank; -- succeeds -ROLLBACK TO q; -ROLLBACK; -- cleanup 
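[Editorial aside, not part of the captured diff: the DROP ROLE failures above stem from the pg_shdepend rows that a policy records for every role it names. A minimal standalone sketch of that bookkeeping, with illustrative object names, follows.]
BEGIN;
CREATE ROLE sketch_role;
CREATE TABLE sketch_tab (c int);
CREATE POLICY sketch_p ON sketch_tab TO sketch_role USING (true);
-- The policy's reference to sketch_role is recorded as a shared
-- dependency (deptype 'r'); until the policy is dropped or re-targeted,
-- DROP ROLE sketch_role fails just as regress_rls_eve does above.
SELECT classid::regclass, deptype
  FROM pg_shdepend
 WHERE refobjid = 'sketch_role'::regrole;
ROLLBACK;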
---
--- Policy expression handling
---
-BEGIN;
-CREATE TABLE t (c) AS VALUES ('bar'::text);
-CREATE POLICY p ON t USING (max(c)); -- fails: aggregate functions are not allowed in policy expressions
-ERROR: aggregate functions are not allowed in policy expressions
-ROLLBACK;
---
--- Non-target relations are only subject to SELECT policies
---
-SET SESSION AUTHORIZATION regress_rls_alice;
-CREATE TABLE r1 (a int);
-CREATE TABLE r2 (a int);
-INSERT INTO r1 VALUES (10), (20);
-INSERT INTO r2 VALUES (10), (20);
-GRANT ALL ON r1, r2 TO regress_rls_bob;
-CREATE POLICY p1 ON r1 USING (true);
-ALTER TABLE r1 ENABLE ROW LEVEL SECURITY;
-CREATE POLICY p1 ON r2 FOR SELECT USING (true);
-CREATE POLICY p2 ON r2 FOR INSERT WITH CHECK (false);
-CREATE POLICY p3 ON r2 FOR UPDATE USING (false);
-CREATE POLICY p4 ON r2 FOR DELETE USING (false);
-ALTER TABLE r2 ENABLE ROW LEVEL SECURITY;
-SET SESSION AUTHORIZATION regress_rls_bob;
-SELECT * FROM r1;
- a
----
- 10
- 20
-(2 rows)
-
-SELECT * FROM r2;
- a
----
- 10
- 20
-(2 rows)
-
--- r2 is read-only
-INSERT INTO r2 VALUES (2); -- Not allowed
-ERROR: new row violates row-level security policy for table "r2"
-UPDATE r2 SET a = 2 RETURNING *; -- Updates nothing
- a
---
-(0 rows)
-
-DELETE FROM r2 RETURNING *; -- Deletes nothing
- a
---
-(0 rows)
-
--- r2 can be used as a non-target relation in DML
-INSERT INTO r1 SELECT a + 1 FROM r2 RETURNING *; -- OK
- a
----
- 11
- 21
-(2 rows)
-
-UPDATE r1 SET a = r2.a + 2 FROM r2 WHERE r1.a = r2.a RETURNING *; -- OK
- a | a
----+----
- 12 | 10
- 22 | 20
-(2 rows)
-
-DELETE FROM r1 USING r2 WHERE r1.a = r2.a + 2 RETURNING *; -- OK
- a | a
----+----
- 12 | 10
- 22 | 20
-(2 rows)
-
-SELECT * FROM r1;
- a
----
- 11
- 21
-(2 rows)
-
-SELECT * FROM r2;
- a
----
- 10
- 20
-(2 rows)
-
-SET SESSION AUTHORIZATION regress_rls_alice;
-DROP TABLE r1;
-DROP TABLE r2;
---
--- FORCE ROW LEVEL SECURITY applies RLS to owners too
---
-SET SESSION AUTHORIZATION regress_rls_alice;
-SET row_security = on;
-CREATE TABLE r1 (a int);
-INSERT INTO r1 VALUES (10), (20);
-CREATE POLICY p1 ON r1 USING (false);
-ALTER TABLE r1 ENABLE ROW LEVEL SECURITY;
-ALTER TABLE r1 FORCE ROW LEVEL SECURITY;
--- No error, but no rows
-TABLE r1;
- a
---
-(0 rows)
-
--- RLS error
-INSERT INTO r1 VALUES (1);
-ERROR: new row violates row-level security policy for table "r1"
--- No error (unable to see any rows to update)
-UPDATE r1 SET a = 1;
-TABLE r1;
- a
---
-(0 rows)
-
--- No error (unable to see any rows to delete)
-DELETE FROM r1;
-TABLE r1;
- a
---
-(0 rows)
-
-SET row_security = off;
--- these all fail, would be affected by RLS
-TABLE r1;
-ERROR: query would be affected by row-level security policy for table "r1"
-HINT: To disable the policy for the table's owner, use ALTER TABLE NO FORCE ROW LEVEL SECURITY.
-UPDATE r1 SET a = 1;
-ERROR: query would be affected by row-level security policy for table "r1"
-HINT: To disable the policy for the table's owner, use ALTER TABLE NO FORCE ROW LEVEL SECURITY.
-DELETE FROM r1;
-ERROR: query would be affected by row-level security policy for table "r1"
-HINT: To disable the policy for the table's owner, use ALTER TABLE NO FORCE ROW LEVEL SECURITY.
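[Editorial aside, not part of the captured diff: the section above hinges on ALTER TABLE ... FORCE ROW LEVEL SECURITY, which makes policies apply to the table owner as well. A minimal sketch with illustrative names:]
SET row_security = on;
CREATE TABLE force_sketch (a int);
INSERT INTO force_sketch VALUES (1);
CREATE POLICY deny_all ON force_sketch USING (false);
ALTER TABLE force_sketch ENABLE ROW LEVEL SECURITY;
ALTER TABLE force_sketch FORCE ROW LEVEL SECURITY;
TABLE force_sketch;                  -- owner now sees no rows
INSERT INTO force_sketch VALUES (2); -- fails: with no WITH CHECK clause, the
                                     -- USING (false) expression also gates new rows
DROP TABLE force_sketch;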
-DROP TABLE r1;
---
--- FORCE ROW LEVEL SECURITY does not break RI
---
-SET SESSION AUTHORIZATION regress_rls_alice;
-SET row_security = on;
-CREATE TABLE r1 (a int PRIMARY KEY);
-CREATE TABLE r2 (a int REFERENCES r1);
-INSERT INTO r1 VALUES (10), (20);
-INSERT INTO r2 VALUES (10), (20);
--- Create policies on r2 which prevent the
--- owner from seeing any rows, but RI should
--- still see them.
-CREATE POLICY p1 ON r2 USING (false);
-ALTER TABLE r2 ENABLE ROW LEVEL SECURITY;
-ALTER TABLE r2 FORCE ROW LEVEL SECURITY;
--- Errors due to rows in r2
-DELETE FROM r1;
-ERROR: update or delete on table "r1" violates foreign key constraint "r2_a_fkey" on table "r2"
-DETAIL: Key (a)=(10) is still referenced from table "r2".
--- Reset r2 to no-RLS
-DROP POLICY p1 ON r2;
-ALTER TABLE r2 NO FORCE ROW LEVEL SECURITY;
-ALTER TABLE r2 DISABLE ROW LEVEL SECURITY;
--- clean out r2 for INSERT test below
-DELETE FROM r2;
--- Change r1 to not allow rows to be seen
-CREATE POLICY p1 ON r1 USING (false);
-ALTER TABLE r1 ENABLE ROW LEVEL SECURITY;
-ALTER TABLE r1 FORCE ROW LEVEL SECURITY;
--- No rows seen
-TABLE r1;
- a
---
-(0 rows)
-
--- No error, RI still sees that row exists in r1
-INSERT INTO r2 VALUES (10);
-DROP TABLE r2;
-DROP TABLE r1;
--- Ensure cascaded DELETE works
-CREATE TABLE r1 (a int PRIMARY KEY);
-CREATE TABLE r2 (a int REFERENCES r1 ON DELETE CASCADE);
-INSERT INTO r1 VALUES (10), (20);
-INSERT INTO r2 VALUES (10), (20);
--- Create policies on r2 which prevent the
--- owner from seeing any rows, but RI should
--- still see them.
-CREATE POLICY p1 ON r2 USING (false);
-ALTER TABLE r2 ENABLE ROW LEVEL SECURITY;
-ALTER TABLE r2 FORCE ROW LEVEL SECURITY;
--- Deletes all records from both
-DELETE FROM r1;
--- Remove FORCE from r2
-ALTER TABLE r2 NO FORCE ROW LEVEL SECURITY;
--- As owner, we now bypass RLS
--- verify no rows in r2 now
-TABLE r2;
- a
---
-(0 rows)
-
-DROP TABLE r2;
-DROP TABLE r1;
--- Ensure cascaded UPDATE works
-CREATE TABLE r1 (a int PRIMARY KEY);
-CREATE TABLE r2 (a int REFERENCES r1 ON UPDATE CASCADE);
-INSERT INTO r1 VALUES (10), (20);
-INSERT INTO r2 VALUES (10), (20);
--- Create policies on r2 which prevent the
--- owner from seeing any rows, but RI should
--- still see them.
-CREATE POLICY p1 ON r2 USING (false);
-ALTER TABLE r2 ENABLE ROW LEVEL SECURITY;
-ALTER TABLE r2 FORCE ROW LEVEL SECURITY;
--- Updates records in both
-UPDATE r1 SET a = a+5;
--- Remove FORCE from r2
-ALTER TABLE r2 NO FORCE ROW LEVEL SECURITY;
--- As owner, we now bypass RLS
--- verify records in r2 updated
-TABLE r2;
- a
----
- 15
- 25
-(2 rows)
-
-DROP TABLE r2;
-DROP TABLE r1;
---
--- Test INSERT+RETURNING applies SELECT policies as
--- WithCheckOptions (meaning an error is thrown)
---
-SET SESSION AUTHORIZATION regress_rls_alice;
-SET row_security = on;
-CREATE TABLE r1 (a int);
-CREATE POLICY p1 ON r1 FOR SELECT USING (false);
-CREATE POLICY p2 ON r1 FOR INSERT WITH CHECK (true);
-ALTER TABLE r1 ENABLE ROW LEVEL SECURITY;
-ALTER TABLE r1 FORCE ROW LEVEL SECURITY;
--- Works fine
-INSERT INTO r1 VALUES (10), (20);
--- No error, but no rows
-TABLE r1;
- a
---
-(0 rows)
-
-SET row_security = off;
--- fail, would be affected by RLS
-TABLE r1;
-ERROR: query would be affected by row-level security policy for table "r1"
-HINT: To disable the policy for the table's owner, use ALTER TABLE NO FORCE ROW LEVEL SECURITY.
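[Editorial aside, not part of the captured diff: the tests above rely on referential-integrity checks running with row security effectively bypassed, so FK enforcement and cascades see rows the owner cannot. A minimal sketch with illustrative names:]
CREATE TABLE pk_sketch (a int PRIMARY KEY);
CREATE TABLE fk_sketch (a int REFERENCES pk_sketch ON DELETE CASCADE);
INSERT INTO pk_sketch VALUES (1);
INSERT INTO fk_sketch VALUES (1);
CREATE POLICY deny_all ON fk_sketch USING (false);
ALTER TABLE fk_sketch ENABLE ROW LEVEL SECURITY;
ALTER TABLE fk_sketch FORCE ROW LEVEL SECURITY;
TABLE fk_sketch;       -- owner sees no rows ...
DELETE FROM pk_sketch; -- ... yet the cascade still finds and deletes them
DROP TABLE fk_sketch, pk_sketch;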
-SET row_security = on;
--- Error
-INSERT INTO r1 VALUES (10), (20) RETURNING *;
-ERROR: new row violates row-level security policy for table "r1"
-DROP TABLE r1;
---
--- Test UPDATE+RETURNING applies SELECT policies as
--- WithCheckOptions (meaning an error is thrown)
---
-SET SESSION AUTHORIZATION regress_rls_alice;
-SET row_security = on;
-CREATE TABLE r1 (a int PRIMARY KEY);
-CREATE POLICY p1 ON r1 FOR SELECT USING (a < 20);
-CREATE POLICY p2 ON r1 FOR UPDATE USING (a < 20) WITH CHECK (true);
-CREATE POLICY p3 ON r1 FOR INSERT WITH CHECK (true);
-INSERT INTO r1 VALUES (10);
-ALTER TABLE r1 ENABLE ROW LEVEL SECURITY;
-ALTER TABLE r1 FORCE ROW LEVEL SECURITY;
--- Works fine
-UPDATE r1 SET a = 30;
--- Show updated rows
-ALTER TABLE r1 NO FORCE ROW LEVEL SECURITY;
-TABLE r1;
- a
----
- 30
-(1 row)
-
--- reset value in r1 for test with RETURNING
-UPDATE r1 SET a = 10;
--- Verify row reset
-TABLE r1;
- a
----
- 10
-(1 row)
-
-ALTER TABLE r1 FORCE ROW LEVEL SECURITY;
--- Error
-UPDATE r1 SET a = 30 RETURNING *;
-ERROR: new row violates row-level security policy for table "r1"
--- UPDATE path of INSERT ... ON CONFLICT DO UPDATE should also error out
-INSERT INTO r1 VALUES (10)
- ON CONFLICT (a) DO UPDATE SET a = 30 RETURNING *;
-ERROR: new row violates row-level security policy for table "r1"
--- Should still error out without RETURNING (use of arbiter always requires
--- SELECT permissions)
-INSERT INTO r1 VALUES (10)
- ON CONFLICT (a) DO UPDATE SET a = 30;
-ERROR: new row violates row-level security policy for table "r1"
-INSERT INTO r1 VALUES (10)
- ON CONFLICT ON CONSTRAINT r1_pkey DO UPDATE SET a = 30;
-ERROR: new row violates row-level security policy for table "r1"
-DROP TABLE r1;
--- Check dependency handling
-RESET SESSION AUTHORIZATION;
-CREATE TABLE dep1 (c1 int);
-CREATE TABLE dep2 (c1 int);
-CREATE POLICY dep_p1 ON dep1 TO regress_rls_bob USING (c1 > (select max(dep2.c1) from dep2));
-ALTER POLICY dep_p1 ON dep1 TO regress_rls_bob,regress_rls_carol;
--- Should return one
-SELECT count(*) = 1 FROM pg_depend
- WHERE objid = (SELECT oid FROM pg_policy WHERE polname = 'dep_p1')
- AND refobjid = (SELECT oid FROM pg_class WHERE relname = 'dep2');
- ?column?
----------
- t
-(1 row)
-
-ALTER POLICY dep_p1 ON dep1 USING (true);
--- Should return one
-SELECT count(*) = 1 FROM pg_shdepend
- WHERE objid = (SELECT oid FROM pg_policy WHERE polname = 'dep_p1')
- AND refobjid = (SELECT oid FROM pg_authid WHERE rolname = 'regress_rls_bob');
- ?column?
----------
- t
-(1 row)
-
--- Should return one
-SELECT count(*) = 1 FROM pg_shdepend
- WHERE objid = (SELECT oid FROM pg_policy WHERE polname = 'dep_p1')
- AND refobjid = (SELECT oid FROM pg_authid WHERE rolname = 'regress_rls_carol');
- ?column?
----------
- t
-(1 row)
-
--- Should return zero
-SELECT count(*) = 0 FROM pg_depend
- WHERE objid = (SELECT oid FROM pg_policy WHERE polname = 'dep_p1')
- AND refobjid = (SELECT oid FROM pg_class WHERE relname = 'dep2');
- ?column?
----------
- t
-(1 row)
-
--- DROP OWNED BY testing
-RESET SESSION AUTHORIZATION;
-CREATE ROLE regress_rls_dob_role1;
-CREATE ROLE regress_rls_dob_role2;
-CREATE TABLE dob_t1 (c1 int);
-CREATE TABLE dob_t2 (c1 int) PARTITION BY RANGE (c1);
-CREATE POLICY p1 ON dob_t1 TO regress_rls_dob_role1 USING (true);
-DROP OWNED BY regress_rls_dob_role1;
-DROP POLICY p1 ON dob_t1; -- should fail, already gone
-ERROR: policy "p1" for table "dob_t1" does not exist
-CREATE POLICY p1 ON dob_t1 TO regress_rls_dob_role1,regress_rls_dob_role2 USING (true);
-DROP OWNED BY regress_rls_dob_role1;
-DROP POLICY p1 ON dob_t1; -- should succeed
--- same cases with duplicate polroles entries
-CREATE POLICY p1 ON dob_t1 TO regress_rls_dob_role1,regress_rls_dob_role1 USING (true);
-DROP OWNED BY regress_rls_dob_role1;
-DROP POLICY p1 ON dob_t1; -- should fail, already gone
-ERROR: policy "p1" for table "dob_t1" does not exist
-CREATE POLICY p1 ON dob_t1 TO regress_rls_dob_role1,regress_rls_dob_role1,regress_rls_dob_role2 USING (true);
-DROP OWNED BY regress_rls_dob_role1;
-DROP POLICY p1 ON dob_t1; -- should succeed
--- partitioned target
-CREATE POLICY p1 ON dob_t2 TO regress_rls_dob_role1,regress_rls_dob_role2 USING (true);
-DROP OWNED BY regress_rls_dob_role1;
-DROP POLICY p1 ON dob_t2; -- should succeed
-DROP USER regress_rls_dob_role1;
-DROP USER regress_rls_dob_role2;
--- Bug #15708: view + table with RLS should check policies as view owner
-CREATE TABLE ref_tbl (a int);
-INSERT INTO ref_tbl VALUES (1);
-CREATE TABLE rls_tbl (a int);
-INSERT INTO rls_tbl VALUES (10);
-ALTER TABLE rls_tbl ENABLE ROW LEVEL SECURITY;
-CREATE POLICY p1 ON rls_tbl USING (EXISTS (SELECT 1 FROM ref_tbl));
-GRANT SELECT ON ref_tbl TO regress_rls_bob;
-GRANT SELECT ON rls_tbl TO regress_rls_bob;
-CREATE VIEW rls_view AS SELECT * FROM rls_tbl;
-ALTER VIEW rls_view OWNER TO regress_rls_bob;
-GRANT SELECT ON rls_view TO regress_rls_alice;
-SET SESSION AUTHORIZATION regress_rls_alice;
-SELECT * FROM ref_tbl; -- Permission denied
-ERROR: permission denied for table ref_tbl
-SELECT * FROM rls_tbl; -- Permission denied
-ERROR: permission denied for table rls_tbl
-SELECT * FROM rls_view; -- OK
- a
----
- 10
-(1 row)
-
-RESET SESSION AUTHORIZATION;
-DROP VIEW rls_view;
-DROP TABLE rls_tbl;
-DROP TABLE ref_tbl;
--- Leaky operator test
-CREATE TABLE rls_tbl (a int);
-INSERT INTO rls_tbl SELECT x/10 FROM generate_series(1, 100) x;
-ANALYZE rls_tbl;
-ALTER TABLE rls_tbl ENABLE ROW LEVEL SECURITY;
-GRANT SELECT ON rls_tbl TO regress_rls_alice;
-SET SESSION AUTHORIZATION regress_rls_alice;
-CREATE FUNCTION op_leak(int, int) RETURNS bool
- AS 'BEGIN RAISE NOTICE ''op_leak => %, %'', $1, $2; RETURN $1 < $2; END'
- LANGUAGE plpgsql;
-CREATE OPERATOR <<< (procedure = op_leak, leftarg = int, rightarg = int,
- restrict = scalarltsel);
-SELECT * FROM rls_tbl WHERE a <<< 1000;
- a
---
-(0 rows)
-
-DROP OPERATOR <<< (int, int);
-DROP FUNCTION op_leak(int, int);
-RESET SESSION AUTHORIZATION;
-DROP TABLE rls_tbl;
--- Bug #16006: whole-row Vars in a policy don't play nice with sub-selects
-SET SESSION AUTHORIZATION regress_rls_alice;
-CREATE TABLE rls_tbl (a int, b int, c int);
-CREATE POLICY p1 ON rls_tbl USING (rls_tbl >= ROW(1,1,1));
-ALTER TABLE rls_tbl ENABLE ROW LEVEL SECURITY;
-ALTER TABLE rls_tbl FORCE ROW LEVEL SECURITY;
-INSERT INTO rls_tbl SELECT 10, 20, 30;
-EXPLAIN (VERBOSE, COSTS OFF)
-INSERT INTO rls_tbl
- SELECT * FROM (SELECT b, c FROM rls_tbl ORDER BY a) ss;
- QUERY PLAN
---------------------------------------------------------------------
- Insert on regress_rls_schema.rls_tbl
-   ->  Subquery Scan on ss
-         Output: ss.b, ss.c, NULL::integer
-         ->  Sort
-               Output: rls_tbl_1.b, rls_tbl_1.c, rls_tbl_1.a
-               Sort Key: rls_tbl_1.a
-               ->  Seq Scan on regress_rls_schema.rls_tbl rls_tbl_1
-                     Output: rls_tbl_1.b, rls_tbl_1.c, rls_tbl_1.a
-                     Filter: (rls_tbl_1.* >= '(1,1,1)'::record)
-(9 rows)
-
-INSERT INTO rls_tbl
- SELECT * FROM (SELECT b, c FROM rls_tbl ORDER BY a) ss;
-SELECT * FROM rls_tbl;
- a | b | c
-----+----+----
- 10 | 20 | 30
- 20 | 30 |
-(2 rows)
-
-DROP TABLE rls_tbl;
-RESET SESSION AUTHORIZATION;
--- CVE-2023-2455: inlining an SRF may introduce an RLS dependency
-create table rls_t (c text);
-insert into rls_t values ('invisible to bob');
-alter table rls_t enable row level security;
-grant select on rls_t to regress_rls_alice, regress_rls_bob;
-create policy p1 on rls_t for select to regress_rls_alice using (true);
-create policy p2 on rls_t for select to regress_rls_bob using (false);
-create function rls_f () returns setof rls_t
- stable language sql
- as $$ select * from rls_t $$;
-prepare q as select current_user, * from rls_f();
-set role regress_rls_alice;
-execute q;
- current_user | c
--------------------+------------------
- regress_rls_alice | invisible to bob
-(1 row)
-
-set role regress_rls_bob;
-execute q;
- current_user | c
---------------+---
-(0 rows)
-
-RESET ROLE;
-DROP FUNCTION rls_f();
-DROP TABLE rls_t;
---
--- Clean up objects
---
-RESET SESSION AUTHORIZATION;
-DROP SCHEMA regress_rls_schema CASCADE;
-NOTICE: drop cascades to 30 other objects
-DETAIL: drop cascades to function f_leak(text)
-drop cascades to table uaccount
-drop cascades to table category
-drop cascades to table document
-drop cascades to table part_document
-drop cascades to table dependent
-drop cascades to table rec1
-drop cascades to table rec2
-drop cascades to view rec1v
-drop cascades to view rec2v
-drop cascades to table s1
-drop cascades to table s2
-drop cascades to view v2
-drop cascades to table b1
-drop cascades to view bv1
-drop cascades to table z1
-drop cascades to table z2
-drop cascades to table z1_blacklist
-drop cascades to table x1
-drop cascades to table y1
-drop cascades to table y2
-drop cascades to table t1
-drop cascades to table t2
-drop cascades to table t3
-drop cascades to table t4
-drop cascades to table current_check
-drop cascades to table dep1
-drop cascades to table dep2
-drop cascades to table dob_t1
-drop cascades to table dob_t2
-DROP USER regress_rls_alice;
-DROP USER regress_rls_bob;
-DROP USER regress_rls_carol;
-DROP USER regress_rls_dave;
-DROP USER regress_rls_exempt_user;
-DROP ROLE regress_rls_group1;
-DROP ROLE regress_rls_group2;
--- Arrange to have a few policies left over, for testing
--- pg_dump/pg_restore
-CREATE SCHEMA regress_rls_schema;
-CREATE TABLE rls_tbl (c1 int);
-ALTER TABLE rls_tbl ENABLE ROW LEVEL SECURITY;
-CREATE POLICY p1 ON rls_tbl USING (c1 > 5);
-CREATE POLICY p2 ON rls_tbl FOR SELECT USING (c1 <= 3);
-CREATE POLICY p3 ON rls_tbl FOR UPDATE USING (c1 <= 3) WITH CHECK (c1 > 5);
-CREATE POLICY p4 ON rls_tbl FOR DELETE USING (c1 <= 3);
-CREATE TABLE rls_tbl_force (c1 int);
-ALTER TABLE rls_tbl_force ENABLE ROW LEVEL SECURITY;
-ALTER TABLE rls_tbl_force FORCE ROW LEVEL SECURITY;
-CREATE POLICY p1 ON rls_tbl_force USING (c1 = 5) WITH CHECK (c1 < 5);
-CREATE POLICY p2 ON rls_tbl_force FOR SELECT USING (c1 = 8);
-CREATE POLICY p3 ON rls_tbl_force FOR UPDATE USING (c1 = 8) WITH CHECK (c1 >= 5);
-CREATE POLICY p4 ON rls_tbl_force FOR DELETE USING (c1 = 8);
+WARNING: terminating connection because of crash of another server process
+DETAIL: The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory.
+HINT: In a moment you should be able to reconnect to the database and repeat your command.
+server closed the connection unexpectedly
+	This probably means the server terminated abnormally
+	before or while processing the request.
+connection to server was lost
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/groupingsets.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/groupingsets.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/groupingsets.out	2024-03-07 14:25:00.330832000 +0000
+++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/groupingsets.out	2024-03-07 14:27:17.000791000 +0000
@@ -34,2234 +34,7 @@
 -- (and with ordering differing from grouping)
 select a, b, grouping(a,b), sum(v), count(*), max(v)
   from gstest1 group by rollup (a,b);
- a | b | grouping | sum | count | max
----+---+----------+-----+-------+-----
- 1 | 1 | 0 | 21 | 2 | 11
- 1 | 2 | 0 | 25 | 2 | 13
- 1 | 3 | 0 | 14 | 1 | 14
- 1 | | 1 | 60 | 5 | 14
- 2 | 3 | 0 | 15 | 1 | 15
- 2 | | 1 | 15 | 1 | 15
- 3 | 3 | 0 | 16 | 1 | 16
- 3 | 4 | 0 | 17 | 1 | 17
- 3 | | 1 | 33 | 2 | 17
- 4 | 1 | 0 | 37 | 2 | 19
- 4 | | 1 | 37 | 2 | 19
- | | 3 | 145 | 10 | 19
-(12 rows)
-
-select a, b, grouping(a,b), sum(v), count(*), max(v)
- from gstest1 group by rollup (a,b) order by a,b;
- a | b | grouping | sum | count | max
----+---+----------+-----+-------+-----
- 1 | 1 | 0 | 21 | 2 | 11
- 1 | 2 | 0 | 25 | 2 | 13
- 1 | 3 | 0 | 14 | 1 | 14
- 1 | | 1 | 60 | 5 | 14
- 2 | 3 | 0 | 15 | 1 | 15
- 2 | | 1 | 15 | 1 | 15
- 3 | 3 | 0 | 16 | 1 | 16
- 3 | 4 | 0 | 17 | 1 | 17
- 3 | | 1 | 33 | 2 | 17
- 4 | 1 | 0 | 37 | 2 | 19
- 4 | | 1 | 37 | 2 | 19
- | | 3 | 145 | 10 | 19
-(12 rows)
-
-select a, b, grouping(a,b), sum(v), count(*), max(v)
- from gstest1 group by rollup (a,b) order by b desc, a;
- a | b | grouping | sum | count | max
----+---+----------+-----+-------+-----
- 1 | | 1 | 60 | 5 | 14
- 2 | | 1 | 15 | 1 | 15
- 3 | | 1 | 33 | 2 | 17
- 4 | | 1 | 37 | 2 | 19
- | | 3 | 145 | 10 | 19
- 3 | 4 | 0 | 17 | 1 | 17
- 1 | 3 | 0 | 14 | 1 | 14
- 2 | 3 | 0 | 15 | 1 | 15
- 3 | 3 | 0 | 16 | 1 | 16
- 1 | 2 | 0 | 25 | 2 | 13
- 1 | 1 | 0 | 21 | 2 | 11
- 4 | 1 | 0 | 37 | 2 | 19
-(12 rows)
-
-select a, b, grouping(a,b), sum(v), count(*), max(v)
- from gstest1 group by rollup (a,b) order by coalesce(a,0)+coalesce(b,0);
- a | b | grouping | sum | count | max
----+---+----------+-----+-------+-----
- | | 3 | 145 | 10 | 19
- 1 | | 1 | 60 | 5 | 14
- 1 | 1 | 0 | 21 | 2 | 11
- 2 | | 1 | 15 | 1 | 15
- 3 | | 1 | 33 | 2 | 17
- 1 | 2 | 0 | 25 | 2 | 13
- 1 | 3 | 0 | 14 | 1 | 14
- 4 | | 1 | 37 | 2 | 19
- 4 | 1 | 0 | 37 | 2 | 19
- 2 | 3 | 0 | 15 | 1 | 15
- 3 | 3 | 0 | 16 | 1 | 16
- 3 | 4 | 0 | 17 | 1 | 17
-(12 rows)
-
--- various types of ordered aggs
-select a, b, grouping(a,b),
- array_agg(v order by v),
- string_agg(v::text, ':' order by v desc),
- percentile_disc(0.5) within group (order by v),
- rank(1,2,12) within group (order by a,b,v)
- from gstest1 group by rollup (a,b) order by a,b;
- a | b | grouping | array_agg | string_agg | percentile_disc | rank
----+---+----------+---------------------------------+-------------------------------+-----------------+------
- 1 | 1 | 0 | {10,11} | 11:10 | 10 | 3
- 1 | 2 | 0 | {12,13} | 13:12 | 12 | 1
- 1 | 3 | 0 | {14} | 14 | 14 | 1
- 1 | | 1 | {10,11,12,13,14} | 14:13:12:11:10 | 12 | 3
- 2 | 3 | 0 | {15} | 15 | 15 | 1
- 2 | | 1 | {15} | 15 | 15 | 1
- 3 | 3 | 0 | {16} | 16 | 16 | 1
- 3 | 4 | 0 | {17} | 17 | 17 | 1
- 3 | | 1 | {16,17} | 17:16 | 16 | 1
- 4 | 1 | 0 | {18,19} | 19:18 | 18 | 1
- 4 | | 1 | {18,19} | 19:18 | 18 | 1
- | | 3 | {10,11,12,13,14,15,16,17,18,19} | 19:18:17:16:15:14:13:12:11:10 | 14 | 3
-(12 rows)
-
--- test usage of grouped columns in direct args of aggs
-select grouping(a), a, array_agg(b),
- rank(a) within group (order by b nulls first),
- rank(a) within group (order by b nulls last)
- from (values (1,1),(1,4),(1,5),(3,1),(3,2)) v(a,b)
- group by rollup (a) order by a;
- grouping | a | array_agg | rank | rank
-----------+---+-------------+------+------
- 0 | 1 | {1,4,5} | 1 | 1
- 0 | 3 | {1,2} | 3 | 3
- 1 | | {1,4,5,1,2} | 1 | 6
-(3 rows)
-
--- nesting with window functions
-select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum
- from gstest2 group by rollup (a,b) order by rsum, a, b;
- a | b | sum | rsum
----+---+-----+------
- 1 | 1 | 8 | 8
- 1 | 2 | 2 | 10
- 1 | | 10 | 20
- 2 | 2 | 2 | 22
- 2 | | 2 | 24
- | | 12 | 36
-(6 rows)
-
--- nesting with grouping sets
-select sum(c) from gstest2
- group by grouping sets((), grouping sets((), grouping sets(())))
- order by 1 desc;
- sum
------
- 12
- 12
- 12
-(3 rows)
-
-select sum(c) from gstest2
- group by grouping sets((), grouping sets((), grouping sets(((a, b)))))
- order by 1 desc;
- sum
------
- 12
- 12
- 8
- 2
- 2
-(5 rows)
-
-select sum(c) from gstest2
- group by grouping sets(grouping sets(rollup(c), grouping sets(cube(c))))
- order by 1 desc;
- sum
------
- 12
- 12
- 6
- 6
- 6
- 6
-(6 rows)
-
-select sum(c) from gstest2
- group by grouping sets(a, grouping sets(a, cube(b)))
- order by 1 desc;
- sum
------
- 12
- 10
- 10
- 8
- 4
- 2
- 2
-(7 rows)
-
-select sum(c) from gstest2
- group by grouping sets(grouping sets((a, (b))))
- order by 1 desc;
- sum
------
- 8
- 2
- 2
-(3 rows)
-
-select sum(c) from gstest2
- group by grouping sets(grouping sets((a, b)))
- order by 1 desc;
- sum
------
- 8
- 2
- 2
-(3 rows)
-
-select sum(c) from gstest2
- group by grouping sets(grouping sets(a, grouping sets(a), a))
- order by 1 desc;
- sum
------
- 10
- 10
- 10
- 2
- 2
- 2
-(6 rows)
-
-select sum(c) from gstest2
- group by grouping sets(grouping sets(a, grouping sets(a, grouping sets(a), ((a)), a, grouping sets(a), (a)), a))
- order by 1 desc;
- sum
------
- 10
- 10
- 10
- 10
- 10
- 10
- 10
- 10
- 2
- 2
- 2
- 2
- 2
- 2
- 2
- 2
-(16 rows)
-
-select sum(c) from gstest2
- group by grouping sets((a,(a,b)), grouping sets((a,(a,b)),a))
- order by 1 desc;
- sum
------
- 10
- 8
- 8
- 2
- 2
- 2
- 2
- 2
-(8 rows)
-
--- empty input: first is 0 rows, second 1, third 3 etc.
-select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a);
- a | b | sum | count
----+---+-----+-------
-(0 rows)
-
-select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),());
- a | b | sum | count
----+---+-----+-------
- | | | 0
-(1 row)
-
-select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),());
- a | b | sum | count
----+---+-----+-------
- | | | 0
- | | | 0
- | | | 0
-(3 rows)
-
-select sum(v), count(*) from gstest_empty group by grouping sets ((),(),());
- sum | count
------+-------
- | 0
- | 0
- | 0
-(3 rows)
-
--- empty input with joins tests some important code paths
-select t1.a, t2.b, sum(t1.v), count(*) from gstest_empty t1, gstest_empty t2
- group by grouping sets ((t1.a,t2.b),());
- a | b | sum | count
----+---+-----+-------
- | | | 0
-(1 row)
-
--- simple joins, var resolution, GROUPING on join vars
-select t1.a, t2.b, grouping(t1.a, t2.b), sum(t1.v), max(t2.a)
- from gstest1 t1, gstest2 t2
- group by grouping sets ((t1.a, t2.b), ());
- a | b | grouping | sum | max
----+---+----------+------+-----
- 1 | 1 | 0 | 420 | 1
- 1 | 2 | 0 | 120 | 2
- 2 | 1 | 0 | 105 | 1
- 2 | 2 | 0 | 30 | 2
- 3 | 1 | 0 | 231 | 1
- 3 | 2 | 0 | 66 | 2
- 4 | 1 | 0 | 259 | 1
- 4 | 2 | 0 | 74 | 2
- | | 3 | 1305 | 2
-(9 rows)
-
-select t1.a, t2.b, grouping(t1.a, t2.b), sum(t1.v), max(t2.a)
- from gstest1 t1 join gstest2 t2 on (t1.a=t2.a)
- group by grouping sets ((t1.a, t2.b), ());
- a | b | grouping | sum | max
----+---+----------+-----+-----
- 1 | 1 | 0 | 420 | 1
- 1 | 2 | 0 | 60 | 1
- 2 | 2 | 0 | 15 | 2
- | | 3 | 495 | 2
-(4 rows)
-
-select a, b, grouping(a, b), sum(t1.v), max(t2.c)
- from gstest1 t1 join gstest2 t2 using (a,b)
- group by grouping sets ((a, b), ());
- a | b | grouping | sum | max
----+---+----------+-----+-----
- 1 | 1 | 0 | 147 | 2
- 1 | 2 | 0 | 25 | 2
- | | 3 | 172 | 2
-(3 rows)
-
--- check that functionally dependent cols are not nulled
-select a, d, grouping(a,b,c)
- from gstest3
- group by grouping sets ((a,b), (a,c));
- a | d | grouping
----+---+----------
- 1 | 1 | 1
- 2 | 2 | 1
- 1 | 1 | 2
- 2 | 2 | 2
-(4 rows)
-
--- check that distinct grouping columns are kept separate
--- even if they are equal()
-explain (costs off)
-select g as alias1, g as alias2
- from generate_series(1,3) g
- group by alias1, rollup(alias2);
- QUERY PLAN
------------------------------------------------
- GroupAggregate
-   Group Key: g, g
-   Group Key: g
-   ->  Sort
-         Sort Key: g
-         ->  Function Scan on generate_series g
-(6 rows)
-
-select g as alias1, g as alias2
- from generate_series(1,3) g
- group by alias1, rollup(alias2);
- alias1 | alias2
---------+--------
- 1 | 1
- 1 |
- 2 | 2
- 2 |
- 3 | 3
- 3 |
-(6 rows)
-
--- check that pulled-up subquery outputs still go to null when appropriate
-select four, x
- from (select four, ten, 'foo'::text as x from tenk1) as t
- group by grouping sets (four, x)
- having x = 'foo';
- four | x
-------+-----
- | foo
-(1 row)
-
-select four, x || 'x'
- from (select four, ten, 'foo'::text as x from tenk1) as t
- group by grouping sets (four, x)
- order by four;
- four | ?column?
-------+----------
- 0 |
- 1 |
- 2 |
- 3 |
- | foox
-(5 rows)
-
-select (x+y)*1, sum(z)
- from (select 1 as x, 2 as y, 3 as z) s
- group by grouping sets (x+y, x);
- ?column? | sum
-----------+-----
- 3 | 3
- | 3
-(2 rows)
-
-select x, not x as not_x, q2 from
- (select *, q1 = 1 as x from int8_tbl i1) as t
- group by grouping sets(x, q2)
- order by x, q2;
- x | not_x | q2
----+-------+-------------------
- f | t |
- | | -4567890123456789
- | | 123
- | | 456
- | | 4567890123456789
-(5 rows)
-
--- check qual push-down rules for a subquery with grouping sets
-explain (verbose, costs off)
-select * from (
- select 1 as x, q1, sum(q2)
- from int8_tbl i1
- group by grouping sets(1, 2)
-) ss
-where x = 1 and q1 = 123;
- QUERY PLAN
--------------------------------------------------
- Subquery Scan on ss
-   Output: ss.x, ss.q1, ss.sum
-   Filter: ((ss.x = 1) AND (ss.q1 = 123))
-   ->  GroupAggregate
-         Output: (1), i1.q1, sum(i1.q2)
-         Group Key: (1)
-         Sort Key: i1.q1
-           Group Key: i1.q1
-         ->  Sort
-               Output: (1), i1.q1, i1.q2
-               Sort Key: (1)
-               ->  Seq Scan on public.int8_tbl i1
-                     Output: 1, i1.q1, i1.q2
-(13 rows)
-
-select * from (
- select 1 as x, q1, sum(q2)
- from int8_tbl i1
- group by grouping sets(1, 2)
-) ss
-where x = 1 and q1 = 123;
- x | q1 | sum
----+----+-----
-(0 rows)
-
--- check handling of pulled-up SubPlan in GROUPING() argument (bug #17479)
-explain (verbose, costs off)
-select grouping(ss.x)
-from int8_tbl i1
-cross join lateral (select (select i1.q1) as x) ss
-group by ss.x;
- QUERY PLAN
------------------------------------------------
- GroupAggregate
-   Output: GROUPING((SubPlan 1)), ((SubPlan 2))
-   Group Key: ((SubPlan 2))
-   ->  Sort
-         Output: ((SubPlan 2)), i1.q1
-         Sort Key: ((SubPlan 2))
-         ->  Seq Scan on public.int8_tbl i1
-               Output: (SubPlan 2), i1.q1
-               SubPlan 2
-                 ->  Result
-                       Output: i1.q1
-(11 rows)
-
-select grouping(ss.x)
-from int8_tbl i1
-cross join lateral (select (select i1.q1) as x) ss
-group by ss.x;
- grouping
----------
- 0
- 0
-(2 rows)
-
-explain (verbose, costs off)
-select (select grouping(ss.x))
-from int8_tbl i1
-cross join lateral (select (select i1.q1) as x) ss
-group by ss.x;
- QUERY PLAN
--------------------------------------------
- GroupAggregate
-   Output: (SubPlan 2), ((SubPlan 3))
-   Group Key: ((SubPlan 3))
-   ->  Sort
-         Output: ((SubPlan 3)), i1.q1
-         Sort Key: ((SubPlan 3))
-         ->  Seq Scan on public.int8_tbl i1
-               Output: (SubPlan 3), i1.q1
-               SubPlan 3
-                 ->  Result
-                       Output: i1.q1
-       SubPlan 2
-         ->  Result
-               Output: GROUPING((SubPlan 1))
-(14 rows)
-
-select (select grouping(ss.x))
-from int8_tbl i1
-cross join lateral (select (select i1.q1) as x) ss
-group by ss.x;
- grouping
----------
- 0
- 0
-(2 rows)
-
--- simple rescan tests
-select a, b, sum(v.x)
- from (values (1),(2)) v(x), gstest_data(v.x)
- group by rollup (a,b);
- a | b | sum
----+---+-----
- 1 | 1 | 1
- 1 | 2 | 1
- 1 | 3 | 1
- 1 | | 3
- 2 | 1 | 2
- 2 | 2 | 2
- 2 | 3 | 2
- 2 | | 6
- | | 9
-(9 rows)
-
-select *
- from (values (1),(2)) v(x),
- lateral (select a, b, sum(v.x) from gstest_data(v.x) group by rollup (a,b)) s;
-ERROR: aggregate functions are not allowed in FROM clause of their own query level
-LINE 3: lateral (select a, b, sum(v.x) from gstest_data(v.x) ...
-                                        ^
--- min max optimization should still work with GROUP BY ()
-explain (costs off)
- select min(unique1) from tenk1 GROUP BY ();
- QUERY PLAN
------------------------------------------------------------
- Result
-   InitPlan 1 (returns $0)
-     ->  Limit
-           ->  Index Only Scan using tenk1_unique1 on tenk1
-                 Index Cond: (unique1 IS NOT NULL)
-(5 rows)
-
--- Views with GROUPING SET queries
-CREATE VIEW gstest_view AS select a, b, grouping(a,b), sum(c), count(*), max(c)
- from gstest2 group by rollup ((a,b,c),(c,d));
-NOTICE: view "gstest_view" will be a temporary view
-select pg_get_viewdef('gstest_view'::regclass, true);
- pg_get_viewdef
---------------------------------------
- SELECT a,                           +
-     b,                              +
-     GROUPING(a, b) AS "grouping",   +
-     sum(c) AS sum,                  +
-     count(*) AS count,              +
-     max(c) AS max                   +
-    FROM gstest2                     +
-   GROUP BY ROLLUP((a, b, c), (c, d));
-(1 row)
-
--- Nested queries with 3 or more levels of nesting
-select(select (select grouping(a,b) from (values (1)) v2(c)) from (values (1,2)) v1(a,b) group by (a,b)) from (values(6,7)) v3(e,f) GROUP BY ROLLUP(e,f);
- grouping
----------
- 0
- 0
- 0
-(3 rows)
-
-select(select (select grouping(e,f) from (values (1)) v2(c)) from (values (1,2)) v1(a,b) group by (a,b)) from (values(6,7)) v3(e,f) GROUP BY ROLLUP(e,f);
- grouping
----------
- 0
- 1
- 3
-(3 rows)
-
-select(select (select grouping(c) from (values (1)) v2(c) GROUP BY c) from (values (1,2)) v1(a,b) group by (a,b)) from (values(6,7)) v3(e,f) GROUP BY ROLLUP(e,f);
- grouping
----------
- 0
- 0
- 0
-(3 rows)
-
--- Combinations of operations
-select a, b, c, d from gstest2 group by rollup(a,b),grouping sets(c,d);
- a | b | c | d
----+---+---+---
- 1 | 1 | 1 |
- 1 | | 1 |
- | | 1 |
- 1 | 1 | 2 |
- 1 | 2 | 2 |
- 1 | | 2 |
- 2 | 2 | 2 |
- 2 | | 2 |
- | | 2 |
- 1 | 1 | | 1
- 1 | | | 1
- | | | 1
- 1 | 1 | | 2
- 1 | 2 | | 2
- 1 | | | 2
- 2 | 2 | | 2
- 2 | | | 2
- | | | 2
-(18 rows)
-
-select a, b from (values (1,2),(2,3)) v(a,b) group by a,b, grouping sets(a);
- a | b
----+---
- 1 | 2
- 2 | 3
-(2 rows)
-
--- Tests for chained aggregates
-select a, b, grouping(a,b), sum(v), count(*), max(v)
- from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2)) order by 3,6;
- a | b | grouping | sum | count | max
----+---+----------+-----+-------+-----
- 1 | 1 | 0 | 21 | 2 | 11
- 1 | 2 | 0 | 25 | 2 | 13
- 1 | 3 | 0 | 14 | 1 | 14
- 2 | 3 | 0 | 15 | 1 | 15
- 3 | 3 | 0 | 16 | 1 | 16
- 3 | 4 | 0 | 17 | 1 | 17
- 4 | 1 | 0 | 37 | 2 | 19
- | | 3 | 21 | 2 | 11
- | | 3 | 21 | 2 | 11
- | | 3 | 25 | 2 | 13
- | | 3 | 25 | 2 | 13
- | | 3 | 14 | 1 | 14
- | | 3 | 14 | 1 | 14
- | | 3 | 15 | 1 | 15
- | | 3 | 15 | 1 | 15
- | | 3 | 16 | 1 | 16
- | | 3 | 16 | 1 | 16
- | | 3 | 17 | 1 | 17
- | | 3 | 17 | 1 | 17
- | | 3 | 37 | 2 | 19
- | | 3 | 37 | 2 | 19
-(21 rows)
-
-select(select (select grouping(a,b) from (values (1)) v2(c)) from (values (1,2)) v1(a,b) group by (a,b)) from (values(6,7)) v3(e,f) GROUP BY ROLLUP((e+1),(f+1));
- grouping
----------
- 0
- 0
- 0
-(3 rows)
-
-select(select (select grouping(a,b) from (values (1)) v2(c)) from (values (1,2)) v1(a,b) group by (a,b)) from (values(6,7)) v3(e,f) GROUP BY CUBE((e+1),(f+1)) ORDER BY (e+1),(f+1);
- grouping
----------
- 0
- 0
- 0
- 0
-(4 rows)
-
-select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum
- from gstest2 group by cube (a,b) order by rsum, a, b;
- a | b | sum | rsum
----+---+-----+------
- 1 | 1 | 8 | 8
- 1 | 2 | 2 | 10
- 1 | | 10 | 20
- 2 | 2 | 2 | 22
- 2 | | 2 | 24
- | 1 | 8 | 32
- | 2 | 4 | 36
- | | 12 | 48
-(8 rows)
-
-select a, b, sum(c) from (values (1,1,10),(1,1,11),(1,2,12),(1,2,13),(1,3,14),(2,3,15),(3,3,16),(3,4,17),(4,1,18),(4,1,19)) v(a,b,c) group by rollup (a,b);
- a | b | sum
----+---+-----
- 1 | 1 | 21
- 1 | 2 | 25
- 1 | 3 | 14
- 1 | | 60
- 2 | 3 | 15
- 2 | | 15
- 3 | 3 | 16
- 3 | 4 | 17
- 3 | | 33
- 4 | 1 | 37
- 4 | | 37
- | | 145
-(12 rows)
-
-select a, b, sum(v.x)
- from (values (1),(2)) v(x), gstest_data(v.x)
- group by cube (a,b) order by a,b;
- a | b | sum
----+---+-----
- 1 | 1 | 1
- 1 | 2 | 1
- 1 | 3 | 1
- 1 | | 3
- 2 | 1 | 2
- 2 | 2 | 2
- 2 | 3 | 2
- 2 | | 6
- | 1 | 3
- | 2 | 3
- | 3 | 3
- | | 9
-(12 rows)
-
--- Test reordering of grouping sets
-explain (costs off)
-select * from gstest1 group by grouping sets((a,b,v),(v)) order by v,b,a;
- QUERY PLAN
------------------------------------------------------------------------------------
- Incremental Sort
-   Sort Key: "*VALUES*".column3, "*VALUES*".column2, "*VALUES*".column1
-   Presorted Key: "*VALUES*".column3
-   ->  GroupAggregate
-         Group Key: "*VALUES*".column3, "*VALUES*".column2, "*VALUES*".column1
-         Group Key: "*VALUES*".column3
-         ->  Sort
-               Sort Key: "*VALUES*".column3, "*VALUES*".column2, "*VALUES*".column1
-               ->  Values Scan on "*VALUES*"
-(9 rows)
-
--- Agg level check. This query should error out.
-select (select grouping(a,b) from gstest2) from gstest2 group by a,b;
-ERROR: arguments to GROUPING must be grouping expressions of the associated query level
-LINE 1: select (select grouping(a,b) from gstest2) from gstest2 grou...
-                       ^
---Nested queries
-select a, b, sum(c), count(*) from gstest2 group by grouping sets (rollup(a,b),a);
- a | b | sum | count
----+---+-----+-------
- 1 | 1 | 8 | 7
- 1 | 2 | 2 | 1
- 1 | | 10 | 8
- 1 | | 10 | 8
- 2 | 2 | 2 | 1
- 2 | | 2 | 1
- 2 | | 2 | 1
- | | 12 | 9
-(8 rows)
-
--- HAVING queries
-select ten, sum(distinct four) from onek a
-group by grouping sets((ten,four),(ten))
-having exists (select 1 from onek b where sum(distinct a.four) = b.four);
- ten | sum
------+-----
- 0 | 0
- 0 | 2
- 0 | 2
- 1 | 1
- 1 | 3
- 2 | 0
- 2 | 2
- 2 | 2
- 3 | 1
- 3 | 3
- 4 | 0
- 4 | 2
- 4 | 2
- 5 | 1
- 5 | 3
- 6 | 0
- 6 | 2
- 6 | 2
- 7 | 1
- 7 | 3
- 8 | 0
- 8 | 2
- 8 | 2
- 9 | 1
- 9 | 3
-(25 rows)
-
--- Tests around pushdown of HAVING clauses, partially testing against previous bugs
-select a,count(*) from gstest2 group by rollup(a) order by a;
- a | count
----+-------
- 1 | 8
- 2 | 1
- | 9
-(3 rows)
-
-select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 order by a;
- a | count
----+-------
- 2 | 1
- | 9
-(2 rows)
-
-explain (costs off)
- select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 order by a;
- QUERY PLAN
----------------------------------------
- Sort
-   Sort Key: a
-   ->  GroupAggregate
-         Group Key: a
-         Group Key: ()
-         Filter: (a IS DISTINCT FROM 1)
-         ->  Sort
-               Sort Key: a
-               ->  Seq Scan on gstest2
-(9 rows)
-
-select v.c, (select count(*) from gstest2 group by () having v.c)
- from (values (false),(true)) v(c) order by v.c;
- c | count
----+-------
- f |
- t | 9
-(2 rows)
-
-explain (costs off)
- select v.c, (select count(*) from gstest2 group by () having v.c)
- from (values (false),(true)) v(c) order by v.c;
- QUERY PLAN
-----------------------------------------------------------
- Sort
-   Sort Key: "*VALUES*".column1
-   ->  Values Scan on "*VALUES*"
-         SubPlan 1
-           ->  Aggregate
-                 Group Key: ()
-                 Filter: "*VALUES*".column1
-                 ->  Result
-                       One-Time Filter: "*VALUES*".column1
-                       ->  Seq Scan on gstest2
-(10 rows)
-
--- HAVING with GROUPING queries
-select ten, grouping(ten) from onek
-group by grouping sets(ten) having grouping(ten) >= 0
-order by 2,1;
- ten | grouping
------+----------
- 0 | 0
- 1 | 0
- 2 | 0
- 3 | 0
- 4 | 0
- 5 | 0
- 6 | 0
- 7 | 0
- 8 | 0
- 9 | 0
-(10 rows)
-
-select ten, grouping(ten) from onek
-group by grouping sets(ten, four) having grouping(ten) > 0
-order by 2,1;
- ten | grouping
------+----------
- | 1
- | 1
- | 1
- | 1
-(4 rows)
-
-select ten, grouping(ten) from onek
-group by rollup(ten) having grouping(ten) > 0
-order by 2,1;
- ten | grouping
------+----------
- | 1
-(1 row)
-
-select ten, grouping(ten) from onek
-group by cube(ten) having grouping(ten) > 0
-order by 2,1;
- ten | grouping
------+----------
- | 1
-(1 row)
-
-select ten, grouping(ten) from onek
-group by (ten) having grouping(ten) >= 0
-order by 2,1;
- ten | grouping
------+----------
- 0 | 0
- 1 | 0
- 2 | 0
- 3 | 0
- 4 | 0
- 5 | 0
- 6 | 0
- 7 | 0
- 8 | 0
- 9 | 0
-(10 rows)
-
--- FILTER queries
-select ten, sum(distinct four) filter (where four::text ~ '123') from onek a
-group by rollup(ten);
- ten | sum
------+-----
- 0 |
- 1 |
- 2 |
- 3 |
- 4 |
- 5 |
- 6 |
- 7 |
- 8 |
- 9 |
- |
-(11 rows)
-
--- More rescan tests
-select * from (values (1),(2)) v(a) left join lateral (select v.a, four, ten, count(*) from onek group by cube(four,ten)) s on true order by v.a,four,ten;
- a | a | four | ten | count
----+---+------+-----+-------
- 1 | 1 | 0 | 0 | 50
- 1 | 1 | 0 | 2 | 50
- 1 | 1 | 0 | 4 | 50
- 1 | 1 | 0 | 6 | 50
- 1 | 1 | 0 | 8 | 50
- 1 | 1 | 0 | | 250
- 1 | 1 | 1 | 1 | 50
- 1 | 1 | 1 | 3 | 50
- 1 | 1 | 1 | 5 | 50
- 1 | 1 | 1 | 7 | 50
- 1 | 1 | 1 | 9 | 50
- 1 | 1 | 1 | | 250
- 1 | 1 | 2 | 0 | 50
- 1 | 1 | 2 | 2 | 50
- 1 | 1 | 2 | 4 | 50
- 1 | 1 | 2 | 6 | 50
- 1 | 1 | 2 | 8 | 50
- 1 | 1 | 2 | | 250
- 1 | 1 | 3 | 1 | 50
- 1 | 1 | 3 | 3 | 50
- 1 | 1 | 3 | 5 | 50
- 1 | 1 | 3 | 7 | 50
- 1 | 1 | 3 | 9 | 50
- 1 | 1 | 3 | | 250
- 1 | 1 | | 0 | 100
- 1 | 1 | | 1 | 100
- 1 | 1 | | 2 | 100
- 1 | 1 | | 3 | 100
- 1 | 1 | | 4 | 100
- 1 | 1 | | 5 | 100
- 1 | 1 | | 6 | 100
- 1 | 1 | | 7 | 100
- 1 | 1 | | 8 | 100
- 1 | 1 | | 9 | 100
- 1 | 1 | | | 1000
- 2 | 2 | 0 | 0 | 50
- 2 | 2 | 0 | 2 | 50
- 2 | 2 | 0 | 4 | 50
- 2 | 2 | 0 | 6 | 50
- 2 | 2 | 0 | 8 | 50
- 2 | 2 | 0 | | 250
- 2 | 2 | 1 | 1 | 50
- 2 | 2 | 1 | 3 | 50
- 2 | 2 | 1 | 5 | 50
- 2 | 2 | 1 | 7 | 50
- 2 | 2 | 1 | 9 | 50
- 2 | 2 | 1 | | 250
- 2 | 2 | 2 | 0 | 50
- 2 | 2 | 2 | 2 | 50
- 2 | 2 | 2 | 4 | 50
- 2 | 2 | 2 | 6 | 50
- 2 | 2 | 2 | 8 | 50
- 2 | 2 | 2 | | 250
- 2 | 2 | 3 | 1 | 50
- 2 | 2 | 3 | 3 | 50
- 2 | 2 | 3 | 5 | 50
- 2 | 2 | 3 | 7 | 50
- 2 | 2 | 3 | 9 | 50
- 2 | 2 | 3 | | 250
- 2 | 2 | | 0 | 100
- 2 | 2 | | 1 | 100
- 2 | 2 | | 2 | 100
- 2 | 2 | | 3 | 100
- 2 | 2 | | 4 | 100
- 2 | 2 | | 5 | 100
- 2 | 2 | | 6 | 100
- 2 | 2 | | 7 | 100
- 2 | 2 | | 8 | 100
- 2 | 2 | | 9 | 100
- 2 | 2 | | | 1000
-(70 rows)
-
-select array(select row(v.a,s1.*) from (select two,four, count(*) from onek group by cube(two,four) order by two,four) s1) from (values (1),(2)) v(a);
- array
------------------------------------------------------------------------------------------------------------------------------------------------------
- {"(1,0,0,250)","(1,0,2,250)","(1,0,,500)","(1,1,1,250)","(1,1,3,250)","(1,1,,500)","(1,,0,250)","(1,,1,250)","(1,,2,250)","(1,,3,250)","(1,,,1000)"}
- {"(2,0,0,250)","(2,0,2,250)","(2,0,,500)","(2,1,1,250)","(2,1,3,250)","(2,1,,500)","(2,,0,250)","(2,,1,250)","(2,,2,250)","(2,,3,250)","(2,,,1000)"}
-(2 rows)
-
--- Grouping on text columns
-select sum(ten) from onek group by two, rollup(four::text) order by 1;
- sum
------
- 1000
- 1000
- 1250
- 1250
- 2000
- 2500
-(6 rows)
-
-select sum(ten) from onek group by rollup(four::text), two order by 1;
- sum
------
- 1000
- 1000
- 1250
- 1250
- 2000
- 2500
-(6 rows)
-
--- hashing support
-set enable_hashagg = true;
--- failure cases
-select count(*) from gstest4 group by rollup(unhashable_col,unsortable_col);
-ERROR: could not implement GROUP BY
-DETAIL: Some of the datatypes only support hashing, while others only support sorting.
-select array_agg(v order by v) from gstest4 group by grouping sets ((id,unsortable_col),(id));
-ERROR: could not implement GROUP BY
-DETAIL: Some of the datatypes only support hashing, while others only support sorting.
--- simple cases
-select a, b, grouping(a,b), sum(v), count(*), max(v)
- from gstest1 group by grouping sets ((a),(b)) order by 3,1,2;
- a | b | grouping | sum | count | max
----+---+----------+-----+-------+-----
- 1 | | 1 | 60 | 5 | 14
- 2 | | 1 | 15 | 1 | 15
- 3 | | 1 | 33 | 2 | 17
- 4 | | 1 | 37 | 2 | 19
- | 1 | 2 | 58 | 4 | 19
- | 2 | 2 | 25 | 2 | 13
- | 3 | 2 | 45 | 3 | 16
- | 4 | 2 | 17 | 1 | 17
-(8 rows)
-
-explain (costs off) select a, b, grouping(a,b), sum(v), count(*), max(v)
- from gstest1 group by grouping sets ((a),(b)) order by 3,1,2;
- QUERY PLAN
---------------------------------------------------------------------------------------------------------
- Sort
-   Sort Key: (GROUPING("*VALUES*".column1, "*VALUES*".column2)), "*VALUES*".column1, "*VALUES*".column2
-   ->  HashAggregate
-         Hash Key: "*VALUES*".column1
-         Hash Key: "*VALUES*".column2
-         ->  Values Scan on "*VALUES*"
-(6 rows)
-
-select a, b, grouping(a,b), sum(v), count(*), max(v)
- from gstest1 group by cube(a,b) order by 3,1,2;
- a | b | grouping | sum | count | max
----+---+----------+-----+-------+-----
- 1 | 1 | 0 | 21 | 2 | 11
- 1 | 2 | 0 | 25 | 2 | 13
- 1 | 3 | 0 | 14 | 1 | 14
- 2 | 3 | 0 | 15 | 1 | 15
- 3 | 3 | 0 | 16 | 1 | 16
- 3 | 4 | 0 | 17 | 1 | 17
- 4 | 1 | 0 | 37 | 2 | 19
- 1 | | 1 | 60 | 5 | 14
- 2 | | 1 | 15 | 1 | 15
- 3 | | 1 | 33 | 2 | 17
- 4 | | 1 | 37 | 2 | 19
- | 1 | 2 | 58 | 4 | 19
- | 2 | 2 | 25 | 2 | 13
- | 3 | 2 | 45 | 3 | 16
- | 4 | 2 | 17 | 1 | 17
- | | 3 | 145 | 10 | 19
-(16 rows)
-
-explain (costs off) select a, b, grouping(a,b), sum(v), count(*), max(v)
- from gstest1 group by cube(a,b) order by 3,1,2;
- QUERY PLAN
---------------------------------------------------------------------------------------------------------
- Sort
-   Sort Key: (GROUPING("*VALUES*".column1, "*VALUES*".column2)), "*VALUES*".column1, "*VALUES*".column2
-   ->  MixedAggregate
-         Hash Key: "*VALUES*".column1, "*VALUES*".column2
-         Hash Key: "*VALUES*".column1
-         Hash Key: "*VALUES*".column2
-         Group Key: ()
-         ->  Values Scan on "*VALUES*"
-(8 rows)
-
--- shouldn't try and hash
-explain (costs off)
- select a, b, grouping(a,b), array_agg(v order by v)
- from gstest1 group by cube(a,b);
- QUERY PLAN
----------------------------------------------------------
- GroupAggregate
-   Group Key: "*VALUES*".column1, "*VALUES*".column2
-   Group Key: "*VALUES*".column1
-   Group Key: ()
-   Sort Key: "*VALUES*".column2
-     Group Key: "*VALUES*".column2
-   ->  Sort
-         Sort Key: "*VALUES*".column1, "*VALUES*".column2
-         ->  Values Scan on "*VALUES*"
-(9 rows)
-
--- unsortable cases
-select unsortable_col, count(*)
- from gstest4 group by grouping sets ((unsortable_col),(unsortable_col))
- order by unsortable_col::text;
- unsortable_col | count
-----------------+-------
- 1 | 4
- 1 | 4
- 2 | 4
- 2 | 4
-(4 rows)
-
--- mixed hashable/sortable cases
-select unhashable_col, unsortable_col,
grouping(unhashable_col, unsortable_col), - count(*), sum(v) - from gstest4 group by grouping sets ((unhashable_col),(unsortable_col)) - order by 3, 5; - unhashable_col | unsortable_col | grouping | count | sum -----------------+----------------+----------+-------+----- - 0000 | | 1 | 2 | 17 - 0001 | | 1 | 2 | 34 - 0010 | | 1 | 2 | 68 - 0011 | | 1 | 2 | 136 - | 2 | 2 | 4 | 60 - | 1 | 2 | 4 | 195 -(6 rows) - -explain (costs off) - select unhashable_col, unsortable_col, - grouping(unhashable_col, unsortable_col), - count(*), sum(v) - from gstest4 group by grouping sets ((unhashable_col),(unsortable_col)) - order by 3,5; - QUERY PLAN ------------------------------------------------------------------- - Sort - Sort Key: (GROUPING(unhashable_col, unsortable_col)), (sum(v)) - -> MixedAggregate - Hash Key: unsortable_col - Group Key: unhashable_col - -> Sort - Sort Key: unhashable_col - -> Seq Scan on gstest4 -(8 rows) - -select unhashable_col, unsortable_col, - grouping(unhashable_col, unsortable_col), - count(*), sum(v) - from gstest4 group by grouping sets ((v,unhashable_col),(v,unsortable_col)) - order by 3,5; - unhashable_col | unsortable_col | grouping | count | sum -----------------+----------------+----------+-------+----- - 0000 | | 1 | 1 | 1 - 0001 | | 1 | 1 | 2 - 0010 | | 1 | 1 | 4 - 0011 | | 1 | 1 | 8 - 0000 | | 1 | 1 | 16 - 0001 | | 1 | 1 | 32 - 0010 | | 1 | 1 | 64 - 0011 | | 1 | 1 | 128 - | 1 | 2 | 1 | 1 - | 1 | 2 | 1 | 2 - | 2 | 2 | 1 | 4 - | 2 | 2 | 1 | 8 - | 2 | 2 | 1 | 16 - | 2 | 2 | 1 | 32 - | 1 | 2 | 1 | 64 - | 1 | 2 | 1 | 128 -(16 rows) - -explain (costs off) - select unhashable_col, unsortable_col, - grouping(unhashable_col, unsortable_col), - count(*), sum(v) - from gstest4 group by grouping sets ((v,unhashable_col),(v,unsortable_col)) - order by 3,5; - QUERY PLAN ------------------------------------------------------------------- - Sort - Sort Key: (GROUPING(unhashable_col, unsortable_col)), (sum(v)) - -> MixedAggregate - Hash Key: v, unsortable_col - Group Key: v, unhashable_col - -> Sort - Sort Key: v, unhashable_col - -> Seq Scan on gstest4 -(8 rows) - --- empty input: first is 0 rows, second 1, third 3 etc. 
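[A note on the semantics the empty-input tests below pin down, using the same gstest_empty table: a grouping set that names columns produces no output rows for empty input, but each empty set () behaves like a plain ungrouped aggregate and always emits exactly one row. A minimal sketch:]

select count(*) from gstest_empty group by grouping sets ((),());
-- two rows, each with count = 0: one per empty grouping set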
-select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a); - a | b | sum | count ----+---+-----+------- -(0 rows) - -explain (costs off) - select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a); - QUERY PLAN --------------------------------- - HashAggregate - Hash Key: a, b - Hash Key: a - -> Seq Scan on gstest_empty -(4 rows) - -select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),()); - a | b | sum | count ----+---+-----+------- - | | | 0 -(1 row) - -select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),()); - a | b | sum | count ----+---+-----+------- - | | | 0 - | | | 0 - | | | 0 -(3 rows) - -explain (costs off) - select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),()); - QUERY PLAN --------------------------------- - MixedAggregate - Hash Key: a, b - Group Key: () - Group Key: () - Group Key: () - -> Seq Scan on gstest_empty -(6 rows) - -select sum(v), count(*) from gstest_empty group by grouping sets ((),(),()); - sum | count ------+------- - | 0 - | 0 - | 0 -(3 rows) - -explain (costs off) - select sum(v), count(*) from gstest_empty group by grouping sets ((),(),()); - QUERY PLAN --------------------------------- - Aggregate - Group Key: () - Group Key: () - Group Key: () - -> Seq Scan on gstest_empty -(5 rows) - --- check that functionally dependent cols are not nulled -select a, d, grouping(a,b,c) - from gstest3 - group by grouping sets ((a,b), (a,c)); - a | d | grouping ----+---+---------- - 1 | 1 | 1 - 2 | 2 | 1 - 1 | 1 | 2 - 2 | 2 | 2 -(4 rows) - -explain (costs off) - select a, d, grouping(a,b,c) - from gstest3 - group by grouping sets ((a,b), (a,c)); - QUERY PLAN ---------------------------- - HashAggregate - Hash Key: a, b - Hash Key: a, c - -> Seq Scan on gstest3 -(4 rows) - --- simple rescan tests -select a, b, sum(v.x) - from (values (1),(2)) v(x), gstest_data(v.x) - group by grouping sets (a,b) - order by 1, 2, 3; - a | b | sum ----+---+----- - 1 | | 3 - 2 | | 6 - | 1 | 3 - | 2 | 3 - | 3 | 3 -(5 rows) - -explain (costs off) - select a, b, sum(v.x) - from (values (1),(2)) v(x), gstest_data(v.x) - group by grouping sets (a,b) - order by 3, 1, 2; - QUERY PLAN ---------------------------------------------------------------------- - Sort - Sort Key: (sum("*VALUES*".column1)), gstest_data.a, gstest_data.b - -> HashAggregate - Hash Key: gstest_data.a - Hash Key: gstest_data.b - -> Nested Loop - -> Values Scan on "*VALUES*" - -> Function Scan on gstest_data -(8 rows) - -select * - from (values (1),(2)) v(x), - lateral (select a, b, sum(v.x) from gstest_data(v.x) group by grouping sets (a,b)) s; -ERROR: aggregate functions are not allowed in FROM clause of their own query level -LINE 3: lateral (select a, b, sum(v.x) from gstest_data(v.x) ... - ^ -explain (costs off) - select * - from (values (1),(2)) v(x), - lateral (select a, b, sum(v.x) from gstest_data(v.x) group by grouping sets (a,b)) s; -ERROR: aggregate functions are not allowed in FROM clause of their own query level -LINE 4: lateral (select a, b, sum(v.x) from gstest_data(v.x... 
- ^ --- Tests for chained aggregates -select a, b, grouping(a,b), sum(v), count(*), max(v) - from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2)) order by 3,6; - a | b | grouping | sum | count | max ----+---+----------+-----+-------+----- - 1 | 1 | 0 | 21 | 2 | 11 - 1 | 2 | 0 | 25 | 2 | 13 - 1 | 3 | 0 | 14 | 1 | 14 - 2 | 3 | 0 | 15 | 1 | 15 - 3 | 3 | 0 | 16 | 1 | 16 - 3 | 4 | 0 | 17 | 1 | 17 - 4 | 1 | 0 | 37 | 2 | 19 - | | 3 | 21 | 2 | 11 - | | 3 | 21 | 2 | 11 - | | 3 | 25 | 2 | 13 - | | 3 | 25 | 2 | 13 - | | 3 | 14 | 1 | 14 - | | 3 | 14 | 1 | 14 - | | 3 | 15 | 1 | 15 - | | 3 | 15 | 1 | 15 - | | 3 | 16 | 1 | 16 - | | 3 | 16 | 1 | 16 - | | 3 | 17 | 1 | 17 - | | 3 | 17 | 1 | 17 - | | 3 | 37 | 2 | 19 - | | 3 | 37 | 2 | 19 -(21 rows) - -explain (costs off) - select a, b, grouping(a,b), sum(v), count(*), max(v) - from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2)) order by 3,6; - QUERY PLAN -------------------------------------------------------------------------------------------- - Sort - Sort Key: (GROUPING("*VALUES*".column1, "*VALUES*".column2)), (max("*VALUES*".column3)) - -> HashAggregate - Hash Key: "*VALUES*".column1, "*VALUES*".column2 - Hash Key: ("*VALUES*".column1 + 1), ("*VALUES*".column2 + 1) - Hash Key: ("*VALUES*".column1 + 2), ("*VALUES*".column2 + 2) - -> Values Scan on "*VALUES*" -(7 rows) - -select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum - from gstest2 group by cube (a,b) order by rsum, a, b; - a | b | sum | rsum ----+---+-----+------ - 1 | 1 | 8 | 8 - 1 | 2 | 2 | 10 - 1 | | 10 | 20 - 2 | 2 | 2 | 22 - 2 | | 2 | 24 - | 1 | 8 | 32 - | 2 | 4 | 36 - | | 12 | 48 -(8 rows) - -explain (costs off) - select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum - from gstest2 group by cube (a,b) order by rsum, a, b; - QUERY PLAN ---------------------------------------------- - Sort - Sort Key: (sum((sum(c))) OVER (?)), a, b - -> WindowAgg - -> Sort - Sort Key: a, b - -> MixedAggregate - Hash Key: a, b - Hash Key: a - Hash Key: b - Group Key: () - -> Seq Scan on gstest2 -(11 rows) - -select a, b, sum(v.x) - from (values (1),(2)) v(x), gstest_data(v.x) - group by cube (a,b) order by a,b; - a | b | sum ----+---+----- - 1 | 1 | 1 - 1 | 2 | 1 - 1 | 3 | 1 - 1 | | 3 - 2 | 1 | 2 - 2 | 2 | 2 - 2 | 3 | 2 - 2 | | 6 - | 1 | 3 - | 2 | 3 - | 3 | 3 - | | 9 -(12 rows) - -explain (costs off) - select a, b, sum(v.x) - from (values (1),(2)) v(x), gstest_data(v.x) - group by cube (a,b) order by a,b; - QUERY PLAN ------------------------------------------------- - Sort - Sort Key: gstest_data.a, gstest_data.b - -> MixedAggregate - Hash Key: gstest_data.a, gstest_data.b - Hash Key: gstest_data.a - Hash Key: gstest_data.b - Group Key: () - -> Nested Loop - -> Values Scan on "*VALUES*" - -> Function Scan on gstest_data -(10 rows) - --- Verify that we correctly handle the child node returning a --- non-minimal slot, which happens if the input is pre-sorted, --- e.g. due to an index scan. 
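[The transaction below drives this path in two steps: first force a sorted GroupAggregate, then force the pre-sort to come from an index. As a minimal sketch of the same recipe, assuming a table t(a int primary key, b int) not part of this suite:]

BEGIN;
SET LOCAL enable_hashagg = false;  -- force GroupAggregate over sorted input
SET LOCAL enable_seqscan = false;  -- make the input come from an index scan,
                                   -- whose output slots are not minimal tuples
EXPLAIN (COSTS OFF)
  SELECT a, count(*) FROM t GROUP BY GROUPING SETS (a, ());
COMMIT;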
-BEGIN; -SET LOCAL enable_hashagg = false; -EXPLAIN (COSTS OFF) SELECT a, b, count(*), max(a), max(b) FROM gstest3 GROUP BY GROUPING SETS(a, b,()) ORDER BY a, b; - QUERY PLAN ---------------------------------------- - Sort - Sort Key: a, b - -> GroupAggregate - Group Key: a - Group Key: () - Sort Key: b - Group Key: b - -> Sort - Sort Key: a - -> Seq Scan on gstest3 -(10 rows) - -SELECT a, b, count(*), max(a), max(b) FROM gstest3 GROUP BY GROUPING SETS(a, b,()) ORDER BY a, b; - a | b | count | max | max ----+---+-------+-----+----- - 1 | | 1 | 1 | 1 - 2 | | 1 | 2 | 2 - | 1 | 1 | 1 | 1 - | 2 | 1 | 2 | 2 - | | 2 | 2 | 2 -(5 rows) - -SET LOCAL enable_seqscan = false; -EXPLAIN (COSTS OFF) SELECT a, b, count(*), max(a), max(b) FROM gstest3 GROUP BY GROUPING SETS(a, b,()) ORDER BY a, b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: a, b - -> GroupAggregate - Group Key: a - Group Key: () - Sort Key: b - Group Key: b - -> Index Scan using gstest3_pkey on gstest3 -(8 rows) - -SELECT a, b, count(*), max(a), max(b) FROM gstest3 GROUP BY GROUPING SETS(a, b,()) ORDER BY a, b; - a | b | count | max | max ----+---+-------+-----+----- - 1 | | 1 | 1 | 1 - 2 | | 1 | 2 | 2 - | 1 | 1 | 1 | 1 - | 2 | 1 | 2 | 2 - | | 2 | 2 | 2 -(5 rows) - -COMMIT; --- More rescan tests -select * from (values (1),(2)) v(a) left join lateral (select v.a, four, ten, count(*) from onek group by cube(four,ten)) s on true order by v.a,four,ten; - a | a | four | ten | count ----+---+------+-----+------- - 1 | 1 | 0 | 0 | 50 - 1 | 1 | 0 | 2 | 50 - 1 | 1 | 0 | 4 | 50 - 1 | 1 | 0 | 6 | 50 - 1 | 1 | 0 | 8 | 50 - 1 | 1 | 0 | | 250 - 1 | 1 | 1 | 1 | 50 - 1 | 1 | 1 | 3 | 50 - 1 | 1 | 1 | 5 | 50 - 1 | 1 | 1 | 7 | 50 - 1 | 1 | 1 | 9 | 50 - 1 | 1 | 1 | | 250 - 1 | 1 | 2 | 0 | 50 - 1 | 1 | 2 | 2 | 50 - 1 | 1 | 2 | 4 | 50 - 1 | 1 | 2 | 6 | 50 - 1 | 1 | 2 | 8 | 50 - 1 | 1 | 2 | | 250 - 1 | 1 | 3 | 1 | 50 - 1 | 1 | 3 | 3 | 50 - 1 | 1 | 3 | 5 | 50 - 1 | 1 | 3 | 7 | 50 - 1 | 1 | 3 | 9 | 50 - 1 | 1 | 3 | | 250 - 1 | 1 | | 0 | 100 - 1 | 1 | | 1 | 100 - 1 | 1 | | 2 | 100 - 1 | 1 | | 3 | 100 - 1 | 1 | | 4 | 100 - 1 | 1 | | 5 | 100 - 1 | 1 | | 6 | 100 - 1 | 1 | | 7 | 100 - 1 | 1 | | 8 | 100 - 1 | 1 | | 9 | 100 - 1 | 1 | | | 1000 - 2 | 2 | 0 | 0 | 50 - 2 | 2 | 0 | 2 | 50 - 2 | 2 | 0 | 4 | 50 - 2 | 2 | 0 | 6 | 50 - 2 | 2 | 0 | 8 | 50 - 2 | 2 | 0 | | 250 - 2 | 2 | 1 | 1 | 50 - 2 | 2 | 1 | 3 | 50 - 2 | 2 | 1 | 5 | 50 - 2 | 2 | 1 | 7 | 50 - 2 | 2 | 1 | 9 | 50 - 2 | 2 | 1 | | 250 - 2 | 2 | 2 | 0 | 50 - 2 | 2 | 2 | 2 | 50 - 2 | 2 | 2 | 4 | 50 - 2 | 2 | 2 | 6 | 50 - 2 | 2 | 2 | 8 | 50 - 2 | 2 | 2 | | 250 - 2 | 2 | 3 | 1 | 50 - 2 | 2 | 3 | 3 | 50 - 2 | 2 | 3 | 5 | 50 - 2 | 2 | 3 | 7 | 50 - 2 | 2 | 3 | 9 | 50 - 2 | 2 | 3 | | 250 - 2 | 2 | | 0 | 100 - 2 | 2 | | 1 | 100 - 2 | 2 | | 2 | 100 - 2 | 2 | | 3 | 100 - 2 | 2 | | 4 | 100 - 2 | 2 | | 5 | 100 - 2 | 2 | | 6 | 100 - 2 | 2 | | 7 | 100 - 2 | 2 | | 8 | 100 - 2 | 2 | | 9 | 100 - 2 | 2 | | | 1000 -(70 rows) - -select array(select row(v.a,s1.*) from (select two,four, count(*) from onek group by cube(two,four) order by two,four) s1) from (values (1),(2)) v(a); - array ------------------------------------------------------------------------------------------------------------------------------------------------------- - {"(1,0,0,250)","(1,0,2,250)","(1,0,,500)","(1,1,1,250)","(1,1,3,250)","(1,1,,500)","(1,,0,250)","(1,,1,250)","(1,,2,250)","(1,,3,250)","(1,,,1000)"} - 
{"(2,0,0,250)","(2,0,2,250)","(2,0,,500)","(2,1,1,250)","(2,1,3,250)","(2,1,,500)","(2,,0,250)","(2,,1,250)","(2,,2,250)","(2,,3,250)","(2,,,1000)"} -(2 rows) - --- Rescan logic changes when there are no empty grouping sets, so test --- that too: -select * from (values (1),(2)) v(a) left join lateral (select v.a, four, ten, count(*) from onek group by grouping sets(four,ten)) s on true order by v.a,four,ten; - a | a | four | ten | count ----+---+------+-----+------- - 1 | 1 | 0 | | 250 - 1 | 1 | 1 | | 250 - 1 | 1 | 2 | | 250 - 1 | 1 | 3 | | 250 - 1 | 1 | | 0 | 100 - 1 | 1 | | 1 | 100 - 1 | 1 | | 2 | 100 - 1 | 1 | | 3 | 100 - 1 | 1 | | 4 | 100 - 1 | 1 | | 5 | 100 - 1 | 1 | | 6 | 100 - 1 | 1 | | 7 | 100 - 1 | 1 | | 8 | 100 - 1 | 1 | | 9 | 100 - 2 | 2 | 0 | | 250 - 2 | 2 | 1 | | 250 - 2 | 2 | 2 | | 250 - 2 | 2 | 3 | | 250 - 2 | 2 | | 0 | 100 - 2 | 2 | | 1 | 100 - 2 | 2 | | 2 | 100 - 2 | 2 | | 3 | 100 - 2 | 2 | | 4 | 100 - 2 | 2 | | 5 | 100 - 2 | 2 | | 6 | 100 - 2 | 2 | | 7 | 100 - 2 | 2 | | 8 | 100 - 2 | 2 | | 9 | 100 -(28 rows) - -select array(select row(v.a,s1.*) from (select two,four, count(*) from onek group by grouping sets(two,four) order by two,four) s1) from (values (1),(2)) v(a); - array ---------------------------------------------------------------------------------- - {"(1,0,,500)","(1,1,,500)","(1,,0,250)","(1,,1,250)","(1,,2,250)","(1,,3,250)"} - {"(2,0,,500)","(2,1,,500)","(2,,0,250)","(2,,1,250)","(2,,2,250)","(2,,3,250)"} -(2 rows) - --- test the knapsack -set enable_indexscan = false; -set hash_mem_multiplier = 1.0; -set work_mem = '64kB'; -explain (costs off) - select unique1, - count(two), count(four), count(ten), - count(hundred), count(thousand), count(twothousand), - count(*) - from tenk1 group by grouping sets (unique1,twothousand,thousand,hundred,ten,four,two); - QUERY PLAN -------------------------------- - MixedAggregate - Hash Key: two - Hash Key: four - Hash Key: ten - Hash Key: hundred - Group Key: unique1 - Sort Key: twothousand - Group Key: twothousand - Sort Key: thousand - Group Key: thousand - -> Sort - Sort Key: unique1 - -> Seq Scan on tenk1 -(13 rows) - -explain (costs off) - select unique1, - count(two), count(four), count(ten), - count(hundred), count(thousand), count(twothousand), - count(*) - from tenk1 group by grouping sets (unique1,hundred,ten,four,two); - QUERY PLAN -------------------------------- - MixedAggregate - Hash Key: two - Hash Key: four - Hash Key: ten - Hash Key: hundred - Group Key: unique1 - -> Sort - Sort Key: unique1 - -> Seq Scan on tenk1 -(9 rows) - -set work_mem = '384kB'; -explain (costs off) - select unique1, - count(two), count(four), count(ten), - count(hundred), count(thousand), count(twothousand), - count(*) - from tenk1 group by grouping sets (unique1,twothousand,thousand,hundred,ten,four,two); - QUERY PLAN -------------------------------- - MixedAggregate - Hash Key: two - Hash Key: four - Hash Key: ten - Hash Key: hundred - Hash Key: thousand - Group Key: unique1 - Sort Key: twothousand - Group Key: twothousand - -> Sort - Sort Key: unique1 - -> Seq Scan on tenk1 -(12 rows) - --- check collation-sensitive matching between grouping expressions --- (similar to a check for aggregates, but there are additional code --- paths for GROUPING, so check again here) -select v||'a', case grouping(v||'a') when 1 then 1 else 0 end, count(*) - from unnest(array[1,1], array['a','b']) u(i,v) - group by rollup(i, v||'a') order by 1,3; - ?column? 
| case | count -----------+------+------- - aa | 0 | 1 - ba | 0 | 1 - | 1 | 2 - | 1 | 2 -(4 rows) - -select v||'a', case when grouping(v||'a') = 1 then 1 else 0 end, count(*) - from unnest(array[1,1], array['a','b']) u(i,v) - group by rollup(i, v||'a') order by 1,3; - ?column? | case | count -----------+------+------- - aa | 0 | 1 - ba | 0 | 1 - | 1 | 2 - | 1 | 2 -(4 rows) - --- Bug #16784 -create table bug_16784(i int, j int); -analyze bug_16784; -alter table bug_16784 set (autovacuum_enabled = 'false'); -update pg_class set reltuples = 10 where relname='bug_16784'; -insert into bug_16784 select g/10, g from generate_series(1,40) g; -set work_mem='64kB'; -set enable_sort = false; -select * from - (values (1),(2)) v(a), - lateral (select a, i, j, count(*) from - bug_16784 group by cube(i,j)) s - order by v.a, i, j; - a | a | i | j | count ----+---+---+----+------- - 1 | 1 | 0 | 1 | 1 - 1 | 1 | 0 | 2 | 1 - 1 | 1 | 0 | 3 | 1 - 1 | 1 | 0 | 4 | 1 - 1 | 1 | 0 | 5 | 1 - 1 | 1 | 0 | 6 | 1 - 1 | 1 | 0 | 7 | 1 - 1 | 1 | 0 | 8 | 1 - 1 | 1 | 0 | 9 | 1 - 1 | 1 | 0 | | 9 - 1 | 1 | 1 | 10 | 1 - 1 | 1 | 1 | 11 | 1 - 1 | 1 | 1 | 12 | 1 - 1 | 1 | 1 | 13 | 1 - 1 | 1 | 1 | 14 | 1 - 1 | 1 | 1 | 15 | 1 - 1 | 1 | 1 | 16 | 1 - 1 | 1 | 1 | 17 | 1 - 1 | 1 | 1 | 18 | 1 - 1 | 1 | 1 | 19 | 1 - 1 | 1 | 1 | | 10 - 1 | 1 | 2 | 20 | 1 - 1 | 1 | 2 | 21 | 1 - 1 | 1 | 2 | 22 | 1 - 1 | 1 | 2 | 23 | 1 - 1 | 1 | 2 | 24 | 1 - 1 | 1 | 2 | 25 | 1 - 1 | 1 | 2 | 26 | 1 - 1 | 1 | 2 | 27 | 1 - 1 | 1 | 2 | 28 | 1 - 1 | 1 | 2 | 29 | 1 - 1 | 1 | 2 | | 10 - 1 | 1 | 3 | 30 | 1 - 1 | 1 | 3 | 31 | 1 - 1 | 1 | 3 | 32 | 1 - 1 | 1 | 3 | 33 | 1 - 1 | 1 | 3 | 34 | 1 - 1 | 1 | 3 | 35 | 1 - 1 | 1 | 3 | 36 | 1 - 1 | 1 | 3 | 37 | 1 - 1 | 1 | 3 | 38 | 1 - 1 | 1 | 3 | 39 | 1 - 1 | 1 | 3 | | 10 - 1 | 1 | 4 | 40 | 1 - 1 | 1 | 4 | | 1 - 1 | 1 | | 1 | 1 - 1 | 1 | | 2 | 1 - 1 | 1 | | 3 | 1 - 1 | 1 | | 4 | 1 - 1 | 1 | | 5 | 1 - 1 | 1 | | 6 | 1 - 1 | 1 | | 7 | 1 - 1 | 1 | | 8 | 1 - 1 | 1 | | 9 | 1 - 1 | 1 | | 10 | 1 - 1 | 1 | | 11 | 1 - 1 | 1 | | 12 | 1 - 1 | 1 | | 13 | 1 - 1 | 1 | | 14 | 1 - 1 | 1 | | 15 | 1 - 1 | 1 | | 16 | 1 - 1 | 1 | | 17 | 1 - 1 | 1 | | 18 | 1 - 1 | 1 | | 19 | 1 - 1 | 1 | | 20 | 1 - 1 | 1 | | 21 | 1 - 1 | 1 | | 22 | 1 - 1 | 1 | | 23 | 1 - 1 | 1 | | 24 | 1 - 1 | 1 | | 25 | 1 - 1 | 1 | | 26 | 1 - 1 | 1 | | 27 | 1 - 1 | 1 | | 28 | 1 - 1 | 1 | | 29 | 1 - 1 | 1 | | 30 | 1 - 1 | 1 | | 31 | 1 - 1 | 1 | | 32 | 1 - 1 | 1 | | 33 | 1 - 1 | 1 | | 34 | 1 - 1 | 1 | | 35 | 1 - 1 | 1 | | 36 | 1 - 1 | 1 | | 37 | 1 - 1 | 1 | | 38 | 1 - 1 | 1 | | 39 | 1 - 1 | 1 | | 40 | 1 - 1 | 1 | | | 40 - 2 | 2 | 0 | 1 | 1 - 2 | 2 | 0 | 2 | 1 - 2 | 2 | 0 | 3 | 1 - 2 | 2 | 0 | 4 | 1 - 2 | 2 | 0 | 5 | 1 - 2 | 2 | 0 | 6 | 1 - 2 | 2 | 0 | 7 | 1 - 2 | 2 | 0 | 8 | 1 - 2 | 2 | 0 | 9 | 1 - 2 | 2 | 0 | | 9 - 2 | 2 | 1 | 10 | 1 - 2 | 2 | 1 | 11 | 1 - 2 | 2 | 1 | 12 | 1 - 2 | 2 | 1 | 13 | 1 - 2 | 2 | 1 | 14 | 1 - 2 | 2 | 1 | 15 | 1 - 2 | 2 | 1 | 16 | 1 - 2 | 2 | 1 | 17 | 1 - 2 | 2 | 1 | 18 | 1 - 2 | 2 | 1 | 19 | 1 - 2 | 2 | 1 | | 10 - 2 | 2 | 2 | 20 | 1 - 2 | 2 | 2 | 21 | 1 - 2 | 2 | 2 | 22 | 1 - 2 | 2 | 2 | 23 | 1 - 2 | 2 | 2 | 24 | 1 - 2 | 2 | 2 | 25 | 1 - 2 | 2 | 2 | 26 | 1 - 2 | 2 | 2 | 27 | 1 - 2 | 2 | 2 | 28 | 1 - 2 | 2 | 2 | 29 | 1 - 2 | 2 | 2 | | 10 - 2 | 2 | 3 | 30 | 1 - 2 | 2 | 3 | 31 | 1 - 2 | 2 | 3 | 32 | 1 - 2 | 2 | 3 | 33 | 1 - 2 | 2 | 3 | 34 | 1 - 2 | 2 | 3 | 35 | 1 - 2 | 2 | 3 | 36 | 1 - 2 | 2 | 3 | 37 | 1 - 2 | 2 | 3 | 38 | 1 - 2 | 2 | 3 | 39 | 1 - 2 | 2 | 3 | | 10 - 2 | 2 | 4 | 40 | 1 - 2 | 2 | 4 | | 1 - 2 | 2 | | 1 | 1 - 2 | 2 | | 2 | 1 - 2 | 2 | | 3 | 1 - 2 | 2 
| | 4 | 1 - 2 | 2 | | 5 | 1 - 2 | 2 | | 6 | 1 - 2 | 2 | | 7 | 1 - 2 | 2 | | 8 | 1 - 2 | 2 | | 9 | 1 - 2 | 2 | | 10 | 1 - 2 | 2 | | 11 | 1 - 2 | 2 | | 12 | 1 - 2 | 2 | | 13 | 1 - 2 | 2 | | 14 | 1 - 2 | 2 | | 15 | 1 - 2 | 2 | | 16 | 1 - 2 | 2 | | 17 | 1 - 2 | 2 | | 18 | 1 - 2 | 2 | | 19 | 1 - 2 | 2 | | 20 | 1 - 2 | 2 | | 21 | 1 - 2 | 2 | | 22 | 1 - 2 | 2 | | 23 | 1 - 2 | 2 | | 24 | 1 - 2 | 2 | | 25 | 1 - 2 | 2 | | 26 | 1 - 2 | 2 | | 27 | 1 - 2 | 2 | | 28 | 1 - 2 | 2 | | 29 | 1 - 2 | 2 | | 30 | 1 - 2 | 2 | | 31 | 1 - 2 | 2 | | 32 | 1 - 2 | 2 | | 33 | 1 - 2 | 2 | | 34 | 1 - 2 | 2 | | 35 | 1 - 2 | 2 | | 36 | 1 - 2 | 2 | | 37 | 1 - 2 | 2 | | 38 | 1 - 2 | 2 | | 39 | 1 - 2 | 2 | | 40 | 1 - 2 | 2 | | | 40 -(172 rows) - --- --- Compare results between plans using sorting and plans using hash --- aggregation. Force spilling in both cases by setting work_mem low --- and altering the statistics. --- -create table gs_data_1 as -select g%1000 as g1000, g%100 as g100, g%10 as g10, g - from generate_series(0,1999) g; -analyze gs_data_1; -alter table gs_data_1 set (autovacuum_enabled = 'false'); -update pg_class set reltuples = 10 where relname='gs_data_1'; -set work_mem='64kB'; --- Produce results with sorting. -set enable_sort = true; -set enable_hashagg = false; -set jit_above_cost = 0; -explain (costs off) -select g100, g10, sum(g::numeric), count(*), max(g::text) -from gs_data_1 group by cube (g1000, g100,g10); - QUERY PLAN ------------------------------------- - GroupAggregate - Group Key: g1000, g100, g10 - Group Key: g1000, g100 - Group Key: g1000 - Group Key: () - Sort Key: g100, g10 - Group Key: g100, g10 - Group Key: g100 - Sort Key: g10, g1000 - Group Key: g10, g1000 - Group Key: g10 - -> Sort - Sort Key: g1000, g100, g10 - -> Seq Scan on gs_data_1 -(14 rows) - -create table gs_group_1 as -select g100, g10, sum(g::numeric), count(*), max(g::text) -from gs_data_1 group by cube (g1000, g100,g10); --- Produce results with hash aggregation. -set enable_hashagg = true; -set enable_sort = false; -explain (costs off) -select g100, g10, sum(g::numeric), count(*), max(g::text) -from gs_data_1 group by cube (g1000, g100,g10); - QUERY PLAN ------------------------------- - MixedAggregate - Hash Key: g1000, g100, g10 - Hash Key: g1000, g100 - Hash Key: g1000 - Hash Key: g100, g10 - Hash Key: g100 - Hash Key: g10, g1000 - Hash Key: g10 - Group Key: () - -> Seq Scan on gs_data_1 -(10 rows) - -create table gs_hash_1 as -select g100, g10, sum(g::numeric), count(*), max(g::text) -from gs_data_1 group by cube (g1000, g100,g10); -set enable_sort = true; -set work_mem to default; -set hash_mem_multiplier to default; --- Compare results -(select * from gs_hash_1 except select * from gs_group_1) - union all -(select * from gs_group_1 except select * from gs_hash_1); - g100 | g10 | sum | count | max -------+-----+-----+-------+----- -(0 rows) - -drop table gs_group_1; -drop table gs_hash_1; --- GROUP BY DISTINCT --- "normal" behavior... 
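[It helps to spell out the expansion behind the row counts that follow: rollup(a, b) is the set list {(a,b),(a),()} and rollup(a, c) is {(a,c),(a),()}; combining them takes pairwise unions, yielding nine grouping sets with duplicates:]

-- (a,b,c), (a,b), (a,b), (a,c), (a,c), (a), (a), (a), ()
-- GROUP BY ALL (the default) keeps all nine: 8 column-bearing sets x 3 input
-- rows + 1 row for () = 25 rows in the first two results below.
-- GROUP BY DISTINCT dedupes to {(a,b,c),(a,b),(a,c),(a),()}:
-- 4 x 3 + 1 = 13 rows in the third.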
-select a, b, c -from (values (1, 2, 3), (4, null, 6), (7, 8, 9)) as t (a, b, c) -group by all rollup(a, b), rollup(a, c) -order by a, b, c; - a | b | c ----+---+--- - 1 | 2 | 3 - 1 | 2 | - 1 | 2 | - 1 | | 3 - 1 | | 3 - 1 | | - 1 | | - 1 | | - 4 | | 6 - 4 | | 6 - 4 | | 6 - 4 | | - 4 | | - 4 | | - 4 | | - 4 | | - 7 | 8 | 9 - 7 | 8 | - 7 | 8 | - 7 | | 9 - 7 | | 9 - 7 | | - 7 | | - 7 | | - | | -(25 rows) - --- ...which is also the default -select a, b, c -from (values (1, 2, 3), (4, null, 6), (7, 8, 9)) as t (a, b, c) -group by rollup(a, b), rollup(a, c) -order by a, b, c; - a | b | c ----+---+--- - 1 | 2 | 3 - 1 | 2 | - 1 | 2 | - 1 | | 3 - 1 | | 3 - 1 | | - 1 | | - 1 | | - 4 | | 6 - 4 | | 6 - 4 | | 6 - 4 | | - 4 | | - 4 | | - 4 | | - 4 | | - 7 | 8 | 9 - 7 | 8 | - 7 | 8 | - 7 | | 9 - 7 | | 9 - 7 | | - 7 | | - 7 | | - | | -(25 rows) - --- "group by distinct" behavior... -select a, b, c -from (values (1, 2, 3), (4, null, 6), (7, 8, 9)) as t (a, b, c) -group by distinct rollup(a, b), rollup(a, c) -order by a, b, c; - a | b | c ----+---+--- - 1 | 2 | 3 - 1 | 2 | - 1 | | 3 - 1 | | - 4 | | 6 - 4 | | 6 - 4 | | - 4 | | - 7 | 8 | 9 - 7 | 8 | - 7 | | 9 - 7 | | - | | -(13 rows) - --- ...which is not the same as "select distinct" -select distinct a, b, c -from (values (1, 2, 3), (4, null, 6), (7, 8, 9)) as t (a, b, c) -group by rollup(a, b), rollup(a, c) -order by a, b, c; - a | b | c ----+---+--- - 1 | 2 | 3 - 1 | 2 | - 1 | | 3 - 1 | | - 4 | | 6 - 4 | | - 7 | 8 | 9 - 7 | 8 | - 7 | | 9 - 7 | | - | | -(11 rows) - --- test handling of outer GroupingFunc within subqueries -explain (costs off) -select (select grouping(v1)) from (values ((select 1))) v(v1) group by cube(v1); - QUERY PLAN ---------------------------- - MixedAggregate - Hash Key: $2 - Group Key: () - InitPlan 1 (returns $1) - -> Result - InitPlan 3 (returns $2) - -> Result - -> Result - SubPlan 2 - -> Result -(10 rows) - -select (select grouping(v1)) from (values ((select 1))) v(v1) group by cube(v1); - grouping ----------- - 1 - 0 -(2 rows) - -explain (costs off) -select (select grouping(v1)) from (values ((select 1))) v(v1) group by v1; - QUERY PLAN ---------------------------- - GroupAggregate - InitPlan 1 (returns $1) - -> Result - InitPlan 3 (returns $2) - -> Result - -> Result - SubPlan 2 - -> Result -(8 rows) - -select (select grouping(v1)) from (values ((select 1))) v(v1) group by v1; - grouping ----------- - 0 -(1 row) - --- expressions nullable by grouping sets -explain (costs off) -select distinct on (a, b) a, b -from (values (1, 1), (2, 2)) as t (a, b) where a = b -group by grouping sets((a, b), (a)) -order by a, b; - QUERY PLAN ----------------------------------------------------------------- - Unique - -> Sort - Sort Key: "*VALUES*".column1, "*VALUES*".column2 - -> HashAggregate - Hash Key: "*VALUES*".column1, "*VALUES*".column2 - Hash Key: "*VALUES*".column1 - -> Values Scan on "*VALUES*" - Filter: (column1 = column2) -(8 rows) - -select distinct on (a, b) a, b -from (values (1, 1), (2, 2)) as t (a, b) where a = b -group by grouping sets((a, b), (a)) -order by a, b; - a | b ----+--- - 1 | 1 - 1 | - 2 | 2 - 2 | -(4 rows) - -explain (costs off) -select distinct on (a, b+1) a, b+1 -from (values (1, 0), (2, 1)) as t (a, b) where a = b+1 -group by grouping sets((a, b+1), (a)) -order by a, b+1; - QUERY PLAN ----------------------------------------------------------------------- - Unique - -> Sort - Sort Key: "*VALUES*".column1, (("*VALUES*".column2 + 1)) - -> HashAggregate - Hash Key: "*VALUES*".column1, ("*VALUES*".column2 + 1) - Hash 
Key: "*VALUES*".column1 - -> Values Scan on "*VALUES*" - Filter: (column1 = (column2 + 1)) -(8 rows) - -select distinct on (a, b+1) a, b+1 -from (values (1, 0), (2, 1)) as t (a, b) where a = b+1 -group by grouping sets((a, b+1), (a)) -order by a, b+1; - a | ?column? ----+---------- - 1 | 1 - 1 | - 2 | 2 - 2 | -(4 rows) - -explain (costs off) -select a, b -from (values (1, 1), (2, 2)) as t (a, b) where a = b -group by grouping sets((a, b), (a)) -order by a, b nulls first; - QUERY PLAN ----------------------------------------------------------------- - Sort - Sort Key: "*VALUES*".column1, "*VALUES*".column2 NULLS FIRST - -> HashAggregate - Hash Key: "*VALUES*".column1, "*VALUES*".column2 - Hash Key: "*VALUES*".column1 - -> Values Scan on "*VALUES*" - Filter: (column1 = column2) -(7 rows) - -select a, b -from (values (1, 1), (2, 2)) as t (a, b) where a = b -group by grouping sets((a, b), (a)) -order by a, b nulls first; - a | b ----+--- - 1 | - 1 | 1 - 2 | - 2 | 2 -(4 rows) - -explain (costs off) -select 1 as one group by rollup(one) order by one nulls first; - QUERY PLAN ------------------------------ - Sort - Sort Key: (1) NULLS FIRST - -> MixedAggregate - Hash Key: 1 - Group Key: () - -> Result -(6 rows) - -select 1 as one group by rollup(one) order by one nulls first; - one ------ - - 1 -(2 rows) - --- end +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/generated.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/generated.out --- /tmp/cirrus-ci-build/src/test/regress/expected/generated.out 2024-03-07 14:25:00.330666000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/generated.out 2024-03-07 14:27:17.004021000 +0000 @@ -1081,274 +1081,10 @@ (2 rows) \d gtest29 - Table "public.gtest29" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - a | integer | | | - b | integer | | | generated always as (a * 2) stored - -ALTER TABLE gtest29 ALTER COLUMN a SET EXPRESSION AS (a * 3); -- error -ERROR: column "a" of relation "gtest29" is not a generated column -ALTER TABLE gtest29 ALTER COLUMN a DROP EXPRESSION; -- error -ERROR: column "a" of relation "gtest29" is not a stored generated column -ALTER TABLE gtest29 ALTER COLUMN a DROP EXPRESSION IF EXISTS; -- notice -NOTICE: column "a" of relation "gtest29" is not a stored generated column, skipping --- Change the expression -ALTER TABLE gtest29 ALTER COLUMN b SET EXPRESSION AS (a * 3); -SELECT * FROM gtest29; - a | b ----+---- - 3 | 9 - 4 | 12 -(2 rows) - -\d gtest29 - Table "public.gtest29" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - a | integer | | | - b | integer | | | generated always as (a * 3) stored - -ALTER TABLE gtest29 ALTER COLUMN b DROP EXPRESSION; -INSERT INTO gtest29 (a) VALUES (5); -INSERT INTO gtest29 (a, b) VALUES (6, 66); -SELECT * FROM gtest29; - a | b ----+---- - 3 | 9 - 4 | 12 - 5 | - 6 | 66 -(4 rows) - -\d gtest29 - Table "public.gtest29" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | - --- check that dependencies between columns have also been removed -ALTER TABLE gtest29 DROP COLUMN a; -- should not drop b -\d gtest29 - Table "public.gtest29" - Column | Type | 
Collation | Nullable | Default ---------+---------+-----------+----------+--------- - b | integer | | | - --- with inheritance -CREATE TABLE gtest30 ( - a int, - b int GENERATED ALWAYS AS (a * 2) STORED -); -CREATE TABLE gtest30_1 () INHERITS (gtest30); -ALTER TABLE gtest30 ALTER COLUMN b DROP EXPRESSION; -\d gtest30 - Table "public.gtest30" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Number of child tables: 1 (Use \d+ to list them.) - -\d gtest30_1 - Table "public.gtest30_1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Inherits: gtest30 - -DROP TABLE gtest30 CASCADE; -NOTICE: drop cascades to table gtest30_1 -CREATE TABLE gtest30 ( - a int, - b int GENERATED ALWAYS AS (a * 2) STORED -); -CREATE TABLE gtest30_1 () INHERITS (gtest30); -ALTER TABLE ONLY gtest30 ALTER COLUMN b DROP EXPRESSION; -- error -ERROR: ALTER TABLE / DROP EXPRESSION must be applied to child tables too -\d gtest30 - Table "public.gtest30" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - a | integer | | | - b | integer | | | generated always as (a * 2) stored -Number of child tables: 1 (Use \d+ to list them.) - -\d gtest30_1 - Table "public.gtest30_1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - a | integer | | | - b | integer | | | generated always as (a * 2) stored -Inherits: gtest30 - -ALTER TABLE gtest30_1 ALTER COLUMN b DROP EXPRESSION; -- error -ERROR: cannot drop generation expression from inherited column --- triggers -CREATE TABLE gtest26 ( - a int PRIMARY KEY, - b int GENERATED ALWAYS AS (a * 2) STORED -); -CREATE FUNCTION gtest_trigger_func() RETURNS trigger - LANGUAGE plpgsql -AS $$ -BEGIN - IF tg_op IN ('DELETE', 'UPDATE') THEN - RAISE INFO '%: %: old = %', TG_NAME, TG_WHEN, OLD; - END IF; - IF tg_op IN ('INSERT', 'UPDATE') THEN - RAISE INFO '%: %: new = %', TG_NAME, TG_WHEN, NEW; - END IF; - IF tg_op = 'DELETE' THEN - RETURN OLD; - ELSE - RETURN NEW; - END IF; -END -$$; -CREATE TRIGGER gtest1 BEFORE DELETE OR UPDATE ON gtest26 - FOR EACH ROW - WHEN (OLD.b < 0) -- ok - EXECUTE PROCEDURE gtest_trigger_func(); -CREATE TRIGGER gtest2a BEFORE INSERT OR UPDATE ON gtest26 - FOR EACH ROW - WHEN (NEW.b < 0) -- error - EXECUTE PROCEDURE gtest_trigger_func(); -ERROR: BEFORE trigger's WHEN condition cannot reference NEW generated columns -LINE 3: WHEN (NEW.b < 0) -- error - ^ -DETAIL: Column "b" is a generated column. -CREATE TRIGGER gtest2b BEFORE INSERT OR UPDATE ON gtest26 - FOR EACH ROW - WHEN (NEW.* IS NOT NULL) -- error - EXECUTE PROCEDURE gtest_trigger_func(); -ERROR: BEFORE trigger's WHEN condition cannot reference NEW generated columns -LINE 3: WHEN (NEW.* IS NOT NULL) -- error - ^ -DETAIL: A whole-row reference is used and the table contains generated columns. 
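[The rule behind these two errors: stored generated columns are computed after BEFORE ROW triggers have run, so NEW.b — and any whole-row reference covering it — has no valid value yet in a BEFORE trigger's WHEN clause. Referencing the base column there is fine, as in this sketch, which is exactly the gtest2 trigger created next:]

CREATE TRIGGER gtest2 BEFORE INSERT ON gtest26
    FOR EACH ROW
    WHEN (NEW.a < 0)   -- base column: already available in a BEFORE trigger
    EXECUTE PROCEDURE gtest_trigger_func();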
-CREATE TRIGGER gtest2 BEFORE INSERT ON gtest26 - FOR EACH ROW - WHEN (NEW.a < 0) - EXECUTE PROCEDURE gtest_trigger_func(); -CREATE TRIGGER gtest3 AFTER DELETE OR UPDATE ON gtest26 - FOR EACH ROW - WHEN (OLD.b < 0) -- ok - EXECUTE PROCEDURE gtest_trigger_func(); -CREATE TRIGGER gtest4 AFTER INSERT OR UPDATE ON gtest26 - FOR EACH ROW - WHEN (NEW.b < 0) -- ok - EXECUTE PROCEDURE gtest_trigger_func(); -INSERT INTO gtest26 (a) VALUES (-2), (0), (3); -INFO: gtest2: BEFORE: new = (-2,) -INFO: gtest4: AFTER: new = (-2,-4) -SELECT * FROM gtest26 ORDER BY a; - a | b -----+---- - -2 | -4 - 0 | 0 - 3 | 6 -(3 rows) - -UPDATE gtest26 SET a = a * -2; -INFO: gtest1: BEFORE: old = (-2,-4) -INFO: gtest1: BEFORE: new = (4,) -INFO: gtest3: AFTER: old = (-2,-4) -INFO: gtest3: AFTER: new = (4,8) -INFO: gtest4: AFTER: old = (3,6) -INFO: gtest4: AFTER: new = (-6,-12) -SELECT * FROM gtest26 ORDER BY a; - a | b -----+----- - -6 | -12 - 0 | 0 - 4 | 8 -(3 rows) - -DELETE FROM gtest26 WHERE a = -6; -INFO: gtest1: BEFORE: old = (-6,-12) -INFO: gtest3: AFTER: old = (-6,-12) -SELECT * FROM gtest26 ORDER BY a; - a | b ----+--- - 0 | 0 - 4 | 8 -(2 rows) - -DROP TRIGGER gtest1 ON gtest26; -DROP TRIGGER gtest2 ON gtest26; -DROP TRIGGER gtest3 ON gtest26; --- Check that an UPDATE of "a" fires the trigger for UPDATE OF b, per --- SQL standard. -CREATE FUNCTION gtest_trigger_func3() RETURNS trigger - LANGUAGE plpgsql -AS $$ -BEGIN - RAISE NOTICE 'OK'; - RETURN NEW; -END -$$; -CREATE TRIGGER gtest11 BEFORE UPDATE OF b ON gtest26 - FOR EACH ROW - EXECUTE PROCEDURE gtest_trigger_func3(); -UPDATE gtest26 SET a = 1 WHERE a = 0; -NOTICE: OK -DROP TRIGGER gtest11 ON gtest26; -TRUNCATE gtest26; --- check that modifications of stored generated columns in triggers do --- not get propagated -CREATE FUNCTION gtest_trigger_func4() RETURNS trigger - LANGUAGE plpgsql -AS $$ -BEGIN - NEW.a = 10; - NEW.b = 300; - RETURN NEW; -END; -$$; -CREATE TRIGGER gtest12_01 BEFORE UPDATE ON gtest26 - FOR EACH ROW - EXECUTE PROCEDURE gtest_trigger_func(); -CREATE TRIGGER gtest12_02 BEFORE UPDATE ON gtest26 - FOR EACH ROW - EXECUTE PROCEDURE gtest_trigger_func4(); -CREATE TRIGGER gtest12_03 BEFORE UPDATE ON gtest26 - FOR EACH ROW - EXECUTE PROCEDURE gtest_trigger_func(); -INSERT INTO gtest26 (a) VALUES (1); -UPDATE gtest26 SET a = 11 WHERE a = 1; -INFO: gtest12_01: BEFORE: old = (1,2) -INFO: gtest12_01: BEFORE: new = (11,) -INFO: gtest12_03: BEFORE: old = (1,2) -INFO: gtest12_03: BEFORE: new = (10,) -SELECT * FROM gtest26 ORDER BY a; - a | b -----+---- - 10 | 20 -(1 row) - --- LIKE INCLUDING GENERATED and dropped column handling -CREATE TABLE gtest28a ( - a int, - b int, - c int, - x int GENERATED ALWAYS AS (b * 2) STORED -); -ALTER TABLE gtest28a DROP COLUMN a; -CREATE TABLE gtest28b (LIKE gtest28a INCLUDING GENERATED); -\d gtest28* - Table "public.gtest28a" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - b | integer | | | - c | integer | | | - x | integer | | | generated always as (b * 2) stored - - Table "public.gtest28b" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - b | integer | | | - c | integer | | | - x | integer | | | generated always as (b * 2) stored - +WARNING: terminating connection because of crash of another server process +DETAIL: The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and 
possibly corrupted shared memory. +HINT: In a moment you should be able to reconnect to the database and repeat your command. +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/join_hash.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/join_hash.out --- /tmp/cirrus-ci-build/src/test/regress/expected/join_hash.out 2024-03-07 14:25:00.331557000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/join_hash.out 2024-03-07 14:27:17.003169000 +0000 @@ -391,776 +391,7 @@ -- parallel with parallel-aware hash join savepoint settings; set local max_parallel_workers_per_gather = 1; -set local work_mem = '192kB'; -set local hash_mem_multiplier = 1.0; -set local enable_parallel_hash = on; -explain (costs off) - select count(*) from simple r join bigger_than_it_looks s using (id); - QUERY PLAN ---------------------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 1 - -> Partial Aggregate - -> Parallel Hash Join - Hash Cond: (r.id = s.id) - -> Parallel Seq Scan on simple r - -> Parallel Hash - -> Parallel Seq Scan on bigger_than_it_looks s -(9 rows) - -select count(*) from simple r join bigger_than_it_looks s using (id); - count -------- - 20000 -(1 row) - -select original > 1 as initially_multibatch, final > original as increased_batches - from hash_join_batches( -$$ - select count(*) from simple r join bigger_than_it_looks s using (id); -$$); - initially_multibatch | increased_batches -----------------------+------------------- - f | t -(1 row) - -rollback to settings; --- The "ugly" case: increasing the number of batches during execution --- doesn't help, so stop trying to fit in work_mem and hope for the --- best; in this case we plan for 1 batch, increases just once and --- then stop increasing because that didn't help at all, so we blow --- right through the work_mem budget and hope for the best... 
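[Why batch-splitting is futile here: if nearly every inner row carries the same join key, every row hashes into the same batch, so doubling the batch count never shrinks the partition that must stay in memory. A sketch of such an input (hypothetical demo table; the real extremely_skewed table is built earlier in this file):]

create table skew_demo as
  select 42 as id, repeat('x', 40) as t from generate_series(1, 20000);
-- every row hashes identically on id, so no number of batches can split it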
--- non-parallel -savepoint settings; -set local max_parallel_workers_per_gather = 0; -set local work_mem = '128kB'; -set local hash_mem_multiplier = 1.0; -explain (costs off) - select count(*) from simple r join extremely_skewed s using (id); - QUERY PLAN --------------------------------------------------- - Aggregate - -> Hash Join - Hash Cond: (r.id = s.id) - -> Seq Scan on simple r - -> Hash - -> Seq Scan on extremely_skewed s -(6 rows) - -select count(*) from simple r join extremely_skewed s using (id); - count -------- - 20000 -(1 row) - -select * from hash_join_batches( -$$ - select count(*) from simple r join extremely_skewed s using (id); -$$); - original | final -----------+------- - 1 | 2 -(1 row) - -rollback to settings; --- parallel with parallel-oblivious hash join -savepoint settings; -set local max_parallel_workers_per_gather = 2; -set local work_mem = '128kB'; -set local hash_mem_multiplier = 1.0; -set local enable_parallel_hash = off; -explain (costs off) - select count(*) from simple r join extremely_skewed s using (id); - QUERY PLAN --------------------------------------------------------- - Aggregate - -> Gather - Workers Planned: 2 - -> Hash Join - Hash Cond: (r.id = s.id) - -> Parallel Seq Scan on simple r - -> Hash - -> Seq Scan on extremely_skewed s -(8 rows) - -select count(*) from simple r join extremely_skewed s using (id); - count -------- - 20000 -(1 row) - -select * from hash_join_batches( -$$ - select count(*) from simple r join extremely_skewed s using (id); -$$); - original | final -----------+------- - 1 | 2 -(1 row) - -rollback to settings; --- parallel with parallel-aware hash join -savepoint settings; -set local max_parallel_workers_per_gather = 1; -set local work_mem = '128kB'; -set local hash_mem_multiplier = 1.0; -set local enable_parallel_hash = on; -explain (costs off) - select count(*) from simple r join extremely_skewed s using (id); - QUERY PLAN ------------------------------------------------------------------------ - Finalize Aggregate - -> Gather - Workers Planned: 1 - -> Partial Aggregate - -> Parallel Hash Join - Hash Cond: (r.id = s.id) - -> Parallel Seq Scan on simple r - -> Parallel Hash - -> Parallel Seq Scan on extremely_skewed s -(9 rows) - -select count(*) from simple r join extremely_skewed s using (id); - count -------- - 20000 -(1 row) - -select * from hash_join_batches( -$$ - select count(*) from simple r join extremely_skewed s using (id); -$$); - original | final -----------+------- - 1 | 4 -(1 row) - -rollback to settings; --- A couple of other hash join tests unrelated to work_mem management. --- Check that EXPLAIN ANALYZE has data even if the leader doesn't participate -savepoint settings; -set local max_parallel_workers_per_gather = 2; -set local work_mem = '4MB'; -set local hash_mem_multiplier = 1.0; -set local parallel_leader_participation = off; -select * from hash_join_batches( -$$ - select count(*) from simple r join simple s using (id); -$$); - original | final -----------+------- - 1 | 1 -(1 row) - -rollback to settings; --- Exercise rescans. We'll turn off parallel_leader_participation so --- that we can check that instrumentation comes back correctly. 
-create table join_foo as select generate_series(1, 3) as id, 'xxxxx'::text as t; -alter table join_foo set (parallel_workers = 0); -create table join_bar as select generate_series(1, 10000) as id, 'xxxxx'::text as t; -alter table join_bar set (parallel_workers = 2); --- multi-batch with rescan, parallel-oblivious -savepoint settings; -set enable_parallel_hash = off; -set parallel_leader_participation = off; -set min_parallel_table_scan_size = 0; -set parallel_setup_cost = 0; -set parallel_tuple_cost = 0; -set max_parallel_workers_per_gather = 2; -set enable_material = off; -set enable_mergejoin = off; -set work_mem = '64kB'; -set hash_mem_multiplier = 1.0; -explain (costs off) - select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; - QUERY PLAN ------------------------------------------------------------------------------------- - Aggregate - -> Nested Loop Left Join - Join Filter: ((join_foo.id < (b1.id + 1)) AND (join_foo.id > (b1.id - 1))) - -> Seq Scan on join_foo - -> Gather - Workers Planned: 2 - -> Hash Join - Hash Cond: (b1.id = b2.id) - -> Parallel Seq Scan on join_bar b1 - -> Hash - -> Seq Scan on join_bar b2 -(11 rows) - -select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; - count -------- - 3 -(1 row) - -select final > 1 as multibatch - from hash_join_batches( -$$ - select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; -$$); - multibatch ------------- - t -(1 row) - -rollback to settings; --- single-batch with rescan, parallel-oblivious -savepoint settings; -set enable_parallel_hash = off; -set parallel_leader_participation = off; -set min_parallel_table_scan_size = 0; -set parallel_setup_cost = 0; -set parallel_tuple_cost = 0; -set max_parallel_workers_per_gather = 2; -set enable_material = off; -set enable_mergejoin = off; -set work_mem = '4MB'; -set hash_mem_multiplier = 1.0; -explain (costs off) - select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; - QUERY PLAN ------------------------------------------------------------------------------------- - Aggregate - -> Nested Loop Left Join - Join Filter: ((join_foo.id < (b1.id + 1)) AND (join_foo.id > (b1.id - 1))) - -> Seq Scan on join_foo - -> Gather - Workers Planned: 2 - -> Hash Join - Hash Cond: (b1.id = b2.id) - -> Parallel Seq Scan on join_bar b1 - -> Hash - -> Seq Scan on join_bar b2 -(11 rows) - -select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; - count -------- - 3 -(1 row) - -select final > 1 as multibatch - from hash_join_batches( -$$ - select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; -$$); - multibatch ------------- - f -(1 row) - -rollback to settings; --- multi-batch with rescan, parallel-aware -savepoint settings; -set enable_parallel_hash = on; -set parallel_leader_participation = off; -set min_parallel_table_scan_size = 0; -set parallel_setup_cost = 0; -set parallel_tuple_cost = 0; -set max_parallel_workers_per_gather = 
2; -set enable_material = off; -set enable_mergejoin = off; -set work_mem = '64kB'; -set hash_mem_multiplier = 1.0; -explain (costs off) - select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; - QUERY PLAN ------------------------------------------------------------------------------------- - Aggregate - -> Nested Loop Left Join - Join Filter: ((join_foo.id < (b1.id + 1)) AND (join_foo.id > (b1.id - 1))) - -> Seq Scan on join_foo - -> Gather - Workers Planned: 2 - -> Parallel Hash Join - Hash Cond: (b1.id = b2.id) - -> Parallel Seq Scan on join_bar b1 - -> Parallel Hash - -> Parallel Seq Scan on join_bar b2 -(11 rows) - -select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; - count -------- - 3 -(1 row) - -select final > 1 as multibatch - from hash_join_batches( -$$ - select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; -$$); - multibatch ------------- - t -(1 row) - -rollback to settings; --- single-batch with rescan, parallel-aware -savepoint settings; -set enable_parallel_hash = on; -set parallel_leader_participation = off; -set min_parallel_table_scan_size = 0; -set parallel_setup_cost = 0; -set parallel_tuple_cost = 0; -set max_parallel_workers_per_gather = 2; -set enable_material = off; -set enable_mergejoin = off; -set work_mem = '4MB'; -set hash_mem_multiplier = 1.0; -explain (costs off) - select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; - QUERY PLAN ------------------------------------------------------------------------------------- - Aggregate - -> Nested Loop Left Join - Join Filter: ((join_foo.id < (b1.id + 1)) AND (join_foo.id > (b1.id - 1))) - -> Seq Scan on join_foo - -> Gather - Workers Planned: 2 - -> Parallel Hash Join - Hash Cond: (b1.id = b2.id) - -> Parallel Seq Scan on join_bar b1 - -> Parallel Hash - -> Parallel Seq Scan on join_bar b2 -(11 rows) - -select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; - count -------- - 3 -(1 row) - -select final > 1 as multibatch - from hash_join_batches( -$$ - select count(*) from join_foo - left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss - on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; -$$); - multibatch ------------- - f -(1 row) - -rollback to settings; --- A full outer join where every record is matched. 
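[Note: simple is created earlier in this file from generate_series(1, 20000), so in a self full join on id every row finds its match and each variant below returns exactly 20000 rows.]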
--- non-parallel -savepoint settings; -set local max_parallel_workers_per_gather = 0; -explain (costs off) - select count(*) from simple r full outer join simple s using (id); - QUERY PLAN ----------------------------------------- - Aggregate - -> Hash Full Join - Hash Cond: (r.id = s.id) - -> Seq Scan on simple r - -> Hash - -> Seq Scan on simple s -(6 rows) - -select count(*) from simple r full outer join simple s using (id); - count -------- - 20000 -(1 row) - -rollback to settings; --- parallelism not possible with parallel-oblivious full hash join -savepoint settings; -set enable_parallel_hash = off; -set local max_parallel_workers_per_gather = 2; -explain (costs off) - select count(*) from simple r full outer join simple s using (id); - QUERY PLAN ----------------------------------------- - Aggregate - -> Hash Full Join - Hash Cond: (r.id = s.id) - -> Seq Scan on simple r - -> Hash - -> Seq Scan on simple s -(6 rows) - -select count(*) from simple r full outer join simple s using (id); - count -------- - 20000 -(1 row) - -rollback to settings; --- parallelism is possible with parallel-aware full hash join -savepoint settings; -set local max_parallel_workers_per_gather = 2; -explain (costs off) - select count(*) from simple r full outer join simple s using (id); - QUERY PLAN -------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 2 - -> Partial Aggregate - -> Parallel Hash Full Join - Hash Cond: (r.id = s.id) - -> Parallel Seq Scan on simple r - -> Parallel Hash - -> Parallel Seq Scan on simple s -(9 rows) - -select count(*) from simple r full outer join simple s using (id); - count -------- - 20000 -(1 row) - -rollback to settings; --- A full outer join where every record is not matched. 
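[Here the ids on both sides are 1..20000 while 0 - s.id is always negative, so the join condition never matches and the full join emits every row from each input NULL-extended: 20000 + 20000 = 40000 rows in all three variants below. A quick range check, as a sketch:]

select min(id), max(id) from simple;          -- 1 .. 20000
select min(0 - id), max(0 - id) from simple;  -- -20000 .. -1: disjoint ranges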
--- non-parallel -savepoint settings; -set local max_parallel_workers_per_gather = 0; -explain (costs off) - select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); - QUERY PLAN ----------------------------------------- - Aggregate - -> Hash Full Join - Hash Cond: ((0 - s.id) = r.id) - -> Seq Scan on simple s - -> Hash - -> Seq Scan on simple r -(6 rows) - -select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); - count -------- - 40000 -(1 row) - -rollback to settings; --- parallelism not possible with parallel-oblivious full hash join -savepoint settings; -set enable_parallel_hash = off; -set local max_parallel_workers_per_gather = 2; -explain (costs off) - select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); - QUERY PLAN ----------------------------------------- - Aggregate - -> Hash Full Join - Hash Cond: ((0 - s.id) = r.id) - -> Seq Scan on simple s - -> Hash - -> Seq Scan on simple r -(6 rows) - -select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); - count -------- - 40000 -(1 row) - -rollback to settings; --- parallelism is possible with parallel-aware full hash join -savepoint settings; -set local max_parallel_workers_per_gather = 2; -explain (costs off) - select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); - QUERY PLAN -------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 2 - -> Partial Aggregate - -> Parallel Hash Full Join - Hash Cond: ((0 - s.id) = r.id) - -> Parallel Seq Scan on simple s - -> Parallel Hash - -> Parallel Seq Scan on simple r -(9 rows) - -select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); - count -------- - 40000 -(1 row) - -rollback to settings; --- exercise special code paths for huge tuples (note use of non-strict --- expression and left join required to get the detoasted tuple into --- the hash table) --- parallel with parallel-aware hash join (hits ExecParallelHashLoadTuple and --- sts_puttuple oversized tuple cases because it's multi-batch) -savepoint settings; -set max_parallel_workers_per_gather = 2; -set enable_parallel_hash = on; -set work_mem = '128kB'; -set hash_mem_multiplier = 1.0; -explain (costs off) - select length(max(s.t)) - from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id); - QUERY PLAN ----------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 2 - -> Partial Aggregate - -> Parallel Hash Left Join - Hash Cond: (wide.id = wide_1.id) - -> Parallel Seq Scan on wide - -> Parallel Hash - -> Parallel Seq Scan on wide wide_1 -(9 rows) - -select length(max(s.t)) -from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id); - length --------- - 320000 -(1 row) - -select final > 1 as multibatch - from hash_join_batches( -$$ - select length(max(s.t)) - from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id); -$$); - multibatch ------------- - t -(1 row) - -rollback to settings; --- Hash join reuses the HOT status bit to indicate match status. This can only --- be guaranteed to produce correct results if all the hash join tuple match --- bits are reset before reuse. This is done upon loading them into the --- hashtable. 
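[A compact restatement of the hazard: the bit hash join borrows for "this inner tuple found a match" is the same header bit that heap HOT updates set, so a tuple loaded into the hash table from a HOT-updated page could start out looking matched; if the bit were not cleared on load, a full join would wrongly suppress that row's NULL-extended output. The test below constructs exactly that, roughly:]

-- sketch mirroring the hjtest_matchbits test that follows:
update hjtest_matchbits_t2 set id = 2;   -- HOT update marks the new version
select * from hjtest_matchbits_t1 t1
  full join hjtest_matchbits_t2 t2 on t1.id = t2.id;  -- must emit both rows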
-SAVEPOINT settings; -SET enable_parallel_hash = on; -SET min_parallel_table_scan_size = 0; -SET parallel_setup_cost = 0; -SET parallel_tuple_cost = 0; -CREATE TABLE hjtest_matchbits_t1(id int); -CREATE TABLE hjtest_matchbits_t2(id int); -INSERT INTO hjtest_matchbits_t1 VALUES (1); -INSERT INTO hjtest_matchbits_t2 VALUES (2); --- Update should create a HOT tuple. If this status bit isn't cleared, we won't --- correctly emit the NULL-extended unmatching tuple in full hash join. -UPDATE hjtest_matchbits_t2 set id = 2; -SELECT * FROM hjtest_matchbits_t1 t1 FULL JOIN hjtest_matchbits_t2 t2 ON t1.id = t2.id - ORDER BY t1.id; - id | id -----+---- - 1 | - | 2 -(2 rows) - --- Test serial full hash join. --- Resetting parallel_setup_cost should force a serial plan. --- Just to be safe, however, set enable_parallel_hash to off, as parallel full --- hash joins are only supported with shared hashtables. -RESET parallel_setup_cost; -SET enable_parallel_hash = off; -SELECT * FROM hjtest_matchbits_t1 t1 FULL JOIN hjtest_matchbits_t2 t2 ON t1.id = t2.id; - id | id -----+---- - 1 | - | 2 -(2 rows) - -ROLLBACK TO settings; -rollback; --- Verify that hash key expressions reference the correct --- nodes. Hashjoin's hashkeys need to reference its outer plan, Hash's --- need to reference Hash's outer plan (which is below HashJoin's --- inner plan). It's not trivial to verify that the references are --- correct (we don't display the hashkeys themselves), but if the --- hashkeys contain subplan references, those will be displayed. Force --- subplans to appear just about everywhere. --- --- Bug report: --- https://www.postgresql.org/message-id/CAPpHfdvGVegF_TKKRiBrSmatJL2dR9uwFCuR%2BteQ_8tEXU8mxg%40mail.gmail.com --- -BEGIN; -SET LOCAL enable_sort = OFF; -- avoid mergejoins -SET LOCAL from_collapse_limit = 1; -- allows easy changing of join order -CREATE TABLE hjtest_1 (a text, b int, id int, c bool); -CREATE TABLE hjtest_2 (a bool, id int, b text, c int); -INSERT INTO hjtest_1(a, b, id, c) VALUES ('text', 2, 1, false); -- matches -INSERT INTO hjtest_1(a, b, id, c) VALUES ('text', 1, 2, false); -- fails id join condition -INSERT INTO hjtest_1(a, b, id, c) VALUES ('text', 20, 1, false); -- fails < 50 -INSERT INTO hjtest_1(a, b, id, c) VALUES ('text', 1, 1, false); -- fails (SELECT hjtest_1.b * 5) = (SELECT hjtest_2.c*5) -INSERT INTO hjtest_2(a, id, b, c) VALUES (true, 1, 'another', 2); -- matches -INSERT INTO hjtest_2(a, id, b, c) VALUES (true, 3, 'another', 7); -- fails id join condition -INSERT INTO hjtest_2(a, id, b, c) VALUES (true, 1, 'another', 90); -- fails < 55 -INSERT INTO hjtest_2(a, id, b, c) VALUES (true, 1, 'another', 3); -- fails (SELECT hjtest_1.b * 5) = (SELECT hjtest_2.c*5) -INSERT INTO hjtest_2(a, id, b, c) VALUES (true, 1, 'text', 1); -- fails hjtest_1.a <> hjtest_2.b; -EXPLAIN (COSTS OFF, VERBOSE) -SELECT hjtest_1.a a1, hjtest_2.a a2,hjtest_1.tableoid::regclass t1, hjtest_2.tableoid::regclass t2 -FROM hjtest_1, hjtest_2 -WHERE - hjtest_1.id = (SELECT 1 WHERE hjtest_2.id = 1) - AND (SELECT hjtest_1.b * 5) = (SELECT hjtest_2.c*5) - AND (SELECT hjtest_1.b * 5) < 50 - AND (SELECT hjtest_2.c * 5) < 55 - AND hjtest_1.a <> hjtest_2.b; - QUERY PLAN ------------------------------------------------------------------------------------------------- - Hash Join - Output: hjtest_1.a, hjtest_2.a, (hjtest_1.tableoid)::regclass, (hjtest_2.tableoid)::regclass - Hash Cond: ((hjtest_1.id = (SubPlan 1)) AND ((SubPlan 2) = (SubPlan 3))) - Join Filter: (hjtest_1.a <> hjtest_2.b) - -> Seq Scan on public.hjtest_1 - 
Output: hjtest_1.a, hjtest_1.tableoid, hjtest_1.id, hjtest_1.b - Filter: ((SubPlan 4) < 50) - SubPlan 4 - -> Result - Output: (hjtest_1.b * 5) - -> Hash - Output: hjtest_2.a, hjtest_2.tableoid, hjtest_2.id, hjtest_2.c, hjtest_2.b - -> Seq Scan on public.hjtest_2 - Output: hjtest_2.a, hjtest_2.tableoid, hjtest_2.id, hjtest_2.c, hjtest_2.b - Filter: ((SubPlan 5) < 55) - SubPlan 5 - -> Result - Output: (hjtest_2.c * 5) - SubPlan 1 - -> Result - Output: 1 - One-Time Filter: (hjtest_2.id = 1) - SubPlan 3 - -> Result - Output: (hjtest_2.c * 5) - SubPlan 2 - -> Result - Output: (hjtest_1.b * 5) -(28 rows) - -SELECT hjtest_1.a a1, hjtest_2.a a2,hjtest_1.tableoid::regclass t1, hjtest_2.tableoid::regclass t2 -FROM hjtest_1, hjtest_2 -WHERE - hjtest_1.id = (SELECT 1 WHERE hjtest_2.id = 1) - AND (SELECT hjtest_1.b * 5) = (SELECT hjtest_2.c*5) - AND (SELECT hjtest_1.b * 5) < 50 - AND (SELECT hjtest_2.c * 5) < 55 - AND hjtest_1.a <> hjtest_2.b; - a1 | a2 | t1 | t2 -------+----+----------+---------- - text | t | hjtest_1 | hjtest_2 -(1 row) - -EXPLAIN (COSTS OFF, VERBOSE) -SELECT hjtest_1.a a1, hjtest_2.a a2,hjtest_1.tableoid::regclass t1, hjtest_2.tableoid::regclass t2 -FROM hjtest_2, hjtest_1 -WHERE - hjtest_1.id = (SELECT 1 WHERE hjtest_2.id = 1) - AND (SELECT hjtest_1.b * 5) = (SELECT hjtest_2.c*5) - AND (SELECT hjtest_1.b * 5) < 50 - AND (SELECT hjtest_2.c * 5) < 55 - AND hjtest_1.a <> hjtest_2.b; - QUERY PLAN ------------------------------------------------------------------------------------------------- - Hash Join - Output: hjtest_1.a, hjtest_2.a, (hjtest_1.tableoid)::regclass, (hjtest_2.tableoid)::regclass - Hash Cond: (((SubPlan 1) = hjtest_1.id) AND ((SubPlan 3) = (SubPlan 2))) - Join Filter: (hjtest_1.a <> hjtest_2.b) - -> Seq Scan on public.hjtest_2 - Output: hjtest_2.a, hjtest_2.tableoid, hjtest_2.id, hjtest_2.c, hjtest_2.b - Filter: ((SubPlan 5) < 55) - SubPlan 5 - -> Result - Output: (hjtest_2.c * 5) - -> Hash - Output: hjtest_1.a, hjtest_1.tableoid, hjtest_1.id, hjtest_1.b - -> Seq Scan on public.hjtest_1 - Output: hjtest_1.a, hjtest_1.tableoid, hjtest_1.id, hjtest_1.b - Filter: ((SubPlan 4) < 50) - SubPlan 4 - -> Result - Output: (hjtest_1.b * 5) - SubPlan 2 - -> Result - Output: (hjtest_1.b * 5) - SubPlan 1 - -> Result - Output: 1 - One-Time Filter: (hjtest_2.id = 1) - SubPlan 3 - -> Result - Output: (hjtest_2.c * 5) -(28 rows) - -SELECT hjtest_1.a a1, hjtest_2.a a2,hjtest_1.tableoid::regclass t1, hjtest_2.tableoid::regclass t2 -FROM hjtest_2, hjtest_1 -WHERE - hjtest_1.id = (SELECT 1 WHERE hjtest_2.id = 1) - AND (SELECT hjtest_1.b * 5) = (SELECT hjtest_2.c*5) - AND (SELECT hjtest_1.b * 5) < 50 - AND (SELECT hjtest_2.c * 5) < 55 - AND hjtest_1.a <> hjtest_2.b; - a1 | a2 | t1 | t2 -------+----+----------+---------- - text | t | hjtest_1 | hjtest_2 -(1 row) - -ROLLBACK; --- Verify that we behave sanely when the inner hash keys contain parameters --- (that is, outer or lateral references). This situation has to defeat --- re-use of the inner hash table across rescans. 
-begin; -set local enable_hashjoin = on; -explain (costs off) -select i8.q2, ss.* from -int8_tbl i8, -lateral (select t1.fivethous, i4.f1 from tenk1 t1 join int4_tbl i4 - on t1.fivethous = i4.f1+i8.q2 order by 1,2) ss; - QUERY PLAN ------------------------------------------------------------ - Nested Loop - -> Seq Scan on int8_tbl i8 - -> Sort - Sort Key: t1.fivethous, i4.f1 - -> Hash Join - Hash Cond: (t1.fivethous = (i4.f1 + i8.q2)) - -> Seq Scan on tenk1 t1 - -> Hash - -> Seq Scan on int4_tbl i4 -(9 rows) - -select i8.q2, ss.* from -int8_tbl i8, -lateral (select t1.fivethous, i4.f1 from tenk1 t1 join int4_tbl i4 - on t1.fivethous = i4.f1+i8.q2 order by 1,2) ss; - q2 | fivethous | f1 ------+-----------+---- - 456 | 456 | 0 - 456 | 456 | 0 - 123 | 123 | 0 - 123 | 123 | 0 -(4 rows) - -rollback; +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/brin_bloom.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/brin_bloom.out --- /tmp/cirrus-ci-build/src/test/regress/expected/brin_bloom.out 2024-03-07 14:25:00.329455000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/brin_bloom.out 2024-03-07 14:27:17.054943000 +0000 @@ -1,428 +1,2 @@ -CREATE TABLE brintest_bloom (byteacol bytea, - charcol "char", - namecol name, - int8col bigint, - int2col smallint, - int4col integer, - textcol text, - oidcol oid, - float4col real, - float8col double precision, - macaddrcol macaddr, - inetcol inet, - cidrcol cidr, - bpcharcol character, - datecol date, - timecol time without time zone, - timestampcol timestamp without time zone, - timestamptzcol timestamp with time zone, - intervalcol interval, - timetzcol time with time zone, - numericcol numeric, - uuidcol uuid, - lsncol pg_lsn -) WITH (fillfactor=10); -INSERT INTO brintest_bloom SELECT - repeat(stringu1, 8)::bytea, - substr(stringu1, 1, 1)::"char", - stringu1::name, 142857 * tenthous, - thousand, - twothousand, - repeat(stringu1, 8), - unique1::oid, - (four + 1.0)/(hundred+1), - odd::float8 / (tenthous + 1), - format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, - inet '10.2.3.4/24' + tenthous, - cidr '10.2.3/24' + tenthous, - substr(stringu1, 1, 1)::bpchar, - date '1995-08-15' + tenthous, - time '01:20:30' + thousand * interval '18.5 second', - timestamp '1942-07-23 03:05:09' + tenthous * interval '36.38 hours', - timestamptz '1972-10-10 03:00' + thousand * interval '1 hour', - justify_days(justify_hours(tenthous * interval '12 minutes')), - timetz '01:30:20+02' + hundred * interval '15 seconds', - tenthous::numeric(36,30) * fivethous * even / (hundred + 1), - format('%s%s-%s-%s-%s-%s%s%s', to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'))::uuid, - format('%s/%s%s', odd, even, tenthous)::pg_lsn -FROM tenk1 ORDER BY unique2 LIMIT 100; --- throw in some NULL's and different values -INSERT INTO brintest_bloom (inetcol, cidrcol) SELECT - inet 'fe80::6e40:8ff:fea9:8c46' + tenthous, - cidr 'fe80::6e40:8ff:fea9:8c46' + tenthous -FROM tenk1 ORDER BY thousand, tenthous LIMIT 25; --- test bloom specific index options --- ndistinct must be >= -1.0 -CREATE INDEX brinidx_bloom ON brintest_bloom USING brin ( - byteacol bytea_bloom_ops(n_distinct_per_range = 
-1.1) -); -ERROR: value -1.1 out of bounds for option "n_distinct_per_range" -DETAIL: Valid values are between "-1.000000" and "2147483647.000000". --- false_positive_rate must be between 0.0001 and 0.25 -CREATE INDEX brinidx_bloom ON brintest_bloom USING brin ( - byteacol bytea_bloom_ops(false_positive_rate = 0.00009) -); -ERROR: value 0.00009 out of bounds for option "false_positive_rate" -DETAIL: Valid values are between "0.000100" and "0.250000". -CREATE INDEX brinidx_bloom ON brintest_bloom USING brin ( - byteacol bytea_bloom_ops(false_positive_rate = 0.26) -); -ERROR: value 0.26 out of bounds for option "false_positive_rate" -DETAIL: Valid values are between "0.000100" and "0.250000". -CREATE INDEX brinidx_bloom ON brintest_bloom USING brin ( - byteacol bytea_bloom_ops, - charcol char_bloom_ops, - namecol name_bloom_ops, - int8col int8_bloom_ops, - int2col int2_bloom_ops, - int4col int4_bloom_ops, - textcol text_bloom_ops, - oidcol oid_bloom_ops, - float4col float4_bloom_ops, - float8col float8_bloom_ops, - macaddrcol macaddr_bloom_ops, - inetcol inet_bloom_ops, - cidrcol inet_bloom_ops, - bpcharcol bpchar_bloom_ops, - datecol date_bloom_ops, - timecol time_bloom_ops, - timestampcol timestamp_bloom_ops, - timestamptzcol timestamptz_bloom_ops, - intervalcol interval_bloom_ops, - timetzcol timetz_bloom_ops, - numericcol numeric_bloom_ops, - uuidcol uuid_bloom_ops, - lsncol pg_lsn_bloom_ops -) with (pages_per_range = 1); -CREATE TABLE brinopers_bloom (colname name, typ text, - op text[], value text[], matches int[], - check (cardinality(op) = cardinality(value)), - check (cardinality(op) = cardinality(matches))); -INSERT INTO brinopers_bloom VALUES - ('byteacol', 'bytea', - '{=}', - '{BNAAAABNAAAABNAAAABNAAAABNAAAABNAAAABNAAAABNAAAA}', - '{1}'), - ('charcol', '"char"', - '{=}', - '{M}', - '{6}'), - ('namecol', 'name', - '{=}', - '{MAAAAA}', - '{2}'), - ('int2col', 'int2', - '{=}', - '{800}', - '{1}'), - ('int4col', 'int4', - '{=}', - '{800}', - '{1}'), - ('int8col', 'int8', - '{=}', - '{1257141600}', - '{1}'), - ('textcol', 'text', - '{=}', - '{BNAAAABNAAAABNAAAABNAAAABNAAAABNAAAABNAAAABNAAAA}', - '{1}'), - ('oidcol', 'oid', - '{=}', - '{8800}', - '{1}'), - ('float4col', 'float4', - '{=}', - '{1}', - '{4}'), - ('float8col', 'float8', - '{=}', - '{0}', - '{1}'), - ('macaddrcol', 'macaddr', - '{=}', - '{2c:00:2d:00:16:00}', - '{2}'), - ('inetcol', 'inet', - '{=}', - '{10.2.14.231/24}', - '{1}'), - ('inetcol', 'cidr', - '{=}', - '{fe80::6e40:8ff:fea9:8c46}', - '{1}'), - ('cidrcol', 'inet', - '{=}', - '{10.2.14/24}', - '{2}'), - ('cidrcol', 'inet', - '{=}', - '{fe80::6e40:8ff:fea9:8c46}', - '{1}'), - ('cidrcol', 'cidr', - '{=}', - '{10.2.14/24}', - '{2}'), - ('cidrcol', 'cidr', - '{=}', - '{fe80::6e40:8ff:fea9:8c46}', - '{1}'), - ('bpcharcol', 'bpchar', - '{=}', - '{W}', - '{6}'), - ('datecol', 'date', - '{=}', - '{2009-12-01}', - '{1}'), - ('timecol', 'time', - '{=}', - '{02:28:57}', - '{1}'), - ('timestampcol', 'timestamp', - '{=}', - '{1964-03-24 19:26:45}', - '{1}'), - ('timestamptzcol', 'timestamptz', - '{=}', - '{1972-10-19 09:00:00-07}', - '{1}'), - ('intervalcol', 'interval', - '{=}', - '{1 mons 13 days 12:24}', - '{1}'), - ('timetzcol', 'timetz', - '{=}', - '{01:35:50+02}', - '{2}'), - ('numericcol', 'numeric', - '{=}', - '{2268164.347826086956521739130434782609}', - '{1}'), - ('uuidcol', 'uuid', - '{=}', - '{52225222-5222-5222-5222-522252225222}', - '{1}'), - ('lsncol', 'pg_lsn', - '{=, IS, IS NOT}', - '{44/455222, NULL, NULL}', - '{1, 25, 100}'); -DO $x$ -DECLARE - r record; - r2 
record; - cond text; - idx_ctids tid[]; - ss_ctids tid[]; - count int; - plan_ok bool; - plan_line text; -BEGIN - FOR r IN SELECT colname, oper, typ, value[ordinality], matches[ordinality] FROM brinopers_bloom, unnest(op) WITH ORDINALITY AS oper LOOP - - -- prepare the condition - IF r.value IS NULL THEN - cond := format('%I %s %L', r.colname, r.oper, r.value); - ELSE - cond := format('%I %s %L::%s', r.colname, r.oper, r.value, r.typ); - END IF; - - -- run the query using the brin index - SET enable_seqscan = 0; - SET enable_bitmapscan = 1; - - plan_ok := false; - FOR plan_line IN EXECUTE format($y$EXPLAIN SELECT array_agg(ctid) FROM brintest_bloom WHERE %s $y$, cond) LOOP - IF plan_line LIKE '%Bitmap Heap Scan on brintest_bloom%' THEN - plan_ok := true; - END IF; - END LOOP; - IF NOT plan_ok THEN - RAISE WARNING 'did not get bitmap indexscan plan for %', r; - END IF; - - EXECUTE format($y$SELECT array_agg(ctid) FROM brintest_bloom WHERE %s $y$, cond) - INTO idx_ctids; - - -- run the query using a seqscan - SET enable_seqscan = 1; - SET enable_bitmapscan = 0; - - plan_ok := false; - FOR plan_line IN EXECUTE format($y$EXPLAIN SELECT array_agg(ctid) FROM brintest_bloom WHERE %s $y$, cond) LOOP - IF plan_line LIKE '%Seq Scan on brintest_bloom%' THEN - plan_ok := true; - END IF; - END LOOP; - IF NOT plan_ok THEN - RAISE WARNING 'did not get seqscan plan for %', r; - END IF; - - EXECUTE format($y$SELECT array_agg(ctid) FROM brintest_bloom WHERE %s $y$, cond) - INTO ss_ctids; - - -- make sure both return the same results - count := array_length(idx_ctids, 1); - - IF NOT (count = array_length(ss_ctids, 1) AND - idx_ctids @> ss_ctids AND - idx_ctids <@ ss_ctids) THEN - -- report the results of each scan to make the differences obvious - RAISE WARNING 'something not right in %: count %', r, count; - SET enable_seqscan = 1; - SET enable_bitmapscan = 0; - FOR r2 IN EXECUTE 'SELECT ' || r.colname || ' FROM brintest_bloom WHERE ' || cond LOOP - RAISE NOTICE 'seqscan: %', r2; - END LOOP; - - SET enable_seqscan = 0; - SET enable_bitmapscan = 1; - FOR r2 IN EXECUTE 'SELECT ' || r.colname || ' FROM brintest_bloom WHERE ' || cond LOOP - RAISE NOTICE 'bitmapscan: %', r2; - END LOOP; - END IF; - - -- make sure we found expected number of matches - IF count != r.matches THEN RAISE WARNING 'unexpected number of results % for %', count, r; END IF; - END LOOP; -END; -$x$; -RESET enable_seqscan; -RESET enable_bitmapscan; -INSERT INTO brintest_bloom SELECT - repeat(stringu1, 42)::bytea, - substr(stringu1, 1, 1)::"char", - stringu1::name, 142857 * tenthous, - thousand, - twothousand, - repeat(stringu1, 42), - unique1::oid, - (four + 1.0)/(hundred+1), - odd::float8 / (tenthous + 1), - format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, - inet '10.2.3.4' + tenthous, - cidr '10.2.3/24' + tenthous, - substr(stringu1, 1, 1)::bpchar, - date '1995-08-15' + tenthous, - time '01:20:30' + thousand * interval '18.5 second', - timestamp '1942-07-23 03:05:09' + tenthous * interval '36.38 hours', - timestamptz '1972-10-10 03:00' + thousand * interval '1 hour', - justify_days(justify_hours(tenthous * interval '12 minutes')), - timetz '01:30:20' + hundred * interval '15 seconds', - tenthous::numeric(36,30) * fivethous * even / (hundred + 1), - format('%s%s-%s-%s-%s-%s%s%s', to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 
'FM0000'))::uuid, - format('%s/%s%s', odd, even, tenthous)::pg_lsn -FROM tenk1 ORDER BY unique2 LIMIT 5 OFFSET 5; -SELECT brin_desummarize_range('brinidx_bloom', 0); - brin_desummarize_range ------------------------- - -(1 row) - -VACUUM brintest_bloom; -- force a summarization cycle in brinidx -UPDATE brintest_bloom SET int8col = int8col * int4col; -UPDATE brintest_bloom SET textcol = '' WHERE textcol IS NOT NULL; --- Tests for brin_summarize_new_values -SELECT brin_summarize_new_values('brintest_bloom'); -- error, not an index -ERROR: "brintest_bloom" is not an index -SELECT brin_summarize_new_values('tenk1_unique1'); -- error, not a BRIN index -ERROR: "tenk1_unique1" is not a BRIN index -SELECT brin_summarize_new_values('brinidx_bloom'); -- ok, no change expected - brin_summarize_new_values ---------------------------- - 0 -(1 row) - --- Tests for brin_desummarize_range -SELECT brin_desummarize_range('brinidx_bloom', -1); -- error, invalid range -ERROR: block number out of range: -1 -SELECT brin_desummarize_range('brinidx_bloom', 0); - brin_desummarize_range ------------------------- - -(1 row) - -SELECT brin_desummarize_range('brinidx_bloom', 0); - brin_desummarize_range ------------------------- - -(1 row) - -SELECT brin_desummarize_range('brinidx_bloom', 100000000); - brin_desummarize_range ------------------------- - -(1 row) - --- Test brin_summarize_range -CREATE TABLE brin_summarize_bloom ( - value int -) WITH (fillfactor=10, autovacuum_enabled=false); -CREATE INDEX brin_summarize_bloom_idx ON brin_summarize_bloom USING brin (value) WITH (pages_per_range=2); --- Fill a few pages -DO $$ -DECLARE curtid tid; -BEGIN - LOOP - INSERT INTO brin_summarize_bloom VALUES (1) RETURNING ctid INTO curtid; - EXIT WHEN curtid > tid '(2, 0)'; - END LOOP; -END; -$$; --- summarize one range -SELECT brin_summarize_range('brin_summarize_bloom_idx', 0); - brin_summarize_range ----------------------- - 0 -(1 row) - --- nothing: already summarized -SELECT brin_summarize_range('brin_summarize_bloom_idx', 1); - brin_summarize_range ----------------------- - 0 -(1 row) - --- summarize one range -SELECT brin_summarize_range('brin_summarize_bloom_idx', 2); - brin_summarize_range ----------------------- - 1 -(1 row) - --- nothing: page doesn't exist in table -SELECT brin_summarize_range('brin_summarize_bloom_idx', 4294967295); - brin_summarize_range ----------------------- - 0 -(1 row) - --- invalid block number values -SELECT brin_summarize_range('brin_summarize_bloom_idx', -1); -ERROR: block number out of range: -1 -SELECT brin_summarize_range('brin_summarize_bloom_idx', 4294967296); -ERROR: block number out of range: 4294967296 --- test brin cost estimates behave sanely based on correlation of values -CREATE TABLE brin_test_bloom (a INT, b INT); -INSERT INTO brin_test_bloom SELECT x/100,x%100 FROM generate_series(1,10000) x(x); -CREATE INDEX brin_test_bloom_a_idx ON brin_test_bloom USING brin (a) WITH (pages_per_range = 2); -CREATE INDEX brin_test_bloom_b_idx ON brin_test_bloom USING brin (b) WITH (pages_per_range = 2); -VACUUM ANALYZE brin_test_bloom; --- Ensure brin index is used when columns are perfectly correlated -EXPLAIN (COSTS OFF) SELECT * FROM brin_test_bloom WHERE a = 1; - QUERY PLAN --------------------------------------------------- - Bitmap Heap Scan on brin_test_bloom - Recheck Cond: (a = 1) - -> Bitmap Index Scan on brin_test_bloom_a_idx - Index Cond: (a = 1) -(4 rows) - --- Ensure brin index is not used when values are not correlated -EXPLAIN (COSTS OFF) SELECT * FROM brin_test_bloom 
WHERE b = 1; - QUERY PLAN ------------------------------ - Seq Scan on brin_test_bloom - Filter: (b = 1) -(2 rows) - +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/brin_multi.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/brin_multi.out --- /tmp/cirrus-ci-build/src/test/regress/expected/brin_multi.out 2024-03-07 14:25:00.329471000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/brin_multi.out 2024-03-07 14:27:17.055251000 +0000 @@ -1,974 +1,2 @@ -CREATE TABLE brintest_multi ( - int8col bigint, - int2col smallint, - int4col integer, - oidcol oid, - tidcol tid, - float4col real, - float8col double precision, - macaddrcol macaddr, - macaddr8col macaddr8, - inetcol inet, - cidrcol cidr, - datecol date, - timecol time without time zone, - timestampcol timestamp without time zone, - timestamptzcol timestamp with time zone, - intervalcol interval, - timetzcol time with time zone, - numericcol numeric, - uuidcol uuid, - lsncol pg_lsn -) WITH (fillfactor=10); -INSERT INTO brintest_multi SELECT - 142857 * tenthous, - thousand, - twothousand, - unique1::oid, - format('(%s,%s)', tenthous, twenty)::tid, - (four + 1.0)/(hundred+1), - odd::float8 / (tenthous + 1), - format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, - substr(fipshash(unique1::text), 1, 16)::macaddr8, - inet '10.2.3.4/24' + tenthous, - cidr '10.2.3/24' + tenthous, - date '1995-08-15' + tenthous, - time '01:20:30' + thousand * interval '18.5 second', - timestamp '1942-07-23 03:05:09' + tenthous * interval '36.38 hours', - timestamptz '1972-10-10 03:00' + thousand * interval '1 hour', - justify_days(justify_hours(tenthous * interval '12 minutes')), - timetz '01:30:20+02' + hundred * interval '15 seconds', - tenthous::numeric(36,30) * fivethous * even / (hundred + 1), - format('%s%s-%s-%s-%s-%s%s%s', to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'))::uuid, - format('%s/%s%s', odd, even, tenthous)::pg_lsn -FROM tenk1 ORDER BY unique2 LIMIT 100; --- throw in some NULL's and different values -INSERT INTO brintest_multi (inetcol, cidrcol) SELECT - inet 'fe80::6e40:8ff:fea9:8c46' + tenthous, - cidr 'fe80::6e40:8ff:fea9:8c46' + tenthous -FROM tenk1 ORDER BY thousand, tenthous LIMIT 25; --- test minmax-multi specific index options --- number of values must be >= 16 -CREATE INDEX brinidx_multi ON brintest_multi USING brin ( - int8col int8_minmax_multi_ops(values_per_range = 7) -); -ERROR: value 7 out of bounds for option "values_per_range" -DETAIL: Valid values are between "8" and "256". --- number of values must be <= 256 -CREATE INDEX brinidx_multi ON brintest_multi USING brin ( - int8col int8_minmax_multi_ops(values_per_range = 257) -); -ERROR: value 257 out of bounds for option "values_per_range" -DETAIL: Valid values are between "8" and "256". 
--- first create an index with a single page range, to force compaction --- due to exceeding the number of values per summary -CREATE INDEX brinidx_multi ON brintest_multi USING brin ( - int8col int8_minmax_multi_ops, - int2col int2_minmax_multi_ops, - int4col int4_minmax_multi_ops, - oidcol oid_minmax_multi_ops, - tidcol tid_minmax_multi_ops, - float4col float4_minmax_multi_ops, - float8col float8_minmax_multi_ops, - macaddrcol macaddr_minmax_multi_ops, - macaddr8col macaddr8_minmax_multi_ops, - inetcol inet_minmax_multi_ops, - cidrcol inet_minmax_multi_ops, - datecol date_minmax_multi_ops, - timecol time_minmax_multi_ops, - timestampcol timestamp_minmax_multi_ops, - timestamptzcol timestamptz_minmax_multi_ops, - intervalcol interval_minmax_multi_ops, - timetzcol timetz_minmax_multi_ops, - numericcol numeric_minmax_multi_ops, - uuidcol uuid_minmax_multi_ops, - lsncol pg_lsn_minmax_multi_ops -); -DROP INDEX brinidx_multi; -CREATE INDEX brinidx_multi ON brintest_multi USING brin ( - int8col int8_minmax_multi_ops, - int2col int2_minmax_multi_ops, - int4col int4_minmax_multi_ops, - oidcol oid_minmax_multi_ops, - tidcol tid_minmax_multi_ops, - float4col float4_minmax_multi_ops, - float8col float8_minmax_multi_ops, - macaddrcol macaddr_minmax_multi_ops, - macaddr8col macaddr8_minmax_multi_ops, - inetcol inet_minmax_multi_ops, - cidrcol inet_minmax_multi_ops, - datecol date_minmax_multi_ops, - timecol time_minmax_multi_ops, - timestampcol timestamp_minmax_multi_ops, - timestamptzcol timestamptz_minmax_multi_ops, - intervalcol interval_minmax_multi_ops, - timetzcol timetz_minmax_multi_ops, - numericcol numeric_minmax_multi_ops, - uuidcol uuid_minmax_multi_ops, - lsncol pg_lsn_minmax_multi_ops -) with (pages_per_range = 1); -CREATE TABLE brinopers_multi (colname name, typ text, - op text[], value text[], matches int[], - check (cardinality(op) = cardinality(value)), - check (cardinality(op) = cardinality(matches))); -INSERT INTO brinopers_multi VALUES - ('int2col', 'int2', - '{>, >=, =, <=, <}', - '{0, 0, 800, 999, 999}', - '{100, 100, 1, 100, 100}'), - ('int2col', 'int4', - '{>, >=, =, <=, <}', - '{0, 0, 800, 999, 1999}', - '{100, 100, 1, 100, 100}'), - ('int2col', 'int8', - '{>, >=, =, <=, <}', - '{0, 0, 800, 999, 1428427143}', - '{100, 100, 1, 100, 100}'), - ('int4col', 'int2', - '{>, >=, =, <=, <}', - '{0, 0, 800, 1999, 1999}', - '{100, 100, 1, 100, 100}'), - ('int4col', 'int4', - '{>, >=, =, <=, <}', - '{0, 0, 800, 1999, 1999}', - '{100, 100, 1, 100, 100}'), - ('int4col', 'int8', - '{>, >=, =, <=, <}', - '{0, 0, 800, 1999, 1428427143}', - '{100, 100, 1, 100, 100}'), - ('int8col', 'int2', - '{>, >=}', - '{0, 0}', - '{100, 100}'), - ('int8col', 'int4', - '{>, >=}', - '{0, 0}', - '{100, 100}'), - ('int8col', 'int8', - '{>, >=, =, <=, <}', - '{0, 0, 1257141600, 1428427143, 1428427143}', - '{100, 100, 1, 100, 100}'), - ('oidcol', 'oid', - '{>, >=, =, <=, <}', - '{0, 0, 8800, 9999, 9999}', - '{100, 100, 1, 100, 100}'), - ('tidcol', 'tid', - '{>, >=, =, <=, <}', - '{"(0,0)", "(0,0)", "(8800,0)", "(9999,19)", "(9999,19)"}', - '{100, 100, 1, 100, 100}'), - ('float4col', 'float4', - '{>, >=, =, <=, <}', - '{0.0103093, 0.0103093, 1, 1, 1}', - '{100, 100, 4, 100, 96}'), - ('float4col', 'float8', - '{>, >=, =, <=, <}', - '{0.0103093, 0.0103093, 1, 1, 1}', - '{100, 100, 4, 100, 96}'), - ('float8col', 'float4', - '{>, >=, =, <=, <}', - '{0, 0, 0, 1.98, 1.98}', - '{99, 100, 1, 100, 100}'), - ('float8col', 'float8', - '{>, >=, =, <=, <}', - '{0, 0, 0, 1.98, 1.98}', - '{99, 100, 1, 100, 100}'), - 
('macaddrcol', 'macaddr', - '{>, >=, =, <=, <}', - '{00:00:01:00:00:00, 00:00:01:00:00:00, 2c:00:2d:00:16:00, ff:fe:00:00:00:00, ff:fe:00:00:00:00}', - '{99, 100, 2, 100, 100}'), - ('macaddr8col', 'macaddr8', - '{>, >=, =, <=, <}', - '{b1:d1:0e:7b:af:a4:42:12, d9:35:91:bd:f7:86:0e:1e, 72:8f:20:6c:2a:01:bf:57, 23:e8:46:63:86:07:ad:cb, 13:16:8e:6a:2e:6c:84:b4}', - '{31, 17, 1, 11, 4}'), - ('inetcol', 'inet', - '{=, <, <=, >, >=}', - '{10.2.14.231/24, 255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0}', - '{1, 100, 100, 125, 125}'), - ('inetcol', 'cidr', - '{<, <=, >, >=}', - '{255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0}', - '{100, 100, 125, 125}'), - ('cidrcol', 'inet', - '{=, <, <=, >, >=}', - '{10.2.14/24, 255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0}', - '{2, 100, 100, 125, 125}'), - ('cidrcol', 'cidr', - '{=, <, <=, >, >=}', - '{10.2.14/24, 255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0}', - '{2, 100, 100, 125, 125}'), - ('datecol', 'date', - '{>, >=, =, <=, <}', - '{1995-08-15, 1995-08-15, 2009-12-01, 2022-12-30, 2022-12-30}', - '{100, 100, 1, 100, 100}'), - ('timecol', 'time', - '{>, >=, =, <=, <}', - '{01:20:30, 01:20:30, 02:28:57, 06:28:31.5, 06:28:31.5}', - '{100, 100, 1, 100, 100}'), - ('timestampcol', 'timestamp', - '{>, >=, =, <=, <}', - '{1942-07-23 03:05:09, 1942-07-23 03:05:09, 1964-03-24 19:26:45, 1984-01-20 22:42:21, 1984-01-20 22:42:21}', - '{100, 100, 1, 100, 100}'), - ('timestampcol', 'timestamptz', - '{>, >=, =, <=, <}', - '{1942-07-23 03:05:09, 1942-07-23 03:05:09, 1964-03-24 19:26:45, 1984-01-20 22:42:21, 1984-01-20 22:42:21}', - '{100, 100, 1, 100, 100}'), - ('timestamptzcol', 'timestamptz', - '{>, >=, =, <=, <}', - '{1972-10-10 03:00:00-04, 1972-10-10 03:00:00-04, 1972-10-19 09:00:00-07, 1972-11-20 19:00:00-03, 1972-11-20 19:00:00-03}', - '{100, 100, 1, 100, 100}'), - ('intervalcol', 'interval', - '{>, >=, =, <=, <}', - '{00:00:00, 00:00:00, 1 mons 13 days 12:24, 2 mons 23 days 07:48:00, 1 year}', - '{100, 100, 1, 100, 100}'), - ('timetzcol', 'timetz', - '{>, >=, =, <=, <}', - '{01:30:20+02, 01:30:20+02, 01:35:50+02, 23:55:05+02, 23:55:05+02}', - '{99, 100, 2, 100, 100}'), - ('numericcol', 'numeric', - '{>, >=, =, <=, <}', - '{0.00, 0.01, 2268164.347826086956521739130434782609, 99470151.9, 99470151.9}', - '{100, 100, 1, 100, 100}'), - ('uuidcol', 'uuid', - '{>, >=, =, <=, <}', - '{00040004-0004-0004-0004-000400040004, 00040004-0004-0004-0004-000400040004, 52225222-5222-5222-5222-522252225222, 99989998-9998-9998-9998-999899989998, 99989998-9998-9998-9998-999899989998}', - '{100, 100, 1, 100, 100}'), - ('lsncol', 'pg_lsn', - '{>, >=, =, <=, <, IS, IS NOT}', - '{0/1200, 0/1200, 44/455222, 198/1999799, 198/1999799, NULL, NULL}', - '{100, 100, 1, 100, 100, 25, 100}'); -DO $x$ -DECLARE - r record; - r2 record; - cond text; - idx_ctids tid[]; - ss_ctids tid[]; - count int; - plan_ok bool; - plan_line text; -BEGIN - FOR r IN SELECT colname, oper, typ, value[ordinality], matches[ordinality] FROM brinopers_multi, unnest(op) WITH ORDINALITY AS oper LOOP - - -- prepare the condition - IF r.value IS NULL THEN - cond := format('%I %s %L', r.colname, r.oper, r.value); - ELSE - cond := format('%I %s %L::%s', r.colname, r.oper, r.value, r.typ); - END IF; - - -- run the query using the brin index - SET enable_seqscan = 0; - SET enable_bitmapscan = 1; - - plan_ok := false; - FOR plan_line IN EXECUTE format($y$EXPLAIN SELECT array_agg(ctid) FROM brintest_multi WHERE %s $y$, cond) LOOP - IF plan_line LIKE '%Bitmap Heap Scan on brintest_multi%' THEN - plan_ok := true; - 
END IF; - END LOOP; - IF NOT plan_ok THEN - RAISE WARNING 'did not get bitmap indexscan plan for %', r; - END IF; - - EXECUTE format($y$SELECT array_agg(ctid) FROM brintest_multi WHERE %s $y$, cond) - INTO idx_ctids; - - -- run the query using a seqscan - SET enable_seqscan = 1; - SET enable_bitmapscan = 0; - - plan_ok := false; - FOR plan_line IN EXECUTE format($y$EXPLAIN SELECT array_agg(ctid) FROM brintest_multi WHERE %s $y$, cond) LOOP - IF plan_line LIKE '%Seq Scan on brintest_multi%' THEN - plan_ok := true; - END IF; - END LOOP; - IF NOT plan_ok THEN - RAISE WARNING 'did not get seqscan plan for %', r; - END IF; - - EXECUTE format($y$SELECT array_agg(ctid) FROM brintest_multi WHERE %s $y$, cond) - INTO ss_ctids; - - -- make sure both return the same results - count := array_length(idx_ctids, 1); - - IF NOT (count = array_length(ss_ctids, 1) AND - idx_ctids @> ss_ctids AND - idx_ctids <@ ss_ctids) THEN - -- report the results of each scan to make the differences obvious - RAISE WARNING 'something not right in %: count %', r, count; - SET enable_seqscan = 1; - SET enable_bitmapscan = 0; - FOR r2 IN EXECUTE 'SELECT ' || r.colname || ' FROM brintest_multi WHERE ' || cond LOOP - RAISE NOTICE 'seqscan: %', r2; - END LOOP; - - SET enable_seqscan = 0; - SET enable_bitmapscan = 1; - FOR r2 IN EXECUTE 'SELECT ' || r.colname || ' FROM brintest_multi WHERE ' || cond LOOP - RAISE NOTICE 'bitmapscan: %', r2; - END LOOP; - END IF; - - -- make sure we found expected number of matches - IF count != r.matches THEN RAISE WARNING 'unexpected number of results % for %', count, r; END IF; - END LOOP; -END; -$x$; -RESET enable_seqscan; -RESET enable_bitmapscan; -INSERT INTO brintest_multi SELECT - 142857 * tenthous, - thousand, - twothousand, - unique1::oid, - format('(%s,%s)', tenthous, twenty)::tid, - (four + 1.0)/(hundred+1), - odd::float8 / (tenthous + 1), - format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, - substr(fipshash(unique1::text), 1, 16)::macaddr8, - inet '10.2.3.4' + tenthous, - cidr '10.2.3/24' + tenthous, - date '1995-08-15' + tenthous, - time '01:20:30' + thousand * interval '18.5 second', - timestamp '1942-07-23 03:05:09' + tenthous * interval '36.38 hours', - timestamptz '1972-10-10 03:00' + thousand * interval '1 hour', - justify_days(justify_hours(tenthous * interval '12 minutes')), - timetz '01:30:20' + hundred * interval '15 seconds', - tenthous::numeric(36,30) * fivethous * even / (hundred + 1), - format('%s%s-%s-%s-%s-%s%s%s', to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'))::uuid, - format('%s/%s%s', odd, even, tenthous)::pg_lsn -FROM tenk1 ORDER BY unique2 LIMIT 5 OFFSET 5; -SELECT brin_desummarize_range('brinidx_multi', 0); - brin_desummarize_range ------------------------- - -(1 row) - -VACUUM brintest_multi; -- force a summarization cycle in brinidx --- Try inserting a values with NaN, to test distance calculation. 
-insert into public.brintest_multi (float4col) values (real 'nan'); -insert into public.brintest_multi (float8col) values (real 'nan'); -UPDATE brintest_multi SET int8col = int8col * int4col; --- Test handling of inet netmasks with inet_minmax_multi_ops -CREATE TABLE brin_test_inet (a inet); -CREATE INDEX ON brin_test_inet USING brin (a inet_minmax_multi_ops); -INSERT INTO brin_test_inet VALUES ('127.0.0.1/0'); -INSERT INTO brin_test_inet VALUES ('0.0.0.0/12'); -DROP TABLE brin_test_inet; --- Tests for brin_summarize_new_values -SELECT brin_summarize_new_values('brintest_multi'); -- error, not an index -ERROR: "brintest_multi" is not an index -SELECT brin_summarize_new_values('tenk1_unique1'); -- error, not a BRIN index -ERROR: "tenk1_unique1" is not a BRIN index -SELECT brin_summarize_new_values('brinidx_multi'); -- ok, no change expected - brin_summarize_new_values ---------------------------- - 0 -(1 row) - --- Tests for brin_desummarize_range -SELECT brin_desummarize_range('brinidx_multi', -1); -- error, invalid range -ERROR: block number out of range: -1 -SELECT brin_desummarize_range('brinidx_multi', 0); - brin_desummarize_range ------------------------- - -(1 row) - -SELECT brin_desummarize_range('brinidx_multi', 0); - brin_desummarize_range ------------------------- - -(1 row) - -SELECT brin_desummarize_range('brinidx_multi', 100000000); - brin_desummarize_range ------------------------- - -(1 row) - --- test building an index with many values, to force compaction of the buffer -CREATE TABLE brin_large_range (a int4); -INSERT INTO brin_large_range SELECT i FROM generate_series(1,10000) s(i); -CREATE INDEX brin_large_range_idx ON brin_large_range USING brin (a int4_minmax_multi_ops); -DROP TABLE brin_large_range; --- Test brin_summarize_range -CREATE TABLE brin_summarize_multi ( - value int -) WITH (fillfactor=10, autovacuum_enabled=false); -CREATE INDEX brin_summarize_multi_idx ON brin_summarize_multi USING brin (value) WITH (pages_per_range=2); --- Fill a few pages -DO $$ -DECLARE curtid tid; -BEGIN - LOOP - INSERT INTO brin_summarize_multi VALUES (1) RETURNING ctid INTO curtid; - EXIT WHEN curtid > tid '(2, 0)'; - END LOOP; -END; -$$; --- summarize one range -SELECT brin_summarize_range('brin_summarize_multi_idx', 0); - brin_summarize_range ----------------------- - 0 -(1 row) - --- nothing: already summarized -SELECT brin_summarize_range('brin_summarize_multi_idx', 1); - brin_summarize_range ----------------------- - 0 -(1 row) - --- summarize one range -SELECT brin_summarize_range('brin_summarize_multi_idx', 2); - brin_summarize_range ----------------------- - 1 -(1 row) - --- nothing: page doesn't exist in table -SELECT brin_summarize_range('brin_summarize_multi_idx', 4294967295); - brin_summarize_range ----------------------- - 0 -(1 row) - --- invalid block number values -SELECT brin_summarize_range('brin_summarize_multi_idx', -1); -ERROR: block number out of range: -1 -SELECT brin_summarize_range('brin_summarize_multi_idx', 4294967296); -ERROR: block number out of range: 4294967296 --- test brin cost estimates behave sanely based on correlation of values -CREATE TABLE brin_test_multi (a INT, b INT); -INSERT INTO brin_test_multi SELECT x/100,x%100 FROM generate_series(1,10000) x(x); -CREATE INDEX brin_test_multi_a_idx ON brin_test_multi USING brin (a) WITH (pages_per_range = 2); -CREATE INDEX brin_test_multi_b_idx ON brin_test_multi USING brin (b) WITH (pages_per_range = 2); -VACUUM ANALYZE brin_test_multi; --- Ensure brin index is used when columns are perfectly correlated 
-EXPLAIN (COSTS OFF) SELECT * FROM brin_test_multi WHERE a = 1; - QUERY PLAN --------------------------------------------------- - Bitmap Heap Scan on brin_test_multi - Recheck Cond: (a = 1) - -> Bitmap Index Scan on brin_test_multi_a_idx - Index Cond: (a = 1) -(4 rows) - --- Ensure brin index is not used when values are not correlated -EXPLAIN (COSTS OFF) SELECT * FROM brin_test_multi WHERE b = 1; - QUERY PLAN ------------------------------ - Seq Scan on brin_test_multi - Filter: (b = 1) -(2 rows) - --- do some inequality tests -CREATE TABLE brin_test_multi_1 (a INT, b BIGINT) WITH (fillfactor=10); -INSERT INTO brin_test_multi_1 -SELECT i/5 + mod(911 * i + 483, 25), - i/10 + mod(751 * i + 221, 41) - FROM generate_series(1,1000) s(i); -CREATE INDEX brin_test_multi_1_idx_1 ON brin_test_multi_1 USING brin (a int4_minmax_multi_ops) WITH (pages_per_range=5); -CREATE INDEX brin_test_multi_1_idx_2 ON brin_test_multi_1 USING brin (b int8_minmax_multi_ops) WITH (pages_per_range=5); -SET enable_seqscan=off; --- int: less than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a < 37; - count -------- - 124 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a < 113; - count -------- - 504 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a <= 177; - count -------- - 829 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a <= 25; - count -------- - 69 -(1 row) - --- int: greater than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a > 120; - count -------- - 456 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a >= 180; - count -------- - 161 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a > 71; - count -------- - 701 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a >= 63; - count -------- - 746 -(1 row) - --- int: equals -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a = 207; - count -------- - 3 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a = 177; - count -------- - 5 -(1 row) - --- bigint: less than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b < 73; - count -------- - 529 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b <= 47; - count -------- - 279 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b < 199; - count -------- - 1000 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b <= 150; - count -------- - 1000 -(1 row) - --- bigint: greater than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b > 93; - count -------- - 261 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b > 37; - count -------- - 821 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b >= 215; - count -------- - 0 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b > 201; - count -------- - 0 -(1 row) - --- bigint: equals -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b = 88; - count -------- - 10 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b = 103; - count -------- - 9 -(1 row) - --- now do the same, but insert the rows with the indexes already created --- so that we don't use the "build callback" and instead use the regular --- approach of adding rows into existing ranges -TRUNCATE brin_test_multi_1; -INSERT INTO brin_test_multi_1 -SELECT i/5 + mod(911 * i + 483, 25), - i/10 + mod(751 * i + 221, 41) - FROM generate_series(1,1000) s(i); --- int: less than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a < 37; - count -------- - 124 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a < 113; - count -------- - 504 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a <= 177; - count -------- - 829 -(1 row) - 
-SELECT COUNT(*) FROM brin_test_multi_1 WHERE a <= 25; - count -------- - 69 -(1 row) - --- int: greater than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a > 120; - count -------- - 456 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a >= 180; - count -------- - 161 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a > 71; - count -------- - 701 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a >= 63; - count -------- - 746 -(1 row) - --- int: equals -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a = 207; - count -------- - 3 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a = 177; - count -------- - 5 -(1 row) - --- bigint: less than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b < 73; - count -------- - 529 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b <= 47; - count -------- - 279 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b < 199; - count -------- - 1000 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b <= 150; - count -------- - 1000 -(1 row) - --- bigint: greater than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b > 93; - count -------- - 261 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b > 37; - count -------- - 821 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b >= 215; - count -------- - 0 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b > 201; - count -------- - 0 -(1 row) - --- bigint: equals -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b = 88; - count -------- - 10 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b = 103; - count -------- - 9 -(1 row) - -DROP TABLE brin_test_multi_1; -RESET enable_seqscan; --- do some inequality tests for varlena data types -CREATE TABLE brin_test_multi_2 (a UUID) WITH (fillfactor=10); -INSERT INTO brin_test_multi_2 -SELECT v::uuid FROM (SELECT row_number() OVER (ORDER BY v) c, v FROM (SELECT fipshash((i/13)::text) AS v FROM generate_series(1,1000) s(i)) foo) bar ORDER BY c + 25 * random(); -CREATE INDEX brin_test_multi_2_idx ON brin_test_multi_2 USING brin (a uuid_minmax_multi_ops) WITH (pages_per_range=5); -SET enable_seqscan=off; -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a < '3d914f93-48c9-cc0f-f8a7-9716700b9fcd'; - count -------- - 195 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a > '3d914f93-48c9-cc0f-f8a7-9716700b9fcd'; - count -------- - 792 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a <= 'f369cb89-fc62-7e66-8987-007d121ed1ea'; - count -------- - 961 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a >= 'aea92132-c4cb-eb26-3e6a-c2bf6c183b5d'; - count -------- - 273 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a = '5feceb66-ffc8-6f38-d952-786c6d696c79'; - count -------- - 12 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a = '86e50149-6586-6131-2a9e-0b35558d84f6'; - count -------- - 13 -(1 row) - --- now do the same, but insert the rows with the indexes already created --- so that we don't use the "build callback" and instead use the regular --- approach of adding rows into existing ranges -TRUNCATE brin_test_multi_2; -INSERT INTO brin_test_multi_2 -SELECT v::uuid FROM (SELECT row_number() OVER (ORDER BY v) c, v FROM (SELECT fipshash((i/13)::text) AS v FROM generate_series(1,1000) s(i)) foo) bar ORDER BY c + 25 * random(); -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a < '3d914f93-48c9-cc0f-f8a7-9716700b9fcd'; - count -------- - 195 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a > '3d914f93-48c9-cc0f-f8a7-9716700b9fcd'; - count -------- - 792 -(1 row) - 
-SELECT COUNT(*) FROM brin_test_multi_2 WHERE a <= 'f369cb89-fc62-7e66-8987-007d121ed1ea'; - count -------- - 961 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a >= 'aea92132-c4cb-eb26-3e6a-c2bf6c183b5d'; - count -------- - 273 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a = '5feceb66-ffc8-6f38-d952-786c6d696c79'; - count -------- - 12 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a = '86e50149-6586-6131-2a9e-0b35558d84f6'; - count -------- - 13 -(1 row) - -DROP TABLE brin_test_multi_2; -RESET enable_seqscan; --- test overflows during CREATE INDEX with extreme timestamp values -CREATE TABLE brin_timestamp_test(a TIMESTAMPTZ); -SET datestyle TO iso; --- values close to timestamp minimum -INSERT INTO brin_timestamp_test -SELECT '4713-01-01 00:00:01 BC'::timestamptz + (i || ' seconds')::interval - FROM generate_series(1,30) s(i); --- values close to timestamp maximum -INSERT INTO brin_timestamp_test -SELECT '294276-12-01 00:00:01'::timestamptz + (i || ' seconds')::interval - FROM generate_series(1,30) s(i); -CREATE INDEX ON brin_timestamp_test USING brin (a timestamptz_minmax_multi_ops) WITH (pages_per_range=1); -DROP TABLE brin_timestamp_test; --- test overflows during CREATE INDEX with extreme date values -CREATE TABLE brin_date_test(a DATE); --- insert values close to date minimum -INSERT INTO brin_date_test SELECT '4713-01-01 BC'::date + i FROM generate_series(1, 30) s(i); --- insert values close to date minimum -INSERT INTO brin_date_test SELECT '5874897-12-01'::date + i FROM generate_series(1, 30) s(i); -CREATE INDEX ON brin_date_test USING brin (a date_minmax_multi_ops) WITH (pages_per_range=1); -SET enable_seqscan = off; --- make sure the ranges were built correctly and 2023-01-01 eliminates all -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) -SELECT * FROM brin_date_test WHERE a = '2023-01-01'::date; - QUERY PLAN -------------------------------------------------------------------------- - Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1) - Recheck Cond: (a = '2023-01-01'::date) - -> Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '2023-01-01'::date) -(4 rows) - -DROP TABLE brin_date_test; -RESET enable_seqscan; --- test handling of infinite timestamp values -CREATE TABLE brin_timestamp_test(a TIMESTAMP); -INSERT INTO brin_timestamp_test VALUES ('-infinity'), ('infinity'); -INSERT INTO brin_timestamp_test -SELECT i FROM generate_series('2000-01-01'::timestamp, '2000-02-09'::timestamp, '1 day'::interval) s(i); -CREATE INDEX ON brin_timestamp_test USING brin (a timestamp_minmax_multi_ops) WITH (pages_per_range=1); -SET enable_seqscan = off; -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) -SELECT * FROM brin_timestamp_test WHERE a = '2023-01-01'::timestamp; - QUERY PLAN ------------------------------------------------------------------------------- - Bitmap Heap Scan on brin_timestamp_test (actual rows=0 loops=1) - Recheck Cond: (a = '2023-01-01 00:00:00'::timestamp without time zone) - -> Bitmap Index Scan on brin_timestamp_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '2023-01-01 00:00:00'::timestamp without time zone) -(4 rows) - -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) -SELECT * FROM brin_timestamp_test WHERE a = '1900-01-01'::timestamp; - QUERY PLAN ------------------------------------------------------------------------------- - Bitmap Heap Scan on brin_timestamp_test (actual rows=0 loops=1) - Recheck Cond: (a = '1900-01-01 00:00:00'::timestamp without time zone) - 
-> Bitmap Index Scan on brin_timestamp_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '1900-01-01 00:00:00'::timestamp without time zone) -(4 rows) - -DROP TABLE brin_timestamp_test; -RESET enable_seqscan; --- test handling of infinite date values -CREATE TABLE brin_date_test(a DATE); -INSERT INTO brin_date_test VALUES ('-infinity'), ('infinity'); -INSERT INTO brin_date_test SELECT '2000-01-01'::date + i FROM generate_series(1, 40) s(i); -CREATE INDEX ON brin_date_test USING brin (a date_minmax_multi_ops) WITH (pages_per_range=1); -SET enable_seqscan = off; -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) -SELECT * FROM brin_date_test WHERE a = '2023-01-01'::date; - QUERY PLAN -------------------------------------------------------------------------- - Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1) - Recheck Cond: (a = '2023-01-01'::date) - -> Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '2023-01-01'::date) -(4 rows) - -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) -SELECT * FROM brin_date_test WHERE a = '1900-01-01'::date; - QUERY PLAN -------------------------------------------------------------------------- - Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1) - Recheck Cond: (a = '1900-01-01'::date) - -> Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '1900-01-01'::date) -(4 rows) - -DROP TABLE brin_date_test; -RESET enable_seqscan; -RESET datestyle; --- test handling of overflow for interval values -CREATE TABLE brin_interval_test(a INTERVAL); -INSERT INTO brin_interval_test SELECT (i || ' years')::interval FROM generate_series(-178000000, -177999980) s(i); -INSERT INTO brin_interval_test SELECT (i || ' years')::interval FROM generate_series( 177999980, 178000000) s(i); -CREATE INDEX ON brin_interval_test USING brin (a interval_minmax_multi_ops) WITH (pages_per_range=1); -SET enable_seqscan = off; -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) -SELECT * FROM brin_interval_test WHERE a = '-30 years'::interval; - QUERY PLAN ------------------------------------------------------------------------------ - Bitmap Heap Scan on brin_interval_test (actual rows=0 loops=1) - Recheck Cond: (a = '@ 30 years ago'::interval) - -> Bitmap Index Scan on brin_interval_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '@ 30 years ago'::interval) -(4 rows) - -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) -SELECT * FROM brin_interval_test WHERE a = '30 years'::interval; - QUERY PLAN ------------------------------------------------------------------------------ - Bitmap Heap Scan on brin_interval_test (actual rows=0 loops=1) - Recheck Cond: (a = '@ 30 years'::interval) - -> Bitmap Index Scan on brin_interval_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '@ 30 years'::interval) -(4 rows) - -DROP TABLE brin_interval_test; -RESET enable_seqscan; --- test handling of infinite interval values -CREATE TABLE brin_interval_test(a INTERVAL); -INSERT INTO brin_interval_test VALUES ('-infinity'), ('infinity'); -INSERT INTO brin_interval_test SELECT (i || ' days')::interval FROM generate_series(100, 140) s(i); -CREATE INDEX ON brin_interval_test USING brin (a interval_minmax_multi_ops) WITH (pages_per_range=1); -SET enable_seqscan = off; -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) -SELECT * FROM brin_interval_test WHERE a = '-30 years'::interval; - QUERY PLAN ------------------------------------------------------------------------------ - Bitmap Heap Scan on 
brin_interval_test (actual rows=0 loops=1) - Recheck Cond: (a = '@ 30 years ago'::interval) - -> Bitmap Index Scan on brin_interval_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '@ 30 years ago'::interval) -(4 rows) - -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) -SELECT * FROM brin_interval_test WHERE a = '30 years'::interval; - QUERY PLAN ------------------------------------------------------------------------------ - Bitmap Heap Scan on brin_interval_test (actual rows=0 loops=1) - Recheck Cond: (a = '@ 30 years'::interval) - -> Bitmap Index Scan on brin_interval_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '@ 30 years'::interval) -(4 rows) - -DROP TABLE brin_interval_test; -RESET enable_seqscan; -RESET datestyle; +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/create_table_like.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/create_table_like.out --- /tmp/cirrus-ci-build/src/test/regress/expected/create_table_like.out 2024-03-07 14:25:00.330065000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/create_table_like.out 2024-03-07 14:27:17.081016000 +0000 @@ -1,530 +1,2 @@ -/* Test inheritance of structure (LIKE) */ -CREATE TABLE inhx (xx text DEFAULT 'text'); -/* - * Test double inheritance - * - * Ensure that defaults are NOT included unless - * INCLUDING DEFAULTS is specified - */ -CREATE TABLE ctla (aa TEXT); -CREATE TABLE ctlb (bb TEXT) INHERITS (ctla); -CREATE TABLE foo (LIKE nonexistent); -ERROR: relation "nonexistent" does not exist -LINE 1: CREATE TABLE foo (LIKE nonexistent); - ^ -CREATE TABLE inhe (ee text, LIKE inhx) inherits (ctlb); -INSERT INTO inhe VALUES ('ee-col1', 'ee-col2', DEFAULT, 'ee-col4'); -SELECT * FROM inhe; /* Columns aa, bb, xx value NULL, ee */ - aa | bb | ee | xx ----------+---------+----+--------- - ee-col1 | ee-col2 | | ee-col4 -(1 row) - -SELECT * FROM inhx; /* Empty set since LIKE inherits structure only */ - xx ----- -(0 rows) - -SELECT * FROM ctlb; /* Has ee entry */ - aa | bb ----------+--------- - ee-col1 | ee-col2 -(1 row) - -SELECT * FROM ctla; /* Has ee entry */ - aa ---------- - ee-col1 -(1 row) - -CREATE TABLE inhf (LIKE inhx, LIKE inhx); /* Throw error */ -ERROR: column "xx" specified more than once -CREATE TABLE inhf (LIKE inhx INCLUDING DEFAULTS INCLUDING CONSTRAINTS); -INSERT INTO inhf DEFAULT VALUES; -SELECT * FROM inhf; /* Single entry with value 'text' */ - xx ------- - text -(1 row) - -ALTER TABLE inhx add constraint foo CHECK (xx = 'text'); -ALTER TABLE inhx ADD PRIMARY KEY (xx); -CREATE TABLE inhg (LIKE inhx); /* Doesn't copy constraint */ -INSERT INTO inhg VALUES ('foo'); -DROP TABLE inhg; -CREATE TABLE inhg (x text, LIKE inhx INCLUDING CONSTRAINTS, y text); /* Copies constraints */ -INSERT INTO inhg VALUES ('x', 'text', 'y'); /* Succeeds */ -INSERT INTO inhg VALUES ('x', 'text', 'y'); /* Succeeds -- Unique constraints not copied */ -INSERT INTO inhg VALUES ('x', 'foo', 'y'); /* fails due to constraint */ -ERROR: new row for relation "inhg" violates check constraint "foo" -DETAIL: Failing row contains (x, foo, y). 
-SELECT * FROM inhg; /* Two records with three columns in order x=x, xx=text, y=y */ - x | xx | y ----+------+--- - x | text | y - x | text | y -(2 rows) - -DROP TABLE inhg; -CREATE TABLE test_like_id_1 (a bigint GENERATED ALWAYS AS IDENTITY, b text); -\d test_like_id_1 - Table "public.test_like_id_1" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+------------------------------ - a | bigint | | not null | generated always as identity - b | text | | | - -INSERT INTO test_like_id_1 (b) VALUES ('b1'); -SELECT * FROM test_like_id_1; - a | b ----+---- - 1 | b1 -(1 row) - -CREATE TABLE test_like_id_2 (LIKE test_like_id_1); -\d test_like_id_2 - Table "public.test_like_id_2" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+--------- - a | bigint | | not null | - b | text | | | - -INSERT INTO test_like_id_2 (b) VALUES ('b2'); -ERROR: null value in column "a" of relation "test_like_id_2" violates not-null constraint -DETAIL: Failing row contains (null, b2). -SELECT * FROM test_like_id_2; -- identity was not copied - a | b ----+--- -(0 rows) - -CREATE TABLE test_like_id_3 (LIKE test_like_id_1 INCLUDING IDENTITY); -\d test_like_id_3 - Table "public.test_like_id_3" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+------------------------------ - a | bigint | | not null | generated always as identity - b | text | | | - -INSERT INTO test_like_id_3 (b) VALUES ('b3'); -SELECT * FROM test_like_id_3; -- identity was copied and applied - a | b ----+---- - 1 | b3 -(1 row) - -DROP TABLE test_like_id_1, test_like_id_2, test_like_id_3; -CREATE TABLE test_like_gen_1 (a int, b int GENERATED ALWAYS AS (a * 2) STORED); -\d test_like_gen_1 - Table "public.test_like_gen_1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - a | integer | | | - b | integer | | | generated always as (a * 2) stored - -INSERT INTO test_like_gen_1 (a) VALUES (1); -SELECT * FROM test_like_gen_1; - a | b ----+--- - 1 | 2 -(1 row) - -CREATE TABLE test_like_gen_2 (LIKE test_like_gen_1); -\d test_like_gen_2 - Table "public.test_like_gen_2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | - -INSERT INTO test_like_gen_2 (a) VALUES (1); -SELECT * FROM test_like_gen_2; - a | b ----+--- - 1 | -(1 row) - -CREATE TABLE test_like_gen_3 (LIKE test_like_gen_1 INCLUDING GENERATED); -\d test_like_gen_3 - Table "public.test_like_gen_3" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - a | integer | | | - b | integer | | | generated always as (a * 2) stored - -INSERT INTO test_like_gen_3 (a) VALUES (1); -SELECT * FROM test_like_gen_3; - a | b ----+--- - 1 | 2 -(1 row) - -DROP TABLE test_like_gen_1, test_like_gen_2, test_like_gen_3; --- also test generated column with a "forward" reference (bug #16342) -CREATE TABLE test_like_4 (b int DEFAULT 42, - c int GENERATED ALWAYS AS (a * 2) STORED, - a int CHECK (a > 0)); -\d test_like_4 - Table "public.test_like_4" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - b | integer | | | 42 - c | integer | | | generated always as (a * 2) stored - a | integer | | | -Check constraints: - "test_like_4_a_check" CHECK (a > 0) - -CREATE TABLE test_like_4a (LIKE test_like_4); 
-CREATE TABLE test_like_4b (LIKE test_like_4 INCLUDING DEFAULTS);
-CREATE TABLE test_like_4c (LIKE test_like_4 INCLUDING GENERATED);
-CREATE TABLE test_like_4d (LIKE test_like_4 INCLUDING DEFAULTS INCLUDING GENERATED);
-\d test_like_4a
-            Table "public.test_like_4a"
- Column |  Type   | Collation | Nullable | Default 
---------+---------+-----------+----------+---------
- b      | integer |           |          | 
- c      | integer |           |          | 
- a      | integer |           |          | 
-
-INSERT INTO test_like_4a (a) VALUES(11);
-SELECT a, b, c FROM test_like_4a;
- a  | b | c 
-----+---+---
- 11 |   |  
-(1 row)
-
-\d test_like_4b
-            Table "public.test_like_4b"
- Column |  Type   | Collation | Nullable | Default 
---------+---------+-----------+----------+---------
- b      | integer |           |          | 42
- c      | integer |           |          | 
- a      | integer |           |          | 
-
-INSERT INTO test_like_4b (a) VALUES(11);
-SELECT a, b, c FROM test_like_4b;
- a  | b  | c 
-----+----+---
- 11 | 42 |  
-(1 row)
-
-\d test_like_4c
-                             Table "public.test_like_4c"
- Column |  Type   | Collation | Nullable |              Default               
---------+---------+-----------+----------+------------------------------------
- b      | integer |           |          | 
- c      | integer |           |          | generated always as (a * 2) stored
- a      | integer |           |          | 
-
-INSERT INTO test_like_4c (a) VALUES(11);
-SELECT a, b, c FROM test_like_4c;
- a  | b | c  
-----+---+----
- 11 |   | 22
-(1 row)
-
-\d test_like_4d
-                             Table "public.test_like_4d"
- Column |  Type   | Collation | Nullable |              Default               
---------+---------+-----------+----------+------------------------------------
- b      | integer |           |          | 42
- c      | integer |           |          | generated always as (a * 2) stored
- a      | integer |           |          | 
-
-INSERT INTO test_like_4d (a) VALUES(11);
-SELECT a, b, c FROM test_like_4d;
- a  | b  | c  
-----+----+----
- 11 | 42 | 22
-(1 row)
-
--- Test renumbering of Vars when combining LIKE with inheritance
-CREATE TABLE test_like_5 (x point, y point, z point);
-CREATE TABLE test_like_5x (p int CHECK (p > 0),
-   q int GENERATED ALWAYS AS (p * 2) STORED);
-CREATE TABLE test_like_5c (LIKE test_like_4 INCLUDING ALL)
-  INHERITS (test_like_5, test_like_5x);
-\d test_like_5c
-                             Table "public.test_like_5c"
- Column |  Type   | Collation | Nullable |              Default               
---------+---------+-----------+----------+------------------------------------
- x      | point   |           |          | 
- y      | point   |           |          | 
- z      | point   |           |          | 
- p      | integer |           |          | 
- q      | integer |           |          | generated always as (p * 2) stored
- b      | integer |           |          | 42
- c      | integer |           |          | generated always as (a * 2) stored
- a      | integer |           |          | 
-Check constraints:
-    "test_like_4_a_check" CHECK (a > 0)
-    "test_like_5x_p_check" CHECK (p > 0)
-Inherits: test_like_5,
-          test_like_5x
-
-DROP TABLE test_like_4, test_like_4a, test_like_4b, test_like_4c, test_like_4d;
-DROP TABLE test_like_5, test_like_5x, test_like_5c;
-CREATE TABLE inhg (x text, LIKE inhx INCLUDING INDEXES, y text); /* copies indexes */
-INSERT INTO inhg VALUES (5, 10);
-INSERT INTO inhg VALUES (20, 10); -- should fail
-ERROR:  duplicate key value violates unique constraint "inhg_pkey"
-DETAIL:  Key (xx)=(10) already exists.
-DROP TABLE inhg;
-/* Multiple primary keys creation should fail */
-CREATE TABLE inhg (x text, LIKE inhx INCLUDING INDEXES, PRIMARY KEY(x)); /* fails */
-ERROR:  multiple primary keys for table "inhg" are not allowed
-CREATE TABLE inhz (xx text DEFAULT 'text', yy int UNIQUE);
-CREATE UNIQUE INDEX inhz_xx_idx on inhz (xx) WHERE xx <> 'test';
-/* Ok to create multiple unique indexes */
-CREATE TABLE inhg (x text UNIQUE, LIKE inhz INCLUDING INDEXES);
-INSERT INTO inhg (xx, yy, x) VALUES ('test', 5, 10);
-INSERT INTO inhg (xx, yy, x) VALUES ('test', 10, 15);
-INSERT INTO inhg (xx, yy, x) VALUES ('foo', 10, 15); -- should fail
-ERROR:  duplicate key value violates unique constraint "inhg_x_key"
-DETAIL:  Key (x)=(15) already exists.
-DROP TABLE inhg;
-DROP TABLE inhz;
-/* Use primary key imported by LIKE for self-referential FK constraint */
-CREATE TABLE inhz (x text REFERENCES inhz, LIKE inhx INCLUDING INDEXES);
-\d inhz
-              Table "public.inhz"
- Column | Type | Collation | Nullable | Default 
---------+------+-----------+----------+---------
- x      | text |           |          | 
- xx     | text |           | not null | 
-Indexes:
-    "inhz_pkey" PRIMARY KEY, btree (xx)
-Foreign-key constraints:
-    "inhz_x_fkey" FOREIGN KEY (x) REFERENCES inhz(xx)
-Referenced by:
-    TABLE "inhz" CONSTRAINT "inhz_x_fkey" FOREIGN KEY (x) REFERENCES inhz(xx)
-
-DROP TABLE inhz;
--- including storage and comments
-CREATE TABLE ctlt1 (a text CHECK (length(a) > 2) PRIMARY KEY, b text);
-CREATE INDEX ctlt1_b_key ON ctlt1 (b);
-CREATE INDEX ctlt1_fnidx ON ctlt1 ((a || b));
-CREATE STATISTICS ctlt1_a_b_stat ON a,b FROM ctlt1;
-CREATE STATISTICS ctlt1_expr_stat ON (a || b) FROM ctlt1;
-COMMENT ON STATISTICS ctlt1_a_b_stat IS 'ab stats';
-COMMENT ON STATISTICS ctlt1_expr_stat IS 'ab expr stats';
-COMMENT ON COLUMN ctlt1.a IS 'A';
-COMMENT ON COLUMN ctlt1.b IS 'B';
-COMMENT ON CONSTRAINT ctlt1_a_check ON ctlt1 IS 't1_a_check';
-COMMENT ON INDEX ctlt1_pkey IS 'index pkey';
-COMMENT ON INDEX ctlt1_b_key IS 'index b_key';
-ALTER TABLE ctlt1 ALTER COLUMN a SET STORAGE MAIN;
-CREATE TABLE ctlt2 (c text);
-ALTER TABLE ctlt2 ALTER COLUMN c SET STORAGE EXTERNAL;
-COMMENT ON COLUMN ctlt2.c IS 'C';
-CREATE TABLE ctlt3 (a text CHECK (length(a) < 5), c text CHECK (length(c) < 7));
-ALTER TABLE ctlt3 ALTER COLUMN c SET STORAGE EXTERNAL;
-ALTER TABLE ctlt3 ALTER COLUMN a SET STORAGE MAIN;
-CREATE INDEX ctlt3_fnidx ON ctlt3 ((a || c));
-COMMENT ON COLUMN ctlt3.a IS 'A3';
-COMMENT ON COLUMN ctlt3.c IS 'C';
-COMMENT ON CONSTRAINT ctlt3_a_check ON ctlt3 IS 't3_a_check';
-CREATE TABLE ctlt4 (a text, c text);
-ALTER TABLE ctlt4 ALTER COLUMN c SET STORAGE EXTERNAL;
-CREATE TABLE ctlt12_storage (LIKE ctlt1 INCLUDING STORAGE, LIKE ctlt2 INCLUDING STORAGE);
-\d+ ctlt12_storage
-                             Table "public.ctlt12_storage"
- Column | Type | Collation | Nullable | Default | Storage  | Stats target | Description 
---------+------+-----------+----------+---------+----------+--------------+-------------
- a      | text |           | not null |         | main     |              | 
- b      | text |           |          |         | extended |              | 
- c      | text |           |          |         | external |              | 
-Not-null constraints:
-    "ctlt12_storage_a_not_null" NOT NULL "a"
-
-CREATE TABLE ctlt12_comments (LIKE ctlt1 INCLUDING COMMENTS, LIKE ctlt2 INCLUDING COMMENTS);
-\d+ ctlt12_comments
-                             Table "public.ctlt12_comments"
- Column | Type | Collation | Nullable | Default | Storage  | Stats target | Description 
---------+------+-----------+----------+---------+----------+--------------+-------------
- a      | text |           | not null |         | extended |              | A
- b      | text |           |          |         | extended |              | B
- c      | text |           |          |         | extended |              | C
-Not-null constraints:
-    "ctlt12_comments_a_not_null" NOT NULL "a"
-
-CREATE TABLE ctlt1_inh (LIKE ctlt1 INCLUDING CONSTRAINTS INCLUDING COMMENTS) INHERITS (ctlt1);
-NOTICE:  merging column "a" with inherited definition
-NOTICE:  merging column "b" with inherited definition
-NOTICE:  merging constraint "ctlt1_a_check" with inherited definition
-\d+ ctlt1_inh
-                               Table "public.ctlt1_inh"
- Column | Type | Collation | Nullable | Default | Storage  | Stats target | Description 
---------+------+-----------+----------+---------+----------+--------------+-------------
- a      | text |           | not null |         | main     |              | A
- b      | text |           |          |         | extended |              | B
-Check constraints:
-    "ctlt1_a_check" CHECK (length(a) > 2)
-Not-null constraints:
-    "ctlt1_inh_a_not_null" NOT NULL "a" (local, inherited)
-Inherits: ctlt1
-
-SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt1_inh'::regclass;
- description 
--------------
- t1_a_check
-(1 row)
-
-CREATE TABLE ctlt13_inh () INHERITS (ctlt1, ctlt3);
-NOTICE:  merging multiple inherited definitions of column "a"
-\d+ ctlt13_inh
-                               Table "public.ctlt13_inh"
- Column | Type | Collation | Nullable | Default | Storage  | Stats target | Description 
---------+------+-----------+----------+---------+----------+--------------+-------------
- a      | text |           | not null |         | main     |              | 
- b      | text |           |          |         | extended |              | 
- c      | text |           |          |         | external |              | 
-Check constraints:
-    "ctlt1_a_check" CHECK (length(a) > 2)
-    "ctlt3_a_check" CHECK (length(a) < 5)
-    "ctlt3_c_check" CHECK (length(c) < 7)
-Not-null constraints:
-    "ctlt13_inh_a_not_null" NOT NULL "a" (inherited)
-Inherits: ctlt1,
-          ctlt3
-
-CREATE TABLE ctlt13_like (LIKE ctlt3 INCLUDING CONSTRAINTS INCLUDING INDEXES INCLUDING COMMENTS INCLUDING STORAGE) INHERITS (ctlt1);
-NOTICE:  merging column "a" with inherited definition
-\d+ ctlt13_like
-                              Table "public.ctlt13_like"
- Column | Type | Collation | Nullable | Default | Storage  | Stats target | Description 
---------+------+-----------+----------+---------+----------+--------------+-------------
- a      | text |           | not null |         | main     |              | A3
- b      | text |           |          |         | extended |              | 
- c      | text |           |          |         | external |              | C
-Indexes:
-    "ctlt13_like_expr_idx" btree ((a || c))
-Check constraints:
-    "ctlt1_a_check" CHECK (length(a) > 2)
-    "ctlt3_a_check" CHECK (length(a) < 5)
-    "ctlt3_c_check" CHECK (length(c) < 7)
-Not-null constraints:
-    "ctlt13_like_a_not_null" NOT NULL "a" (inherited)
-Inherits: ctlt1
-
-SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt13_like'::regclass;
- description 
--------------
- t3_a_check
-(1 row)
-
-CREATE TABLE ctlt_all (LIKE ctlt1 INCLUDING ALL);
-\d+ ctlt_all
-                               Table "public.ctlt_all"
- Column | Type | Collation | Nullable | Default | Storage  | Stats target | Description 
---------+------+-----------+----------+---------+----------+--------------+-------------
- a      | text |           | not null |         | main     |              | A
- b      | text |           |          |         | extended |              | B
-Indexes:
-    "ctlt_all_pkey" PRIMARY KEY, btree (a)
-    "ctlt_all_b_idx" btree (b)
-    "ctlt_all_expr_idx" btree ((a || b))
-Check constraints:
-    "ctlt1_a_check" CHECK (length(a) > 2)
-Statistics objects:
-    "public.ctlt_all_a_b_stat" ON a, b FROM ctlt_all
-    "public.ctlt_all_expr_stat" ON (a || b) FROM ctlt_all
-
-SELECT c.relname, objsubid, description FROM pg_description, pg_index i, pg_class c WHERE classoid = 'pg_class'::regclass AND objoid = i.indexrelid AND c.oid = i.indexrelid AND i.indrelid = 'ctlt_all'::regclass ORDER BY c.relname, objsubid;
-    relname     | objsubid | description 
-----------------+----------+-------------
- ctlt_all_b_idx |        0 | index b_key
- ctlt_all_pkey  |        0 | index pkey
-(2 rows)
-
-SELECT s.stxname, objsubid, description FROM pg_description, pg_statistic_ext s WHERE classoid = 'pg_statistic_ext'::regclass AND objoid = s.oid AND s.stxrelid = 'ctlt_all'::regclass ORDER BY s.stxname, objsubid;
-      stxname       | objsubid |  description  
---------------------+----------+---------------
- ctlt_all_a_b_stat  |        0 | ab stats
- ctlt_all_expr_stat |        0 | ab expr stats
-(2 rows)
-
-CREATE TABLE inh_error1 () INHERITS (ctlt1, ctlt4);
-NOTICE:  merging multiple inherited definitions of column "a"
-ERROR:  inherited column "a" has a storage parameter conflict
-DETAIL:  MAIN versus EXTENDED
-CREATE TABLE inh_error2 (LIKE ctlt4 INCLUDING STORAGE) INHERITS (ctlt1);
-NOTICE:  merging column "a" with inherited definition
-ERROR:  column "a" has a storage parameter conflict
-DETAIL:  MAIN versus EXTENDED
--- Check that LIKE isn't confused by a system catalog of the same name
-CREATE TABLE pg_attrdef (LIKE ctlt1 INCLUDING ALL);
-\d+ public.pg_attrdef
-                              Table "public.pg_attrdef"
- Column | Type | Collation | Nullable | Default | Storage  | Stats target | Description 
---------+------+-----------+----------+---------+----------+--------------+-------------
- a      | text |           | not null |         | main     |              | A
- b      | text |           |          |         | extended |              | B
-Indexes:
-    "pg_attrdef_pkey" PRIMARY KEY, btree (a)
-    "pg_attrdef_b_idx" btree (b)
-    "pg_attrdef_expr_idx" btree ((a || b))
-Check constraints:
-    "ctlt1_a_check" CHECK (length(a) > 2)
-Statistics objects:
-    "public.pg_attrdef_a_b_stat" ON a, b FROM public.pg_attrdef
-    "public.pg_attrdef_expr_stat" ON (a || b) FROM public.pg_attrdef
-
-DROP TABLE public.pg_attrdef;
--- Check that LIKE isn't confused when new table masks the old, either
-BEGIN;
-CREATE SCHEMA ctl_schema;
-SET LOCAL search_path = ctl_schema, public;
-CREATE TABLE ctlt1 (LIKE ctlt1 INCLUDING ALL);
-\d+ ctlt1
-                              Table "ctl_schema.ctlt1"
- Column | Type | Collation | Nullable | Default | Storage  | Stats target | Description 
---------+------+-----------+----------+---------+----------+--------------+-------------
- a      | text |           | not null |         | main     |              | A
- b      | text |           |          |         | extended |              | B
-Indexes:
-    "ctlt1_pkey" PRIMARY KEY, btree (a)
-    "ctlt1_b_idx" btree (b)
-    "ctlt1_expr_idx" btree ((a || b))
-Check constraints:
-    "ctlt1_a_check" CHECK (length(a) > 2)
-Statistics objects:
-    "ctl_schema.ctlt1_a_b_stat" ON a, b FROM ctlt1
-    "ctl_schema.ctlt1_expr_stat" ON (a || b) FROM ctlt1
-
-ROLLBACK;
-DROP TABLE ctlt1, ctlt2, ctlt3, ctlt4, ctlt12_storage, ctlt12_comments, ctlt1_inh, ctlt13_inh, ctlt13_like, ctlt_all, ctla, ctlb CASCADE;
-NOTICE:  drop cascades to table inhe
--- LIKE must respect NO INHERIT property of constraints
-CREATE TABLE noinh_con_copy (a int CHECK (a > 0) NO INHERIT);
-CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS);
-\d noinh_con_copy1
-        Table "public.noinh_con_copy1"
- Column |  Type   | Collation | Nullable | Default 
---------+---------+-----------+----------+---------
- a      | integer |           |          | 
-Check constraints:
-    "noinh_con_copy_a_check" CHECK (a > 0) NO INHERIT
-
--- fail, as partitioned tables don't allow NO INHERIT constraints
-CREATE TABLE noinh_con_copy1_parted (LIKE noinh_con_copy INCLUDING ALL)
-  PARTITION BY LIST (a);
-ERROR:  cannot add NO INHERIT constraint to partitioned table "noinh_con_copy1_parted"
-DROP TABLE noinh_con_copy, noinh_con_copy1;
-/* LIKE with other relation kinds */
-CREATE TABLE ctlt4 (a int, b text);
-CREATE SEQUENCE ctlseq1;
-CREATE TABLE ctlt10 (LIKE ctlseq1); -- fail
-ERROR:  relation "ctlseq1" is invalid in LIKE clause
-LINE 1: CREATE TABLE ctlt10 (LIKE ctlseq1);
-                                  ^
-DETAIL:  This operation is not supported for sequences.
-CREATE VIEW ctlv1 AS SELECT * FROM ctlt4;
-CREATE TABLE ctlt11 (LIKE ctlv1);
-CREATE TABLE ctlt11a (LIKE ctlv1 INCLUDING ALL);
-CREATE TYPE ctlty1 AS (a int, b text);
-CREATE TABLE ctlt12 (LIKE ctlty1);
-DROP SEQUENCE ctlseq1;
-DROP TYPE ctlty1;
-DROP VIEW ctlv1;
-DROP TABLE IF EXISTS ctlt4, ctlt10, ctlt11, ctlt11a, ctlt12;
-NOTICE:  table "ctlt10" does not exist, skipping
+psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL:  the database system is not yet accepting connections
+DETAIL:  Consistent recovery state has not been yet reached.
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/alter_generic.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/alter_generic.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/alter_generic.out	2024-03-07 14:25:00.329214000 +0000
+++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/alter_generic.out	2024-03-07 14:27:17.083964000 +0000
@@ -1,755 +1,2 @@
---
--- Test for ALTER some_object {RENAME TO, OWNER TO, SET SCHEMA}
---
--- directory paths and dlsuffix are passed to us in environment variables
-\getenv libdir PG_LIBDIR
-\getenv dlsuffix PG_DLSUFFIX
-\set regresslib :libdir '/regress' :dlsuffix
-CREATE FUNCTION test_opclass_options_func(internal)
-    RETURNS void
-    AS :'regresslib', 'test_opclass_options_func'
-    LANGUAGE C;
--- Clean up in case a prior regression run failed
-SET client_min_messages TO 'warning';
-DROP ROLE IF EXISTS regress_alter_generic_user1;
-DROP ROLE IF EXISTS regress_alter_generic_user2;
-DROP ROLE IF EXISTS regress_alter_generic_user3;
-RESET client_min_messages;
-CREATE USER regress_alter_generic_user3;
-CREATE USER regress_alter_generic_user2;
-CREATE USER regress_alter_generic_user1 IN ROLE regress_alter_generic_user3;
-CREATE SCHEMA alt_nsp1;
-CREATE SCHEMA alt_nsp2;
-GRANT ALL ON SCHEMA alt_nsp1, alt_nsp2 TO public;
-SET search_path = alt_nsp1, public;
---
--- Function and Aggregate
---
-SET SESSION AUTHORIZATION regress_alter_generic_user1;
-CREATE FUNCTION alt_func1(int) RETURNS int LANGUAGE sql
-  AS 'SELECT $1 + 1';
-CREATE FUNCTION alt_func2(int) RETURNS int LANGUAGE sql
-  AS 'SELECT $1 - 1';
-CREATE AGGREGATE alt_agg1 (
-  sfunc1 = int4pl, basetype = int4, stype1 = int4, initcond = 0
-);
-CREATE AGGREGATE alt_agg2 (
-  sfunc1 = int4mi, basetype = int4, stype1 = int4, initcond = 0
-);
-ALTER AGGREGATE alt_func1(int) RENAME TO alt_func3; -- failed (not aggregate)
-ERROR:  function alt_func1(integer) is not an aggregate
-ALTER AGGREGATE alt_func1(int) OWNER TO regress_alter_generic_user3; -- failed (not aggregate)
-ERROR:  function alt_func1(integer) is not an aggregate
-ALTER AGGREGATE alt_func1(int) SET SCHEMA alt_nsp2; -- failed (not aggregate)
-ERROR:  function alt_func1(integer) is not an aggregate
-ALTER FUNCTION alt_func1(int) RENAME TO alt_func2; -- failed (name conflict)
-ERROR:  function alt_func2(integer) already exists in schema "alt_nsp1"
-ALTER FUNCTION alt_func1(int) RENAME TO alt_func3; -- OK
-ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_generic_user2; -- failed (no role membership)
-ERROR:  must be able to SET ROLE "regress_alter_generic_user2"
-ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_generic_user3; -- OK
-ALTER FUNCTION alt_func2(int) SET SCHEMA alt_nsp1; -- OK, already there
-ALTER FUNCTION alt_func2(int) SET SCHEMA alt_nsp2; -- OK
-ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg2; -- failed (name conflict)
-ERROR:  function alt_agg2(integer) already exists in schema "alt_nsp1"
-ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg3; -- OK
-ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user2; -- failed (no role membership)
-ERROR:  must be able to SET ROLE "regress_alter_generic_user2"
-ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user3; -- OK
-ALTER AGGREGATE alt_agg2(int) SET SCHEMA alt_nsp2; -- OK
-SET SESSION AUTHORIZATION regress_alter_generic_user2;
-CREATE FUNCTION alt_func1(int) RETURNS int LANGUAGE sql
-  AS 'SELECT $1 + 2';
-CREATE FUNCTION alt_func2(int) RETURNS int LANGUAGE sql
-  AS 'SELECT $1 - 2';
-CREATE AGGREGATE alt_agg1 (
-  sfunc1 = int4pl, basetype = int4, stype1 = int4, initcond = 100
-);
-CREATE AGGREGATE alt_agg2 (
-  sfunc1 = int4mi, basetype = int4, stype1 = int4, initcond = -100
-);
-ALTER FUNCTION alt_func3(int) RENAME TO alt_func4; -- failed (not owner)
-ERROR:  must be owner of function alt_func3
-ALTER FUNCTION alt_func1(int) RENAME TO alt_func4; -- OK
-ALTER FUNCTION alt_func3(int) OWNER TO regress_alter_generic_user2; -- failed (not owner)
-ERROR:  must be owner of function alt_func3
-ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_generic_user3; -- failed (no role membership)
-ERROR:  must be able to SET ROLE "regress_alter_generic_user3"
-ALTER FUNCTION alt_func3(int) SET SCHEMA alt_nsp2; -- failed (not owner)
-ERROR:  must be owner of function alt_func3
-ALTER FUNCTION alt_func2(int) SET SCHEMA alt_nsp2; -- failed (name conflicts)
-ERROR:  function alt_func2(integer) already exists in schema "alt_nsp2"
-ALTER AGGREGATE alt_agg3(int) RENAME TO alt_agg4; -- failed (not owner)
-ERROR:  must be owner of function alt_agg3
-ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg4; -- OK
-ALTER AGGREGATE alt_agg3(int) OWNER TO regress_alter_generic_user2; -- failed (not owner)
-ERROR:  must be owner of function alt_agg3
-ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user3; -- failed (no role membership)
-ERROR:  must be able to SET ROLE "regress_alter_generic_user3"
-ALTER AGGREGATE alt_agg3(int) SET SCHEMA alt_nsp2; -- failed (not owner)
-ERROR:  must be owner of function alt_agg3
-ALTER AGGREGATE alt_agg2(int) SET SCHEMA alt_nsp2; -- failed (name conflict)
-ERROR:  function alt_agg2(integer) already exists in schema "alt_nsp2"
-RESET SESSION AUTHORIZATION;
-SELECT n.nspname, proname, prorettype::regtype, prokind, a.rolname
-  FROM pg_proc p, pg_namespace n, pg_authid a
-  WHERE p.pronamespace = n.oid AND p.proowner = a.oid
-    AND n.nspname IN ('alt_nsp1', 'alt_nsp2')
-  ORDER BY nspname, proname;
- nspname  |  proname  | prorettype | prokind |           rolname           
-----------+-----------+------------+---------+-----------------------------
- alt_nsp1 | alt_agg2  | integer    | a       | regress_alter_generic_user2
- alt_nsp1 | alt_agg3  | integer    | a       | regress_alter_generic_user1
- alt_nsp1 | alt_agg4  | integer    | a       | regress_alter_generic_user2
- alt_nsp1 | alt_func2 | integer    | f       | regress_alter_generic_user2
- alt_nsp1 | alt_func3 | integer    | f       | regress_alter_generic_user1
- alt_nsp1 | alt_func4 | integer    | f       | regress_alter_generic_user2
- alt_nsp2 | alt_agg2  | integer    | a       | regress_alter_generic_user3
- alt_nsp2 | alt_func2 | integer    | f       | regress_alter_generic_user3
-(8 rows)
-
---
--- We would test collations here, but it's not possible because the error
--- messages tend to be nonportable.
---
---
--- Conversion
---
-SET SESSION AUTHORIZATION regress_alter_generic_user1;
-CREATE CONVERSION alt_conv1 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;
-CREATE CONVERSION alt_conv2 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;
-ALTER CONVERSION alt_conv1 RENAME TO alt_conv2; -- failed (name conflict)
-ERROR:  conversion "alt_conv2" already exists in schema "alt_nsp1"
-ALTER CONVERSION alt_conv1 RENAME TO alt_conv3; -- OK
-ALTER CONVERSION alt_conv2 OWNER TO regress_alter_generic_user2; -- failed (no role membership)
-ERROR:  must be able to SET ROLE "regress_alter_generic_user2"
-ALTER CONVERSION alt_conv2 OWNER TO regress_alter_generic_user3; -- OK
-ALTER CONVERSION alt_conv2 SET SCHEMA alt_nsp2; -- OK
-SET SESSION AUTHORIZATION regress_alter_generic_user2;
-CREATE CONVERSION alt_conv1 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;
-CREATE CONVERSION alt_conv2 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;
-ALTER CONVERSION alt_conv3 RENAME TO alt_conv4; -- failed (not owner)
-ERROR:  must be owner of conversion alt_conv3
-ALTER CONVERSION alt_conv1 RENAME TO alt_conv4; -- OK
-ALTER CONVERSION alt_conv3 OWNER TO regress_alter_generic_user2; -- failed (not owner)
-ERROR:  must be owner of conversion alt_conv3
-ALTER CONVERSION alt_conv2 OWNER TO regress_alter_generic_user3; -- failed (no role membership)
-ERROR:  must be able to SET ROLE "regress_alter_generic_user3"
-ALTER CONVERSION alt_conv3 SET SCHEMA alt_nsp2; -- failed (not owner)
-ERROR:  must be owner of conversion alt_conv3
-ALTER CONVERSION alt_conv2 SET SCHEMA alt_nsp2; -- failed (name conflict)
-ERROR:  conversion "alt_conv2" already exists in schema "alt_nsp2"
-RESET SESSION AUTHORIZATION;
-SELECT n.nspname, c.conname, a.rolname
-  FROM pg_conversion c, pg_namespace n, pg_authid a
-  WHERE c.connamespace = n.oid AND c.conowner = a.oid
-    AND n.nspname IN ('alt_nsp1', 'alt_nsp2')
-  ORDER BY nspname, conname;
- nspname  |  conname  |           rolname           
-----------+-----------+-----------------------------
- alt_nsp1 | alt_conv2 | regress_alter_generic_user2
- alt_nsp1 | alt_conv3 | regress_alter_generic_user1
- alt_nsp1 | alt_conv4 | regress_alter_generic_user2
- alt_nsp2 | alt_conv2 | regress_alter_generic_user3
-(4 rows)
-
---
--- Foreign Data Wrapper and Foreign Server
---
-CREATE FOREIGN DATA WRAPPER alt_fdw1;
-CREATE FOREIGN DATA WRAPPER alt_fdw2;
-CREATE SERVER alt_fserv1 FOREIGN DATA WRAPPER alt_fdw1;
-CREATE SERVER alt_fserv2 FOREIGN DATA WRAPPER alt_fdw2;
-ALTER FOREIGN DATA WRAPPER alt_fdw1 RENAME TO alt_fdw2; -- failed (name conflict)
-ERROR:  foreign-data wrapper "alt_fdw2" already exists
-ALTER FOREIGN DATA WRAPPER alt_fdw1 RENAME TO alt_fdw3; -- OK
-ALTER SERVER alt_fserv1 RENAME TO alt_fserv2; -- failed (name conflict)
-ERROR:  server "alt_fserv2" already exists
-ALTER SERVER alt_fserv1 RENAME TO alt_fserv3; -- OK
-SELECT fdwname FROM pg_foreign_data_wrapper WHERE fdwname like 'alt_fdw%';
- fdwname  
-----------
- alt_fdw2
- alt_fdw3
-(2 rows)
-
-SELECT srvname FROM pg_foreign_server WHERE srvname like 'alt_fserv%';
-  srvname   
-------------
- alt_fserv2
- alt_fserv3
-(2 rows)
-
---
--- Procedural Language
---
-CREATE LANGUAGE alt_lang1 HANDLER plpgsql_call_handler;
-CREATE LANGUAGE alt_lang2 HANDLER plpgsql_call_handler;
-ALTER LANGUAGE alt_lang1 OWNER TO regress_alter_generic_user1; -- OK
-ALTER LANGUAGE alt_lang2 OWNER TO regress_alter_generic_user2; -- OK
-SET SESSION AUTHORIZATION regress_alter_generic_user1;
-ALTER LANGUAGE alt_lang1 RENAME TO alt_lang2; -- failed (name conflict)
-ERROR:  language "alt_lang2" already exists
-ALTER LANGUAGE alt_lang2 RENAME TO alt_lang3; -- failed (not owner)
-ERROR:  must be owner of language alt_lang2
-ALTER LANGUAGE alt_lang1 RENAME TO alt_lang3; -- OK
-ALTER LANGUAGE alt_lang2 OWNER TO regress_alter_generic_user3; -- failed (not owner)
-ERROR:  must be owner of language alt_lang2
-ALTER LANGUAGE alt_lang3 OWNER TO regress_alter_generic_user2; -- failed (no role membership)
-ERROR:  must be able to SET ROLE "regress_alter_generic_user2"
-ALTER LANGUAGE alt_lang3 OWNER TO regress_alter_generic_user3; -- OK
-RESET SESSION AUTHORIZATION;
-SELECT lanname, a.rolname
-  FROM pg_language l, pg_authid a
-  WHERE l.lanowner = a.oid AND l.lanname like 'alt_lang%'
-  ORDER BY lanname;
-  lanname  |           rolname           
------------+-----------------------------
- alt_lang2 | regress_alter_generic_user2
- alt_lang3 | regress_alter_generic_user3
-(2 rows)
-
---
--- Operator
---
-SET SESSION AUTHORIZATION regress_alter_generic_user1;
-CREATE OPERATOR @-@ ( leftarg = int4, rightarg = int4, procedure = int4mi );
-CREATE OPERATOR @+@ ( leftarg = int4, rightarg = int4, procedure = int4pl );
-ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_generic_user2; -- failed (no role membership)
-ERROR:  must be able to SET ROLE "regress_alter_generic_user2"
-ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_generic_user3; -- OK
-ALTER OPERATOR @-@(int4, int4) SET SCHEMA alt_nsp2; -- OK
-SET SESSION AUTHORIZATION regress_alter_generic_user2;
-CREATE OPERATOR @-@ ( leftarg = int4, rightarg = int4, procedure = int4mi );
-ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_generic_user2; -- failed (not owner)
-ERROR:  must be owner of operator @+@
-ALTER OPERATOR @-@(int4, int4) OWNER TO regress_alter_generic_user3; -- failed (no role membership)
-ERROR:  must be able to SET ROLE "regress_alter_generic_user3"
-ALTER OPERATOR @+@(int4, int4) SET SCHEMA alt_nsp2; -- failed (not owner)
-ERROR:  must be owner of operator @+@
--- can't test this: the error message includes the raw oid of namespace
--- ALTER OPERATOR @-@(int4, int4) SET SCHEMA alt_nsp2; -- failed (name conflict)
-RESET SESSION AUTHORIZATION;
-SELECT n.nspname, oprname, a.rolname,
-    oprleft::regtype, oprright::regtype, oprcode::regproc
-  FROM pg_operator o, pg_namespace n, pg_authid a
-  WHERE o.oprnamespace = n.oid AND o.oprowner = a.oid
-    AND n.nspname IN ('alt_nsp1', 'alt_nsp2')
-  ORDER BY nspname, oprname;
- nspname  | oprname |           rolname           | oprleft | oprright | oprcode 
-----------+---------+-----------------------------+---------+----------+---------
- alt_nsp1 | @+@     | regress_alter_generic_user3 | integer | integer  | int4pl
- alt_nsp1 | @-@     | regress_alter_generic_user2 | integer | integer  | int4mi
- alt_nsp2 | @-@     | regress_alter_generic_user1 | integer | integer  | int4mi
-(3 rows)
-
---
--- OpFamily and OpClass
---
-CREATE OPERATOR FAMILY alt_opf1 USING hash;
-CREATE OPERATOR FAMILY alt_opf2 USING hash;
-ALTER OPERATOR FAMILY alt_opf1 USING hash OWNER TO regress_alter_generic_user1;
-ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user1;
-CREATE OPERATOR CLASS alt_opc1 FOR TYPE uuid USING hash AS STORAGE uuid;
-CREATE OPERATOR CLASS alt_opc2 FOR TYPE uuid USING hash AS STORAGE uuid;
-ALTER OPERATOR CLASS alt_opc1 USING hash OWNER TO regress_alter_generic_user1;
-ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user1;
-SET SESSION AUTHORIZATION regress_alter_generic_user1;
-ALTER OPERATOR FAMILY alt_opf1 USING hash RENAME TO alt_opf2; -- failed (name conflict)
-ERROR:  operator family "alt_opf2" for access method "hash" already exists in schema "alt_nsp1"
-ALTER OPERATOR FAMILY alt_opf1 USING hash RENAME TO alt_opf3; -- OK
-ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user2; -- failed (no role membership)
-ERROR:  must be able to SET ROLE "regress_alter_generic_user2"
-ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user3; -- OK
-ALTER OPERATOR FAMILY alt_opf2 USING hash SET SCHEMA alt_nsp2; -- OK
-ALTER OPERATOR CLASS alt_opc1 USING hash RENAME TO alt_opc2; -- failed (name conflict)
-ERROR:  operator class "alt_opc2" for access method "hash" already exists in schema "alt_nsp1"
-ALTER OPERATOR CLASS alt_opc1 USING hash RENAME TO alt_opc3; -- OK
-ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user2; -- failed (no role membership)
-ERROR:  must be able to SET ROLE "regress_alter_generic_user2"
-ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user3; -- OK
-ALTER OPERATOR CLASS alt_opc2 USING hash SET SCHEMA alt_nsp2; -- OK
-RESET SESSION AUTHORIZATION;
-CREATE OPERATOR FAMILY alt_opf1 USING hash;
-CREATE OPERATOR FAMILY alt_opf2 USING hash;
-ALTER OPERATOR FAMILY alt_opf1 USING hash OWNER TO regress_alter_generic_user2;
-ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user2;
-CREATE OPERATOR CLASS alt_opc1 FOR TYPE macaddr USING hash AS STORAGE macaddr;
-CREATE OPERATOR CLASS alt_opc2 FOR TYPE macaddr USING hash AS STORAGE macaddr;
-ALTER OPERATOR CLASS alt_opc1 USING hash OWNER TO regress_alter_generic_user2;
-ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user2;
-SET SESSION AUTHORIZATION regress_alter_generic_user2;
-ALTER OPERATOR FAMILY alt_opf3 USING hash RENAME TO alt_opf4; -- failed (not owner)
-ERROR:  must be owner of operator family alt_opf3
-ALTER OPERATOR FAMILY alt_opf1 USING hash RENAME TO alt_opf4; -- OK
-ALTER OPERATOR FAMILY alt_opf3 USING hash OWNER TO regress_alter_generic_user2; -- failed (not owner)
-ERROR:  must be owner of operator family alt_opf3
-ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user3; -- failed (no role membership)
-ERROR:  must be able to SET ROLE "regress_alter_generic_user3"
-ALTER OPERATOR FAMILY alt_opf3 USING hash SET SCHEMA alt_nsp2; -- failed (not owner)
-ERROR:  must be owner of operator family alt_opf3
-ALTER OPERATOR FAMILY alt_opf2 USING hash SET SCHEMA alt_nsp2; -- failed (name conflict)
-ERROR:  operator family "alt_opf2" for access method "hash" already exists in schema "alt_nsp2"
-ALTER OPERATOR CLASS alt_opc3 USING hash RENAME TO alt_opc4; -- failed (not owner)
-ERROR:  must be owner of operator class alt_opc3
-ALTER OPERATOR CLASS alt_opc1 USING hash RENAME TO alt_opc4; -- OK
-ALTER OPERATOR CLASS alt_opc3 USING hash OWNER TO regress_alter_generic_user2; -- failed (not owner)
-ERROR:  must be owner of operator class alt_opc3
-ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user3; -- failed (no role membership)
-ERROR:  must be able to SET ROLE "regress_alter_generic_user3"
-ALTER OPERATOR CLASS alt_opc3 USING hash SET SCHEMA alt_nsp2; -- failed (not owner)
-ERROR:  must be owner of operator class alt_opc3
-ALTER OPERATOR CLASS alt_opc2 USING hash SET SCHEMA alt_nsp2; -- failed (name conflict)
-ERROR:  operator class "alt_opc2" for access method "hash" already exists in schema "alt_nsp2"
-RESET SESSION AUTHORIZATION;
-SELECT nspname, opfname, amname, rolname
-  FROM pg_opfamily o, pg_am m, pg_namespace n, pg_authid a
-  WHERE o.opfmethod = m.oid AND o.opfnamespace = n.oid AND o.opfowner = a.oid
-    AND n.nspname IN ('alt_nsp1', 'alt_nsp2')
-    AND NOT opfname LIKE 'alt_opc%'
-  ORDER BY nspname, opfname;
- nspname  | opfname  | amname |           rolname           
-----------+----------+--------+-----------------------------
- alt_nsp1 | alt_opf2 | hash   | regress_alter_generic_user2
- alt_nsp1 | alt_opf3 | hash   | regress_alter_generic_user1
- alt_nsp1 | alt_opf4 | hash   | regress_alter_generic_user2
- alt_nsp2 | alt_opf2 | hash   | regress_alter_generic_user3
-(4 rows)
-
-SELECT nspname, opcname, amname, rolname
-  FROM pg_opclass o, pg_am m, pg_namespace n, pg_authid a
-  WHERE o.opcmethod = m.oid AND o.opcnamespace = n.oid AND o.opcowner = a.oid
-    AND n.nspname IN ('alt_nsp1', 'alt_nsp2')
-  ORDER BY nspname, opcname;
- nspname  | opcname  | amname |           rolname           
-----------+----------+--------+-----------------------------
- alt_nsp1 | alt_opc2 | hash   | regress_alter_generic_user2
- alt_nsp1 | alt_opc3 | hash   | regress_alter_generic_user1
- alt_nsp1 | alt_opc4 | hash   | regress_alter_generic_user2
- alt_nsp2 | alt_opc2 | hash   | regress_alter_generic_user3
-(4 rows)
-
--- ALTER OPERATOR FAMILY ... ADD/DROP
--- Should work. Textbook case of CREATE / ALTER ADD / ALTER DROP / DROP
-BEGIN TRANSACTION;
-CREATE OPERATOR FAMILY alt_opf4 USING btree;
-ALTER OPERATOR FAMILY alt_opf4 USING btree ADD
-  -- int4 vs int2
-  OPERATOR 1 < (int4, int2) ,
-  OPERATOR 2 <= (int4, int2) ,
-  OPERATOR 3 = (int4, int2) ,
-  OPERATOR 4 >= (int4, int2) ,
-  OPERATOR 5 > (int4, int2) ,
-  FUNCTION 1 btint42cmp(int4, int2);
-ALTER OPERATOR FAMILY alt_opf4 USING btree DROP
-  -- int4 vs int2
-  OPERATOR 1 (int4, int2) ,
-  OPERATOR 2 (int4, int2) ,
-  OPERATOR 3 (int4, int2) ,
-  OPERATOR 4 (int4, int2) ,
-  OPERATOR 5 (int4, int2) ,
-  FUNCTION 1 (int4, int2) ;
-DROP OPERATOR FAMILY alt_opf4 USING btree;
-ROLLBACK;
--- Should fail. Invalid values for ALTER OPERATOR FAMILY .. ADD / DROP
-CREATE OPERATOR FAMILY alt_opf4 USING btree;
-ALTER OPERATOR FAMILY alt_opf4 USING invalid_index_method ADD OPERATOR 1 < (int4, int2); -- invalid indexing_method
-ERROR:  access method "invalid_index_method" does not exist
-ALTER OPERATOR FAMILY alt_opf4 USING btree ADD OPERATOR 6 < (int4, int2); -- operator number should be between 1 and 5
-ERROR:  invalid operator number 6, must be between 1 and 5
-ALTER OPERATOR FAMILY alt_opf4 USING btree ADD OPERATOR 0 < (int4, int2); -- operator number should be between 1 and 5
-ERROR:  invalid operator number 0, must be between 1 and 5
-ALTER OPERATOR FAMILY alt_opf4 USING btree ADD OPERATOR 1 < ; -- operator without argument types
-ERROR:  operator argument types must be specified in ALTER OPERATOR FAMILY
-ALTER OPERATOR FAMILY alt_opf4 USING btree ADD FUNCTION 0 btint42cmp(int4, int2); -- invalid options parsing function
-ERROR:  invalid function number 0, must be between 1 and 5
-ALTER OPERATOR FAMILY alt_opf4 USING btree ADD FUNCTION 6 btint42cmp(int4, int2); -- function number should be between 1 and 5
-ERROR:  invalid function number 6, must be between 1 and 5
-ALTER OPERATOR FAMILY alt_opf4 USING btree ADD STORAGE invalid_storage; -- Ensure STORAGE is not a part of ALTER OPERATOR FAMILY
-ERROR:  STORAGE cannot be specified in ALTER OPERATOR FAMILY
-DROP OPERATOR FAMILY alt_opf4 USING btree;
--- Should fail. Need to be SUPERUSER to do ALTER OPERATOR FAMILY .. ADD / DROP
-BEGIN TRANSACTION;
-CREATE ROLE regress_alter_generic_user5 NOSUPERUSER;
-CREATE OPERATOR FAMILY alt_opf5 USING btree;
-SET ROLE regress_alter_generic_user5;
-ALTER OPERATOR FAMILY alt_opf5 USING btree ADD OPERATOR 1 < (int4, int2), FUNCTION 1 btint42cmp(int4, int2);
-ERROR:  must be superuser to alter an operator family
-RESET ROLE;
-ERROR:  current transaction is aborted, commands ignored until end of transaction block
-DROP OPERATOR FAMILY alt_opf5 USING btree;
-ERROR:  current transaction is aborted, commands ignored until end of transaction block
-ROLLBACK;
--- Should fail. Need rights to namespace for ALTER OPERATOR FAMILY .. ADD / DROP
-BEGIN TRANSACTION;
-CREATE ROLE regress_alter_generic_user6;
-CREATE SCHEMA alt_nsp6;
-REVOKE ALL ON SCHEMA alt_nsp6 FROM regress_alter_generic_user6;
-CREATE OPERATOR FAMILY alt_nsp6.alt_opf6 USING btree;
-SET ROLE regress_alter_generic_user6;
-ALTER OPERATOR FAMILY alt_nsp6.alt_opf6 USING btree ADD OPERATOR 1 < (int4, int2);
-ERROR:  permission denied for schema alt_nsp6
-ROLLBACK;
--- Should fail. Only two arguments required for ALTER OPERATOR FAMILY ... DROP OPERATOR
-CREATE OPERATOR FAMILY alt_opf7 USING btree;
-ALTER OPERATOR FAMILY alt_opf7 USING btree ADD OPERATOR 1 < (int4, int2);
-ALTER OPERATOR FAMILY alt_opf7 USING btree DROP OPERATOR 1 (int4, int2, int8);
-ERROR:  one or two argument types must be specified
-DROP OPERATOR FAMILY alt_opf7 USING btree;
--- Should work. During ALTER OPERATOR FAMILY ... DROP OPERATOR
--- when left type is the same as right type, a DROP with only one argument type should work
-CREATE OPERATOR FAMILY alt_opf8 USING btree;
-ALTER OPERATOR FAMILY alt_opf8 USING btree ADD OPERATOR 1 < (int4, int4);
-DROP OPERATOR FAMILY alt_opf8 USING btree;
--- Should work. Textbook case of ALTER OPERATOR FAMILY ... ADD OPERATOR with FOR ORDER BY
-CREATE OPERATOR FAMILY alt_opf9 USING gist;
-ALTER OPERATOR FAMILY alt_opf9 USING gist ADD OPERATOR 1 < (int4, int4) FOR ORDER BY float_ops;
-DROP OPERATOR FAMILY alt_opf9 USING gist;
--- Should fail. Ensure correct ordering methods in ALTER OPERATOR FAMILY ... ADD OPERATOR .. FOR ORDER BY
-CREATE OPERATOR FAMILY alt_opf10 USING btree;
-ALTER OPERATOR FAMILY alt_opf10 USING btree ADD OPERATOR 1 < (int4, int4) FOR ORDER BY float_ops;
-ERROR:  access method "btree" does not support ordering operators
-DROP OPERATOR FAMILY alt_opf10 USING btree;
--- Should work. Textbook case of ALTER OPERATOR FAMILY ... ADD OPERATOR with FOR ORDER BY
-CREATE OPERATOR FAMILY alt_opf11 USING gist;
-ALTER OPERATOR FAMILY alt_opf11 USING gist ADD OPERATOR 1 < (int4, int4) FOR ORDER BY float_ops;
-ALTER OPERATOR FAMILY alt_opf11 USING gist DROP OPERATOR 1 (int4, int4);
-DROP OPERATOR FAMILY alt_opf11 USING gist;
--- Should fail. btree comparison functions should return INTEGER in ALTER OPERATOR FAMILY ... ADD FUNCTION
-BEGIN TRANSACTION;
-CREATE OPERATOR FAMILY alt_opf12 USING btree;
-CREATE FUNCTION fn_opf12 (int4, int2) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL;
-ALTER OPERATOR FAMILY alt_opf12 USING btree ADD FUNCTION 1 fn_opf12(int4, int2);
-ERROR:  btree comparison functions must return integer
-DROP OPERATOR FAMILY alt_opf12 USING btree;
-ERROR:  current transaction is aborted, commands ignored until end of transaction block
-ROLLBACK;
--- Should fail. hash comparison functions should return INTEGER in ALTER OPERATOR FAMILY ... ADD FUNCTION
-BEGIN TRANSACTION;
-CREATE OPERATOR FAMILY alt_opf13 USING hash;
-CREATE FUNCTION fn_opf13 (int4) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL;
-ALTER OPERATOR FAMILY alt_opf13 USING hash ADD FUNCTION 1 fn_opf13(int4);
-ERROR:  hash function 1 must return integer
-DROP OPERATOR FAMILY alt_opf13 USING hash;
-ERROR:  current transaction is aborted, commands ignored until end of transaction block
-ROLLBACK;
--- Should fail. btree comparison functions should have two arguments in ALTER OPERATOR FAMILY ... ADD FUNCTION
-BEGIN TRANSACTION;
-CREATE OPERATOR FAMILY alt_opf14 USING btree;
-CREATE FUNCTION fn_opf14 (int4) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL;
-ALTER OPERATOR FAMILY alt_opf14 USING btree ADD FUNCTION 1 fn_opf14(int4);
-ERROR:  btree comparison functions must have two arguments
-DROP OPERATOR FAMILY alt_opf14 USING btree;
-ERROR:  current transaction is aborted, commands ignored until end of transaction block
-ROLLBACK;
--- Should fail. hash comparison functions should have one argument in ALTER OPERATOR FAMILY ... ADD FUNCTION
-BEGIN TRANSACTION;
-CREATE OPERATOR FAMILY alt_opf15 USING hash;
-CREATE FUNCTION fn_opf15 (int4, int2) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL;
-ALTER OPERATOR FAMILY alt_opf15 USING hash ADD FUNCTION 1 fn_opf15(int4, int2);
-ERROR:  hash function 1 must have one argument
-DROP OPERATOR FAMILY alt_opf15 USING hash;
-ERROR:  current transaction is aborted, commands ignored until end of transaction block
-ROLLBACK;
--- Should fail. In gist throw an error when giving different data types for function argument
--- without defining left / right type in ALTER OPERATOR FAMILY ... ADD FUNCTION
-CREATE OPERATOR FAMILY alt_opf16 USING gist;
-ALTER OPERATOR FAMILY alt_opf16 USING gist ADD FUNCTION 1 btint42cmp(int4, int2);
-ERROR:  associated data types must be specified for index support function
-DROP OPERATOR FAMILY alt_opf16 USING gist;
--- Should fail. duplicate operator number / function number in ALTER OPERATOR FAMILY ... ADD FUNCTION
-CREATE OPERATOR FAMILY alt_opf17 USING btree;
-ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4), OPERATOR 1 < (int4, int4); -- operator # appears twice in same statement
-ERROR:  operator number 1 for (integer,integer) appears more than once
-ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4); -- operator 1 requested first-time
-ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4); -- operator 1 requested again in separate statement
-ERROR:  operator 1(integer,integer) already exists in operator family "alt_opf17"
-ALTER OPERATOR FAMILY alt_opf17 USING btree ADD
-  OPERATOR 1 < (int4, int2) ,
-  OPERATOR 2 <= (int4, int2) ,
-  OPERATOR 3 = (int4, int2) ,
-  OPERATOR 4 >= (int4, int2) ,
-  OPERATOR 5 > (int4, int2) ,
-  FUNCTION 1 btint42cmp(int4, int2) ,
-  FUNCTION 1 btint42cmp(int4, int2); -- procedure 1 appears twice in same statement
-ERROR:  function number 1 for (integer,smallint) appears more than once
-ALTER OPERATOR FAMILY alt_opf17 USING btree ADD
-  OPERATOR 1 < (int4, int2) ,
-  OPERATOR 2 <= (int4, int2) ,
-  OPERATOR 3 = (int4, int2) ,
-  OPERATOR 4 >= (int4, int2) ,
-  OPERATOR 5 > (int4, int2) ,
-  FUNCTION 1 btint42cmp(int4, int2); -- procedure 1 appears first time
-ALTER OPERATOR FAMILY alt_opf17 USING btree ADD
-  OPERATOR 1 < (int4, int2) ,
-  OPERATOR 2 <= (int4, int2) ,
-  OPERATOR 3 = (int4, int2) ,
-  OPERATOR 4 >= (int4, int2) ,
-  OPERATOR 5 > (int4, int2) ,
-  FUNCTION 1 btint42cmp(int4, int2); -- procedure 1 requested again in separate statement
-ERROR:  operator 1(integer,smallint) already exists in operator family "alt_opf17"
-DROP OPERATOR FAMILY alt_opf17 USING btree;
--- Should fail. Ensure that DROP requests for missing OPERATOR / FUNCTIONS
--- return appropriate message in ALTER OPERATOR FAMILY ... DROP OPERATOR / FUNCTION
-CREATE OPERATOR FAMILY alt_opf18 USING btree;
-ALTER OPERATOR FAMILY alt_opf18 USING btree DROP OPERATOR 1 (int4, int4);
-ERROR:  operator 1(integer,integer) does not exist in operator family "alt_opf18"
-ALTER OPERATOR FAMILY alt_opf18 USING btree ADD
-  OPERATOR 1 < (int4, int2) ,
-  OPERATOR 2 <= (int4, int2) ,
-  OPERATOR 3 = (int4, int2) ,
-  OPERATOR 4 >= (int4, int2) ,
-  OPERATOR 5 > (int4, int2) ,
-  FUNCTION 1 btint42cmp(int4, int2);
--- Should fail. Not allowed to have cross-type equalimage function.
-ALTER OPERATOR FAMILY alt_opf18 USING btree
-  ADD FUNCTION 4 (int4, int2) btequalimage(oid);
-ERROR:  btree equal image functions must not be cross-type
-ALTER OPERATOR FAMILY alt_opf18 USING btree DROP FUNCTION 2 (int4, int4);
-ERROR:  function 2(integer,integer) does not exist in operator family "alt_opf18"
-DROP OPERATOR FAMILY alt_opf18 USING btree;
--- Should fail. Invalid opclass options function (#5) specifications.
-CREATE OPERATOR FAMILY alt_opf19 USING btree;
-ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 test_opclass_options_func(internal, text[], bool);
-ERROR:  function test_opclass_options_func(internal, text[], boolean) does not exist
-ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 (int4) btint42cmp(int4, int2);
-ERROR:  invalid operator class options parsing function
-HINT:  Valid signature of operator class options parsing function is (internal) RETURNS void.
-ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 (int4, int2) btint42cmp(int4, int2);
-ERROR:  left and right associated data types for operator class options parsing functions must match
-ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 (int4) test_opclass_options_func(internal); -- Ok
-ALTER OPERATOR FAMILY alt_opf19 USING btree DROP FUNCTION 5 (int4, int4);
-DROP OPERATOR FAMILY alt_opf19 USING btree;
---
--- Statistics
---
-SET SESSION AUTHORIZATION regress_alter_generic_user1;
-CREATE TABLE alt_regress_1 (a INTEGER, b INTEGER);
-CREATE STATISTICS alt_stat1 ON a, b FROM alt_regress_1;
-CREATE STATISTICS alt_stat2 ON a, b FROM alt_regress_1;
-ALTER STATISTICS alt_stat1 RENAME TO alt_stat2; -- failed (name conflict)
-ERROR:  statistics object "alt_stat2" already exists in schema "alt_nsp1"
-ALTER STATISTICS alt_stat1 RENAME TO alt_stat3; -- OK
-ALTER STATISTICS alt_stat2 OWNER TO regress_alter_generic_user2; -- failed (no role membership)
-ERROR:  must be able to SET ROLE "regress_alter_generic_user2"
-ALTER STATISTICS alt_stat2 OWNER TO regress_alter_generic_user3; -- OK
-ALTER STATISTICS alt_stat2 SET SCHEMA alt_nsp2; -- OK
-SET SESSION AUTHORIZATION regress_alter_generic_user2;
-CREATE TABLE alt_regress_2 (a INTEGER, b INTEGER);
-CREATE STATISTICS alt_stat1 ON a, b FROM alt_regress_2;
-CREATE STATISTICS alt_stat2 ON a, b FROM alt_regress_2;
-ALTER STATISTICS alt_stat3 RENAME TO alt_stat4; -- failed (not owner)
-ERROR:  must be owner of statistics object alt_stat3
-ALTER STATISTICS alt_stat1 RENAME TO alt_stat4; -- OK
-ALTER STATISTICS alt_stat3 OWNER TO regress_alter_generic_user2; -- failed (not owner)
-ERROR:  must be owner of statistics object alt_stat3
-ALTER STATISTICS alt_stat2 OWNER TO regress_alter_generic_user3; -- failed (no role membership)
-ERROR:  must be able to SET ROLE "regress_alter_generic_user3"
-ALTER STATISTICS alt_stat3 SET SCHEMA alt_nsp2; -- failed (not owner)
-ERROR:  must be owner of statistics object alt_stat3
-ALTER STATISTICS alt_stat2 SET SCHEMA alt_nsp2; -- failed (name conflict)
-ERROR:  statistics object "alt_stat2" already exists in schema "alt_nsp2"
-RESET SESSION AUTHORIZATION;
-SELECT nspname, stxname, rolname
-  FROM pg_statistic_ext s, pg_namespace n, pg_authid a
-  WHERE s.stxnamespace = n.oid AND s.stxowner = a.oid
-    AND n.nspname in ('alt_nsp1', 'alt_nsp2')
-  ORDER BY nspname, stxname;
- nspname  |  stxname  |           rolname           
-----------+-----------+-----------------------------
- alt_nsp1 | alt_stat2 | regress_alter_generic_user2
- alt_nsp1 | alt_stat3 | regress_alter_generic_user1
- alt_nsp1 | alt_stat4 | regress_alter_generic_user2
- alt_nsp2 | alt_stat2 | regress_alter_generic_user3
-(4 rows)
-
---
--- Text Search Dictionary
---
-SET SESSION AUTHORIZATION regress_alter_generic_user1;
-CREATE TEXT SEARCH DICTIONARY alt_ts_dict1 (template=simple);
-CREATE TEXT SEARCH DICTIONARY alt_ts_dict2 (template=simple);
-ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 RENAME TO alt_ts_dict2; -- failed (name conflict)
-ERROR:  text search dictionary "alt_ts_dict2" already exists in schema "alt_nsp1"
-ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 RENAME TO alt_ts_dict3; -- OK
-ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_generic_user2; -- failed (no role membership)
-ERROR:  must be able to SET ROLE "regress_alter_generic_user2"
-ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_generic_user3; -- OK
-ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 SET SCHEMA alt_nsp2; -- OK
-SET SESSION AUTHORIZATION regress_alter_generic_user2;
-CREATE TEXT SEARCH DICTIONARY alt_ts_dict1 (template=simple);
-CREATE TEXT SEARCH DICTIONARY alt_ts_dict2 (template=simple);
-ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 RENAME TO alt_ts_dict4; -- failed (not owner)
-ERROR:  must be owner of text search dictionary alt_ts_dict3
-ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 RENAME TO alt_ts_dict4; -- OK
-ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 OWNER TO regress_alter_generic_user2; -- failed (not owner)
-ERROR:  must be owner of text search dictionary alt_ts_dict3
-ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_generic_user3; -- failed (no role membership)
-ERROR:  must be able to SET ROLE "regress_alter_generic_user3"
-ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 SET SCHEMA alt_nsp2; -- failed (not owner)
-ERROR:  must be owner of text search dictionary alt_ts_dict3
-ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 SET SCHEMA alt_nsp2; -- failed (name conflict)
-ERROR:  text search dictionary "alt_ts_dict2" already exists in schema "alt_nsp2"
-RESET SESSION AUTHORIZATION;
-SELECT nspname, dictname, rolname
-  FROM pg_ts_dict t, pg_namespace n, pg_authid a
-  WHERE t.dictnamespace = n.oid AND t.dictowner = a.oid
-    AND n.nspname in ('alt_nsp1', 'alt_nsp2')
-  ORDER BY nspname, dictname;
- nspname  |   dictname   |           rolname           
-----------+--------------+-----------------------------
- alt_nsp1 | alt_ts_dict2 | regress_alter_generic_user2
- alt_nsp1 | alt_ts_dict3 | regress_alter_generic_user1
- alt_nsp1 | alt_ts_dict4 | regress_alter_generic_user2
- alt_nsp2 | alt_ts_dict2 | regress_alter_generic_user3
-(4 rows)
-
---
--- Text Search Configuration
---
-SET SESSION AUTHORIZATION regress_alter_generic_user1;
-CREATE TEXT SEARCH CONFIGURATION alt_ts_conf1 (copy=english);
-CREATE TEXT SEARCH CONFIGURATION alt_ts_conf2 (copy=english);
-ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 RENAME TO alt_ts_conf2; -- failed (name conflict)
-ERROR:  text search configuration "alt_ts_conf2" already exists in schema "alt_nsp1"
-ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 RENAME TO alt_ts_conf3; -- OK
-ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_generic_user2; -- failed (no role membership)
-ERROR:  must be able to SET ROLE "regress_alter_generic_user2"
-ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_generic_user3; -- OK
-ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 SET SCHEMA alt_nsp2; -- OK
-SET SESSION AUTHORIZATION regress_alter_generic_user2;
-CREATE TEXT SEARCH CONFIGURATION alt_ts_conf1 (copy=english);
-CREATE TEXT SEARCH CONFIGURATION alt_ts_conf2 (copy=english);
-ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 RENAME TO alt_ts_conf4; -- failed (not owner)
-ERROR:  must be owner of text search configuration alt_ts_conf3
-ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 RENAME TO alt_ts_conf4; -- OK
-ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 OWNER TO regress_alter_generic_user2; -- failed (not owner)
-ERROR:  must be owner of text search configuration alt_ts_conf3
-ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_generic_user3; -- failed (no role membership)
-ERROR:  must be able to SET ROLE "regress_alter_generic_user3"
-ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 SET SCHEMA alt_nsp2; -- failed (not owner)
-ERROR:  must be owner of text search configuration alt_ts_conf3
-ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 SET SCHEMA alt_nsp2; -- failed (name conflict)
-ERROR:  text search configuration "alt_ts_conf2" already exists in schema "alt_nsp2"
-RESET SESSION AUTHORIZATION;
-SELECT nspname, cfgname, rolname
-  FROM pg_ts_config t, pg_namespace n, pg_authid a
-  WHERE t.cfgnamespace = n.oid AND t.cfgowner = a.oid
-    AND n.nspname in ('alt_nsp1', 'alt_nsp2')
-  ORDER BY nspname, cfgname;
- nspname  |   cfgname    |           rolname           
-----------+--------------+-----------------------------
- alt_nsp1 | alt_ts_conf2 | regress_alter_generic_user2
- alt_nsp1 | alt_ts_conf3 | regress_alter_generic_user1
- alt_nsp1 | alt_ts_conf4 | regress_alter_generic_user2
- alt_nsp2 | alt_ts_conf2 | regress_alter_generic_user3
-(4 rows)
-
---
--- Text Search Template
---
-CREATE TEXT SEARCH TEMPLATE alt_ts_temp1 (lexize=dsimple_lexize);
-CREATE TEXT SEARCH TEMPLATE alt_ts_temp2 (lexize=dsimple_lexize);
-ALTER TEXT SEARCH TEMPLATE alt_ts_temp1 RENAME TO alt_ts_temp2; -- failed (name conflict)
-ERROR:  text search template "alt_ts_temp2" already exists in schema "alt_nsp1"
-ALTER TEXT SEARCH TEMPLATE alt_ts_temp1 RENAME TO alt_ts_temp3; -- OK
-ALTER TEXT SEARCH TEMPLATE alt_ts_temp2 SET SCHEMA alt_nsp2; -- OK
-CREATE TEXT SEARCH TEMPLATE alt_ts_temp2 (lexize=dsimple_lexize);
-ALTER TEXT SEARCH TEMPLATE alt_ts_temp2 SET SCHEMA alt_nsp2; -- failed (name conflict)
-ERROR:  text search template "alt_ts_temp2" already exists in schema "alt_nsp2"
--- invalid: non-lowercase quoted identifiers
-CREATE TEXT SEARCH TEMPLATE tstemp_case ("Init" = init_function);
-ERROR:  text search template parameter "Init" not recognized
-SELECT nspname, tmplname
-  FROM pg_ts_template t, pg_namespace n
-  WHERE t.tmplnamespace = n.oid AND nspname like 'alt_nsp%'
-  ORDER BY nspname, tmplname;
- nspname  |   tmplname   
-----------+--------------
- alt_nsp1 | alt_ts_temp2
- alt_nsp1 | alt_ts_temp3
- alt_nsp2 | alt_ts_temp2
-(3 rows)
-
---
--- Text Search Parser
---
-CREATE TEXT SEARCH PARSER alt_ts_prs1
-    (start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype);
-CREATE TEXT SEARCH PARSER alt_ts_prs2
-    (start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype);
-ALTER TEXT SEARCH PARSER alt_ts_prs1 RENAME TO alt_ts_prs2; -- failed (name conflict)
-ERROR:  text search parser "alt_ts_prs2" already exists in schema "alt_nsp1"
-ALTER TEXT SEARCH PARSER alt_ts_prs1 RENAME TO alt_ts_prs3; -- OK
-ALTER TEXT SEARCH PARSER alt_ts_prs2 SET SCHEMA alt_nsp2; -- OK
-CREATE TEXT SEARCH PARSER alt_ts_prs2
-    (start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype);
-ALTER TEXT SEARCH PARSER alt_ts_prs2 SET SCHEMA alt_nsp2; -- failed (name conflict)
-ERROR:  text search parser "alt_ts_prs2" already exists in schema "alt_nsp2"
--- invalid: non-lowercase quoted identifiers
-CREATE TEXT SEARCH PARSER tspars_case ("Start" = start_function);
-ERROR:  text search parser parameter "Start" not recognized
-SELECT nspname, prsname
-  FROM pg_ts_parser t, pg_namespace n
-  WHERE t.prsnamespace = n.oid AND nspname like 'alt_nsp%'
-  ORDER BY nspname, prsname;
- nspname  |   prsname   
-----------+-------------
- alt_nsp1 | alt_ts_prs2
- alt_nsp1 | alt_ts_prs3
- alt_nsp2 | alt_ts_prs2
-(3 rows)
-
-----
----- Cleanup resources
-----
-DROP FOREIGN DATA WRAPPER alt_fdw2 CASCADE;
-NOTICE:  drop cascades to server alt_fserv2
-DROP FOREIGN DATA WRAPPER alt_fdw3 CASCADE;
-NOTICE:  drop cascades to server alt_fserv3
-DROP LANGUAGE alt_lang2 CASCADE;
-DROP LANGUAGE alt_lang3 CASCADE;
-DROP SCHEMA alt_nsp1 CASCADE;
-NOTICE:  drop cascades to 28 other objects
-DETAIL:  drop cascades to function alt_func3(integer)
-drop cascades to function alt_agg3(integer)
-drop cascades to function alt_func4(integer)
-drop cascades to function alt_func2(integer)
-drop cascades to function alt_agg4(integer)
-drop cascades to function alt_agg2(integer)
-drop cascades to conversion alt_conv3
-drop cascades to conversion alt_conv4
-drop cascades to conversion alt_conv2
-drop cascades to operator @+@(integer,integer)
-drop cascades to operator @-@(integer,integer)
-drop cascades to operator family alt_opf3 for access method hash
-drop cascades to operator family alt_opc1 for access method hash
-drop cascades to operator family alt_opc2 for access method hash
-drop cascades to operator family alt_opf4 for access method hash
-drop cascades to operator family alt_opf2 for access method hash
-drop cascades to table alt_regress_1
-drop cascades to table alt_regress_2
-drop cascades to text search dictionary alt_ts_dict3
-drop cascades to text search dictionary alt_ts_dict4
-drop cascades to text search dictionary alt_ts_dict2
-drop cascades to text search configuration alt_ts_conf3
-drop cascades to text search configuration alt_ts_conf4
-drop cascades to text search configuration alt_ts_conf2
-drop cascades to text search template alt_ts_temp3
-drop cascades to text search template alt_ts_temp2
-drop cascades to text search parser alt_ts_prs3
-drop cascades to text search parser alt_ts_prs2
-DROP SCHEMA alt_nsp2 CASCADE;
-NOTICE:  drop cascades to 9 other objects
-DETAIL:  drop cascades to function alt_nsp2.alt_func2(integer)
-drop cascades to function alt_nsp2.alt_agg2(integer)
-drop cascades to conversion alt_nsp2.alt_conv2
-drop cascades to operator alt_nsp2.@-@(integer,integer)
-drop cascades to operator family alt_nsp2.alt_opf2 for access method hash
-drop cascades to text search dictionary alt_nsp2.alt_ts_dict2
-drop cascades to text search configuration alt_nsp2.alt_ts_conf2
-drop cascades to text search template alt_nsp2.alt_ts_temp2
-drop cascades to text search parser alt_nsp2.alt_ts_prs2
-DROP USER regress_alter_generic_user1;
-DROP USER regress_alter_generic_user2;
-DROP USER regress_alter_generic_user3;
+psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL:  the database system is not yet accepting connections
+DETAIL:  Consistent recovery state has not been yet reached.
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/alter_operator.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/alter_operator.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/alter_operator.out	2024-03-07 14:25:00.329227000 +0000
+++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/alter_operator.out	2024-03-07 14:27:17.075591000 +0000
@@ -1,267 +1,2 @@
-CREATE FUNCTION alter_op_test_fn(boolean, boolean)
-RETURNS boolean AS $$ SELECT NULL::BOOLEAN; $$ LANGUAGE sql IMMUTABLE;
-CREATE FUNCTION customcontsel(internal, oid, internal, integer)
-RETURNS float8 AS 'contsel' LANGUAGE internal STABLE STRICT;
-CREATE OPERATOR === (
-    LEFTARG = boolean,
-    RIGHTARG = boolean,
-    PROCEDURE = alter_op_test_fn,
-    COMMUTATOR = ===,
-    NEGATOR = !==,
-    RESTRICT = customcontsel,
-    JOIN = contjoinsel,
-    HASHES, MERGES
-);
-SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype
-FROM pg_depend
-WHERE classid = 'pg_operator'::regclass AND
-      objid = '===(bool,bool)'::regoperator
-ORDER BY 1;
-                          ref                           | deptype 
---------------------------------------------------------+---------
- function alter_op_test_fn(boolean,boolean)            | n
- function customcontsel(internal,oid,internal,integer) | n
- schema public                                         | n
-(3 rows)
-
---
--- Test resetting and setting restrict and join attributes.
--- -ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = NONE); -ALTER OPERATOR === (boolean, boolean) SET (JOIN = NONE); -SELECT oprrest, oprjoin FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'boolean'::regtype; - oprrest | oprjoin ----------+--------- - - | - -(1 row) - -SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype -FROM pg_depend -WHERE classid = 'pg_operator'::regclass AND - objid = '===(bool,bool)'::regoperator -ORDER BY 1; - ref | deptype ---------------------------------------------+--------- - function alter_op_test_fn(boolean,boolean) | n - schema public | n -(2 rows) - -ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = contsel); -ALTER OPERATOR === (boolean, boolean) SET (JOIN = contjoinsel); -SELECT oprrest, oprjoin FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'boolean'::regtype; - oprrest | oprjoin ----------+------------- - contsel | contjoinsel -(1 row) - -SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype -FROM pg_depend -WHERE classid = 'pg_operator'::regclass AND - objid = '===(bool,bool)'::regoperator -ORDER BY 1; - ref | deptype ---------------------------------------------+--------- - function alter_op_test_fn(boolean,boolean) | n - schema public | n -(2 rows) - -ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = NONE, JOIN = NONE); -SELECT oprrest, oprjoin FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'boolean'::regtype; - oprrest | oprjoin ----------+--------- - - | - -(1 row) - -SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype -FROM pg_depend -WHERE classid = 'pg_operator'::regclass AND - objid = '===(bool,bool)'::regoperator -ORDER BY 1; - ref | deptype ---------------------------------------------+--------- - function alter_op_test_fn(boolean,boolean) | n - schema public | n -(2 rows) - -ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = customcontsel, JOIN = contjoinsel); -SELECT oprrest, oprjoin FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'boolean'::regtype; - oprrest | oprjoin ----------------+------------- - customcontsel | contjoinsel -(1 row) - -SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype -FROM pg_depend -WHERE classid = 'pg_operator'::regclass AND - objid = '===(bool,bool)'::regoperator -ORDER BY 1; - ref | deptype --------------------------------------------------------+--------- - function alter_op_test_fn(boolean,boolean) | n - function customcontsel(internal,oid,internal,integer) | n - schema public | n -(3 rows) - --- --- Test invalid options. --- -ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = non_existent_func); -ERROR: function non_existent_func(internal, oid, internal, integer) does not exist -ALTER OPERATOR === (boolean, boolean) SET (JOIN = non_existent_func); -ERROR: function non_existent_func(internal, oid, internal, smallint, internal) does not exist --- invalid: non-lowercase quoted identifiers -ALTER OPERATOR & (bit, bit) SET ("Restrict" = _int_contsel, "Join" = _int_contjoinsel); -ERROR: operator attribute "Restrict" not recognized --- --- Test permission check. Must be owner to ALTER OPERATOR. 
--- -CREATE USER regress_alter_op_user; -SET SESSION AUTHORIZATION regress_alter_op_user; -ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = NONE); -ERROR: must be owner of operator === -RESET SESSION AUTHORIZATION; --- --- Test setting commutator, negator, merges, and hashes attributes, --- which can only be set if not already set --- -CREATE FUNCTION alter_op_test_fn_bool_real(boolean, real) -RETURNS boolean AS $$ SELECT NULL::BOOLEAN; $$ LANGUAGE sql IMMUTABLE; -CREATE FUNCTION alter_op_test_fn_real_bool(real, boolean) -RETURNS boolean AS $$ SELECT NULL::BOOLEAN; $$ LANGUAGE sql IMMUTABLE; --- operator -CREATE OPERATOR === ( - LEFTARG = boolean, - RIGHTARG = real, - PROCEDURE = alter_op_test_fn_bool_real -); --- commutator -CREATE OPERATOR ==== ( - LEFTARG = real, - RIGHTARG = boolean, - PROCEDURE = alter_op_test_fn_real_bool -); --- negator -CREATE OPERATOR !==== ( - LEFTARG = boolean, - RIGHTARG = real, - PROCEDURE = alter_op_test_fn_bool_real -); --- No-op setting already false hashes and merges to false works -ALTER OPERATOR === (boolean, real) SET (MERGES = false); -ALTER OPERATOR === (boolean, real) SET (HASHES = false); --- Test setting merges and hashes -ALTER OPERATOR === (boolean, real) SET (MERGES); -ALTER OPERATOR === (boolean, real) SET (HASHES); -SELECT oprcanmerge, oprcanhash -FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'real'::regtype; - oprcanmerge | oprcanhash --------------+------------ - t | t -(1 row) - --- Test setting commutator -ALTER OPERATOR === (boolean, real) SET (COMMUTATOR = ====); --- Check that oprcom has been set on both the operator and commutator, --- that they reference each other, and that the operator used is the existing --- one we created and not a new shell operator. -SELECT op.oprname AS operator_name, com.oprname AS commutator_name, - com.oprcode AS commutator_func - FROM pg_operator op - INNER JOIN pg_operator com ON (op.oid = com.oprcom AND op.oprcom = com.oid) - WHERE op.oprname = '===' - AND op.oprleft = 'boolean'::regtype AND op.oprright = 'real'::regtype; - operator_name | commutator_name | commutator_func ----------------+-----------------+---------------------------- - === | ==== | alter_op_test_fn_real_bool -(1 row) - --- Cannot set self as negator -ALTER OPERATOR === (boolean, real) SET (NEGATOR = ===); -ERROR: operator cannot be its own negator --- Test setting negator -ALTER OPERATOR === (boolean, real) SET (NEGATOR = !====); --- Check that oprnegate has been set on both the operator and negator, --- that they reference each other, and that the operator used is the existing --- one we created and not a new shell operator. 
-SELECT op.oprname AS operator_name, neg.oprname AS negator_name, - neg.oprcode AS negator_func - FROM pg_operator op - INNER JOIN pg_operator neg ON (op.oid = neg.oprnegate AND op.oprnegate = neg.oid) - WHERE op.oprname = '===' - AND op.oprleft = 'boolean'::regtype AND op.oprright = 'real'::regtype; - operator_name | negator_name | negator_func ----------------+--------------+---------------------------- - === | !==== | alter_op_test_fn_bool_real -(1 row) - --- Test that no-op set succeeds -ALTER OPERATOR === (boolean, real) SET (NEGATOR = !====); -ALTER OPERATOR === (boolean, real) SET (COMMUTATOR = ====); -ALTER OPERATOR === (boolean, real) SET (MERGES); -ALTER OPERATOR === (boolean, real) SET (HASHES); --- Check that the final state of the operator is as we expect -SELECT oprcanmerge, oprcanhash, - pg_describe_object('pg_operator'::regclass, oprcom, 0) AS commutator, - pg_describe_object('pg_operator'::regclass, oprnegate, 0) AS negator - FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'real'::regtype; - oprcanmerge | oprcanhash | commutator | negator --------------+------------+-----------------------------+------------------------------ - t | t | operator ====(real,boolean) | operator !====(boolean,real) -(1 row) - --- Cannot change commutator, negator, merges, and hashes when already set -CREATE OPERATOR @= ( - LEFTARG = real, - RIGHTARG = boolean, - PROCEDURE = alter_op_test_fn_real_bool -); -CREATE OPERATOR @!= ( - LEFTARG = boolean, - RIGHTARG = real, - PROCEDURE = alter_op_test_fn_bool_real -); -ALTER OPERATOR === (boolean, real) SET (COMMUTATOR = @=); -ERROR: operator attribute "commutator" cannot be changed if it has already been set -ALTER OPERATOR === (boolean, real) SET (NEGATOR = @!=); -ERROR: operator attribute "negator" cannot be changed if it has already been set -ALTER OPERATOR === (boolean, real) SET (MERGES = false); -ERROR: operator attribute "merges" cannot be changed if it has already been set -ALTER OPERATOR === (boolean, real) SET (HASHES = false); -ERROR: operator attribute "hashes" cannot be changed if it has already been set --- Cannot set an operator that already has a commutator as the commutator -ALTER OPERATOR @=(real, boolean) SET (COMMUTATOR = ===); -ERROR: commutator operator === is already the commutator of operator ==== --- Cannot set an operator that already has a negator as the negator -ALTER OPERATOR @!=(boolean, real) SET (NEGATOR = ===); -ERROR: negator operator === is already the negator of operator !==== --- Check no changes made -SELECT oprcanmerge, oprcanhash, - pg_describe_object('pg_operator'::regclass, oprcom, 0) AS commutator, - pg_describe_object('pg_operator'::regclass, oprnegate, 0) AS negator - FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'real'::regtype; - oprcanmerge | oprcanhash | commutator | negator --------------+------------+-----------------------------+------------------------------ - t | t | operator ====(real,boolean) | operator !====(boolean,real) -(1 row) - --- --- Clean up --- -DROP USER regress_alter_op_user; -DROP OPERATOR === (boolean, boolean); -DROP OPERATOR === (boolean, real); -DROP OPERATOR ==== (real, boolean); -DROP OPERATOR !==== (boolean, real); -DROP OPERATOR @= (real, boolean); -DROP OPERATOR @!= (boolean, real); -DROP FUNCTION customcontsel(internal, oid, internal, integer); -DROP FUNCTION alter_op_test_fn(boolean, boolean); -DROP FUNCTION alter_op_test_fn_bool_real(boolean, real); -DROP FUNCTION 
alter_op_test_fn_real_bool(real, boolean); +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/misc.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/misc.out --- /tmp/cirrus-ci-build/src/test/regress/expected/misc.out 2024-03-07 14:25:00.331982000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/misc.out 2024-03-07 14:27:17.079435000 +0000 @@ -1,398 +1,2 @@ --- --- MISC --- --- directory paths and dlsuffix are passed to us in environment variables -\getenv abs_srcdir PG_ABS_SRCDIR -\getenv abs_builddir PG_ABS_BUILDDIR -\getenv libdir PG_LIBDIR -\getenv dlsuffix PG_DLSUFFIX -\set regresslib :libdir '/regress' :dlsuffix -CREATE FUNCTION overpaid(emp) - RETURNS bool - AS :'regresslib' - LANGUAGE C STRICT; -CREATE FUNCTION reverse_name(name) - RETURNS name - AS :'regresslib' - LANGUAGE C STRICT; --- --- BTREE --- -UPDATE onek - SET unique1 = onek.unique1 + 1; -UPDATE onek - SET unique1 = onek.unique1 - 1; --- --- BTREE partial --- --- UPDATE onek2 --- SET unique1 = onek2.unique1 + 1; ---UPDATE onek2 --- SET unique1 = onek2.unique1 - 1; --- --- BTREE shutting out non-functional updates --- --- the following two tests seem to take a long time on some --- systems. This non-func update stuff needs to be examined --- more closely. - jolly (2/22/96) --- -SELECT two, stringu1, ten, string4 - INTO TABLE tmp - FROM onek; -UPDATE tmp - SET stringu1 = reverse_name(onek.stringu1) - FROM onek - WHERE onek.stringu1 = 'JBAAAA' and - onek.stringu1 = tmp.stringu1; -UPDATE tmp - SET stringu1 = reverse_name(onek2.stringu1) - FROM onek2 - WHERE onek2.stringu1 = 'JCAAAA' and - onek2.stringu1 = tmp.stringu1; -DROP TABLE tmp; ---UPDATE person* --- SET age = age + 1; ---UPDATE person* --- SET age = age + 3 --- WHERE name = 'linda'; --- --- copy --- -\set filename :abs_builddir '/results/onek.data' -COPY onek TO :'filename'; -CREATE TEMP TABLE onek_copy (LIKE onek); -COPY onek_copy FROM :'filename'; -SELECT * FROM onek EXCEPT ALL SELECT * FROM onek_copy; - unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 ----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- -(0 rows) - -SELECT * FROM onek_copy EXCEPT ALL SELECT * FROM onek; - unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 ----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- -(0 rows) - -\set filename :abs_builddir '/results/stud_emp.data' -COPY BINARY stud_emp TO :'filename'; -CREATE TEMP TABLE stud_emp_copy (LIKE stud_emp); -COPY BINARY stud_emp_copy FROM :'filename'; -SELECT * FROM stud_emp_copy; - name | age | location | salary | manager | gpa | percent --------+-----+------------+--------+---------+-----+--------- - jeff | 23 | (8,7.7) | 600 | sharon | 3.5 | - cim | 30 | (10.5,4.7) | 400 | | 3.4 | - linda | 19 | (0.9,6.1) | 100 | | 2.9 | -(3 rows) - --- --- test data for postquel functions --- -CREATE TABLE hobbies_r ( - name text, - person text -); -CREATE TABLE equipment_r ( - name text, - hobby text -); -INSERT INTO 
hobbies_r (name, person) - SELECT 'posthacking', p.name - FROM person* p - WHERE p.name = 'mike' or p.name = 'jeff'; -INSERT INTO hobbies_r (name, person) - SELECT 'basketball', p.name - FROM person p - WHERE p.name = 'joe' or p.name = 'sally'; -INSERT INTO hobbies_r (name) VALUES ('skywalking'); -INSERT INTO equipment_r (name, hobby) VALUES ('advil', 'posthacking'); -INSERT INTO equipment_r (name, hobby) VALUES ('peet''s coffee', 'posthacking'); -INSERT INTO equipment_r (name, hobby) VALUES ('hightops', 'basketball'); -INSERT INTO equipment_r (name, hobby) VALUES ('guts', 'skywalking'); --- --- postquel functions --- -CREATE FUNCTION hobbies(person) - RETURNS setof hobbies_r - AS 'select * from hobbies_r where person = $1.name' - LANGUAGE SQL; -CREATE FUNCTION hobby_construct(text, text) - RETURNS hobbies_r - AS 'select $1 as name, $2 as hobby' - LANGUAGE SQL; -CREATE FUNCTION hobby_construct_named(name text, hobby text) - RETURNS hobbies_r - AS 'select name, hobby' - LANGUAGE SQL; -CREATE FUNCTION hobbies_by_name(hobbies_r.name%TYPE) - RETURNS hobbies_r.person%TYPE - AS 'select person from hobbies_r where name = $1' - LANGUAGE SQL; -NOTICE: type reference hobbies_r.name%TYPE converted to text -NOTICE: type reference hobbies_r.person%TYPE converted to text -CREATE FUNCTION equipment(hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where hobby = $1.name' - LANGUAGE SQL; -CREATE FUNCTION equipment_named(hobby hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where equipment_r.hobby = equipment_named.hobby.name' - LANGUAGE SQL; -CREATE FUNCTION equipment_named_ambiguous_1a(hobby hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where hobby = equipment_named_ambiguous_1a.hobby.name' - LANGUAGE SQL; -CREATE FUNCTION equipment_named_ambiguous_1b(hobby hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where equipment_r.hobby = hobby.name' - LANGUAGE SQL; -CREATE FUNCTION equipment_named_ambiguous_1c(hobby hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where hobby = hobby.name' - LANGUAGE SQL; -CREATE FUNCTION equipment_named_ambiguous_2a(hobby text) - RETURNS setof equipment_r - AS 'select * from equipment_r where hobby = equipment_named_ambiguous_2a.hobby' - LANGUAGE SQL; -CREATE FUNCTION equipment_named_ambiguous_2b(hobby text) - RETURNS setof equipment_r - AS 'select * from equipment_r where equipment_r.hobby = hobby' - LANGUAGE SQL; --- --- mike does post_hacking, --- joe and sally play basketball, and --- everyone else does nothing. --- -SELECT p.name, name(p.hobbies) FROM ONLY person p; - name | name --------+------------- - mike | posthacking - joe | basketball - sally | basketball -(3 rows) - --- --- as above, but jeff also does post_hacking. --- -SELECT p.name, name(p.hobbies) FROM person* p; - name | name --------+------------- - mike | posthacking - joe | basketball - sally | basketball - jeff | posthacking -(4 rows) - --- --- the next two queries demonstrate how functions generate bogus duplicates. --- this is a "feature" .. 
--- -SELECT DISTINCT hobbies_r.name, name(hobbies_r.equipment) FROM hobbies_r - ORDER BY 1,2; - name | name --------------+--------------- - basketball | hightops - posthacking | advil - posthacking | peet's coffee - skywalking | guts -(4 rows) - -SELECT hobbies_r.name, (hobbies_r.equipment).name FROM hobbies_r; - name | name --------------+--------------- - posthacking | advil - posthacking | peet's coffee - posthacking | advil - posthacking | peet's coffee - basketball | hightops - basketball | hightops - skywalking | guts -(7 rows) - --- --- mike needs advil and peet's coffee, --- joe and sally need hightops, and --- everyone else is fine. --- -SELECT p.name, name(p.hobbies), name(equipment(p.hobbies)) FROM ONLY person p; - name | name | name --------+-------------+--------------- - mike | posthacking | advil - mike | posthacking | peet's coffee - joe | basketball | hightops - sally | basketball | hightops -(4 rows) - --- --- as above, but jeff needs advil and peet's coffee as well. --- -SELECT p.name, name(p.hobbies), name(equipment(p.hobbies)) FROM person* p; - name | name | name --------+-------------+--------------- - mike | posthacking | advil - mike | posthacking | peet's coffee - joe | basketball | hightops - sally | basketball | hightops - jeff | posthacking | advil - jeff | posthacking | peet's coffee -(6 rows) - --- --- just like the last two, but make sure that the target list fixup and --- unflattening is being done correctly. --- -SELECT name(equipment(p.hobbies)), p.name, name(p.hobbies) FROM ONLY person p; - name | name | name ----------------+-------+------------- - advil | mike | posthacking - peet's coffee | mike | posthacking - hightops | joe | basketball - hightops | sally | basketball -(4 rows) - -SELECT (p.hobbies).equipment.name, p.name, name(p.hobbies) FROM person* p; - name | name | name ----------------+-------+------------- - advil | mike | posthacking - peet's coffee | mike | posthacking - hightops | joe | basketball - hightops | sally | basketball - advil | jeff | posthacking - peet's coffee | jeff | posthacking -(6 rows) - -SELECT (p.hobbies).equipment.name, name(p.hobbies), p.name FROM ONLY person p; - name | name | name ----------------+-------------+------- - advil | posthacking | mike - peet's coffee | posthacking | mike - hightops | basketball | joe - hightops | basketball | sally -(4 rows) - -SELECT name(equipment(p.hobbies)), name(p.hobbies), p.name FROM person* p; - name | name | name ----------------+-------------+------- - advil | posthacking | mike - peet's coffee | posthacking | mike - hightops | basketball | joe - hightops | basketball | sally - advil | posthacking | jeff - peet's coffee | posthacking | jeff -(6 rows) - -SELECT name(equipment(hobby_construct(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment(hobby_construct_named(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment_named(hobby_construct_named(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment_named_ambiguous_1a(hobby_construct_named(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment_named_ambiguous_1b(hobby_construct_named(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment_named_ambiguous_1c(hobby_construct_named(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment_named_ambiguous_2a(text 'skywalking')); - name ------- - guts -(1 row) - -SELECT 
name(equipment_named_ambiguous_2b(text 'skywalking')); - name ---------------- - advil - peet's coffee - hightops - guts -(4 rows) - -SELECT hobbies_by_name('basketball'); - hobbies_by_name ------------------ - joe -(1 row) - -SELECT name, overpaid(emp.*) FROM emp; - name | overpaid ---------+---------- - sharon | t - sam | t - bill | t - jeff | f - cim | f - linda | f -(6 rows) - --- --- Try a few cases with SQL-spec row constructor expressions --- -SELECT * FROM equipment(ROW('skywalking', 'mer')); - name | hobby -------+------------ - guts | skywalking -(1 row) - -SELECT name(equipment(ROW('skywalking', 'mer'))); - name ------- - guts -(1 row) - -SELECT *, name(equipment(h.*)) FROM hobbies_r h; - name | person | name --------------+--------+--------------- - posthacking | mike | advil - posthacking | mike | peet's coffee - posthacking | jeff | advil - posthacking | jeff | peet's coffee - basketball | joe | hightops - basketball | sally | hightops - skywalking | | guts -(7 rows) - -SELECT *, (equipment(CAST((h.*) AS hobbies_r))).name FROM hobbies_r h; - name | person | name --------------+--------+--------------- - posthacking | mike | advil - posthacking | mike | peet's coffee - posthacking | jeff | advil - posthacking | jeff | peet's coffee - basketball | joe | hightops - basketball | sally | hightops - skywalking | | guts -(7 rows) - --- --- functional joins --- --- --- instance rules --- --- --- rewrite rules --- +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/async.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/async.out --- /tmp/cirrus-ci-build/src/test/regress/expected/async.out 2024-03-07 14:25:00.329365000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/async.out 2024-03-07 14:27:17.082214000 +0000 @@ -1,42 +1,2 @@ --- --- ASYNC --- ---Should work. Send a valid message via a valid channel name -SELECT pg_notify('notify_async1','sample message1'); - pg_notify ------------ - -(1 row) - -SELECT pg_notify('notify_async1',''); - pg_notify ------------ - -(1 row) - -SELECT pg_notify('notify_async1',NULL); - pg_notify ------------ - -(1 row) - --- Should fail. Send a valid message via an invalid channel name -SELECT pg_notify('','sample message1'); -ERROR: channel name cannot be empty -SELECT pg_notify(NULL,'sample message1'); -ERROR: channel name cannot be empty -SELECT pg_notify('notify_async_channel_name_too_long______________________________','sample_message1'); -ERROR: channel name too long ---Should work. Valid NOTIFY/LISTEN/UNLISTEN commands -NOTIFY notify_async2; -LISTEN notify_async2; -UNLISTEN notify_async2; -UNLISTEN *; --- Should return zero while there are no pending notifications. --- src/test/isolation/specs/async-notify.spec tests for actual usage. -SELECT pg_notification_queue_usage(); - pg_notification_queue_usage ------------------------------ - 0 -(1 row) - +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
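
The alter_operator, misc, and async results above, and the dbsize and merge results below, all fail identically: instead of a content diff, each file collapses to the two psql lines showing the connection being rejected while the restarted server was still performing recovery ("Consistent recovery state has not been yet reached"). When reproducing locally, a minimal sketch for waiting out recovery before retrying the suite is below; pg_isready and pg_is_in_recovery() are the standard tools, while the socket directory, port, and database name are the ephemeral ones from this particular pg_regress run, so substitute your own:

    # poll until the restarted cluster reports "accepting connections"
    pg_isready -h /tmp/pg_regress-zV0LjT -p 40051
    # then confirm the server has left recovery before re-running tests
    psql -h /tmp/pg_regress-zV0LjT -p 40051 -d regression \
         -c 'SELECT pg_is_in_recovery();'
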
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/dbsize.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/dbsize.out --- /tmp/cirrus-ci-build/src/test/regress/expected/dbsize.out 2024-03-07 14:25:00.330125000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/dbsize.out 2024-03-07 14:27:17.080613000 +0000 @@ -1,195 +1,2 @@ -SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM - (VALUES (10::bigint), (1000::bigint), (1000000::bigint), - (1000000000::bigint), (1000000000000::bigint), - (1000000000000000::bigint)) x(size); - size | pg_size_pretty | pg_size_pretty -------------------+----------------+---------------- - 10 | 10 bytes | -10 bytes - 1000 | 1000 bytes | -1000 bytes - 1000000 | 977 kB | -977 kB - 1000000000 | 954 MB | -954 MB - 1000000000000 | 931 GB | -931 GB - 1000000000000000 | 909 TB | -909 TB -(6 rows) - -SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM - (VALUES (10::numeric), (1000::numeric), (1000000::numeric), - (1000000000::numeric), (1000000000000::numeric), - (1000000000000000::numeric), - (10.5::numeric), (1000.5::numeric), (1000000.5::numeric), - (1000000000.5::numeric), (1000000000000.5::numeric), - (1000000000000000.5::numeric)) x(size); - size | pg_size_pretty | pg_size_pretty ---------------------+----------------+---------------- - 10 | 10 bytes | -10 bytes - 1000 | 1000 bytes | -1000 bytes - 1000000 | 977 kB | -977 kB - 1000000000 | 954 MB | -954 MB - 1000000000000 | 931 GB | -931 GB - 1000000000000000 | 909 TB | -909 TB - 10.5 | 10.5 bytes | -10.5 bytes - 1000.5 | 1000.5 bytes | -1000.5 bytes - 1000000.5 | 977 kB | -977 kB - 1000000000.5 | 954 MB | -954 MB - 1000000000000.5 | 931 GB | -931 GB - 1000000000000000.5 | 909 TB | -909 TB -(12 rows) - --- test where units change up -SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM - (VALUES (10239::bigint), (10240::bigint), - (10485247::bigint), (10485248::bigint), - (10736893951::bigint), (10736893952::bigint), - (10994579406847::bigint), (10994579406848::bigint), - (11258449312612351::bigint), (11258449312612352::bigint)) x(size); - size | pg_size_pretty | pg_size_pretty --------------------+----------------+---------------- - 10239 | 10239 bytes | -10239 bytes - 10240 | 10 kB | -10 kB - 10485247 | 10239 kB | -10239 kB - 10485248 | 10 MB | -10 MB - 10736893951 | 10239 MB | -10239 MB - 10736893952 | 10 GB | -10 GB - 10994579406847 | 10239 GB | -10239 GB - 10994579406848 | 10 TB | -10 TB - 11258449312612351 | 10239 TB | -10239 TB - 11258449312612352 | 10 PB | -10 PB -(10 rows) - -SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM - (VALUES (10239::numeric), (10240::numeric), - (10485247::numeric), (10485248::numeric), - (10736893951::numeric), (10736893952::numeric), - (10994579406847::numeric), (10994579406848::numeric), - (11258449312612351::numeric), (11258449312612352::numeric), - (11528652096115048447::numeric), (11528652096115048448::numeric)) x(size); - size | pg_size_pretty | pg_size_pretty -----------------------+----------------+---------------- - 10239 | 10239 bytes | -10239 bytes - 10240 | 10 kB | -10 kB - 10485247 | 10239 kB | -10239 kB - 10485248 | 10 MB | -10 MB - 10736893951 | 10239 MB | -10239 MB - 10736893952 | 10 GB | -10 GB - 10994579406847 | 10239 GB | -10239 GB - 10994579406848 | 10 TB | -10 TB - 11258449312612351 | 10239 TB | -10239 TB - 11258449312612352 | 10 PB | -10 PB - 11528652096115048447 | 10239 PB | -10239 PB - 11528652096115048448 | 10240 PB | -10240 PB -(12 rows) - --- pg_size_bytes() 
tests -SELECT size, pg_size_bytes(size) FROM - (VALUES ('1'), ('123bytes'), ('256 B'), ('1kB'), ('1MB'), (' 1 GB'), ('1.5 GB '), - ('1TB'), ('3000 TB'), ('1e6 MB'), ('99 PB')) x(size); - size | pg_size_bytes -----------+-------------------- - 1 | 1 - 123bytes | 123 - 256 B | 256 - 1kB | 1024 - 1MB | 1048576 - 1 GB | 1073741824 - 1.5 GB | 1610612736 - 1TB | 1099511627776 - 3000 TB | 3298534883328000 - 1e6 MB | 1048576000000 - 99 PB | 111464090777419776 -(11 rows) - --- case-insensitive units are supported -SELECT size, pg_size_bytes(size) FROM - (VALUES ('1'), ('123bYteS'), ('1kb'), ('1mb'), (' 1 Gb'), ('1.5 gB '), - ('1tb'), ('3000 tb'), ('1e6 mb'), ('99 pb')) x(size); - size | pg_size_bytes -----------+-------------------- - 1 | 1 - 123bYteS | 123 - 1kb | 1024 - 1mb | 1048576 - 1 Gb | 1073741824 - 1.5 gB | 1610612736 - 1tb | 1099511627776 - 3000 tb | 3298534883328000 - 1e6 mb | 1048576000000 - 99 pb | 111464090777419776 -(10 rows) - --- negative numbers are supported -SELECT size, pg_size_bytes(size) FROM - (VALUES ('-1'), ('-123bytes'), ('-1kb'), ('-1mb'), (' -1 Gb'), ('-1.5 gB '), - ('-1tb'), ('-3000 TB'), ('-10e-1 MB'), ('-99 PB')) x(size); - size | pg_size_bytes ------------+--------------------- - -1 | -1 - -123bytes | -123 - -1kb | -1024 - -1mb | -1048576 - -1 Gb | -1073741824 - -1.5 gB | -1610612736 - -1tb | -1099511627776 - -3000 TB | -3298534883328000 - -10e-1 MB | -1048576 - -99 PB | -111464090777419776 -(10 rows) - --- different cases with allowed points -SELECT size, pg_size_bytes(size) FROM - (VALUES ('-1.'), ('-1.kb'), ('-1. kb'), ('-0. gb'), - ('-.1'), ('-.1kb'), ('-.1 kb'), ('-.0 gb')) x(size); - size | pg_size_bytes ---------+--------------- - -1. | -1 - -1.kb | -1024 - -1. kb | -1024 - -0. gb | 0 - -.1 | 0 - -.1kb | -102 - -.1 kb | -102 - -.0 gb | 0 -(8 rows) - --- invalid inputs -SELECT pg_size_bytes('1 AB'); -ERROR: invalid size: "1 AB" -DETAIL: Invalid size unit: "AB". -HINT: Valid units are "bytes", "B", "kB", "MB", "GB", "TB", and "PB". -SELECT pg_size_bytes('1 AB A'); -ERROR: invalid size: "1 AB A" -DETAIL: Invalid size unit: "AB A". -HINT: Valid units are "bytes", "B", "kB", "MB", "GB", "TB", and "PB". -SELECT pg_size_bytes('1 AB A '); -ERROR: invalid size: "1 AB A " -DETAIL: Invalid size unit: "AB A". -HINT: Valid units are "bytes", "B", "kB", "MB", "GB", "TB", and "PB". -SELECT pg_size_bytes('9223372036854775807.9'); -ERROR: bigint out of range -SELECT pg_size_bytes('1e100'); -ERROR: bigint out of range -SELECT pg_size_bytes('1e1000000000000000000'); -ERROR: value overflows numeric format -SELECT pg_size_bytes('1 byte'); -- the singular "byte" is not supported -ERROR: invalid size: "1 byte" -DETAIL: Invalid size unit: "byte". -HINT: Valid units are "bytes", "B", "kB", "MB", "GB", "TB", and "PB". -SELECT pg_size_bytes(''); -ERROR: invalid size: "" -SELECT pg_size_bytes('kb'); -ERROR: invalid size: "kb" -SELECT pg_size_bytes('..'); -ERROR: invalid size: ".." -SELECT pg_size_bytes('-.'); -ERROR: invalid size: "-." -SELECT pg_size_bytes('-.kb'); -ERROR: invalid size: "-.kb" -SELECT pg_size_bytes('-. kb'); -ERROR: invalid size: "-. kb" -SELECT pg_size_bytes('.+912'); -ERROR: invalid size: ".+912" -SELECT pg_size_bytes('+912+ kB'); -ERROR: invalid size: "+912+ kB" -DETAIL: Invalid size unit: "+ kB". -HINT: Valid units are "bytes", "B", "kB", "MB", "GB", "TB", and "PB". 
-SELECT pg_size_bytes('++123 kB'); -ERROR: invalid size: "++123 kB" +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/merge.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/merge.out --- /tmp/cirrus-ci-build/src/test/regress/expected/merge.out 2024-03-07 14:25:00.331967000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/merge.out 2024-03-07 14:27:17.083601000 +0000 @@ -1,2250 +1,2 @@ --- --- MERGE --- -CREATE USER regress_merge_privs; -CREATE USER regress_merge_no_privs; -CREATE USER regress_merge_none; -DROP TABLE IF EXISTS target; -NOTICE: table "target" does not exist, skipping -DROP TABLE IF EXISTS source; -NOTICE: table "source" does not exist, skipping -CREATE TABLE target (tid integer, balance integer) - WITH (autovacuum_enabled=off); -CREATE TABLE source (sid integer, delta integer) -- no index - WITH (autovacuum_enabled=off); -INSERT INTO target VALUES (1, 10); -INSERT INTO target VALUES (2, 20); -INSERT INTO target VALUES (3, 30); -SELECT t.ctid is not null as matched, t.*, s.* FROM source s FULL OUTER JOIN target t ON s.sid = t.tid ORDER BY t.tid, s.sid; - matched | tid | balance | sid | delta ----------+-----+---------+-----+------- - t | 1 | 10 | | - t | 2 | 20 | | - t | 3 | 30 | | -(3 rows) - -ALTER TABLE target OWNER TO regress_merge_privs; -ALTER TABLE source OWNER TO regress_merge_privs; -CREATE TABLE target2 (tid integer, balance integer) - WITH (autovacuum_enabled=off); -CREATE TABLE source2 (sid integer, delta integer) - WITH (autovacuum_enabled=off); -ALTER TABLE target2 OWNER TO regress_merge_no_privs; -ALTER TABLE source2 OWNER TO regress_merge_no_privs; -GRANT INSERT ON target TO regress_merge_no_privs; -SET SESSION AUTHORIZATION regress_merge_privs; -EXPLAIN (COSTS OFF) -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DELETE; - QUERY PLAN ----------------------------------------- - Merge on target t - -> Merge Join - Merge Cond: (t.tid = s.sid) - -> Sort - Sort Key: t.tid - -> Seq Scan on target t - -> Sort - Sort Key: s.sid - -> Seq Scan on source s -(9 rows) - --- --- Errors --- -MERGE INTO target t RANDOMWORD -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -ERROR: syntax error at or near "RANDOMWORD" -LINE 1: MERGE INTO target t RANDOMWORD - ^ --- MATCHED/INSERT error -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - INSERT DEFAULT VALUES; -ERROR: syntax error at or near "INSERT" -LINE 5: INSERT DEFAULT VALUES; - ^ --- incorrectly specifying INTO target -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT INTO target DEFAULT VALUES; -ERROR: syntax error at or near "INTO" -LINE 5: INSERT INTO target DEFAULT VALUES; - ^ --- Multiple VALUES clause -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT VALUES (1,1), (2,2); -ERROR: syntax error at or near "," -LINE 5: INSERT VALUES (1,1), (2,2); - ^ --- SELECT query for INSERT -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT SELECT (1, 1); -ERROR: syntax error at or near "SELECT" -LINE 5: INSERT SELECT (1, 1); - ^ --- NOT MATCHED/UPDATE -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - UPDATE SET balance = 0; -ERROR: syntax error at 
or near "UPDATE" -LINE 5: UPDATE SET balance = 0; - ^ --- UPDATE tablename -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE target SET balance = 0; -ERROR: syntax error at or near "target" -LINE 5: UPDATE target SET balance = 0; - ^ --- source and target names the same -MERGE INTO target -USING target -ON tid = tid -WHEN MATCHED THEN DO NOTHING; -ERROR: name "target" specified more than once -DETAIL: The name is used both as MERGE target table and data source. --- used in a CTE -WITH foo AS ( - MERGE INTO target USING source ON (true) - WHEN MATCHED THEN DELETE -) SELECT * FROM foo; -ERROR: MERGE not supported in WITH query -LINE 1: WITH foo AS ( - ^ --- used in COPY -COPY ( - MERGE INTO target USING source ON (true) - WHEN MATCHED THEN DELETE -) TO stdout; -ERROR: MERGE not supported in COPY --- unsupported relation types --- materialized view -CREATE MATERIALIZED VIEW mv AS SELECT * FROM target; -MERGE INTO mv t -USING source s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT DEFAULT VALUES; -ERROR: cannot execute MERGE on relation "mv" -DETAIL: This operation is not supported for materialized views. -DROP MATERIALIZED VIEW mv; --- permissions -SET SESSION AUTHORIZATION regress_merge_none; -MERGE INTO target -USING (SELECT 1) -ON true -WHEN MATCHED THEN - DO NOTHING; -ERROR: permission denied for table target -SET SESSION AUTHORIZATION regress_merge_privs; -MERGE INTO target -USING source2 -ON target.tid = source2.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -ERROR: permission denied for table source2 -GRANT INSERT ON target TO regress_merge_no_privs; -SET SESSION AUTHORIZATION regress_merge_no_privs; -MERGE INTO target -USING source2 -ON target.tid = source2.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -ERROR: permission denied for table target -GRANT UPDATE ON target2 TO regress_merge_privs; -SET SESSION AUTHORIZATION regress_merge_privs; -MERGE INTO target2 -USING source -ON target2.tid = source.sid -WHEN MATCHED THEN - DELETE; -ERROR: permission denied for table target2 -MERGE INTO target2 -USING source -ON target2.tid = source.sid -WHEN NOT MATCHED THEN - INSERT DEFAULT VALUES; -ERROR: permission denied for table target2 --- check if the target can be accessed from source relation subquery; we should --- not be able to do so -MERGE INTO target t -USING (SELECT * FROM source WHERE t.tid > sid) s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT DEFAULT VALUES; -ERROR: invalid reference to FROM-clause entry for table "t" -LINE 2: USING (SELECT * FROM source WHERE t.tid > sid) s - ^ -DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. 
--- --- initial tests --- --- zero rows in source has no effect -MERGE INTO target -USING source -ON target.tid = source.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DELETE; -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT DEFAULT VALUES; -ROLLBACK; --- insert some non-matching source rows to work from -INSERT INTO source VALUES (4, 40); -SELECT * FROM source ORDER BY sid; - sid | delta ------+------- - 4 | 40 -(1 row) - -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 -(3 rows) - -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - DO NOTHING; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DELETE; -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT DEFAULT VALUES; -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - | -(4 rows) - -ROLLBACK; --- index plans -INSERT INTO target SELECT generate_series(1000,2500), 0; -ALTER TABLE target ADD PRIMARY KEY (tid); -ANALYZE target; -EXPLAIN (COSTS OFF) -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; - QUERY PLAN ----------------------------------------- - Merge on target t - -> Hash Join - Hash Cond: (s.sid = t.tid) - -> Seq Scan on source s - -> Hash - -> Seq Scan on target t -(6 rows) - -EXPLAIN (COSTS OFF) -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DELETE; - QUERY PLAN ----------------------------------------- - Merge on target t - -> Hash Join - Hash Cond: (s.sid = t.tid) - -> Seq Scan on source s - -> Hash - -> Seq Scan on target t -(6 rows) - -EXPLAIN (COSTS OFF) -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT VALUES (4, NULL); - QUERY PLAN ----------------------------------------- - Merge on target t - -> Hash Left Join - Hash Cond: (s.sid = t.tid) - -> Seq Scan on source s - -> Hash - -> Seq Scan on target t -(6 rows) - -DELETE FROM target WHERE tid > 100; -ANALYZE target; --- insert some matching source rows to work from -INSERT INTO source VALUES (2, 5); -INSERT INTO source VALUES (3, 20); -SELECT * FROM source ORDER BY sid; - sid | delta ------+------- - 2 | 5 - 3 | 20 - 4 | 40 -(3 rows) - -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 -(3 rows) - --- equivalent of an UPDATE join -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 0 - 3 | 0 -(3 rows) - -ROLLBACK; --- equivalent of a DELETE join -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DELETE; -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 -(1 row) - -ROLLBACK; -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DO NOTHING; -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 -(3 rows) - -ROLLBACK; -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - 
INSERT VALUES (4, NULL); -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - 4 | -(4 rows) - -ROLLBACK; --- duplicate source row causes multiple target row update ERROR -INSERT INTO source VALUES (2, 5); -SELECT * FROM source ORDER BY sid; - sid | delta ------+------- - 2 | 5 - 2 | 5 - 3 | 20 - 4 | 40 -(4 rows) - -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 -(3 rows) - -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -ERROR: MERGE command cannot affect row a second time -HINT: Ensure that not more than one source row matches any one target row. -ROLLBACK; -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DELETE; -ERROR: MERGE command cannot affect row a second time -HINT: Ensure that not more than one source row matches any one target row. -ROLLBACK; --- remove duplicate MATCHED data from source data -DELETE FROM source WHERE sid = 2; -INSERT INTO source VALUES (2, 5); -SELECT * FROM source ORDER BY sid; - sid | delta ------+------- - 2 | 5 - 3 | 20 - 4 | 40 -(3 rows) - -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 -(3 rows) - --- duplicate source row on INSERT should fail because of target_pkey -INSERT INTO source VALUES (4, 40); -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT VALUES (4, NULL); -ERROR: duplicate key value violates unique constraint "target_pkey" -DETAIL: Key (tid)=(4) already exists. -SELECT * FROM target ORDER BY tid; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- remove duplicate NOT MATCHED data from source data -DELETE FROM source WHERE sid = 4; -INSERT INTO source VALUES (4, 40); -SELECT * FROM source ORDER BY sid; - sid | delta ------+------- - 2 | 5 - 3 | 20 - 4 | 40 -(3 rows) - -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 -(3 rows) - --- remove constraints -alter table target drop CONSTRAINT target_pkey; -alter table target alter column tid drop not null; --- multiple actions -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT VALUES (4, 4) -WHEN MATCHED THEN - UPDATE SET balance = 0; -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 0 - 3 | 0 - 4 | 4 -(4 rows) - -ROLLBACK; --- should be equivalent -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0 -WHEN NOT MATCHED THEN - INSERT VALUES (4, 4); -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 0 - 3 | 0 - 4 | 4 -(4 rows) - -ROLLBACK; --- column references --- do a simple equivalent of an UPDATE join -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = t.balance + s.delta; -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 25 - 3 | 50 -(3 rows) - -ROLLBACK; --- do a simple equivalent of an INSERT SELECT -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT VALUES (s.sid, s.delta); -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - 4 | 40 -(4 rows) - -ROLLBACK; --- and again with duplicate source rows -INSERT INTO source VALUES (5, 50); -INSERT INTO source VALUES (5, 50); --- 
do a simple equivalent of an INSERT SELECT -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT VALUES (s.sid, s.delta); -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - 4 | 40 - 5 | 50 - 5 | 50 -(6 rows) - -ROLLBACK; --- removing duplicate source rows -DELETE FROM source WHERE sid = 5; --- and again with explicitly identified column list -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT (tid, balance) VALUES (s.sid, s.delta); -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - 4 | 40 -(4 rows) - -ROLLBACK; --- and again with a subtle error: referring to non-existent target row for NOT MATCHED -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT (tid, balance) VALUES (t.tid, s.delta); -ERROR: invalid reference to FROM-clause entry for table "t" -LINE 5: INSERT (tid, balance) VALUES (t.tid, s.delta); - ^ -DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. --- and again with a constant ON clause -BEGIN; -MERGE INTO target t -USING source AS s -ON (SELECT true) -WHEN NOT MATCHED THEN - INSERT (tid, balance) VALUES (t.tid, s.delta); -ERROR: invalid reference to FROM-clause entry for table "t" -LINE 5: INSERT (tid, balance) VALUES (t.tid, s.delta); - ^ -DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. -SELECT * FROM target ORDER BY tid; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- now the classic UPSERT -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = t.balance + s.delta -WHEN NOT MATCHED THEN - INSERT VALUES (s.sid, s.delta); -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 25 - 3 | 50 - 4 | 40 -(4 rows) - -ROLLBACK; --- unreachable WHEN clause should ERROR -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN /* Terminal WHEN clause for MATCHED */ - DELETE -WHEN MATCHED THEN - UPDATE SET balance = t.balance - s.delta; -ERROR: unreachable WHEN clause specified after unconditional WHEN clause -ROLLBACK; --- conditional WHEN clause -CREATE TABLE wq_target (tid integer not null, balance integer DEFAULT -1) - WITH (autovacuum_enabled=off); -CREATE TABLE wq_source (balance integer, sid integer) - WITH (autovacuum_enabled=off); -INSERT INTO wq_source (sid, balance) VALUES (1, 100); -BEGIN; --- try a simple INSERT with default values first -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT (tid) VALUES (s.sid); -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | -1 -(1 row) - -ROLLBACK; --- this time with a FALSE condition -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN NOT MATCHED AND FALSE THEN - INSERT (tid) VALUES (s.sid); -SELECT * FROM wq_target; - tid | balance ------+--------- -(0 rows) - --- this time with an actual condition which returns false -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN NOT MATCHED AND s.balance <> 100 THEN - INSERT (tid) VALUES (s.sid); -SELECT * FROM wq_target; - tid | balance ------+--------- -(0 rows) - -BEGIN; --- and now with a condition which returns true -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN NOT MATCHED AND s.balance = 100 THEN - INSERT (tid) VALUES 
(s.sid); -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | -1 -(1 row) - -ROLLBACK; --- conditions in the NOT MATCHED clause can only refer to source columns -BEGIN; -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN NOT MATCHED AND t.balance = 100 THEN - INSERT (tid) VALUES (s.sid); -ERROR: invalid reference to FROM-clause entry for table "t" -LINE 3: WHEN NOT MATCHED AND t.balance = 100 THEN - ^ -DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. -SELECT * FROM wq_target; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN NOT MATCHED AND s.balance = 100 THEN - INSERT (tid) VALUES (s.sid); -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | -1 -(1 row) - --- conditions in MATCHED clause can refer to both source and target -SELECT * FROM wq_source; - balance | sid ----------+----- - 100 | 1 -(1 row) - -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND s.balance = 100 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 99 -(1 row) - -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.balance = 100 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 99 -(1 row) - --- check if AND works -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.balance = 99 AND s.balance > 100 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 99 -(1 row) - -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.balance = 99 AND s.balance = 100 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 199 -(1 row) - --- check if OR works -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.balance = 99 OR s.balance > 100 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 199 -(1 row) - -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.balance = 199 OR s.balance > 100 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 299 -(1 row) - --- check source-side whole-row references -BEGIN; -MERGE INTO wq_target t -USING wq_source s ON (t.tid = s.sid) -WHEN matched and t = s or t.tid = s.sid THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 399 -(1 row) - -ROLLBACK; --- check if subqueries work in the conditions? 
-MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.balance > (SELECT max(balance) FROM target) THEN - UPDATE SET balance = t.balance + s.balance; --- check if we can access system columns in the conditions -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.xmin = t.xmax THEN - UPDATE SET balance = t.balance + s.balance; -ERROR: cannot use system column "xmin" in MERGE WHEN condition -LINE 3: WHEN MATCHED AND t.xmin = t.xmax THEN - ^ -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.tableoid >= 0 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 499 -(1 row) - -DROP TABLE wq_target, wq_source; --- test triggers -create or replace function merge_trigfunc () returns trigger -language plpgsql as -$$ -DECLARE - line text; -BEGIN - SELECT INTO line format('%s %s %s trigger%s', - TG_WHEN, TG_OP, TG_LEVEL, CASE - WHEN TG_OP = 'INSERT' AND TG_LEVEL = 'ROW' - THEN format(' row: %s', NEW) - WHEN TG_OP = 'UPDATE' AND TG_LEVEL = 'ROW' - THEN format(' row: %s -> %s', OLD, NEW) - WHEN TG_OP = 'DELETE' AND TG_LEVEL = 'ROW' - THEN format(' row: %s', OLD) - END); - - RAISE NOTICE '%', line; - IF (TG_WHEN = 'BEFORE' AND TG_LEVEL = 'ROW') THEN - IF (TG_OP = 'DELETE') THEN - RETURN OLD; - ELSE - RETURN NEW; - END IF; - ELSE - RETURN NULL; - END IF; -END; -$$; -CREATE TRIGGER merge_bsi BEFORE INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_bsu BEFORE UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_bsd BEFORE DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_asi AFTER INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_asu AFTER UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_asd AFTER DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_bri BEFORE INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_bru BEFORE UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_brd BEFORE DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_ari AFTER INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_aru AFTER UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_ard AFTER DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); --- now the classic UPSERT, with a DELETE -BEGIN; -UPDATE target SET balance = 0 WHERE tid = 3; -NOTICE: BEFORE UPDATE STATEMENT trigger -NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,0) -NOTICE: AFTER UPDATE ROW trigger row: (3,30) -> (3,0) -NOTICE: AFTER UPDATE STATEMENT trigger ---EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED AND t.balance > s.delta THEN - UPDATE SET balance = t.balance - s.delta -WHEN MATCHED THEN - DELETE -WHEN NOT MATCHED THEN - INSERT VALUES (s.sid, s.delta); -NOTICE: BEFORE INSERT STATEMENT trigger -NOTICE: BEFORE UPDATE STATEMENT trigger -NOTICE: BEFORE DELETE STATEMENT trigger -NOTICE: BEFORE DELETE ROW trigger row: (3,0) -NOTICE: BEFORE UPDATE ROW trigger row: (2,20) -> (2,15) -NOTICE: BEFORE INSERT ROW trigger row: (4,40) -NOTICE: AFTER DELETE ROW trigger row: (3,0) -NOTICE: AFTER 
UPDATE ROW trigger row: (2,20) -> (2,15) -NOTICE: AFTER INSERT ROW trigger row: (4,40) -NOTICE: AFTER DELETE STATEMENT trigger -NOTICE: AFTER UPDATE STATEMENT trigger -NOTICE: AFTER INSERT STATEMENT trigger -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 15 - 4 | 40 -(3 rows) - -ROLLBACK; --- Test behavior of triggers that turn UPDATE/DELETE into no-ops -create or replace function skip_merge_op() returns trigger -language plpgsql as -$$ -BEGIN - RETURN NULL; -END; -$$; -SELECT * FROM target full outer join source on (sid = tid); - tid | balance | sid | delta ------+---------+-----+------- - 3 | 30 | 3 | 20 - 2 | 20 | 2 | 5 - | | 4 | 40 - 1 | 10 | | -(4 rows) - -create trigger merge_skip BEFORE INSERT OR UPDATE or DELETE - ON target FOR EACH ROW EXECUTE FUNCTION skip_merge_op(); -DO $$ -DECLARE - result integer; -BEGIN -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED AND s.sid = 3 THEN UPDATE SET balance = t.balance + s.delta -WHEN MATCHED THEN DELETE -WHEN NOT MATCHED THEN INSERT VALUES (sid, delta); -IF FOUND THEN - RAISE NOTICE 'Found'; -ELSE - RAISE NOTICE 'Not found'; -END IF; -GET DIAGNOSTICS result := ROW_COUNT; -RAISE NOTICE 'ROW_COUNT = %', result; -END; -$$; -NOTICE: BEFORE INSERT STATEMENT trigger -NOTICE: BEFORE UPDATE STATEMENT trigger -NOTICE: BEFORE DELETE STATEMENT trigger -NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,50) -NOTICE: BEFORE DELETE ROW trigger row: (2,20) -NOTICE: BEFORE INSERT ROW trigger row: (4,40) -NOTICE: AFTER DELETE STATEMENT trigger -NOTICE: AFTER UPDATE STATEMENT trigger -NOTICE: AFTER INSERT STATEMENT trigger -NOTICE: Not found -NOTICE: ROW_COUNT = 0 -SELECT * FROM target FULL OUTER JOIN source ON (sid = tid); - tid | balance | sid | delta ------+---------+-----+------- - 3 | 30 | 3 | 20 - 2 | 20 | 2 | 5 - | | 4 | 40 - 1 | 10 | | -(4 rows) - -DROP TRIGGER merge_skip ON target; -DROP FUNCTION skip_merge_op(); --- test from PL/pgSQL --- make sure MERGE INTO isn't interpreted to mean returning variables like SELECT INTO -BEGIN; -DO LANGUAGE plpgsql $$ -BEGIN -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED AND t.balance > s.delta THEN - UPDATE SET balance = t.balance - s.delta; -END; -$$; -NOTICE: BEFORE UPDATE STATEMENT trigger -NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,10) -NOTICE: BEFORE UPDATE ROW trigger row: (2,20) -> (2,15) -NOTICE: AFTER UPDATE ROW trigger row: (3,30) -> (3,10) -NOTICE: AFTER UPDATE ROW trigger row: (2,20) -> (2,15) -NOTICE: AFTER UPDATE STATEMENT trigger -ROLLBACK; ---source constants -BEGIN; -MERGE INTO target t -USING (SELECT 9 AS sid, 57 AS delta) AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT (tid, balance) VALUES (s.sid, s.delta); -NOTICE: BEFORE INSERT STATEMENT trigger -NOTICE: BEFORE INSERT ROW trigger row: (9,57) -NOTICE: AFTER INSERT ROW trigger row: (9,57) -NOTICE: AFTER INSERT STATEMENT trigger -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - 9 | 57 -(4 rows) - -ROLLBACK; ---source query -BEGIN; -MERGE INTO target t -USING (SELECT sid, delta FROM source WHERE delta > 0) AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT (tid, balance) VALUES (s.sid, s.delta); -NOTICE: BEFORE INSERT STATEMENT trigger -NOTICE: BEFORE INSERT ROW trigger row: (4,40) -NOTICE: AFTER INSERT ROW trigger row: (4,40) -NOTICE: AFTER INSERT STATEMENT trigger -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - 4 | 40 -(4 rows) - -ROLLBACK; 
-BEGIN; -MERGE INTO target t -USING (SELECT sid, delta as newname FROM source WHERE delta > 0) AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT (tid, balance) VALUES (s.sid, s.newname); -NOTICE: BEFORE INSERT STATEMENT trigger -NOTICE: BEFORE INSERT ROW trigger row: (4,40) -NOTICE: AFTER INSERT ROW trigger row: (4,40) -NOTICE: AFTER INSERT STATEMENT trigger -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - 4 | 40 -(4 rows) - -ROLLBACK; ---self-merge -BEGIN; -MERGE INTO target t1 -USING target t2 -ON t1.tid = t2.tid -WHEN MATCHED THEN - UPDATE SET balance = t1.balance + t2.balance -WHEN NOT MATCHED THEN - INSERT VALUES (t2.tid, t2.balance); -NOTICE: BEFORE INSERT STATEMENT trigger -NOTICE: BEFORE UPDATE STATEMENT trigger -NOTICE: BEFORE UPDATE ROW trigger row: (1,10) -> (1,20) -NOTICE: BEFORE UPDATE ROW trigger row: (2,20) -> (2,40) -NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,60) -NOTICE: AFTER UPDATE ROW trigger row: (1,10) -> (1,20) -NOTICE: AFTER UPDATE ROW trigger row: (2,20) -> (2,40) -NOTICE: AFTER UPDATE ROW trigger row: (3,30) -> (3,60) -NOTICE: AFTER UPDATE STATEMENT trigger -NOTICE: AFTER INSERT STATEMENT trigger -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 20 - 2 | 40 - 3 | 60 -(3 rows) - -ROLLBACK; -BEGIN; -MERGE INTO target t -USING (SELECT tid as sid, balance as delta FROM target WHERE balance > 0) AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT (tid, balance) VALUES (s.sid, s.delta); -NOTICE: BEFORE INSERT STATEMENT trigger -NOTICE: AFTER INSERT STATEMENT trigger -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 -(3 rows) - -ROLLBACK; -BEGIN; -MERGE INTO target t -USING -(SELECT sid, max(delta) AS delta - FROM source - GROUP BY sid - HAVING count(*) = 1 - ORDER BY sid ASC) AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT (tid, balance) VALUES (s.sid, s.delta); -NOTICE: BEFORE INSERT STATEMENT trigger -NOTICE: BEFORE INSERT ROW trigger row: (4,40) -NOTICE: AFTER INSERT ROW trigger row: (4,40) -NOTICE: AFTER INSERT STATEMENT trigger -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - 4 | 40 -(4 rows) - -ROLLBACK; --- plpgsql parameters and results -BEGIN; -CREATE FUNCTION merge_func (p_id integer, p_bal integer) -RETURNS INTEGER -LANGUAGE plpgsql -AS $$ -DECLARE - result integer; -BEGIN -MERGE INTO target t -USING (SELECT p_id AS sid) AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = t.balance - p_bal; -IF FOUND THEN - GET DIAGNOSTICS result := ROW_COUNT; -END IF; -RETURN result; -END; -$$; -SELECT merge_func(3, 4); -NOTICE: BEFORE UPDATE STATEMENT trigger -NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,26) -NOTICE: AFTER UPDATE ROW trigger row: (3,30) -> (3,26) -NOTICE: AFTER UPDATE STATEMENT trigger - merge_func ------------- - 1 -(1 row) - -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 26 -(3 rows) - -ROLLBACK; --- PREPARE -BEGIN; -prepare foom as merge into target t using (select 1 as sid) s on (t.tid = s.sid) when matched then update set balance = 1; -execute foom; -NOTICE: BEFORE UPDATE STATEMENT trigger -NOTICE: BEFORE UPDATE ROW trigger row: (1,10) -> (1,1) -NOTICE: AFTER UPDATE ROW trigger row: (1,10) -> (1,1) -NOTICE: AFTER UPDATE STATEMENT trigger -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 1 - 2 | 20 - 3 | 30 -(3 rows) - -ROLLBACK; -BEGIN; -PREPARE foom2 (integer, integer) AS 
-MERGE INTO target t
-USING (SELECT 1) s
-ON t.tid = $1
-WHEN MATCHED THEN
-UPDATE SET balance = $2;
---EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF)
-execute foom2 (1, 1);
-NOTICE: BEFORE UPDATE STATEMENT trigger
-NOTICE: BEFORE UPDATE ROW trigger row: (1,10) -> (1,1)
-NOTICE: AFTER UPDATE ROW trigger row: (1,10) -> (1,1)
-NOTICE: AFTER UPDATE STATEMENT trigger
-SELECT * FROM target ORDER BY tid;
- tid | balance
------+---------
- 1 | 1
- 2 | 20
- 3 | 30
-(3 rows)
-
-ROLLBACK;
--- subqueries in source relation
-CREATE TABLE sq_target (tid integer NOT NULL, balance integer)
- WITH (autovacuum_enabled=off);
-CREATE TABLE sq_source (delta integer, sid integer, balance integer DEFAULT 0)
- WITH (autovacuum_enabled=off);
-INSERT INTO sq_target(tid, balance) VALUES (1,100), (2,200), (3,300);
-INSERT INTO sq_source(sid, delta) VALUES (1,10), (2,20), (4,40);
-BEGIN;
-MERGE INTO sq_target t
-USING (SELECT * FROM sq_source) s
-ON tid = sid
-WHEN MATCHED AND t.balance > delta THEN
- UPDATE SET balance = t.balance + delta;
-SELECT * FROM sq_target;
- tid | balance
------+---------
- 3 | 300
- 1 | 110
- 2 | 220
-(3 rows)
-
-ROLLBACK;
--- try a view
-CREATE VIEW v AS SELECT * FROM sq_source WHERE sid < 2;
-BEGIN;
-MERGE INTO sq_target
-USING v
-ON tid = sid
-WHEN MATCHED THEN
- UPDATE SET balance = v.balance + delta;
-SELECT * FROM sq_target;
- tid | balance
------+---------
- 2 | 200
- 3 | 300
- 1 | 10
-(3 rows)
-
-ROLLBACK;
--- ambiguous reference to a column
-BEGIN;
-MERGE INTO sq_target
-USING v
-ON tid = sid
-WHEN MATCHED AND tid > 2 THEN
- UPDATE SET balance = balance + delta
-WHEN NOT MATCHED THEN
- INSERT (balance, tid) VALUES (balance + delta, sid)
-WHEN MATCHED AND tid < 2 THEN
- DELETE;
-ERROR: column reference "balance" is ambiguous
-LINE 5: UPDATE SET balance = balance + delta
- ^
-ROLLBACK;
-BEGIN;
-INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10);
-MERGE INTO sq_target t
-USING v
-ON tid = sid
-WHEN MATCHED AND tid > 2 THEN
- UPDATE SET balance = t.balance + delta
-WHEN NOT MATCHED THEN
- INSERT (balance, tid) VALUES (balance + delta, sid)
-WHEN MATCHED AND tid < 2 THEN
- DELETE;
-SELECT * FROM sq_target;
- tid | balance
------+---------
- 2 | 200
- 3 | 300
- -1 | -11
-(3 rows)
-
-ROLLBACK;
--- CTEs
-BEGIN;
-INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10);
-WITH targq AS (
- SELECT * FROM v
-)
-MERGE INTO sq_target t
-USING v
-ON tid = sid
-WHEN MATCHED AND tid > 2 THEN
- UPDATE SET balance = t.balance + delta
-WHEN NOT MATCHED THEN
- INSERT (balance, tid) VALUES (balance + delta, sid)
-WHEN MATCHED AND tid < 2 THEN
- DELETE;
-ROLLBACK;
--- RETURNING
-BEGIN;
-INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10);
-MERGE INTO sq_target t
-USING v
-ON tid = sid
-WHEN MATCHED AND tid > 2 THEN
- UPDATE SET balance = t.balance + delta
-WHEN NOT MATCHED THEN
- INSERT (balance, tid) VALUES (balance + delta, sid)
-WHEN MATCHED AND tid < 2 THEN
- DELETE
-RETURNING *;
-ERROR: syntax error at or near "RETURNING"
-LINE 10: RETURNING *;
- ^
-ROLLBACK;
--- EXPLAIN
-CREATE TABLE ex_mtarget (a int, b int)
- WITH (autovacuum_enabled=off);
-CREATE TABLE ex_msource (a int, b int)
- WITH (autovacuum_enabled=off);
-INSERT INTO ex_mtarget SELECT i, i*10 FROM generate_series(1,100,2) i;
-INSERT INTO ex_msource SELECT i, i*10 FROM generate_series(1,100,1) i;
-CREATE FUNCTION explain_merge(query text) RETURNS SETOF text
-LANGUAGE plpgsql AS
-$$
-DECLARE ln text;
-BEGIN
- FOR ln IN
- EXECUTE 'explain (analyze, timing off, summary off, costs off) ' ||
- query
- LOOP
- ln := regexp_replace(ln, '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g');
- RETURN NEXT ln;
- END LOOP;
-END;
-$$;
--- only updates
-SELECT explain_merge('
-MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a
-WHEN MATCHED THEN
- UPDATE SET b = t.b + 1');
- explain_merge
-----------------------------------------------------------------------
- Merge on ex_mtarget t (actual rows=0 loops=1)
- Tuples: updated=50
- -> Merge Join (actual rows=50 loops=1)
- Merge Cond: (t.a = s.a)
- -> Sort (actual rows=50 loops=1)
- Sort Key: t.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_mtarget t (actual rows=50 loops=1)
- -> Sort (actual rows=100 loops=1)
- Sort Key: s.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_msource s (actual rows=100 loops=1)
-(12 rows)
-
--- only updates to selected tuples
-SELECT explain_merge('
-MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a
-WHEN MATCHED AND t.a < 10 THEN
- UPDATE SET b = t.b + 1');
- explain_merge
-----------------------------------------------------------------------
- Merge on ex_mtarget t (actual rows=0 loops=1)
- Tuples: updated=5 skipped=45
- -> Merge Join (actual rows=50 loops=1)
- Merge Cond: (t.a = s.a)
- -> Sort (actual rows=50 loops=1)
- Sort Key: t.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_mtarget t (actual rows=50 loops=1)
- -> Sort (actual rows=100 loops=1)
- Sort Key: s.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_msource s (actual rows=100 loops=1)
-(12 rows)
-
--- updates + deletes
-SELECT explain_merge('
-MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a
-WHEN MATCHED AND t.a < 10 THEN
- UPDATE SET b = t.b + 1
-WHEN MATCHED AND t.a >= 10 AND t.a <= 20 THEN
- DELETE');
- explain_merge
-----------------------------------------------------------------------
- Merge on ex_mtarget t (actual rows=0 loops=1)
- Tuples: updated=5 deleted=5 skipped=40
- -> Merge Join (actual rows=50 loops=1)
- Merge Cond: (t.a = s.a)
- -> Sort (actual rows=50 loops=1)
- Sort Key: t.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_mtarget t (actual rows=50 loops=1)
- -> Sort (actual rows=100 loops=1)
- Sort Key: s.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_msource s (actual rows=100 loops=1)
-(12 rows)
-
--- only inserts
-SELECT explain_merge('
-MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a
-WHEN NOT MATCHED AND s.a < 10 THEN
- INSERT VALUES (a, b)');
- explain_merge
-----------------------------------------------------------------------
- Merge on ex_mtarget t (actual rows=0 loops=1)
- Tuples: inserted=4 skipped=96
- -> Merge Left Join (actual rows=100 loops=1)
- Merge Cond: (s.a = t.a)
- -> Sort (actual rows=100 loops=1)
- Sort Key: s.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_msource s (actual rows=100 loops=1)
- -> Sort (actual rows=45 loops=1)
- Sort Key: t.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_mtarget t (actual rows=45 loops=1)
-(12 rows)
-
--- all three
-SELECT explain_merge('
-MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a
-WHEN MATCHED AND t.a < 10 THEN
- UPDATE SET b = t.b + 1
-WHEN MATCHED AND t.a >= 30 AND t.a <= 40 THEN
- DELETE
-WHEN NOT MATCHED AND s.a < 20 THEN
- INSERT VALUES (a, b)');
- explain_merge
-----------------------------------------------------------------------
- Merge on ex_mtarget t (actual rows=0 loops=1)
- Tuples: inserted=10 updated=9 deleted=5 skipped=76
- -> Merge Left Join (actual rows=100 loops=1)
- Merge Cond: (s.a = t.a)
- -> Sort (actual rows=100 loops=1)
- Sort Key: s.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_msource s (actual rows=100 loops=1)
- -> Sort (actual rows=49 loops=1)
- Sort Key: t.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_mtarget t (actual rows=49 loops=1)
-(12 rows)
-
--- nothing
-SELECT explain_merge('
-MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a AND t.a < -1000
-WHEN MATCHED AND t.a < 10 THEN
- DO NOTHING');
- explain_merge
--------------------------------------------------------------------
- Merge on ex_mtarget t (actual rows=0 loops=1)
- -> Merge Join (actual rows=0 loops=1)
- Merge Cond: (t.a = s.a)
- -> Sort (actual rows=0 loops=1)
- Sort Key: t.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_mtarget t (actual rows=0 loops=1)
- Filter: (a < '-1000'::integer)
- Rows Removed by Filter: 54
- -> Sort (never executed)
- Sort Key: s.a
- -> Seq Scan on ex_msource s (never executed)
-(12 rows)
-
-DROP TABLE ex_msource, ex_mtarget;
-DROP FUNCTION explain_merge(text);
--- Subqueries
-BEGIN;
-MERGE INTO sq_target t
-USING v
-ON tid = sid
-WHEN MATCHED THEN
- UPDATE SET balance = (SELECT count(*) FROM sq_target);
-SELECT * FROM sq_target WHERE tid = 1;
- tid | balance
------+---------
- 1 | 3
-(1 row)
-
-ROLLBACK;
-BEGIN;
-MERGE INTO sq_target t
-USING v
-ON tid = sid
-WHEN MATCHED AND (SELECT count(*) > 0 FROM sq_target) THEN
- UPDATE SET balance = 42;
-SELECT * FROM sq_target WHERE tid = 1;
- tid | balance
------+---------
- 1 | 42
-(1 row)
-
-ROLLBACK;
-BEGIN;
-MERGE INTO sq_target t
-USING v
-ON tid = sid AND (SELECT count(*) > 0 FROM sq_target)
-WHEN MATCHED THEN
- UPDATE SET balance = 42;
-SELECT * FROM sq_target WHERE tid = 1;
- tid | balance
------+---------
- 1 | 42
-(1 row)
-
-ROLLBACK;
-DROP TABLE sq_target, sq_source CASCADE;
-NOTICE: drop cascades to view v
-CREATE TABLE pa_target (tid integer, balance float, val text)
- PARTITION BY LIST (tid);
-CREATE TABLE part1 PARTITION OF pa_target FOR VALUES IN (1,4)
- WITH (autovacuum_enabled=off);
-CREATE TABLE part2 PARTITION OF pa_target FOR VALUES IN (2,5,6)
- WITH (autovacuum_enabled=off);
-CREATE TABLE part3 PARTITION OF pa_target FOR VALUES IN (3,8,9)
- WITH (autovacuum_enabled=off);
-CREATE TABLE part4 PARTITION OF pa_target DEFAULT
- WITH (autovacuum_enabled=off);
-CREATE TABLE pa_source (sid integer, delta float);
--- insert many rows to the source table
-INSERT INTO pa_source SELECT id, id * 10 FROM generate_series(1,14) AS id;
--- insert a few rows in the target table (odd numbered tid)
-INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,14,2) AS id;
--- try simple MERGE
-BEGIN;
-MERGE INTO pa_target t
- USING pa_source s
- ON t.tid = s.sid
- WHEN MATCHED THEN
- UPDATE SET balance = balance + delta, val = val || ' updated by merge'
- WHEN NOT MATCHED THEN
- INSERT VALUES (sid, delta, 'inserted by merge');
-SELECT * FROM pa_target ORDER BY tid;
- tid | balance | val
------+---------+--------------------------
- 1 | 110 | initial updated by merge
- 2 | 20 | inserted by merge
- 3 | 330 | initial updated by merge
- 4 | 40 | inserted by merge
- 5 | 550 | initial updated by merge
- 6 | 60 | inserted by merge
- 7 | 770 | initial updated by merge
- 8 | 80 | inserted by merge
- 9 | 990 | initial updated by merge
- 10 | 100 | inserted by merge
- 11 | 1210 | initial updated by merge
- 12 | 120 | inserted by merge
- 13 | 1430 | initial updated by merge
- 14 | 140 | inserted by merge
-(14 rows)
-
-ROLLBACK;
--- same with a constant qual
-BEGIN;
-MERGE INTO pa_target t
- USING pa_source s
- ON t.tid = s.sid AND tid = 1
- WHEN MATCHED THEN
- UPDATE SET balance = balance + delta, val = val || ' updated by merge'
- WHEN NOT MATCHED THEN
- INSERT VALUES (sid, delta, 'inserted by merge');
-SELECT * FROM pa_target ORDER BY tid;
- tid | balance | val
------+---------+--------------------------
- 1 | 110 | initial updated by merge
- 2 | 20 | inserted by merge
- 3 | 30 | inserted by merge
- 3 | 300 | initial
- 4 | 40 | inserted by merge
- 5 | 500 | initial
- 5 | 50 | inserted by merge
- 6 | 60 | inserted by merge
- 7 | 700 | initial
- 7 | 70 | inserted by merge
- 8 | 80 | inserted by merge
- 9 | 90 | inserted by merge
- 9 | 900 | initial
- 10 | 100 | inserted by merge
- 11 | 1100 | initial
- 11 | 110 | inserted by merge
- 12 | 120 | inserted by merge
- 13 | 1300 | initial
- 13 | 130 | inserted by merge
- 14 | 140 | inserted by merge
-(20 rows)
-
-ROLLBACK;
--- try updating the partition key column
-BEGIN;
-CREATE FUNCTION merge_func() RETURNS integer LANGUAGE plpgsql AS $$
-DECLARE
- result integer;
-BEGIN
-MERGE INTO pa_target t
- USING pa_source s
- ON t.tid = s.sid
- WHEN MATCHED THEN
- UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge'
- WHEN NOT MATCHED THEN
- INSERT VALUES (sid, delta, 'inserted by merge');
-IF FOUND THEN
- GET DIAGNOSTICS result := ROW_COUNT;
-END IF;
-RETURN result;
-END;
-$$;
-SELECT merge_func();
- merge_func
------------
- 14
-(1 row)
-
-SELECT * FROM pa_target ORDER BY tid;
- tid | balance | val
------+---------+--------------------------
- 2 | 110 | initial updated by merge
- 2 | 20 | inserted by merge
- 4 | 40 | inserted by merge
- 4 | 330 | initial updated by merge
- 6 | 550 | initial updated by merge
- 6 | 60 | inserted by merge
- 8 | 80 | inserted by merge
- 8 | 770 | initial updated by merge
- 10 | 990 | initial updated by merge
- 10 | 100 | inserted by merge
- 12 | 1210 | initial updated by merge
- 12 | 120 | inserted by merge
- 14 | 1430 | initial updated by merge
- 14 | 140 | inserted by merge
-(14 rows)
-
-ROLLBACK;
-DROP TABLE pa_target CASCADE;
--- The target table is partitioned in the same way, but this time by attaching
--- partitions which have columns in different order, dropped columns etc.
-CREATE TABLE pa_target (tid integer, balance float, val text)
- PARTITION BY LIST (tid);
-CREATE TABLE part1 (tid integer, balance float, val text)
- WITH (autovacuum_enabled=off);
-CREATE TABLE part2 (balance float, tid integer, val text)
- WITH (autovacuum_enabled=off);
-CREATE TABLE part3 (tid integer, balance float, val text)
- WITH (autovacuum_enabled=off);
-CREATE TABLE part4 (extraid text, tid integer, balance float, val text)
- WITH (autovacuum_enabled=off);
-ALTER TABLE part4 DROP COLUMN extraid;
-ALTER TABLE pa_target ATTACH PARTITION part1 FOR VALUES IN (1,4);
-ALTER TABLE pa_target ATTACH PARTITION part2 FOR VALUES IN (2,5,6);
-ALTER TABLE pa_target ATTACH PARTITION part3 FOR VALUES IN (3,8,9);
-ALTER TABLE pa_target ATTACH PARTITION part4 DEFAULT;
--- insert a few rows in the target table (odd numbered tid)
-INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,14,2) AS id;
--- try simple MERGE
-BEGIN;
-DO $$
-DECLARE
- result integer;
-BEGIN
-MERGE INTO pa_target t
- USING pa_source s
- ON t.tid = s.sid
- WHEN MATCHED THEN
- UPDATE SET balance = balance + delta, val = val || ' updated by merge'
- WHEN NOT MATCHED THEN
- INSERT VALUES (sid, delta, 'inserted by merge');
-GET DIAGNOSTICS result := ROW_COUNT;
-RAISE NOTICE 'ROW_COUNT = %', result;
-END;
-$$;
-NOTICE: ROW_COUNT = 14
-SELECT * FROM pa_target ORDER BY tid;
- tid | balance | val
------+---------+--------------------------
- 1 | 110 | initial updated by merge
- 2 | 20 | inserted by merge
- 3 | 330 | initial updated by merge
- 4 | 40 | inserted by merge
- 5 | 550 | initial updated by merge
- 6 | 60 | inserted by merge
- 7 | 770 | initial updated by merge
- 8 | 80 | inserted by merge
- 9 | 990 | initial updated by merge
- 10 | 100 | inserted by merge
- 11 | 1210 | initial updated by merge
- 12 | 120 | inserted by merge
- 13 | 1430 | initial updated by merge
- 14 | 140 | inserted by merge
-(14 rows)
-
-ROLLBACK;
--- same with a constant qual
-BEGIN;
-MERGE INTO pa_target t
- USING pa_source s
- ON t.tid = s.sid AND tid IN (1, 5)
- WHEN MATCHED AND tid % 5 = 0 THEN DELETE
- WHEN MATCHED THEN
- UPDATE SET balance = balance + delta, val = val || ' updated by merge'
- WHEN NOT MATCHED THEN
- INSERT VALUES (sid, delta, 'inserted by merge');
-SELECT * FROM pa_target ORDER BY tid;
- tid | balance | val
------+---------+--------------------------
- 1 | 110 | initial updated by merge
- 2 | 20 | inserted by merge
- 3 | 30 | inserted by merge
- 3 | 300 | initial
- 4 | 40 | inserted by merge
- 6 | 60 | inserted by merge
- 7 | 700 | initial
- 7 | 70 | inserted by merge
- 8 | 80 | inserted by merge
- 9 | 900 | initial
- 9 | 90 | inserted by merge
- 10 | 100 | inserted by merge
- 11 | 110 | inserted by merge
- 11 | 1100 | initial
- 12 | 120 | inserted by merge
- 13 | 1300 | initial
- 13 | 130 | inserted by merge
- 14 | 140 | inserted by merge
-(18 rows)
-
-ROLLBACK;
--- try updating the partition key column
-BEGIN;
-DO $$
-DECLARE
- result integer;
-BEGIN
-MERGE INTO pa_target t
- USING pa_source s
- ON t.tid = s.sid
- WHEN MATCHED THEN
- UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge'
- WHEN NOT MATCHED THEN
- INSERT VALUES (sid, delta, 'inserted by merge');
-GET DIAGNOSTICS result := ROW_COUNT;
-RAISE NOTICE 'ROW_COUNT = %', result;
-END;
-$$;
-NOTICE: ROW_COUNT = 14
-SELECT * FROM pa_target ORDER BY tid;
- tid | balance | val
------+---------+--------------------------
- 2 | 110 | initial updated by merge
- 2 | 20 | inserted by merge
- 4 | 40 | inserted by merge
- 4 | 330 | initial updated by merge
- 6 | 550 | initial updated by merge
- 6 | 60 | inserted by merge
- 8 | 80 | inserted by merge
- 8 | 770 | initial updated by merge
- 10 | 990 | initial updated by merge
- 10 | 100 | inserted by merge
- 12 | 1210 | initial updated by merge
- 12 | 120 | inserted by merge
- 14 | 1430 | initial updated by merge
- 14 | 140 | inserted by merge
-(14 rows)
-
-ROLLBACK;
--- as above, but blocked by BEFORE DELETE ROW trigger
-BEGIN;
-CREATE FUNCTION trig_fn() RETURNS trigger LANGUAGE plpgsql AS
- $$ BEGIN RETURN NULL; END; $$;
-CREATE TRIGGER del_trig BEFORE DELETE ON pa_target
- FOR EACH ROW EXECUTE PROCEDURE trig_fn();
-DO $$
-DECLARE
- result integer;
-BEGIN
-MERGE INTO pa_target t
- USING pa_source s
- ON t.tid = s.sid
- WHEN MATCHED THEN
- UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge'
- WHEN NOT MATCHED THEN
- INSERT VALUES (sid, delta, 'inserted by merge');
-GET DIAGNOSTICS result := ROW_COUNT;
-RAISE NOTICE 'ROW_COUNT = %', result;
-END;
-$$;
-NOTICE: ROW_COUNT = 10
-SELECT * FROM pa_target ORDER BY tid;
- tid | balance | val
------+---------+--------------------------
- 1 | 100 | initial
- 2 | 20 | inserted by merge
- 3 | 300 | initial
- 4 | 40 | inserted by merge
- 6 | 550 | initial updated by merge
- 6 | 60 | inserted by merge
- 7 | 700 | initial
- 8 | 80 | inserted by merge
- 9 | 900 | initial
- 10 | 100 | inserted by merge
- 12 | 1210 | initial updated by merge
- 12 | 120 | inserted by merge
- 14 | 1430 | initial updated by merge
- 14 | 140 | inserted by merge
-(14 rows)
-
-ROLLBACK;
--- as above, but blocked by BEFORE INSERT ROW trigger
-BEGIN;
-CREATE FUNCTION trig_fn() RETURNS trigger LANGUAGE plpgsql AS
- $$ BEGIN RETURN NULL; END; $$;
-CREATE TRIGGER ins_trig BEFORE INSERT ON pa_target
- FOR EACH ROW EXECUTE PROCEDURE trig_fn();
-DO $$
-DECLARE
- result integer;
-BEGIN
-MERGE INTO pa_target t
- USING pa_source s
- ON t.tid = s.sid
- WHEN MATCHED THEN
- UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge'
- WHEN NOT MATCHED THEN
- INSERT VALUES (sid, delta, 'inserted by merge');
-GET DIAGNOSTICS result := ROW_COUNT;
-RAISE NOTICE 'ROW_COUNT = %', result;
-END;
-$$;
-NOTICE: ROW_COUNT = 3
-SELECT * FROM pa_target ORDER BY tid;
- tid | balance | val
------+---------+--------------------------
- 6 | 550 | initial updated by merge
- 12 | 1210 | initial updated by merge
- 14 | 1430 | initial updated by merge
-(3 rows)
-
-ROLLBACK;
--- test RLS enforcement
-BEGIN;
-ALTER TABLE pa_target ENABLE ROW LEVEL SECURITY;
-ALTER TABLE pa_target FORCE ROW LEVEL SECURITY;
-CREATE POLICY pa_target_pol ON pa_target USING (tid != 0);
-MERGE INTO pa_target t
- USING pa_source s
- ON t.tid = s.sid AND t.tid IN (1,2,3,4)
- WHEN MATCHED THEN
- UPDATE SET tid = tid - 1;
-ERROR: new row violates row-level security policy for table "pa_target"
-ROLLBACK;
-DROP TABLE pa_source;
-DROP TABLE pa_target CASCADE;
--- Sub-partitioning
-CREATE TABLE pa_target (logts timestamp, tid integer, balance float, val text)
- PARTITION BY RANGE (logts);
-CREATE TABLE part_m01 PARTITION OF pa_target
- FOR VALUES FROM ('2017-01-01') TO ('2017-02-01')
- PARTITION BY LIST (tid);
-CREATE TABLE part_m01_odd PARTITION OF part_m01
- FOR VALUES IN (1,3,5,7,9) WITH (autovacuum_enabled=off);
-CREATE TABLE part_m01_even PARTITION OF part_m01
- FOR VALUES IN (2,4,6,8) WITH (autovacuum_enabled=off);
-CREATE TABLE part_m02 PARTITION OF pa_target
- FOR VALUES FROM ('2017-02-01') TO ('2017-03-01')
- PARTITION BY LIST (tid);
-CREATE TABLE part_m02_odd PARTITION OF part_m02
- FOR VALUES IN (1,3,5,7,9) WITH (autovacuum_enabled=off);
-CREATE TABLE part_m02_even PARTITION OF part_m02
- FOR VALUES IN (2,4,6,8) WITH (autovacuum_enabled=off);
-CREATE TABLE pa_source (sid integer, delta float)
- WITH (autovacuum_enabled=off);
--- insert many rows to the source table
-INSERT INTO pa_source SELECT id, id * 10 FROM generate_series(1,14) AS id;
--- insert a few rows in the target table (odd numbered tid)
-INSERT INTO pa_target SELECT '2017-01-31', id, id * 100, 'initial' FROM generate_series(1,9,3) AS id;
-INSERT INTO pa_target SELECT '2017-02-28', id, id * 100, 'initial' FROM generate_series(2,9,3) AS id;
--- try simple MERGE
-BEGIN;
-MERGE INTO pa_target t
- USING (SELECT '2017-01-15' AS slogts, * FROM pa_source WHERE sid < 10) s
- ON t.tid = s.sid
- WHEN MATCHED THEN
- UPDATE SET balance = balance + delta, val = val || ' updated by merge'
- WHEN NOT MATCHED THEN
- INSERT VALUES (slogts::timestamp, sid, delta, 'inserted by merge');
-SELECT * FROM pa_target ORDER BY tid;
- logts | tid | balance | val
---------------------------+-----+---------+--------------------------
- Tue Jan 31 00:00:00 2017 | 1 | 110 | initial updated by merge
- Tue Feb 28 00:00:00 2017 | 2 | 220 | initial updated by merge
- Sun Jan 15 00:00:00 2017 | 3 | 30 | inserted by merge
- Tue Jan 31 00:00:00 2017 | 4 | 440 | initial updated by merge
- Tue Feb 28 00:00:00 2017 | 5 | 550 | initial updated by merge
- Sun Jan 15 00:00:00 2017 | 6 | 60 | inserted by merge
- Tue Jan 31 00:00:00 2017 | 7 | 770 | initial updated by merge
- Tue Feb 28 00:00:00 2017 | 8 | 880 | initial updated by merge
- Sun Jan 15 00:00:00 2017 | 9 | 90 | inserted by merge
-(9 rows)
-
-ROLLBACK;
-DROP TABLE pa_source;
-DROP TABLE pa_target CASCADE;
--- Partitioned table with primary key
-CREATE TABLE pa_target (tid integer PRIMARY KEY) PARTITION BY LIST (tid);
-CREATE TABLE pa_targetp PARTITION OF pa_target DEFAULT;
-CREATE TABLE pa_source (sid integer);
-INSERT INTO pa_source VALUES (1), (2);
-EXPLAIN (VERBOSE, COSTS OFF)
-MERGE INTO pa_target t USING pa_source s ON t.tid = s.sid
- WHEN NOT MATCHED THEN INSERT VALUES (s.sid);
- QUERY PLAN
--------------------------------------------------------------
- Merge on public.pa_target t
- Merge on public.pa_targetp t_1
- -> Hash Left Join
- Output: s.sid, s.ctid, t_1.tableoid, t_1.ctid
- Inner Unique: true
- Hash Cond: (s.sid = t_1.tid)
- -> Seq Scan on public.pa_source s
- Output: s.sid, s.ctid
- -> Hash
- Output: t_1.tid, t_1.tableoid, t_1.ctid
- -> Seq Scan on public.pa_targetp t_1
- Output: t_1.tid, t_1.tableoid, t_1.ctid
-(12 rows)
-
-MERGE INTO pa_target t USING pa_source s ON t.tid = s.sid
- WHEN NOT MATCHED THEN INSERT VALUES (s.sid);
-TABLE pa_target;
- tid
------
- 1
- 2
-(2 rows)
-
--- Partition-less partitioned table
--- (the bug we are checking for appeared only if table had partitions before)
-DROP TABLE pa_targetp;
-EXPLAIN (VERBOSE, COSTS OFF)
-MERGE INTO pa_target t USING pa_source s ON t.tid = s.sid
- WHEN NOT MATCHED THEN INSERT VALUES (s.sid);
- QUERY PLAN
---------------------------------------------
- Merge on public.pa_target t
- -> Hash Left Join
- Output: s.sid, s.ctid, t.ctid
- Inner Unique: true
- Hash Cond: (s.sid = t.tid)
- -> Seq Scan on public.pa_source s
- Output: s.sid, s.ctid
- -> Hash
- Output: t.tid, t.ctid
- -> Result
- Output: t.tid, t.ctid
- One-Time Filter: false
-(12 rows)
-
-MERGE INTO pa_target t USING pa_source s ON t.tid = s.sid
- WHEN NOT MATCHED THEN INSERT VALUES (s.sid);
-ERROR: no partition of relation "pa_target" found for row
-DETAIL: Partition key of the failing row contains (tid) = (1).
-DROP TABLE pa_source;
-DROP TABLE pa_target CASCADE;
--- some complex joins on the source side
-CREATE TABLE cj_target (tid integer, balance float, val text)
- WITH (autovacuum_enabled=off);
-CREATE TABLE cj_source1 (sid1 integer, scat integer, delta integer)
- WITH (autovacuum_enabled=off);
-CREATE TABLE cj_source2 (sid2 integer, sval text)
- WITH (autovacuum_enabled=off);
-INSERT INTO cj_source1 VALUES (1, 10, 100);
-INSERT INTO cj_source1 VALUES (1, 20, 200);
-INSERT INTO cj_source1 VALUES (2, 20, 300);
-INSERT INTO cj_source1 VALUES (3, 10, 400);
-INSERT INTO cj_source2 VALUES (1, 'initial source2');
-INSERT INTO cj_source2 VALUES (2, 'initial source2');
-INSERT INTO cj_source2 VALUES (3, 'initial source2');
--- source relation is an unaliased join
-MERGE INTO cj_target t
-USING cj_source1 s1
- INNER JOIN cj_source2 s2 ON sid1 = sid2
-ON t.tid = sid1
-WHEN NOT MATCHED THEN
- INSERT VALUES (sid1, delta, sval);
--- try accessing columns from either side of the source join
-MERGE INTO cj_target t
-USING cj_source2 s2
- INNER JOIN cj_source1 s1 ON sid1 = sid2 AND scat = 20
-ON t.tid = sid1
-WHEN NOT MATCHED THEN
- INSERT VALUES (sid2, delta, sval)
-WHEN MATCHED THEN
- DELETE;
--- some simple expressions in INSERT targetlist
-MERGE INTO cj_target t
-USING cj_source2 s2
- INNER JOIN cj_source1 s1 ON sid1 = sid2
-ON t.tid = sid1
-WHEN NOT MATCHED THEN
- INSERT VALUES (sid2, delta + scat, sval)
-WHEN MATCHED THEN
- UPDATE SET val = val || ' updated by merge';
-MERGE INTO cj_target t
-USING cj_source2 s2
- INNER JOIN cj_source1 s1 ON sid1 = sid2 AND scat = 20
-ON t.tid = sid1
-WHEN MATCHED THEN
- UPDATE SET val = val || ' ' || delta::text;
-SELECT * FROM cj_target;
- tid | balance | val
------+---------+----------------------------------
- 3 | 400 | initial source2 updated by merge
- 1 | 220 | initial source2 200
- 1 | 110 | initial source2 200
- 2 | 320 | initial source2 300
-(4 rows)
-
--- try it with an outer join and PlaceHolderVar
-MERGE INTO cj_target t
-USING (SELECT *, 'join input'::text AS phv FROM cj_source1) fj
- FULL JOIN cj_source2 fj2 ON fj.scat = fj2.sid2 * 10
-ON t.tid = fj.scat
-WHEN NOT MATCHED THEN
- INSERT (tid, balance, val) VALUES (fj.scat, fj.delta, fj.phv);
-SELECT * FROM cj_target;
- tid | balance | val
------+---------+----------------------------------
- 3 | 400 | initial source2 updated by merge
- 1 | 220 | initial source2 200
- 1 | 110 | initial source2 200
- 2 | 320 | initial source2 300
- 10 | 100 | join input
- 10 | 400 | join input
- 20 | 200 | join input
- 20 | 300 | join input
- | |
-(9 rows)
-
-ALTER TABLE cj_source1 RENAME COLUMN sid1 TO sid;
-ALTER TABLE cj_source2 RENAME COLUMN sid2 TO sid;
-TRUNCATE cj_target;
-MERGE INTO cj_target t
-USING cj_source1 s1
- INNER JOIN cj_source2 s2 ON s1.sid = s2.sid
-ON t.tid = s1.sid
-WHEN NOT MATCHED THEN
- INSERT VALUES (s2.sid, delta, sval);
-DROP TABLE cj_source2, cj_source1, cj_target;
--- Function scans
-CREATE TABLE fs_target (a int, b int, c text)
- WITH (autovacuum_enabled=off);
-MERGE INTO fs_target t
-USING generate_series(1,100,1) AS id
-ON t.a = id
-WHEN MATCHED THEN
- UPDATE SET b = b + id
-WHEN NOT MATCHED THEN
- INSERT VALUES (id, -1);
-MERGE INTO fs_target t
-USING generate_series(1,100,2) AS id
-ON t.a = id
-WHEN MATCHED THEN
- UPDATE SET b = b + id, c = 'updated '|| id.*::text
-WHEN NOT MATCHED THEN
- INSERT VALUES (id, -1, 'inserted ' || id.*::text);
-SELECT count(*) FROM fs_target;
- count
--------
- 100
-(1 row)
-
-DROP TABLE fs_target;
--- SERIALIZABLE test
--- handled in isolation tests
--- Inheritance-based partitioning
-CREATE TABLE measurement (
- city_id int not null,
- logdate date not null,
- peaktemp int,
- unitsales int
-) WITH (autovacuum_enabled=off);
-CREATE TABLE measurement_y2006m02 (
- CHECK ( logdate >= DATE '2006-02-01' AND logdate < DATE '2006-03-01' )
-) INHERITS (measurement) WITH (autovacuum_enabled=off);
-CREATE TABLE measurement_y2006m03 (
- CHECK ( logdate >= DATE '2006-03-01' AND logdate < DATE '2006-04-01' )
-) INHERITS (measurement) WITH (autovacuum_enabled=off);
-CREATE TABLE measurement_y2007m01 (
- filler text,
- peaktemp int,
- logdate date not null,
- city_id int not null,
- unitsales int
- CHECK ( logdate >= DATE '2007-01-01' AND logdate < DATE '2007-02-01')
-) WITH (autovacuum_enabled=off);
-ALTER TABLE measurement_y2007m01 DROP COLUMN filler;
-ALTER TABLE measurement_y2007m01 INHERIT measurement;
-INSERT INTO measurement VALUES (0, '2005-07-21', 5, 15);
-CREATE OR REPLACE FUNCTION measurement_insert_trigger()
-RETURNS TRIGGER AS $$
-BEGIN
- IF ( NEW.logdate >= DATE '2006-02-01' AND
- NEW.logdate < DATE '2006-03-01' ) THEN
- INSERT INTO measurement_y2006m02 VALUES (NEW.*);
- ELSIF ( NEW.logdate >= DATE '2006-03-01' AND
- NEW.logdate < DATE '2006-04-01' ) THEN
- INSERT INTO measurement_y2006m03 VALUES (NEW.*);
- ELSIF ( NEW.logdate >= DATE '2007-01-01' AND
- NEW.logdate < DATE '2007-02-01' ) THEN
- INSERT INTO measurement_y2007m01 (city_id, logdate, peaktemp, unitsales)
- VALUES (NEW.*);
- ELSE
- RAISE EXCEPTION 'Date out of range. Fix the measurement_insert_trigger() function!';
- END IF;
- RETURN NULL;
-END;
-$$ LANGUAGE plpgsql ;
-CREATE TRIGGER insert_measurement_trigger
- BEFORE INSERT ON measurement
- FOR EACH ROW EXECUTE PROCEDURE measurement_insert_trigger();
-INSERT INTO measurement VALUES (1, '2006-02-10', 35, 10);
-INSERT INTO measurement VALUES (1, '2006-02-16', 45, 20);
-INSERT INTO measurement VALUES (1, '2006-03-17', 25, 10);
-INSERT INTO measurement VALUES (1, '2006-03-27', 15, 40);
-INSERT INTO measurement VALUES (1, '2007-01-15', 10, 10);
-INSERT INTO measurement VALUES (1, '2007-01-17', 10, 10);
-SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate;
- tableoid | city_id | logdate | peaktemp | unitsales
-----------------------+---------+------------+----------+-----------
- measurement | 0 | 07-21-2005 | 5 | 15
- measurement_y2006m02 | 1 | 02-10-2006 | 35 | 10
- measurement_y2006m02 | 1 | 02-16-2006 | 45 | 20
- measurement_y2006m03 | 1 | 03-17-2006 | 25 | 10
- measurement_y2006m03 | 1 | 03-27-2006 | 15 | 40
- measurement_y2007m01 | 1 | 01-15-2007 | 10 | 10
- measurement_y2007m01 | 1 | 01-17-2007 | 10 | 10
-(7 rows)
-
-CREATE TABLE new_measurement (LIKE measurement) WITH (autovacuum_enabled=off);
-INSERT INTO new_measurement VALUES (0, '2005-07-21', 25, 20);
-INSERT INTO new_measurement VALUES (1, '2006-03-01', 20, 10);
-INSERT INTO new_measurement VALUES (1, '2006-02-16', 50, 10);
-INSERT INTO new_measurement VALUES (2, '2006-02-10', 20, 20);
-INSERT INTO new_measurement VALUES (1, '2006-03-27', NULL, NULL);
-INSERT INTO new_measurement VALUES (1, '2007-01-17', NULL, NULL);
-INSERT INTO new_measurement VALUES (1, '2007-01-15', 5, NULL);
-INSERT INTO new_measurement VALUES (1, '2007-01-16', 10, 10);
-BEGIN;
-MERGE INTO ONLY measurement m
- USING new_measurement nm ON
- (m.city_id = nm.city_id and m.logdate=nm.logdate)
-WHEN MATCHED AND nm.peaktemp IS NULL THEN DELETE
-WHEN MATCHED THEN UPDATE
- SET peaktemp = greatest(m.peaktemp, nm.peaktemp),
- unitsales = m.unitsales + coalesce(nm.unitsales, 0)
-WHEN NOT MATCHED THEN INSERT
- (city_id, logdate, peaktemp, unitsales)
- VALUES (city_id, logdate, peaktemp, unitsales);
-SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate, peaktemp;
- tableoid | city_id | logdate | peaktemp | unitsales
-----------------------+---------+------------+----------+-----------
- measurement | 0 | 07-21-2005 | 25 | 35
- measurement_y2006m02 | 1 | 02-10-2006 | 35 | 10
- measurement_y2006m02 | 1 | 02-16-2006 | 45 | 20
- measurement_y2006m02 | 1 | 02-16-2006 | 50 | 10
- measurement_y2006m03 | 1 | 03-01-2006 | 20 | 10
- measurement_y2006m03 | 1 | 03-17-2006 | 25 | 10
- measurement_y2006m03 | 1 | 03-27-2006 | 15 | 40
- measurement_y2006m03 | 1 | 03-27-2006 | |
- measurement_y2007m01 | 1 | 01-15-2007 | 5 |
- measurement_y2007m01 | 1 | 01-15-2007 | 10 | 10
- measurement_y2007m01 | 1 | 01-16-2007 | 10 | 10
- measurement_y2007m01 | 1 | 01-17-2007 | 10 | 10
- measurement_y2007m01 | 1 | 01-17-2007 | |
- measurement_y2006m02 | 2 | 02-10-2006 | 20 | 20
-(14 rows)
-
-ROLLBACK;
-MERGE into measurement m
- USING new_measurement nm ON
- (m.city_id = nm.city_id and m.logdate=nm.logdate)
-WHEN MATCHED AND nm.peaktemp IS NULL THEN DELETE
-WHEN MATCHED THEN UPDATE
- SET peaktemp = greatest(m.peaktemp, nm.peaktemp),
- unitsales = m.unitsales + coalesce(nm.unitsales, 0)
-WHEN NOT MATCHED THEN INSERT
- (city_id, logdate, peaktemp, unitsales)
- VALUES (city_id, logdate, peaktemp, unitsales);
-SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate;
- tableoid | city_id | logdate | peaktemp | unitsales
-----------------------+---------+------------+----------+-----------
- measurement | 0 | 07-21-2005 | 25 | 35
- measurement_y2006m02 | 1 | 02-10-2006 | 35 | 10
- measurement_y2006m02 | 1 | 02-16-2006 | 50 | 30
- measurement_y2006m03 | 1 | 03-01-2006 | 20 | 10
- measurement_y2006m03 | 1 | 03-17-2006 | 25 | 10
- measurement_y2007m01 | 1 | 01-15-2007 | 10 | 10
- measurement_y2007m01 | 1 | 01-16-2007 | 10 | 10
- measurement_y2006m02 | 2 | 02-10-2006 | 20 | 20
-(8 rows)
-
-BEGIN;
-MERGE INTO new_measurement nm
- USING ONLY measurement m ON
- (nm.city_id = m.city_id and nm.logdate=m.logdate)
-WHEN MATCHED THEN DELETE;
-SELECT * FROM new_measurement ORDER BY city_id, logdate;
- city_id | logdate | peaktemp | unitsales
----------+------------+----------+-----------
- 1 | 02-16-2006 | 50 | 10
- 1 | 03-01-2006 | 20 | 10
- 1 | 03-27-2006 | |
- 1 | 01-15-2007 | 5 |
- 1 | 01-16-2007 | 10 | 10
- 1 | 01-17-2007 | |
- 2 | 02-10-2006 | 20 | 20
-(7 rows)
-
-ROLLBACK;
-MERGE INTO new_measurement nm
- USING measurement m ON
- (nm.city_id = m.city_id and nm.logdate=m.logdate)
-WHEN MATCHED THEN DELETE;
-SELECT * FROM new_measurement ORDER BY city_id, logdate;
- city_id | logdate | peaktemp | unitsales
----------+------------+----------+-----------
- 1 | 03-27-2006 | |
- 1 | 01-17-2007 | |
-(2 rows)
-
-DROP TABLE measurement, new_measurement CASCADE;
-NOTICE: drop cascades to 3 other objects
-DETAIL: drop cascades to table measurement_y2006m02
-drop cascades to table measurement_y2006m03
-drop cascades to table measurement_y2007m01
-DROP FUNCTION measurement_insert_trigger();
--- prepare
-RESET SESSION AUTHORIZATION;
-DROP TABLE target, target2;
-DROP TABLE source, source2;
-DROP FUNCTION merge_trigfunc();
-DROP USER regress_merge_privs;
-DROP USER regress_merge_no_privs;
-DROP USER regress_merge_none;
+psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections
+DETAIL: Consistent recovery state has not been yet reached.
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/misc_functions.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/misc_functions.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/misc_functions.out 2024-03-07 14:25:00.332001000 +0000
+++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/misc_functions.out 2024-03-07 14:27:17.083184000 +0000
@@ -1,705 +1,2 @@
--- directory paths and dlsuffix are passed to us in environment variables
-\getenv libdir PG_LIBDIR
-\getenv dlsuffix PG_DLSUFFIX
-\set regresslib :libdir '/regress' :dlsuffix
---
--- num_nulls()
---
-SELECT num_nonnulls(NULL);
- num_nonnulls
---------------
- 0
-(1 row)
-
-SELECT num_nonnulls('1');
- num_nonnulls
---------------
- 1
-(1 row)
-
-SELECT num_nonnulls(NULL::text);
- num_nonnulls
---------------
- 0
-(1 row)
-
-SELECT num_nonnulls(NULL::text, NULL::int);
- num_nonnulls
---------------
- 0
-(1 row)
-
-SELECT num_nonnulls(1, 2, NULL::text, NULL::point, '', int8 '9', 1.0 / NULL);
- num_nonnulls
---------------
- 4
-(1 row)
-
-SELECT num_nonnulls(VARIADIC '{1,2,NULL,3}'::int[]);
- num_nonnulls
---------------
- 3
-(1 row)
-
-SELECT num_nonnulls(VARIADIC '{"1","2","3","4"}'::text[]);
- num_nonnulls
---------------
- 4
-(1 row)
-
-SELECT num_nonnulls(VARIADIC ARRAY(SELECT CASE WHEN i <> 40 THEN i END FROM generate_series(1, 100) i));
- num_nonnulls
---------------
- 99
-(1 row)
-
-SELECT num_nulls(NULL);
- num_nulls
------------
- 1
-(1 row)
-
-SELECT num_nulls('1');
- num_nulls
------------
- 0
-(1 row)
-
-SELECT num_nulls(NULL::text);
- num_nulls
------------
- 1
-(1 row)
-
-SELECT num_nulls(NULL::text, NULL::int);
- num_nulls
------------
- 2
-(1 row)
-
-SELECT num_nulls(1, 2, NULL::text, NULL::point, '', int8 '9', 1.0 / NULL);
- num_nulls
------------
- 3
-(1 row)
-
-SELECT num_nulls(VARIADIC '{1,2,NULL,3}'::int[]);
- num_nulls
------------
- 1
-(1 row)
-
-SELECT num_nulls(VARIADIC '{"1","2","3","4"}'::text[]);
- num_nulls
------------
- 0
-(1 row)
-
-SELECT num_nulls(VARIADIC ARRAY(SELECT CASE WHEN i <> 40 THEN i END FROM generate_series(1, 100) i));
- num_nulls
------------
- 1
-(1 row)
-
--- special cases
-SELECT num_nonnulls(VARIADIC NULL::text[]);
- num_nonnulls
---------------
-
-(1 row)
-
-SELECT num_nonnulls(VARIADIC '{}'::int[]);
- num_nonnulls
---------------
- 0
-(1 row)
-
-SELECT num_nulls(VARIADIC NULL::text[]);
- num_nulls
------------
-
-(1 row)
-
-SELECT num_nulls(VARIADIC '{}'::int[]);
- num_nulls
------------
- 0
-(1 row)
-
--- should fail, one or more arguments is required
-SELECT num_nonnulls();
-ERROR: function num_nonnulls() does not exist
-LINE 1: SELECT num_nonnulls();
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-SELECT num_nulls();
-ERROR: function num_nulls() does not exist
-LINE 1: SELECT num_nulls();
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
---
--- canonicalize_path()
---
-CREATE FUNCTION test_canonicalize_path(text)
- RETURNS text
- AS :'regresslib'
- LANGUAGE C STRICT IMMUTABLE;
-SELECT test_canonicalize_path('/');
- test_canonicalize_path
-------------------------
- /
-(1 row)
-
-SELECT test_canonicalize_path('/./abc/def/');
- test_canonicalize_path
-------------------------
- /abc/def
-(1 row)
-
-SELECT test_canonicalize_path('/./../abc/def');
- test_canonicalize_path
-------------------------
- /abc/def
-(1 row)
-
-SELECT test_canonicalize_path('/./../../abc/def/');
- test_canonicalize_path
-------------------------
- /abc/def
-(1 row)
-
-SELECT test_canonicalize_path('/abc/.././def/ghi');
- test_canonicalize_path
-------------------------
- /def/ghi
-(1 row)
-
-SELECT test_canonicalize_path('/abc/./../def/ghi//');
- test_canonicalize_path
-------------------------
- /def/ghi
-(1 row)
-
-SELECT test_canonicalize_path('/abc/def/../..');
- test_canonicalize_path
-------------------------
- /
-(1 row)
-
-SELECT test_canonicalize_path('/abc/def/../../..');
- test_canonicalize_path
-------------------------
- /
-(1 row)
-
-SELECT test_canonicalize_path('/abc/def/../../../../ghi/jkl');
- test_canonicalize_path
-------------------------
- /ghi/jkl
-(1 row)
-
-SELECT test_canonicalize_path('.');
- test_canonicalize_path
-------------------------
- .
-(1 row)
-
-SELECT test_canonicalize_path('./');
- test_canonicalize_path
-------------------------
- .
-(1 row)
-
-SELECT test_canonicalize_path('./abc/..');
- test_canonicalize_path
-------------------------
- .
-(1 row)
-
-SELECT test_canonicalize_path('abc/../');
- test_canonicalize_path
-------------------------
- .
-(1 row)
-
-SELECT test_canonicalize_path('abc/../def');
- test_canonicalize_path
-------------------------
- def
-(1 row)
-
-SELECT test_canonicalize_path('..');
- test_canonicalize_path
-------------------------
- ..
-(1 row)
-
-SELECT test_canonicalize_path('../abc/def');
- test_canonicalize_path
-------------------------
- ../abc/def
-(1 row)
-
-SELECT test_canonicalize_path('../abc/..');
- test_canonicalize_path
-------------------------
- ..
-(1 row)
-
-SELECT test_canonicalize_path('../abc/../def');
- test_canonicalize_path
-------------------------
- ../def
-(1 row)
-
-SELECT test_canonicalize_path('../abc/../../def/ghi');
- test_canonicalize_path
-------------------------
- ../../def/ghi
-(1 row)
-
-SELECT test_canonicalize_path('./abc/./def/.');
- test_canonicalize_path
-------------------------
- abc/def
-(1 row)
-
-SELECT test_canonicalize_path('./abc/././def/.');
- test_canonicalize_path
-------------------------
- abc/def
-(1 row)
-
-SELECT test_canonicalize_path('./abc/./def/.././ghi/../../../jkl/mno');
- test_canonicalize_path
-------------------------
- ../jkl/mno
-(1 row)
-
---
--- pg_log_backend_memory_contexts()
---
--- Memory contexts are logged and they are not returned to the function.
--- Furthermore, their contents can vary depending on the timing. However,
--- we can at least verify that the code doesn't fail, and that the
--- permissions are set properly.
---
-SELECT pg_log_backend_memory_contexts(pg_backend_pid());
- pg_log_backend_memory_contexts
---------------------------------
- t
-(1 row)
-
-SELECT pg_log_backend_memory_contexts(pid) FROM pg_stat_activity
- WHERE backend_type = 'checkpointer';
- pg_log_backend_memory_contexts
---------------------------------
- t
-(1 row)
-
-CREATE ROLE regress_log_memory;
-SELECT has_function_privilege('regress_log_memory',
- 'pg_log_backend_memory_contexts(integer)', 'EXECUTE'); -- no
- has_function_privilege
------------------------
- f
-(1 row)
-
-GRANT EXECUTE ON FUNCTION pg_log_backend_memory_contexts(integer)
- TO regress_log_memory;
-SELECT has_function_privilege('regress_log_memory',
- 'pg_log_backend_memory_contexts(integer)', 'EXECUTE'); -- yes
- has_function_privilege
------------------------
- t
-(1 row)
-
-SET ROLE regress_log_memory;
-SELECT pg_log_backend_memory_contexts(pg_backend_pid());
- pg_log_backend_memory_contexts
---------------------------------
- t
-(1 row)
-
-RESET ROLE;
-REVOKE EXECUTE ON FUNCTION pg_log_backend_memory_contexts(integer)
- FROM regress_log_memory;
-DROP ROLE regress_log_memory;
---
--- Test some built-in SRFs
---
--- The outputs of these are variable, so we can't just print their results
--- directly, but we can at least verify that the code doesn't fail.
---
-select setting as segsize
-from pg_settings where name = 'wal_segment_size'
-\gset
-select count(*) > 0 as ok from pg_ls_waldir();
- ok
----
- t
-(1 row)
-
--- Test ProjectSet as well as FunctionScan
-select count(*) > 0 as ok from (select pg_ls_waldir()) ss;
- ok
----
- t
-(1 row)
-
--- Test not-run-to-completion cases.
-select * from pg_ls_waldir() limit 0;
- name | size | modification
-------+------+--------------
-(0 rows)
-
-select count(*) > 0 as ok from (select * from pg_ls_waldir() limit 1) ss;
- ok
----
- t
-(1 row)
-
-select (w).size = :segsize as ok
-from (select pg_ls_waldir() w) ss where length((w).name) = 24 limit 1;
- ok
----
- t
-(1 row)
-
-select count(*) >= 0 as ok from pg_ls_archive_statusdir();
- ok
----
- t
-(1 row)
-
--- pg_read_file()
-select length(pg_read_file('postmaster.pid')) > 20;
- ?column?
----------
- t
-(1 row)
-
-select length(pg_read_file('postmaster.pid', 1, 20));
- length
--------
- 20
-(1 row)
-
--- Test missing_ok
-select pg_read_file('does not exist'); -- error
-ERROR: could not open file "does not exist" for reading: No such file or directory
-select pg_read_file('does not exist', true) IS NULL; -- ok
- ?column?
----------
- t
-(1 row)
-
--- Test invalid argument
-select pg_read_file('does not exist', 0, -1); -- error
-ERROR: requested length cannot be negative
-select pg_read_file('does not exist', 0, -1, true); -- error
-ERROR: requested length cannot be negative
--- pg_read_binary_file()
-select length(pg_read_binary_file('postmaster.pid')) > 20;
- ?column?
----------
- t
-(1 row)
-
-select length(pg_read_binary_file('postmaster.pid', 1, 20));
- length
--------
- 20
-(1 row)
-
--- Test missing_ok
-select pg_read_binary_file('does not exist'); -- error
-ERROR: could not open file "does not exist" for reading: No such file or directory
-select pg_read_binary_file('does not exist', true) IS NULL; -- ok
- ?column?
----------
- t
-(1 row)
-
--- Test invalid argument
-select pg_read_binary_file('does not exist', 0, -1); -- error
-ERROR: requested length cannot be negative
-select pg_read_binary_file('does not exist', 0, -1, true); -- error
-ERROR: requested length cannot be negative
--- pg_stat_file()
-select size > 20, isdir from pg_stat_file('postmaster.pid');
- ?column? | isdir
-----------+-------
- t | f
-(1 row)
-
--- pg_ls_dir()
-select * from (select pg_ls_dir('.') a) a where a = 'base' limit 1;
- a
------
- base
-(1 row)
-
--- Test missing_ok (second argument)
-select pg_ls_dir('does not exist', false, false); -- error
-ERROR: could not open directory "does not exist": No such file or directory
-select pg_ls_dir('does not exist', true, false); -- ok
- pg_ls_dir
------------
-(0 rows)
-
--- Test include_dot_dirs (third argument)
-select count(*) = 1 as dot_found
- from pg_ls_dir('.', false, true) as ls where ls = '.';
- dot_found
------------
- t
-(1 row)
-
-select count(*) = 1 as dot_found
- from pg_ls_dir('.', false, false) as ls where ls = '.';
- dot_found
------------
- f
-(1 row)
-
--- pg_timezone_names()
-select * from (select (pg_timezone_names()).name) ptn where name='UTC' limit 1;
- name
------
- UTC
-(1 row)
-
--- pg_tablespace_databases()
-select count(*) > 0 from
- (select pg_tablespace_databases(oid) as pts from pg_tablespace
- where spcname = 'pg_default') pts
- join pg_database db on pts.pts = db.oid;
- ?column?
----------
- t
-(1 row)
-
---
--- Test replication slot directory functions
---
-CREATE ROLE regress_slot_dir_funcs;
--- Not available by default.
-SELECT has_function_privilege('regress_slot_dir_funcs',
- 'pg_ls_logicalsnapdir()', 'EXECUTE');
- has_function_privilege
------------------------
- f
-(1 row)
-
-SELECT has_function_privilege('regress_slot_dir_funcs',
- 'pg_ls_logicalmapdir()', 'EXECUTE');
- has_function_privilege
------------------------
- f
-(1 row)
-
-SELECT has_function_privilege('regress_slot_dir_funcs',
- 'pg_ls_replslotdir(text)', 'EXECUTE');
- has_function_privilege
------------------------
- f
-(1 row)
-
-GRANT pg_monitor TO regress_slot_dir_funcs;
--- Role is now part of pg_monitor, so these are available.
-SELECT has_function_privilege('regress_slot_dir_funcs',
- 'pg_ls_logicalsnapdir()', 'EXECUTE');
- has_function_privilege
------------------------
- t
-(1 row)
-
-SELECT has_function_privilege('regress_slot_dir_funcs',
- 'pg_ls_logicalmapdir()', 'EXECUTE');
- has_function_privilege
------------------------
- t
-(1 row)
-
-SELECT has_function_privilege('regress_slot_dir_funcs',
- 'pg_ls_replslotdir(text)', 'EXECUTE');
- has_function_privilege
------------------------
- t
-(1 row)
-
-DROP ROLE regress_slot_dir_funcs;
---
--- Test adding a support function to a subject function
---
-CREATE FUNCTION my_int_eq(int, int) RETURNS bool
- LANGUAGE internal STRICT IMMUTABLE PARALLEL SAFE
- AS $$int4eq$$;
--- By default, planner does not think that's selective
-EXPLAIN (COSTS OFF)
-SELECT * FROM tenk1 a JOIN tenk1 b ON a.unique1 = b.unique1
-WHERE my_int_eq(a.unique2, 42);
- QUERY PLAN
----------------------------------------------
- Hash Join
- Hash Cond: (b.unique1 = a.unique1)
- -> Seq Scan on tenk1 b
- -> Hash
- -> Seq Scan on tenk1 a
- Filter: my_int_eq(unique2, 42)
-(6 rows)
-
--- With support function that knows it's int4eq, we get a different plan
-CREATE FUNCTION test_support_func(internal)
- RETURNS internal
- AS :'regresslib', 'test_support_func'
- LANGUAGE C STRICT;
-ALTER FUNCTION my_int_eq(int, int) SUPPORT test_support_func;
-EXPLAIN (COSTS OFF)
-SELECT * FROM tenk1 a JOIN tenk1 b ON a.unique1 = b.unique1
-WHERE my_int_eq(a.unique2, 42);
- QUERY PLAN
-------------------------------------------------
- Nested Loop
- -> Seq Scan on tenk1 a
- Filter: my_int_eq(unique2, 42)
- -> Index Scan using tenk1_unique1 on tenk1 b
- Index Cond: (unique1 = a.unique1)
-(5 rows)
-
--- Also test non-default rowcount estimate
-CREATE FUNCTION my_gen_series(int, int) RETURNS SETOF integer
- LANGUAGE internal STRICT IMMUTABLE PARALLEL SAFE
- AS $$generate_series_int4$$
- SUPPORT test_support_func;
-EXPLAIN (COSTS OFF)
-SELECT * FROM tenk1 a JOIN my_gen_series(1,1000) g ON a.unique1 = g;
- QUERY PLAN
----------------------------------------
- Hash Join
- Hash Cond: (g.g = a.unique1)
- -> Function Scan on my_gen_series g
- -> Hash
- -> Seq Scan on tenk1 a
-(5 rows)
-
-EXPLAIN (COSTS OFF)
-SELECT * FROM tenk1 a JOIN my_gen_series(1,10) g ON a.unique1 = g;
- QUERY PLAN
-------------------------------------------------
- Nested Loop
- -> Function Scan on my_gen_series g
- -> Index Scan using tenk1_unique1 on tenk1 a
- Index Cond: (unique1 = g.g)
-(4 rows)
-
--- Test functions for control data
-SELECT count(*) > 0 AS ok FROM pg_control_checkpoint();
- ok
----
- t
-(1 row)
-
-SELECT count(*) > 0 AS ok FROM pg_control_init();
- ok
----
- t
-(1 row)
-
-SELECT count(*) > 0 AS ok FROM pg_control_recovery();
- ok
----
- t
-(1 row)
-
-SELECT count(*) > 0 AS ok FROM pg_control_system();
- ok
----
- t
-(1 row)
-
--- pg_split_walfile_name, pg_walfile_name & pg_walfile_name_offset
-SELECT * FROM pg_split_walfile_name(NULL);
- segment_number | timeline_id
-----------------+-------------
- |
-(1 row)
-
-SELECT * FROM pg_split_walfile_name('invalid');
-ERROR: invalid WAL file name "invalid"
-SELECT segment_number > 0 AS ok_segment_number, timeline_id
- FROM pg_split_walfile_name('000000010000000100000000');
- ok_segment_number | timeline_id
--------------------+-------------
- t | 1
-(1 row)
-
-SELECT segment_number > 0 AS ok_segment_number, timeline_id
- FROM pg_split_walfile_name('ffffffFF00000001000000af');
- ok_segment_number | timeline_id
--------------------+-------------
- t | 4294967295
-(1 row)
-
-SELECT setting::int8 AS segment_size
-FROM pg_settings
-WHERE name = 'wal_segment_size'
-\gset
-SELECT segment_number, file_offset
-FROM pg_walfile_name_offset('0/0'::pg_lsn + :segment_size),
- pg_split_walfile_name(file_name);
- segment_number | file_offset
-----------------+-------------
- 1 | 0
-(1 row)
-
-SELECT segment_number, file_offset
-FROM pg_walfile_name_offset('0/0'::pg_lsn + :segment_size + 1),
- pg_split_walfile_name(file_name);
- segment_number | file_offset
-----------------+-------------
- 1 | 1
-(1 row)
-
-SELECT segment_number, file_offset = :segment_size - 1
-FROM pg_walfile_name_offset('0/0'::pg_lsn + :segment_size - 1),
- pg_split_walfile_name(file_name);
- segment_number | ?column?
-----------------+----------
- 0 | t
-(1 row)
-
--- test stratnum support functions
-SELECT gist_stratnum_identity(3::smallint);
- gist_stratnum_identity
------------------------
- 3
-(1 row)
-
-SELECT gist_stratnum_identity(18::smallint);
- gist_stratnum_identity
------------------------
- 18
-(1 row)
-
--- pg_current_logfile
-CREATE ROLE regress_current_logfile;
--- not available by default
-SELECT has_function_privilege('regress_current_logfile',
- 'pg_current_logfile()', 'EXECUTE');
- has_function_privilege
------------------------
- f
-(1 row)
-
-GRANT pg_monitor TO regress_current_logfile;
--- role has privileges of pg_monitor and can execute the function
-SELECT has_function_privilege('regress_current_logfile',
- 'pg_current_logfile()', 'EXECUTE');
- has_function_privilege
------------------------
- t
-(1 row)
-
-DROP ROLE regress_current_logfile;
+psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections
+DETAIL: Consistent recovery state has not been yet reached.
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/sysviews.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/sysviews.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/sysviews.out 2024-03-07 14:25:00.334062000 +0000
+++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/sysviews.out 2024-03-07 14:27:17.079852000 +0000
@@ -1,187 +1,2 @@
---
--- Test assorted system views
---
--- This test is mainly meant to provide some code coverage for the
--- set-returning functions that underlie certain system views.
--- The output of most of these functions is very environment-dependent,
--- so our ability to test with fixed expected output is pretty limited;
--- but even a trivial check of count(*) will exercise the normal code path
--- through the SRF.
-select count(*) >= 0 as ok from pg_available_extension_versions;
- ok
----
- t
-(1 row)
-
-select count(*) >= 0 as ok from pg_available_extensions;
- ok
----
- t
-(1 row)
-
--- The entire output of pg_backend_memory_contexts is not stable,
--- we test only the existence and basic condition of TopMemoryContext.
-select name, ident, parent, level, total_bytes >= free_bytes
- from pg_backend_memory_contexts where level = 0;
- name | ident | parent | level | ?column?
-------------------+-------+--------+-------+----------
- TopMemoryContext | | | 0 | t
-(1 row)
-
--- At introduction, pg_config had 23 entries; it may grow
-select count(*) > 20 as ok from pg_config;
- ok
----
- t
-(1 row)
-
--- We expect no cursors in this test; see also portals.sql
-select count(*) = 0 as ok from pg_cursors;
- ok
----
- t
-(1 row)
-
-select count(*) >= 0 as ok from pg_file_settings;
- ok
----
- t
-(1 row)
-
--- There will surely be at least one rule, with no errors.
-select count(*) > 0 as ok, count(*) FILTER (WHERE error IS NOT NULL) = 0 AS no_err
- from pg_hba_file_rules;
- ok | no_err
-----+--------
- t | t
-(1 row)
-
--- There may be no rules, and there should be no errors.
-select count(*) >= 0 as ok, count(*) FILTER (WHERE error IS NOT NULL) = 0 AS no_err
- from pg_ident_file_mappings;
- ok | no_err
-----+--------
- t | t
-(1 row)
-
--- There will surely be at least one active lock
-select count(*) > 0 as ok from pg_locks;
- ok
----
- t
-(1 row)
-
--- We expect no prepared statements in this test; see also prepare.sql
-select count(*) = 0 as ok from pg_prepared_statements;
- ok
----
- t
-(1 row)
-
--- See also prepared_xacts.sql
-select count(*) >= 0 as ok from pg_prepared_xacts;
- ok
----
- t
-(1 row)
-
--- There will surely be at least one SLRU cache
-select count(*) > 0 as ok from pg_stat_slru;
- ok
----
- t
-(1 row)
-
--- There must be only one record
-select count(*) = 1 as ok from pg_stat_wal;
- ok
----
- t
-(1 row)
-
--- We expect no walreceiver running in this test
-select count(*) = 0 as ok from pg_stat_wal_receiver;
- ok
----
- t
-(1 row)
-
--- This is to record the prevailing planner enable_foo settings during
--- a regression test run.
-select name, setting from pg_settings where name like 'enable%';
- name | setting
---------------------------------+---------
- enable_async_append | on
- enable_bitmapscan | on
- enable_gathermerge | on
- enable_group_by_reordering | on
- enable_hashagg | on
- enable_hashjoin | on
- enable_incremental_sort | on
- enable_indexonlyscan | on
- enable_indexscan | on
- enable_material | on
- enable_memoize | on
- enable_mergejoin | on
- enable_nestloop | on
- enable_parallel_append | on
- enable_parallel_hash | on
- enable_partition_pruning | on
- enable_partitionwise_aggregate | off
- enable_partitionwise_join | off
- enable_presorted_aggregate | on
- enable_self_join_removal | on
- enable_seqscan | on
- enable_sort | on
- enable_tidscan | on
-(23 rows)
-
--- There are always wait event descriptions for various types.
-select type, count(*) > 0 as ok FROM pg_wait_events
- group by type order by type COLLATE "C";
- type | ok
------------+----
- Activity | t
- BufferPin | t
- Client | t
- Extension | t
- IO | t
- IPC | t
- LWLock | t
- Lock | t
- Timeout | t
-(9 rows)
-
--- Test that the pg_timezone_names and pg_timezone_abbrevs views are
--- more-or-less working. We can't test their contents in any great detail
--- without the outputs changing anytime IANA updates the underlying data,
--- but it seems reasonable to expect at least one entry per major meridian.
--- (At the time of writing, the actual counts are around 38 because of
--- zones using fractional GMT offsets, so this is a pretty loose test.)
-select count(distinct utc_offset) >= 24 as ok from pg_timezone_names;
- ok
----
- t
-(1 row)
-
-select count(distinct utc_offset) >= 24 as ok from pg_timezone_abbrevs;
- ok
----
- t
-(1 row)
-
--- Let's check the non-default timezone abbreviation sets, too
-set timezone_abbreviations = 'Australia';
-select count(distinct utc_offset) >= 24 as ok from pg_timezone_abbrevs;
- ok
----
- t
-(1 row)
-
-set timezone_abbreviations = 'India';
-select count(distinct utc_offset) >= 24 as ok from pg_timezone_abbrevs;
- ok
----
- t
-(1 row)
-
+psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections
+DETAIL: Consistent recovery state has not been yet reached.
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/tsrf.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tsrf.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/tsrf.out 2024-03-07 14:25:00.334436000 +0000
+++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tsrf.out 2024-03-07 14:27:17.082322000 +0000
@@ -1,712 +1,2 @@
---
--- tsrf - targetlist set returning function tests
---
--- simple srf
-SELECT generate_series(1, 3);
- generate_series
------------------
- 1
- 2
- 3
-(3 rows)
-
--- parallel iteration
-SELECT generate_series(1, 3), generate_series(3,5);
- generate_series | generate_series
------------------+-----------------
- 1 | 3
- 2 | 4
- 3 | 5
-(3 rows)
-
--- parallel iteration, different number of rows
-SELECT generate_series(1, 2), generate_series(1,4);
- generate_series | generate_series
------------------+-----------------
- 1 | 1
- 2 | 2
- | 3
- | 4
-(4 rows)
-
--- srf, with SRF argument
-SELECT generate_series(1, generate_series(1, 3));
- generate_series
------------------
- 1
- 1
- 2
- 1
- 2
- 3
-(6 rows)
-
--- but we've traditionally rejected the same in FROM
-SELECT * FROM generate_series(1, generate_series(1, 3));
-ERROR: set-returning functions must appear at top level of FROM
-LINE 1: SELECT * FROM generate_series(1, generate_series(1, 3));
- ^
--- srf, with two SRF arguments
-SELECT generate_series(generate_series(1,3), generate_series(2, 4));
- generate_series
------------------
- 1
- 2
- 2
- 3
- 3
- 4
-(6 rows)
-
--- check proper nesting of SRFs in different expressions
-explain (verbose, costs off)
-SELECT generate_series(1, generate_series(1, 3)), generate_series(2, 4);
- QUERY PLAN
---------------------------------------------------------------------------------
- ProjectSet
- Output: generate_series(1, (generate_series(1, 3))), (generate_series(2, 4))
- -> ProjectSet
- Output: generate_series(1, 3), generate_series(2, 4)
- -> Result
-(5 rows)
-
-SELECT generate_series(1, generate_series(1, 3)), generate_series(2, 4);
- generate_series | generate_series
------------------+-----------------
- 1 | 2
- 1 | 3
- 2 | 3
- 1 | 4
- 2 | 4
- 3 | 4
-(6 rows)
-
-CREATE TABLE few(id int, dataa text, datab text);
-INSERT INTO few VALUES(1, 'a', 'foo'),(2, 'a', 'bar'),(3, 'b', 'bar');
--- SRF with a provably-dummy relation
-explain (verbose, costs off)
-SELECT unnest(ARRAY[1, 2]) FROM few WHERE false;
- QUERY PLAN
--------------------------------------
- ProjectSet
- Output: unnest('{1,2}'::integer[])
- -> Result
- One-Time Filter: false
-(4 rows)
-
-SELECT unnest(ARRAY[1, 2]) FROM few WHERE false;
- unnest
--------
-(0 rows)
-
--- SRF shouldn't prevent upper query from recognizing lower as dummy
-explain (verbose, costs off)
-SELECT * FROM few f1,
- (SELECT unnest(ARRAY[1,2]) FROM few f2 WHERE false OFFSET 0) ss;
- QUERY PLAN
------------------------------------------------
- Result
- Output: f1.id, f1.dataa, f1.datab, ss.unnest
- One-Time Filter: false
-(3 rows)
-
-SELECT * FROM few f1,
- (SELECT unnest(ARRAY[1,2]) FROM few f2 WHERE false OFFSET 0) ss;
- id | dataa | datab | unnest
-----+-------+-------+--------
-(0 rows)
-
--- SRF output order of sorting is maintained, if SRF is not referenced
-SELECT few.id, generate_series(1,3) g FROM few ORDER BY id DESC;
- id | g
-----+---
- 3 | 1
- 3 | 2
- 3 | 3
- 2 | 1
- 2 | 2
- 2 | 3
- 1 | 1
- 1 | 2
- 1 | 3
-(9 rows)
-
--- but SRFs can be referenced in sort
-SELECT few.id, generate_series(1,3) g FROM few ORDER BY id, g DESC;
- id | g
-----+---
- 1 | 3
- 1 | 2
- 1 | 1
- 2 | 3
- 2 | 2
- 2 | 1
- 3 | 3
- 3 | 2
- 3 | 1
-(9 rows)
-
-SELECT few.id, generate_series(1,3) g FROM few ORDER BY id, generate_series(1,3) DESC;
- id | g
-----+---
- 1 | 3
- 1 | 2
- 1 | 1
- 2 | 3
- 2 | 2
- 2 | 1
- 3 | 3
- 3 | 2
- 3 | 1
-(9 rows)
-
--- it's weird to have ORDER BYs that increase the number of results
-SELECT few.id FROM few ORDER BY id, generate_series(1,3) DESC;
- id
----
- 1
- 1
- 1
- 2
- 2
- 2
- 3
- 3
- 3
-(9 rows)
-
--- SRFs are computed after aggregation
-SET enable_hashagg TO 0; -- stable output order
-SELECT few.dataa, count(*), min(id), max(id), unnest('{1,1,3}'::int[]) FROM few WHERE few.id = 1 GROUP BY few.dataa;
- dataa | count | min | max | unnest
--------+-------+-----+-----+--------
- a | 1 | 1 | 1 | 1
- a | 1 | 1 | 1 | 1
- a | 1 | 1 | 1 | 3
-(3 rows)
-
--- unless referenced in GROUP BY clause
-SELECT few.dataa, count(*), min(id), max(id), unnest('{1,1,3}'::int[]) FROM few WHERE few.id = 1 GROUP BY few.dataa, unnest('{1,1,3}'::int[]);
- dataa | count | min | max | unnest
--------+-------+-----+-----+--------
- a | 2 | 1 | 1 | 1
- a | 1 | 1 | 1 | 3
-(2 rows)
-
-SELECT few.dataa, count(*), min(id), max(id), unnest('{1,1,3}'::int[]) FROM few WHERE few.id = 1 GROUP BY few.dataa, 5;
- dataa | count | min | max | unnest
--------+-------+-----+-----+--------
- a | 2 | 1 | 1 | 1
- a | 1 | 1 | 1 | 3
-(2 rows)
-
-RESET enable_hashagg;
--- check HAVING works when GROUP BY does [not] reference SRF output
-SELECT dataa, generate_series(1,1), count(*) FROM few GROUP BY 1 HAVING count(*) > 1;
- dataa | generate_series | count
--------+-----------------+-------
- a | 1 | 2
-(1 row)
-
-SELECT dataa, generate_series(1,1), count(*) FROM few GROUP BY 1, 2 HAVING count(*) > 1;
- dataa | generate_series | count
--------+-----------------+-------
- a | 1 | 2
-(1 row)
-
--- it's weird to have GROUP BYs that increase the number of results
-SELECT few.dataa, count(*) FROM few WHERE dataa = 'a' GROUP BY few.dataa ORDER BY 2;
- dataa | count
--------+-------
- a | 2
-(1 row)
-
-SELECT few.dataa, count(*) FROM few WHERE dataa = 'a' GROUP BY few.dataa, unnest('{1,1,3}'::int[]) ORDER BY 2;
- dataa | count
--------+-------
- a | 2
- a | 4
-(2 rows)
-
--- SRFs are not allowed if they'd need to be conditionally executed
-SELECT q1, case when q1 > 0 then generate_series(1,3) else 0 end FROM int8_tbl;
-ERROR: set-returning functions are not allowed in CASE
-LINE 1: SELECT q1, case when q1 > 0 then generate_series(1,3) else 0...
- ^
-HINT: You might be able to move the set-returning function into a LATERAL FROM item.
-SELECT q1, coalesce(generate_series(1,3), 0) FROM int8_tbl;
-ERROR: set-returning functions are not allowed in COALESCE
-LINE 1: SELECT q1, coalesce(generate_series(1,3), 0) FROM int8_tbl;
- ^
-HINT: You might be able to move the set-returning function into a LATERAL FROM item.
--- SRFs are not allowed in aggregate arguments
-SELECT min(generate_series(1, 3)) FROM few;
-ERROR: aggregate function calls cannot contain set-returning function calls
-LINE 1: SELECT min(generate_series(1, 3)) FROM few;
- ^
-HINT: You might be able to move the set-returning function into a LATERAL FROM item.
--- ... unless they're within a sub-select
-SELECT sum((3 = ANY(SELECT generate_series(1,4)))::int);
- sum
------
- 1
-(1 row)
-
-SELECT sum((3 = ANY(SELECT lag(x) over(order by x)
- FROM generate_series(1,4) x))::int);
- sum
------
- 1
-(1 row)
-
--- SRFs are not allowed in window function arguments, either
-SELECT min(generate_series(1, 3)) OVER() FROM few;
-ERROR: window function calls cannot contain set-returning function calls
-LINE 1: SELECT min(generate_series(1, 3)) OVER() FROM few;
- ^
-HINT: You might be able to move the set-returning function into a LATERAL FROM item.
--- SRFs are normally computed after window functions
-SELECT id,lag(id) OVER(), count(*) OVER(), generate_series(1,3) FROM few;
- id | lag | count | generate_series
-----+-----+-------+-----------------
- 1 | | 3 | 1
- 1 | | 3 | 2
- 1 | | 3 | 3
- 2 | 1 | 3 | 1
- 2 | 1 | 3 | 2
- 2 | 1 | 3 | 3
- 3 | 2 | 3 | 1
- 3 | 2 | 3 | 2
- 3 | 2 | 3 | 3
-(9 rows)
-
--- unless referencing SRFs
-SELECT SUM(count(*)) OVER(PARTITION BY generate_series(1,3) ORDER BY generate_series(1,3)), generate_series(1,3) g FROM few GROUP BY g;
- sum | g
------+---
- 3 | 1
- 3 | 2
- 3 | 3
-(3 rows)
-
--- sorting + grouping
-SELECT few.dataa, count(*), min(id), max(id), generate_series(1,3) FROM few GROUP BY few.dataa ORDER BY 5, 1;
- dataa | count | min | max | generate_series
--------+-------+-----+-----+-----------------
- a | 2 | 1 | 2 | 1
- b | 1 | 3 | 3 | 1
- a | 2 | 1 | 2 | 2
- b | 1 | 3 | 3 | 2
- a | 2 | 1 | 2 | 3
- b | 1 | 3 | 3 | 3
-(6 rows)
-
--- grouping sets are a bit special, they produce NULLs in columns not actually NULL
-set enable_hashagg = false;
-SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab);
- dataa | b | g | count
--------+-----+---+-------
- a | bar | 1 | 1
- a | bar | 2 | 1
- a | foo | 1 | 1
- a | foo | 2 | 1
- a | | 1 | 2
- a | | 2 | 2
- b | bar | 1 | 1
- b | bar | 2 | 1
- b | | 1 | 1
- b | | 2 | 1
- | | 1 | 3
- | | 2 | 3
- | bar | 1 | 2
- | bar | 2 | 2
- | foo | 1 | 1
- | foo | 2 | 1
-(16 rows)
-
-SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab) ORDER BY dataa;
- dataa | b | g | count
--------+-----+---+-------
- a | bar | 1 | 1
- a | bar | 2 | 1
- a | foo | 1 | 1
- a | foo | 2 | 1
- a | | 1 | 2
- a | | 2 | 2
- b | bar | 1 | 1
- b | bar | 2 | 1
- b | | 1 | 1
- b | | 2 | 1
- | | 1 | 3
- | | 2 | 3
- | bar | 1 | 2
- | bar | 2 | 2
- | foo | 1 | 1
- | foo | 2 | 1
-(16 rows)
-
-SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab) ORDER BY g;
- dataa | b | g | count
--------+-----+---+-------
- a | bar | 1 | 1
- a | foo | 1 | 1
- a | | 1 | 2
- b | bar | 1 | 1
- b | | 1 | 1
- | | 1 | 3
- | bar | 1 | 2
- | foo | 1 | 1
- | foo | 2 | 1
- a | bar | 2 | 1
- b | | 2 | 1
- a | foo | 2 | 1
- | bar | 2 | 2
- a | | 2 | 2
- | | 2 | 3
- b | bar | 2 | 1
-(16 rows)
-
-SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g);
- dataa | b | g | count
--------+-----+---+-------
- a | bar | 1 | 1
- a | bar | 2 | 1
- a | bar | | 2
- a | foo | 1 | 1
- a | foo | 2 | 1
- a | foo | | 2
- a | | | 4
- b | bar | 1 | 1
- b | bar | 2 | 1
- b | bar | | 2
- b | | | 2
- | | | 6
- | bar | 1 | 2
- | bar | 2 | 2
- | bar | | 4
- | foo | 1 | 1
- | foo | 2 | 1
- | foo | | 2
- a | | 1 | 2
- b | | 1 | 1
- | | 1 | 3
- a | | 2 | 2
- b | | 2 | 1
- | | 2 | 3
-(24 rows)
-
-SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g) ORDER BY dataa;
- dataa | b | g | count
--------+-----+---+------- - a | foo | | 2 - a | | | 4 - a | | 2 | 2 - a | bar | 1 | 1 - a | bar | 2 | 1 - a | bar | | 2 - a | foo | 1 | 1 - a | foo | 2 | 1 - a | | 1 | 2 - b | bar | 1 | 1 - b | | | 2 - b | | 1 | 1 - b | bar | 2 | 1 - b | bar | | 2 - b | | 2 | 1 - | | 2 | 3 - | | | 6 - | bar | 1 | 2 - | bar | 2 | 2 - | bar | | 4 - | foo | 1 | 1 - | foo | 2 | 1 - | foo | | 2 - | | 1 | 3 -(24 rows) - -SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g) ORDER BY g; - dataa | b | g | count --------+-----+---+------- - a | bar | 1 | 1 - a | foo | 1 | 1 - b | bar | 1 | 1 - | bar | 1 | 2 - | foo | 1 | 1 - a | | 1 | 2 - b | | 1 | 1 - | | 1 | 3 - a | | 2 | 2 - b | | 2 | 1 - | bar | 2 | 2 - | | 2 | 3 - | foo | 2 | 1 - a | bar | 2 | 1 - a | foo | 2 | 1 - b | bar | 2 | 1 - a | | | 4 - b | bar | | 2 - b | | | 2 - | | | 6 - a | foo | | 2 - a | bar | | 2 - | bar | | 4 - | foo | | 2 -(24 rows) - -reset enable_hashagg; --- case with degenerate ORDER BY -explain (verbose, costs off) -select 'foo' as f, generate_series(1,2) as g from few order by 1; - QUERY PLAN ----------------------------------------------- - ProjectSet - Output: 'foo'::text, generate_series(1, 2) - -> Seq Scan on public.few - Output: id, dataa, datab -(4 rows) - -select 'foo' as f, generate_series(1,2) as g from few order by 1; - f | g ------+--- - foo | 1 - foo | 2 - foo | 1 - foo | 2 - foo | 1 - foo | 2 -(6 rows) - --- data modification -CREATE TABLE fewmore AS SELECT generate_series(1,3) AS data; -INSERT INTO fewmore VALUES(generate_series(4,5)); -SELECT * FROM fewmore; - data ------- - 1 - 2 - 3 - 4 - 5 -(5 rows) - --- SRFs are not allowed in UPDATE (they once were, but it was nonsense) -UPDATE fewmore SET data = generate_series(4,9); -ERROR: set-returning functions are not allowed in UPDATE -LINE 1: UPDATE fewmore SET data = generate_series(4,9); - ^ --- SRFs are not allowed in RETURNING -INSERT INTO fewmore VALUES(1) RETURNING generate_series(1,3); -ERROR: set-returning functions are not allowed in RETURNING -LINE 1: INSERT INTO fewmore VALUES(1) RETURNING generate_series(1,3)... - ^ --- nor standalone VALUES (but surely this is a bug?) -VALUES(1, generate_series(1,2)); -ERROR: set-returning functions are not allowed in VALUES -LINE 1: VALUES(1, generate_series(1,2)); - ^ --- We allow tSRFs that are not at top level -SELECT int4mul(generate_series(1,2), 10); - int4mul ---------- - 10 - 20 -(2 rows) - -SELECT generate_series(1,3) IS DISTINCT FROM 2; - ?column? ----------- - t - f - t -(3 rows) - --- but SRFs in function RTEs must be at top level (annoying restriction) -SELECT * FROM int4mul(generate_series(1,2), 10); -ERROR: set-returning functions must appear at top level of FROM -LINE 1: SELECT * FROM int4mul(generate_series(1,2), 10); - ^ --- DISTINCT ON is evaluated before tSRF evaluation if SRF is not --- referenced either in ORDER BY or in the DISTINCT ON list. The ORDER --- BY reference can be implicitly generated, if there's no other ORDER BY. 
--- implicit reference (via implicit ORDER) to all columns -SELECT DISTINCT ON (a) a, b, generate_series(1,3) g -FROM (VALUES (3, 2), (3,1), (1,1), (1,4), (5,3), (5,1)) AS t(a, b); - a | b | g ----+---+--- - 1 | 1 | 1 - 3 | 2 | 1 - 5 | 3 | 1 -(3 rows) - --- unreferenced in DISTINCT ON or ORDER BY -SELECT DISTINCT ON (a) a, b, generate_series(1,3) g -FROM (VALUES (3, 2), (3,1), (1,1), (1,4), (5,3), (5,1)) AS t(a, b) -ORDER BY a, b DESC; - a | b | g ----+---+--- - 1 | 4 | 1 - 1 | 4 | 2 - 1 | 4 | 3 - 3 | 2 | 1 - 3 | 2 | 2 - 3 | 2 | 3 - 5 | 3 | 1 - 5 | 3 | 2 - 5 | 3 | 3 -(9 rows) - --- referenced in ORDER BY -SELECT DISTINCT ON (a) a, b, generate_series(1,3) g -FROM (VALUES (3, 2), (3,1), (1,1), (1,4), (5,3), (5,1)) AS t(a, b) -ORDER BY a, b DESC, g DESC; - a | b | g ----+---+--- - 1 | 4 | 3 - 3 | 2 | 3 - 5 | 3 | 3 -(3 rows) - --- referenced in ORDER BY and DISTINCT ON -SELECT DISTINCT ON (a, b, g) a, b, generate_series(1,3) g -FROM (VALUES (3, 2), (3,1), (1,1), (1,4), (5,3), (5,1)) AS t(a, b) -ORDER BY a, b DESC, g DESC; - a | b | g ----+---+--- - 1 | 4 | 3 - 1 | 4 | 2 - 1 | 4 | 1 - 1 | 1 | 3 - 1 | 1 | 2 - 1 | 1 | 1 - 3 | 2 | 3 - 3 | 2 | 2 - 3 | 2 | 1 - 3 | 1 | 3 - 3 | 1 | 2 - 3 | 1 | 1 - 5 | 3 | 3 - 5 | 3 | 2 - 5 | 3 | 1 - 5 | 1 | 3 - 5 | 1 | 2 - 5 | 1 | 1 -(18 rows) - --- only SRF mentioned in DISTINCT ON -SELECT DISTINCT ON (g) a, b, generate_series(1,3) g -FROM (VALUES (3, 2), (3,1), (1,1), (1,4), (5,3), (5,1)) AS t(a, b); - a | b | g ----+---+--- - 3 | 2 | 1 - 5 | 1 | 2 - 3 | 1 | 3 -(3 rows) - --- LIMIT / OFFSET is evaluated after SRF evaluation -SELECT a, generate_series(1,2) FROM (VALUES(1),(2),(3)) r(a) LIMIT 2 OFFSET 2; - a | generate_series ----+----------------- - 2 | 1 - 2 | 2 -(2 rows) - --- SRFs are not allowed in LIMIT. -SELECT 1 LIMIT generate_series(1,3); -ERROR: set-returning functions are not allowed in LIMIT -LINE 1: SELECT 1 LIMIT generate_series(1,3); - ^ --- tSRF in correlated subquery, referencing table outside -SELECT (SELECT generate_series(1,3) LIMIT 1 OFFSET few.id) FROM few; - generate_series ------------------ - 2 - 3 - -(3 rows) - --- tSRF in correlated subquery, referencing SRF outside -SELECT (SELECT generate_series(1,3) LIMIT 1 OFFSET g.i) FROM generate_series(0,3) g(i); - generate_series ------------------ - 1 - 2 - 3 - -(4 rows) - --- Operators can return sets too -CREATE OPERATOR |@| (PROCEDURE = unnest, RIGHTARG = ANYARRAY); -SELECT |@|ARRAY[1,2,3]; - ?column? ----------- - 1 - 2 - 3 -(3 rows) - --- Some fun cases involving duplicate SRF calls -explain (verbose, costs off) -select generate_series(1,3) as x, generate_series(1,3) + 1 as xp1; - QUERY PLAN ------------------------------------------------------------------- - Result - Output: (generate_series(1, 3)), ((generate_series(1, 3)) + 1) - -> ProjectSet - Output: generate_series(1, 3) - -> Result -(5 rows) - -select generate_series(1,3) as x, generate_series(1,3) + 1 as xp1; - x | xp1 ----+----- - 1 | 2 - 2 | 3 - 3 | 4 -(3 rows) - -explain (verbose, costs off) -select generate_series(1,3)+1 order by generate_series(1,3); - QUERY PLAN ------------------------------------------------------------------------- - Sort - Output: (((generate_series(1, 3)) + 1)), (generate_series(1, 3)) - Sort Key: (generate_series(1, 3)) - -> Result - Output: ((generate_series(1, 3)) + 1), (generate_series(1, 3)) - -> ProjectSet - Output: generate_series(1, 3) - -> Result -(8 rows) - -select generate_series(1,3)+1 order by generate_series(1,3); - ?column? 
----------- - 2 - 3 - 4 -(3 rows) - --- Check that SRFs of same nesting level run in lockstep -explain (verbose, costs off) -select generate_series(1,3) as x, generate_series(3,6) + 1 as y; - QUERY PLAN ------------------------------------------------------------------- - Result - Output: (generate_series(1, 3)), ((generate_series(3, 6)) + 1) - -> ProjectSet - Output: generate_series(1, 3), generate_series(3, 6) - -> Result -(5 rows) - -select generate_series(1,3) as x, generate_series(3,6) + 1 as y; - x | y ----+--- - 1 | 4 - 2 | 5 - 3 | 6 - | 7 -(4 rows) - --- Clean up -DROP TABLE few; -DROP TABLE fewmore; +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/tid.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tid.out --- /tmp/cirrus-ci-build/src/test/regress/expected/tid.out 2024-03-07 14:25:00.332427000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tid.out 2024-03-07 14:27:17.080684000 +0000 @@ -1,121 +1,2 @@ --- basic tests for the TID data type -SELECT - '(0,0)'::tid as tid00, - '(0,1)'::tid as tid01, - '(-1,0)'::tid as tidm10, - '(4294967295,65535)'::tid as tidmax; - tid00 | tid01 | tidm10 | tidmax --------+-------+----------------+-------------------- - (0,0) | (0,1) | (4294967295,0) | (4294967295,65535) -(1 row) - -SELECT '(4294967296,1)'::tid; -- error -ERROR: invalid input syntax for type tid: "(4294967296,1)" -LINE 1: SELECT '(4294967296,1)'::tid; - ^ -SELECT '(1,65536)'::tid; -- error -ERROR: invalid input syntax for type tid: "(1,65536)" -LINE 1: SELECT '(1,65536)'::tid; - ^ --- Also try it with non-error-throwing API -SELECT pg_input_is_valid('(0)', 'tid'); - pg_input_is_valid -------------------- - f -(1 row) - -SELECT * FROM pg_input_error_info('(0)', 'tid'); - message | detail | hint | sql_error_code -------------------------------------------+--------+------+---------------- - invalid input syntax for type tid: "(0)" | | | 22P02 -(1 row) - -SELECT pg_input_is_valid('(0,-1)', 'tid'); - pg_input_is_valid -------------------- - f -(1 row) - -SELECT * FROM pg_input_error_info('(0,-1)', 'tid'); - message | detail | hint | sql_error_code ----------------------------------------------+--------+------+---------------- - invalid input syntax for type tid: "(0,-1)" | | | 22P02 -(1 row) - --- tests for functions related to TID handling -CREATE TABLE tid_tab (a int); --- min() and max() for TIDs -INSERT INTO tid_tab VALUES (1), (2); -SELECT min(ctid) FROM tid_tab; - min -------- - (0,1) -(1 row) - -SELECT max(ctid) FROM tid_tab; - max -------- - (0,2) -(1 row) - -TRUNCATE tid_tab; --- Tests for currtid2() with various relation kinds --- Materialized view -CREATE MATERIALIZED VIEW tid_matview AS SELECT a FROM tid_tab; -SELECT currtid2('tid_matview'::text, '(0,1)'::tid); -- fails -ERROR: tid (0, 1) is not valid for relation "tid_matview" -INSERT INTO tid_tab VALUES (1); -REFRESH MATERIALIZED VIEW tid_matview; -SELECT currtid2('tid_matview'::text, '(0,1)'::tid); -- ok - currtid2 ----------- - (0,1) -(1 row) - -DROP MATERIALIZED VIEW tid_matview; -TRUNCATE tid_tab; --- Sequence -CREATE SEQUENCE tid_seq; -SELECT currtid2('tid_seq'::text, '(0,1)'::tid); -- ok - currtid2 ----------- - (0,1) -(1 row) - -DROP SEQUENCE tid_seq; --- Index, fails with incorrect relation type -CREATE INDEX tid_ind ON tid_tab(a); -SELECT currtid2('tid_ind'::text, 
'(0,1)'::tid); -- fails -ERROR: cannot open relation "tid_ind" -DETAIL: This operation is not supported for indexes. -DROP INDEX tid_ind; --- Partitioned table, no storage -CREATE TABLE tid_part (a int) PARTITION BY RANGE (a); -SELECT currtid2('tid_part'::text, '(0,1)'::tid); -- fails -ERROR: cannot look at latest visible tid for relation "public.tid_part" -DROP TABLE tid_part; --- Views --- ctid not defined in the view -CREATE VIEW tid_view_no_ctid AS SELECT a FROM tid_tab; -SELECT currtid2('tid_view_no_ctid'::text, '(0,1)'::tid); -- fails -ERROR: currtid cannot handle views with no CTID -DROP VIEW tid_view_no_ctid; --- ctid fetched directly from the source table. -CREATE VIEW tid_view_with_ctid AS SELECT ctid, a FROM tid_tab; -SELECT currtid2('tid_view_with_ctid'::text, '(0,1)'::tid); -- fails -ERROR: tid (0, 1) is not valid for relation "tid_tab" -INSERT INTO tid_tab VALUES (1); -SELECT currtid2('tid_view_with_ctid'::text, '(0,1)'::tid); -- ok - currtid2 ----------- - (0,1) -(1 row) - -DROP VIEW tid_view_with_ctid; -TRUNCATE tid_tab; --- ctid attribute with incorrect data type -CREATE VIEW tid_view_fake_ctid AS SELECT 1 AS ctid, 2 AS a; -SELECT currtid2('tid_view_fake_ctid'::text, '(0,1)'::tid); -- fails -ERROR: ctid isn't of type TID -DROP VIEW tid_view_fake_ctid; -DROP TABLE tid_tab CASCADE; +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/tidscan.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tidscan.out --- /tmp/cirrus-ci-build/src/test/regress/expected/tidscan.out 2024-03-07 14:25:00.334167000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tidscan.out 2024-03-07 14:27:17.084478000 +0000 @@ -1,296 +1,2 @@ --- tests for tidscans -CREATE TABLE tidscan(id integer); --- only insert a few rows, we don't want to spill onto a second table page -INSERT INTO tidscan VALUES (1), (2), (3); --- show ctids -SELECT ctid, * FROM tidscan; - ctid | id --------+---- - (0,1) | 1 - (0,2) | 2 - (0,3) | 3 -(3 rows) - --- ctid equality - implemented as tidscan -EXPLAIN (COSTS OFF) -SELECT ctid, * FROM tidscan WHERE ctid = '(0,1)'; - QUERY PLAN ------------------------------------ - Tid Scan on tidscan - TID Cond: (ctid = '(0,1)'::tid) -(2 rows) - -SELECT ctid, * FROM tidscan WHERE ctid = '(0,1)'; - ctid | id --------+---- - (0,1) | 1 -(1 row) - -EXPLAIN (COSTS OFF) -SELECT ctid, * FROM tidscan WHERE '(0,1)' = ctid; - QUERY PLAN ------------------------------------ - Tid Scan on tidscan - TID Cond: ('(0,1)'::tid = ctid) -(2 rows) - -SELECT ctid, * FROM tidscan WHERE '(0,1)' = ctid; - ctid | id --------+---- - (0,1) | 1 -(1 row) - --- OR'd clauses -EXPLAIN (COSTS OFF) -SELECT ctid, * FROM tidscan WHERE ctid = '(0,2)' OR '(0,1)' = ctid; - QUERY PLAN --------------------------------------------------------------- - Tid Scan on tidscan - TID Cond: ((ctid = '(0,2)'::tid) OR ('(0,1)'::tid = ctid)) -(2 rows) - -SELECT ctid, * FROM tidscan WHERE ctid = '(0,2)' OR '(0,1)' = ctid; - ctid | id --------+---- - (0,1) | 1 - (0,2) | 2 -(2 rows) - --- ctid = ScalarArrayOp - implemented as tidscan -EXPLAIN (COSTS OFF) -SELECT ctid, * FROM tidscan WHERE ctid = ANY(ARRAY['(0,1)', '(0,2)']::tid[]); - QUERY PLAN -------------------------------------------------------- - Tid Scan on tidscan - TID Cond: (ctid = ANY ('{"(0,1)","(0,2)"}'::tid[])) -(2 rows) - -SELECT ctid, * 
FROM tidscan WHERE ctid = ANY(ARRAY['(0,1)', '(0,2)']::tid[]); - ctid | id --------+---- - (0,1) | 1 - (0,2) | 2 -(2 rows) - --- ctid != ScalarArrayOp - can't be implemented as tidscan -EXPLAIN (COSTS OFF) -SELECT ctid, * FROM tidscan WHERE ctid != ANY(ARRAY['(0,1)', '(0,2)']::tid[]); - QUERY PLAN ------------------------------------------------------- - Seq Scan on tidscan - Filter: (ctid <> ANY ('{"(0,1)","(0,2)"}'::tid[])) -(2 rows) - -SELECT ctid, * FROM tidscan WHERE ctid != ANY(ARRAY['(0,1)', '(0,2)']::tid[]); - ctid | id --------+---- - (0,1) | 1 - (0,2) | 2 - (0,3) | 3 -(3 rows) - --- tid equality extracted from sub-AND clauses -EXPLAIN (COSTS OFF) -SELECT ctid, * FROM tidscan -WHERE (id = 3 AND ctid IN ('(0,2)', '(0,3)')) OR (ctid = '(0,1)' AND id = 1); - QUERY PLAN --------------------------------------------------------------------------------------------------------------- - Tid Scan on tidscan - TID Cond: ((ctid = ANY ('{"(0,2)","(0,3)"}'::tid[])) OR (ctid = '(0,1)'::tid)) - Filter: (((id = 3) AND (ctid = ANY ('{"(0,2)","(0,3)"}'::tid[]))) OR ((ctid = '(0,1)'::tid) AND (id = 1))) -(3 rows) - -SELECT ctid, * FROM tidscan -WHERE (id = 3 AND ctid IN ('(0,2)', '(0,3)')) OR (ctid = '(0,1)' AND id = 1); - ctid | id --------+---- - (0,1) | 1 - (0,3) | 3 -(2 rows) - --- nestloop-with-inner-tidscan joins on tid -SET enable_hashjoin TO off; -- otherwise hash join might win -EXPLAIN (COSTS OFF) -SELECT t1.ctid, t1.*, t2.ctid, t2.* -FROM tidscan t1 JOIN tidscan t2 ON t1.ctid = t2.ctid WHERE t1.id = 1; - QUERY PLAN ------------------------------------- - Nested Loop - -> Seq Scan on tidscan t1 - Filter: (id = 1) - -> Tid Scan on tidscan t2 - TID Cond: (t1.ctid = ctid) -(5 rows) - -SELECT t1.ctid, t1.*, t2.ctid, t2.* -FROM tidscan t1 JOIN tidscan t2 ON t1.ctid = t2.ctid WHERE t1.id = 1; - ctid | id | ctid | id --------+----+-------+---- - (0,1) | 1 | (0,1) | 1 -(1 row) - -EXPLAIN (COSTS OFF) -SELECT t1.ctid, t1.*, t2.ctid, t2.* -FROM tidscan t1 LEFT JOIN tidscan t2 ON t1.ctid = t2.ctid WHERE t1.id = 1; - QUERY PLAN ------------------------------------- - Nested Loop Left Join - -> Seq Scan on tidscan t1 - Filter: (id = 1) - -> Tid Scan on tidscan t2 - TID Cond: (t1.ctid = ctid) -(5 rows) - -SELECT t1.ctid, t1.*, t2.ctid, t2.* -FROM tidscan t1 LEFT JOIN tidscan t2 ON t1.ctid = t2.ctid WHERE t1.id = 1; - ctid | id | ctid | id --------+----+-------+---- - (0,1) | 1 | (0,1) | 1 -(1 row) - -RESET enable_hashjoin; --- exercise backward scan and rewind -BEGIN; -DECLARE c CURSOR FOR -SELECT ctid, * FROM tidscan WHERE ctid = ANY(ARRAY['(0,1)', '(0,2)']::tid[]); -FETCH ALL FROM c; - ctid | id --------+---- - (0,1) | 1 - (0,2) | 2 -(2 rows) - -FETCH BACKWARD 1 FROM c; - ctid | id --------+---- - (0,2) | 2 -(1 row) - -FETCH FIRST FROM c; - ctid | id --------+---- - (0,1) | 1 -(1 row) - -ROLLBACK; --- tidscan via CURRENT OF -BEGIN; -DECLARE c CURSOR FOR SELECT ctid, * FROM tidscan; -FETCH NEXT FROM c; -- skip one row - ctid | id --------+---- - (0,1) | 1 -(1 row) - -FETCH NEXT FROM c; - ctid | id --------+---- - (0,2) | 2 -(1 row) - --- perform update -EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) -UPDATE tidscan SET id = -id WHERE CURRENT OF c RETURNING *; - QUERY PLAN ---------------------------------------------------- - Update on tidscan (actual rows=1 loops=1) - -> Tid Scan on tidscan (actual rows=1 loops=1) - TID Cond: CURRENT OF c -(3 rows) - -FETCH NEXT FROM c; - ctid | id --------+---- - (0,3) | 3 -(1 row) - --- perform update -EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) 
-UPDATE tidscan SET id = -id WHERE CURRENT OF c RETURNING *; - QUERY PLAN ---------------------------------------------------- - Update on tidscan (actual rows=1 loops=1) - -> Tid Scan on tidscan (actual rows=1 loops=1) - TID Cond: CURRENT OF c -(3 rows) - -SELECT * FROM tidscan; - id ----- - 1 - -2 - -3 -(3 rows) - --- position cursor past any rows -FETCH NEXT FROM c; - ctid | id -------+---- -(0 rows) - --- should error out -EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) -UPDATE tidscan SET id = -id WHERE CURRENT OF c RETURNING *; -ERROR: cursor "c" is not positioned on a row -ROLLBACK; --- bulk joins on CTID --- (these plans don't use TID scans, but this still seems like an --- appropriate place for these tests) -EXPLAIN (COSTS OFF) -SELECT count(*) FROM tenk1 t1 JOIN tenk1 t2 ON t1.ctid = t2.ctid; - QUERY PLAN ----------------------------------------- - Aggregate - -> Hash Join - Hash Cond: (t1.ctid = t2.ctid) - -> Seq Scan on tenk1 t1 - -> Hash - -> Seq Scan on tenk1 t2 -(6 rows) - -SELECT count(*) FROM tenk1 t1 JOIN tenk1 t2 ON t1.ctid = t2.ctid; - count -------- - 10000 -(1 row) - -SET enable_hashjoin TO off; -EXPLAIN (COSTS OFF) -SELECT count(*) FROM tenk1 t1 JOIN tenk1 t2 ON t1.ctid = t2.ctid; - QUERY PLAN ------------------------------------------ - Aggregate - -> Merge Join - Merge Cond: (t1.ctid = t2.ctid) - -> Sort - Sort Key: t1.ctid - -> Seq Scan on tenk1 t1 - -> Sort - Sort Key: t2.ctid - -> Seq Scan on tenk1 t2 -(9 rows) - -SELECT count(*) FROM tenk1 t1 JOIN tenk1 t2 ON t1.ctid = t2.ctid; - count -------- - 10000 -(1 row) - -RESET enable_hashjoin; --- check predicate lock on CTID -BEGIN ISOLATION LEVEL SERIALIZABLE; -SELECT * FROM tidscan WHERE ctid = '(0,1)'; - id ----- - 1 -(1 row) - --- locktype should be 'tuple' -SELECT locktype, mode FROM pg_locks WHERE pid = pg_backend_pid() AND mode = 'SIReadLock'; - locktype | mode -----------+------------ - tuple | SIReadLock -(1 row) - -ROLLBACK; -DROP TABLE tidscan; +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/tidrangescan.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tidrangescan.out --- /tmp/cirrus-ci-build/src/test/regress/expected/tidrangescan.out 2024-03-07 14:25:00.334145000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tidrangescan.out 2024-03-07 14:27:17.082707000 +0000 @@ -1,300 +1,2 @@ --- tests for tidrangescans -SET enable_seqscan TO off; -CREATE TABLE tidrangescan(id integer, data text); --- empty table -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE ctid < '(1, 0)'; - QUERY PLAN ------------------------------------ - Tid Range Scan on tidrangescan - TID Cond: (ctid < '(1,0)'::tid) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE ctid < '(1, 0)'; - ctid ------- -(0 rows) - -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE ctid > '(9, 0)'; - QUERY PLAN ------------------------------------ - Tid Range Scan on tidrangescan - TID Cond: (ctid > '(9,0)'::tid) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE ctid > '(9, 0)'; - ctid ------- -(0 rows) - --- insert enough tuples to fill at least two pages -INSERT INTO tidrangescan SELECT i,repeat('x', 100) FROM generate_series(1,200) AS s(i); --- remove all tuples after the 10th tuple on each page. 
Trying to ensure --- we get the same layout with all CPU architectures and smaller than standard --- page sizes. -DELETE FROM tidrangescan -WHERE substring(ctid::text FROM ',(\d+)\)')::integer > 10 OR substring(ctid::text FROM '\((\d+),')::integer > 2; -VACUUM tidrangescan; --- range scans with upper bound -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE ctid < '(1,0)'; - QUERY PLAN ------------------------------------ - Tid Range Scan on tidrangescan - TID Cond: (ctid < '(1,0)'::tid) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE ctid < '(1,0)'; - ctid --------- - (0,1) - (0,2) - (0,3) - (0,4) - (0,5) - (0,6) - (0,7) - (0,8) - (0,9) - (0,10) -(10 rows) - -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE ctid <= '(1,5)'; - QUERY PLAN ------------------------------------- - Tid Range Scan on tidrangescan - TID Cond: (ctid <= '(1,5)'::tid) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE ctid <= '(1,5)'; - ctid --------- - (0,1) - (0,2) - (0,3) - (0,4) - (0,5) - (0,6) - (0,7) - (0,8) - (0,9) - (0,10) - (1,1) - (1,2) - (1,3) - (1,4) - (1,5) -(15 rows) - -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE ctid < '(0,0)'; - QUERY PLAN ------------------------------------ - Tid Range Scan on tidrangescan - TID Cond: (ctid < '(0,0)'::tid) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE ctid < '(0,0)'; - ctid ------- -(0 rows) - --- range scans with lower bound -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE ctid > '(2,8)'; - QUERY PLAN ------------------------------------ - Tid Range Scan on tidrangescan - TID Cond: (ctid > '(2,8)'::tid) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE ctid > '(2,8)'; - ctid --------- - (2,9) - (2,10) -(2 rows) - -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE '(2,8)' < ctid; - QUERY PLAN ------------------------------------ - Tid Range Scan on tidrangescan - TID Cond: ('(2,8)'::tid < ctid) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE '(2,8)' < ctid; - ctid --------- - (2,9) - (2,10) -(2 rows) - -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE ctid >= '(2,8)'; - QUERY PLAN ------------------------------------- - Tid Range Scan on tidrangescan - TID Cond: (ctid >= '(2,8)'::tid) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE ctid >= '(2,8)'; - ctid --------- - (2,8) - (2,9) - (2,10) -(3 rows) - -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE ctid >= '(100,0)'; - QUERY PLAN --------------------------------------- - Tid Range Scan on tidrangescan - TID Cond: (ctid >= '(100,0)'::tid) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE ctid >= '(100,0)'; - ctid ------- -(0 rows) - --- range scans with both bounds -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE ctid > '(1,4)' AND '(1,7)' >= ctid; - QUERY PLAN ----------------------------------------------------------------- - Tid Range Scan on tidrangescan - TID Cond: ((ctid > '(1,4)'::tid) AND ('(1,7)'::tid >= ctid)) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE ctid > '(1,4)' AND '(1,7)' >= ctid; - ctid -------- - (1,5) - (1,6) - (1,7) -(3 rows) - -EXPLAIN (COSTS OFF) -SELECT ctid FROM tidrangescan WHERE '(1,7)' >= ctid AND ctid > '(1,4)'; - QUERY PLAN ----------------------------------------------------------------- - Tid Range Scan on tidrangescan - TID Cond: (('(1,7)'::tid >= ctid) AND (ctid > '(1,4)'::tid)) -(2 rows) - -SELECT ctid FROM tidrangescan WHERE '(1,7)' >= ctid AND ctid > '(1,4)'; - ctid -------- - (1,5) - (1,6) - (1,7) -(3 rows) - --- extreme offsets -SELECT ctid FROM tidrangescan WHERE ctid > '(0,65535)' AND ctid < '(1,0)' 
LIMIT 1; - ctid ------- -(0 rows) - -SELECT ctid FROM tidrangescan WHERE ctid < '(0,0)' LIMIT 1; - ctid ------- -(0 rows) - -SELECT ctid FROM tidrangescan WHERE ctid > '(4294967295,65535)'; - ctid ------- -(0 rows) - -SELECT ctid FROM tidrangescan WHERE ctid < '(0,0)'; - ctid ------- -(0 rows) - --- NULLs in the range cannot return tuples -SELECT ctid FROM tidrangescan WHERE ctid >= (SELECT NULL::tid); - ctid ------- -(0 rows) - --- rescans -EXPLAIN (COSTS OFF) -SELECT t.ctid,t2.c FROM tidrangescan t, -LATERAL (SELECT count(*) c FROM tidrangescan t2 WHERE t2.ctid <= t.ctid) t2 -WHERE t.ctid < '(1,0)'; - QUERY PLAN ------------------------------------------------ - Nested Loop - -> Tid Range Scan on tidrangescan t - TID Cond: (ctid < '(1,0)'::tid) - -> Aggregate - -> Tid Range Scan on tidrangescan t2 - TID Cond: (ctid <= t.ctid) -(6 rows) - -SELECT t.ctid,t2.c FROM tidrangescan t, -LATERAL (SELECT count(*) c FROM tidrangescan t2 WHERE t2.ctid <= t.ctid) t2 -WHERE t.ctid < '(1,0)'; - ctid | c ---------+---- - (0,1) | 1 - (0,2) | 2 - (0,3) | 3 - (0,4) | 4 - (0,5) | 5 - (0,6) | 6 - (0,7) | 7 - (0,8) | 8 - (0,9) | 9 - (0,10) | 10 -(10 rows) - --- cursors --- Ensure we get a TID Range scan without a Materialize node. -EXPLAIN (COSTS OFF) -DECLARE c SCROLL CURSOR FOR SELECT ctid FROM tidrangescan WHERE ctid < '(1,0)'; - QUERY PLAN ------------------------------------ - Tid Range Scan on tidrangescan - TID Cond: (ctid < '(1,0)'::tid) -(2 rows) - -BEGIN; -DECLARE c SCROLL CURSOR FOR SELECT ctid FROM tidrangescan WHERE ctid < '(1,0)'; -FETCH NEXT c; - ctid -------- - (0,1) -(1 row) - -FETCH NEXT c; - ctid -------- - (0,2) -(1 row) - -FETCH PRIOR c; - ctid -------- - (0,1) -(1 row) - -FETCH FIRST c; - ctid -------- - (0,1) -(1 row) - -FETCH LAST c; - ctid --------- - (0,10) -(1 row) - -COMMIT; -DROP TABLE tidrangescan; -RESET enable_seqscan; +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/collate.icu.utf8_1.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/collate.icu.utf8.out --- /tmp/cirrus-ci-build/src/test/regress/expected/collate.icu.utf8_1.out 2024-03-07 14:25:00.329582000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/collate.icu.utf8.out 2024-03-07 14:27:17.076966000 +0000 @@ -1,9 +1,2 @@ -/* - * This test is for ICU collations. - */ -/* skip test if not UTF8 server encoding or no ICU collations installed */ -SELECT getdatabaseencoding() <> 'UTF8' OR - (SELECT count(*) FROM pg_collation WHERE collprovider = 'i' AND collname <> 'unicode') = 0 - AS skip_test \gset -\if :skip_test -\quit +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/incremental_sort.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/incremental_sort.out --- /tmp/cirrus-ci-build/src/test/regress/expected/incremental_sort.out 2024-03-07 14:25:00.330969000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/incremental_sort.out 2024-03-07 14:27:17.077440000 +0000 @@ -1,1695 +1,2 @@ --- When there is a LIMIT clause, incremental sort is beneficial because --- it only has to sort some of the groups, and not the entire table. 
-explain (costs off) -select * from (select * from tenk1 order by four) t order by four, ten -limit 1; - QUERY PLAN ------------------------------------------ - Limit - -> Incremental Sort - Sort Key: tenk1.four, tenk1.ten - Presorted Key: tenk1.four - -> Sort - Sort Key: tenk1.four - -> Seq Scan on tenk1 -(7 rows) - --- When work_mem is not enough to sort the entire table, incremental sort --- may be faster if individual groups still fit into work_mem. -set work_mem to '2MB'; -explain (costs off) -select * from (select * from tenk1 order by four) t order by four, ten; - QUERY PLAN ------------------------------------ - Incremental Sort - Sort Key: tenk1.four, tenk1.ten - Presorted Key: tenk1.four - -> Sort - Sort Key: tenk1.four - -> Seq Scan on tenk1 -(6 rows) - -reset work_mem; -create table t(a integer, b integer); -create or replace function explain_analyze_without_memory(query text) -returns table (out_line text) language plpgsql -as -$$ -declare - line text; -begin - for line in - execute 'explain (analyze, costs off, summary off, timing off) ' || query - loop - out_line := regexp_replace(line, '\d+kB', 'NNkB', 'g'); - return next; - end loop; -end; -$$; -create or replace function explain_analyze_inc_sort_nodes(query text) -returns jsonb language plpgsql -as -$$ -declare - elements jsonb; - element jsonb; - matching_nodes jsonb := '[]'::jsonb; -begin - execute 'explain (analyze, costs off, summary off, timing off, format ''json'') ' || query into strict elements; - while jsonb_array_length(elements) > 0 loop - element := elements->0; - elements := elements - 0; - case jsonb_typeof(element) - when 'array' then - if jsonb_array_length(element) > 0 then - elements := elements || element; - end if; - when 'object' then - if element ? 'Plan' then - elements := elements || jsonb_build_array(element->'Plan'); - element := element - 'Plan'; - else - if element ? 
'Plans' then - elements := elements || jsonb_build_array(element->'Plans'); - element := element - 'Plans'; - end if; - if (element->>'Node Type')::text = 'Incremental Sort' then - matching_nodes := matching_nodes || element; - end if; - end if; - end case; - end loop; - return matching_nodes; -end; -$$; -create or replace function explain_analyze_inc_sort_nodes_without_memory(query text) -returns jsonb language plpgsql -as -$$ -declare - nodes jsonb := '[]'::jsonb; - node jsonb; - group_key text; - space_key text; -begin - for node in select * from jsonb_array_elements(explain_analyze_inc_sort_nodes(query)) t loop - for group_key in select unnest(array['Full-sort Groups', 'Pre-sorted Groups']::text[]) t loop - for space_key in select unnest(array['Sort Space Memory', 'Sort Space Disk']::text[]) t loop - node := jsonb_set(node, array[group_key, space_key, 'Average Sort Space Used'], '"NN"', false); - node := jsonb_set(node, array[group_key, space_key, 'Peak Sort Space Used'], '"NN"', false); - end loop; - end loop; - nodes := nodes || node; - end loop; - return nodes; -end; -$$; -create or replace function explain_analyze_inc_sort_nodes_verify_invariants(query text) -returns bool language plpgsql -as -$$ -declare - node jsonb; - group_stats jsonb; - group_key text; - space_key text; -begin - for node in select * from jsonb_array_elements(explain_analyze_inc_sort_nodes(query)) t loop - for group_key in select unnest(array['Full-sort Groups', 'Pre-sorted Groups']::text[]) t loop - group_stats := node->group_key; - for space_key in select unnest(array['Sort Space Memory', 'Sort Space Disk']::text[]) t loop - if (group_stats->space_key->'Peak Sort Space Used')::bigint < (group_stats->space_key->'Average Sort Space Used')::bigint then - raise exception '% has invalid max space < average space', group_key; - end if; - end loop; - end loop; - end loop; - return true; -end; -$$; --- A single large group tested around each mode transition point. 
-insert into t(a, b) select i/100 + 1, i + 1 from generate_series(0, 999) n(i); -analyze t; -explain (costs off) select * from (select * from t order by a) s order by a, b limit 31; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 31; - a | b ----+---- - 1 | 1 - 1 | 2 - 1 | 3 - 1 | 4 - 1 | 5 - 1 | 6 - 1 | 7 - 1 | 8 - 1 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 1 | 20 - 1 | 21 - 1 | 22 - 1 | 23 - 1 | 24 - 1 | 25 - 1 | 26 - 1 | 27 - 1 | 28 - 1 | 29 - 1 | 30 - 1 | 31 -(31 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 32; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 32; - a | b ----+---- - 1 | 1 - 1 | 2 - 1 | 3 - 1 | 4 - 1 | 5 - 1 | 6 - 1 | 7 - 1 | 8 - 1 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 1 | 20 - 1 | 21 - 1 | 22 - 1 | 23 - 1 | 24 - 1 | 25 - 1 | 26 - 1 | 27 - 1 | 28 - 1 | 29 - 1 | 30 - 1 | 31 - 1 | 32 -(32 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 33; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 33; - a | b ----+---- - 1 | 1 - 1 | 2 - 1 | 3 - 1 | 4 - 1 | 5 - 1 | 6 - 1 | 7 - 1 | 8 - 1 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 1 | 20 - 1 | 21 - 1 | 22 - 1 | 23 - 1 | 24 - 1 | 25 - 1 | 26 - 1 | 27 - 1 | 28 - 1 | 29 - 1 | 30 - 1 | 31 - 1 | 32 - 1 | 33 -(33 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 65; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 65; - a | b ----+---- - 1 | 1 - 1 | 2 - 1 | 3 - 1 | 4 - 1 | 5 - 1 | 6 - 1 | 7 - 1 | 8 - 1 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 1 | 20 - 1 | 21 - 1 | 22 - 1 | 23 - 1 | 24 - 1 | 25 - 1 | 26 - 1 | 27 - 1 | 28 - 1 | 29 - 1 | 30 - 1 | 31 - 1 | 32 - 1 | 33 - 1 | 34 - 1 | 35 - 1 | 36 - 1 | 37 - 1 | 38 - 1 | 39 - 1 | 40 - 1 | 41 - 1 | 42 - 1 | 43 - 1 | 44 - 1 | 45 - 1 | 46 - 1 | 47 - 1 | 48 - 1 | 49 - 1 | 50 - 1 | 51 - 1 | 52 - 1 | 53 - 1 | 54 - 1 | 55 - 1 | 56 - 1 | 57 - 1 | 58 - 1 | 59 - 1 | 60 - 1 | 61 - 1 | 62 - 1 | 63 - 1 | 64 - 1 | 65 -(65 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 66; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 66; - a | b ----+---- - 1 | 1 - 1 | 2 - 1 | 3 - 1 | 4 - 1 | 5 - 1 | 6 - 1 | 7 - 1 | 8 - 1 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 1 | 20 - 1 | 21 - 1 | 22 - 1 | 23 - 1 | 24 - 1 | 25 - 1 | 
26 - 1 | 27 - 1 | 28 - 1 | 29 - 1 | 30 - 1 | 31 - 1 | 32 - 1 | 33 - 1 | 34 - 1 | 35 - 1 | 36 - 1 | 37 - 1 | 38 - 1 | 39 - 1 | 40 - 1 | 41 - 1 | 42 - 1 | 43 - 1 | 44 - 1 | 45 - 1 | 46 - 1 | 47 - 1 | 48 - 1 | 49 - 1 | 50 - 1 | 51 - 1 | 52 - 1 | 53 - 1 | 54 - 1 | 55 - 1 | 56 - 1 | 57 - 1 | 58 - 1 | 59 - 1 | 60 - 1 | 61 - 1 | 62 - 1 | 63 - 1 | 64 - 1 | 65 - 1 | 66 -(66 rows) - -delete from t; --- An initial large group followed by a small group. -insert into t(a, b) select i/50 + 1, i + 1 from generate_series(0, 999) n(i); -analyze t; -explain (costs off) select * from (select * from t order by a) s order by a, b limit 55; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 55; - a | b ----+---- - 1 | 1 - 1 | 2 - 1 | 3 - 1 | 4 - 1 | 5 - 1 | 6 - 1 | 7 - 1 | 8 - 1 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 1 | 20 - 1 | 21 - 1 | 22 - 1 | 23 - 1 | 24 - 1 | 25 - 1 | 26 - 1 | 27 - 1 | 28 - 1 | 29 - 1 | 30 - 1 | 31 - 1 | 32 - 1 | 33 - 1 | 34 - 1 | 35 - 1 | 36 - 1 | 37 - 1 | 38 - 1 | 39 - 1 | 40 - 1 | 41 - 1 | 42 - 1 | 43 - 1 | 44 - 1 | 45 - 1 | 46 - 1 | 47 - 1 | 48 - 1 | 49 - 1 | 50 - 2 | 51 - 2 | 52 - 2 | 53 - 2 | 54 - 2 | 55 -(55 rows) - --- Test EXPLAIN ANALYZE with only a fullsort group. -select explain_analyze_without_memory('select * from (select * from t order by a) s order by a, b limit 55'); - explain_analyze_without_memory ---------------------------------------------------------------------------------------------------------------- - Limit (actual rows=55 loops=1) - -> Incremental Sort (actual rows=55 loops=1) - Sort Key: t.a, t.b - Presorted Key: t.a - Full-sort Groups: 2 Sort Methods: top-N heapsort, quicksort Average Memory: NNkB Peak Memory: NNkB - -> Sort (actual rows=101 loops=1) - Sort Key: t.a - Sort Method: quicksort Memory: NNkB - -> Seq Scan on t (actual rows=1000 loops=1) -(9 rows) - -select jsonb_pretty(explain_analyze_inc_sort_nodes_without_memory('select * from (select * from t order by a) s order by a, b limit 55')); - jsonb_pretty -------------------------------------------------- - [ + - { + - "Sort Key": [ + - "t.a", + - "t.b" + - ], + - "Node Type": "Incremental Sort", + - "Actual Rows": 55, + - "Actual Loops": 1, + - "Async Capable": false, + - "Presorted Key": [ + - "t.a" + - ], + - "Parallel Aware": false, + - "Full-sort Groups": { + - "Group Count": 2, + - "Sort Methods Used": [ + - "top-N heapsort", + - "quicksort" + - ], + - "Sort Space Memory": { + - "Peak Sort Space Used": "NN", + - "Average Sort Space Used": "NN"+ - } + - }, + - "Parent Relationship": "Outer" + - } + - ] -(1 row) - -select explain_analyze_inc_sort_nodes_verify_invariants('select * from (select * from t order by a) s order by a, b limit 55'); - explain_analyze_inc_sort_nodes_verify_invariants --------------------------------------------------- - t -(1 row) - -delete from t; --- An initial small group followed by a large group. 
-insert into t(a, b) select (case when i < 5 then i else 9 end), i from generate_series(1, 1000) n(i); -analyze t; -explain (costs off) select * from (select * from t order by a) s order by a, b limit 70; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 70; - a | b ----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 9 | 5 - 9 | 6 - 9 | 7 - 9 | 8 - 9 | 9 - 9 | 10 - 9 | 11 - 9 | 12 - 9 | 13 - 9 | 14 - 9 | 15 - 9 | 16 - 9 | 17 - 9 | 18 - 9 | 19 - 9 | 20 - 9 | 21 - 9 | 22 - 9 | 23 - 9 | 24 - 9 | 25 - 9 | 26 - 9 | 27 - 9 | 28 - 9 | 29 - 9 | 30 - 9 | 31 - 9 | 32 - 9 | 33 - 9 | 34 - 9 | 35 - 9 | 36 - 9 | 37 - 9 | 38 - 9 | 39 - 9 | 40 - 9 | 41 - 9 | 42 - 9 | 43 - 9 | 44 - 9 | 45 - 9 | 46 - 9 | 47 - 9 | 48 - 9 | 49 - 9 | 50 - 9 | 51 - 9 | 52 - 9 | 53 - 9 | 54 - 9 | 55 - 9 | 56 - 9 | 57 - 9 | 58 - 9 | 59 - 9 | 60 - 9 | 61 - 9 | 62 - 9 | 63 - 9 | 64 - 9 | 65 - 9 | 66 - 9 | 67 - 9 | 68 - 9 | 69 - 9 | 70 -(70 rows) - --- Checks case where we hit a group boundary at the last tuple of a batch. --- Because the full sort state is bounded, we scan 64 tuples (the mode --- transition point) but only retain 5. Thus when we transition modes, all --- tuples in the full sort state have different prefix keys. -explain (costs off) select * from (select * from t order by a) s order by a, b limit 5; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 5; - a | b ----+--- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 9 | 5 -(5 rows) - --- Test rescan. -begin; --- We force the planner to choose a plan with incremental sort on the right side --- of a nested loop join node. That way we trigger the rescan code path. -set local enable_hashjoin = off; -set local enable_mergejoin = off; -set local enable_material = off; -set local enable_sort = off; -explain (costs off) select * from t left join (select * from (select * from t order by a) v order by a, b) s on s.a = t.a where t.a in (1, 2); - QUERY PLAN ------------------------------------------------- - Nested Loop Left Join - Join Filter: (t_1.a = t.a) - -> Seq Scan on t - Filter: (a = ANY ('{1,2}'::integer[])) - -> Incremental Sort - Sort Key: t_1.a, t_1.b - Presorted Key: t_1.a - -> Sort - Sort Key: t_1.a - -> Seq Scan on t t_1 -(10 rows) - -select * from t left join (select * from (select * from t order by a) v order by a, b) s on s.a = t.a where t.a in (1, 2); - a | b | a | b ----+---+---+--- - 1 | 1 | 1 | 1 - 2 | 2 | 2 | 2 -(2 rows) - -rollback; --- Test EXPLAIN ANALYZE with both fullsort and presorted groups. 
-select explain_analyze_without_memory('select * from (select * from t order by a) s order by a, b limit 70'); - explain_analyze_without_memory ----------------------------------------------------------------------------------------------------------------- - Limit (actual rows=70 loops=1) - -> Incremental Sort (actual rows=70 loops=1) - Sort Key: t.a, t.b - Presorted Key: t.a - Full-sort Groups: 1 Sort Method: quicksort Average Memory: NNkB Peak Memory: NNkB - Pre-sorted Groups: 5 Sort Methods: top-N heapsort, quicksort Average Memory: NNkB Peak Memory: NNkB - -> Sort (actual rows=1000 loops=1) - Sort Key: t.a - Sort Method: quicksort Memory: NNkB - -> Seq Scan on t (actual rows=1000 loops=1) -(10 rows) - -select jsonb_pretty(explain_analyze_inc_sort_nodes_without_memory('select * from (select * from t order by a) s order by a, b limit 70')); - jsonb_pretty -------------------------------------------------- - [ + - { + - "Sort Key": [ + - "t.a", + - "t.b" + - ], + - "Node Type": "Incremental Sort", + - "Actual Rows": 70, + - "Actual Loops": 1, + - "Async Capable": false, + - "Presorted Key": [ + - "t.a" + - ], + - "Parallel Aware": false, + - "Full-sort Groups": { + - "Group Count": 1, + - "Sort Methods Used": [ + - "quicksort" + - ], + - "Sort Space Memory": { + - "Peak Sort Space Used": "NN", + - "Average Sort Space Used": "NN"+ - } + - }, + - "Pre-sorted Groups": { + - "Group Count": 5, + - "Sort Methods Used": [ + - "top-N heapsort", + - "quicksort" + - ], + - "Sort Space Memory": { + - "Peak Sort Space Used": "NN", + - "Average Sort Space Used": "NN"+ - } + - }, + - "Parent Relationship": "Outer" + - } + - ] -(1 row) - -select explain_analyze_inc_sort_nodes_verify_invariants('select * from (select * from t order by a) s order by a, b limit 70'); - explain_analyze_inc_sort_nodes_verify_invariants --------------------------------------------------- - t -(1 row) - -delete from t; --- Small groups of 10 tuples each tested around each mode transition point. 
-insert into t(a, b) select i / 10, i from generate_series(1, 1000) n(i); -analyze t; -explain (costs off) select * from (select * from t order by a) s order by a, b limit 31; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 31; - a | b ----+---- - 0 | 1 - 0 | 2 - 0 | 3 - 0 | 4 - 0 | 5 - 0 | 6 - 0 | 7 - 0 | 8 - 0 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 2 | 20 - 2 | 21 - 2 | 22 - 2 | 23 - 2 | 24 - 2 | 25 - 2 | 26 - 2 | 27 - 2 | 28 - 2 | 29 - 3 | 30 - 3 | 31 -(31 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 32; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 32; - a | b ----+---- - 0 | 1 - 0 | 2 - 0 | 3 - 0 | 4 - 0 | 5 - 0 | 6 - 0 | 7 - 0 | 8 - 0 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 2 | 20 - 2 | 21 - 2 | 22 - 2 | 23 - 2 | 24 - 2 | 25 - 2 | 26 - 2 | 27 - 2 | 28 - 2 | 29 - 3 | 30 - 3 | 31 - 3 | 32 -(32 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 33; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 33; - a | b ----+---- - 0 | 1 - 0 | 2 - 0 | 3 - 0 | 4 - 0 | 5 - 0 | 6 - 0 | 7 - 0 | 8 - 0 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 2 | 20 - 2 | 21 - 2 | 22 - 2 | 23 - 2 | 24 - 2 | 25 - 2 | 26 - 2 | 27 - 2 | 28 - 2 | 29 - 3 | 30 - 3 | 31 - 3 | 32 - 3 | 33 -(33 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 65; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 65; - a | b ----+---- - 0 | 1 - 0 | 2 - 0 | 3 - 0 | 4 - 0 | 5 - 0 | 6 - 0 | 7 - 0 | 8 - 0 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 2 | 20 - 2 | 21 - 2 | 22 - 2 | 23 - 2 | 24 - 2 | 25 - 2 | 26 - 2 | 27 - 2 | 28 - 2 | 29 - 3 | 30 - 3 | 31 - 3 | 32 - 3 | 33 - 3 | 34 - 3 | 35 - 3 | 36 - 3 | 37 - 3 | 38 - 3 | 39 - 4 | 40 - 4 | 41 - 4 | 42 - 4 | 43 - 4 | 44 - 4 | 45 - 4 | 46 - 4 | 47 - 4 | 48 - 4 | 49 - 5 | 50 - 5 | 51 - 5 | 52 - 5 | 53 - 5 | 54 - 5 | 55 - 5 | 56 - 5 | 57 - 5 | 58 - 5 | 59 - 6 | 60 - 6 | 61 - 6 | 62 - 6 | 63 - 6 | 64 - 6 | 65 -(65 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 66; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 66; - a | b ----+---- - 0 | 1 - 0 | 2 - 0 | 3 - 0 | 4 - 0 | 5 - 0 | 6 - 0 | 7 - 0 | 8 - 0 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 2 | 20 - 2 | 21 - 2 | 22 - 2 | 23 - 2 | 24 - 2 | 25 - 2 | 26 - 2 
| 27 - 2 | 28 - 2 | 29 - 3 | 30 - 3 | 31 - 3 | 32 - 3 | 33 - 3 | 34 - 3 | 35 - 3 | 36 - 3 | 37 - 3 | 38 - 3 | 39 - 4 | 40 - 4 | 41 - 4 | 42 - 4 | 43 - 4 | 44 - 4 | 45 - 4 | 46 - 4 | 47 - 4 | 48 - 4 | 49 - 5 | 50 - 5 | 51 - 5 | 52 - 5 | 53 - 5 | 54 - 5 | 55 - 5 | 56 - 5 | 57 - 5 | 58 - 5 | 59 - 6 | 60 - 6 | 61 - 6 | 62 - 6 | 63 - 6 | 64 - 6 | 65 - 6 | 66 -(66 rows) - -delete from t; --- Small groups of only 1 tuple each tested around each mode transition point. -insert into t(a, b) select i, i from generate_series(1, 1000) n(i); -analyze t; -explain (costs off) select * from (select * from t order by a) s order by a, b limit 31; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 31; - a | b -----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 - 6 | 6 - 7 | 7 - 8 | 8 - 9 | 9 - 10 | 10 - 11 | 11 - 12 | 12 - 13 | 13 - 14 | 14 - 15 | 15 - 16 | 16 - 17 | 17 - 18 | 18 - 19 | 19 - 20 | 20 - 21 | 21 - 22 | 22 - 23 | 23 - 24 | 24 - 25 | 25 - 26 | 26 - 27 | 27 - 28 | 28 - 29 | 29 - 30 | 30 - 31 | 31 -(31 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 32; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 32; - a | b -----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 - 6 | 6 - 7 | 7 - 8 | 8 - 9 | 9 - 10 | 10 - 11 | 11 - 12 | 12 - 13 | 13 - 14 | 14 - 15 | 15 - 16 | 16 - 17 | 17 - 18 | 18 - 19 | 19 - 20 | 20 - 21 | 21 - 22 | 22 - 23 | 23 - 24 | 24 - 25 | 25 - 26 | 26 - 27 | 27 - 28 | 28 - 29 | 29 - 30 | 30 - 31 | 31 - 32 | 32 -(32 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 33; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 33; - a | b -----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 - 6 | 6 - 7 | 7 - 8 | 8 - 9 | 9 - 10 | 10 - 11 | 11 - 12 | 12 - 13 | 13 - 14 | 14 - 15 | 15 - 16 | 16 - 17 | 17 - 18 | 18 - 19 | 19 - 20 | 20 - 21 | 21 - 22 | 22 - 23 | 23 - 24 | 24 - 25 | 25 - 26 | 26 - 27 | 27 - 28 | 28 - 29 | 29 - 30 | 30 - 31 | 31 - 32 | 32 - 33 | 33 -(33 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 65; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 65; - a | b -----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 - 6 | 6 - 7 | 7 - 8 | 8 - 9 | 9 - 10 | 10 - 11 | 11 - 12 | 12 - 13 | 13 - 14 | 14 - 15 | 15 - 16 | 16 - 17 | 17 - 18 | 18 - 19 | 19 - 20 | 20 - 21 | 21 - 22 | 22 - 23 | 23 - 24 | 24 - 25 | 25 - 26 | 26 - 27 | 27 - 28 | 28 - 29 | 29 - 30 | 30 - 31 | 31 - 32 | 32 - 33 | 33 - 34 | 34 - 35 | 35 - 36 | 36 - 37 | 37 - 38 | 38 - 39 | 39 - 40 | 40 - 41 | 41 - 42 | 42 - 43 | 43 - 44 | 44 - 45 | 45 - 46 | 46 - 47 | 47 - 48 | 48 - 49 | 49 - 50 | 50 - 51 | 51 - 52 | 52 - 53 | 53 - 54 | 54 - 55 | 55 - 56 | 56 - 57 | 57 - 58 | 58 - 59 | 59 - 60 | 60 - 61 | 61 - 62 | 62 - 63 | 63 - 64 | 64 - 65 | 65 -(65 
rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 66; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 66; - a | b -----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 - 6 | 6 - 7 | 7 - 8 | 8 - 9 | 9 - 10 | 10 - 11 | 11 - 12 | 12 - 13 | 13 - 14 | 14 - 15 | 15 - 16 | 16 - 17 | 17 - 18 | 18 - 19 | 19 - 20 | 20 - 21 | 21 - 22 | 22 - 23 | 23 - 24 | 24 - 25 | 25 - 26 | 26 - 27 | 27 - 28 | 28 - 29 | 29 - 30 | 30 - 31 | 31 - 32 | 32 - 33 | 33 - 34 | 34 - 35 | 35 - 36 | 36 - 37 | 37 - 38 | 38 - 39 | 39 - 40 | 40 - 41 | 41 - 42 | 42 - 43 | 43 - 44 | 44 - 45 | 45 - 46 | 46 - 47 | 47 - 48 | 48 - 49 | 49 - 50 | 50 - 51 | 51 - 52 | 52 - 53 | 53 - 54 | 54 - 55 | 55 - 56 | 56 - 57 | 57 - 58 | 58 - 59 | 59 - 60 | 60 - 61 | 61 - 62 | 62 - 63 | 63 - 64 | 64 - 65 | 65 - 66 | 66 -(66 rows) - -delete from t; -drop table t; --- Incremental sort vs. parallel queries -set min_parallel_table_scan_size = '1kB'; -set min_parallel_index_scan_size = '1kB'; -set parallel_setup_cost = 0; -set parallel_tuple_cost = 0; -set max_parallel_workers_per_gather = 2; -create table t (a int, b int, c int); -insert into t select mod(i,10),mod(i,10),i from generate_series(1,10000) s(i); -create index on t (a); -analyze t; -set enable_incremental_sort = off; -explain (costs off) select a,b,sum(c) from t group by 1,2 order by 1,2,3 limit 1; - QUERY PLAN ------------------------------------------------------- - Limit - -> Sort - Sort Key: a, b, (sum(c)) - -> Finalize HashAggregate - Group Key: a, b - -> Gather - Workers Planned: 2 - -> Partial HashAggregate - Group Key: a, b - -> Parallel Seq Scan on t -(10 rows) - -set enable_incremental_sort = on; -explain (costs off) select a,b,sum(c) from t group by 1,2 order by 1,2,3 limit 1; - QUERY PLAN ----------------------------------------------------------------------- - Limit - -> Incremental Sort - Sort Key: a, b, (sum(c)) - Presorted Key: a, b - -> GroupAggregate - Group Key: a, b - -> Gather Merge - Workers Planned: 2 - -> Incremental Sort - Sort Key: a, b - Presorted Key: a - -> Parallel Index Scan using t_a_idx on t -(12 rows) - --- Incremental sort vs. set operations with varno 0 -set enable_hashagg to off; -explain (costs off) select * from t union select * from t order by 1,3; - QUERY PLAN ----------------------------------------------------------- - Incremental Sort - Sort Key: t.a, t.c - Presorted Key: t.a - -> Unique - -> Sort - Sort Key: t.a, t.b, t.c - -> Gather - Workers Planned: 2 - -> Parallel Append - -> Parallel Seq Scan on t - -> Parallel Seq Scan on t t_1 -(11 rows) - --- Full sort, not just incremental sort can be pushed below a gather merge path --- by generate_useful_gather_paths. -explain (costs off) select distinct a,b from t; - QUERY PLAN ------------------------------------------------- - Unique - -> Gather Merge - Workers Planned: 2 - -> Unique - -> Sort - Sort Key: a, b - -> Parallel Seq Scan on t -(7 rows) - -drop table t; --- Sort pushdown can't go below where expressions are part of the rel target. --- In particular this is interesting for volatile expressions which have to --- go above joins since otherwise we'll incorrectly use expression evaluations --- across multiple rows. 
-set enable_hashagg=off; -set enable_seqscan=off; -set enable_incremental_sort = off; -set parallel_tuple_cost=0; -set parallel_setup_cost=0; -set min_parallel_table_scan_size = 0; -set min_parallel_index_scan_size = 0; --- Parallel sort below join. -explain (costs off) select distinct sub.unique1, stringu1 -from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub; - QUERY PLAN --------------------------------------------------------------------------- - Unique - -> Nested Loop - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: tenk1.unique1, tenk1.stringu1 - -> Parallel Index Scan using tenk1_unique1 on tenk1 - -> Function Scan on generate_series -(8 rows) - -explain (costs off) select sub.unique1, stringu1 -from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub -order by 1, 2; - QUERY PLAN --------------------------------------------------------------------- - Nested Loop - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: tenk1.unique1, tenk1.stringu1 - -> Parallel Index Scan using tenk1_unique1 on tenk1 - -> Function Scan on generate_series -(7 rows) - --- Parallel sort but with expression that can be safely generated at the base rel. -explain (costs off) select distinct sub.unique1, md5(stringu1) -from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub; - QUERY PLAN ----------------------------------------------------------------------------------------- - Unique - -> Nested Loop - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: tenk1.unique1, (md5((tenk1.stringu1)::text)) COLLATE "C" - -> Parallel Index Scan using tenk1_unique1 on tenk1 - -> Function Scan on generate_series -(8 rows) - -explain (costs off) select sub.unique1, md5(stringu1) -from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub -order by 1, 2; - QUERY PLAN ----------------------------------------------------------------------------------- - Nested Loop - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: tenk1.unique1, (md5((tenk1.stringu1)::text)) COLLATE "C" - -> Parallel Index Scan using tenk1_unique1 on tenk1 - -> Function Scan on generate_series -(7 rows) - --- Parallel sort with an aggregate that can be safely generated in parallel, --- but we can't sort by partial aggregate values. -explain (costs off) select count(*) -from tenk1 t1 -join tenk1 t2 on t1.unique1 = t2.unique2 -join tenk1 t3 on t2.unique1 = t3.unique1 -order by count(*); - QUERY PLAN ------------------------------------------------------------------------------------------------ - Sort - Sort Key: (count(*)) - -> Finalize Aggregate - -> Gather - Workers Planned: 2 - -> Partial Aggregate - -> Parallel Hash Join - Hash Cond: (t2.unique1 = t3.unique1) - -> Parallel Hash Join - Hash Cond: (t1.unique1 = t2.unique2) - -> Parallel Index Only Scan using tenk1_unique1 on tenk1 t1 - -> Parallel Hash - -> Parallel Index Scan using tenk1_unique2 on tenk1 t2 - -> Parallel Hash - -> Parallel Index Only Scan using tenk1_unique1 on tenk1 t3 -(15 rows) - --- Parallel sort but with expression (correlated subquery) that --- is prohibited in parallel plans. 
-explain (costs off) select distinct - unique1, - (select t.unique1 from tenk1 where tenk1.unique1 = t.unique1) -from tenk1 t, generate_series(1, 1000); - QUERY PLAN ---------------------------------------------------------------------------------- - Unique - -> Sort - Sort Key: t.unique1, ((SubPlan 1)) - -> Gather - Workers Planned: 2 - -> Nested Loop - -> Parallel Index Only Scan using tenk1_unique1 on tenk1 t - -> Function Scan on generate_series - SubPlan 1 - -> Index Only Scan using tenk1_unique1 on tenk1 - Index Cond: (unique1 = t.unique1) -(11 rows) - -explain (costs off) select - unique1, - (select t.unique1 from tenk1 where tenk1.unique1 = t.unique1) -from tenk1 t, generate_series(1, 1000) -order by 1, 2; - QUERY PLAN ---------------------------------------------------------------------------- - Sort - Sort Key: t.unique1, ((SubPlan 1)) - -> Gather - Workers Planned: 2 - -> Nested Loop - -> Parallel Index Only Scan using tenk1_unique1 on tenk1 t - -> Function Scan on generate_series - SubPlan 1 - -> Index Only Scan using tenk1_unique1 on tenk1 - Index Cond: (unique1 = t.unique1) -(10 rows) - --- Parallel sort but with expression not available until the upper rel. -explain (costs off) select distinct sub.unique1, stringu1 || random()::text -from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub; - QUERY PLAN ---------------------------------------------------------------------------------------------- - Unique - -> Sort - Sort Key: tenk1.unique1, (((tenk1.stringu1)::text || (random())::text)) COLLATE "C" - -> Gather - Workers Planned: 2 - -> Nested Loop - -> Parallel Index Scan using tenk1_unique1 on tenk1 - -> Function Scan on generate_series -(8 rows) - -explain (costs off) select sub.unique1, stringu1 || random()::text -from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub -order by 1, 2; - QUERY PLAN ---------------------------------------------------------------------------------------- - Sort - Sort Key: tenk1.unique1, (((tenk1.stringu1)::text || (random())::text)) COLLATE "C" - -> Gather - Workers Planned: 2 - -> Nested Loop - -> Parallel Index Scan using tenk1_unique1 on tenk1 - -> Function Scan on generate_series -(7 rows) - -reset enable_hashagg; -reset enable_seqscan; -reset enable_incremental_sort; -reset parallel_tuple_cost; -reset parallel_setup_cost; -reset min_parallel_table_scan_size; -reset min_parallel_index_scan_size; --- Ensure incremental sorts work for amcanorderbyop type indexes -create table point_table (a point, b int); -create index point_table_a_idx on point_table using gist(a); --- Ensure we get an incremental sort plan for both of the following queries -explain (costs off) select a, b, a <-> point(5, 5) dist from point_table order by dist, b limit 1; - QUERY PLAN ---------------------------------------------------------------- - Limit - -> Incremental Sort - Sort Key: ((a <-> '(5,5)'::point)), b - Presorted Key: ((a <-> '(5,5)'::point)) - -> Index Scan using point_table_a_idx on point_table - Order By: (a <-> '(5,5)'::point) -(6 rows) - -explain (costs off) select a, b, a <-> point(5, 5) dist from point_table order by dist, b desc limit 1; - QUERY PLAN ---------------------------------------------------------------- - Limit - -> Incremental Sort - Sort Key: ((a <-> '(5,5)'::point)), b DESC - Presorted Key: ((a <-> '(5,5)'::point)) - -> Index Scan using point_table_a_idx on point_table - Order By: (a <-> '(5,5)'::point) -(6 rows) - +psql: error: connection to server on socket 
"/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/create_role.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/create_role.out --- /tmp/cirrus-ci-build/src/test/regress/expected/create_role.out 2024-03-07 14:25:00.330014000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/create_role.out 2024-03-07 14:27:17.083628000 +0000 @@ -1,261 +1,2 @@ --- ok, superuser can create users with any set of privileges -CREATE ROLE regress_role_super SUPERUSER; -CREATE ROLE regress_role_admin CREATEDB CREATEROLE REPLICATION BYPASSRLS; -GRANT CREATE ON DATABASE regression TO regress_role_admin WITH GRANT OPTION; -CREATE ROLE regress_role_limited_admin CREATEROLE; -CREATE ROLE regress_role_normal; --- fail, CREATEROLE user can't give away role attributes without having them -SET SESSION AUTHORIZATION regress_role_limited_admin; -CREATE ROLE regress_nosuch_superuser SUPERUSER; -ERROR: permission denied to create role -DETAIL: Only roles with the SUPERUSER attribute may create roles with the SUPERUSER attribute. -CREATE ROLE regress_nosuch_replication_bypassrls REPLICATION BYPASSRLS; -ERROR: permission denied to create role -DETAIL: Only roles with the REPLICATION attribute may create roles with the REPLICATION attribute. -CREATE ROLE regress_nosuch_replication REPLICATION; -ERROR: permission denied to create role -DETAIL: Only roles with the REPLICATION attribute may create roles with the REPLICATION attribute. -CREATE ROLE regress_nosuch_bypassrls BYPASSRLS; -ERROR: permission denied to create role -DETAIL: Only roles with the BYPASSRLS attribute may create roles with the BYPASSRLS attribute. -CREATE ROLE regress_nosuch_createdb CREATEDB; -ERROR: permission denied to create role -DETAIL: Only roles with the CREATEDB attribute may create roles with the CREATEDB attribute. --- ok, can create a role without any special attributes -CREATE ROLE regress_role_limited; --- fail, can't give it in any of the restricted attributes -ALTER ROLE regress_role_limited SUPERUSER; -ERROR: permission denied to alter role -DETAIL: Only roles with the SUPERUSER attribute may change the SUPERUSER attribute. -ALTER ROLE regress_role_limited REPLICATION; -ERROR: permission denied to alter role -DETAIL: Only roles with the REPLICATION attribute may change the REPLICATION attribute. -ALTER ROLE regress_role_limited CREATEDB; -ERROR: permission denied to alter role -DETAIL: Only roles with the CREATEDB attribute may change the CREATEDB attribute. -ALTER ROLE regress_role_limited BYPASSRLS; -ERROR: permission denied to alter role -DETAIL: Only roles with the BYPASSRLS attribute may change the BYPASSRLS attribute. 
-DROP ROLE regress_role_limited; --- ok, can give away these role attributes if you have them -SET SESSION AUTHORIZATION regress_role_admin; -CREATE ROLE regress_replication_bypassrls REPLICATION BYPASSRLS; -CREATE ROLE regress_replication REPLICATION; -CREATE ROLE regress_bypassrls BYPASSRLS; -CREATE ROLE regress_createdb CREATEDB; --- ok, can toggle these role attributes off and on if you have them -ALTER ROLE regress_replication NOREPLICATION; -ALTER ROLE regress_replication REPLICATION; -ALTER ROLE regress_bypassrls NOBYPASSRLS; -ALTER ROLE regress_bypassrls BYPASSRLS; -ALTER ROLE regress_createdb NOCREATEDB; -ALTER ROLE regress_createdb CREATEDB; --- fail, can't toggle SUPERUSER -ALTER ROLE regress_createdb SUPERUSER; -ERROR: permission denied to alter role -DETAIL: Only roles with the SUPERUSER attribute may change the SUPERUSER attribute. -ALTER ROLE regress_createdb NOSUPERUSER; -ERROR: permission denied to alter role -DETAIL: Only roles with the SUPERUSER attribute may change the SUPERUSER attribute. --- ok, having CREATEROLE is enough to create users with these privileges -CREATE ROLE regress_createrole CREATEROLE NOINHERIT; -GRANT CREATE ON DATABASE regression TO regress_createrole WITH GRANT OPTION; -CREATE ROLE regress_login LOGIN; -CREATE ROLE regress_inherit INHERIT; -CREATE ROLE regress_connection_limit CONNECTION LIMIT 5; -CREATE ROLE regress_encrypted_password ENCRYPTED PASSWORD 'foo'; -CREATE ROLE regress_password_null PASSWORD NULL; --- ok, backwards compatible noise words should be ignored -CREATE ROLE regress_noiseword SYSID 12345; -NOTICE: SYSID can no longer be specified --- fail, cannot grant membership in superuser role -CREATE ROLE regress_nosuch_super IN ROLE regress_role_super; -ERROR: permission denied to grant role "regress_role_super" -DETAIL: Only roles with the SUPERUSER attribute may grant roles with the SUPERUSER attribute. 
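-- Illustration only (not part of the regression diff): one way to verify which
-- attributes the roles created above actually carry is to query pg_roles:
select rolname, rolcreatedb, rolreplication, rolbypassrls, rolconnlimit
from pg_roles
where rolname like 'regress\_%'
order by rolname;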
--- fail, database owner cannot have members -CREATE ROLE regress_nosuch_dbowner IN ROLE pg_database_owner; -ERROR: role "pg_database_owner" cannot have explicit members --- ok, can grant other users into a role -CREATE ROLE regress_inroles ROLE - regress_role_super, regress_createdb, regress_createrole, regress_login, - regress_inherit, regress_connection_limit, regress_encrypted_password, regress_password_null; --- fail, cannot grant a role into itself -CREATE ROLE regress_nosuch_recursive ROLE regress_nosuch_recursive; -ERROR: role "regress_nosuch_recursive" is a member of role "regress_nosuch_recursive" --- ok, can grant other users into a role with admin option -CREATE ROLE regress_adminroles ADMIN - regress_role_super, regress_createdb, regress_createrole, regress_login, - regress_inherit, regress_connection_limit, regress_encrypted_password, regress_password_null; --- fail, cannot grant a role into itself with admin option -CREATE ROLE regress_nosuch_admin_recursive ADMIN regress_nosuch_admin_recursive; -ERROR: role "regress_nosuch_admin_recursive" is a member of role "regress_nosuch_admin_recursive" --- fail, regress_createrole does not have CREATEDB privilege -SET SESSION AUTHORIZATION regress_createrole; -CREATE DATABASE regress_nosuch_db; -ERROR: permission denied to create database --- ok, regress_createrole can create new roles -CREATE ROLE regress_plainrole; --- ok, roles with CREATEROLE can create new roles with it -CREATE ROLE regress_rolecreator CREATEROLE; --- ok, roles with CREATEROLE can create new roles with different role --- attributes, including CREATEROLE -CREATE ROLE regress_hasprivs CREATEROLE LOGIN INHERIT CONNECTION LIMIT 5; --- ok, we should be able to modify a role we created -COMMENT ON ROLE regress_hasprivs IS 'some comment'; -ALTER ROLE regress_hasprivs RENAME TO regress_tenant; -ALTER ROLE regress_tenant NOINHERIT NOLOGIN CONNECTION LIMIT 7; --- fail, we should be unable to modify a role we did not create -COMMENT ON ROLE regress_role_normal IS 'some comment'; -ERROR: permission denied -DETAIL: The current user must have the ADMIN option on role "regress_role_normal". -ALTER ROLE regress_role_normal RENAME TO regress_role_abnormal; -ERROR: permission denied to rename role -DETAIL: Only roles with the CREATEROLE attribute and the ADMIN option on role "regress_role_normal" may rename this role. -ALTER ROLE regress_role_normal NOINHERIT NOLOGIN CONNECTION LIMIT 7; -ERROR: permission denied to alter role -DETAIL: Only roles with the CREATEROLE attribute and the ADMIN option on role "regress_role_normal" may alter this role. 
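-- Illustration only (not part of the regression diff): the ADMIN option these
-- permission checks hinge on is visible in pg_auth_members, e.g. for the
-- grants recorded by CREATE ROLE regress_adminroles ADMIN ... above:
select roleid::regrole, member::regrole, admin_option
from pg_auth_members
where roleid = 'regress_adminroles'::regrole;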
--- ok, regress_tenant can create objects within the database -SET SESSION AUTHORIZATION regress_tenant; -CREATE TABLE tenant_table (i integer); -CREATE INDEX tenant_idx ON tenant_table(i); -CREATE VIEW tenant_view AS SELECT * FROM pg_catalog.pg_class; -REVOKE ALL PRIVILEGES ON tenant_table FROM PUBLIC; --- fail, these objects belong to regress_tenant -SET SESSION AUTHORIZATION regress_createrole; -DROP INDEX tenant_idx; -ERROR: must be owner of index tenant_idx -ALTER TABLE tenant_table ADD COLUMN t text; -ERROR: must be owner of table tenant_table -DROP TABLE tenant_table; -ERROR: must be owner of table tenant_table -ALTER VIEW tenant_view OWNER TO regress_role_admin; -ERROR: must be owner of view tenant_view -DROP VIEW tenant_view; -ERROR: must be owner of view tenant_view --- fail, can't create objects owned as regress_tenant -CREATE SCHEMA regress_tenant_schema AUTHORIZATION regress_tenant; -ERROR: must be able to SET ROLE "regress_tenant" --- fail, we don't inherit permissions from regress_tenant -REASSIGN OWNED BY regress_tenant TO regress_createrole; -ERROR: permission denied to reassign objects -DETAIL: Only roles with privileges of role "regress_tenant" may reassign objects owned by it. --- ok, create a role with a value for createrole_self_grant -SET createrole_self_grant = 'set, inherit'; -CREATE ROLE regress_tenant2; -GRANT CREATE ON DATABASE regression TO regress_tenant2; --- ok, regress_tenant2 can create objects within the database -SET SESSION AUTHORIZATION regress_tenant2; -CREATE TABLE tenant2_table (i integer); -REVOKE ALL PRIVILEGES ON tenant2_table FROM PUBLIC; --- ok, because we have SET and INHERIT on regress_tenant2 -SET SESSION AUTHORIZATION regress_createrole; -CREATE SCHEMA regress_tenant2_schema AUTHORIZATION regress_tenant2; -ALTER SCHEMA regress_tenant2_schema OWNER TO regress_createrole; -ALTER TABLE tenant2_table OWNER TO regress_createrole; -ALTER TABLE tenant2_table OWNER TO regress_tenant2; --- with SET but not INHERIT, we can give away objects but not take them -REVOKE INHERIT OPTION FOR regress_tenant2 FROM regress_createrole; -ALTER SCHEMA regress_tenant2_schema OWNER TO regress_tenant2; -ALTER TABLE tenant2_table OWNER TO regress_createrole; -ERROR: must be owner of table tenant2_table --- with INHERIT but not SET, we can take objects but not give them away -GRANT regress_tenant2 TO regress_createrole WITH INHERIT TRUE, SET FALSE; -ALTER TABLE tenant2_table OWNER TO regress_createrole; -ALTER TABLE tenant2_table OWNER TO regress_tenant2; -ERROR: must be able to SET ROLE "regress_tenant2" -DROP TABLE tenant2_table; --- fail, CREATEROLE is not enough to create roles in privileged roles -CREATE ROLE regress_read_all_data IN ROLE pg_read_all_data; -ERROR: permission denied to grant role "pg_read_all_data" -DETAIL: Only roles with the ADMIN option on role "pg_read_all_data" may grant this role. -CREATE ROLE regress_write_all_data IN ROLE pg_write_all_data; -ERROR: permission denied to grant role "pg_write_all_data" -DETAIL: Only roles with the ADMIN option on role "pg_write_all_data" may grant this role. -CREATE ROLE regress_monitor IN ROLE pg_monitor; -ERROR: permission denied to grant role "pg_monitor" -DETAIL: Only roles with the ADMIN option on role "pg_monitor" may grant this role. -CREATE ROLE regress_read_all_settings IN ROLE pg_read_all_settings; -ERROR: permission denied to grant role "pg_read_all_settings" -DETAIL: Only roles with the ADMIN option on role "pg_read_all_settings" may grant this role.
-CREATE ROLE regress_read_all_stats IN ROLE pg_read_all_stats; -ERROR: permission denied to grant role "pg_read_all_stats" -DETAIL: Only roles with the ADMIN option on role "pg_read_all_stats" may grant this role. -CREATE ROLE regress_stat_scan_tables IN ROLE pg_stat_scan_tables; -ERROR: permission denied to grant role "pg_stat_scan_tables" -DETAIL: Only roles with the ADMIN option on role "pg_stat_scan_tables" may grant this role. -CREATE ROLE regress_read_server_files IN ROLE pg_read_server_files; -ERROR: permission denied to grant role "pg_read_server_files" -DETAIL: Only roles with the ADMIN option on role "pg_read_server_files" may grant this role. -CREATE ROLE regress_write_server_files IN ROLE pg_write_server_files; -ERROR: permission denied to grant role "pg_write_server_files" -DETAIL: Only roles with the ADMIN option on role "pg_write_server_files" may grant this role. -CREATE ROLE regress_execute_server_program IN ROLE pg_execute_server_program; -ERROR: permission denied to grant role "pg_execute_server_program" -DETAIL: Only roles with the ADMIN option on role "pg_execute_server_program" may grant this role. -CREATE ROLE regress_signal_backend IN ROLE pg_signal_backend; -ERROR: permission denied to grant role "pg_signal_backend" -DETAIL: Only roles with the ADMIN option on role "pg_signal_backend" may grant this role. --- fail, role still owns database objects -DROP ROLE regress_tenant; -ERROR: role "regress_tenant" cannot be dropped because some objects depend on it -DETAIL: owner of table tenant_table -owner of view tenant_view --- fail, creation of these roles failed above so they do not now exist -SET SESSION AUTHORIZATION regress_role_admin; -DROP ROLE regress_nosuch_superuser; -ERROR: role "regress_nosuch_superuser" does not exist -DROP ROLE regress_nosuch_replication_bypassrls; -ERROR: role "regress_nosuch_replication_bypassrls" does not exist -DROP ROLE regress_nosuch_replication; -ERROR: role "regress_nosuch_replication" does not exist -DROP ROLE regress_nosuch_bypassrls; -ERROR: role "regress_nosuch_bypassrls" does not exist -DROP ROLE regress_nosuch_super; -ERROR: role "regress_nosuch_super" does not exist -DROP ROLE regress_nosuch_dbowner; -ERROR: role "regress_nosuch_dbowner" does not exist -DROP ROLE regress_nosuch_recursive; -ERROR: role "regress_nosuch_recursive" does not exist -DROP ROLE regress_nosuch_admin_recursive; -ERROR: role "regress_nosuch_admin_recursive" does not exist -DROP ROLE regress_plainrole; --- must revoke privileges before dropping role -REVOKE CREATE ON DATABASE regression FROM regress_createrole CASCADE; --- ok, should be able to drop non-superuser roles we created -DROP ROLE regress_replication_bypassrls; -DROP ROLE regress_replication; -DROP ROLE regress_bypassrls; -DROP ROLE regress_createdb; -DROP ROLE regress_createrole; -DROP ROLE regress_login; -DROP ROLE regress_inherit; -DROP ROLE regress_connection_limit; -DROP ROLE regress_encrypted_password; -DROP ROLE regress_password_null; -DROP ROLE regress_noiseword; -DROP ROLE regress_inroles; -DROP ROLE regress_adminroles; --- fail, cannot drop ourself, nor superusers or roles we lack ADMIN for -DROP ROLE regress_role_super; -ERROR: permission denied to drop role -DETAIL: Only roles with the SUPERUSER attribute may drop roles with the SUPERUSER attribute. 
-DROP ROLE regress_role_admin; -ERROR: current user cannot be dropped -DROP ROLE regress_rolecreator; -ERROR: permission denied to drop role -DETAIL: Only roles with the CREATEROLE attribute and the ADMIN option on role "regress_rolecreator" may drop this role. --- ok -RESET SESSION AUTHORIZATION; -REVOKE CREATE ON DATABASE regression FROM regress_role_admin CASCADE; -DROP INDEX tenant_idx; -DROP TABLE tenant_table; -DROP VIEW tenant_view; -DROP SCHEMA regress_tenant2_schema; --- check for duplicated drop -DROP ROLE regress_tenant, regress_tenant; -DROP ROLE regress_tenant2; -DROP ROLE regress_rolecreator; -DROP ROLE regress_role_admin; -DROP ROLE regress_role_limited_admin; -DROP ROLE regress_role_super; -DROP ROLE regress_role_normal; +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/without_overlaps.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/without_overlaps.out --- /tmp/cirrus-ci-build/src/test/regress/expected/without_overlaps.out 2024-03-07 14:25:00.334827000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/without_overlaps.out 2024-03-07 14:27:17.082280000 +0000 @@ -1,395 +1,2 @@ --- Tests for WITHOUT OVERLAPS. --- --- We leave behind several tables to test pg_dump etc: --- temporal_rng, temporal_rng2, --- temporal_fk_rng2rng. --- --- test input parser --- --- PK with no columns just WITHOUT OVERLAPS: -CREATE TABLE temporal_rng ( - valid_at tsrange, - CONSTRAINT temporal_rng_pk PRIMARY KEY (valid_at WITHOUT OVERLAPS) -); -ERROR: constraint using WITHOUT OVERLAPS needs at least two columns --- PK with a range column/PERIOD that isn't there: -CREATE TABLE temporal_rng ( - id INTEGER, - CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) -); -ERROR: column "valid_at" named in key does not exist -LINE 3: CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOU... - ^ --- PK with a non-range column: -CREATE TABLE temporal_rng ( - id int4range, - valid_at TEXT, - CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) -); -ERROR: data type text has no default operator class for access method "gist" -HINT: You must specify an operator class for the index or define a default operator class for the data type. --- PK with one column plus a range: -CREATE TABLE temporal_rng ( - -- Since we can't depend on having btree_gist here, - -- use an int4range instead of an int. - -- (The rangetypes regression test uses the same trick.) 
- id int4range, - valid_at tsrange, - CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) -); -\d temporal_rng - Table "public.temporal_rng" - Column | Type | Collation | Nullable | Default -----------+-----------+-----------+----------+--------- - id | int4range | | not null | - valid_at | tsrange | | not null | -Indexes: - "temporal_rng_pk" PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) - -SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_rng_pk'; - pg_get_constraintdef ---------------------------------------------- - PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) -(1 row) - -SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE conname = 'temporal_rng_pk'; - pg_get_indexdef -------------------------------------------------------------------------------- - CREATE UNIQUE INDEX temporal_rng_pk ON temporal_rng USING gist (id, valid_at) -(1 row) - --- PK with two columns plus a range: --- We don't drop this table because tests below also need multiple scalar columns. -CREATE TABLE temporal_rng2 ( - id1 int4range, - id2 int4range, - valid_at tsrange, - CONSTRAINT temporal_rng2_pk PRIMARY KEY (id1, id2, valid_at WITHOUT OVERLAPS) -); -\d temporal_rng2 - Table "public.temporal_rng2" - Column | Type | Collation | Nullable | Default -----------+-----------+-----------+----------+--------- - id1 | int4range | | not null | - id2 | int4range | | not null | - valid_at | tsrange | | not null | -Indexes: - "temporal_rng2_pk" PRIMARY KEY (id1, id2, valid_at WITHOUT OVERLAPS) - -SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_rng2_pk'; - pg_get_constraintdef ---------------------------------------------------- - PRIMARY KEY (id1, id2, valid_at WITHOUT OVERLAPS) -(1 row) - -SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE conname = 'temporal_rng2_pk'; - pg_get_indexdef ---------------------------------------------------------------------------------------- - CREATE UNIQUE INDEX temporal_rng2_pk ON temporal_rng2 USING gist (id1, id2, valid_at) -(1 row) - --- PK with a custom range type: -CREATE TYPE textrange2 AS range (subtype=text, collation="C"); -CREATE TABLE temporal_rng3 ( - id int4range, - valid_at textrange2, - CONSTRAINT temporal_rng3_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) -); -ALTER TABLE temporal_rng3 DROP CONSTRAINT temporal_rng3_pk; -DROP TABLE temporal_rng3; -DROP TYPE textrange2; --- PK with a multirange: -CREATE TABLE temporal_mltrng ( - id int4range, - valid_at tsmultirange, - CONSTRAINT temporal_mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) -); -\d temporal_mltrng - Table "public.temporal_mltrng" - Column | Type | Collation | Nullable | Default -----------+--------------+-----------+----------+--------- - id | int4range | | not null | - valid_at | tsmultirange | | not null | -Indexes: - "temporal_mltrng_pk" PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) - --- UNIQUE with no columns just WITHOUT OVERLAPS: -CREATE TABLE temporal_rng3 ( - valid_at tsrange, - CONSTRAINT temporal_rng3_uq UNIQUE (valid_at WITHOUT OVERLAPS) -); -ERROR: constraint using WITHOUT OVERLAPS needs at least two columns --- UNIQUE with a range column/PERIOD that isn't there: -CREATE TABLE temporal_rng3 ( - id INTEGER, - CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS) -); -ERROR: column "valid_at" named in key does not exist -LINE 3: CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT OV... 
- ^ --- UNIQUE with a non-range column: -CREATE TABLE temporal_rng3 ( - id int4range, - valid_at TEXT, - CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS) -); -ERROR: data type text has no default operator class for access method "gist" -HINT: You must specify an operator class for the index or define a default operator class for the data type. --- UNIQUE with one column plus a range: -CREATE TABLE temporal_rng3 ( - id int4range, - valid_at tsrange, - CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS) -); -\d temporal_rng3 - Table "public.temporal_rng3" - Column | Type | Collation | Nullable | Default -----------+-----------+-----------+----------+--------- - id | int4range | | | - valid_at | tsrange | | | -Indexes: - "temporal_rng3_uq" UNIQUE (id, valid_at WITHOUT OVERLAPS) - -SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_rng3_uq'; - pg_get_constraintdef ----------------------------------------- - UNIQUE (id, valid_at WITHOUT OVERLAPS) -(1 row) - -SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE conname = 'temporal_rng3_uq'; - pg_get_indexdef ---------------------------------------------------------------------------------- - CREATE UNIQUE INDEX temporal_rng3_uq ON temporal_rng3 USING gist (id, valid_at) -(1 row) - -DROP TABLE temporal_rng3; --- UNIQUE with two columns plus a range: -CREATE TABLE temporal_rng3 ( - id1 int4range, - id2 int4range, - valid_at tsrange, - CONSTRAINT temporal_rng3_uq UNIQUE (id1, id2, valid_at WITHOUT OVERLAPS) -); -\d temporal_rng3 - Table "public.temporal_rng3" - Column | Type | Collation | Nullable | Default -----------+-----------+-----------+----------+--------- - id1 | int4range | | | - id2 | int4range | | | - valid_at | tsrange | | | -Indexes: - "temporal_rng3_uq" UNIQUE (id1, id2, valid_at WITHOUT OVERLAPS) - -SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_rng3_uq'; - pg_get_constraintdef ----------------------------------------------- - UNIQUE (id1, id2, valid_at WITHOUT OVERLAPS) -(1 row) - -SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE conname = 'temporal_rng3_uq'; - pg_get_indexdef ---------------------------------------------------------------------------------------- - CREATE UNIQUE INDEX temporal_rng3_uq ON temporal_rng3 USING gist (id1, id2, valid_at) -(1 row) - -DROP TABLE temporal_rng3; --- UNIQUE with a custom range type: -CREATE TYPE textrange2 AS range (subtype=text, collation="C"); -CREATE TABLE temporal_rng3 ( - id int4range, - valid_at textrange2, - CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS) -); -ALTER TABLE temporal_rng3 DROP CONSTRAINT temporal_rng3_uq; -DROP TABLE temporal_rng3; -DROP TYPE textrange2; --- --- test ALTER TABLE ADD CONSTRAINT --- -DROP TABLE temporal_rng; -CREATE TABLE temporal_rng ( - id int4range, - valid_at tsrange -); -ALTER TABLE temporal_rng - ADD CONSTRAINT temporal_rng_pk - PRIMARY KEY (id, valid_at WITHOUT OVERLAPS); --- PK with USING INDEX (not possible): -CREATE TABLE temporal3 ( - id int4range, - valid_at tsrange -); -CREATE INDEX idx_temporal3_uq ON temporal3 USING gist (id, valid_at); -ALTER TABLE temporal3 - ADD CONSTRAINT temporal3_pk - PRIMARY KEY USING INDEX idx_temporal3_uq; -ERROR: "idx_temporal3_uq" is not a unique index -LINE 2: ADD CONSTRAINT temporal3_pk - ^ -DETAIL: Cannot create a primary key or unique constraint using such an index. 
-DROP TABLE temporal3; --- UNIQUE with USING INDEX (not possible): -CREATE TABLE temporal3 ( - id int4range, - valid_at tsrange -); -CREATE INDEX idx_temporal3_uq ON temporal3 USING gist (id, valid_at); -ALTER TABLE temporal3 - ADD CONSTRAINT temporal3_uq - UNIQUE USING INDEX idx_temporal3_uq; -ERROR: "idx_temporal3_uq" is not a unique index -LINE 2: ADD CONSTRAINT temporal3_uq - ^ -DETAIL: Cannot create a primary key or unique constraint using such an index. -DROP TABLE temporal3; --- UNIQUE with USING [UNIQUE] INDEX (possible but not a temporal constraint): -CREATE TABLE temporal3 ( - id int4range, - valid_at tsrange -); -CREATE UNIQUE INDEX idx_temporal3_uq ON temporal3 (id, valid_at); -ALTER TABLE temporal3 - ADD CONSTRAINT temporal3_uq - UNIQUE USING INDEX idx_temporal3_uq; -NOTICE: ALTER TABLE / ADD CONSTRAINT USING INDEX will rename index "idx_temporal3_uq" to "temporal3_uq" -DROP TABLE temporal3; --- Add range column and the PK at the same time -CREATE TABLE temporal3 ( - id int4range -); -ALTER TABLE temporal3 - ADD COLUMN valid_at tsrange, - ADD CONSTRAINT temporal3_pk - PRIMARY KEY (id, valid_at WITHOUT OVERLAPS); -DROP TABLE temporal3; --- Add range column and UNIQUE constraint at the same time -CREATE TABLE temporal3 ( - id int4range -); -ALTER TABLE temporal3 - ADD COLUMN valid_at tsrange, - ADD CONSTRAINT temporal3_uq - UNIQUE (id, valid_at WITHOUT OVERLAPS); -DROP TABLE temporal3; --- --- test PK inserts --- --- okay: -INSERT INTO temporal_rng VALUES ('[1,1]', tsrange('2018-01-02', '2018-02-03')); -INSERT INTO temporal_rng VALUES ('[1,1]', tsrange('2018-03-03', '2018-04-04')); -INSERT INTO temporal_rng VALUES ('[2,2]', tsrange('2018-01-01', '2018-01-05')); -INSERT INTO temporal_rng VALUES ('[3,3]', tsrange('2018-01-01', NULL)); --- should fail: -INSERT INTO temporal_rng VALUES ('[1,1]', tsrange('2018-01-01', '2018-01-05')); -ERROR: conflicting key value violates exclusion constraint "temporal_rng_pk" -DETAIL: Key (id, valid_at)=([1,2), ["Mon Jan 01 00:00:00 2018","Fri Jan 05 00:00:00 2018")) conflicts with existing key (id, valid_at)=([1,2), ["Tue Jan 02 00:00:00 2018","Sat Feb 03 00:00:00 2018")). -INSERT INTO temporal_rng VALUES (NULL, tsrange('2018-01-01', '2018-01-05')); -ERROR: null value in column "id" of relation "temporal_rng" violates not-null constraint -DETAIL: Failing row contains (null, ["Mon Jan 01 00:00:00 2018","Fri Jan 05 00:00:00 2018")). -INSERT INTO temporal_rng VALUES ('[3,3]', NULL); -ERROR: null value in column "valid_at" of relation "temporal_rng" violates not-null constraint -DETAIL: Failing row contains ([3,4), null). 
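-- Illustration only (not part of the regression diff): the DETAIL lines above
-- show [1,2) and [3,4) for the inserted literals '[1,1]' and '[3,3]' because
-- discrete range types are canonicalized to inclusive-lower/exclusive-upper
-- form:
select '[1,1]'::int4range, '[3,3]'::int4range;
-- returns [1,2) and [3,4)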
--- --- test a range with both a PK and a UNIQUE constraint --- -CREATE TABLE temporal3 ( - id int4range, - valid_at daterange, - id2 int8range, - name TEXT, - CONSTRAINT temporal3_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS), - CONSTRAINT temporal3_uniq UNIQUE (id2, valid_at WITHOUT OVERLAPS) -); -INSERT INTO temporal3 (id, valid_at, id2, name) - VALUES - ('[1,1]', daterange('2000-01-01', '2010-01-01'), '[7,7]', 'foo'), - ('[2,2]', daterange('2000-01-01', '2010-01-01'), '[9,9]', 'bar') -; -DROP TABLE temporal3; --- --- test changing the PK's dependencies --- -CREATE TABLE temporal3 ( - id int4range, - valid_at tsrange, - CONSTRAINT temporal3_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) -); -ALTER TABLE temporal3 ALTER COLUMN valid_at DROP NOT NULL; -ERROR: column "valid_at" is in a primary key -ALTER TABLE temporal3 ALTER COLUMN valid_at TYPE tstzrange USING tstzrange(lower(valid_at), upper(valid_at)); -ALTER TABLE temporal3 RENAME COLUMN valid_at TO valid_thru; -ALTER TABLE temporal3 DROP COLUMN valid_thru; -DROP TABLE temporal3; --- --- test PARTITION BY for ranges --- --- temporal PRIMARY KEY: -CREATE TABLE temporal_partitioned ( - id int4range, - valid_at daterange, - name text, - CONSTRAINT temporal_paritioned_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) -) PARTITION BY LIST (id); -CREATE TABLE tp1 PARTITION OF temporal_partitioned FOR VALUES IN ('[1,1]', '[2,2]'); -CREATE TABLE tp2 PARTITION OF temporal_partitioned FOR VALUES IN ('[3,3]', '[4,4]'); -INSERT INTO temporal_partitioned VALUES - ('[1,1]', daterange('2000-01-01', '2000-02-01'), 'one'), - ('[1,1]', daterange('2000-02-01', '2000-03-01'), 'one'), - ('[3,3]', daterange('2000-01-01', '2010-01-01'), 'three'); -SELECT * FROM temporal_partitioned ORDER BY id, valid_at; - id | valid_at | name --------+-------------------------+------- - [1,2) | [01-01-2000,02-01-2000) | one - [1,2) | [02-01-2000,03-01-2000) | one - [3,4) | [01-01-2000,01-01-2010) | three -(3 rows) - -SELECT * FROM tp1 ORDER BY id, valid_at; - id | valid_at | name --------+-------------------------+------ - [1,2) | [01-01-2000,02-01-2000) | one - [1,2) | [02-01-2000,03-01-2000) | one -(2 rows) - -SELECT * FROM tp2 ORDER BY id, valid_at; - id | valid_at | name --------+-------------------------+------- - [3,4) | [01-01-2000,01-01-2010) | three -(1 row) - -DROP TABLE temporal_partitioned; --- temporal UNIQUE: -CREATE TABLE temporal_partitioned ( - id int4range, - valid_at daterange, - name text, - CONSTRAINT temporal_paritioned_uq UNIQUE (id, valid_at WITHOUT OVERLAPS) -) PARTITION BY LIST (id); -CREATE TABLE tp1 PARTITION OF temporal_partitioned FOR VALUES IN ('[1,1]', '[2,2]'); -CREATE TABLE tp2 PARTITION OF temporal_partitioned FOR VALUES IN ('[3,3]', '[4,4]'); -INSERT INTO temporal_partitioned VALUES - ('[1,1]', daterange('2000-01-01', '2000-02-01'), 'one'), - ('[1,1]', daterange('2000-02-01', '2000-03-01'), 'one'), - ('[3,3]', daterange('2000-01-01', '2010-01-01'), 'three'); -SELECT * FROM temporal_partitioned ORDER BY id, valid_at; - id | valid_at | name --------+-------------------------+------- - [1,2) | [01-01-2000,02-01-2000) | one - [1,2) | [02-01-2000,03-01-2000) | one - [3,4) | [01-01-2000,01-01-2010) | three -(3 rows) - -SELECT * FROM tp1 ORDER BY id, valid_at; - id | valid_at | name --------+-------------------------+------ - [1,2) | [01-01-2000,02-01-2000) | one - [1,2) | [02-01-2000,03-01-2000) | one -(2 rows) - -SELECT * FROM tp2 ORDER BY id, valid_at; - id | valid_at | name --------+-------------------------+------- - [3,4) | 
[01-01-2000,01-01-2010) | three -(1 row) - -DROP TABLE temporal_partitioned; +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/rules.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/rules.out --- /tmp/cirrus-ci-build/src/test/regress/expected/rules.out 2024-03-07 14:25:00.333480000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/rules.out 2024-03-07 14:27:17.150806000 +0000 @@ -1,3800 +1,2 @@ --- --- RULES --- From Jan's original setup_ruletest.sql and run_ruletest.sql --- - thomas 1998-09-13 --- --- --- Tables and rules for the view test --- -create table rtest_t1 (a int4, b int4); -create table rtest_t2 (a int4, b int4); -create table rtest_t3 (a int4, b int4); -create view rtest_v1 as select * from rtest_t1; -create rule rtest_v1_ins as on insert to rtest_v1 do instead - insert into rtest_t1 values (new.a, new.b); -create rule rtest_v1_upd as on update to rtest_v1 do instead - update rtest_t1 set a = new.a, b = new.b - where a = old.a; -create rule rtest_v1_del as on delete to rtest_v1 do instead - delete from rtest_t1 where a = old.a; --- Test comments -COMMENT ON RULE rtest_v1_bad ON rtest_v1 IS 'bad rule'; -ERROR: rule "rtest_v1_bad" for relation "rtest_v1" does not exist -COMMENT ON RULE rtest_v1_del ON rtest_v1 IS 'delete rule'; -COMMENT ON RULE rtest_v1_del ON rtest_v1 IS NULL; --- --- Tables and rules for the constraint update/delete test --- --- Note: --- Now that we have multiple action rule support, we check --- both possible syntaxes to define them (The last action --- can, but need not, have a semicolon at the end).
--- -create table rtest_system (sysname text, sysdesc text); -create table rtest_interface (sysname text, ifname text); -create table rtest_person (pname text, pdesc text); -create table rtest_admin (pname text, sysname text); -create rule rtest_sys_upd as on update to rtest_system do also ( - update rtest_interface set sysname = new.sysname - where sysname = old.sysname; - update rtest_admin set sysname = new.sysname - where sysname = old.sysname - ); -create rule rtest_sys_del as on delete to rtest_system do also ( - delete from rtest_interface where sysname = old.sysname; - delete from rtest_admin where sysname = old.sysname; - ); -create rule rtest_pers_upd as on update to rtest_person do also - update rtest_admin set pname = new.pname where pname = old.pname; -create rule rtest_pers_del as on delete to rtest_person do also - delete from rtest_admin where pname = old.pname; --- --- Tables and rules for the logging test --- -create table rtest_emp (ename char(20), salary numeric); -create table rtest_emplog (ename char(20), who name, action char(10), newsal numeric, oldsal numeric); -create table rtest_empmass (ename char(20), salary numeric); -create rule rtest_emp_ins as on insert to rtest_emp do - insert into rtest_emplog values (new.ename, current_user, - 'hired', new.salary, '0.00'); -create rule rtest_emp_upd as on update to rtest_emp where new.salary != old.salary do - insert into rtest_emplog values (new.ename, current_user, - 'honored', new.salary, old.salary); -create rule rtest_emp_del as on delete to rtest_emp do - insert into rtest_emplog values (old.ename, current_user, - 'fired', '0.00', old.salary); --- --- Tables and rules for the multiple cascaded qualified instead --- rule test --- -create table rtest_t4 (a int4, b text); -create table rtest_t5 (a int4, b text); -create table rtest_t6 (a int4, b text); -create table rtest_t7 (a int4, b text); -create table rtest_t8 (a int4, b text); -create table rtest_t9 (a int4, b text); -create rule rtest_t4_ins1 as on insert to rtest_t4 - where new.a >= 10 and new.a < 20 do instead - insert into rtest_t5 values (new.a, new.b); -create rule rtest_t4_ins2 as on insert to rtest_t4 - where new.a >= 20 and new.a < 30 do - insert into rtest_t6 values (new.a, new.b); -create rule rtest_t5_ins as on insert to rtest_t5 - where new.a > 15 do - insert into rtest_t7 values (new.a, new.b); -create rule rtest_t6_ins as on insert to rtest_t6 - where new.a > 25 do instead - insert into rtest_t8 values (new.a, new.b); --- --- Tables and rules for the rule fire order test --- --- As of PG 7.3, the rules should fire in order by name, regardless --- of INSTEAD attributes or creation order. 
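-- Illustration only (not part of the regression diff): because firing order is
-- alphabetical by rule name, sorting pg_rules by rulename predicts the order
-- in which the rtest_order_r* rules defined below will run:
select rulename from pg_rules
where tablename = 'rtest_order1'
order by rulename;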
--- -create table rtest_order1 (a int4); -create table rtest_order2 (a int4, b int4, c text); -create sequence rtest_seq; -create rule rtest_order_r3 as on insert to rtest_order1 do instead - insert into rtest_order2 values (new.a, nextval('rtest_seq'), - 'rule 3 - this should run 3rd'); -create rule rtest_order_r4 as on insert to rtest_order1 - where a < 100 do instead - insert into rtest_order2 values (new.a, nextval('rtest_seq'), - 'rule 4 - this should run 4th'); -create rule rtest_order_r2 as on insert to rtest_order1 do - insert into rtest_order2 values (new.a, nextval('rtest_seq'), - 'rule 2 - this should run 2nd'); -create rule rtest_order_r1 as on insert to rtest_order1 do instead - insert into rtest_order2 values (new.a, nextval('rtest_seq'), - 'rule 1 - this should run 1st'); --- --- Tables and rules for the instead nothing test --- -create table rtest_nothn1 (a int4, b text); -create table rtest_nothn2 (a int4, b text); -create table rtest_nothn3 (a int4, b text); -create table rtest_nothn4 (a int4, b text); -create rule rtest_nothn_r1 as on insert to rtest_nothn1 - where new.a >= 10 and new.a < 20 do instead nothing; -create rule rtest_nothn_r2 as on insert to rtest_nothn1 - where new.a >= 30 and new.a < 40 do instead nothing; -create rule rtest_nothn_r3 as on insert to rtest_nothn2 - where new.a >= 100 do instead - insert into rtest_nothn3 values (new.a, new.b); -create rule rtest_nothn_r4 as on insert to rtest_nothn2 - do instead nothing; --- --- Tests on a view that is select * of a table --- and has insert/update/delete instead rules to --- behave closely like the real table. --- --- --- We need test data later --- -insert into rtest_t2 values (1, 21); -insert into rtest_t2 values (2, 22); -insert into rtest_t2 values (3, 23); -insert into rtest_t3 values (1, 31); -insert into rtest_t3 values (2, 32); -insert into rtest_t3 values (3, 33); -insert into rtest_t3 values (4, 34); -insert into rtest_t3 values (5, 35); --- insert values -insert into rtest_v1 values (1, 11); -insert into rtest_v1 values (2, 12); -select * from rtest_v1; - a | b ----+---- - 1 | 11 - 2 | 12 -(2 rows) - --- delete with constant expression -delete from rtest_v1 where a = 1; -select * from rtest_v1; - a | b ----+---- - 2 | 12 -(1 row) - -insert into rtest_v1 values (1, 11); -delete from rtest_v1 where b = 12; -select * from rtest_v1; - a | b ----+---- - 1 | 11 -(1 row) - -insert into rtest_v1 values (2, 12); -insert into rtest_v1 values (2, 13); -select * from rtest_v1; - a | b ----+---- - 1 | 11 - 2 | 12 - 2 | 13 -(3 rows) - -** Remember the delete rule on rtest_v1: It says -** DO INSTEAD DELETE FROM rtest_t1 WHERE a = old.a -** So this time both rows with a = 2 must get deleted -\p -** Remember the delete rule on rtest_v1: It says -** DO INSTEAD DELETE FROM rtest_t1 WHERE a = old.a -** So this time both rows with a = 2 must get deleted -\r -delete from rtest_v1 where b = 12; -select * from rtest_v1; - a | b ----+---- - 1 | 11 -(1 row) - -delete from rtest_v1; --- insert select -insert into rtest_v1 select * from rtest_t2; -select * from rtest_v1; - a | b ----+---- - 1 | 21 - 2 | 22 - 3 | 23 -(3 rows) - -delete from rtest_v1; --- same with swapped targetlist -insert into rtest_v1 (b, a) select b, a from rtest_t2; -select * from rtest_v1; - a | b ----+---- - 1 | 21 - 2 | 22 - 3 | 23 -(3 rows) - --- now with only one target attribute -insert into rtest_v1 (a) select a from rtest_t3; -select * from rtest_v1; - a | b ----+---- - 1 | 21 - 2 | 22 - 3 | 23 - 1 | - 2 | - 3 | - 4 | - 5 | -(8 rows) - -select *
from rtest_v1 where b isnull; - a | b ----+--- - 1 | - 2 | - 3 | - 4 | - 5 | -(5 rows) - --- let attribute a differ (must be done on rtest_t1 - see above) -update rtest_t1 set a = a + 10 where b isnull; -delete from rtest_v1 where b isnull; -select * from rtest_v1; - a | b ----+---- - 1 | 21 - 2 | 22 - 3 | 23 -(3 rows) - --- now updates with constant expression -update rtest_v1 set b = 42 where a = 2; -select * from rtest_v1; - a | b ----+---- - 1 | 21 - 3 | 23 - 2 | 42 -(3 rows) - -update rtest_v1 set b = 99 where b = 42; -select * from rtest_v1; - a | b ----+---- - 1 | 21 - 3 | 23 - 2 | 99 -(3 rows) - -update rtest_v1 set b = 88 where b < 50; -select * from rtest_v1; - a | b ----+---- - 2 | 99 - 1 | 88 - 3 | 88 -(3 rows) - -delete from rtest_v1; -insert into rtest_v1 select rtest_t2.a, rtest_t3.b - from rtest_t2, rtest_t3 - where rtest_t2.a = rtest_t3.a; -select * from rtest_v1; - a | b ----+---- - 1 | 31 - 2 | 32 - 3 | 33 -(3 rows) - --- updates in a mergejoin -update rtest_v1 set b = rtest_t2.b from rtest_t2 where rtest_v1.a = rtest_t2.a; -select * from rtest_v1; - a | b ----+---- - 1 | 21 - 2 | 22 - 3 | 23 -(3 rows) - -insert into rtest_v1 select * from rtest_t3; -select * from rtest_v1; - a | b ----+---- - 1 | 21 - 2 | 22 - 3 | 23 - 1 | 31 - 2 | 32 - 3 | 33 - 4 | 34 - 5 | 35 -(8 rows) - -update rtest_t1 set a = a + 10 where b > 30; -select * from rtest_v1; - a | b -----+---- - 1 | 21 - 2 | 22 - 3 | 23 - 11 | 31 - 12 | 32 - 13 | 33 - 14 | 34 - 15 | 35 -(8 rows) - -update rtest_v1 set a = rtest_t3.a + 20 from rtest_t3 where rtest_v1.b = rtest_t3.b; -select * from rtest_v1; - a | b -----+---- - 1 | 21 - 2 | 22 - 3 | 23 - 21 | 31 - 22 | 32 - 23 | 33 - 24 | 34 - 25 | 35 -(8 rows) - --- --- Test for constraint updates/deletes --- -insert into rtest_system values ('orion', 'Linux Jan Wieck'); -insert into rtest_system values ('notjw', 'WinNT Jan Wieck (notebook)'); -insert into rtest_system values ('neptun', 'Fileserver'); -insert into rtest_interface values ('orion', 'eth0'); -insert into rtest_interface values ('orion', 'eth1'); -insert into rtest_interface values ('notjw', 'eth0'); -insert into rtest_interface values ('neptun', 'eth0'); -insert into rtest_person values ('jw', 'Jan Wieck'); -insert into rtest_person values ('bm', 'Bruce Momjian'); -insert into rtest_admin values ('jw', 'orion'); -insert into rtest_admin values ('jw', 'notjw'); -insert into rtest_admin values ('bm', 'neptun'); -update rtest_system set sysname = 'pluto' where sysname = 'neptun'; -select * from rtest_interface; - sysname | ifname ----------+-------- - orion | eth0 - orion | eth1 - notjw | eth0 - pluto | eth0 -(4 rows) - -select * from rtest_admin; - pname | sysname --------+--------- - jw | orion - jw | notjw - bm | pluto -(3 rows) - -update rtest_person set pname = 'jwieck' where pdesc = 'Jan Wieck'; --- Note: use ORDER BY here to ensure consistent output across all systems. --- The above UPDATE affects two rows with equal keys, so they could be --- updated in either order depending on the whim of the local qsort(). 
-select * from rtest_admin order by pname, sysname; - pname | sysname ---------+--------- - bm | pluto - jwieck | notjw - jwieck | orion -(3 rows) - -delete from rtest_system where sysname = 'orion'; -select * from rtest_interface; - sysname | ifname ----------+-------- - notjw | eth0 - pluto | eth0 -(2 rows) - -select * from rtest_admin; - pname | sysname ---------+--------- - bm | pluto - jwieck | notjw -(2 rows) - --- --- Rule qualification test --- -insert into rtest_emp values ('wiecc', '5000.00'); -insert into rtest_emp values ('gates', '80000.00'); -update rtest_emp set ename = 'wiecx' where ename = 'wiecc'; -update rtest_emp set ename = 'wieck', salary = '6000.00' where ename = 'wiecx'; -update rtest_emp set salary = '7000.00' where ename = 'wieck'; -delete from rtest_emp where ename = 'gates'; -select ename, who = current_user as "matches user", action, newsal, oldsal from rtest_emplog order by ename, action, newsal; - ename | matches user | action | newsal | oldsal -----------------------+--------------+------------+----------+---------- - gates | t | fired | 0.00 | 80000.00 - gates | t | hired | 80000.00 | 0.00 - wiecc | t | hired | 5000.00 | 0.00 - wieck | t | honored | 6000.00 | 5000.00 - wieck | t | honored | 7000.00 | 6000.00 -(5 rows) - -insert into rtest_empmass values ('meyer', '4000.00'); -insert into rtest_empmass values ('maier', '5000.00'); -insert into rtest_empmass values ('mayr', '6000.00'); -insert into rtest_emp select * from rtest_empmass; -select ename, who = current_user as "matches user", action, newsal, oldsal from rtest_emplog order by ename, action, newsal; - ename | matches user | action | newsal | oldsal -----------------------+--------------+------------+----------+---------- - gates | t | fired | 0.00 | 80000.00 - gates | t | hired | 80000.00 | 0.00 - maier | t | hired | 5000.00 | 0.00 - mayr | t | hired | 6000.00 | 0.00 - meyer | t | hired | 4000.00 | 0.00 - wiecc | t | hired | 5000.00 | 0.00 - wieck | t | honored | 6000.00 | 5000.00 - wieck | t | honored | 7000.00 | 6000.00 -(8 rows) - -update rtest_empmass set salary = salary + '1000.00'; -update rtest_emp set salary = rtest_empmass.salary from rtest_empmass where rtest_emp.ename = rtest_empmass.ename; -select ename, who = current_user as "matches user", action, newsal, oldsal from rtest_emplog order by ename, action, newsal; - ename | matches user | action | newsal | oldsal -----------------------+--------------+------------+----------+---------- - gates | t | fired | 0.00 | 80000.00 - gates | t | hired | 80000.00 | 0.00 - maier | t | hired | 5000.00 | 0.00 - maier | t | honored | 6000.00 | 5000.00 - mayr | t | hired | 6000.00 | 0.00 - mayr | t | honored | 7000.00 | 6000.00 - meyer | t | hired | 4000.00 | 0.00 - meyer | t | honored | 5000.00 | 4000.00 - wiecc | t | hired | 5000.00 | 0.00 - wieck | t | honored | 6000.00 | 5000.00 - wieck | t | honored | 7000.00 | 6000.00 -(11 rows) - -delete from rtest_emp using rtest_empmass where rtest_emp.ename = rtest_empmass.ename; -select ename, who = current_user as "matches user", action, newsal, oldsal from rtest_emplog order by ename, action, newsal; - ename | matches user | action | newsal | oldsal -----------------------+--------------+------------+----------+---------- - gates | t | fired | 0.00 | 80000.00 - gates | t | hired | 80000.00 | 0.00 - maier | t | fired | 0.00 | 6000.00 - maier | t | hired | 5000.00 | 0.00 - maier | t | honored | 6000.00 | 5000.00 - mayr | t | fired | 0.00 | 7000.00 - mayr | t | hired | 6000.00 | 0.00 - mayr | t | honored | 
7000.00 | 6000.00 - meyer | t | fired | 0.00 | 5000.00 - meyer | t | hired | 4000.00 | 0.00 - meyer | t | honored | 5000.00 | 4000.00 - wiecc | t | hired | 5000.00 | 0.00 - wieck | t | honored | 6000.00 | 5000.00 - wieck | t | honored | 7000.00 | 6000.00 -(14 rows) - --- --- Multiple cascaded qualified instead rule test --- -insert into rtest_t4 values (1, 'Record should go to rtest_t4'); -insert into rtest_t4 values (2, 'Record should go to rtest_t4'); -insert into rtest_t4 values (10, 'Record should go to rtest_t5'); -insert into rtest_t4 values (15, 'Record should go to rtest_t5'); -insert into rtest_t4 values (19, 'Record should go to rtest_t5 and t7'); -insert into rtest_t4 values (20, 'Record should go to rtest_t4 and t6'); -insert into rtest_t4 values (26, 'Record should go to rtest_t4 and t8'); -insert into rtest_t4 values (28, 'Record should go to rtest_t4 and t8'); -insert into rtest_t4 values (30, 'Record should go to rtest_t4'); -insert into rtest_t4 values (40, 'Record should go to rtest_t4'); -select * from rtest_t4; - a | b -----+------------------------------------- - 1 | Record should go to rtest_t4 - 2 | Record should go to rtest_t4 - 20 | Record should go to rtest_t4 and t6 - 26 | Record should go to rtest_t4 and t8 - 28 | Record should go to rtest_t4 and t8 - 30 | Record should go to rtest_t4 - 40 | Record should go to rtest_t4 -(7 rows) - -select * from rtest_t5; - a | b -----+------------------------------------- - 10 | Record should go to rtest_t5 - 15 | Record should go to rtest_t5 - 19 | Record should go to rtest_t5 and t7 -(3 rows) - -select * from rtest_t6; - a | b -----+------------------------------------- - 20 | Record should go to rtest_t4 and t6 -(1 row) - -select * from rtest_t7; - a | b -----+------------------------------------- - 19 | Record should go to rtest_t5 and t7 -(1 row) - -select * from rtest_t8; - a | b -----+------------------------------------- - 26 | Record should go to rtest_t4 and t8 - 28 | Record should go to rtest_t4 and t8 -(2 rows) - -delete from rtest_t4; -delete from rtest_t5; -delete from rtest_t6; -delete from rtest_t7; -delete from rtest_t8; -insert into rtest_t9 values (1, 'Record should go to rtest_t4'); -insert into rtest_t9 values (2, 'Record should go to rtest_t4'); -insert into rtest_t9 values (10, 'Record should go to rtest_t5'); -insert into rtest_t9 values (15, 'Record should go to rtest_t5'); -insert into rtest_t9 values (19, 'Record should go to rtest_t5 and t7'); -insert into rtest_t9 values (20, 'Record should go to rtest_t4 and t6'); -insert into rtest_t9 values (26, 'Record should go to rtest_t4 and t8'); -insert into rtest_t9 values (28, 'Record should go to rtest_t4 and t8'); -insert into rtest_t9 values (30, 'Record should go to rtest_t4'); -insert into rtest_t9 values (40, 'Record should go to rtest_t4'); -insert into rtest_t4 select * from rtest_t9 where a < 20; -select * from rtest_t4; - a | b ----+------------------------------ - 1 | Record should go to rtest_t4 - 2 | Record should go to rtest_t4 -(2 rows) - -select * from rtest_t5; - a | b -----+------------------------------------- - 10 | Record should go to rtest_t5 - 15 | Record should go to rtest_t5 - 19 | Record should go to rtest_t5 and t7 -(3 rows) - -select * from rtest_t6; - a | b ----+--- -(0 rows) - -select * from rtest_t7; - a | b -----+------------------------------------- - 19 | Record should go to rtest_t5 and t7 -(1 row) - -select * from rtest_t8; - a | b ----+--- -(0 rows) - -insert into rtest_t4 select * from rtest_t9 where b ~ 'and t8'; 
-select * from rtest_t4; - a | b -----+------------------------------------- - 1 | Record should go to rtest_t4 - 2 | Record should go to rtest_t4 - 26 | Record should go to rtest_t4 and t8 - 28 | Record should go to rtest_t4 and t8 -(4 rows) - -select * from rtest_t5; - a | b -----+------------------------------------- - 10 | Record should go to rtest_t5 - 15 | Record should go to rtest_t5 - 19 | Record should go to rtest_t5 and t7 -(3 rows) - -select * from rtest_t6; - a | b ----+--- -(0 rows) - -select * from rtest_t7; - a | b -----+------------------------------------- - 19 | Record should go to rtest_t5 and t7 -(1 row) - -select * from rtest_t8; - a | b -----+------------------------------------- - 26 | Record should go to rtest_t4 and t8 - 28 | Record should go to rtest_t4 and t8 -(2 rows) - -insert into rtest_t4 select a + 1, b from rtest_t9 where a in (20, 30, 40); -select * from rtest_t4; - a | b -----+------------------------------------- - 1 | Record should go to rtest_t4 - 2 | Record should go to rtest_t4 - 26 | Record should go to rtest_t4 and t8 - 28 | Record should go to rtest_t4 and t8 - 21 | Record should go to rtest_t4 and t6 - 31 | Record should go to rtest_t4 - 41 | Record should go to rtest_t4 -(7 rows) - -select * from rtest_t5; - a | b -----+------------------------------------- - 10 | Record should go to rtest_t5 - 15 | Record should go to rtest_t5 - 19 | Record should go to rtest_t5 and t7 -(3 rows) - -select * from rtest_t6; - a | b -----+------------------------------------- - 21 | Record should go to rtest_t4 and t6 -(1 row) - -select * from rtest_t7; - a | b -----+------------------------------------- - 19 | Record should go to rtest_t5 and t7 -(1 row) - -select * from rtest_t8; - a | b -----+------------------------------------- - 26 | Record should go to rtest_t4 and t8 - 28 | Record should go to rtest_t4 and t8 -(2 rows) - --- --- Check that the ordering of rules fired is correct --- -insert into rtest_order1 values (1); -select * from rtest_order2; - a | b | c ----+---+------------------------------ - 1 | 1 | rule 1 - this should run 1st - 1 | 2 | rule 2 - this should run 2nd - 1 | 3 | rule 3 - this should run 3rd - 1 | 4 | rule 4 - this should run 4th -(4 rows) - --- --- Check if instead nothing w/without qualification works --- -insert into rtest_nothn1 values (1, 'want this'); -insert into rtest_nothn1 values (2, 'want this'); -insert into rtest_nothn1 values (10, 'don''t want this'); -insert into rtest_nothn1 values (19, 'don''t want this'); -insert into rtest_nothn1 values (20, 'want this'); -insert into rtest_nothn1 values (29, 'want this'); -insert into rtest_nothn1 values (30, 'don''t want this'); -insert into rtest_nothn1 values (39, 'don''t want this'); -insert into rtest_nothn1 values (40, 'want this'); -insert into rtest_nothn1 values (50, 'want this'); -insert into rtest_nothn1 values (60, 'want this'); -select * from rtest_nothn1; - a | b -----+----------- - 1 | want this - 2 | want this - 20 | want this - 29 | want this - 40 | want this - 50 | want this - 60 | want this -(7 rows) - -insert into rtest_nothn2 values (10, 'too small'); -insert into rtest_nothn2 values (50, 'too small'); -insert into rtest_nothn2 values (100, 'OK'); -insert into rtest_nothn2 values (200, 'OK'); -select * from rtest_nothn2; - a | b ----+--- -(0 rows) - -select * from rtest_nothn3; - a | b ------+---- - 100 | OK - 200 | OK -(2 rows) - -delete from rtest_nothn1; -delete from rtest_nothn2; -delete from rtest_nothn3; -insert into rtest_nothn4 values (1, 'want this'); 
-insert into rtest_nothn4 values (2, 'want this'); -insert into rtest_nothn4 values (10, 'don''t want this'); -insert into rtest_nothn4 values (19, 'don''t want this'); -insert into rtest_nothn4 values (20, 'want this'); -insert into rtest_nothn4 values (29, 'want this'); -insert into rtest_nothn4 values (30, 'don''t want this'); -insert into rtest_nothn4 values (39, 'don''t want this'); -insert into rtest_nothn4 values (40, 'want this'); -insert into rtest_nothn4 values (50, 'want this'); -insert into rtest_nothn4 values (60, 'want this'); -insert into rtest_nothn1 select * from rtest_nothn4; -select * from rtest_nothn1; - a | b -----+----------- - 1 | want this - 2 | want this - 20 | want this - 29 | want this - 40 | want this - 50 | want this - 60 | want this -(7 rows) - -delete from rtest_nothn4; -insert into rtest_nothn4 values (10, 'too small'); -insert into rtest_nothn4 values (50, 'too small'); -insert into rtest_nothn4 values (100, 'OK'); -insert into rtest_nothn4 values (200, 'OK'); -insert into rtest_nothn2 select * from rtest_nothn4; -select * from rtest_nothn2; - a | b ----+--- -(0 rows) - -select * from rtest_nothn3; - a | b ------+---- - 100 | OK - 200 | OK -(2 rows) - -create table rtest_view1 (a int4, b text, v bool); -create table rtest_view2 (a int4); -create table rtest_view3 (a int4, b text); -create table rtest_view4 (a int4, b text, c int4); -create view rtest_vview1 as select a, b from rtest_view1 X - where 0 < (select count(*) from rtest_view2 Y where Y.a = X.a); -create view rtest_vview2 as select a, b from rtest_view1 where v; -create view rtest_vview3 as select a, b from rtest_vview2 X - where 0 < (select count(*) from rtest_view2 Y where Y.a = X.a); -create view rtest_vview4 as select X.a, X.b, count(Y.a) as refcount - from rtest_view1 X, rtest_view2 Y - where X.a = Y.a - group by X.a, X.b; -create function rtest_viewfunc1(int4) returns int4 as - 'select count(*)::int4 from rtest_view2 where a = $1' - language sql; -create view rtest_vview5 as select a, b, rtest_viewfunc1(a) as refcount - from rtest_view1; -insert into rtest_view1 values (1, 'item 1', 't'); -insert into rtest_view1 values (2, 'item 2', 't'); -insert into rtest_view1 values (3, 'item 3', 't'); -insert into rtest_view1 values (4, 'item 4', 'f'); -insert into rtest_view1 values (5, 'item 5', 't'); -insert into rtest_view1 values (6, 'item 6', 'f'); -insert into rtest_view1 values (7, 'item 7', 't'); -insert into rtest_view1 values (8, 'item 8', 't'); -insert into rtest_view2 values (2); -insert into rtest_view2 values (2); -insert into rtest_view2 values (4); -insert into rtest_view2 values (5); -insert into rtest_view2 values (7); -insert into rtest_view2 values (7); -insert into rtest_view2 values (7); -insert into rtest_view2 values (7); -select * from rtest_vview1; - a | b ----+-------- - 2 | item 2 - 4 | item 4 - 5 | item 5 - 7 | item 7 -(4 rows) - -select * from rtest_vview2; - a | b ----+-------- - 1 | item 1 - 2 | item 2 - 3 | item 3 - 5 | item 5 - 7 | item 7 - 8 | item 8 -(6 rows) - -select * from rtest_vview3; - a | b ----+-------- - 2 | item 2 - 5 | item 5 - 7 | item 7 -(3 rows) - -select * from rtest_vview4 order by a, b; - a | b | refcount ----+--------+---------- - 2 | item 2 | 2 - 4 | item 4 | 1 - 5 | item 5 | 1 - 7 | item 7 | 4 -(4 rows) - -select * from rtest_vview5; - a | b | refcount ----+--------+---------- - 1 | item 1 | 0 - 2 | item 2 | 2 - 3 | item 3 | 0 - 4 | item 4 | 1 - 5 | item 5 | 1 - 6 | item 6 | 0 - 7 | item 7 | 4 - 8 | item 8 | 0 -(8 rows) - -insert into rtest_view3 
select * from rtest_vview1 where a < 7; -select * from rtest_view3; - a | b ----+-------- - 2 | item 2 - 4 | item 4 - 5 | item 5 -(3 rows) - -delete from rtest_view3; -insert into rtest_view3 select * from rtest_vview2 where a != 5 and b !~ '2'; -select * from rtest_view3; - a | b ----+-------- - 1 | item 1 - 3 | item 3 - 7 | item 7 - 8 | item 8 -(4 rows) - -delete from rtest_view3; -insert into rtest_view3 select * from rtest_vview3; -select * from rtest_view3; - a | b ----+-------- - 2 | item 2 - 5 | item 5 - 7 | item 7 -(3 rows) - -delete from rtest_view3; -insert into rtest_view4 select * from rtest_vview4 where 3 > refcount; -select * from rtest_view4 order by a, b; - a | b | c ----+--------+--- - 2 | item 2 | 2 - 4 | item 4 | 1 - 5 | item 5 | 1 -(3 rows) - -delete from rtest_view4; -insert into rtest_view4 select * from rtest_vview5 where a > 2 and refcount = 0; -select * from rtest_view4; - a | b | c ----+--------+--- - 3 | item 3 | 0 - 6 | item 6 | 0 - 8 | item 8 | 0 -(3 rows) - -delete from rtest_view4; --- --- Test for computations in views --- -create table rtest_comp ( - part text, - unit char(4), - size float -); -create table rtest_unitfact ( - unit char(4), - factor float -); -create view rtest_vcomp as - select X.part, (X.size * Y.factor) as size_in_cm - from rtest_comp X, rtest_unitfact Y - where X.unit = Y.unit; -insert into rtest_unitfact values ('m', 100.0); -insert into rtest_unitfact values ('cm', 1.0); -insert into rtest_unitfact values ('inch', 2.54); -insert into rtest_comp values ('p1', 'm', 5.0); -insert into rtest_comp values ('p2', 'm', 3.0); -insert into rtest_comp values ('p3', 'cm', 5.0); -insert into rtest_comp values ('p4', 'cm', 15.0); -insert into rtest_comp values ('p5', 'inch', 7.0); -insert into rtest_comp values ('p6', 'inch', 4.4); -select * from rtest_vcomp order by part; - part | size_in_cm -------+-------------------- - p1 | 500 - p2 | 300 - p3 | 5 - p4 | 15 - p5 | 17.78 - p6 | 11.176000000000002 -(6 rows) - -select * from rtest_vcomp where size_in_cm > 10.0 order by size_in_cm using >; - part | size_in_cm -------+-------------------- - p1 | 500 - p2 | 300 - p5 | 17.78 - p4 | 15 - p6 | 11.176000000000002 -(5 rows) - --- --- In addition run the (slightly modified) queries from the --- programmers manual section on the rule system. 
--- -CREATE TABLE shoe_data ( - shoename char(10), -- primary key - sh_avail integer, -- available # of pairs - slcolor char(10), -- preferred shoelace color - slminlen float, -- minimum shoelace length - slmaxlen float, -- maximum shoelace length - slunit char(8) -- length unit -); -CREATE TABLE shoelace_data ( - sl_name char(10), -- primary key - sl_avail integer, -- available # of pairs - sl_color char(10), -- shoelace color - sl_len float, -- shoelace length - sl_unit char(8) -- length unit -); -CREATE TABLE unit ( - un_name char(8), -- the primary key - un_fact float -- factor to transform to cm -); -CREATE VIEW shoe AS - SELECT sh.shoename, - sh.sh_avail, - sh.slcolor, - sh.slminlen, - sh.slminlen * un.un_fact AS slminlen_cm, - sh.slmaxlen, - sh.slmaxlen * un.un_fact AS slmaxlen_cm, - sh.slunit - FROM shoe_data sh, unit un - WHERE sh.slunit = un.un_name; -CREATE VIEW shoelace AS - SELECT s.sl_name, - s.sl_avail, - s.sl_color, - s.sl_len, - s.sl_unit, - s.sl_len * u.un_fact AS sl_len_cm - FROM shoelace_data s, unit u - WHERE s.sl_unit = u.un_name; -CREATE VIEW shoe_ready AS - SELECT rsh.shoename, - rsh.sh_avail, - rsl.sl_name, - rsl.sl_avail, - int4smaller(rsh.sh_avail, rsl.sl_avail) AS total_avail - FROM shoe rsh, shoelace rsl - WHERE rsl.sl_color = rsh.slcolor - AND rsl.sl_len_cm >= rsh.slminlen_cm - AND rsl.sl_len_cm <= rsh.slmaxlen_cm; -INSERT INTO unit VALUES ('cm', 1.0); -INSERT INTO unit VALUES ('m', 100.0); -INSERT INTO unit VALUES ('inch', 2.54); -INSERT INTO shoe_data VALUES ('sh1', 2, 'black', 70.0, 90.0, 'cm'); -INSERT INTO shoe_data VALUES ('sh2', 0, 'black', 30.0, 40.0, 'inch'); -INSERT INTO shoe_data VALUES ('sh3', 4, 'brown', 50.0, 65.0, 'cm'); -INSERT INTO shoe_data VALUES ('sh4', 3, 'brown', 40.0, 50.0, 'inch'); -INSERT INTO shoelace_data VALUES ('sl1', 5, 'black', 80.0, 'cm'); -INSERT INTO shoelace_data VALUES ('sl2', 6, 'black', 100.0, 'cm'); -INSERT INTO shoelace_data VALUES ('sl3', 0, 'black', 35.0 , 'inch'); -INSERT INTO shoelace_data VALUES ('sl4', 8, 'black', 40.0 , 'inch'); -INSERT INTO shoelace_data VALUES ('sl5', 4, 'brown', 1.0 , 'm'); -INSERT INTO shoelace_data VALUES ('sl6', 0, 'brown', 0.9 , 'm'); -INSERT INTO shoelace_data VALUES ('sl7', 7, 'brown', 60 , 'cm'); -INSERT INTO shoelace_data VALUES ('sl8', 1, 'brown', 40 , 'inch'); --- SELECTs in doc -SELECT * FROM shoelace ORDER BY sl_name; - sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm -------------+----------+------------+--------+----------+----------- - sl1 | 5 | black | 80 | cm | 80 - sl2 | 6 | black | 100 | cm | 100 - sl3 | 0 | black | 35 | inch | 88.9 - sl4 | 8 | black | 40 | inch | 101.6 - sl5 | 4 | brown | 1 | m | 100 - sl6 | 0 | brown | 0.9 | m | 90 - sl7 | 7 | brown | 60 | cm | 60 - sl8 | 1 | brown | 40 | inch | 101.6 -(8 rows) - -SELECT * FROM shoe_ready WHERE total_avail >= 2 ORDER BY 1; - shoename | sh_avail | sl_name | sl_avail | total_avail -------------+----------+------------+----------+------------- - sh1 | 2 | sl1 | 5 | 2 - sh3 | 4 | sl7 | 7 | 4 -(2 rows) - - CREATE TABLE shoelace_log ( - sl_name char(10), -- shoelace changed - sl_avail integer, -- new available value - log_who name, -- who did it - log_when timestamp -- when - ); --- Want "log_who" to be CURRENT_USER, --- but that is non-portable for the regression test --- - thomas 1999-02-21 - CREATE RULE log_shoelace AS ON UPDATE TO shoelace_data - WHERE NEW.sl_avail != OLD.sl_avail - DO INSERT INTO shoelace_log VALUES ( - NEW.sl_name, - NEW.sl_avail, - 'Al Bundy', - 'epoch' - ); -UPDATE shoelace_data SET sl_avail = 
6 WHERE sl_name = 'sl7'; -SELECT * FROM shoelace_log; - sl_name | sl_avail | log_who | log_when -------------+----------+----------+-------------------------- - sl7 | 6 | Al Bundy | Thu Jan 01 00:00:00 1970 -(1 row) - - CREATE RULE shoelace_ins AS ON INSERT TO shoelace - DO INSTEAD - INSERT INTO shoelace_data VALUES ( - NEW.sl_name, - NEW.sl_avail, - NEW.sl_color, - NEW.sl_len, - NEW.sl_unit); - CREATE RULE shoelace_upd AS ON UPDATE TO shoelace - DO INSTEAD - UPDATE shoelace_data SET - sl_name = NEW.sl_name, - sl_avail = NEW.sl_avail, - sl_color = NEW.sl_color, - sl_len = NEW.sl_len, - sl_unit = NEW.sl_unit - WHERE sl_name = OLD.sl_name; - CREATE RULE shoelace_del AS ON DELETE TO shoelace - DO INSTEAD - DELETE FROM shoelace_data - WHERE sl_name = OLD.sl_name; - CREATE TABLE shoelace_arrive ( - arr_name char(10), - arr_quant integer - ); - CREATE TABLE shoelace_ok ( - ok_name char(10), - ok_quant integer - ); - CREATE RULE shoelace_ok_ins AS ON INSERT TO shoelace_ok - DO INSTEAD - UPDATE shoelace SET - sl_avail = sl_avail + NEW.ok_quant - WHERE sl_name = NEW.ok_name; -INSERT INTO shoelace_arrive VALUES ('sl3', 10); -INSERT INTO shoelace_arrive VALUES ('sl6', 20); -INSERT INTO shoelace_arrive VALUES ('sl8', 20); -SELECT * FROM shoelace ORDER BY sl_name; - sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm -------------+----------+------------+--------+----------+----------- - sl1 | 5 | black | 80 | cm | 80 - sl2 | 6 | black | 100 | cm | 100 - sl3 | 0 | black | 35 | inch | 88.9 - sl4 | 8 | black | 40 | inch | 101.6 - sl5 | 4 | brown | 1 | m | 100 - sl6 | 0 | brown | 0.9 | m | 90 - sl7 | 6 | brown | 60 | cm | 60 - sl8 | 1 | brown | 40 | inch | 101.6 -(8 rows) - -insert into shoelace_ok select * from shoelace_arrive; -SELECT * FROM shoelace ORDER BY sl_name; - sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm -------------+----------+------------+--------+----------+----------- - sl1 | 5 | black | 80 | cm | 80 - sl2 | 6 | black | 100 | cm | 100 - sl3 | 10 | black | 35 | inch | 88.9 - sl4 | 8 | black | 40 | inch | 101.6 - sl5 | 4 | brown | 1 | m | 100 - sl6 | 20 | brown | 0.9 | m | 90 - sl7 | 6 | brown | 60 | cm | 60 - sl8 | 21 | brown | 40 | inch | 101.6 -(8 rows) - -SELECT * FROM shoelace_log ORDER BY sl_name; - sl_name | sl_avail | log_who | log_when -------------+----------+----------+-------------------------- - sl3 | 10 | Al Bundy | Thu Jan 01 00:00:00 1970 - sl6 | 20 | Al Bundy | Thu Jan 01 00:00:00 1970 - sl7 | 6 | Al Bundy | Thu Jan 01 00:00:00 1970 - sl8 | 21 | Al Bundy | Thu Jan 01 00:00:00 1970 -(4 rows) - - CREATE VIEW shoelace_obsolete AS - SELECT * FROM shoelace WHERE NOT EXISTS - (SELECT shoename FROM shoe WHERE slcolor = sl_color); - CREATE VIEW shoelace_candelete AS - SELECT * FROM shoelace_obsolete WHERE sl_avail = 0; -insert into shoelace values ('sl9', 0, 'pink', 35.0, 'inch', 0.0); -insert into shoelace values ('sl10', 1000, 'magenta', 40.0, 'inch', 0.0); --- Unsupported (even though a similar updatable view construct is) -insert into shoelace values ('sl10', 1000, 'magenta', 40.0, 'inch', 0.0) - on conflict do nothing; -ERROR: INSERT with ON CONFLICT clause cannot be used with table that has INSERT or UPDATE rules -SELECT * FROM shoelace_obsolete ORDER BY sl_len_cm; - sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm -------------+----------+------------+--------+----------+----------- - sl9 | 0 | pink | 35 | inch | 88.9 - sl10 | 1000 | magenta | 40 | inch | 101.6 -(2 rows) - -SELECT * FROM shoelace_candelete; - sl_name | sl_avail | sl_color 
| sl_len | sl_unit | sl_len_cm -------------+----------+------------+--------+----------+----------- - sl9 | 0 | pink | 35 | inch | 88.9 -(1 row) - -DELETE FROM shoelace WHERE EXISTS - (SELECT * FROM shoelace_candelete - WHERE sl_name = shoelace.sl_name); -SELECT * FROM shoelace ORDER BY sl_name; - sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm -------------+----------+------------+--------+----------+----------- - sl1 | 5 | black | 80 | cm | 80 - sl10 | 1000 | magenta | 40 | inch | 101.6 - sl2 | 6 | black | 100 | cm | 100 - sl3 | 10 | black | 35 | inch | 88.9 - sl4 | 8 | black | 40 | inch | 101.6 - sl5 | 4 | brown | 1 | m | 100 - sl6 | 20 | brown | 0.9 | m | 90 - sl7 | 6 | brown | 60 | cm | 60 - sl8 | 21 | brown | 40 | inch | 101.6 -(9 rows) - -SELECT * FROM shoe ORDER BY shoename; - shoename | sh_avail | slcolor | slminlen | slminlen_cm | slmaxlen | slmaxlen_cm | slunit -------------+----------+------------+----------+-------------+----------+-------------+---------- - sh1 | 2 | black | 70 | 70 | 90 | 90 | cm - sh2 | 0 | black | 30 | 76.2 | 40 | 101.6 | inch - sh3 | 4 | brown | 50 | 50 | 65 | 65 | cm - sh4 | 3 | brown | 40 | 101.6 | 50 | 127 | inch -(4 rows) - -SELECT count(*) FROM shoe; - count -------- - 4 -(1 row) - --- --- Simple test of qualified ON INSERT ... this did not work in 7.0 ... --- -create table rules_foo (f1 int); -create table rules_foo2 (f1 int); -create rule rules_foorule as on insert to rules_foo where f1 < 100 -do instead nothing; -insert into rules_foo values(1); -insert into rules_foo values(1001); -select * from rules_foo; - f1 ------- - 1001 -(1 row) - -drop rule rules_foorule on rules_foo; --- this should fail because f1 is not exposed for unqualified reference: -create rule rules_foorule as on insert to rules_foo where f1 < 100 -do instead insert into rules_foo2 values (f1); -ERROR: column "f1" does not exist -LINE 2: do instead insert into rules_foo2 values (f1); - ^ -DETAIL: There are columns named "f1", but they are in tables that cannot be referenced from this part of the query. -HINT: Try using a table-qualified name. --- this is the correct way: -create rule rules_foorule as on insert to rules_foo where f1 < 100 -do instead insert into rules_foo2 values (new.f1); -insert into rules_foo values(2); -insert into rules_foo values(100); -select * from rules_foo; - f1 ------- - 1001 - 100 -(2 rows) - -select * from rules_foo2; - f1 ----- - 2 -(1 row) - -drop rule rules_foorule on rules_foo; -drop table rules_foo; -drop table rules_foo2; --- --- Test rules containing INSERT ... SELECT, which is a very ugly special --- case as of 7.1. Example is based on bug report from Joel Burton. 
--- -create table pparent (pid int, txt text); -insert into pparent values (1,'parent1'); -insert into pparent values (2,'parent2'); -create table cchild (pid int, descrip text); -insert into cchild values (1,'descrip1'); -create view vview as - select pparent.pid, txt, descrip from - pparent left join cchild using (pid); -create rule rrule as - on update to vview do instead -( - insert into cchild (pid, descrip) - select old.pid, new.descrip where old.descrip isnull; - update cchild set descrip = new.descrip where cchild.pid = old.pid; -); -select * from vview; - pid | txt | descrip ------+---------+---------- - 1 | parent1 | descrip1 - 2 | parent2 | -(2 rows) - -update vview set descrip='test1' where pid=1; -select * from vview; - pid | txt | descrip ------+---------+--------- - 1 | parent1 | test1 - 2 | parent2 | -(2 rows) - -update vview set descrip='test2' where pid=2; -select * from vview; - pid | txt | descrip ------+---------+--------- - 1 | parent1 | test1 - 2 | parent2 | test2 -(2 rows) - -update vview set descrip='test3' where pid=3; -select * from vview; - pid | txt | descrip ------+---------+--------- - 1 | parent1 | test1 - 2 | parent2 | test2 -(2 rows) - -select * from cchild; - pid | descrip ------+--------- - 1 | test1 - 2 | test2 -(2 rows) - -drop rule rrule on vview; -drop view vview; -drop table pparent; -drop table cchild; --- --- Check that ruleutils are working --- --- temporarily disable fancy output, so view changes create less diff noise -\a\t -SELECT viewname, definition FROM pg_views -WHERE schemaname = 'pg_catalog' -ORDER BY viewname; -pg_available_extension_versions| SELECT e.name, - e.version, - (x.extname IS NOT NULL) AS installed, - e.superuser, - e.trusted, - e.relocatable, - e.schema, - e.requires, - e.comment - FROM (pg_available_extension_versions() e(name, version, superuser, trusted, relocatable, schema, requires, comment) - LEFT JOIN pg_extension x ON (((e.name = x.extname) AND (e.version = x.extversion)))); -pg_available_extensions| SELECT e.name, - e.default_version, - x.extversion AS installed_version, - e.comment - FROM (pg_available_extensions() e(name, default_version, comment) - LEFT JOIN pg_extension x ON ((e.name = x.extname))); -pg_backend_memory_contexts| SELECT name, - ident, - parent, - level, - total_bytes, - total_nblocks, - free_bytes, - free_chunks, - used_bytes - FROM pg_get_backend_memory_contexts() pg_get_backend_memory_contexts(name, ident, parent, level, total_bytes, total_nblocks, free_bytes, free_chunks, used_bytes); -pg_config| SELECT name, - setting - FROM pg_config() pg_config(name, setting); -pg_cursors| SELECT name, - statement, - is_holdable, - is_binary, - is_scrollable, - creation_time - FROM pg_cursor() c(name, statement, is_holdable, is_binary, is_scrollable, creation_time); -pg_file_settings| SELECT sourcefile, - sourceline, - seqno, - name, - setting, - applied, - error - FROM pg_show_all_file_settings() a(sourcefile, sourceline, seqno, name, setting, applied, error); -pg_group| SELECT rolname AS groname, - oid AS grosysid, - ARRAY( SELECT pg_auth_members.member - FROM pg_auth_members - WHERE (pg_auth_members.roleid = pg_authid.oid)) AS grolist - FROM pg_authid - WHERE (NOT rolcanlogin); -pg_hba_file_rules| SELECT rule_number, - file_name, - line_number, - type, - database, - user_name, - address, - netmask, - auth_method, - options, - error - FROM pg_hba_file_rules() a(rule_number, file_name, line_number, type, database, user_name, address, netmask, auth_method, options, error); -pg_ident_file_mappings| SELECT 
map_number, - file_name, - line_number, - map_name, - sys_name, - pg_username, - error - FROM pg_ident_file_mappings() a(map_number, file_name, line_number, map_name, sys_name, pg_username, error); -pg_indexes| SELECT n.nspname AS schemaname, - c.relname AS tablename, - i.relname AS indexname, - t.spcname AS tablespace, - pg_get_indexdef(i.oid) AS indexdef - FROM ((((pg_index x - JOIN pg_class c ON ((c.oid = x.indrelid))) - JOIN pg_class i ON ((i.oid = x.indexrelid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - LEFT JOIN pg_tablespace t ON ((t.oid = i.reltablespace))) - WHERE ((c.relkind = ANY (ARRAY['r'::"char", 'm'::"char", 'p'::"char"])) AND (i.relkind = ANY (ARRAY['i'::"char", 'I'::"char"]))); -pg_locks| SELECT locktype, - database, - relation, - page, - tuple, - virtualxid, - transactionid, - classid, - objid, - objsubid, - virtualtransaction, - pid, - mode, - granted, - fastpath, - waitstart - FROM pg_lock_status() l(locktype, database, relation, page, tuple, virtualxid, transactionid, classid, objid, objsubid, virtualtransaction, pid, mode, granted, fastpath, waitstart); -pg_matviews| SELECT n.nspname AS schemaname, - c.relname AS matviewname, - pg_get_userbyid(c.relowner) AS matviewowner, - t.spcname AS tablespace, - c.relhasindex AS hasindexes, - c.relispopulated AS ispopulated, - pg_get_viewdef(c.oid) AS definition - FROM ((pg_class c - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - LEFT JOIN pg_tablespace t ON ((t.oid = c.reltablespace))) - WHERE (c.relkind = 'm'::"char"); -pg_policies| SELECT n.nspname AS schemaname, - c.relname AS tablename, - pol.polname AS policyname, - CASE - WHEN pol.polpermissive THEN 'PERMISSIVE'::text - ELSE 'RESTRICTIVE'::text - END AS permissive, - CASE - WHEN (pol.polroles = '{0}'::oid[]) THEN (string_to_array('public'::text, ''::text))::name[] - ELSE ARRAY( SELECT pg_authid.rolname - FROM pg_authid - WHERE (pg_authid.oid = ANY (pol.polroles)) - ORDER BY pg_authid.rolname) - END AS roles, - CASE pol.polcmd - WHEN 'r'::"char" THEN 'SELECT'::text - WHEN 'a'::"char" THEN 'INSERT'::text - WHEN 'w'::"char" THEN 'UPDATE'::text - WHEN 'd'::"char" THEN 'DELETE'::text - WHEN '*'::"char" THEN 'ALL'::text - ELSE NULL::text - END AS cmd, - pg_get_expr(pol.polqual, pol.polrelid) AS qual, - pg_get_expr(pol.polwithcheck, pol.polrelid) AS with_check - FROM ((pg_policy pol - JOIN pg_class c ON ((c.oid = pol.polrelid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))); -pg_prepared_statements| SELECT name, - statement, - prepare_time, - parameter_types, - result_types, - from_sql, - generic_plans, - custom_plans - FROM pg_prepared_statement() p(name, statement, prepare_time, parameter_types, result_types, from_sql, generic_plans, custom_plans); -pg_prepared_xacts| SELECT p.transaction, - p.gid, - p.prepared, - u.rolname AS owner, - d.datname AS database - FROM ((pg_prepared_xact() p(transaction, gid, prepared, ownerid, dbid) - LEFT JOIN pg_authid u ON ((p.ownerid = u.oid))) - LEFT JOIN pg_database d ON ((p.dbid = d.oid))); -pg_publication_tables| SELECT p.pubname, - n.nspname AS schemaname, - c.relname AS tablename, - ( SELECT array_agg(a.attname ORDER BY a.attnum) AS array_agg - FROM pg_attribute a - WHERE ((a.attrelid = gpt.relid) AND (a.attnum = ANY ((gpt.attrs)::smallint[])))) AS attnames, - pg_get_expr(gpt.qual, gpt.relid) AS rowfilter - FROM pg_publication p, - LATERAL pg_get_publication_tables(VARIADIC ARRAY[(p.pubname)::text]) gpt(pubid, relid, attrs, qual), - (pg_class c - JOIN pg_namespace n ON ((n.oid = c.relnamespace))) 
- WHERE (c.oid = gpt.relid); -pg_replication_origin_status| SELECT local_id, - external_id, - remote_lsn, - local_lsn - FROM pg_show_replication_origin_status() pg_show_replication_origin_status(local_id, external_id, remote_lsn, local_lsn); -pg_replication_slots| SELECT l.slot_name, - l.plugin, - l.slot_type, - l.datoid, - d.datname AS database, - l.temporary, - l.active, - l.active_pid, - l.xmin, - l.catalog_xmin, - l.restart_lsn, - l.confirmed_flush_lsn, - l.wal_status, - l.safe_wal_size, - l.two_phase, - l.conflict_reason, - l.failover, - l.synced - FROM (pg_get_replication_slots() l(slot_name, plugin, slot_type, datoid, temporary, active, active_pid, xmin, catalog_xmin, restart_lsn, confirmed_flush_lsn, wal_status, safe_wal_size, two_phase, conflict_reason, failover, synced) - LEFT JOIN pg_database d ON ((l.datoid = d.oid))); -pg_roles| SELECT pg_authid.rolname, - pg_authid.rolsuper, - pg_authid.rolinherit, - pg_authid.rolcreaterole, - pg_authid.rolcreatedb, - pg_authid.rolcanlogin, - pg_authid.rolreplication, - pg_authid.rolconnlimit, - '********'::text AS rolpassword, - pg_authid.rolvaliduntil, - pg_authid.rolbypassrls, - s.setconfig AS rolconfig, - pg_authid.oid - FROM (pg_authid - LEFT JOIN pg_db_role_setting s ON (((pg_authid.oid = s.setrole) AND (s.setdatabase = (0)::oid)))); -pg_rules| SELECT n.nspname AS schemaname, - c.relname AS tablename, - r.rulename, - pg_get_ruledef(r.oid) AS definition - FROM ((pg_rewrite r - JOIN pg_class c ON ((c.oid = r.ev_class))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE (r.rulename <> '_RETURN'::name); -pg_seclabels| SELECT l.objoid, - l.classoid, - l.objsubid, - CASE - WHEN (rel.relkind = ANY (ARRAY['r'::"char", 'p'::"char"])) THEN 'table'::text - WHEN (rel.relkind = 'v'::"char") THEN 'view'::text - WHEN (rel.relkind = 'm'::"char") THEN 'materialized view'::text - WHEN (rel.relkind = 'S'::"char") THEN 'sequence'::text - WHEN (rel.relkind = 'f'::"char") THEN 'foreign table'::text - ELSE NULL::text - END AS objtype, - rel.relnamespace AS objnamespace, - CASE - WHEN pg_table_is_visible(rel.oid) THEN quote_ident((rel.relname)::text) - ELSE ((quote_ident((nsp.nspname)::text) || '.'::text) || quote_ident((rel.relname)::text)) - END AS objname, - l.provider, - l.label - FROM ((pg_seclabel l - JOIN pg_class rel ON (((l.classoid = rel.tableoid) AND (l.objoid = rel.oid)))) - JOIN pg_namespace nsp ON ((rel.relnamespace = nsp.oid))) - WHERE (l.objsubid = 0) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - 'column'::text AS objtype, - rel.relnamespace AS objnamespace, - (( - CASE - WHEN pg_table_is_visible(rel.oid) THEN quote_ident((rel.relname)::text) - ELSE ((quote_ident((nsp.nspname)::text) || '.'::text) || quote_ident((rel.relname)::text)) - END || '.'::text) || (att.attname)::text) AS objname, - l.provider, - l.label - FROM (((pg_seclabel l - JOIN pg_class rel ON (((l.classoid = rel.tableoid) AND (l.objoid = rel.oid)))) - JOIN pg_attribute att ON (((rel.oid = att.attrelid) AND (l.objsubid = att.attnum)))) - JOIN pg_namespace nsp ON ((rel.relnamespace = nsp.oid))) - WHERE (l.objsubid <> 0) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - CASE pro.prokind - WHEN 'a'::"char" THEN 'aggregate'::text - WHEN 'f'::"char" THEN 'function'::text - WHEN 'p'::"char" THEN 'procedure'::text - WHEN 'w'::"char" THEN 'window'::text - ELSE NULL::text - END AS objtype, - pro.pronamespace AS objnamespace, - ((( - CASE - WHEN pg_function_is_visible(pro.oid) THEN quote_ident((pro.proname)::text) - ELSE 
((quote_ident((nsp.nspname)::text) || '.'::text) || quote_ident((pro.proname)::text)) - END || '('::text) || pg_get_function_arguments(pro.oid)) || ')'::text) AS objname, - l.provider, - l.label - FROM ((pg_seclabel l - JOIN pg_proc pro ON (((l.classoid = pro.tableoid) AND (l.objoid = pro.oid)))) - JOIN pg_namespace nsp ON ((pro.pronamespace = nsp.oid))) - WHERE (l.objsubid = 0) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - CASE - WHEN (typ.typtype = 'd'::"char") THEN 'domain'::text - ELSE 'type'::text - END AS objtype, - typ.typnamespace AS objnamespace, - CASE - WHEN pg_type_is_visible(typ.oid) THEN quote_ident((typ.typname)::text) - ELSE ((quote_ident((nsp.nspname)::text) || '.'::text) || quote_ident((typ.typname)::text)) - END AS objname, - l.provider, - l.label - FROM ((pg_seclabel l - JOIN pg_type typ ON (((l.classoid = typ.tableoid) AND (l.objoid = typ.oid)))) - JOIN pg_namespace nsp ON ((typ.typnamespace = nsp.oid))) - WHERE (l.objsubid = 0) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - 'large object'::text AS objtype, - NULL::oid AS objnamespace, - (l.objoid)::text AS objname, - l.provider, - l.label - FROM (pg_seclabel l - JOIN pg_largeobject_metadata lom ON ((l.objoid = lom.oid))) - WHERE ((l.classoid = ('pg_largeobject'::regclass)::oid) AND (l.objsubid = 0)) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - 'language'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((lan.lanname)::text) AS objname, - l.provider, - l.label - FROM (pg_seclabel l - JOIN pg_language lan ON (((l.classoid = lan.tableoid) AND (l.objoid = lan.oid)))) - WHERE (l.objsubid = 0) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - 'schema'::text AS objtype, - nsp.oid AS objnamespace, - quote_ident((nsp.nspname)::text) AS objname, - l.provider, - l.label - FROM (pg_seclabel l - JOIN pg_namespace nsp ON (((l.classoid = nsp.tableoid) AND (l.objoid = nsp.oid)))) - WHERE (l.objsubid = 0) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - 'event trigger'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((evt.evtname)::text) AS objname, - l.provider, - l.label - FROM (pg_seclabel l - JOIN pg_event_trigger evt ON (((l.classoid = evt.tableoid) AND (l.objoid = evt.oid)))) - WHERE (l.objsubid = 0) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - 'publication'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((p.pubname)::text) AS objname, - l.provider, - l.label - FROM (pg_seclabel l - JOIN pg_publication p ON (((l.classoid = p.tableoid) AND (l.objoid = p.oid)))) - WHERE (l.objsubid = 0) -UNION ALL - SELECT l.objoid, - l.classoid, - 0 AS objsubid, - 'subscription'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((s.subname)::text) AS objname, - l.provider, - l.label - FROM (pg_shseclabel l - JOIN pg_subscription s ON (((l.classoid = s.tableoid) AND (l.objoid = s.oid)))) -UNION ALL - SELECT l.objoid, - l.classoid, - 0 AS objsubid, - 'database'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((dat.datname)::text) AS objname, - l.provider, - l.label - FROM (pg_shseclabel l - JOIN pg_database dat ON (((l.classoid = dat.tableoid) AND (l.objoid = dat.oid)))) -UNION ALL - SELECT l.objoid, - l.classoid, - 0 AS objsubid, - 'tablespace'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((spc.spcname)::text) AS objname, - l.provider, - l.label - FROM (pg_shseclabel l - JOIN pg_tablespace spc ON (((l.classoid = spc.tableoid) AND (l.objoid = spc.oid)))) -UNION ALL - SELECT l.objoid, - l.classoid, - 
0 AS objsubid, - 'role'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((rol.rolname)::text) AS objname, - l.provider, - l.label - FROM (pg_shseclabel l - JOIN pg_authid rol ON (((l.classoid = rol.tableoid) AND (l.objoid = rol.oid)))); -pg_sequences| SELECT n.nspname AS schemaname, - c.relname AS sequencename, - pg_get_userbyid(c.relowner) AS sequenceowner, - (s.seqtypid)::regtype AS data_type, - s.seqstart AS start_value, - s.seqmin AS min_value, - s.seqmax AS max_value, - s.seqincrement AS increment_by, - s.seqcycle AS cycle, - s.seqcache AS cache_size, - CASE - WHEN has_sequence_privilege(c.oid, 'SELECT,USAGE'::text) THEN pg_sequence_last_value((c.oid)::regclass) - ELSE NULL::bigint - END AS last_value - FROM ((pg_sequence s - JOIN pg_class c ON ((c.oid = s.seqrelid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE ((NOT pg_is_other_temp_schema(n.oid)) AND (c.relkind = 'S'::"char")); -pg_settings| SELECT name, - setting, - unit, - category, - short_desc, - extra_desc, - context, - vartype, - source, - min_val, - max_val, - enumvals, - boot_val, - reset_val, - sourcefile, - sourceline, - pending_restart - FROM pg_show_all_settings() a(name, setting, unit, category, short_desc, extra_desc, context, vartype, source, min_val, max_val, enumvals, boot_val, reset_val, sourcefile, sourceline, pending_restart); -pg_shadow| SELECT pg_authid.rolname AS usename, - pg_authid.oid AS usesysid, - pg_authid.rolcreatedb AS usecreatedb, - pg_authid.rolsuper AS usesuper, - pg_authid.rolreplication AS userepl, - pg_authid.rolbypassrls AS usebypassrls, - pg_authid.rolpassword AS passwd, - pg_authid.rolvaliduntil AS valuntil, - s.setconfig AS useconfig - FROM (pg_authid - LEFT JOIN pg_db_role_setting s ON (((pg_authid.oid = s.setrole) AND (s.setdatabase = (0)::oid)))) - WHERE pg_authid.rolcanlogin; -pg_shmem_allocations| SELECT name, - off, - size, - allocated_size - FROM pg_get_shmem_allocations() pg_get_shmem_allocations(name, off, size, allocated_size); -pg_stat_activity| SELECT s.datid, - d.datname, - s.pid, - s.leader_pid, - s.usesysid, - u.rolname AS usename, - s.application_name, - s.client_addr, - s.client_hostname, - s.client_port, - s.backend_start, - s.xact_start, - s.query_start, - s.state_change, - s.wait_event_type, - s.wait_event, - s.state, - s.backend_xid, - s.backend_xmin, - s.query_id, - s.query, - s.backend_type - FROM ((pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion, sslcipher, sslbits, ssl_client_dn, ssl_client_serial, ssl_issuer_dn, gss_auth, gss_princ, gss_enc, gss_delegation, leader_pid, query_id) - LEFT JOIN pg_database d ON ((s.datid = d.oid))) - LEFT JOIN pg_authid u ON ((s.usesysid = u.oid))); -pg_stat_all_indexes| SELECT c.oid AS relid, - i.oid AS indexrelid, - n.nspname AS schemaname, - c.relname, - i.relname AS indexrelname, - pg_stat_get_numscans(i.oid) AS idx_scan, - pg_stat_get_lastscan(i.oid) AS last_idx_scan, - pg_stat_get_tuples_returned(i.oid) AS idx_tup_read, - pg_stat_get_tuples_fetched(i.oid) AS idx_tup_fetch - FROM (((pg_class c - JOIN pg_index x ON ((c.oid = x.indrelid))) - JOIN pg_class i ON ((i.oid = x.indexrelid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char"])); -pg_stat_all_tables| SELECT c.oid AS relid, - n.nspname AS 
schemaname, - c.relname, - pg_stat_get_numscans(c.oid) AS seq_scan, - pg_stat_get_lastscan(c.oid) AS last_seq_scan, - pg_stat_get_tuples_returned(c.oid) AS seq_tup_read, - (sum(pg_stat_get_numscans(i.indexrelid)))::bigint AS idx_scan, - max(pg_stat_get_lastscan(i.indexrelid)) AS last_idx_scan, - ((sum(pg_stat_get_tuples_fetched(i.indexrelid)))::bigint + pg_stat_get_tuples_fetched(c.oid)) AS idx_tup_fetch, - pg_stat_get_tuples_inserted(c.oid) AS n_tup_ins, - pg_stat_get_tuples_updated(c.oid) AS n_tup_upd, - pg_stat_get_tuples_deleted(c.oid) AS n_tup_del, - pg_stat_get_tuples_hot_updated(c.oid) AS n_tup_hot_upd, - pg_stat_get_tuples_newpage_updated(c.oid) AS n_tup_newpage_upd, - pg_stat_get_live_tuples(c.oid) AS n_live_tup, - pg_stat_get_dead_tuples(c.oid) AS n_dead_tup, - pg_stat_get_mod_since_analyze(c.oid) AS n_mod_since_analyze, - pg_stat_get_ins_since_vacuum(c.oid) AS n_ins_since_vacuum, - pg_stat_get_last_vacuum_time(c.oid) AS last_vacuum, - pg_stat_get_last_autovacuum_time(c.oid) AS last_autovacuum, - pg_stat_get_last_analyze_time(c.oid) AS last_analyze, - pg_stat_get_last_autoanalyze_time(c.oid) AS last_autoanalyze, - pg_stat_get_vacuum_count(c.oid) AS vacuum_count, - pg_stat_get_autovacuum_count(c.oid) AS autovacuum_count, - pg_stat_get_analyze_count(c.oid) AS analyze_count, - pg_stat_get_autoanalyze_count(c.oid) AS autoanalyze_count - FROM ((pg_class c - LEFT JOIN pg_index i ON ((c.oid = i.indrelid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char", 'p'::"char"])) - GROUP BY c.oid, n.nspname, c.relname; -pg_stat_archiver| SELECT archived_count, - last_archived_wal, - last_archived_time, - failed_count, - last_failed_wal, - last_failed_time, - stats_reset - FROM pg_stat_get_archiver() s(archived_count, last_archived_wal, last_archived_time, failed_count, last_failed_wal, last_failed_time, stats_reset); -pg_stat_bgwriter| SELECT pg_stat_get_bgwriter_buf_written_clean() AS buffers_clean, - pg_stat_get_bgwriter_maxwritten_clean() AS maxwritten_clean, - pg_stat_get_buf_alloc() AS buffers_alloc, - pg_stat_get_bgwriter_stat_reset_time() AS stats_reset; -pg_stat_checkpointer| SELECT pg_stat_get_checkpointer_num_timed() AS num_timed, - pg_stat_get_checkpointer_num_requested() AS num_requested, - pg_stat_get_checkpointer_restartpoints_timed() AS restartpoints_timed, - pg_stat_get_checkpointer_restartpoints_requested() AS restartpoints_req, - pg_stat_get_checkpointer_restartpoints_performed() AS restartpoints_done, - pg_stat_get_checkpointer_write_time() AS write_time, - pg_stat_get_checkpointer_sync_time() AS sync_time, - pg_stat_get_checkpointer_buffers_written() AS buffers_written, - pg_stat_get_checkpointer_stat_reset_time() AS stats_reset; -pg_stat_database| SELECT oid AS datid, - datname, - CASE - WHEN (oid = (0)::oid) THEN 0 - ELSE pg_stat_get_db_numbackends(oid) - END AS numbackends, - pg_stat_get_db_xact_commit(oid) AS xact_commit, - pg_stat_get_db_xact_rollback(oid) AS xact_rollback, - (pg_stat_get_db_blocks_fetched(oid) - pg_stat_get_db_blocks_hit(oid)) AS blks_read, - pg_stat_get_db_blocks_hit(oid) AS blks_hit, - pg_stat_get_db_tuples_returned(oid) AS tup_returned, - pg_stat_get_db_tuples_fetched(oid) AS tup_fetched, - pg_stat_get_db_tuples_inserted(oid) AS tup_inserted, - pg_stat_get_db_tuples_updated(oid) AS tup_updated, - pg_stat_get_db_tuples_deleted(oid) AS tup_deleted, - pg_stat_get_db_conflict_all(oid) AS conflicts, - pg_stat_get_db_temp_files(oid) AS temp_files, - pg_stat_get_db_temp_bytes(oid) 
AS temp_bytes, - pg_stat_get_db_deadlocks(oid) AS deadlocks, - pg_stat_get_db_checksum_failures(oid) AS checksum_failures, - pg_stat_get_db_checksum_last_failure(oid) AS checksum_last_failure, - pg_stat_get_db_blk_read_time(oid) AS blk_read_time, - pg_stat_get_db_blk_write_time(oid) AS blk_write_time, - pg_stat_get_db_session_time(oid) AS session_time, - pg_stat_get_db_active_time(oid) AS active_time, - pg_stat_get_db_idle_in_transaction_time(oid) AS idle_in_transaction_time, - pg_stat_get_db_sessions(oid) AS sessions, - pg_stat_get_db_sessions_abandoned(oid) AS sessions_abandoned, - pg_stat_get_db_sessions_fatal(oid) AS sessions_fatal, - pg_stat_get_db_sessions_killed(oid) AS sessions_killed, - pg_stat_get_db_stat_reset_time(oid) AS stats_reset - FROM ( SELECT 0 AS oid, - NULL::name AS datname - UNION ALL - SELECT pg_database.oid, - pg_database.datname - FROM pg_database) d; -pg_stat_database_conflicts| SELECT oid AS datid, - datname, - pg_stat_get_db_conflict_tablespace(oid) AS confl_tablespace, - pg_stat_get_db_conflict_lock(oid) AS confl_lock, - pg_stat_get_db_conflict_snapshot(oid) AS confl_snapshot, - pg_stat_get_db_conflict_bufferpin(oid) AS confl_bufferpin, - pg_stat_get_db_conflict_startup_deadlock(oid) AS confl_deadlock, - pg_stat_get_db_conflict_logicalslot(oid) AS confl_active_logicalslot - FROM pg_database d; -pg_stat_gssapi| SELECT pid, - gss_auth AS gss_authenticated, - gss_princ AS principal, - gss_enc AS encrypted, - gss_delegation AS credentials_delegated - FROM pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion, sslcipher, sslbits, ssl_client_dn, ssl_client_serial, ssl_issuer_dn, gss_auth, gss_princ, gss_enc, gss_delegation, leader_pid, query_id) - WHERE (client_port IS NOT NULL); -pg_stat_io| SELECT backend_type, - object, - context, - reads, - read_time, - writes, - write_time, - writebacks, - writeback_time, - extends, - extend_time, - op_bytes, - hits, - evictions, - reuses, - fsyncs, - fsync_time, - stats_reset - FROM pg_stat_get_io() b(backend_type, object, context, reads, read_time, writes, write_time, writebacks, writeback_time, extends, extend_time, op_bytes, hits, evictions, reuses, fsyncs, fsync_time, stats_reset); -pg_stat_progress_analyze| SELECT s.pid, - s.datid, - d.datname, - s.relid, - CASE s.param1 - WHEN 0 THEN 'initializing'::text - WHEN 1 THEN 'acquiring sample rows'::text - WHEN 2 THEN 'acquiring inherited sample rows'::text - WHEN 3 THEN 'computing statistics'::text - WHEN 4 THEN 'computing extended statistics'::text - WHEN 5 THEN 'finalizing analyze'::text - ELSE NULL::text - END AS phase, - s.param2 AS sample_blks_total, - s.param3 AS sample_blks_scanned, - s.param4 AS ext_stats_total, - s.param5 AS ext_stats_computed, - s.param6 AS child_tables_total, - s.param7 AS child_tables_done, - (s.param8)::oid AS current_child_table_relid - FROM (pg_stat_get_progress_info('ANALYZE'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) - LEFT JOIN pg_database d ON ((s.datid = d.oid))); -pg_stat_progress_basebackup| SELECT pid, - CASE param1 - WHEN 0 THEN 'initializing'::text - WHEN 1 THEN 'waiting for checkpoint to finish'::text - WHEN 2 THEN 'estimating backup size'::text - WHEN 3 THEN 
'streaming database files'::text - WHEN 4 THEN 'waiting for wal archiving to finish'::text - WHEN 5 THEN 'transferring wal files'::text - ELSE NULL::text - END AS phase, - CASE param2 - WHEN '-1'::integer THEN NULL::bigint - ELSE param2 - END AS backup_total, - param3 AS backup_streamed, - param4 AS tablespaces_total, - param5 AS tablespaces_streamed - FROM pg_stat_get_progress_info('BASEBACKUP'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20); -pg_stat_progress_cluster| SELECT s.pid, - s.datid, - d.datname, - s.relid, - CASE s.param1 - WHEN 1 THEN 'CLUSTER'::text - WHEN 2 THEN 'VACUUM FULL'::text - ELSE NULL::text - END AS command, - CASE s.param2 - WHEN 0 THEN 'initializing'::text - WHEN 1 THEN 'seq scanning heap'::text - WHEN 2 THEN 'index scanning heap'::text - WHEN 3 THEN 'sorting tuples'::text - WHEN 4 THEN 'writing new heap'::text - WHEN 5 THEN 'swapping relation files'::text - WHEN 6 THEN 'rebuilding index'::text - WHEN 7 THEN 'performing final cleanup'::text - ELSE NULL::text - END AS phase, - (s.param3)::oid AS cluster_index_relid, - s.param4 AS heap_tuples_scanned, - s.param5 AS heap_tuples_written, - s.param6 AS heap_blks_total, - s.param7 AS heap_blks_scanned, - s.param8 AS index_rebuild_count - FROM (pg_stat_get_progress_info('CLUSTER'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) - LEFT JOIN pg_database d ON ((s.datid = d.oid))); -pg_stat_progress_copy| SELECT s.pid, - s.datid, - d.datname, - s.relid, - CASE s.param5 - WHEN 1 THEN 'COPY FROM'::text - WHEN 2 THEN 'COPY TO'::text - ELSE NULL::text - END AS command, - CASE s.param6 - WHEN 1 THEN 'FILE'::text - WHEN 2 THEN 'PROGRAM'::text - WHEN 3 THEN 'PIPE'::text - WHEN 4 THEN 'CALLBACK'::text - ELSE NULL::text - END AS type, - s.param1 AS bytes_processed, - s.param2 AS bytes_total, - s.param3 AS tuples_processed, - s.param4 AS tuples_excluded, - s.param7 AS tuples_skipped - FROM (pg_stat_get_progress_info('COPY'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) - LEFT JOIN pg_database d ON ((s.datid = d.oid))); -pg_stat_progress_create_index| SELECT s.pid, - s.datid, - d.datname, - s.relid, - (s.param7)::oid AS index_relid, - CASE s.param1 - WHEN 1 THEN 'CREATE INDEX'::text - WHEN 2 THEN 'CREATE INDEX CONCURRENTLY'::text - WHEN 3 THEN 'REINDEX'::text - WHEN 4 THEN 'REINDEX CONCURRENTLY'::text - ELSE NULL::text - END AS command, - CASE s.param10 - WHEN 0 THEN 'initializing'::text - WHEN 1 THEN 'waiting for writers before build'::text - WHEN 2 THEN ('building index'::text || COALESCE((': '::text || pg_indexam_progress_phasename((s.param9)::oid, s.param11)), ''::text)) - WHEN 3 THEN 'waiting for writers before validation'::text - WHEN 4 THEN 'index validation: scanning index'::text - WHEN 5 THEN 'index validation: sorting tuples'::text - WHEN 6 THEN 'index validation: scanning table'::text - WHEN 7 THEN 'waiting for old snapshots'::text - WHEN 8 THEN 'waiting for readers before marking dead'::text - WHEN 9 THEN 'waiting for readers before dropping'::text - ELSE NULL::text - END AS phase, - s.param4 AS lockers_total, - s.param5 AS lockers_done, - s.param6 AS current_locker_pid, - 
s.param16 AS blocks_total, - s.param17 AS blocks_done, - s.param12 AS tuples_total, - s.param13 AS tuples_done, - s.param14 AS partitions_total, - s.param15 AS partitions_done - FROM (pg_stat_get_progress_info('CREATE INDEX'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) - LEFT JOIN pg_database d ON ((s.datid = d.oid))); -pg_stat_progress_vacuum| SELECT s.pid, - s.datid, - d.datname, - s.relid, - CASE s.param1 - WHEN 0 THEN 'initializing'::text - WHEN 1 THEN 'scanning heap'::text - WHEN 2 THEN 'vacuuming indexes'::text - WHEN 3 THEN 'vacuuming heap'::text - WHEN 4 THEN 'cleaning up indexes'::text - WHEN 5 THEN 'truncating heap'::text - WHEN 6 THEN 'performing final cleanup'::text - ELSE NULL::text - END AS phase, - s.param2 AS heap_blks_total, - s.param3 AS heap_blks_scanned, - s.param4 AS heap_blks_vacuumed, - s.param5 AS index_vacuum_count, - s.param6 AS max_dead_tuples, - s.param7 AS num_dead_tuples, - s.param8 AS indexes_total, - s.param9 AS indexes_processed - FROM (pg_stat_get_progress_info('VACUUM'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) - LEFT JOIN pg_database d ON ((s.datid = d.oid))); -pg_stat_recovery_prefetch| SELECT stats_reset, - prefetch, - hit, - skip_init, - skip_new, - skip_fpw, - skip_rep, - wal_distance, - block_distance, - io_depth - FROM pg_stat_get_recovery_prefetch() s(stats_reset, prefetch, hit, skip_init, skip_new, skip_fpw, skip_rep, wal_distance, block_distance, io_depth); -pg_stat_replication| SELECT s.pid, - s.usesysid, - u.rolname AS usename, - s.application_name, - s.client_addr, - s.client_hostname, - s.client_port, - s.backend_start, - s.backend_xmin, - w.state, - w.sent_lsn, - w.write_lsn, - w.flush_lsn, - w.replay_lsn, - w.write_lag, - w.flush_lag, - w.replay_lag, - w.sync_priority, - w.sync_state, - w.reply_time - FROM ((pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion, sslcipher, sslbits, ssl_client_dn, ssl_client_serial, ssl_issuer_dn, gss_auth, gss_princ, gss_enc, gss_delegation, leader_pid, query_id) - JOIN pg_stat_get_wal_senders() w(pid, state, sent_lsn, write_lsn, flush_lsn, replay_lsn, write_lag, flush_lag, replay_lag, sync_priority, sync_state, reply_time) ON ((s.pid = w.pid))) - LEFT JOIN pg_authid u ON ((s.usesysid = u.oid))); -pg_stat_replication_slots| SELECT s.slot_name, - s.spill_txns, - s.spill_count, - s.spill_bytes, - s.stream_txns, - s.stream_count, - s.stream_bytes, - s.total_txns, - s.total_bytes, - s.stats_reset - FROM pg_replication_slots r, - LATERAL pg_stat_get_replication_slot((r.slot_name)::text) s(slot_name, spill_txns, spill_count, spill_bytes, stream_txns, stream_count, stream_bytes, total_txns, total_bytes, stats_reset) - WHERE (r.datoid IS NOT NULL); -pg_stat_slru| SELECT name, - blks_zeroed, - blks_hit, - blks_read, - blks_written, - blks_exists, - flushes, - truncates, - stats_reset - FROM pg_stat_get_slru() s(name, blks_zeroed, blks_hit, blks_read, blks_written, blks_exists, flushes, truncates, stats_reset); -pg_stat_ssl| SELECT pid, - ssl, - sslversion AS version, - sslcipher AS cipher, - 
sslbits AS bits, - ssl_client_dn AS client_dn, - ssl_client_serial AS client_serial, - ssl_issuer_dn AS issuer_dn - FROM pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion, sslcipher, sslbits, ssl_client_dn, ssl_client_serial, ssl_issuer_dn, gss_auth, gss_princ, gss_enc, gss_delegation, leader_pid, query_id) - WHERE (client_port IS NOT NULL); -pg_stat_subscription| SELECT su.oid AS subid, - su.subname, - st.worker_type, - st.pid, - st.leader_pid, - st.relid, - st.received_lsn, - st.last_msg_send_time, - st.last_msg_receipt_time, - st.latest_end_lsn, - st.latest_end_time - FROM (pg_subscription su - LEFT JOIN pg_stat_get_subscription(NULL::oid) st(subid, relid, pid, leader_pid, received_lsn, last_msg_send_time, last_msg_receipt_time, latest_end_lsn, latest_end_time, worker_type) ON ((st.subid = su.oid))); -pg_stat_subscription_stats| SELECT ss.subid, - s.subname, - ss.apply_error_count, - ss.sync_error_count, - ss.stats_reset - FROM pg_subscription s, - LATERAL pg_stat_get_subscription_stats(s.oid) ss(subid, apply_error_count, sync_error_count, stats_reset); -pg_stat_sys_indexes| SELECT relid, - indexrelid, - schemaname, - relname, - indexrelname, - idx_scan, - last_idx_scan, - idx_tup_read, - idx_tup_fetch - FROM pg_stat_all_indexes - WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); -pg_stat_sys_tables| SELECT relid, - schemaname, - relname, - seq_scan, - last_seq_scan, - seq_tup_read, - idx_scan, - last_idx_scan, - idx_tup_fetch, - n_tup_ins, - n_tup_upd, - n_tup_del, - n_tup_hot_upd, - n_tup_newpage_upd, - n_live_tup, - n_dead_tup, - n_mod_since_analyze, - n_ins_since_vacuum, - last_vacuum, - last_autovacuum, - last_analyze, - last_autoanalyze, - vacuum_count, - autovacuum_count, - analyze_count, - autoanalyze_count - FROM pg_stat_all_tables - WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); -pg_stat_user_functions| SELECT p.oid AS funcid, - n.nspname AS schemaname, - p.proname AS funcname, - pg_stat_get_function_calls(p.oid) AS calls, - pg_stat_get_function_total_time(p.oid) AS total_time, - pg_stat_get_function_self_time(p.oid) AS self_time - FROM (pg_proc p - LEFT JOIN pg_namespace n ON ((n.oid = p.pronamespace))) - WHERE ((p.prolang <> (12)::oid) AND (pg_stat_get_function_calls(p.oid) IS NOT NULL)); -pg_stat_user_indexes| SELECT relid, - indexrelid, - schemaname, - relname, - indexrelname, - idx_scan, - last_idx_scan, - idx_tup_read, - idx_tup_fetch - FROM pg_stat_all_indexes - WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); -pg_stat_user_tables| SELECT relid, - schemaname, - relname, - seq_scan, - last_seq_scan, - seq_tup_read, - idx_scan, - last_idx_scan, - idx_tup_fetch, - n_tup_ins, - n_tup_upd, - n_tup_del, - n_tup_hot_upd, - n_tup_newpage_upd, - n_live_tup, - n_dead_tup, - n_mod_since_analyze, - n_ins_since_vacuum, - last_vacuum, - last_autovacuum, - last_analyze, - last_autoanalyze, - vacuum_count, - autovacuum_count, - analyze_count, - autoanalyze_count - FROM pg_stat_all_tables - WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); -pg_stat_wal| SELECT wal_records, - 
wal_fpi, - wal_bytes, - wal_buffers_full, - wal_write, - wal_sync, - wal_write_time, - wal_sync_time, - stats_reset - FROM pg_stat_get_wal() w(wal_records, wal_fpi, wal_bytes, wal_buffers_full, wal_write, wal_sync, wal_write_time, wal_sync_time, stats_reset); -pg_stat_wal_receiver| SELECT pid, - status, - receive_start_lsn, - receive_start_tli, - written_lsn, - flushed_lsn, - received_tli, - last_msg_send_time, - last_msg_receipt_time, - latest_end_lsn, - latest_end_time, - slot_name, - sender_host, - sender_port, - conninfo - FROM pg_stat_get_wal_receiver() s(pid, status, receive_start_lsn, receive_start_tli, written_lsn, flushed_lsn, received_tli, last_msg_send_time, last_msg_receipt_time, latest_end_lsn, latest_end_time, slot_name, sender_host, sender_port, conninfo) - WHERE (pid IS NOT NULL); -pg_stat_xact_all_tables| SELECT c.oid AS relid, - n.nspname AS schemaname, - c.relname, - pg_stat_get_xact_numscans(c.oid) AS seq_scan, - pg_stat_get_xact_tuples_returned(c.oid) AS seq_tup_read, - (sum(pg_stat_get_xact_numscans(i.indexrelid)))::bigint AS idx_scan, - ((sum(pg_stat_get_xact_tuples_fetched(i.indexrelid)))::bigint + pg_stat_get_xact_tuples_fetched(c.oid)) AS idx_tup_fetch, - pg_stat_get_xact_tuples_inserted(c.oid) AS n_tup_ins, - pg_stat_get_xact_tuples_updated(c.oid) AS n_tup_upd, - pg_stat_get_xact_tuples_deleted(c.oid) AS n_tup_del, - pg_stat_get_xact_tuples_hot_updated(c.oid) AS n_tup_hot_upd, - pg_stat_get_xact_tuples_newpage_updated(c.oid) AS n_tup_newpage_upd - FROM ((pg_class c - LEFT JOIN pg_index i ON ((c.oid = i.indrelid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char", 'p'::"char"])) - GROUP BY c.oid, n.nspname, c.relname; -pg_stat_xact_sys_tables| SELECT relid, - schemaname, - relname, - seq_scan, - seq_tup_read, - idx_scan, - idx_tup_fetch, - n_tup_ins, - n_tup_upd, - n_tup_del, - n_tup_hot_upd, - n_tup_newpage_upd - FROM pg_stat_xact_all_tables - WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); -pg_stat_xact_user_functions| SELECT p.oid AS funcid, - n.nspname AS schemaname, - p.proname AS funcname, - pg_stat_get_xact_function_calls(p.oid) AS calls, - pg_stat_get_xact_function_total_time(p.oid) AS total_time, - pg_stat_get_xact_function_self_time(p.oid) AS self_time - FROM (pg_proc p - LEFT JOIN pg_namespace n ON ((n.oid = p.pronamespace))) - WHERE ((p.prolang <> (12)::oid) AND (pg_stat_get_xact_function_calls(p.oid) IS NOT NULL)); -pg_stat_xact_user_tables| SELECT relid, - schemaname, - relname, - seq_scan, - seq_tup_read, - idx_scan, - idx_tup_fetch, - n_tup_ins, - n_tup_upd, - n_tup_del, - n_tup_hot_upd, - n_tup_newpage_upd - FROM pg_stat_xact_all_tables - WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); -pg_statio_all_indexes| SELECT c.oid AS relid, - i.oid AS indexrelid, - n.nspname AS schemaname, - c.relname, - i.relname AS indexrelname, - (pg_stat_get_blocks_fetched(i.oid) - pg_stat_get_blocks_hit(i.oid)) AS idx_blks_read, - pg_stat_get_blocks_hit(i.oid) AS idx_blks_hit - FROM (((pg_class c - JOIN pg_index x ON ((c.oid = x.indrelid))) - JOIN pg_class i ON ((i.oid = x.indexrelid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char"])); -pg_statio_all_sequences| SELECT c.oid AS relid, - n.nspname AS schemaname, - c.relname, - (pg_stat_get_blocks_fetched(c.oid) - 
pg_stat_get_blocks_hit(c.oid)) AS blks_read, - pg_stat_get_blocks_hit(c.oid) AS blks_hit - FROM (pg_class c - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE (c.relkind = 'S'::"char"); -pg_statio_all_tables| SELECT c.oid AS relid, - n.nspname AS schemaname, - c.relname, - (pg_stat_get_blocks_fetched(c.oid) - pg_stat_get_blocks_hit(c.oid)) AS heap_blks_read, - pg_stat_get_blocks_hit(c.oid) AS heap_blks_hit, - i.idx_blks_read, - i.idx_blks_hit, - (pg_stat_get_blocks_fetched(t.oid) - pg_stat_get_blocks_hit(t.oid)) AS toast_blks_read, - pg_stat_get_blocks_hit(t.oid) AS toast_blks_hit, - x.idx_blks_read AS tidx_blks_read, - x.idx_blks_hit AS tidx_blks_hit - FROM ((((pg_class c - LEFT JOIN pg_class t ON ((c.reltoastrelid = t.oid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - LEFT JOIN LATERAL ( SELECT (sum((pg_stat_get_blocks_fetched(pg_index.indexrelid) - pg_stat_get_blocks_hit(pg_index.indexrelid))))::bigint AS idx_blks_read, - (sum(pg_stat_get_blocks_hit(pg_index.indexrelid)))::bigint AS idx_blks_hit - FROM pg_index - WHERE (pg_index.indrelid = c.oid)) i ON (true)) - LEFT JOIN LATERAL ( SELECT (sum((pg_stat_get_blocks_fetched(pg_index.indexrelid) - pg_stat_get_blocks_hit(pg_index.indexrelid))))::bigint AS idx_blks_read, - (sum(pg_stat_get_blocks_hit(pg_index.indexrelid)))::bigint AS idx_blks_hit - FROM pg_index - WHERE (pg_index.indrelid = t.oid)) x ON (true)) - WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char"])); -pg_statio_sys_indexes| SELECT relid, - indexrelid, - schemaname, - relname, - indexrelname, - idx_blks_read, - idx_blks_hit - FROM pg_statio_all_indexes - WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); -pg_statio_sys_sequences| SELECT relid, - schemaname, - relname, - blks_read, - blks_hit - FROM pg_statio_all_sequences - WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); -pg_statio_sys_tables| SELECT relid, - schemaname, - relname, - heap_blks_read, - heap_blks_hit, - idx_blks_read, - idx_blks_hit, - toast_blks_read, - toast_blks_hit, - tidx_blks_read, - tidx_blks_hit - FROM pg_statio_all_tables - WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); -pg_statio_user_indexes| SELECT relid, - indexrelid, - schemaname, - relname, - indexrelname, - idx_blks_read, - idx_blks_hit - FROM pg_statio_all_indexes - WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); -pg_statio_user_sequences| SELECT relid, - schemaname, - relname, - blks_read, - blks_hit - FROM pg_statio_all_sequences - WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); -pg_statio_user_tables| SELECT relid, - schemaname, - relname, - heap_blks_read, - heap_blks_hit, - idx_blks_read, - idx_blks_hit, - toast_blks_read, - toast_blks_hit, - tidx_blks_read, - tidx_blks_hit - FROM pg_statio_all_tables - WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); -pg_stats| SELECT n.nspname AS schemaname, - c.relname AS tablename, - a.attname, - s.stainherit AS inherited, - s.stanullfrac AS null_frac, - s.stawidth AS avg_width, - s.stadistinct AS n_distinct, - CASE - WHEN (s.stakind1 = 1) THEN s.stavalues1 - WHEN (s.stakind2 = 1) THEN s.stavalues2 - WHEN (s.stakind3 = 1) THEN 
s.stavalues3 - WHEN (s.stakind4 = 1) THEN s.stavalues4 - WHEN (s.stakind5 = 1) THEN s.stavalues5 - ELSE NULL::anyarray - END AS most_common_vals, - CASE - WHEN (s.stakind1 = 1) THEN s.stanumbers1 - WHEN (s.stakind2 = 1) THEN s.stanumbers2 - WHEN (s.stakind3 = 1) THEN s.stanumbers3 - WHEN (s.stakind4 = 1) THEN s.stanumbers4 - WHEN (s.stakind5 = 1) THEN s.stanumbers5 - ELSE NULL::real[] - END AS most_common_freqs, - CASE - WHEN (s.stakind1 = 2) THEN s.stavalues1 - WHEN (s.stakind2 = 2) THEN s.stavalues2 - WHEN (s.stakind3 = 2) THEN s.stavalues3 - WHEN (s.stakind4 = 2) THEN s.stavalues4 - WHEN (s.stakind5 = 2) THEN s.stavalues5 - ELSE NULL::anyarray - END AS histogram_bounds, - CASE - WHEN (s.stakind1 = 3) THEN s.stanumbers1[1] - WHEN (s.stakind2 = 3) THEN s.stanumbers2[1] - WHEN (s.stakind3 = 3) THEN s.stanumbers3[1] - WHEN (s.stakind4 = 3) THEN s.stanumbers4[1] - WHEN (s.stakind5 = 3) THEN s.stanumbers5[1] - ELSE NULL::real - END AS correlation, - CASE - WHEN (s.stakind1 = 4) THEN s.stavalues1 - WHEN (s.stakind2 = 4) THEN s.stavalues2 - WHEN (s.stakind3 = 4) THEN s.stavalues3 - WHEN (s.stakind4 = 4) THEN s.stavalues4 - WHEN (s.stakind5 = 4) THEN s.stavalues5 - ELSE NULL::anyarray - END AS most_common_elems, - CASE - WHEN (s.stakind1 = 4) THEN s.stanumbers1 - WHEN (s.stakind2 = 4) THEN s.stanumbers2 - WHEN (s.stakind3 = 4) THEN s.stanumbers3 - WHEN (s.stakind4 = 4) THEN s.stanumbers4 - WHEN (s.stakind5 = 4) THEN s.stanumbers5 - ELSE NULL::real[] - END AS most_common_elem_freqs, - CASE - WHEN (s.stakind1 = 5) THEN s.stanumbers1 - WHEN (s.stakind2 = 5) THEN s.stanumbers2 - WHEN (s.stakind3 = 5) THEN s.stanumbers3 - WHEN (s.stakind4 = 5) THEN s.stanumbers4 - WHEN (s.stakind5 = 5) THEN s.stanumbers5 - ELSE NULL::real[] - END AS elem_count_histogram, - CASE - WHEN (s.stakind1 = 6) THEN s.stavalues1 - WHEN (s.stakind2 = 6) THEN s.stavalues2 - WHEN (s.stakind3 = 6) THEN s.stavalues3 - WHEN (s.stakind4 = 6) THEN s.stavalues4 - WHEN (s.stakind5 = 6) THEN s.stavalues5 - ELSE NULL::anyarray - END AS range_length_histogram, - CASE - WHEN (s.stakind1 = 6) THEN s.stanumbers1[1] - WHEN (s.stakind2 = 6) THEN s.stanumbers2[1] - WHEN (s.stakind3 = 6) THEN s.stanumbers3[1] - WHEN (s.stakind4 = 6) THEN s.stanumbers4[1] - WHEN (s.stakind5 = 6) THEN s.stanumbers5[1] - ELSE NULL::real - END AS range_empty_frac, - CASE - WHEN (s.stakind1 = 7) THEN s.stavalues1 - WHEN (s.stakind2 = 7) THEN s.stavalues2 - WHEN (s.stakind3 = 7) THEN s.stavalues3 - WHEN (s.stakind4 = 7) THEN s.stavalues4 - WHEN (s.stakind5 = 7) THEN s.stavalues5 - ELSE NULL::anyarray - END AS range_bounds_histogram - FROM (((pg_statistic s - JOIN pg_class c ON ((c.oid = s.starelid))) - JOIN pg_attribute a ON (((c.oid = a.attrelid) AND (a.attnum = s.staattnum)))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE ((NOT a.attisdropped) AND has_column_privilege(c.oid, a.attnum, 'select'::text) AND ((c.relrowsecurity = false) OR (NOT row_security_active(c.oid)))); -pg_stats_ext| SELECT cn.nspname AS schemaname, - c.relname AS tablename, - sn.nspname AS statistics_schemaname, - s.stxname AS statistics_name, - pg_get_userbyid(s.stxowner) AS statistics_owner, - ( SELECT array_agg(a.attname ORDER BY a.attnum) AS array_agg - FROM (unnest(s.stxkeys) k(k) - JOIN pg_attribute a ON (((a.attrelid = s.stxrelid) AND (a.attnum = k.k))))) AS attnames, - pg_get_statisticsobjdef_expressions(s.oid) AS exprs, - s.stxkind AS kinds, - sd.stxdinherit AS inherited, - sd.stxdndistinct AS n_distinct, - sd.stxddependencies AS dependencies, - m.most_common_vals, - 
m.most_common_val_nulls, - m.most_common_freqs, - m.most_common_base_freqs - FROM (((((pg_statistic_ext s - JOIN pg_class c ON ((c.oid = s.stxrelid))) - JOIN pg_statistic_ext_data sd ON ((s.oid = sd.stxoid))) - LEFT JOIN pg_namespace cn ON ((cn.oid = c.relnamespace))) - LEFT JOIN pg_namespace sn ON ((sn.oid = s.stxnamespace))) - LEFT JOIN LATERAL ( SELECT array_agg(pg_mcv_list_items."values") AS most_common_vals, - array_agg(pg_mcv_list_items.nulls) AS most_common_val_nulls, - array_agg(pg_mcv_list_items.frequency) AS most_common_freqs, - array_agg(pg_mcv_list_items.base_frequency) AS most_common_base_freqs - FROM pg_mcv_list_items(sd.stxdmcv) pg_mcv_list_items(index, "values", nulls, frequency, base_frequency)) m ON ((sd.stxdmcv IS NOT NULL))) - WHERE ((NOT (EXISTS ( SELECT 1 - FROM (unnest(s.stxkeys) k(k) - JOIN pg_attribute a ON (((a.attrelid = s.stxrelid) AND (a.attnum = k.k)))) - WHERE (NOT has_column_privilege(c.oid, a.attnum, 'select'::text))))) AND ((c.relrowsecurity = false) OR (NOT row_security_active(c.oid)))); -pg_stats_ext_exprs| SELECT cn.nspname AS schemaname, - c.relname AS tablename, - sn.nspname AS statistics_schemaname, - s.stxname AS statistics_name, - pg_get_userbyid(s.stxowner) AS statistics_owner, - stat.expr, - sd.stxdinherit AS inherited, - (stat.a).stanullfrac AS null_frac, - (stat.a).stawidth AS avg_width, - (stat.a).stadistinct AS n_distinct, - CASE - WHEN ((stat.a).stakind1 = 1) THEN (stat.a).stavalues1 - WHEN ((stat.a).stakind2 = 1) THEN (stat.a).stavalues2 - WHEN ((stat.a).stakind3 = 1) THEN (stat.a).stavalues3 - WHEN ((stat.a).stakind4 = 1) THEN (stat.a).stavalues4 - WHEN ((stat.a).stakind5 = 1) THEN (stat.a).stavalues5 - ELSE NULL::anyarray - END AS most_common_vals, - CASE - WHEN ((stat.a).stakind1 = 1) THEN (stat.a).stanumbers1 - WHEN ((stat.a).stakind2 = 1) THEN (stat.a).stanumbers2 - WHEN ((stat.a).stakind3 = 1) THEN (stat.a).stanumbers3 - WHEN ((stat.a).stakind4 = 1) THEN (stat.a).stanumbers4 - WHEN ((stat.a).stakind5 = 1) THEN (stat.a).stanumbers5 - ELSE NULL::real[] - END AS most_common_freqs, - CASE - WHEN ((stat.a).stakind1 = 2) THEN (stat.a).stavalues1 - WHEN ((stat.a).stakind2 = 2) THEN (stat.a).stavalues2 - WHEN ((stat.a).stakind3 = 2) THEN (stat.a).stavalues3 - WHEN ((stat.a).stakind4 = 2) THEN (stat.a).stavalues4 - WHEN ((stat.a).stakind5 = 2) THEN (stat.a).stavalues5 - ELSE NULL::anyarray - END AS histogram_bounds, - CASE - WHEN ((stat.a).stakind1 = 3) THEN (stat.a).stanumbers1[1] - WHEN ((stat.a).stakind2 = 3) THEN (stat.a).stanumbers2[1] - WHEN ((stat.a).stakind3 = 3) THEN (stat.a).stanumbers3[1] - WHEN ((stat.a).stakind4 = 3) THEN (stat.a).stanumbers4[1] - WHEN ((stat.a).stakind5 = 3) THEN (stat.a).stanumbers5[1] - ELSE NULL::real - END AS correlation, - CASE - WHEN ((stat.a).stakind1 = 4) THEN (stat.a).stavalues1 - WHEN ((stat.a).stakind2 = 4) THEN (stat.a).stavalues2 - WHEN ((stat.a).stakind3 = 4) THEN (stat.a).stavalues3 - WHEN ((stat.a).stakind4 = 4) THEN (stat.a).stavalues4 - WHEN ((stat.a).stakind5 = 4) THEN (stat.a).stavalues5 - ELSE NULL::anyarray - END AS most_common_elems, - CASE - WHEN ((stat.a).stakind1 = 4) THEN (stat.a).stanumbers1 - WHEN ((stat.a).stakind2 = 4) THEN (stat.a).stanumbers2 - WHEN ((stat.a).stakind3 = 4) THEN (stat.a).stanumbers3 - WHEN ((stat.a).stakind4 = 4) THEN (stat.a).stanumbers4 - WHEN ((stat.a).stakind5 = 4) THEN (stat.a).stanumbers5 - ELSE NULL::real[] - END AS most_common_elem_freqs, - CASE - WHEN ((stat.a).stakind1 = 5) THEN (stat.a).stanumbers1 - WHEN ((stat.a).stakind2 = 5) THEN (stat.a).stanumbers2 
- WHEN ((stat.a).stakind3 = 5) THEN (stat.a).stanumbers3 - WHEN ((stat.a).stakind4 = 5) THEN (stat.a).stanumbers4 - WHEN ((stat.a).stakind5 = 5) THEN (stat.a).stanumbers5 - ELSE NULL::real[] - END AS elem_count_histogram - FROM (((((pg_statistic_ext s - JOIN pg_class c ON ((c.oid = s.stxrelid))) - LEFT JOIN pg_statistic_ext_data sd ON ((s.oid = sd.stxoid))) - LEFT JOIN pg_namespace cn ON ((cn.oid = c.relnamespace))) - LEFT JOIN pg_namespace sn ON ((sn.oid = s.stxnamespace))) - JOIN LATERAL ( SELECT unnest(pg_get_statisticsobjdef_expressions(s.oid)) AS expr, - unnest(sd.stxdexpr) AS a) stat ON ((stat.expr IS NOT NULL))); -pg_tables| SELECT n.nspname AS schemaname, - c.relname AS tablename, - pg_get_userbyid(c.relowner) AS tableowner, - t.spcname AS tablespace, - c.relhasindex AS hasindexes, - c.relhasrules AS hasrules, - c.relhastriggers AS hastriggers, - c.relrowsecurity AS rowsecurity - FROM ((pg_class c - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - LEFT JOIN pg_tablespace t ON ((t.oid = c.reltablespace))) - WHERE (c.relkind = ANY (ARRAY['r'::"char", 'p'::"char"])); -pg_timezone_abbrevs| SELECT abbrev, - utc_offset, - is_dst - FROM pg_timezone_abbrevs() pg_timezone_abbrevs(abbrev, utc_offset, is_dst); -pg_timezone_names| SELECT name, - abbrev, - utc_offset, - is_dst - FROM pg_timezone_names() pg_timezone_names(name, abbrev, utc_offset, is_dst); -pg_user| SELECT usename, - usesysid, - usecreatedb, - usesuper, - userepl, - usebypassrls, - '********'::text AS passwd, - valuntil, - useconfig - FROM pg_shadow; -pg_user_mappings| SELECT u.oid AS umid, - s.oid AS srvid, - s.srvname, - u.umuser, - CASE - WHEN (u.umuser = (0)::oid) THEN 'public'::name - ELSE a.rolname - END AS usename, - CASE - WHEN (((u.umuser <> (0)::oid) AND (a.rolname = CURRENT_USER) AND (pg_has_role(s.srvowner, 'USAGE'::text) OR has_server_privilege(s.oid, 'USAGE'::text))) OR ((u.umuser = (0)::oid) AND pg_has_role(s.srvowner, 'USAGE'::text)) OR ( SELECT pg_authid.rolsuper - FROM pg_authid - WHERE (pg_authid.rolname = CURRENT_USER))) THEN u.umoptions - ELSE NULL::text[] - END AS umoptions - FROM ((pg_user_mapping u - JOIN pg_foreign_server s ON ((u.umserver = s.oid))) - LEFT JOIN pg_authid a ON ((a.oid = u.umuser))); -pg_views| SELECT n.nspname AS schemaname, - c.relname AS viewname, - pg_get_userbyid(c.relowner) AS viewowner, - pg_get_viewdef(c.oid) AS definition - FROM (pg_class c - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE (c.relkind = 'v'::"char"); -pg_wait_events| SELECT type, - name, - description - FROM pg_get_wait_events() pg_get_wait_events(type, name, description); -SELECT tablename, rulename, definition FROM pg_rules -WHERE schemaname = 'pg_catalog' -ORDER BY tablename, rulename; -pg_settings|pg_settings_n|CREATE RULE pg_settings_n AS - ON UPDATE TO pg_catalog.pg_settings DO INSTEAD NOTHING; -pg_settings|pg_settings_u|CREATE RULE pg_settings_u AS - ON UPDATE TO pg_catalog.pg_settings - WHERE (new.name = old.name) DO SELECT set_config(old.name, new.setting, false) AS set_config; --- restore normal output mode -\a\t --- --- CREATE OR REPLACE RULE --- -CREATE TABLE ruletest_tbl (a int, b int); -CREATE TABLE ruletest_tbl2 (a int, b int); -CREATE OR REPLACE RULE myrule AS ON INSERT TO ruletest_tbl - DO INSTEAD INSERT INTO ruletest_tbl2 VALUES (10, 10); -INSERT INTO ruletest_tbl VALUES (99, 99); -CREATE OR REPLACE RULE myrule AS ON INSERT TO ruletest_tbl - DO INSTEAD INSERT INTO ruletest_tbl2 VALUES (1000, 1000); -INSERT INTO ruletest_tbl VALUES (99, 99); -SELECT * FROM ruletest_tbl2; - 
a | b -------+------ - 10 | 10 - 1000 | 1000 -(2 rows) - --- Check that rewrite rules splitting one INSERT into multiple --- conditional statements does not disable FK checking. -create table rule_and_refint_t1 ( - id1a integer, - id1b integer, - primary key (id1a, id1b) -); -create table rule_and_refint_t2 ( - id2a integer, - id2c integer, - primary key (id2a, id2c) -); -create table rule_and_refint_t3 ( - id3a integer, - id3b integer, - id3c integer, - data text, - primary key (id3a, id3b, id3c), - foreign key (id3a, id3b) references rule_and_refint_t1 (id1a, id1b), - foreign key (id3a, id3c) references rule_and_refint_t2 (id2a, id2c) -); -insert into rule_and_refint_t1 values (1, 11); -insert into rule_and_refint_t1 values (1, 12); -insert into rule_and_refint_t1 values (2, 21); -insert into rule_and_refint_t1 values (2, 22); -insert into rule_and_refint_t2 values (1, 11); -insert into rule_and_refint_t2 values (1, 12); -insert into rule_and_refint_t2 values (2, 21); -insert into rule_and_refint_t2 values (2, 22); -insert into rule_and_refint_t3 values (1, 11, 11, 'row1'); -insert into rule_and_refint_t3 values (1, 11, 12, 'row2'); -insert into rule_and_refint_t3 values (1, 12, 11, 'row3'); -insert into rule_and_refint_t3 values (1, 12, 12, 'row4'); -insert into rule_and_refint_t3 values (1, 11, 13, 'row5'); -ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3c_fkey" -DETAIL: Key (id3a, id3c)=(1, 13) is not present in table "rule_and_refint_t2". -insert into rule_and_refint_t3 values (1, 13, 11, 'row6'); -ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3b_fkey" -DETAIL: Key (id3a, id3b)=(1, 13) is not present in table "rule_and_refint_t1". --- Ordinary table -insert into rule_and_refint_t3 values (1, 13, 11, 'row6') - on conflict do nothing; -ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3b_fkey" -DETAIL: Key (id3a, id3b)=(1, 13) is not present in table "rule_and_refint_t1". --- rule not fired, so fk violation -insert into rule_and_refint_t3 values (1, 13, 11, 'row6') - on conflict (id3a, id3b, id3c) do update - set id3b = excluded.id3b; -ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3b_fkey" -DETAIL: Key (id3a, id3b)=(1, 13) is not present in table "rule_and_refint_t1". --- rule fired, so unsupported -insert into shoelace values ('sl9', 0, 'pink', 35.0, 'inch', 0.0) - on conflict (sl_name) do update - set sl_avail = excluded.sl_avail; -ERROR: INSERT with ON CONFLICT clause cannot be used with table that has INSERT or UPDATE rules -create rule rule_and_refint_t3_ins as on insert to rule_and_refint_t3 - where (exists (select 1 from rule_and_refint_t3 - where (((rule_and_refint_t3.id3a = new.id3a) - and (rule_and_refint_t3.id3b = new.id3b)) - and (rule_and_refint_t3.id3c = new.id3c)))) - do instead update rule_and_refint_t3 set data = new.data - where (((rule_and_refint_t3.id3a = new.id3a) - and (rule_and_refint_t3.id3b = new.id3b)) - and (rule_and_refint_t3.id3c = new.id3c)); -insert into rule_and_refint_t3 values (1, 11, 13, 'row7'); -ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3c_fkey" -DETAIL: Key (id3a, id3c)=(1, 13) is not present in table "rule_and_refint_t2". 
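[Editor's sketch] The hunk above pins down how rewrite rules interact with INSERT ... ON CONFLICT: foreign-key checks still fire when a conditional rule does not apply, and ON CONFLICT is rejected outright once the target table's INSERT is rewritten by a rule (the shoelace case). A minimal sketch of that unsupported case, using hypothetical tables demo_base and demo_shadow in place of the regression fixtures:

CREATE TABLE demo_base (id int PRIMARY KEY);
CREATE TABLE demo_shadow (id int);
-- Redirect every insert on demo_base into demo_shadow.
CREATE RULE demo_ins AS ON INSERT TO demo_base
    DO INSTEAD INSERT INTO demo_shadow VALUES (NEW.id);
-- Rejected by the rewriter, matching the error quoted in the hunk above:
INSERT INTO demo_base VALUES (1) ON CONFLICT (id) DO NOTHING;
-- ERROR:  INSERT with ON CONFLICT clause cannot be used with table that has INSERT or UPDATE rules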
-insert into rule_and_refint_t3 values (1, 13, 11, 'row8'); -ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3b_fkey" -DETAIL: Key (id3a, id3b)=(1, 13) is not present in table "rule_and_refint_t1". --- --- disallow dropping a view's rule (bug #5072) --- -create view rules_fooview as select 'rules_foo'::text; -drop rule "_RETURN" on rules_fooview; -ERROR: cannot drop rule _RETURN on view rules_fooview because view rules_fooview requires it -HINT: You can drop view rules_fooview instead. -drop view rules_fooview; --- --- We used to allow converting a table to a view by creating a "_RETURN" --- rule for it, but no more. --- -create table rules_fooview (x int, y text); -create rule "_RETURN" as on select to rules_fooview do instead - select 1 as x, 'aaa'::text as y; -ERROR: relation "rules_fooview" cannot have ON SELECT rules -DETAIL: This operation is not supported for tables. -drop table rules_fooview; --- likewise, converting a partitioned table or partition to view is not allowed -create table rules_fooview (x int, y text) partition by list (x); -create rule "_RETURN" as on select to rules_fooview do instead - select 1 as x, 'aaa'::text as y; -ERROR: relation "rules_fooview" cannot have ON SELECT rules -DETAIL: This operation is not supported for partitioned tables. -create table rules_fooview_part partition of rules_fooview for values in (1); -create rule "_RETURN" as on select to rules_fooview_part do instead - select 1 as x, 'aaa'::text as y; -ERROR: relation "rules_fooview_part" cannot have ON SELECT rules -DETAIL: This operation is not supported for tables. -drop table rules_fooview; --- --- check for planner problems with complex inherited UPDATES --- -create table id (id serial primary key, name text); --- currently, must respecify PKEY for each inherited subtable -create table test_1 (id integer primary key) inherits (id); -NOTICE: merging column "id" with inherited definition -create table test_2 (id integer primary key) inherits (id); -NOTICE: merging column "id" with inherited definition -create table test_3 (id integer primary key) inherits (id); -NOTICE: merging column "id" with inherited definition -insert into test_1 (name) values ('Test 1'); -insert into test_1 (name) values ('Test 2'); -insert into test_2 (name) values ('Test 3'); -insert into test_2 (name) values ('Test 4'); -insert into test_3 (name) values ('Test 5'); -insert into test_3 (name) values ('Test 6'); -create view id_ordered as select * from id order by id; -create rule update_id_ordered as on update to id_ordered - do instead update id set name = new.name where id = old.id; -select * from id_ordered; - id | name -----+-------- - 1 | Test 1 - 2 | Test 2 - 3 | Test 3 - 4 | Test 4 - 5 | Test 5 - 6 | Test 6 -(6 rows) - -update id_ordered set name = 'update 2' where id = 2; -update id_ordered set name = 'update 4' where id = 4; -update id_ordered set name = 'update 5' where id = 5; -select * from id_ordered; - id | name -----+---------- - 1 | Test 1 - 2 | update 2 - 3 | Test 3 - 4 | update 4 - 5 | update 5 - 6 | Test 6 -(6 rows) - -drop table id cascade; -NOTICE: drop cascades to 4 other objects -DETAIL: drop cascades to table test_1 -drop cascades to table test_2 -drop cascades to table test_3 -drop cascades to view id_ordered --- --- check corner case where an entirely-dummy subplan is created by --- constraint exclusion --- -create temp table t1 (a integer primary key); -create temp table t1_1 (check (a >= 0 and a < 10)) inherits (t1); -create 
temp table t1_2 (check (a >= 10 and a < 20)) inherits (t1); -create rule t1_ins_1 as on insert to t1 - where new.a >= 0 and new.a < 10 - do instead - insert into t1_1 values (new.a); -create rule t1_ins_2 as on insert to t1 - where new.a >= 10 and new.a < 20 - do instead - insert into t1_2 values (new.a); -create rule t1_upd_1 as on update to t1 - where old.a >= 0 and old.a < 10 - do instead - update t1_1 set a = new.a where a = old.a; -create rule t1_upd_2 as on update to t1 - where old.a >= 10 and old.a < 20 - do instead - update t1_2 set a = new.a where a = old.a; -set constraint_exclusion = on; -insert into t1 select * from generate_series(5,19,1) g; -update t1 set a = 4 where a = 5; -select * from only t1; - a ---- -(0 rows) - -select * from only t1_1; - a ---- - 6 - 7 - 8 - 9 - 4 -(5 rows) - -select * from only t1_2; - a ----- - 10 - 11 - 12 - 13 - 14 - 15 - 16 - 17 - 18 - 19 -(10 rows) - -reset constraint_exclusion; --- test FOR UPDATE in rules -create table rules_base(f1 int, f2 int); -insert into rules_base values(1,2), (11,12); -create rule r1 as on update to rules_base do instead - select * from rules_base where f1 = 1 for update; -update rules_base set f2 = f2 + 1; - f1 | f2 -----+---- - 1 | 2 -(1 row) - -create or replace rule r1 as on update to rules_base do instead - select * from rules_base where f1 = 11 for update of rules_base; -update rules_base set f2 = f2 + 1; - f1 | f2 -----+---- - 11 | 12 -(1 row) - -create or replace rule r1 as on update to rules_base do instead - select * from rules_base where f1 = 11 for update of old; -- error -ERROR: relation "old" in FOR UPDATE clause not found in FROM clause -LINE 2: select * from rules_base where f1 = 11 for update of old; - ^ -drop table rules_base; --- test various flavors of pg_get_viewdef() -select pg_get_viewdef('shoe'::regclass) as unpretty; - unpretty ------------------------------------------------- - SELECT sh.shoename, + - sh.sh_avail, + - sh.slcolor, + - sh.slminlen, + - (sh.slminlen * un.un_fact) AS slminlen_cm,+ - sh.slmaxlen, + - (sh.slmaxlen * un.un_fact) AS slmaxlen_cm,+ - sh.slunit + - FROM shoe_data sh, + - unit un + - WHERE (sh.slunit = un.un_name); -(1 row) - -select pg_get_viewdef('shoe'::regclass,true) as pretty; - pretty ----------------------------------------------- - SELECT sh.shoename, + - sh.sh_avail, + - sh.slcolor, + - sh.slminlen, + - sh.slminlen * un.un_fact AS slminlen_cm,+ - sh.slmaxlen, + - sh.slmaxlen * un.un_fact AS slmaxlen_cm,+ - sh.slunit + - FROM shoe_data sh, + - unit un + - WHERE sh.slunit = un.un_name; -(1 row) - -select pg_get_viewdef('shoe'::regclass,0) as prettier; - prettier ----------------------------------------------- - SELECT sh.shoename, + - sh.sh_avail, + - sh.slcolor, + - sh.slminlen, + - sh.slminlen * un.un_fact AS slminlen_cm,+ - sh.slmaxlen, + - sh.slmaxlen * un.un_fact AS slmaxlen_cm,+ - sh.slunit + - FROM shoe_data sh, + - unit un + - WHERE sh.slunit = un.un_name; -(1 row) - --- --- check multi-row VALUES in rules --- -create table rules_src(f1 int, f2 int default 0); -create table rules_log(f1 int, f2 int, tag text, id serial); -insert into rules_src values(1,2), (11,12); -create rule r1 as on update to rules_src do also - insert into rules_log values(old.*, 'old', default), (new.*, 'new', default); -update rules_src set f2 = f2 + 1; -update rules_src set f2 = f2 * 10; -select * from rules_src; - f1 | f2 -----+----- - 1 | 30 - 11 | 130 -(2 rows) - -select * from rules_log; - f1 | f2 | tag | id -----+-----+-----+---- - 1 | 2 | old | 1 - 1 | 3 | new | 2 - 11 | 12 | 
old | 3 - 11 | 13 | new | 4 - 1 | 3 | old | 5 - 1 | 30 | new | 6 - 11 | 13 | old | 7 - 11 | 130 | new | 8 -(8 rows) - -create rule r2 as on update to rules_src do also - values(old.*, 'old'), (new.*, 'new'); -update rules_src set f2 = f2 / 10; - column1 | column2 | column3 ----------+---------+--------- - 1 | 30 | old - 1 | 3 | new - 11 | 130 | old - 11 | 13 | new -(4 rows) - -create rule r3 as on insert to rules_src do also - insert into rules_log values(null, null, '-', default), (new.*, 'new', default); -insert into rules_src values(22,23), (33,default); -select * from rules_src; - f1 | f2 -----+---- - 1 | 3 - 11 | 13 - 22 | 23 - 33 | 0 -(4 rows) - -select * from rules_log; - f1 | f2 | tag | id -----+-----+-----+---- - 1 | 2 | old | 1 - 1 | 3 | new | 2 - 11 | 12 | old | 3 - 11 | 13 | new | 4 - 1 | 3 | old | 5 - 1 | 30 | new | 6 - 11 | 13 | old | 7 - 11 | 130 | new | 8 - 1 | 30 | old | 9 - 1 | 3 | new | 10 - 11 | 130 | old | 11 - 11 | 13 | new | 12 - | | - | 13 - 22 | 23 | new | 14 - | | - | 15 - 33 | 0 | new | 16 -(16 rows) - -create rule r4 as on delete to rules_src do notify rules_src_deletion; --- --- Ensure an aliased target relation for insert is correctly deparsed. --- -create rule r5 as on insert to rules_src do instead insert into rules_log AS trgt SELECT NEW.* RETURNING trgt.f1, trgt.f2; -create rule r6 as on update to rules_src do instead UPDATE rules_log AS trgt SET tag = 'updated' WHERE trgt.f1 = new.f1; --- --- Check deparse disambiguation of INSERT/UPDATE/DELETE targets. --- -create rule r7 as on delete to rules_src do instead - with wins as (insert into int4_tbl as trgt values (0) returning *), - wupd as (update int4_tbl trgt set f1 = f1+1 returning *), - wdel as (delete from int4_tbl trgt where f1 = 0 returning *) - insert into rules_log AS trgt select old.* from wins, wupd, wdel - returning trgt.f1, trgt.f2; --- check display of all rules added above -\d+ rules_src - Table "public.rules_src" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - f1 | integer | | | | plain | | - f2 | integer | | | 0 | plain | | -Rules: - r1 AS - ON UPDATE TO rules_src DO INSERT INTO rules_log (f1, f2, tag, id) VALUES (old.f1,old.f2,'old'::text,DEFAULT), (new.f1,new.f2,'new'::text,DEFAULT) - r2 AS - ON UPDATE TO rules_src DO VALUES (old.f1,old.f2,'old'::text), (new.f1,new.f2,'new'::text) - r3 AS - ON INSERT TO rules_src DO INSERT INTO rules_log (f1, f2, tag, id) VALUES (NULL::integer,NULL::integer,'-'::text,DEFAULT), (new.f1,new.f2,'new'::text,DEFAULT) - r4 AS - ON DELETE TO rules_src DO - NOTIFY rules_src_deletion - r5 AS - ON INSERT TO rules_src DO INSTEAD INSERT INTO rules_log AS trgt (f1, f2) SELECT new.f1, - new.f2 - RETURNING trgt.f1, - trgt.f2 - r6 AS - ON UPDATE TO rules_src DO INSTEAD UPDATE rules_log trgt SET tag = 'updated'::text - WHERE trgt.f1 = new.f1 - r7 AS - ON DELETE TO rules_src DO INSTEAD WITH wins AS ( - INSERT INTO int4_tbl AS trgt_1 (f1) - VALUES (0) - RETURNING trgt_1.f1 - ), wupd AS ( - UPDATE int4_tbl trgt_1 SET f1 = trgt_1.f1 + 1 - RETURNING trgt_1.f1 - ), wdel AS ( - DELETE FROM int4_tbl trgt_1 - WHERE trgt_1.f1 = 0 - RETURNING trgt_1.f1 - ) - INSERT INTO rules_log AS trgt (f1, f2) SELECT old.f1, - old.f2 - FROM wins, - wupd, - wdel - RETURNING trgt.f1, - trgt.f2 - --- --- Also check multiassignment deparsing. 
--- -create table rule_t1(f1 int, f2 int); -create table rule_dest(f1 int, f2 int[], tag text); -create rule rr as on update to rule_t1 do instead UPDATE rule_dest trgt - SET (f2[1], f1, tag) = (SELECT new.f2, new.f1, 'updated'::varchar) - WHERE trgt.f1 = new.f1 RETURNING new.*; -\d+ rule_t1 - Table "public.rule_t1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - f1 | integer | | | | plain | | - f2 | integer | | | | plain | | -Rules: - rr AS - ON UPDATE TO rule_t1 DO INSTEAD UPDATE rule_dest trgt SET (f2[1], f1, tag) = ( SELECT new.f2, - new.f1, - 'updated'::character varying AS "varchar") - WHERE trgt.f1 = new.f1 - RETURNING new.f1, - new.f2 - -drop table rule_t1, rule_dest; --- --- Test implicit LATERAL references to old/new in rules --- -CREATE TABLE rule_t1(a int, b text DEFAULT 'xxx', c int); -CREATE VIEW rule_v1 AS SELECT * FROM rule_t1; -CREATE RULE v1_ins AS ON INSERT TO rule_v1 - DO ALSO INSERT INTO rule_t1 - SELECT * FROM (SELECT a + 10 FROM rule_t1 WHERE a = NEW.a) tt; -CREATE RULE v1_upd AS ON UPDATE TO rule_v1 - DO ALSO UPDATE rule_t1 t - SET c = tt.a * 10 - FROM (SELECT a FROM rule_t1 WHERE a = OLD.a) tt WHERE t.a = tt.a; -INSERT INTO rule_v1 VALUES (1, 'a'), (2, 'b'); -UPDATE rule_v1 SET b = upper(b); -SELECT * FROM rule_t1; - a | b | c -----+-----+----- - 1 | A | 10 - 2 | B | 20 - 11 | XXX | 110 - 12 | XXX | 120 -(4 rows) - -DROP TABLE rule_t1 CASCADE; -NOTICE: drop cascades to view rule_v1 --- --- check alter rename rule --- -CREATE TABLE rule_t1 (a INT); -CREATE VIEW rule_v1 AS SELECT * FROM rule_t1; -CREATE RULE InsertRule AS - ON INSERT TO rule_v1 - DO INSTEAD - INSERT INTO rule_t1 VALUES(new.a); -ALTER RULE InsertRule ON rule_v1 RENAME to NewInsertRule; -INSERT INTO rule_v1 VALUES(1); -SELECT * FROM rule_v1; - a ---- - 1 -(1 row) - -\d+ rule_v1 - View "public.rule_v1" - Column | Type | Collation | Nullable | Default | Storage | Description ---------+---------+-----------+----------+---------+---------+------------- - a | integer | | | | plain | -View definition: - SELECT a - FROM rule_t1; -Rules: - newinsertrule AS - ON INSERT TO rule_v1 DO INSTEAD INSERT INTO rule_t1 (a) - VALUES (new.a) - --- --- error conditions for alter rename rule --- -ALTER RULE InsertRule ON rule_v1 RENAME TO NewInsertRule; -- doesn't exist -ERROR: rule "insertrule" for relation "rule_v1" does not exist -ALTER RULE NewInsertRule ON rule_v1 RENAME TO "_RETURN"; -- already exists -ERROR: rule "_RETURN" for relation "rule_v1" already exists -ALTER RULE "_RETURN" ON rule_v1 RENAME TO abc; -- ON SELECT rule cannot be renamed -ERROR: renaming an ON SELECT rule is not allowed -DROP VIEW rule_v1; -DROP TABLE rule_t1; --- --- check display of VALUES in view definitions --- -create view rule_v1 as values(1,2); -\d+ rule_v1 - View "public.rule_v1" - Column | Type | Collation | Nullable | Default | Storage | Description ----------+---------+-----------+----------+---------+---------+------------- - column1 | integer | | | | plain | - column2 | integer | | | | plain | -View definition: - VALUES (1,2); - -alter table rule_v1 rename column column2 to q2; -\d+ rule_v1 - View "public.rule_v1" - Column | Type | Collation | Nullable | Default | Storage | Description ----------+---------+-----------+----------+---------+---------+------------- - column1 | integer | | | | plain | - q2 | integer | | | | plain | -View definition: - SELECT column1, - column2 AS q2 - FROM (VALUES (1,2)) 
"*VALUES*"; - -drop view rule_v1; -create view rule_v1(x) as values(1,2); -\d+ rule_v1 - View "public.rule_v1" - Column | Type | Collation | Nullable | Default | Storage | Description ----------+---------+-----------+----------+---------+---------+------------- - x | integer | | | | plain | - column2 | integer | | | | plain | -View definition: - SELECT column1 AS x, - column2 - FROM (VALUES (1,2)) "*VALUES*"; - -drop view rule_v1; -create view rule_v1(x) as select * from (values(1,2)) v; -\d+ rule_v1 - View "public.rule_v1" - Column | Type | Collation | Nullable | Default | Storage | Description ----------+---------+-----------+----------+---------+---------+------------- - x | integer | | | | plain | - column2 | integer | | | | plain | -View definition: - SELECT column1 AS x, - column2 - FROM ( VALUES (1,2)) v; - -drop view rule_v1; -create view rule_v1(x) as select * from (values(1,2)) v(q,w); -\d+ rule_v1 - View "public.rule_v1" - Column | Type | Collation | Nullable | Default | Storage | Description ---------+---------+-----------+----------+---------+---------+------------- - x | integer | | | | plain | - w | integer | | | | plain | -View definition: - SELECT q AS x, - w - FROM ( VALUES (1,2)) v(q, w); - -drop view rule_v1; --- --- Check DO INSTEAD rules with ON CONFLICT --- -CREATE TABLE hats ( - hat_name char(10) primary key, - hat_color char(10) -- hat color -); -CREATE TABLE hat_data ( - hat_name char(10), - hat_color char(10) -- hat color -); -create unique index hat_data_unique_idx - on hat_data (hat_name COLLATE "C" bpchar_pattern_ops); --- DO NOTHING with ON CONFLICT -CREATE RULE hat_nosert AS ON INSERT TO hats - DO INSTEAD - INSERT INTO hat_data VALUES ( - NEW.hat_name, - NEW.hat_color) - ON CONFLICT (hat_name COLLATE "C" bpchar_pattern_ops) WHERE hat_color = 'green' - DO NOTHING - RETURNING *; -SELECT definition FROM pg_rules WHERE tablename = 'hats' ORDER BY rulename; - definition ---------------------------------------------------------------------------------------------- - CREATE RULE hat_nosert AS + - ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + - VALUES (new.hat_name, new.hat_color) ON CONFLICT(hat_name COLLATE "C" bpchar_pattern_ops)+ - WHERE (hat_color = 'green'::bpchar) DO NOTHING + - RETURNING hat_data.hat_name, + - hat_data.hat_color; -(1 row) - --- Works (projects row) -INSERT INTO hats VALUES ('h7', 'black') RETURNING *; - hat_name | hat_color -------------+------------ - h7 | black -(1 row) - --- Works (does nothing) -INSERT INTO hats VALUES ('h7', 'black') RETURNING *; - hat_name | hat_color -----------+----------- -(0 rows) - -SELECT tablename, rulename, definition FROM pg_rules - WHERE tablename = 'hats'; - tablename | rulename | definition ------------+------------+--------------------------------------------------------------------------------------------- - hats | hat_nosert | CREATE RULE hat_nosert AS + - | | ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + - | | VALUES (new.hat_name, new.hat_color) ON CONFLICT(hat_name COLLATE "C" bpchar_pattern_ops)+ - | | WHERE (hat_color = 'green'::bpchar) DO NOTHING + - | | RETURNING hat_data.hat_name, + - | | hat_data.hat_color; -(1 row) - -DROP RULE hat_nosert ON hats; --- DO NOTHING without ON CONFLICT -CREATE RULE hat_nosert_all AS ON INSERT TO hats - DO INSTEAD - INSERT INTO hat_data VALUES ( - NEW.hat_name, - NEW.hat_color) - ON CONFLICT - DO NOTHING - RETURNING *; -SELECT definition FROM pg_rules WHERE tablename = 'hats' ORDER BY rulename; - 
definition -------------------------------------------------------------------------------------- - CREATE RULE hat_nosert_all AS + - ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color)+ - VALUES (new.hat_name, new.hat_color) ON CONFLICT DO NOTHING + - RETURNING hat_data.hat_name, + - hat_data.hat_color; -(1 row) - -DROP RULE hat_nosert_all ON hats; --- Works (does nothing) -INSERT INTO hats VALUES ('h7', 'black') RETURNING *; - hat_name | hat_color -------------+------------ - h7 | black -(1 row) - --- DO UPDATE with a WHERE clause -CREATE RULE hat_upsert AS ON INSERT TO hats - DO INSTEAD - INSERT INTO hat_data VALUES ( - NEW.hat_name, - NEW.hat_color) - ON CONFLICT (hat_name) - DO UPDATE - SET hat_name = hat_data.hat_name, hat_color = excluded.hat_color - WHERE excluded.hat_color <> 'forbidden' AND hat_data.* != excluded.* - RETURNING *; -SELECT definition FROM pg_rules WHERE tablename = 'hats' ORDER BY rulename; - definition ------------------------------------------------------------------------------------------------------------------------------------------ - CREATE RULE hat_upsert AS + - ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + - VALUES (new.hat_name, new.hat_color) ON CONFLICT(hat_name) DO UPDATE SET hat_name = hat_data.hat_name, hat_color = excluded.hat_color+ - WHERE ((excluded.hat_color <> 'forbidden'::bpchar) AND (hat_data.* <> excluded.*)) + - RETURNING hat_data.hat_name, + - hat_data.hat_color; -(1 row) - --- Works (does upsert) -INSERT INTO hats VALUES ('h8', 'black') RETURNING *; - hat_name | hat_color -------------+------------ - h8 | black -(1 row) - -SELECT * FROM hat_data WHERE hat_name = 'h8'; - hat_name | hat_color -------------+------------ - h8 | black -(1 row) - -INSERT INTO hats VALUES ('h8', 'white') RETURNING *; - hat_name | hat_color -------------+------------ - h8 | white -(1 row) - -SELECT * FROM hat_data WHERE hat_name = 'h8'; - hat_name | hat_color -------------+------------ - h8 | white -(1 row) - -INSERT INTO hats VALUES ('h8', 'forbidden') RETURNING *; - hat_name | hat_color -----------+----------- -(0 rows) - -SELECT * FROM hat_data WHERE hat_name = 'h8'; - hat_name | hat_color -------------+------------ - h8 | white -(1 row) - -SELECT tablename, rulename, definition FROM pg_rules - WHERE tablename = 'hats'; - tablename | rulename | definition ------------+------------+----------------------------------------------------------------------------------------------------------------------------------------- - hats | hat_upsert | CREATE RULE hat_upsert AS + - | | ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + - | | VALUES (new.hat_name, new.hat_color) ON CONFLICT(hat_name) DO UPDATE SET hat_name = hat_data.hat_name, hat_color = excluded.hat_color+ - | | WHERE ((excluded.hat_color <> 'forbidden'::bpchar) AND (hat_data.* <> excluded.*)) + - | | RETURNING hat_data.hat_name, + - | | hat_data.hat_color; -(1 row) - --- ensure explain works for on insert conflict rules -explain (costs off) INSERT INTO hats VALUES ('h8', 'forbidden') RETURNING *; - QUERY PLAN -------------------------------------------------------------------------------------------------- - Insert on hat_data - Conflict Resolution: UPDATE - Conflict Arbiter Indexes: hat_data_unique_idx - Conflict Filter: ((excluded.hat_color <> 'forbidden'::bpchar) AND (hat_data.* <> excluded.*)) - -> Result -(5 rows) - --- ensure upserting into a rule, with a CTE (different offsets!) 
works -WITH data(hat_name, hat_color) AS MATERIALIZED ( - VALUES ('h8', 'green'), - ('h9', 'blue'), - ('h7', 'forbidden') -) -INSERT INTO hats - SELECT * FROM data -RETURNING *; - hat_name | hat_color -------------+------------ - h8 | green - h9 | blue -(2 rows) - -EXPLAIN (costs off) -WITH data(hat_name, hat_color) AS MATERIALIZED ( - VALUES ('h8', 'green'), - ('h9', 'blue'), - ('h7', 'forbidden') -) -INSERT INTO hats - SELECT * FROM data -RETURNING *; - QUERY PLAN -------------------------------------------------------------------------------------------------- - Insert on hat_data - Conflict Resolution: UPDATE - Conflict Arbiter Indexes: hat_data_unique_idx - Conflict Filter: ((excluded.hat_color <> 'forbidden'::bpchar) AND (hat_data.* <> excluded.*)) - CTE data - -> Values Scan on "*VALUES*" - -> CTE Scan on data -(7 rows) - -SELECT * FROM hat_data WHERE hat_name IN ('h8', 'h9', 'h7') ORDER BY hat_name; - hat_name | hat_color -------------+------------ - h7 | black - h8 | green - h9 | blue -(3 rows) - -DROP RULE hat_upsert ON hats; -drop table hats; -drop table hat_data; --- test for pg_get_functiondef properly regurgitating SET parameters --- Note that the function is kept around to stress pg_dump. -CREATE FUNCTION func_with_set_params() RETURNS integer - AS 'select 1;' - LANGUAGE SQL - SET search_path TO PG_CATALOG - SET extra_float_digits TO 2 - SET work_mem TO '4MB' - SET datestyle to iso, mdy - SET local_preload_libraries TO "Mixed/Case", 'c:/''a"/path', '', '0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789' - IMMUTABLE STRICT; -SELECT pg_get_functiondef('func_with_set_params()'::regprocedure); - pg_get_functiondef --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - CREATE OR REPLACE FUNCTION public.func_with_set_params() + - RETURNS integer + - LANGUAGE sql + - IMMUTABLE STRICT + - SET search_path TO 'pg_catalog' + - SET extra_float_digits TO '2' + - SET work_mem TO '4MB' + - SET "DateStyle" TO 'iso, mdy' + - SET local_preload_libraries TO 'Mixed/Case', 'c:/''a"/path', '', '0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789'+ - AS $function$select 1;$function$ + - -(1 row) - --- tests for pg_get_*def with invalid objects -SELECT pg_get_constraintdef(0); - pg_get_constraintdef ----------------------- - -(1 row) - -SELECT pg_get_functiondef(0); - pg_get_functiondef --------------------- - -(1 row) - -SELECT pg_get_indexdef(0); - pg_get_indexdef ------------------ - -(1 row) - -SELECT pg_get_ruledef(0); - pg_get_ruledef ----------------- - -(1 row) - -SELECT pg_get_statisticsobjdef(0); - pg_get_statisticsobjdef -------------------------- - -(1 row) - -SELECT pg_get_triggerdef(0); - pg_get_triggerdef -------------------- - -(1 row) - -SELECT pg_get_viewdef(0); - pg_get_viewdef ----------------- - -(1 row) - -SELECT pg_get_function_arguments(0); - pg_get_function_arguments ---------------------------- - -(1 row) - -SELECT pg_get_function_identity_arguments(0); - pg_get_function_identity_arguments ------------------------------------- - -(1 row) - -SELECT pg_get_function_result(0); - pg_get_function_result ------------------------- - -(1 row) - -SELECT pg_get_function_arg_default(0, 0); - pg_get_function_arg_default ------------------------------ - -(1 row) - -SELECT pg_get_function_arg_default('pg_class'::regclass, 0); - pg_get_function_arg_default 
------------------------------ - -(1 row) - -SELECT pg_get_partkeydef(0); - pg_get_partkeydef -------------------- - -(1 row) - --- test rename for a rule defined on a partitioned table -CREATE TABLE rules_parted_table (a int) PARTITION BY LIST (a); -CREATE TABLE rules_parted_table_1 PARTITION OF rules_parted_table FOR VALUES IN (1); -CREATE RULE rules_parted_table_insert AS ON INSERT to rules_parted_table - DO INSTEAD INSERT INTO rules_parted_table_1 VALUES (NEW.*); -ALTER RULE rules_parted_table_insert ON rules_parted_table RENAME TO rules_parted_table_insert_redirect; -DROP TABLE rules_parted_table; --- --- test MERGE --- -CREATE TABLE rule_merge1 (a int, b text); -CREATE TABLE rule_merge2 (a int, b text); -CREATE RULE rule1 AS ON INSERT TO rule_merge1 - DO INSTEAD INSERT INTO rule_merge2 VALUES (NEW.*); -CREATE RULE rule2 AS ON UPDATE TO rule_merge1 - DO INSTEAD UPDATE rule_merge2 SET a = NEW.a, b = NEW.b - WHERE a = OLD.a; -CREATE RULE rule3 AS ON DELETE TO rule_merge1 - DO INSTEAD DELETE FROM rule_merge2 WHERE a = OLD.a; --- MERGE not supported for table with rules -MERGE INTO rule_merge1 t USING (SELECT 1 AS a) s - ON t.a = s.a - WHEN MATCHED AND t.a < 2 THEN - UPDATE SET b = b || ' updated by merge' - WHEN MATCHED AND t.a > 2 THEN - DELETE - WHEN NOT MATCHED THEN - INSERT VALUES (s.a, ''); -ERROR: cannot execute MERGE on relation "rule_merge1" -DETAIL: MERGE is not supported for relations with rules. --- should be ok with the other table though -MERGE INTO rule_merge2 t USING (SELECT 1 AS a) s - ON t.a = s.a - WHEN MATCHED AND t.a < 2 THEN - UPDATE SET b = b || ' updated by merge' - WHEN MATCHED AND t.a > 2 THEN - DELETE - WHEN NOT MATCHED THEN - INSERT VALUES (s.a, ''); --- also ok if the rules are disabled -ALTER TABLE rule_merge1 DISABLE RULE rule1; -ALTER TABLE rule_merge1 DISABLE RULE rule2; -ALTER TABLE rule_merge1 DISABLE RULE rule3; -MERGE INTO rule_merge1 t USING (SELECT 1 AS a) s - ON t.a = s.a - WHEN MATCHED AND t.a < 2 THEN - UPDATE SET b = b || ' updated by merge' - WHEN MATCHED AND t.a > 2 THEN - DELETE - WHEN NOT MATCHED THEN - INSERT VALUES (s.a, ''); --- test deparsing -CREATE TABLE sf_target(id int, data text, filling int[]); -CREATE FUNCTION merge_sf_test() - RETURNS void - LANGUAGE sql -BEGIN ATOMIC - MERGE INTO sf_target t - USING rule_merge1 s - ON (s.a = t.id) -WHEN MATCHED - AND (s.a + t.id) = 42 - THEN UPDATE SET data = repeat(t.data, s.a) || s.b, id = length(s.b) -WHEN NOT MATCHED - AND (s.b IS NOT NULL) - THEN INSERT (data, id) - VALUES (s.b, s.a) -WHEN MATCHED - AND length(s.b || t.data) > 10 - THEN UPDATE SET data = s.b -WHEN MATCHED - AND s.a > 200 - THEN UPDATE SET filling[s.a] = t.id -WHEN MATCHED - AND s.a > 100 - THEN DELETE -WHEN MATCHED - THEN DO NOTHING -WHEN NOT MATCHED - AND s.a > 200 - THEN INSERT DEFAULT VALUES -WHEN NOT MATCHED - AND s.a > 100 - THEN INSERT (id, data) OVERRIDING USER VALUE - VALUES (s.a, DEFAULT) -WHEN NOT MATCHED - AND s.a > 0 - THEN INSERT - VALUES (s.a, s.b, DEFAULT) -WHEN NOT MATCHED - THEN INSERT (filling[1], id) - VALUES (s.a, s.a); -END; -\sf merge_sf_test -CREATE OR REPLACE FUNCTION public.merge_sf_test() - RETURNS void - LANGUAGE sql -BEGIN ATOMIC - MERGE INTO sf_target t - USING rule_merge1 s - ON (s.a = t.id) - WHEN MATCHED - AND ((s.a + t.id) = 42) - THEN UPDATE SET data = (repeat(t.data, s.a) || s.b), id = length(s.b) - WHEN NOT MATCHED - AND (s.b IS NOT NULL) - THEN INSERT (data, id) - VALUES (s.b, s.a) - WHEN MATCHED - AND (length((s.b || t.data)) > 10) - THEN UPDATE SET data = s.b - WHEN MATCHED - AND (s.a > 
200) - THEN UPDATE SET filling[s.a] = t.id - WHEN MATCHED - AND (s.a > 100) - THEN DELETE - WHEN MATCHED - THEN DO NOTHING - WHEN NOT MATCHED - AND (s.a > 200) - THEN INSERT DEFAULT VALUES - WHEN NOT MATCHED - AND (s.a > 100) - THEN INSERT (id, data) OVERRIDING USER VALUE - VALUES (s.a, DEFAULT) - WHEN NOT MATCHED - AND (s.a > 0) - THEN INSERT (id, data, filling) - VALUES (s.a, s.b, DEFAULT) - WHEN NOT MATCHED - THEN INSERT (filling[1], id) - VALUES (s.a, s.a); -END -DROP FUNCTION merge_sf_test; -DROP TABLE sf_target; --- --- Test enabling/disabling --- -CREATE TABLE ruletest1 (a int); -CREATE TABLE ruletest2 (b int); -CREATE RULE rule1 AS ON INSERT TO ruletest1 - DO INSTEAD INSERT INTO ruletest2 VALUES (NEW.*); -INSERT INTO ruletest1 VALUES (1); -ALTER TABLE ruletest1 DISABLE RULE rule1; -INSERT INTO ruletest1 VALUES (2); -ALTER TABLE ruletest1 ENABLE RULE rule1; -SET session_replication_role = replica; -INSERT INTO ruletest1 VALUES (3); -ALTER TABLE ruletest1 ENABLE REPLICA RULE rule1; -INSERT INTO ruletest1 VALUES (4); -RESET session_replication_role; -INSERT INTO ruletest1 VALUES (5); -SELECT * FROM ruletest1; - a ---- - 2 - 3 - 5 -(3 rows) - -SELECT * FROM ruletest2; - b ---- - 1 - 4 -(2 rows) - -DROP TABLE ruletest1; -DROP TABLE ruletest2; --- --- Test non-SELECT rule on security invoker view. --- Should use view owner's permissions. --- -CREATE USER regress_rule_user1; -CREATE TABLE ruletest_t1 (x int); -CREATE TABLE ruletest_t2 (x int); -CREATE VIEW ruletest_v1 WITH (security_invoker=true) AS - SELECT * FROM ruletest_t1; -GRANT INSERT ON ruletest_v1 TO regress_rule_user1; -CREATE RULE rule1 AS ON INSERT TO ruletest_v1 - DO INSTEAD INSERT INTO ruletest_t2 VALUES (NEW.*); -SET SESSION AUTHORIZATION regress_rule_user1; -INSERT INTO ruletest_v1 VALUES (1); -RESET SESSION AUTHORIZATION; --- Test that main query's relation's permissions are checked before --- the rule action's relation's. -CREATE TABLE ruletest_t3 (x int); -CREATE RULE rule2 AS ON UPDATE TO ruletest_t1 - DO INSTEAD INSERT INTO ruletest_t2 VALUES (OLD.*); -REVOKE ALL ON ruletest_t2 FROM regress_rule_user1; -REVOKE ALL ON ruletest_t3 FROM regress_rule_user1; -ALTER TABLE ruletest_t1 OWNER TO regress_rule_user1; -SET SESSION AUTHORIZATION regress_rule_user1; -UPDATE ruletest_t1 t1 SET x = 0 FROM ruletest_t3 t3 WHERE t1.x = t3.x; -ERROR: permission denied for table ruletest_t3 -RESET SESSION AUTHORIZATION; -SELECT * FROM ruletest_t1; - x ---- -(0 rows) - -SELECT * FROM ruletest_t2; - x ---- - 1 -(1 row) - -DROP VIEW ruletest_v1; -DROP RULE rule2 ON ruletest_t1; -DROP TABLE ruletest_t3; -DROP TABLE ruletest_t2; -DROP TABLE ruletest_t1; -DROP USER regress_rule_user1; +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
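[Editor's sketch] The rules suite never reached its expected end state: the replacement output shows psql being turned away because the server was still replaying WAL and had not yet reached a consistent recovery state. Once the server accepts connections again, that state can be confirmed from SQL; a small sketch using standard monitoring functions (nothing here is specific to the regression database):

-- After reconnecting, verify the server has left recovery.
SELECT pg_is_in_recovery()        AS in_recovery,
       pg_postmaster_start_time() AS postmaster_started;
-- pg_is_in_recovery() returns false once the server has completed
-- recovery and resumed normal operation.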
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/psql.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/psql.out --- /tmp/cirrus-ci-build/src/test/regress/expected/psql.out 2024-03-07 14:25:00.333109000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/psql.out 2024-03-07 14:27:17.149634000 +0000 @@ -1,6725 +1,2 @@ --- --- Tests for psql features that aren't closely connected to any --- specific server features --- --- \set --- fail: invalid name -\set invalid/name foo -invalid variable name: "invalid/name" --- fail: invalid value for special variable -\set AUTOCOMMIT foo -unrecognized value "foo" for "AUTOCOMMIT": Boolean expected -\set FETCH_COUNT foo -invalid value "foo" for "FETCH_COUNT": integer expected --- check handling of built-in boolean variable -\echo :ON_ERROR_ROLLBACK -off -\set ON_ERROR_ROLLBACK -\echo :ON_ERROR_ROLLBACK -on -\set ON_ERROR_ROLLBACK foo -unrecognized value "foo" for "ON_ERROR_ROLLBACK" -Available values are: on, off, interactive. -\echo :ON_ERROR_ROLLBACK -on -\set ON_ERROR_ROLLBACK on -\echo :ON_ERROR_ROLLBACK -on -\unset ON_ERROR_ROLLBACK -\echo :ON_ERROR_ROLLBACK -off --- \g and \gx -SELECT 1 as one, 2 as two \g - one | two ------+----- - 1 | 2 -(1 row) - -\gx --[ RECORD 1 ] -one | 1 -two | 2 - -SELECT 3 as three, 4 as four \gx --[ RECORD 1 ] -three | 3 -four | 4 - -\g - three | four --------+------ - 3 | 4 -(1 row) - --- \gx should work in FETCH_COUNT mode too -\set FETCH_COUNT 1 -SELECT 1 as one, 2 as two \g - one | two ------+----- - 1 | 2 -(1 row) - -\gx --[ RECORD 1 ] -one | 1 -two | 2 - -SELECT 3 as three, 4 as four \gx --[ RECORD 1 ] -three | 3 -four | 4 - -\g - three | four --------+------ - 3 | 4 -(1 row) - -\unset FETCH_COUNT --- \g/\gx with pset options -SELECT 1 as one, 2 as two \g (format=csv csv_fieldsep='\t') -one two -1 2 -\g - one | two ------+----- - 1 | 2 -(1 row) - -SELECT 1 as one, 2 as two \gx (title='foo bar') -foo bar --[ RECORD 1 ] -one | 1 -two | 2 - -\g - one | two ------+----- - 1 | 2 -(1 row) - --- \bind (extended query protocol) -SELECT 1 \bind \g - ?column? ----------- - 1 -(1 row) - -SELECT $1 \bind 'foo' \g - ?column? ----------- - foo -(1 row) - -SELECT $1, $2 \bind 'foo' 'bar' \g - ?column? | ?column? 
-----------+---------- - foo | bar -(1 row) - --- errors --- parse error -SELECT foo \bind \g -ERROR: column "foo" does not exist -LINE 1: SELECT foo - ^ --- tcop error -SELECT 1 \; SELECT 2 \bind \g -ERROR: cannot insert multiple commands into a prepared statement --- bind error -SELECT $1, $2 \bind 'foo' \g -ERROR: bind message supplies 1 parameters, but prepared statement "" requires 2 --- \gset -select 10 as test01, 20 as test02, 'Hello' as test03 \gset pref01_ -\echo :pref01_test01 :pref01_test02 :pref01_test03 -10 20 Hello --- should fail: bad variable name -select 10 as "bad name" -\gset -invalid variable name: "bad name" -select 97 as "EOF", 'ok' as _foo \gset IGNORE -attempt to \gset into specially treated variable "IGNOREEOF" ignored -\echo :IGNORE_foo :IGNOREEOF -ok 0 --- multiple backslash commands in one line -select 1 as x, 2 as y \gset pref01_ \\ \echo :pref01_x -1 -select 3 as x, 4 as y \gset pref01_ \echo :pref01_x \echo :pref01_y -3 -4 -select 5 as x, 6 as y \gset pref01_ \\ \g \echo :pref01_x :pref01_y - x | y ----+--- - 5 | 6 -(1 row) - -5 6 -select 7 as x, 8 as y \g \gset pref01_ \echo :pref01_x :pref01_y - x | y ----+--- - 7 | 8 -(1 row) - -7 8 --- NULL should unset the variable -\set var2 xyz -select 1 as var1, NULL as var2, 3 as var3 \gset -\echo :var1 :var2 :var3 -1 :var2 3 --- \gset requires just one tuple -select 10 as test01, 20 as test02 from generate_series(1,3) \gset -more than one row returned for \gset -select 10 as test01, 20 as test02 from generate_series(1,0) \gset -no rows returned for \gset --- \gset returns no tuples -select a from generate_series(1, 10) as a where a = 11 \gset -no rows returned for \gset -\echo :ROW_COUNT -0 --- \gset should work in FETCH_COUNT mode too -\set FETCH_COUNT 1 -select 1 as x, 2 as y \gset pref01_ \\ \echo :pref01_x -1 -select 3 as x, 4 as y \gset pref01_ \echo :pref01_x \echo :pref01_y -3 -4 -select 10 as test01, 20 as test02 from generate_series(1,3) \gset -more than one row returned for \gset -select 10 as test01, 20 as test02 from generate_series(1,0) \gset -no rows returned for \gset -\unset FETCH_COUNT --- \gdesc -SELECT - NULL AS zero, - 1 AS one, - 2.0 AS two, - 'three' AS three, - $1 AS four, - sin($2) as five, - 'foo'::varchar(4) as six, - CURRENT_DATE AS now -\gdesc - Column | Type ---------+---------------------- - zero | text - one | integer - two | numeric - three | text - four | text - five | double precision - six | character varying(4) - now | date -(8 rows) - --- should work with tuple-returning utilities, such as EXECUTE -PREPARE test AS SELECT 1 AS first, 2 AS second; -EXECUTE test \gdesc - Column | Type ---------+--------- - first | integer - second | integer -(2 rows) - -EXPLAIN EXECUTE test \gdesc - Column | Type -------------+------ - QUERY PLAN | text -(1 row) - --- should fail cleanly - syntax error -SELECT 1 + \gdesc -ERROR: syntax error at end of input -LINE 1: SELECT 1 + - ^ --- check behavior with empty results -SELECT \gdesc -The command has no result, or the result has no columns. -CREATE TABLE bububu(a int) \gdesc -The command has no result, or the result has no columns. --- subject command should not have executed -TABLE bububu; -- fail -ERROR: relation "bububu" does not exist -LINE 1: TABLE bububu; - ^ --- query buffer should remain unchanged -SELECT 1 AS x, 'Hello', 2 AS y, true AS "dirty\name" -\gdesc - Column | Type -------------+--------- - x | integer - ?column? | text - y | integer - dirty\name | boolean -(4 rows) - -\g - x | ?column? 
| y | dirty\name ----+----------+---+------------ - 1 | Hello | 2 | t -(1 row) - --- all on one line -SELECT 3 AS x, 'Hello', 4 AS y, true AS "dirty\name" \gdesc \g - Column | Type -------------+--------- - x | integer - ?column? | text - y | integer - dirty\name | boolean -(4 rows) - - x | ?column? | y | dirty\name ----+----------+---+------------ - 3 | Hello | 4 | t -(1 row) - --- test for server bug #17983 with empty statement in aborted transaction -set search_path = default; -begin; -bogus; -ERROR: syntax error at or near "bogus" -LINE 1: bogus; - ^ -; -\gdesc -The command has no result, or the result has no columns. -rollback; --- \gexec -create temporary table gexec_test(a int, b text, c date, d float); -select format('create index on gexec_test(%I)', attname) -from pg_attribute -where attrelid = 'gexec_test'::regclass and attnum > 0 -order by attnum -\gexec -create index on gexec_test(a) -create index on gexec_test(b) -create index on gexec_test(c) -create index on gexec_test(d) --- \gexec should work in FETCH_COUNT mode too --- (though the fetch limit applies to the executed queries not the meta query) -\set FETCH_COUNT 1 -select 'select 1 as ones', 'select x.y, x.y*2 as double from generate_series(1,4) as x(y)' -union all -select 'drop table gexec_test', NULL -union all -select 'drop table gexec_test', 'select ''2000-01-01''::date as party_over' -\gexec -select 1 as ones - ones ------- - 1 -(1 row) - -select x.y, x.y*2 as double from generate_series(1,4) as x(y) - y | double ----+-------- - 1 | 2 - 2 | 4 - 3 | 6 - 4 | 8 -(4 rows) - -drop table gexec_test -drop table gexec_test -ERROR: table "gexec_test" does not exist -select '2000-01-01'::date as party_over - party_over ------------- - 01-01-2000 -(1 row) - -\unset FETCH_COUNT --- \setenv, \getenv --- ensure MYVAR isn't set -\setenv MYVAR --- in which case, reading it doesn't change the target -\getenv res MYVAR -\echo :res -:res --- now set it -\setenv MYVAR 'environment value' -\getenv res MYVAR -\echo :res -environment value --- show all pset options -\pset -border 1 -columns 0 -csv_fieldsep ',' -expanded off -fieldsep '|' -fieldsep_zero off -footer on -format aligned -linestyle ascii -null '' -numericlocale off -pager 1 -pager_min_lines 0 -recordsep '\n' -recordsep_zero off -tableattr -title -tuples_only off -unicode_border_linestyle single -unicode_column_linestyle single -unicode_header_linestyle single -xheader_width full --- test multi-line headers, wrapping, and newline indicators --- in aligned, unaligned, and wrapped formats -prepare q as select array_to_string(array_agg(repeat('x',2*n)),E'\n') as "ab - -c", array_to_string(array_agg(repeat('y',20-2*n)),E'\n') as "a -bc" from generate_series(1,10) as n(n) group by n>1 order by n>1; -\pset linestyle ascii -\pset expanded off -\pset columns 40 -\pset border 0 -\pset format unaligned -execute q; -ab - -c|a -bc -xx|yyyyyyyyyyyyyyyyyy -xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -(2 rows) -\pset format aligned -execute q; - ab + a + - + bc - c --------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx +yyyyyyyyyyyyyyyy + -xxxxxx +yyyyyyyyyyyyyy + -xxxxxxxx +yyyyyyyyyyyy + -xxxxxxxxxx +yyyyyyyyyy + -xxxxxxxxxxxx +yyyyyyyy + -xxxxxxxxxxxxxx +yyyyyy + -xxxxxxxxxxxxxxxx +yyyy + -xxxxxxxxxxxxxxxxxx +yy + -xxxxxxxxxxxxxxxxxxxx -(2 rows) - -\pset format wrapped -execute q; - ab + a + - + bc - c 
--------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx +yyyyyyyyyyyyyyyy + -xxxxxx +yyyyyyyyyyyyyy + -xxxxxxxx +yyyyyyyyyyyy + -xxxxxxxxxx +yyyyyyyyyy + -xxxxxxxxxxxx +yyyyyyyy + -xxxxxxxxxxxxxx +yyyyyy + -xxxxxxxxxxxxxxxx +yyyy + -xxxxxxxxxxxxxxxxxx +yy + -xxxxxxxxxxxxxxxxxxxx -(2 rows) - -\pset border 1 -\pset format unaligned -execute q; -ab - -c|a -bc -xx|yyyyyyyyyyyyyyyyyy -xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -(2 rows) -\pset format aligned -execute q; - ab +| a + - +| bc - c | -----------------------+-------------------- - xx | yyyyyyyyyyyyyyyyyy - xxxx +| yyyyyyyyyyyyyyyy + - xxxxxx +| yyyyyyyyyyyyyy + - xxxxxxxx +| yyyyyyyyyyyy + - xxxxxxxxxx +| yyyyyyyyyy + - xxxxxxxxxxxx +| yyyyyyyy + - xxxxxxxxxxxxxx +| yyyyyy + - xxxxxxxxxxxxxxxx +| yyyy + - xxxxxxxxxxxxxxxxxx +| yy + - xxxxxxxxxxxxxxxxxxxx | -(2 rows) - -\pset format wrapped -execute q; - ab +| a + - +| bc - c | --------------------+-------------------- - xx | yyyyyyyyyyyyyyyyyy - xxxx +| yyyyyyyyyyyyyyyy + - xxxxxx +| yyyyyyyyyyyyyy + - xxxxxxxx +| yyyyyyyyyyyy + - xxxxxxxxxx +| yyyyyyyyyy + - xxxxxxxxxxxx +| yyyyyyyy + - xxxxxxxxxxxxxx +| yyyyyy + - xxxxxxxxxxxxxxxx +| yyyy + - xxxxxxxxxxxxxxxxx.| yy + -.x +| - xxxxxxxxxxxxxxxxx.| -.xxx | -(2 rows) - -\pset border 2 -\pset format unaligned -execute q; -ab - -c|a -bc -xx|yyyyyyyyyyyyyyyyyy -xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -(2 rows) -\pset format aligned -execute q; -+----------------------+--------------------+ -| ab +| a +| -| +| bc | -| c | | -+----------------------+--------------------+ -| xx | yyyyyyyyyyyyyyyyyy | -| xxxx +| yyyyyyyyyyyyyyyy +| -| xxxxxx +| yyyyyyyyyyyyyy +| -| xxxxxxxx +| yyyyyyyyyyyy +| -| xxxxxxxxxx +| yyyyyyyyyy +| -| xxxxxxxxxxxx +| yyyyyyyy +| -| xxxxxxxxxxxxxx +| yyyyyy +| -| xxxxxxxxxxxxxxxx +| yyyy +| -| xxxxxxxxxxxxxxxxxx +| yy +| -| xxxxxxxxxxxxxxxxxxxx | | -+----------------------+--------------------+ -(2 rows) - -\pset format wrapped -execute q; -+-----------------+--------------------+ -| ab +| a +| -| +| bc | -| c | | -+-----------------+--------------------+ -| xx | yyyyyyyyyyyyyyyyyy | -| xxxx +| yyyyyyyyyyyyyyyy +| -| xxxxxx +| yyyyyyyyyyyyyy +| -| xxxxxxxx +| yyyyyyyyyyyy +| -| xxxxxxxxxx +| yyyyyyyyyy +| -| xxxxxxxxxxxx +| yyyyyyyy +| -| xxxxxxxxxxxxxx +| yyyyyy +| -| xxxxxxxxxxxxxxx.| yyyy +| -|.x +| yy +| -| xxxxxxxxxxxxxxx.| | -|.xxx +| | -| xxxxxxxxxxxxxxx.| | -|.xxxxx | | -+-----------------+--------------------+ -(2 rows) - -\pset expanded on -\pset columns 20 -\pset border 0 -\pset format unaligned -execute q; -ab - -c|xx -a -bc|yyyyyyyyyyyyyyyyyy - -ab - -c|xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx -a -bc|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -\pset format aligned -execute q; -* Record 1 -ab+ xx - + -c -a + yyyyyyyyyyyyyyyyyy -bc -* Record 2 -ab+ xxxx + - + xxxxxx + -c xxxxxxxx + - xxxxxxxxxx + - xxxxxxxxxxxx + - xxxxxxxxxxxxxx + - xxxxxxxxxxxxxxxx + - xxxxxxxxxxxxxxxxxx + - xxxxxxxxxxxxxxxxxxxx -a + yyyyyyyyyyyyyyyy + -bc yyyyyyyyyyyyyy + - yyyyyyyyyyyy + - yyyyyyyyyy + - yyyyyyyy + - yyyyyy + - yyyy + - yy + - - -\pset 
format wrapped -execute q; -* Record 1 -ab+ xx - + -c -a + yyyyyyyyyyyyyyy. -bc .yyy -* Record 2 -ab+ xxxx + - + xxxxxx + -c xxxxxxxx + - xxxxxxxxxx + - xxxxxxxxxxxx + - xxxxxxxxxxxxxx + - xxxxxxxxxxxxxxx. - .x + - xxxxxxxxxxxxxxx. - .xxx + - xxxxxxxxxxxxxxx. - .xxxxx -a + yyyyyyyyyyyyyyy. -bc .y + - yyyyyyyyyyyyyy + - yyyyyyyyyyyy + - yyyyyyyyyy + - yyyyyyyy + - yyyyyy + - yyyy + - yy + - - -\pset border 1 -\pset format unaligned -execute q; -ab - -c|xx -a -bc|yyyyyyyyyyyyyyyyyy - -ab - -c|xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx -a -bc|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -\pset format aligned -execute q; --[ RECORD 1 ]------------ -ab+| xx - +| -c | -a +| yyyyyyyyyyyyyyyyyy -bc | --[ RECORD 2 ]------------ -ab+| xxxx + - +| xxxxxx + -c | xxxxxxxx + - | xxxxxxxxxx + - | xxxxxxxxxxxx + - | xxxxxxxxxxxxxx + - | xxxxxxxxxxxxxxxx + - | xxxxxxxxxxxxxxxxxx + - | xxxxxxxxxxxxxxxxxxxx -a +| yyyyyyyyyyyyyyyy + -bc | yyyyyyyyyyyyyy + - | yyyyyyyyyyyy + - | yyyyyyyyyy + - | yyyyyyyy + - | yyyyyy + - | yyyy + - | yy + - | - -\pset format wrapped -execute q; --[ RECORD 1 ]------ -ab+| xx - +| -c | -a +| yyyyyyyyyyyyyy. -bc |.yyyy --[ RECORD 2 ]------ -ab+| xxxx + - +| xxxxxx + -c | xxxxxxxx + - | xxxxxxxxxx + - | xxxxxxxxxxxx + - | xxxxxxxxxxxxxx+ - | xxxxxxxxxxxxxx. - |.xx + - | xxxxxxxxxxxxxx. - |.xxxx + - | xxxxxxxxxxxxxx. - |.xxxxxx -a +| yyyyyyyyyyyyyy. -bc |.yy + - | yyyyyyyyyyyyyy+ - | yyyyyyyyyyyy + - | yyyyyyyyyy + - | yyyyyyyy + - | yyyyyy + - | yyyy + - | yy + - | - -\pset border 2 -\pset format unaligned -execute q; -ab - -c|xx -a -bc|yyyyyyyyyyyyyyyyyy - -ab - -c|xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx -a -bc|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -\pset format aligned -execute q; -+-[ RECORD 1 ]--------------+ -| ab+| xx | -| +| | -| c | | -| a +| yyyyyyyyyyyyyyyyyy | -| bc | | -+-[ RECORD 2 ]--------------+ -| ab+| xxxx +| -| +| xxxxxx +| -| c | xxxxxxxx +| -| | xxxxxxxxxx +| -| | xxxxxxxxxxxx +| -| | xxxxxxxxxxxxxx +| -| | xxxxxxxxxxxxxxxx +| -| | xxxxxxxxxxxxxxxxxx +| -| | xxxxxxxxxxxxxxxxxxxx | -| a +| yyyyyyyyyyyyyyyy +| -| bc | yyyyyyyyyyyyyy +| -| | yyyyyyyyyyyy +| -| | yyyyyyyyyy +| -| | yyyyyyyy +| -| | yyyyyy +| -| | yyyy +| -| | yy +| -| | | -+----+----------------------+ - -\pset format wrapped -execute q; -+-[ RECORD 1 ]-----+ -| ab+| xx | -| +| | -| c | | -| a +| yyyyyyyyyyy.| -| bc |.yyyyyyy | -+-[ RECORD 2 ]-----+ -| ab+| xxxx +| -| +| xxxxxx +| -| c | xxxxxxxx +| -| | xxxxxxxxxx +| -| | xxxxxxxxxxx.| -| |.x +| -| | xxxxxxxxxxx.| -| |.xxx +| -| | xxxxxxxxxxx.| -| |.xxxxx +| -| | xxxxxxxxxxx.| -| |.xxxxxxx +| -| | xxxxxxxxxxx.| -| |.xxxxxxxxx | -| a +| yyyyyyyyyyy.| -| bc |.yyyyy +| -| | yyyyyyyyyyy.| -| |.yyy +| -| | yyyyyyyyyyy.| -| |.y +| -| | yyyyyyyyyy +| -| | yyyyyyyy +| -| | yyyyyy +| -| | yyyy +| -| | yy +| -| | | -+----+-------------+ - -\pset linestyle old-ascii -\pset expanded off -\pset columns 40 -\pset border 0 -\pset format unaligned -execute q; -ab - -c|a -bc -xx|yyyyyyyyyyyyyyyyyy -xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -(2 rows) -\pset format aligned -execute q; - ab a - + bc - c + --------------------- 
------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx yyyyyyyyyyyyyyyy -xxxxxx yyyyyyyyyyyyyy -xxxxxxxx yyyyyyyyyyyy -xxxxxxxxxx yyyyyyyyyy -xxxxxxxxxxxx yyyyyyyy -xxxxxxxxxxxxxx yyyyyy -xxxxxxxxxxxxxxxx yyyy -xxxxxxxxxxxxxxxxxx yy -xxxxxxxxxxxxxxxxxxxx -(2 rows) - -\pset format wrapped -execute q; - ab a - + bc - c + --------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx yyyyyyyyyyyyyyyy -xxxxxx yyyyyyyyyyyyyy -xxxxxxxx yyyyyyyyyyyy -xxxxxxxxxx yyyyyyyyyy -xxxxxxxxxxxx yyyyyyyy -xxxxxxxxxxxxxx yyyyyy -xxxxxxxxxxxxxxxx yyyy -xxxxxxxxxxxxxxxxxx yy -xxxxxxxxxxxxxxxxxxxx -(2 rows) - -\pset border 1 -\pset format unaligned -execute q; -ab - -c|a -bc -xx|yyyyyyyyyyyyyyyyyy -xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -(2 rows) -\pset format aligned -execute q; - ab | a -+ |+ bc -+ c |+ -----------------------+-------------------- - xx | yyyyyyyyyyyyyyyyyy - xxxx | yyyyyyyyyyyyyyyy - xxxxxx : yyyyyyyyyyyyyy - xxxxxxxx : yyyyyyyyyyyy - xxxxxxxxxx : yyyyyyyyyy - xxxxxxxxxxxx : yyyyyyyy - xxxxxxxxxxxxxx : yyyyyy - xxxxxxxxxxxxxxxx : yyyy - xxxxxxxxxxxxxxxxxx : yy - xxxxxxxxxxxxxxxxxxxx : -(2 rows) - -\pset format wrapped -execute q; - ab | a -+ |+ bc -+ c |+ --------------------+-------------------- - xx | yyyyyyyyyyyyyyyyyy - xxxx | yyyyyyyyyyyyyyyy - xxxxxx : yyyyyyyyyyyyyy - xxxxxxxx : yyyyyyyyyyyy - xxxxxxxxxx : yyyyyyyyyy - xxxxxxxxxxxx : yyyyyyyy - xxxxxxxxxxxxxx : yyyyyy - xxxxxxxxxxxxxxxx : yyyy - xxxxxxxxxxxxxxxxx : yy - x : - xxxxxxxxxxxxxxxxx - xxx -(2 rows) - -\pset border 2 -\pset format unaligned -execute q; -ab - -c|a -bc -xx|yyyyyyyyyyyyyyyyyy -xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -(2 rows) -\pset format aligned -execute q; -+----------------------+--------------------+ -| ab | a | -|+ |+ bc | -|+ c |+ | -+----------------------+--------------------+ -| xx | yyyyyyyyyyyyyyyyyy | -| xxxx | yyyyyyyyyyyyyyyy | -| xxxxxx : yyyyyyyyyyyyyy | -| xxxxxxxx : yyyyyyyyyyyy | -| xxxxxxxxxx : yyyyyyyyyy | -| xxxxxxxxxxxx : yyyyyyyy | -| xxxxxxxxxxxxxx : yyyyyy | -| xxxxxxxxxxxxxxxx : yyyy | -| xxxxxxxxxxxxxxxxxx : yy | -| xxxxxxxxxxxxxxxxxxxx : | -+----------------------+--------------------+ -(2 rows) - -\pset format wrapped -execute q; -+-----------------+--------------------+ -| ab | a | -|+ |+ bc | -|+ c |+ | -+-----------------+--------------------+ -| xx | yyyyyyyyyyyyyyyyyy | -| xxxx | yyyyyyyyyyyyyyyy | -| xxxxxx : yyyyyyyyyyyyyy | -| xxxxxxxx : yyyyyyyyyyyy | -| xxxxxxxxxx : yyyyyyyyyy | -| xxxxxxxxxxxx : yyyyyyyy | -| xxxxxxxxxxxxxx : yyyyyy | -| xxxxxxxxxxxxxxx : yyyy | -| x : yy | -| xxxxxxxxxxxxxxx : | -| xxx | -| xxxxxxxxxxxxxxx | -| xxxxx | -+-----------------+--------------------+ -(2 rows) - -\pset expanded on -\pset columns 20 -\pset border 0 -\pset format unaligned -execute q; -ab - -c|xx -a -bc|yyyyyyyyyyyyyyyyyy - -ab - -c|xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx -a -bc|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -\pset format aligned -execute q; -* Record 1 - ab xx -+ -+c - a yyyyyyyyyyyyyyyyyy -+bc -* Record 2 - ab xxxx -+ xxxxxx -+c xxxxxxxx - xxxxxxxxxx - xxxxxxxxxxxx - xxxxxxxxxxxxxx - 
xxxxxxxxxxxxxxxx - xxxxxxxxxxxxxxxxxx - xxxxxxxxxxxxxxxxxxxx - a yyyyyyyyyyyyyyyy -+bc yyyyyyyyyyyyyy - yyyyyyyyyyyy - yyyyyyyyyy - yyyyyyyy - yyyyyy - yyyy - yy - - -\pset format wrapped -execute q; -* Record 1 - ab xx -+ -+c - a yyyyyyyyyyyyyyyy -+bc yy -* Record 2 - ab xxxx -+ xxxxxx -+c xxxxxxxx - xxxxxxxxxx - xxxxxxxxxxxx - xxxxxxxxxxxxxx - xxxxxxxxxxxxxxxx - xxxxxxxxxxxxxxxx - xx - xxxxxxxxxxxxxxxx - xxxx - a yyyyyyyyyyyyyyyy -+bc yyyyyyyyyyyyyy - yyyyyyyyyyyy - yyyyyyyyyy - yyyyyyyy - yyyyyy - yyyy - yy - - -\pset border 1 -\pset format unaligned -execute q; -ab - -c|xx -a -bc|yyyyyyyyyyyyyyyyyy - -ab - -c|xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx -a -bc|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -\pset format aligned -execute q; --[ RECORD 1 ]------------- - ab | xx -+ ; -+c ; - a | yyyyyyyyyyyyyyyyyy -+bc ; --[ RECORD 2 ]------------- - ab | xxxx -+ : xxxxxx -+c : xxxxxxxx - : xxxxxxxxxx - : xxxxxxxxxxxx - : xxxxxxxxxxxxxx - : xxxxxxxxxxxxxxxx - : xxxxxxxxxxxxxxxxxx - : xxxxxxxxxxxxxxxxxxxx - a | yyyyyyyyyyyyyyyy -+bc : yyyyyyyyyyyyyy - : yyyyyyyyyyyy - : yyyyyyyyyy - : yyyyyyyy - : yyyyyy - : yyyy - : yy - : - -\pset format wrapped -execute q; --[ RECORD 1 ]------- - ab | xx -+ ; -+c ; - a | yyyyyyyyyyyyyy -+bc ; yyyy --[ RECORD 2 ]------- - ab | xxxx -+ : xxxxxx -+c : xxxxxxxx - : xxxxxxxxxx - : xxxxxxxxxxxx - : xxxxxxxxxxxxxx - : xxxxxxxxxxxxxx - ; xx - : xxxxxxxxxxxxxx - ; xxxx - : xxxxxxxxxxxxxx - ; xxxxxx - a | yyyyyyyyyyyyyy -+bc ; yy - : yyyyyyyyyyyyyy - : yyyyyyyyyyyy - : yyyyyyyyyy - : yyyyyyyy - : yyyyyy - : yyyy - : yy - : - -\pset border 2 -\pset format unaligned -execute q; -ab - -c|xx -a -bc|yyyyyyyyyyyyyyyyyy - -ab - -c|xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx -a -bc|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -\pset format aligned -execute q; -+-[ RECORD 1 ]--------------+ -| ab | xx | -|+ ; | -|+c ; | -| a | yyyyyyyyyyyyyyyyyy | -|+bc ; | -+-[ RECORD 2 ]--------------+ -| ab | xxxx | -|+ : xxxxxx | -|+c : xxxxxxxx | -| : xxxxxxxxxx | -| : xxxxxxxxxxxx | -| : xxxxxxxxxxxxxx | -| : xxxxxxxxxxxxxxxx | -| : xxxxxxxxxxxxxxxxxx | -| : xxxxxxxxxxxxxxxxxxxx | -| a | yyyyyyyyyyyyyyyy | -|+bc : yyyyyyyyyyyyyy | -| : yyyyyyyyyyyy | -| : yyyyyyyyyy | -| : yyyyyyyy | -| : yyyyyy | -| : yyyy | -| : yy | -| : | -+----+----------------------+ - -\pset format wrapped -execute q; -+-[ RECORD 1 ]-----+ -| ab | xx | -|+ ; | -|+c ; | -| a | yyyyyyyyyyy | -|+bc ; yyyyyyy | -+-[ RECORD 2 ]-----+ -| ab | xxxx | -|+ : xxxxxx | -|+c : xxxxxxxx | -| : xxxxxxxxxx | -| : xxxxxxxxxxx | -| ; x | -| : xxxxxxxxxxx | -| ; xxx | -| : xxxxxxxxxxx | -| ; xxxxx | -| : xxxxxxxxxxx | -| ; xxxxxxx | -| : xxxxxxxxxxx | -| ; xxxxxxxxx | -| a | yyyyyyyyyyy | -|+bc ; yyyyy | -| : yyyyyyyyyyy | -| ; yyy | -| : yyyyyyyyyyy | -| ; y | -| : yyyyyyyyyy | -| : yyyyyyyy | -| : yyyyyy | -| : yyyy | -| : yy | -| : | -+----+-------------+ - -deallocate q; --- test single-line header and data -prepare q as select repeat('x',2*n) as "0123456789abcdef", repeat('y',20-2*n) as "0123456789" from generate_series(1,10) as n; -\pset linestyle ascii -\pset expanded off -\pset columns 40 -\pset border 0 -\pset format unaligned -execute q; -0123456789abcdef|0123456789 -xx|yyyyyyyyyyyyyyyyyy -xxxx|yyyyyyyyyyyyyyyy -xxxxxx|yyyyyyyyyyyyyy -xxxxxxxx|yyyyyyyyyyyy 
-xxxxxxxxxx|yyyyyyyyyy -xxxxxxxxxxxx|yyyyyyyy -xxxxxxxxxxxxxx|yyyyyy -xxxxxxxxxxxxxxxx|yyyy -xxxxxxxxxxxxxxxxxx|yy -xxxxxxxxxxxxxxxxxxxx| -(10 rows) -\pset format aligned -execute q; - 0123456789abcdef 0123456789 --------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx yyyyyyyyyyyyyyyy -xxxxxx yyyyyyyyyyyyyy -xxxxxxxx yyyyyyyyyyyy -xxxxxxxxxx yyyyyyyyyy -xxxxxxxxxxxx yyyyyyyy -xxxxxxxxxxxxxx yyyyyy -xxxxxxxxxxxxxxxx yyyy -xxxxxxxxxxxxxxxxxx yy -xxxxxxxxxxxxxxxxxxxx -(10 rows) - -\pset format wrapped -execute q; - 0123456789abcdef 0123456789 --------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx yyyyyyyyyyyyyyyy -xxxxxx yyyyyyyyyyyyyy -xxxxxxxx yyyyyyyyyyyy -xxxxxxxxxx yyyyyyyyyy -xxxxxxxxxxxx yyyyyyyy -xxxxxxxxxxxxxx yyyyyy -xxxxxxxxxxxxxxxx yyyy -xxxxxxxxxxxxxxxxxx yy -xxxxxxxxxxxxxxxxxxxx -(10 rows) - -\pset border 1 -\pset format unaligned -execute q; -0123456789abcdef|0123456789 -xx|yyyyyyyyyyyyyyyyyy -xxxx|yyyyyyyyyyyyyyyy -xxxxxx|yyyyyyyyyyyyyy -xxxxxxxx|yyyyyyyyyyyy -xxxxxxxxxx|yyyyyyyyyy -xxxxxxxxxxxx|yyyyyyyy -xxxxxxxxxxxxxx|yyyyyy -xxxxxxxxxxxxxxxx|yyyy -xxxxxxxxxxxxxxxxxx|yy -xxxxxxxxxxxxxxxxxxxx| -(10 rows) -\pset format aligned -execute q; - 0123456789abcdef | 0123456789 -----------------------+-------------------- - xx | yyyyyyyyyyyyyyyyyy - xxxx | yyyyyyyyyyyyyyyy - xxxxxx | yyyyyyyyyyyyyy - xxxxxxxx | yyyyyyyyyyyy - xxxxxxxxxx | yyyyyyyyyy - xxxxxxxxxxxx | yyyyyyyy - xxxxxxxxxxxxxx | yyyyyy - xxxxxxxxxxxxxxxx | yyyy - xxxxxxxxxxxxxxxxxx | yy - xxxxxxxxxxxxxxxxxxxx | -(10 rows) - -\pset format wrapped -execute q; - 0123456789abcdef | 0123456789 ----------------------+------------------ - xx | yyyyyyyyyyyyyyyy. - |.yy - xxxx | yyyyyyyyyyyyyyyy - xxxxxx | yyyyyyyyyyyyyy - xxxxxxxx | yyyyyyyyyyyy - xxxxxxxxxx | yyyyyyyyyy - xxxxxxxxxxxx | yyyyyyyy - xxxxxxxxxxxxxx | yyyyyy - xxxxxxxxxxxxxxxx | yyyy - xxxxxxxxxxxxxxxxxx | yy - xxxxxxxxxxxxxxxxxxx.| -.x | -(10 rows) - -\pset border 2 -\pset format unaligned -execute q; -0123456789abcdef|0123456789 -xx|yyyyyyyyyyyyyyyyyy -xxxx|yyyyyyyyyyyyyyyy -xxxxxx|yyyyyyyyyyyyyy -xxxxxxxx|yyyyyyyyyyyy -xxxxxxxxxx|yyyyyyyyyy -xxxxxxxxxxxx|yyyyyyyy -xxxxxxxxxxxxxx|yyyyyy -xxxxxxxxxxxxxxxx|yyyy -xxxxxxxxxxxxxxxxxx|yy -xxxxxxxxxxxxxxxxxxxx| -(10 rows) -\pset format aligned -execute q; -+----------------------+--------------------+ -| 0123456789abcdef | 0123456789 | -+----------------------+--------------------+ -| xx | yyyyyyyyyyyyyyyyyy | -| xxxx | yyyyyyyyyyyyyyyy | -| xxxxxx | yyyyyyyyyyyyyy | -| xxxxxxxx | yyyyyyyyyyyy | -| xxxxxxxxxx | yyyyyyyyyy | -| xxxxxxxxxxxx | yyyyyyyy | -| xxxxxxxxxxxxxx | yyyyyy | -| xxxxxxxxxxxxxxxx | yyyy | -| xxxxxxxxxxxxxxxxxx | yy | -| xxxxxxxxxxxxxxxxxxxx | | -+----------------------+--------------------+ -(10 rows) - -\pset format wrapped -execute q; -+--------------------+-----------------+ -| 0123456789abcdef | 0123456789 | -+--------------------+-----------------+ -| xx | yyyyyyyyyyyyyyy.| -| |.yyy | -| xxxx | yyyyyyyyyyyyyyy.| -| |.y | -| xxxxxx | yyyyyyyyyyyyyy | -| xxxxxxxx | yyyyyyyyyyyy | -| xxxxxxxxxx | yyyyyyyyyy | -| xxxxxxxxxxxx | yyyyyyyy | -| xxxxxxxxxxxxxx | yyyyyy | -| xxxxxxxxxxxxxxxx | yyyy | -| xxxxxxxxxxxxxxxxxx | yy | -| xxxxxxxxxxxxxxxxxx.| | -|.xx | | -+--------------------+-----------------+ -(10 rows) - -\pset expanded on -\pset columns 30 -\pset border 0 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - 
-0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; -* Record 1 -0123456789abcdef xx -0123456789 yyyyyyyyyyyyyyyyyy -* Record 2 -0123456789abcdef xxxx -0123456789 yyyyyyyyyyyyyyyy -* Record 3 -0123456789abcdef xxxxxx -0123456789 yyyyyyyyyyyyyy -* Record 4 -0123456789abcdef xxxxxxxx -0123456789 yyyyyyyyyyyy -* Record 5 -0123456789abcdef xxxxxxxxxx -0123456789 yyyyyyyyyy -* Record 6 -0123456789abcdef xxxxxxxxxxxx -0123456789 yyyyyyyy -* Record 7 -0123456789abcdef xxxxxxxxxxxxxx -0123456789 yyyyyy -* Record 8 -0123456789abcdef xxxxxxxxxxxxxxxx -0123456789 yyyy -* Record 9 -0123456789abcdef xxxxxxxxxxxxxxxxxx -0123456789 yy -* Record 10 -0123456789abcdef xxxxxxxxxxxxxxxxxxxx -0123456789 - -\pset format wrapped -execute q; -* Record 1 -0123456789abcdef xx -0123456789 yyyyyyyyyyyy. - .yyyyyy -* Record 2 -0123456789abcdef xxxx -0123456789 yyyyyyyyyyyy. - .yyyy -* Record 3 -0123456789abcdef xxxxxx -0123456789 yyyyyyyyyyyy. - .yy -* Record 4 -0123456789abcdef xxxxxxxx -0123456789 yyyyyyyyyyyy -* Record 5 -0123456789abcdef xxxxxxxxxx -0123456789 yyyyyyyyyy -* Record 6 -0123456789abcdef xxxxxxxxxxxx -0123456789 yyyyyyyy -* Record 7 -0123456789abcdef xxxxxxxxxxxx. - .xx -0123456789 yyyyyy -* Record 8 -0123456789abcdef xxxxxxxxxxxx. - .xxxx -0123456789 yyyy -* Record 9 -0123456789abcdef xxxxxxxxxxxx. - .xxxxxx -0123456789 yy -* Record 10 -0123456789abcdef xxxxxxxxxxxx. - .xxxxxxxx -0123456789 - -\pset border 1 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; --[ RECORD 1 ]----+--------------------- -0123456789abcdef | xx -0123456789 | yyyyyyyyyyyyyyyyyy --[ RECORD 2 ]----+--------------------- -0123456789abcdef | xxxx -0123456789 | yyyyyyyyyyyyyyyy --[ RECORD 3 ]----+--------------------- -0123456789abcdef | xxxxxx -0123456789 | yyyyyyyyyyyyyy --[ RECORD 4 ]----+--------------------- -0123456789abcdef | xxxxxxxx -0123456789 | yyyyyyyyyyyy --[ RECORD 5 ]----+--------------------- -0123456789abcdef | xxxxxxxxxx -0123456789 | yyyyyyyyyy --[ RECORD 6 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxx -0123456789 | yyyyyyyy --[ RECORD 7 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxx -0123456789 | yyyyyy --[ RECORD 8 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxx -0123456789 | yyyy --[ RECORD 9 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxx -0123456789 | yy --[ RECORD 10 ]---+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxxxx -0123456789 | - -\pset format wrapped -execute q; --[ RECORD 1 ]----+----------- -0123456789abcdef | xx -0123456789 | yyyyyyyyyy. 
- |.yyyyyyyy --[ RECORD 2 ]----+----------- -0123456789abcdef | xxxx -0123456789 | yyyyyyyyyy. - |.yyyyyy --[ RECORD 3 ]----+----------- -0123456789abcdef | xxxxxx -0123456789 | yyyyyyyyyy. - |.yyyy --[ RECORD 4 ]----+----------- -0123456789abcdef | xxxxxxxx -0123456789 | yyyyyyyyyy. - |.yy --[ RECORD 5 ]----+----------- -0123456789abcdef | xxxxxxxxxx -0123456789 | yyyyyyyyyy --[ RECORD 6 ]----+----------- -0123456789abcdef | xxxxxxxxxx. - |.xx -0123456789 | yyyyyyyy --[ RECORD 7 ]----+----------- -0123456789abcdef | xxxxxxxxxx. - |.xxxx -0123456789 | yyyyyy --[ RECORD 8 ]----+----------- -0123456789abcdef | xxxxxxxxxx. - |.xxxxxx -0123456789 | yyyy --[ RECORD 9 ]----+----------- -0123456789abcdef | xxxxxxxxxx. - |.xxxxxxxx -0123456789 | yy --[ RECORD 10 ]---+----------- -0123456789abcdef | xxxxxxxxxx. - |.xxxxxxxxxx -0123456789 | - -\pset border 2 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; -+-[ RECORD 1 ]-----+----------------------+ -| 0123456789abcdef | xx | -| 0123456789 | yyyyyyyyyyyyyyyyyy | -+-[ RECORD 2 ]-----+----------------------+ -| 0123456789abcdef | xxxx | -| 0123456789 | yyyyyyyyyyyyyyyy | -+-[ RECORD 3 ]-----+----------------------+ -| 0123456789abcdef | xxxxxx | -| 0123456789 | yyyyyyyyyyyyyy | -+-[ RECORD 4 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxx | -| 0123456789 | yyyyyyyyyyyy | -+-[ RECORD 5 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxx | -| 0123456789 | yyyyyyyyyy | -+-[ RECORD 6 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxx | -| 0123456789 | yyyyyyyy | -+-[ RECORD 7 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxx | -| 0123456789 | yyyyyy | -+-[ RECORD 8 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxx | -| 0123456789 | yyyy | -+-[ RECORD 9 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxxx | -| 0123456789 | yy | -+-[ RECORD 10 ]----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxxxxx | -| 0123456789 | | -+------------------+----------------------+ - -\pset format wrapped -execute q; -+-[ RECORD 1 ]-----+---------+ -| 0123456789abcdef | xx | -| 0123456789 | yyyyyyy.| -| |.yyyyyyy.| -| |.yyyy | -+-[ RECORD 2 ]-----+---------+ -| 0123456789abcdef | xxxx | -| 0123456789 | yyyyyyy.| -| |.yyyyyyy.| -| |.yy | -+-[ RECORD 3 ]-----+---------+ -| 0123456789abcdef | xxxxxx | -| 0123456789 | yyyyyyy.| -| |.yyyyyyy | -+-[ RECORD 4 ]-----+---------+ -| 0123456789abcdef | xxxxxxx.| -| |.x | -| 0123456789 | yyyyyyy.| -| |.yyyyy | -+-[ RECORD 5 ]-----+---------+ -| 0123456789abcdef | xxxxxxx.| -| |.xxx | -| 0123456789 | yyyyyyy.| -| |.yyy | -+-[ RECORD 6 ]-----+---------+ -| 0123456789abcdef | xxxxxxx.| -| |.xxxxx | -| 0123456789 | yyyyyyy.| -| |.y | -+-[ RECORD 7 ]-----+---------+ -| 0123456789abcdef | xxxxxxx.| -| |.xxxxxxx | -| 0123456789 | yyyyyy | -+-[ RECORD 8 ]-----+---------+ -| 0123456789abcdef | xxxxxxx.| -| |.xxxxxxx.| -| |.xx | -| 0123456789 | yyyy | -+-[ RECORD 9 ]-----+---------+ -| 
0123456789abcdef | xxxxxxx.| -| |.xxxxxxx.| -| |.xxxx | -| 0123456789 | yy | -+-[ RECORD 10 ]----+---------+ -| 0123456789abcdef | xxxxxxx.| -| |.xxxxxxx.| -| |.xxxxxx | -| 0123456789 | | -+------------------+---------+ - -\pset expanded on -\pset columns 20 -\pset border 0 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; -* Record 1 -0123456789abcdef xx -0123456789 yyyyyyyyyyyyyyyyyy -* Record 2 -0123456789abcdef xxxx -0123456789 yyyyyyyyyyyyyyyy -* Record 3 -0123456789abcdef xxxxxx -0123456789 yyyyyyyyyyyyyy -* Record 4 -0123456789abcdef xxxxxxxx -0123456789 yyyyyyyyyyyy -* Record 5 -0123456789abcdef xxxxxxxxxx -0123456789 yyyyyyyyyy -* Record 6 -0123456789abcdef xxxxxxxxxxxx -0123456789 yyyyyyyy -* Record 7 -0123456789abcdef xxxxxxxxxxxxxx -0123456789 yyyyyy -* Record 8 -0123456789abcdef xxxxxxxxxxxxxxxx -0123456789 yyyy -* Record 9 -0123456789abcdef xxxxxxxxxxxxxxxxxx -0123456789 yy -* Record 10 -0123456789abcdef xxxxxxxxxxxxxxxxxxxx -0123456789 - -\pset format wrapped -execute q; -* Record 1 -0123456789abcdef xx -0123456789 yyy. - .yyy. - .yyy. - .yyy. - .yyy. - .yyy -* Record 2 -0123456789abcdef xxx. - .x -0123456789 yyy. - .yyy. - .yyy. - .yyy. - .yyy. - .y -* Record 3 -0123456789abcdef xxx. - .xxx -0123456789 yyy. - .yyy. - .yyy. - .yyy. - .yy -* Record 4 -0123456789abcdef xxx. - .xxx. - .xx -0123456789 yyy. - .yyy. - .yyy. - .yyy -* Record 5 -0123456789abcdef xxx. - .xxx. - .xxx. - .x -0123456789 yyy. - .yyy. - .yyy. - .y -* Record 6 -0123456789abcdef xxx. - .xxx. - .xxx. - .xxx -0123456789 yyy. - .yyy. - .yy -* Record 7 -0123456789abcdef xxx. - .xxx. - .xxx. - .xxx. - .xx -0123456789 yyy. - .yyy -* Record 8 -0123456789abcdef xxx. - .xxx. - .xxx. - .xxx. - .xxx. - .x -0123456789 yyy. - .y -* Record 9 -0123456789abcdef xxx. - .xxx. - .xxx. - .xxx. - .xxx. - .xxx -0123456789 yy -* Record 10 -0123456789abcdef xxx. - .xxx. - .xxx. - .xxx. - .xxx. - .xxx. 
- .xx -0123456789 - -\pset border 1 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; --[ RECORD 1 ]----+--------------------- -0123456789abcdef | xx -0123456789 | yyyyyyyyyyyyyyyyyy --[ RECORD 2 ]----+--------------------- -0123456789abcdef | xxxx -0123456789 | yyyyyyyyyyyyyyyy --[ RECORD 3 ]----+--------------------- -0123456789abcdef | xxxxxx -0123456789 | yyyyyyyyyyyyyy --[ RECORD 4 ]----+--------------------- -0123456789abcdef | xxxxxxxx -0123456789 | yyyyyyyyyyyy --[ RECORD 5 ]----+--------------------- -0123456789abcdef | xxxxxxxxxx -0123456789 | yyyyyyyyyy --[ RECORD 6 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxx -0123456789 | yyyyyyyy --[ RECORD 7 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxx -0123456789 | yyyyyy --[ RECORD 8 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxx -0123456789 | yyyy --[ RECORD 9 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxx -0123456789 | yy --[ RECORD 10 ]---+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxxxx -0123456789 | - -\pset format wrapped -execute q; --[ RECORD 1 ]----+---- -0123456789abcdef | xx -0123456789 | yyy. - |.yyy. - |.yyy. - |.yyy. - |.yyy. - |.yyy --[ RECORD 2 ]----+---- -0123456789abcdef | xxx. - |.x -0123456789 | yyy. - |.yyy. - |.yyy. - |.yyy. - |.yyy. - |.y --[ RECORD 3 ]----+---- -0123456789abcdef | xxx. - |.xxx -0123456789 | yyy. - |.yyy. - |.yyy. - |.yyy. - |.yy --[ RECORD 4 ]----+---- -0123456789abcdef | xxx. - |.xxx. - |.xx -0123456789 | yyy. - |.yyy. - |.yyy. - |.yyy --[ RECORD 5 ]----+---- -0123456789abcdef | xxx. - |.xxx. - |.xxx. - |.x -0123456789 | yyy. - |.yyy. - |.yyy. - |.y --[ RECORD 6 ]----+---- -0123456789abcdef | xxx. - |.xxx. - |.xxx. - |.xxx -0123456789 | yyy. - |.yyy. - |.yy --[ RECORD 7 ]----+---- -0123456789abcdef | xxx. - |.xxx. - |.xxx. - |.xxx. - |.xx -0123456789 | yyy. - |.yyy --[ RECORD 8 ]----+---- -0123456789abcdef | xxx. - |.xxx. - |.xxx. - |.xxx. - |.xxx. - |.x -0123456789 | yyy. - |.y --[ RECORD 9 ]----+---- -0123456789abcdef | xxx. - |.xxx. - |.xxx. - |.xxx. - |.xxx. - |.xxx -0123456789 | yy --[ RECORD 10 ]---+---- -0123456789abcdef | xxx. - |.xxx. - |.xxx. - |.xxx. - |.xxx. - |.xxx. 
- |.xx -0123456789 | - -\pset border 2 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; -+-[ RECORD 1 ]-----+----------------------+ -| 0123456789abcdef | xx | -| 0123456789 | yyyyyyyyyyyyyyyyyy | -+-[ RECORD 2 ]-----+----------------------+ -| 0123456789abcdef | xxxx | -| 0123456789 | yyyyyyyyyyyyyyyy | -+-[ RECORD 3 ]-----+----------------------+ -| 0123456789abcdef | xxxxxx | -| 0123456789 | yyyyyyyyyyyyyy | -+-[ RECORD 4 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxx | -| 0123456789 | yyyyyyyyyyyy | -+-[ RECORD 5 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxx | -| 0123456789 | yyyyyyyyyy | -+-[ RECORD 6 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxx | -| 0123456789 | yyyyyyyy | -+-[ RECORD 7 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxx | -| 0123456789 | yyyyyy | -+-[ RECORD 8 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxx | -| 0123456789 | yyyy | -+-[ RECORD 9 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxxx | -| 0123456789 | yy | -+-[ RECORD 10 ]----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxxxxx | -| 0123456789 | | -+------------------+----------------------+ - -\pset format wrapped -execute q; -+-[ RECORD 1 ]-----+-----+ -| 0123456789abcdef | xx | -| 0123456789 | yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy | -+-[ RECORD 2 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.x | -| 0123456789 | yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy.| -| |.y | -+-[ RECORD 3 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx | -| 0123456789 | yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy.| -| |.yy | -+-[ RECORD 4 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xx | -| 0123456789 | yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy | -+-[ RECORD 5 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xxx.| -| |.x | -| 0123456789 | yyy.| -| |.yyy.| -| |.yyy.| -| |.y | -+-[ RECORD 6 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx | -| 0123456789 | yyy.| -| |.yyy.| -| |.yy | -+-[ RECORD 7 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xx | -| 0123456789 | yyy.| -| |.yyy | -+-[ RECORD 8 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.x | -| 0123456789 | yyy.| -| |.y | -+-[ RECORD 9 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx | -| 0123456789 | yy | -+-[ RECORD 10 ]----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xx | -| 0123456789 | | -+------------------+-----+ - -\pset linestyle old-ascii -\pset expanded off -\pset columns 40 -\pset border 0 -\pset format unaligned -execute q; -0123456789abcdef|0123456789 -xx|yyyyyyyyyyyyyyyyyy -xxxx|yyyyyyyyyyyyyyyy -xxxxxx|yyyyyyyyyyyyyy -xxxxxxxx|yyyyyyyyyyyy -xxxxxxxxxx|yyyyyyyyyy -xxxxxxxxxxxx|yyyyyyyy -xxxxxxxxxxxxxx|yyyyyy -xxxxxxxxxxxxxxxx|yyyy -xxxxxxxxxxxxxxxxxx|yy 
-xxxxxxxxxxxxxxxxxxxx| -(10 rows) -\pset format aligned -execute q; - 0123456789abcdef 0123456789 --------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx yyyyyyyyyyyyyyyy -xxxxxx yyyyyyyyyyyyyy -xxxxxxxx yyyyyyyyyyyy -xxxxxxxxxx yyyyyyyyyy -xxxxxxxxxxxx yyyyyyyy -xxxxxxxxxxxxxx yyyyyy -xxxxxxxxxxxxxxxx yyyy -xxxxxxxxxxxxxxxxxx yy -xxxxxxxxxxxxxxxxxxxx -(10 rows) - -\pset format wrapped -execute q; - 0123456789abcdef 0123456789 --------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx yyyyyyyyyyyyyyyy -xxxxxx yyyyyyyyyyyyyy -xxxxxxxx yyyyyyyyyyyy -xxxxxxxxxx yyyyyyyyyy -xxxxxxxxxxxx yyyyyyyy -xxxxxxxxxxxxxx yyyyyy -xxxxxxxxxxxxxxxx yyyy -xxxxxxxxxxxxxxxxxx yy -xxxxxxxxxxxxxxxxxxxx -(10 rows) - -\pset border 1 -\pset format unaligned -execute q; -0123456789abcdef|0123456789 -xx|yyyyyyyyyyyyyyyyyy -xxxx|yyyyyyyyyyyyyyyy -xxxxxx|yyyyyyyyyyyyyy -xxxxxxxx|yyyyyyyyyyyy -xxxxxxxxxx|yyyyyyyyyy -xxxxxxxxxxxx|yyyyyyyy -xxxxxxxxxxxxxx|yyyyyy -xxxxxxxxxxxxxxxx|yyyy -xxxxxxxxxxxxxxxxxx|yy -xxxxxxxxxxxxxxxxxxxx| -(10 rows) -\pset format aligned -execute q; - 0123456789abcdef | 0123456789 -----------------------+-------------------- - xx | yyyyyyyyyyyyyyyyyy - xxxx | yyyyyyyyyyyyyyyy - xxxxxx | yyyyyyyyyyyyyy - xxxxxxxx | yyyyyyyyyyyy - xxxxxxxxxx | yyyyyyyyyy - xxxxxxxxxxxx | yyyyyyyy - xxxxxxxxxxxxxx | yyyyyy - xxxxxxxxxxxxxxxx | yyyy - xxxxxxxxxxxxxxxxxx | yy - xxxxxxxxxxxxxxxxxxxx | -(10 rows) - -\pset format wrapped -execute q; - 0123456789abcdef | 0123456789 ----------------------+------------------ - xx | yyyyyyyyyyyyyyyy - ; yy - xxxx | yyyyyyyyyyyyyyyy - xxxxxx | yyyyyyyyyyyyyy - xxxxxxxx | yyyyyyyyyyyy - xxxxxxxxxx | yyyyyyyyyy - xxxxxxxxxxxx | yyyyyyyy - xxxxxxxxxxxxxx | yyyyyy - xxxxxxxxxxxxxxxx | yyyy - xxxxxxxxxxxxxxxxxx | yy - xxxxxxxxxxxxxxxxxxx | - x -(10 rows) - -\pset border 2 -\pset format unaligned -execute q; -0123456789abcdef|0123456789 -xx|yyyyyyyyyyyyyyyyyy -xxxx|yyyyyyyyyyyyyyyy -xxxxxx|yyyyyyyyyyyyyy -xxxxxxxx|yyyyyyyyyyyy -xxxxxxxxxx|yyyyyyyyyy -xxxxxxxxxxxx|yyyyyyyy -xxxxxxxxxxxxxx|yyyyyy -xxxxxxxxxxxxxxxx|yyyy -xxxxxxxxxxxxxxxxxx|yy -xxxxxxxxxxxxxxxxxxxx| -(10 rows) -\pset format aligned -execute q; -+----------------------+--------------------+ -| 0123456789abcdef | 0123456789 | -+----------------------+--------------------+ -| xx | yyyyyyyyyyyyyyyyyy | -| xxxx | yyyyyyyyyyyyyyyy | -| xxxxxx | yyyyyyyyyyyyyy | -| xxxxxxxx | yyyyyyyyyyyy | -| xxxxxxxxxx | yyyyyyyyyy | -| xxxxxxxxxxxx | yyyyyyyy | -| xxxxxxxxxxxxxx | yyyyyy | -| xxxxxxxxxxxxxxxx | yyyy | -| xxxxxxxxxxxxxxxxxx | yy | -| xxxxxxxxxxxxxxxxxxxx | | -+----------------------+--------------------+ -(10 rows) - -\pset format wrapped -execute q; -+--------------------+-----------------+ -| 0123456789abcdef | 0123456789 | -+--------------------+-----------------+ -| xx | yyyyyyyyyyyyyyy | -| ; yyy | -| xxxx | yyyyyyyyyyyyyyy | -| ; y | -| xxxxxx | yyyyyyyyyyyyyy | -| xxxxxxxx | yyyyyyyyyyyy | -| xxxxxxxxxx | yyyyyyyyyy | -| xxxxxxxxxxxx | yyyyyyyy | -| xxxxxxxxxxxxxx | yyyyyy | -| xxxxxxxxxxxxxxxx | yyyy | -| xxxxxxxxxxxxxxxxxx | yy | -| xxxxxxxxxxxxxxxxxx | | -| xx | -+--------------------+-----------------+ -(10 rows) - -\pset expanded on -\pset border 0 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx 
-0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; -* Record 1 -0123456789abcdef xx -0123456789 yyyyyyyyyyyyyyyyyy -* Record 2 -0123456789abcdef xxxx -0123456789 yyyyyyyyyyyyyyyy -* Record 3 -0123456789abcdef xxxxxx -0123456789 yyyyyyyyyyyyyy -* Record 4 -0123456789abcdef xxxxxxxx -0123456789 yyyyyyyyyyyy -* Record 5 -0123456789abcdef xxxxxxxxxx -0123456789 yyyyyyyyyy -* Record 6 -0123456789abcdef xxxxxxxxxxxx -0123456789 yyyyyyyy -* Record 7 -0123456789abcdef xxxxxxxxxxxxxx -0123456789 yyyyyy -* Record 8 -0123456789abcdef xxxxxxxxxxxxxxxx -0123456789 yyyy -* Record 9 -0123456789abcdef xxxxxxxxxxxxxxxxxx -0123456789 yy -* Record 10 -0123456789abcdef xxxxxxxxxxxxxxxxxxxx -0123456789 - -\pset format wrapped -execute q; -* Record 1 -0123456789abcdef xx -0123456789 yyyyyyyyyyyyyyyyyy -* Record 2 -0123456789abcdef xxxx -0123456789 yyyyyyyyyyyyyyyy -* Record 3 -0123456789abcdef xxxxxx -0123456789 yyyyyyyyyyyyyy -* Record 4 -0123456789abcdef xxxxxxxx -0123456789 yyyyyyyyyyyy -* Record 5 -0123456789abcdef xxxxxxxxxx -0123456789 yyyyyyyyyy -* Record 6 -0123456789abcdef xxxxxxxxxxxx -0123456789 yyyyyyyy -* Record 7 -0123456789abcdef xxxxxxxxxxxxxx -0123456789 yyyyyy -* Record 8 -0123456789abcdef xxxxxxxxxxxxxxxx -0123456789 yyyy -* Record 9 -0123456789abcdef xxxxxxxxxxxxxxxxxx -0123456789 yy -* Record 10 -0123456789abcdef xxxxxxxxxxxxxxxxxxxx -0123456789 - -\pset border 1 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; --[ RECORD 1 ]----+--------------------- -0123456789abcdef | xx -0123456789 | yyyyyyyyyyyyyyyyyy --[ RECORD 2 ]----+--------------------- -0123456789abcdef | xxxx -0123456789 | yyyyyyyyyyyyyyyy --[ RECORD 3 ]----+--------------------- -0123456789abcdef | xxxxxx -0123456789 | yyyyyyyyyyyyyy --[ RECORD 4 ]----+--------------------- -0123456789abcdef | xxxxxxxx -0123456789 | yyyyyyyyyyyy --[ RECORD 5 ]----+--------------------- -0123456789abcdef | xxxxxxxxxx -0123456789 | yyyyyyyyyy --[ RECORD 6 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxx -0123456789 | yyyyyyyy --[ RECORD 7 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxx -0123456789 | yyyyyy --[ RECORD 8 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxx -0123456789 | yyyy --[ RECORD 9 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxx -0123456789 | yy --[ RECORD 10 ]---+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxxxx -0123456789 | - -\pset format wrapped -execute q; --[ RECORD 1 ]----+--------------------- -0123456789abcdef | xx -0123456789 | yyyyyyyyyyyyyyyyyy --[ RECORD 2 ]----+--------------------- -0123456789abcdef | xxxx -0123456789 | yyyyyyyyyyyyyyyy --[ RECORD 3 ]----+--------------------- -0123456789abcdef | xxxxxx -0123456789 | yyyyyyyyyyyyyy --[ RECORD 4 ]----+--------------------- 
-0123456789abcdef | xxxxxxxx -0123456789 | yyyyyyyyyyyy --[ RECORD 5 ]----+--------------------- -0123456789abcdef | xxxxxxxxxx -0123456789 | yyyyyyyyyy --[ RECORD 6 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxx -0123456789 | yyyyyyyy --[ RECORD 7 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxx -0123456789 | yyyyyy --[ RECORD 8 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxx -0123456789 | yyyy --[ RECORD 9 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxx -0123456789 | yy --[ RECORD 10 ]---+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxxxx -0123456789 | - -\pset border 2 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; -+-[ RECORD 1 ]-----+----------------------+ -| 0123456789abcdef | xx | -| 0123456789 | yyyyyyyyyyyyyyyyyy | -+-[ RECORD 2 ]-----+----------------------+ -| 0123456789abcdef | xxxx | -| 0123456789 | yyyyyyyyyyyyyyyy | -+-[ RECORD 3 ]-----+----------------------+ -| 0123456789abcdef | xxxxxx | -| 0123456789 | yyyyyyyyyyyyyy | -+-[ RECORD 4 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxx | -| 0123456789 | yyyyyyyyyyyy | -+-[ RECORD 5 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxx | -| 0123456789 | yyyyyyyyyy | -+-[ RECORD 6 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxx | -| 0123456789 | yyyyyyyy | -+-[ RECORD 7 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxx | -| 0123456789 | yyyyyy | -+-[ RECORD 8 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxx | -| 0123456789 | yyyy | -+-[ RECORD 9 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxxx | -| 0123456789 | yy | -+-[ RECORD 10 ]----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxxxxx | -| 0123456789 | | -+------------------+----------------------+ - -\pset format wrapped -execute q; -+-[ RECORD 1 ]-----+-------------------+ -| 0123456789abcdef | xx | -| 0123456789 | yyyyyyyyyyyyyyyyy | -| ; y | -+-[ RECORD 2 ]-----+-------------------+ -| 0123456789abcdef | xxxx | -| 0123456789 | yyyyyyyyyyyyyyyy | -+-[ RECORD 3 ]-----+-------------------+ -| 0123456789abcdef | xxxxxx | -| 0123456789 | yyyyyyyyyyyyyy | -+-[ RECORD 4 ]-----+-------------------+ -| 0123456789abcdef | xxxxxxxx | -| 0123456789 | yyyyyyyyyyyy | -+-[ RECORD 5 ]-----+-------------------+ -| 0123456789abcdef | xxxxxxxxxx | -| 0123456789 | yyyyyyyyyy | -+-[ RECORD 6 ]-----+-------------------+ -| 0123456789abcdef | xxxxxxxxxxxx | -| 0123456789 | yyyyyyyy | -+-[ RECORD 7 ]-----+-------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxx | -| 0123456789 | yyyyyy | -+-[ RECORD 8 ]-----+-------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxx | -| 0123456789 | yyyy | -+-[ RECORD 9 ]-----+-------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxx | -| ; x | -| 0123456789 | yy | -+-[ RECORD 10 ]----+-------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxx | -| ; xxx | -| 0123456789 | | 
-+------------------+-------------------+ - -deallocate q; -\pset linestyle ascii -\pset border 1 --- support table for output-format tests (useful to create a footer) -create table psql_serial_tab (id serial); --- test header/footer/tuples_only behavior in aligned/unaligned/wrapped cases -\pset format aligned -\pset expanded off -\d psql_serial_tab_id_seq - Sequence "public.psql_serial_tab_id_seq" - Type | Start | Minimum | Maximum | Increment | Cycles? | Cache ----------+-------+---------+------------+-----------+---------+------- - integer | 1 | 1 | 2147483647 | 1 | no | 1 -Owned by: public.psql_serial_tab.id - -\pset tuples_only true -\df exp - pg_catalog | exp | double precision | double precision | func - pg_catalog | exp | numeric | numeric | func - -\pset tuples_only false -\pset expanded on -\d psql_serial_tab_id_seq -Sequence "public.psql_serial_tab_id_seq" --[ RECORD 1 ]--------- -Type | integer -Start | 1 -Minimum | 1 -Maximum | 2147483647 -Increment | 1 -Cycles? | no -Cache | 1 - -Owned by: public.psql_serial_tab.id - -\pset tuples_only true -\df exp -Schema | pg_catalog -Name | exp -Result data type | double precision -Argument data types | double precision -Type | func ---------------------+----------------- -Schema | pg_catalog -Name | exp -Result data type | numeric -Argument data types | numeric -Type | func - -\pset tuples_only false --- empty table is a special case for this format -select 1 where false; -(0 rows) - -\pset format unaligned -\pset expanded off -\d psql_serial_tab_id_seq -Sequence "public.psql_serial_tab_id_seq" -Type|Start|Minimum|Maximum|Increment|Cycles?|Cache -integer|1|1|2147483647|1|no|1 -Owned by: public.psql_serial_tab.id -\pset tuples_only true -\df exp -pg_catalog|exp|double precision|double precision|func -pg_catalog|exp|numeric|numeric|func -\pset tuples_only false -\pset expanded on -\d psql_serial_tab_id_seq -Sequence "public.psql_serial_tab_id_seq" - -Type|integer -Start|1 -Minimum|1 -Maximum|2147483647 -Increment|1 -Cycles?|no -Cache|1 - -Owned by: public.psql_serial_tab.id -\pset tuples_only true -\df exp -Schema|pg_catalog -Name|exp -Result data type|double precision -Argument data types|double precision -Type|func - -Schema|pg_catalog -Name|exp -Result data type|numeric -Argument data types|numeric -Type|func -\pset tuples_only false -\pset format wrapped -\pset expanded off -\d psql_serial_tab_id_seq - Sequence "public.psql_serial_tab_id_seq" - Type | Start | Minimum | Maximum | Increment | Cycles? | Cache ----------+-------+---------+------------+-----------+---------+------- - integer | 1 | 1 | 2147483647 | 1 | no | 1 -Owned by: public.psql_serial_tab.id - -\pset tuples_only true -\df exp - pg_catalog | exp | double precision | double precision | func - pg_catalog | exp | numeric | numeric | func - -\pset tuples_only false -\pset expanded on -\d psql_serial_tab_id_seq -Sequence "public.psql_serial_tab_id_seq" --[ RECORD 1 ]--------- -Type | integer -Start | 1 -Minimum | 1 -Maximum | 2147483647 -Increment | 1 -Cycles? 
| no -Cache | 1 - -Owned by: public.psql_serial_tab.id - -\pset tuples_only true -\df exp -Schema | pg_catalog -Name | exp -Result data type | double precision -Argument data types | double precision -Type | func ---------------------+----------------- -Schema | pg_catalog -Name | exp -Result data type | numeric -Argument data types | numeric -Type | func - -\pset tuples_only false --- check conditional am display -\pset expanded off -CREATE SCHEMA tableam_display; -CREATE ROLE regress_display_role; -ALTER SCHEMA tableam_display OWNER TO regress_display_role; -SET search_path TO tableam_display; -CREATE ACCESS METHOD heap_psql TYPE TABLE HANDLER heap_tableam_handler; -SET ROLE TO regress_display_role; --- Use only relations with a physical size of zero. -CREATE TABLE tbl_heap_psql(f1 int, f2 char(100)) using heap_psql; -CREATE TABLE tbl_heap(f1 int, f2 char(100)) using heap; -CREATE VIEW view_heap_psql AS SELECT f1 from tbl_heap_psql; -CREATE MATERIALIZED VIEW mat_view_heap_psql USING heap_psql AS SELECT f1 from tbl_heap_psql; -\d+ tbl_heap_psql - Table "tableam_display.tbl_heap_psql" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+----------------+-----------+----------+---------+----------+--------------+------------- - f1 | integer | | | | plain | | - f2 | character(100) | | | | extended | | - -\d+ tbl_heap - Table "tableam_display.tbl_heap" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+----------------+-----------+----------+---------+----------+--------------+------------- - f1 | integer | | | | plain | | - f2 | character(100) | | | | extended | | - -\set HIDE_TABLEAM off -\d+ tbl_heap_psql - Table "tableam_display.tbl_heap_psql" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+----------------+-----------+----------+---------+----------+--------------+------------- - f1 | integer | | | | plain | | - f2 | character(100) | | | | extended | | -Access method: heap_psql - -\d+ tbl_heap - Table "tableam_display.tbl_heap" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+----------------+-----------+----------+---------+----------+--------------+------------- - f1 | integer | | | | plain | | - f2 | character(100) | | | | extended | | -Access method: heap - --- AM is displayed for tables, indexes and materialized views. 
-\d+ - List of relations - Schema | Name | Type | Owner | Persistence | Access method | Size | Description ------------------+--------------------+-------------------+----------------------+-------------+---------------+---------+------------- - tableam_display | mat_view_heap_psql | materialized view | regress_display_role | permanent | heap_psql | 0 bytes | - tableam_display | tbl_heap | table | regress_display_role | permanent | heap | 0 bytes | - tableam_display | tbl_heap_psql | table | regress_display_role | permanent | heap_psql | 0 bytes | - tableam_display | view_heap_psql | view | regress_display_role | permanent | | 0 bytes | -(4 rows) - -\dt+ - List of relations - Schema | Name | Type | Owner | Persistence | Access method | Size | Description ------------------+---------------+-------+----------------------+-------------+---------------+---------+------------- - tableam_display | tbl_heap | table | regress_display_role | permanent | heap | 0 bytes | - tableam_display | tbl_heap_psql | table | regress_display_role | permanent | heap_psql | 0 bytes | -(2 rows) - -\dm+ - List of relations - Schema | Name | Type | Owner | Persistence | Access method | Size | Description ------------------+--------------------+-------------------+----------------------+-------------+---------------+---------+------------- - tableam_display | mat_view_heap_psql | materialized view | regress_display_role | permanent | heap_psql | 0 bytes | -(1 row) - --- But not for views and sequences. -\dv+ - List of relations - Schema | Name | Type | Owner | Persistence | Size | Description ------------------+----------------+------+----------------------+-------------+---------+------------- - tableam_display | view_heap_psql | view | regress_display_role | permanent | 0 bytes | -(1 row) - -\set HIDE_TABLEAM on -\d+ - List of relations - Schema | Name | Type | Owner | Persistence | Size | Description ------------------+--------------------+-------------------+----------------------+-------------+---------+------------- - tableam_display | mat_view_heap_psql | materialized view | regress_display_role | permanent | 0 bytes | - tableam_display | tbl_heap | table | regress_display_role | permanent | 0 bytes | - tableam_display | tbl_heap_psql | table | regress_display_role | permanent | 0 bytes | - tableam_display | view_heap_psql | view | regress_display_role | permanent | 0 bytes | -(4 rows) - -RESET ROLE; -RESET search_path; -DROP SCHEMA tableam_display CASCADE; -NOTICE: drop cascades to 4 other objects -DETAIL: drop cascades to table tableam_display.tbl_heap_psql -drop cascades to table tableam_display.tbl_heap -drop cascades to view tableam_display.view_heap_psql -drop cascades to materialized view tableam_display.mat_view_heap_psql -DROP ACCESS METHOD heap_psql; -DROP ROLE regress_display_role; --- test numericlocale (as best we can without control of psql's locale) -\pset format aligned -\pset expanded off -\pset numericlocale true -select n, -n as m, n * 111 as x, '1e90'::float8 as f -from generate_series(0,3) n; - n | m | x | f ----+----+-----+------- - 0 | 0 | 0 | 1e+90 - 1 | -1 | 111 | 1e+90 - 2 | -2 | 222 | 1e+90 - 3 | -3 | 333 | 1e+90 -(4 rows) - -\pset numericlocale false --- test asciidoc output format -\pset format asciidoc -\pset border 1 -\pset expanded off -\d psql_serial_tab_id_seq - -.Sequence "public.psql_serial_tab_id_seq" -[options="header",cols="l,>l,>l,>l,l",frame="none"] -|==== -^l|Type ^l|Start ^l|Minimum ^l|Maximum ^l|Increment ^l|Cycles? 
^l|Cache
-|integer |1 |1 |2147483647 |1 |no |1
-|====
-
-....
-Owned by: public.psql_serial_tab.id
-....
-\pset tuples_only true
-\df exp
[asciidoc output lost here: an angle-bracket span stripped during extraction swallowed the \df exp output, the following \pset tuples_only false and \pset expanded on commands, and most of the expanded \d psql_serial_tab_id_seq output; the cols="..." line in the \d output above likewise lost its "<l"-style column specs. Only the footer below survives.]
-....
-Owned by: public.psql_serial_tab.id
-....
-\pset tuples_only true
-\df exp
[more asciidoc output lost here, through the prepare q statement and the non-expanded execute q tests; only fragments of the expanded execute q outputs survive:]
-
-[cols="h,l",frame="none"]
-|====
-2+|
-l|1
-2+^|Record 2
-l|2
-|====
-\pset border 1
-execute q;
-
-[cols="h,l",frame="none"]
-|====
-2+^|Record 1
-l|1
-2+^|Record 2
-l|2
-|====
-\pset border 2
-execute q;
-
-[cols="h,l",frame="all",grid="all"]
-|====
-2+^|Record 1
-l|1
-2+^|Record 2
-l|2
-|====
-deallocate q;
--- test csv output format
-\pset format csv
-\pset border 1
-\pset expanded off
-\d psql_serial_tab_id_seq
-Type,Start,Minimum,Maximum,Increment,Cycles?,Cache
-integer,1,1,2147483647,1,no,1
-\pset tuples_only true
-\df exp
-pg_catalog,exp,double precision,double precision,func
-pg_catalog,exp,numeric,numeric,func
-\pset tuples_only false
-\pset expanded on
-\d psql_serial_tab_id_seq
-Type,integer
-Start,1
-Minimum,1
-Maximum,2147483647
-Increment,1
-Cycles?,no
-Cache,1
-\pset tuples_only true
-\df exp
-Schema,pg_catalog
-Name,exp
-Result data type,double precision
-Argument data types,double precision
-Type,func
-Schema,pg_catalog
-Name,exp
-Result data type,numeric
-Argument data types,numeric
-Type,func
-\pset tuples_only false
-prepare q as
-  select 'some"text' as "a""title", E' <foo>\n<bar>' as "junk",
-    '   ' as "empty", n as int
-  from generate_series(1,2) as n;
-\pset expanded off
-execute q;
-"a""title",junk,empty,int
-"some""text"," <foo>
-<bar>",   ,1
-"some""text"," <foo>
-<bar>",   ,2
-\pset expanded on
-execute q;
-"a""title","some""text"
-junk," <foo>
-<bar>"
-empty,
-int,1
-"a""title","some""text"
-junk," <foo>
-<bar>"
-empty,
-int,2
-deallocate q;
--- special cases
-\pset expanded off
-select 'comma,comma' as comma, 'semi;semi' as semi;
-comma,semi
-"comma,comma",semi;semi
-\pset csv_fieldsep ';'
-select 'comma,comma' as comma, 'semi;semi' as semi;
-comma;semi
-comma,comma;"semi;semi"
-select '\.' as data;
-data
-"\."
-\pset csv_fieldsep '.'
-select '\' as d1, '' as d2;
-"d1"."d2"
-"\".""
--- illegal csv separators
-\pset csv_fieldsep ''
-\pset: csv_fieldsep must be a single one-byte character
-\pset csv_fieldsep '\0'
-\pset: csv_fieldsep must be a single one-byte character
-\pset csv_fieldsep '\n'
-\pset: csv_fieldsep cannot be a double quote, a newline, or a carriage return
-\pset csv_fieldsep '\r'
-\pset: csv_fieldsep cannot be a double quote, a newline, or a carriage return
-\pset csv_fieldsep '"'
-\pset: csv_fieldsep cannot be a double quote, a newline, or a carriage return
-\pset csv_fieldsep ',,'
-\pset: csv_fieldsep must be a single one-byte character
-\pset csv_fieldsep ','
--- test html output format
[the HTML table markup in this section was stripped during extraction; the psql commands and the surviving cell text are shown below, pipe-separated, in place of the lost markup. The "<foo>"/"<bar>" tokens in the prepare statement were restored from the surviving cell text.]
-\pset format html
-\pset border 1
-\pset expanded off
-\d psql_serial_tab_id_seq
-Sequence "public.psql_serial_tab_id_seq"
-Type | Start | Minimum | Maximum | Increment | Cycles? | Cache
-integer | 1 | 1 | 2147483647 | 1 | no | 1
-Owned by: public.psql_serial_tab.id
-\pset tuples_only true
-\df exp
-pg_catalog | exp | double precision | double precision | func
-pg_catalog | exp | numeric | numeric | func
-\pset tuples_only false
-\pset expanded on
-\d psql_serial_tab_id_seq
-Sequence "public.psql_serial_tab_id_seq"
-Record 1
-Type | integer
-Start | 1
-Minimum | 1
-Maximum | 2147483647
-Increment | 1
-Cycles? | no
-Cache | 1
-Owned by: public.psql_serial_tab.id
-\pset tuples_only true
-\df exp
-Schema | pg_catalog
-Name | exp
-Result data type | double precision
-Argument data types | double precision
-Type | func
-Schema | pg_catalog
-Name | exp
-Result data type | numeric
-Argument data types | numeric
-Type | func
-\pset tuples_only false
-prepare q as
-  select 'some"text' as "a&title", E' <foo>\n<bar>' as "junk",
-    '   ' as "empty", n as int
-  from generate_series(1,2) as n;
-\pset expanded off
-\pset border 0
-execute q;
-a&title | junk | empty | int
-some"text |  <foo> <bar> |   | 1
-some"text |  <foo> <bar> |   | 2
-(2 rows)
-\pset border 1
-execute q;
-a&title | junk | empty | int
-some"text |  <foo> <bar> |   | 1
-some"text |  <foo> <bar> |   | 2
-(2 rows)
-\pset tableattr foobar
-execute q;
-a&title | junk | empty | int
-some"text |  <foo> <bar> |   | 1
-some"text |  <foo> <bar> |   | 2
-(2 rows)
-\pset tableattr
-\pset expanded on
-\pset border 0
-execute q;
-Record 1
-a&title | some"text
-junk |  <foo> <bar>
-empty |
-int | 1
-Record 2
-a&title | some"text
-junk |  <foo> <bar>
-empty |
-int | 2
-\pset border 1
-execute q;
-Record 1
-a&title | some"text
-junk |  <foo> <bar>
-empty |
-int | 1
-Record 2
-a&title | some"text
-junk |  <foo> <bar>
-empty |
-int | 2
-\pset tableattr foobar
-execute q;
-Record 1
-a&title | some"text
-junk |  <foo> <bar>
-empty |
-int | 1
-Record 2
-a&title | some"text
-junk |  <foo> <bar>
-empty |
-int | 2
-\pset tableattr
-deallocate q;
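[The commands above can be replayed to regenerate the stripped markup; a minimal sketch, assuming only a scratch database to connect to and using the test's own query shape:]
\pset format html
\pset border 1
prepare q as
  select 'some"text' as "a&title", E' <foo>\n<bar>' as "junk",
    '   ' as "empty", n as int
  from generate_series(1,2) as n;
execute q;
-- psql emits an HTML <table>, entity-escaping the cell text (e.g. <foo>
-- becomes &lt;foo&gt;), which is exactly the markup this hunk lost.
deallocate q;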
--- test latex output format
-\pset format latex
-\pset border 1
-\pset expanded off
-\d psql_serial_tab_id_seq
-\begin{center}
-Sequence "public.psql\_serial\_tab\_id\_seq"
-\end{center}
-
-\begin{tabular}{l | r | r | r | r | l | r}
-\textit{Type} & \textit{Start} & \textit{Minimum} & \textit{Maximum} & \textit{Increment} & \textit{Cycles?} & \textit{Cache} \\
-\hline
-integer & 1 & 1 & 2147483647 & 1 & no & 1 \\
-\end{tabular}
-
-\noindent Owned by: public.psql\_serial\_tab.id \\
-
-\pset tuples_only true
-\df exp
-\begin{tabular}{l | l | l | l | l}
-pg\_catalog & exp & double precision & double precision & func \\
-pg\_catalog & exp & numeric & numeric & func \\
-\end{tabular}
-
-\noindent
-\pset tuples_only false
-\pset expanded on
-\d psql_serial_tab_id_seq
-\begin{center}
-Sequence "public.psql\_serial\_tab\_id\_seq"
-\end{center}
-
-\begin{tabular}{c|l}
-\multicolumn{2}{c}{\textit{Record 1}} \\
-\hline
-Type & integer \\
-Start & 1 \\
-Minimum & 1 \\
-Maximum & 2147483647 \\
-Increment & 1 \\
-Cycles? & no \\
-Cache & 1 \\
-\end{tabular}
-
-\noindent Owned by: public.psql\_serial\_tab.id \\
-
-\pset tuples_only true
-\df exp
-\begin{tabular}{c|l}
-\hline
-Schema & pg\_catalog \\
-Name & exp \\
-Result data type & double precision \\
-Argument data types & double precision \\
-Type & func \\
-\hline
-Schema & pg\_catalog \\
-Name & exp \\
-Result data type & numeric \\
-Argument data types & numeric \\
-Type & func \\
-\end{tabular}
-
-\noindent
-\pset tuples_only false
-prepare q as
-  select 'some\more_text' as "a$title", E' #<foo>%&^~|\n{bar}' as "junk",
-    '   ' as "empty", n as int
-  from generate_series(1,2) as n;
-\pset expanded off
-\pset border 0
-execute q;
-\begin{tabular}{lllr}
-\textit{a\$title} & \textit{junk} & \textit{empty} & \textit{int} \\
-\hline
-some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 1 \\
-some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 2 \\
-\end{tabular}
-
-\noindent (2 rows) \\
-
-\pset border 1
-execute q;
-\begin{tabular}{l | l | l | r}
-\textit{a\$title} & \textit{junk} & \textit{empty} & \textit{int} \\
-\hline
-some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 1 \\
-some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 2 \\
-\end{tabular}
-
-\noindent (2 rows) \\
-
-\pset border 2
-execute q;
-\begin{tabular}{| l | l | l | r |}
-\hline
-\textit{a\$title} & \textit{junk} & \textit{empty} & \textit{int} \\
-\hline
-some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 1 \\
-some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 2 \\
-\hline
-\end{tabular}
-
-\noindent (2 rows) \\
-
-\pset border 3
-execute q;
-\begin{tabular}{| l | l | l | r |}
-\hline
-\textit{a\$title} & \textit{junk} & \textit{empty} & \textit{int} \\
-\hline
-some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 1 \\
-\hline
-some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 2 \\
-\hline
-\end{tabular}
-
-\noindent (2 rows) \\
-
-\pset expanded on
-\pset border 0
-execute q;
-\begin{tabular}{cl}
-\multicolumn{2}{c}{\textit{Record 1}} \\
-a\$title & some\textbackslash{}more\_text \\
-junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\
-empty & \\
-int & 1 \\
-\multicolumn{2}{c}{\textit{Record 2}} \\ -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\end{tabular} - -\noindent -\pset border 1 -execute q; -\begin{tabular}{c|l} -\multicolumn{2}{c}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\multicolumn{2}{c}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\end{tabular} - -\noindent -\pset border 2 -execute q; -\begin{tabular}{|c|l|} -\hline -\multicolumn{2}{|c|}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\hline -\multicolumn{2}{|c|}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\hline -\end{tabular} - -\noindent -\pset border 3 -execute q; -\begin{tabular}{|c|l|} -\hline -\multicolumn{2}{|c|}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\hline -\multicolumn{2}{|c|}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\hline -\end{tabular} - -\noindent -deallocate q; --- test latex-longtable output format -\pset format latex-longtable -\pset border 1 -\pset expanded off -\d psql_serial_tab_id_seq -\begin{longtable}{l | r | r | r | r | l | r} -\small\textbf{\textit{Type}} & \small\textbf{\textit{Start}} & \small\textbf{\textit{Minimum}} & \small\textbf{\textit{Maximum}} & \small\textbf{\textit{Increment}} & \small\textbf{\textit{Cycles?}} & \small\textbf{\textit{Cache}} \\ -\midrule -\endfirsthead -\small\textbf{\textit{Type}} & \small\textbf{\textit{Start}} & \small\textbf{\textit{Minimum}} & \small\textbf{\textit{Maximum}} & \small\textbf{\textit{Increment}} & \small\textbf{\textit{Cycles?}} & \small\textbf{\textit{Cache}} \\ -\midrule -\endhead -\caption[Sequence "public.psql\_serial\_tab\_id\_seq" (Continued)]{Sequence "public.psql\_serial\_tab\_id\_seq"} -\endfoot -\caption[Sequence "public.psql\_serial\_tab\_id\_seq"]{Sequence "public.psql\_serial\_tab\_id\_seq"} -\endlastfoot -\raggedright{integer} -& -\raggedright{1} -& -\raggedright{1} -& -\raggedright{2147483647} -& -\raggedright{1} -& -\raggedright{no} -& -\raggedright{1} \tabularnewline -\end{longtable} -\pset tuples_only true -\df exp -\begin{longtable}{l | l | l | l | l} -\raggedright{pg\_catalog} -& -\raggedright{exp} -& -\raggedright{double precision} -& -\raggedright{double precision} -& -\raggedright{func} \tabularnewline -\raggedright{pg\_catalog} -& -\raggedright{exp} -& -\raggedright{numeric} -& -\raggedright{numeric} -& -\raggedright{func} \tabularnewline -\end{longtable} -\pset tuples_only false -\pset expanded on -\d psql_serial_tab_id_seq -\begin{center} -Sequence "public.psql\_serial\_tab\_id\_seq" -\end{center} - -\begin{tabular}{c|l} -\multicolumn{2}{c}{\textit{Record 1}} \\ -\hline -Type & integer \\ -Start & 1 \\ -Minimum & 1 \\ -Maximum & 2147483647 \\ -Increment & 1 \\ -Cycles? 
& no \\ -Cache & 1 \\ -\end{tabular} - -\noindent Owned by: public.psql\_serial\_tab.id \\ - -\pset tuples_only true -\df exp -\begin{tabular}{c|l} -\hline -Schema & pg\_catalog \\ -Name & exp \\ -Result data type & double precision \\ -Argument data types & double precision \\ -Type & func \\ -\hline -Schema & pg\_catalog \\ -Name & exp \\ -Result data type & numeric \\ -Argument data types & numeric \\ -Type & func \\ -\end{tabular} - -\noindent -\pset tuples_only false -prepare q as - select 'some\more_text' as "a$title", E' #<foo>%&^~|\n{bar}' as "junk", - ' ' as "empty", n as int - from generate_series(1,2) as n; -\pset expanded off -\pset border 0 -execute q; -\begin{longtable}{lllr} -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endfirsthead -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endhead -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{1} \tabularnewline -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{2} \tabularnewline -\end{longtable} -\pset border 1 -execute q; -\begin{longtable}{l | l | l | r} -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endfirsthead -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endhead -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{1} \tabularnewline -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{2} \tabularnewline -\end{longtable} -\pset border 2 -execute q; -\begin{longtable}{| l | l | l | r |} -\toprule -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endfirsthead -\toprule -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endhead -\bottomrule -\endfoot -\bottomrule -\endlastfoot -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{1} \tabularnewline -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{2} \tabularnewline -\end{longtable} -\pset border 3 -execute q; -\begin{longtable}{| l | l | l | r |} -\toprule -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endfirsthead -\toprule -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\endhead -\bottomrule -\endfoot -\bottomrule -\endlastfoot -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{1}
\tabularnewline - \hline -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{2} \tabularnewline - \hline -\end{longtable} -\pset tableattr lr -execute q; -\begin{longtable}{| p{lr\textwidth} | p{lr\textwidth} | p{lr\textwidth} | r |} -\toprule -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endfirsthead -\toprule -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\endhead -\bottomrule -\endfoot -\bottomrule -\endlastfoot -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{1} \tabularnewline - \hline -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{2} \tabularnewline - \hline -\end{longtable} -\pset tableattr -\pset expanded on -\pset border 0 -execute q; -\begin{tabular}{cl} -\multicolumn{2}{c}{\textit{Record 1}} \\ -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\multicolumn{2}{c}{\textit{Record 2}} \\ -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\end{tabular} - -\noindent -\pset border 1 -execute q; -\begin{tabular}{c|l} -\multicolumn{2}{c}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\multicolumn{2}{c}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\end{tabular} - -\noindent -\pset border 2 -execute q; -\begin{tabular}{|c|l|} -\hline -\multicolumn{2}{|c|}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\hline -\multicolumn{2}{|c|}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\hline -\end{tabular} - -\noindent -\pset border 3 -execute q; -\begin{tabular}{|c|l|} -\hline -\multicolumn{2}{|c|}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\hline -\multicolumn{2}{|c|}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\hline -\end{tabular} - -\noindent -\pset tableattr lr -execute q; -\begin{tabular}{|c|l|} -\hline -\multicolumn{2}{|c|}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\hline -\multicolumn{2}{|c|}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\hline -\end{tabular} - -\noindent -\pset 
tableattr -deallocate q; --- test troff-ms output format -\pset format troff-ms -\pset border 1 -\pset expanded off -\d psql_serial_tab_id_seq -.LP -.DS C -Sequence "public.psql_serial_tab_id_seq" -.DE -.LP -.TS -center; -l | r | r | r | r | l | r. -\fIType\fP \fIStart\fP \fIMinimum\fP \fIMaximum\fP \fIIncrement\fP \fICycles?\fP \fICache\fP -_ -integer 1 1 2147483647 1 no 1 -.TE -.DS L -Owned by: public.psql_serial_tab.id -.DE -\pset tuples_only true -\df exp -.LP -.TS -center; -l | l | l | l | l. -pg_catalog exp double precision double precision func -pg_catalog exp numeric numeric func -.TE -.DS L -.DE -\pset tuples_only false -\pset expanded on -\d psql_serial_tab_id_seq -.LP -.DS C -Sequence "public.psql_serial_tab_id_seq" -.DE -.LP -.TS -center; -c s. -\fIRecord 1\fP -_ -.T& -c | l. -Type integer -Start 1 -Minimum 1 -Maximum 2147483647 -Increment 1 -Cycles? no -Cache 1 -.TE -.DS L -Owned by: public.psql_serial_tab.id -.DE -\pset tuples_only true -\df exp -.LP -.TS -center; -c l; -_ -Schema pg_catalog -Name exp -Result data type double precision -Argument data types double precision -Type func -_ -Schema pg_catalog -Name exp -Result data type numeric -Argument data types numeric -Type func -.TE -.DS L -.DE -\pset tuples_only false -prepare q as - select 'some\text' as "a\title", E' \n' as "junk", - ' ' as "empty", n as int - from generate_series(1,2) as n; -\pset expanded off -\pset border 0 -execute q; -.LP -.TS -center; -lllr. -\fIa\(rstitle\fP \fIjunk\fP \fIempty\fP \fIint\fP -_ -some\(rstext - 1 -some\(rstext - 2 -.TE -.DS L -(2 rows) -.DE -\pset border 1 -execute q; -.LP -.TS -center; -l | l | l | r. -\fIa\(rstitle\fP \fIjunk\fP \fIempty\fP \fIint\fP -_ -some\(rstext - 1 -some\(rstext - 2 -.TE -.DS L -(2 rows) -.DE -\pset border 2 -execute q; -.LP -.TS -center box; -l | l | l | r. -\fIa\(rstitle\fP \fIjunk\fP \fIempty\fP \fIint\fP -_ -some\(rstext - 1 -some\(rstext - 2 -.TE -.DS L -(2 rows) -.DE -\pset expanded on -\pset border 0 -execute q; -.LP -.TS -center; -c s. -\fIRecord 1\fP -.T& -c l. -a\(rstitle some\(rstext -junk - -empty -int 1 -.T& -c s. -\fIRecord 2\fP -.T& -c l. -a\(rstitle some\(rstext -junk - -empty -int 2 -.TE -.DS L -.DE -\pset border 1 -execute q; -.LP -.TS -center; -c s. -\fIRecord 1\fP -_ -.T& -c | l. -a\(rstitle some\(rstext -junk - -empty -int 1 -.T& -c s. -\fIRecord 2\fP -_ -.T& -c | l. -a\(rstitle some\(rstext -junk - -empty -int 2 -.TE -.DS L -.DE -\pset border 2 -execute q; -.LP -.TS -center box; -c s. -\fIRecord 1\fP -_ -.T& -c l. -a\(rstitle some\(rstext -junk - -empty -int 1 -_ -.T& -c s. -\fIRecord 2\fP -_ -.T& -c l. -a\(rstitle some\(rstext -junk - -empty -int 2 -.TE -.DS L -.DE -deallocate q; --- check ambiguous format requests -\pset format a -\pset: ambiguous abbreviation "a" matches both "aligned" and "asciidoc" -\pset format l --- clean up after output format tests -drop table psql_serial_tab; -\pset format aligned -\pset expanded off -\pset border 1 --- \echo and allied features -\echo this is a test -this is a test -\echo -n without newline -without newline\echo with -n newline -with -n newline -\echo '-n' with newline --n with newline -\set foo bar -\echo foo = :foo -foo = bar -\qecho this is a test -this is a test -\qecho foo = :foo -foo = bar -\warn this is a test -this is a test -\warn foo = :foo -foo = bar --- tests for \if ... \endif -\if true - select 'okay'; - ?column? ----------- - okay -(1 row) - - select 'still okay'; - ?column? 
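For context on the format cycling tested above: the same rendering differences can be reproduced with a short psql script along these lines (fmt_demo is a hypothetical scratch table, not part of the regression suite):

-- sketch: cycle psql output formats over a tiny table
CREATE TABLE fmt_demo (id int, label text);
INSERT INTO fmt_demo VALUES (1, 'alpha'), (2, 'beta');
\pset format latex
SELECT * FROM fmt_demo;        -- emitted as a \begin{tabular} block
\pset format latex-longtable
SELECT * FROM fmt_demo;        -- emitted using the longtable package
\pset format troff-ms
SELECT * FROM fmt_demo;        -- emitted as .TS/.TE troff tables
\pset format aligned           -- restore the default
DROP TABLE fmt_demo;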
------------- - still okay -(1 row) - -\else - not okay; - still not okay -\endif --- at this point query buffer should still have last valid line -\g - ?column? ------------- - still okay -(1 row) - --- \if should work okay on part of a query -select - \if true - 42 - \else - (bogus - \endif - forty_two; - forty_two ------------ - 42 -(1 row) - -select \if false \\ (bogus \else \\ 42 \endif \\ forty_two; - forty_two ------------ - 42 -(1 row) - --- test a large nested if using a variety of true-equivalents -\if true - \if 1 - \if yes - \if on - \echo 'all true' -all true - \else - \echo 'should not print #1-1' - \endif - \else - \echo 'should not print #1-2' - \endif - \else - \echo 'should not print #1-3' - \endif -\else - \echo 'should not print #1-4' -\endif --- test a variety of false-equivalents in an if/elif/else structure -\if false - \echo 'should not print #2-1' -\elif 0 - \echo 'should not print #2-2' -\elif no - \echo 'should not print #2-3' -\elif off - \echo 'should not print #2-4' -\else - \echo 'all false' -all false -\endif --- test true-false elif after initial true branch -\if true - \echo 'should print #2-5' -should print #2-5 -\elif true - \echo 'should not print #2-6' -\elif false - \echo 'should not print #2-7' -\else - \echo 'should not print #2-8' -\endif --- test simple true-then-else -\if true - \echo 'first thing true' -first thing true -\else - \echo 'should not print #3-1' -\endif --- test simple false-true-else -\if false - \echo 'should not print #4-1' -\elif true - \echo 'second thing true' -second thing true -\else - \echo 'should not print #5-1' -\endif --- invalid boolean expressions are false -\if invalid boolean expression -unrecognized value "invalid boolean expression" for "\if expression": Boolean expected - \echo 'will not print #6-1' -\else - \echo 'will print anyway #6-2' -will print anyway #6-2 -\endif --- test un-matched endif -\endif -\endif: no matching \if --- test un-matched else -\else -\else: no matching \if --- test un-matched elif -\elif -\elif: no matching \if --- test double-else error -\if true -\else -\else -\else: cannot occur after \else -\endif --- test elif out-of-order -\if false -\else -\elif -\elif: cannot occur after \else -\endif --- test if-endif matching in a false branch -\if false - \if false - \echo 'should not print #7-1' - \else - \echo 'should not print #7-2' - \endif - \echo 'should not print #7-3' -\else - \echo 'should print #7-4' -should print #7-4 -\endif --- show that vars and backticks are not expanded when ignoring extra args -\set foo bar -\echo :foo :'foo' :"foo" -bar 'bar' "bar" -\pset fieldsep | `nosuchcommand` :foo :'foo' :"foo" -\pset: extra argument "nosuchcommand" ignored -\pset: extra argument ":foo" ignored -\pset: extra argument ":'foo'" ignored -\pset: extra argument ":"foo"" ignored --- show that vars and backticks are not expanded and commands are ignored --- when in a false if-branch -\set try_to_quit '\\q' -\if false - :try_to_quit - \echo `nosuchcommand` :foo :'foo' :"foo" - \pset fieldsep | `nosuchcommand` :foo :'foo' :"foo" - \a - SELECT $1 \bind 1 \g - \C arg1 - \c arg1 arg2 arg3 arg4 - \cd arg1 - \conninfo - \copy arg1 arg2 arg3 arg4 arg5 arg6 - \copyright - SELECT 1 as one, 2, 3 \crosstabview - \dt arg1 - \e arg1 arg2 - \ef whole_line - \ev whole_line - \echo arg1 arg2 arg3 arg4 arg5 - \echo arg1 - \encoding arg1 - \errverbose - \f arg1 - \g arg1 - \gx arg1 - \gexec - SELECT 1 AS one \gset - \h - \? 
- \html - \i arg1 - \ir arg1 - \l arg1 - \lo arg1 arg2 -invalid command \lo - \lo_list - \o arg1 - \p - \password arg1 - \prompt arg1 arg2 - \pset arg1 arg2 - \q - \reset - \s arg1 - \set arg1 arg2 arg3 arg4 arg5 arg6 arg7 - \setenv arg1 arg2 - \sf whole_line - \sv whole_line - \t arg1 - \T arg1 - \timing arg1 - \unset arg1 - \w arg1 - \watch arg1 arg2 - \x arg1 - -- \else here is eaten as part of OT_FILEPIPE argument - \w |/no/such/file \else - -- \endif here is eaten as part of whole-line argument - \! whole_line \endif - \z -\else - \echo 'should print #8-1' -should print #8-1 -\endif --- :{?...} defined variable test -\set i 1 -\if :{?i} - \echo '#9-1 ok, variable i is defined' -#9-1 ok, variable i is defined -\else - \echo 'should not print #9-2' -\endif -\if :{?no_such_variable} - \echo 'should not print #10-1' -\else - \echo '#10-2 ok, variable no_such_variable is not defined' -#10-2 ok, variable no_such_variable is not defined -\endif -SELECT :{?i} AS i_is_defined; - i_is_defined --------------- - t -(1 row) - -SELECT NOT :{?no_such_var} AS no_such_var_is_not_defined; - no_such_var_is_not_defined ----------------------------- - t -(1 row) - --- SHOW_CONTEXT -\set SHOW_CONTEXT never -do $$ -begin - raise notice 'foo'; - raise exception 'bar'; -end $$; -NOTICE: foo -ERROR: bar -\set SHOW_CONTEXT errors -do $$ -begin - raise notice 'foo'; - raise exception 'bar'; -end $$; -NOTICE: foo -ERROR: bar -CONTEXT: PL/pgSQL function inline_code_block line 4 at RAISE -\set SHOW_CONTEXT always -do $$ -begin - raise notice 'foo'; - raise exception 'bar'; -end $$; -NOTICE: foo -CONTEXT: PL/pgSQL function inline_code_block line 3 at RAISE -ERROR: bar -CONTEXT: PL/pgSQL function inline_code_block line 4 at RAISE --- test printing and clearing the query buffer -SELECT 1; - ?column? ----------- - 1 -(1 row) - -\p -SELECT 1; -SELECT 2 \r -\p -SELECT 1; -SELECT 3 \p -SELECT 3 -UNION SELECT 4 \p -SELECT 3 -UNION SELECT 4 -UNION SELECT 5 -ORDER BY 1; - ?column? 
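The :{?name} and \if tests above correspond to a common scripting idiom; as a sketch (the region variable is illustrative), a script can fall back to a default when no -v value was passed on the command line:

-- run as: psql -v region=emea -f script.sql   (or without -v)
\if :{?region}
    \echo 'using region' :region
\else
    \set region default
    \echo 'no region given, using' :region
\endif
SELECT :'region' AS region_in_use;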
----------- - 3 - 4 - 5 -(3 rows) - -\r -\p -SELECT 3 -UNION SELECT 4 -UNION SELECT 5 -ORDER BY 1; --- tests for special result variables --- working query, 2 rows selected -SELECT 1 AS stuff UNION SELECT 2; - stuff -------- - 1 - 2 -(2 rows) - -\echo 'error:' :ERROR -error: false -\echo 'error code:' :SQLSTATE -error code: 00000 -\echo 'number of rows:' :ROW_COUNT -number of rows: 2 --- syntax error -SELECT 1 UNION; -ERROR: syntax error at or near ";" -LINE 1: SELECT 1 UNION; - ^ -\echo 'error:' :ERROR -error: true -\echo 'error code:' :SQLSTATE -error code: 42601 -\echo 'number of rows:' :ROW_COUNT -number of rows: 0 -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: syntax error at or near ";" -\echo 'last error code:' :LAST_ERROR_SQLSTATE -last error code: 42601 --- empty query -; -\echo 'error:' :ERROR -error: false -\echo 'error code:' :SQLSTATE -error code: 00000 -\echo 'number of rows:' :ROW_COUNT -number of rows: 0 --- must have kept previous values -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: syntax error at or near ";" -\echo 'last error code:' :LAST_ERROR_SQLSTATE -last error code: 42601 --- other query error -DROP TABLE this_table_does_not_exist; -ERROR: table "this_table_does_not_exist" does not exist -\echo 'error:' :ERROR -error: true -\echo 'error code:' :SQLSTATE -error code: 42P01 -\echo 'number of rows:' :ROW_COUNT -number of rows: 0 -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: table "this_table_does_not_exist" does not exist -\echo 'last error code:' :LAST_ERROR_SQLSTATE -last error code: 42P01 --- nondefault verbosity error settings (except verbose, which is too unstable) -\set VERBOSITY terse -SELECT 1 UNION; -ERROR: syntax error at or near ";" at character 15 -\echo 'error:' :ERROR -error: true -\echo 'error code:' :SQLSTATE -error code: 42601 -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: syntax error at or near ";" -\set VERBOSITY sqlstate -SELECT 1/0; -ERROR: 22012 -\echo 'error:' :ERROR -error: true -\echo 'error code:' :SQLSTATE -error code: 22012 -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: division by zero -\set VERBOSITY default --- working \gdesc -SELECT 3 AS three, 4 AS four \gdesc - Column | Type ---------+--------- - three | integer - four | integer -(2 rows) - -\echo 'error:' :ERROR -error: false -\echo 'error code:' :SQLSTATE -error code: 00000 -\echo 'number of rows:' :ROW_COUNT -number of rows: 2 --- \gdesc with an error -SELECT 4 AS \gdesc -ERROR: syntax error at end of input -LINE 1: SELECT 4 AS - ^ -\echo 'error:' :ERROR -error: true -\echo 'error code:' :SQLSTATE -error code: 42601 -\echo 'number of rows:' :ROW_COUNT -number of rows: 0 -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: syntax error at end of input -\echo 'last error code:' :LAST_ERROR_SQLSTATE -last error code: 42601 --- check row count for a cursor-fetched query -\set FETCH_COUNT 10 -select unique2 from tenk1 order by unique2 limit 19; - unique2 ---------- - 0 - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 - 11 - 12 - 13 - 14 - 15 - 16 - 17 - 18 -(19 rows) - -\echo 'error:' :ERROR -error: false -\echo 'error code:' :SQLSTATE -error code: 00000 -\echo 'number of rows:' :ROW_COUNT -number of rows: 19 --- cursor-fetched query with an error after the first group -select 1/(15-unique2) from tenk1 order by unique2 limit 19; - ?column? 
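A sketch of how the status variables exercised above are typically consumed in scripts (the queries are illustrative):

SELECT count(*) AS n FROM pg_class;
\echo 'rows:' :ROW_COUNT 'sqlstate:' :SQLSTATE
SELECT 1/0;                     -- deliberate failure
\if :ERROR
    \echo 'failed with' :LAST_ERROR_SQLSTATE '-' :LAST_ERROR_MESSAGE
\endif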
----------- - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 -ERROR: division by zero -\echo 'error:' :ERROR -error: true -\echo 'error code:' :SQLSTATE -error code: 22012 -\echo 'number of rows:' :ROW_COUNT -number of rows: 0 -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: division by zero -\echo 'last error code:' :LAST_ERROR_SQLSTATE -last error code: 22012 -\unset FETCH_COUNT -create schema testpart; -create role regress_partitioning_role; -alter schema testpart owner to regress_partitioning_role; -set role to regress_partitioning_role; --- run test inside own schema and hide other partitions -set search_path to testpart; -create table testtable_apple(logdate date); -create table testtable_orange(logdate date); -create index testtable_apple_index on testtable_apple(logdate); -create index testtable_orange_index on testtable_orange(logdate); -create table testpart_apple(logdate date) partition by range(logdate); -create table testpart_orange(logdate date) partition by range(logdate); -create index testpart_apple_index on testpart_apple(logdate); -create index testpart_orange_index on testpart_orange(logdate); --- only partition related object should be displayed -\dP test*apple* - List of partitioned relations - Schema | Name | Owner | Type | Parent name | Table -----------+----------------------+---------------------------+-------------------+-------------+---------------- - testpart | testpart_apple | regress_partitioning_role | partitioned table | | - testpart | testpart_apple_index | regress_partitioning_role | partitioned index | | testpart_apple -(2 rows) - -\dPt test*apple* - List of partitioned tables - Schema | Name | Owner | Parent name -----------+----------------+---------------------------+------------- - testpart | testpart_apple | regress_partitioning_role | -(1 row) - -\dPi test*apple* - List of partitioned indexes - Schema | Name | Owner | Parent name | Table -----------+----------------------+---------------------------+-------------+---------------- - testpart | testpart_apple_index | regress_partitioning_role | | testpart_apple -(1 row) - -drop table testtable_apple; -drop table testtable_orange; -drop table testpart_apple; -drop table testpart_orange; -create table parent_tab (id int) partition by range (id); -create index parent_index on parent_tab (id); -create table child_0_10 partition of parent_tab - for values from (0) to (10); -create table child_10_20 partition of parent_tab - for values from (10) to (20); -create table child_20_30 partition of parent_tab - for values from (20) to (30); -insert into parent_tab values (generate_series(0,29)); -create table child_30_40 partition of parent_tab -for values from (30) to (40) - partition by range(id); -create table child_30_35 partition of child_30_40 - for values from (30) to (35); -create table child_35_40 partition of child_30_40 - for values from (35) to (40); -insert into parent_tab values (generate_series(30,39)); -\dPt - List of partitioned tables - Schema | Name | Owner -----------+------------+--------------------------- - testpart | parent_tab | regress_partitioning_role -(1 row) - -\dPi - List of partitioned indexes - Schema | Name | Owner | Table -----------+--------------+---------------------------+------------ - testpart | parent_index | regress_partitioning_role | parent_tab -(1 row) - -\dP testpart.* - List of partitioned relations - Schema | Name | Owner | Type | Parent name | Table 
-----------+--------------------+---------------------------+-------------------+--------------+------------- - testpart | parent_tab | regress_partitioning_role | partitioned table | | - testpart | child_30_40 | regress_partitioning_role | partitioned table | parent_tab | - testpart | parent_index | regress_partitioning_role | partitioned index | | parent_tab - testpart | child_30_40_id_idx | regress_partitioning_role | partitioned index | parent_index | child_30_40 -(4 rows) - -\dP - List of partitioned relations - Schema | Name | Owner | Type | Table -----------+--------------+---------------------------+-------------------+------------ - testpart | parent_tab | regress_partitioning_role | partitioned table | - testpart | parent_index | regress_partitioning_role | partitioned index | parent_tab -(2 rows) - -\dPtn - List of partitioned tables - Schema | Name | Owner | Parent name -----------+-------------+---------------------------+------------- - testpart | parent_tab | regress_partitioning_role | - testpart | child_30_40 | regress_partitioning_role | parent_tab -(2 rows) - -\dPin - List of partitioned indexes - Schema | Name | Owner | Parent name | Table -----------+--------------------+---------------------------+--------------+------------- - testpart | parent_index | regress_partitioning_role | | parent_tab - testpart | child_30_40_id_idx | regress_partitioning_role | parent_index | child_30_40 -(2 rows) - -\dPn - List of partitioned relations - Schema | Name | Owner | Type | Parent name | Table -----------+--------------------+---------------------------+-------------------+--------------+------------- - testpart | parent_tab | regress_partitioning_role | partitioned table | | - testpart | child_30_40 | regress_partitioning_role | partitioned table | parent_tab | - testpart | parent_index | regress_partitioning_role | partitioned index | | parent_tab - testpart | child_30_40_id_idx | regress_partitioning_role | partitioned index | parent_index | child_30_40 -(4 rows) - -\dPn testpart.* - List of partitioned relations - Schema | Name | Owner | Type | Parent name | Table -----------+--------------------+---------------------------+-------------------+--------------+------------- - testpart | parent_tab | regress_partitioning_role | partitioned table | | - testpart | child_30_40 | regress_partitioning_role | partitioned table | parent_tab | - testpart | parent_index | regress_partitioning_role | partitioned index | | parent_tab - testpart | child_30_40_id_idx | regress_partitioning_role | partitioned index | parent_index | child_30_40 -(4 rows) - -drop table parent_tab cascade; -drop schema testpart; -set search_path to default; -set role to default; -drop role regress_partitioning_role; --- \d on toast table (use pg_statistic's toast table, which has a known name) -\d pg_toast.pg_toast_2619 -TOAST table "pg_toast.pg_toast_2619" - Column | Type -------------+--------- - chunk_id | oid - chunk_seq | integer - chunk_data | bytea -Owning table: "pg_catalog.pg_statistic" -Indexes: - "pg_toast_2619_index" PRIMARY KEY, btree (chunk_id, chunk_seq) - --- check printing info about access methods -\dA -List of access methods - Name | Type ---------+------- - brin | Index - btree | Index - gin | Index - gist | Index - hash | Index - heap | Table - heap2 | Table - spgist | Index -(8 rows) - -\dA * -List of access methods - Name | Type ---------+------- - brin | Index - btree | Index - gin | Index - gist | Index - hash | Index - heap | Table - heap2 | Table - spgist | Index -(8 rows) - -\dA h* 
-List of access methods - Name | Type --------+------- - hash | Index - heap | Table - heap2 | Table -(3 rows) - -\dA foo -List of access methods - Name | Type -------+------ -(0 rows) - -\dA foo bar -List of access methods - Name | Type -------+------ -(0 rows) - -\dA: extra argument "bar" ignored -\dA+ - List of access methods - Name | Type | Handler | Description ---------+-------+----------------------+---------------------------------------- - brin | Index | brinhandler | block range index (BRIN) access method - btree | Index | bthandler | b-tree index access method - gin | Index | ginhandler | GIN index access method - gist | Index | gisthandler | GiST index access method - hash | Index | hashhandler | hash index access method - heap | Table | heap_tableam_handler | heap table access method - heap2 | Table | heap_tableam_handler | - spgist | Index | spghandler | SP-GiST index access method -(8 rows) - -\dA+ * - List of access methods - Name | Type | Handler | Description ---------+-------+----------------------+---------------------------------------- - brin | Index | brinhandler | block range index (BRIN) access method - btree | Index | bthandler | b-tree index access method - gin | Index | ginhandler | GIN index access method - gist | Index | gisthandler | GiST index access method - hash | Index | hashhandler | hash index access method - heap | Table | heap_tableam_handler | heap table access method - heap2 | Table | heap_tableam_handler | - spgist | Index | spghandler | SP-GiST index access method -(8 rows) - -\dA+ h* - List of access methods - Name | Type | Handler | Description --------+-------+----------------------+-------------------------- - hash | Index | hashhandler | hash index access method - heap | Table | heap_tableam_handler | heap table access method - heap2 | Table | heap_tableam_handler | -(3 rows) - -\dA+ foo - List of access methods - Name | Type | Handler | Description -------+------+---------+------------- -(0 rows) - -\dAc brin pg*.oid* - List of operator classes - AM | Input type | Storage type | Operator class | Default? 
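For reference, the \dA listings being checked here are built from the pg_am catalog; a plain-SQL approximation (a sketch, not the exact query psql issues) is:

-- roughly what \dA reports, straight from the catalog
SELECT amname AS "Name",
       CASE amtype WHEN 'i' THEN 'Index' WHEN 't' THEN 'Table' END AS "Type"
FROM pg_am
ORDER BY amname;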
-------+------------+--------------+----------------------+---------- - brin | oid | | oid_bloom_ops | no - brin | oid | | oid_minmax_multi_ops | no - brin | oid | | oid_minmax_ops | yes -(3 rows) - -\dAf spgist - List of operator families - AM | Operator family | Applicable types ---------+-----------------+------------------ - spgist | box_ops | box - spgist | kd_point_ops | point - spgist | network_ops | inet - spgist | poly_ops | polygon - spgist | quad_point_ops | point - spgist | range_ops | anyrange - spgist | text_ops | text -(7 rows) - -\dAf btree int4 - List of operator families - AM | Operator family | Applicable types --------+-----------------+--------------------------- - btree | integer_ops | smallint, integer, bigint -(1 row) - -\dAo+ btree float_ops - List of operators of operator families - AM | Operator family | Operator | Strategy | Purpose | Sort opfamily --------+-----------------+---------------------------------------+----------+---------+--------------- - btree | float_ops | <(double precision,double precision) | 1 | search | - btree | float_ops | <=(double precision,double precision) | 2 | search | - btree | float_ops | =(double precision,double precision) | 3 | search | - btree | float_ops | >=(double precision,double precision) | 4 | search | - btree | float_ops | >(double precision,double precision) | 5 | search | - btree | float_ops | <(real,real) | 1 | search | - btree | float_ops | <=(real,real) | 2 | search | - btree | float_ops | =(real,real) | 3 | search | - btree | float_ops | >=(real,real) | 4 | search | - btree | float_ops | >(real,real) | 5 | search | - btree | float_ops | <(double precision,real) | 1 | search | - btree | float_ops | <=(double precision,real) | 2 | search | - btree | float_ops | =(double precision,real) | 3 | search | - btree | float_ops | >=(double precision,real) | 4 | search | - btree | float_ops | >(double precision,real) | 5 | search | - btree | float_ops | <(real,double precision) | 1 | search | - btree | float_ops | <=(real,double precision) | 2 | search | - btree | float_ops | =(real,double precision) | 3 | search | - btree | float_ops | >=(real,double precision) | 4 | search | - btree | float_ops | >(real,double precision) | 5 | search | -(20 rows) - -\dAo * pg_catalog.jsonb_path_ops - List of operators of operator families - AM | Operator family | Operator | Strategy | Purpose ------+-----------------+--------------------+----------+--------- - gin | jsonb_path_ops | @>(jsonb,jsonb) | 7 | search - gin | jsonb_path_ops | @?(jsonb,jsonpath) | 15 | search - gin | jsonb_path_ops | @@(jsonb,jsonpath) | 16 | search -(3 rows) - -\dAp+ btree float_ops - List of support functions of operator families - AM | Operator family | Registered left type | Registered right type | Number | Function --------+-----------------+----------------------+-----------------------+--------+------------------------------------------------------------------------------ - btree | float_ops | double precision | double precision | 1 | btfloat8cmp(double precision,double precision) - btree | float_ops | double precision | double precision | 2 | btfloat8sortsupport(internal) - btree | float_ops | double precision | double precision | 3 | in_range(double precision,double precision,double precision,boolean,boolean) - btree | float_ops | real | real | 1 | btfloat4cmp(real,real) - btree | float_ops | real | real | 2 | btfloat4sortsupport(internal) - btree | float_ops | double precision | real | 1 | btfloat84cmp(double precision,real) - btree | float_ops | real | 
double precision | 1 | btfloat48cmp(real,double precision) - btree | float_ops | real | double precision | 3 | in_range(real,real,double precision,boolean,boolean) -(8 rows) - -\dAp * pg_catalog.uuid_ops - List of support functions of operator families - AM | Operator family | Registered left type | Registered right type | Number | Function --------+-----------------+----------------------+-----------------------+--------+-------------------- - btree | uuid_ops | uuid | uuid | 1 | uuid_cmp - btree | uuid_ops | uuid | uuid | 2 | uuid_sortsupport - btree | uuid_ops | uuid | uuid | 4 | btequalimage - hash | uuid_ops | uuid | uuid | 1 | uuid_hash - hash | uuid_ops | uuid | uuid | 2 | uuid_hash_extended -(5 rows) - --- check \dconfig -set work_mem = 10240; -\dconfig work_mem -List of configuration parameters - Parameter | Value ------------+------- - work_mem | 10MB -(1 row) - -\dconfig+ work* - List of configuration parameters - Parameter | Value | Type | Context | Access privileges ------------+-------+---------+---------+------------------- - work_mem | 10MB | integer | user | -(1 row) - -reset work_mem; --- check \df, \do with argument specifications -\df *sqrt - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+--------------+------------------+---------------------+------ - pg_catalog | dsqrt | double precision | double precision | func - pg_catalog | numeric_sqrt | numeric | numeric | func - pg_catalog | sqrt | double precision | double precision | func - pg_catalog | sqrt | numeric | numeric | func -(4 rows) - -\df *sqrt num* - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+--------------+------------------+---------------------+------ - pg_catalog | numeric_sqrt | numeric | numeric | func - pg_catalog | sqrt | numeric | numeric | func -(2 rows) - -\df int*pl - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+-------------+------------------+---------------------+------ - pg_catalog | int24pl | integer | smallint, integer | func - pg_catalog | int28pl | bigint | smallint, bigint | func - pg_catalog | int2pl | smallint | smallint, smallint | func - pg_catalog | int42pl | integer | integer, smallint | func - pg_catalog | int48pl | bigint | integer, bigint | func - pg_catalog | int4pl | integer | integer, integer | func - pg_catalog | int82pl | bigint | bigint, smallint | func - pg_catalog | int84pl | bigint | bigint, integer | func - pg_catalog | int8pl | bigint | bigint, bigint | func - pg_catalog | interval_pl | interval | interval, interval | func -(10 rows) - -\df int*pl int4 - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+---------+------------------+---------------------+------ - pg_catalog | int42pl | integer | integer, smallint | func - pg_catalog | int48pl | bigint | integer, bigint | func - pg_catalog | int4pl | integer | integer, integer | func -(3 rows) - -\df int*pl * pg_catalog.int8 - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+---------+------------------+---------------------+------ - pg_catalog | int28pl | bigint | smallint, bigint | func - pg_catalog | int48pl | bigint | integer, bigint | func - pg_catalog | int8pl | bigint | bigint, bigint | func -(3 rows) - -\df acl* aclitem[] - List of functions - Schema | Name | Result data type | Argument data types | Type 
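The \dconfig check below pretty-prints values the same way current_setting() returns them, so an equivalent plain-SQL sketch is:

SET work_mem = '10MB';
SELECT current_setting('work_mem');   -- 10MB, matching \dconfig work_mem
RESET work_mem;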
-------------+-------------+------------------+----------------------------------------------------------------------------------------------------+------ - pg_catalog | aclcontains | boolean | aclitem[], aclitem | func - pg_catalog | aclexplode | SETOF record | acl aclitem[], OUT grantor oid, OUT grantee oid, OUT privilege_type text, OUT is_grantable boolean | func - pg_catalog | aclinsert | aclitem[] | aclitem[], aclitem | func - pg_catalog | aclremove | aclitem[] | aclitem[], aclitem | func -(4 rows) - -\df has_database_privilege oid text - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+------------------------+------------------+---------------------+------ - pg_catalog | has_database_privilege | boolean | oid, text | func - pg_catalog | has_database_privilege | boolean | oid, text, text | func -(2 rows) - -\df has_database_privilege oid text - - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+------------------------+------------------+---------------------+------ - pg_catalog | has_database_privilege | boolean | oid, text | func -(1 row) - -\dfa bit* small* - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+---------+------------------+---------------------+------ - pg_catalog | bit_and | smallint | smallint | agg - pg_catalog | bit_or | smallint | smallint | agg - pg_catalog | bit_xor | smallint | smallint | agg -(3 rows) - -\df *._pg_expandarray - List of functions - Schema | Name | Result data type | Argument data types | Type ---------------------+-----------------+------------------+-------------------------------------------+------ - information_schema | _pg_expandarray | SETOF record | anyarray, OUT x anyelement, OUT n integer | func -(1 row) - -\do - pg_catalog.int4 - List of operators - Schema | Name | Left arg type | Right arg type | Result type | Description -------------+------+---------------+----------------+-------------+------------- - pg_catalog | - | | integer | integer | negate -(1 row) - -\do && anyarray * - List of operators - Schema | Name | Left arg type | Right arg type | Result type | Description -------------+------+---------------+----------------+-------------+------------- - pg_catalog | && | anyarray | anyarray | boolean | overlaps -(1 row) - --- check \df+ --- we have to use functions with a predictable owner name, so make a role -create role regress_psql_user superuser; -begin; -set session authorization regress_psql_user; -create function psql_df_internal (float8) - returns float8 - language internal immutable parallel safe strict - as 'dsin'; -create function psql_df_sql (x integer) - returns integer - security definer - begin atomic select x + 1; end; -create function psql_df_plpgsql () - returns void - language plpgsql - as $$ begin return; end; $$; -comment on function psql_df_plpgsql () is 'some comment'; -\df+ psql_df_* - List of functions - Schema | Name | Result data type | Argument data types | Type | Volatility | Parallel | Owner | Security | Access privileges | Language | Internal name | Description ---------+------------------+------------------+---------------------+------+------------+----------+-------------------+----------+-------------------+----------+---------------+-------------- - public | psql_df_internal | double precision | double precision | func | immutable | safe | regress_psql_user | invoker | | internal | dsin | - public | psql_df_plpgsql | void | | func | volatile | unsafe | 
regress_psql_user | invoker | | plpgsql | | some comment - public | psql_df_sql | integer | x integer | func | volatile | unsafe | regress_psql_user | definer | | sql | | -(3 rows) - -rollback; -drop role regress_psql_user; --- check \sf -\sf information_schema._pg_index_position -CREATE OR REPLACE FUNCTION information_schema._pg_index_position(oid, smallint) - RETURNS integer - LANGUAGE sql - STABLE STRICT -BEGIN ATOMIC - SELECT (ss.a).n AS n - FROM ( SELECT information_schema._pg_expandarray(pg_index.indkey) AS a - FROM pg_index - WHERE (pg_index.indexrelid = $1)) ss - WHERE ((ss.a).x = $2); -END -\sf+ information_schema._pg_index_position - CREATE OR REPLACE FUNCTION information_schema._pg_index_position(oid, smallint) - RETURNS integer - LANGUAGE sql - STABLE STRICT -1 BEGIN ATOMIC -2 SELECT (ss.a).n AS n -3 FROM ( SELECT information_schema._pg_expandarray(pg_index.indkey) AS a -4 FROM pg_index -5 WHERE (pg_index.indexrelid = $1)) ss -6 WHERE ((ss.a).x = $2); -7 END -\sf+ interval_pl_time - CREATE OR REPLACE FUNCTION pg_catalog.interval_pl_time(interval, time without time zone) - RETURNS time without time zone - LANGUAGE sql - IMMUTABLE PARALLEL SAFE STRICT COST 1 -1 RETURN ($2 + $1) -\sf ts_debug(text); -CREATE OR REPLACE FUNCTION pg_catalog.ts_debug(document text, OUT alias text, OUT description text, OUT token text, OUT dictionaries regdictionary[], OUT dictionary regdictionary, OUT lexemes text[]) - RETURNS SETOF record - LANGUAGE sql - STABLE PARALLEL SAFE STRICT -BEGIN ATOMIC - SELECT ts_debug.alias, - ts_debug.description, - ts_debug.token, - ts_debug.dictionaries, - ts_debug.dictionary, - ts_debug.lexemes - FROM ts_debug(get_current_ts_config(), ts_debug.document) ts_debug(alias, description, token, dictionaries, dictionary, lexemes); -END -\sf+ ts_debug(text) - CREATE OR REPLACE FUNCTION pg_catalog.ts_debug(document text, OUT alias text, OUT description text, OUT token text, OUT dictionaries regdictionary[], OUT dictionary regdictionary, OUT lexemes text[]) - RETURNS SETOF record - LANGUAGE sql - STABLE PARALLEL SAFE STRICT -1 BEGIN ATOMIC -2 SELECT ts_debug.alias, -3 ts_debug.description, -4 ts_debug.token, -5 ts_debug.dictionaries, -6 ts_debug.dictionary, -7 ts_debug.lexemes -8 FROM ts_debug(get_current_ts_config(), ts_debug.document) ts_debug(alias, description, token, dictionaries, dictionary, lexemes); -9 END --- AUTOCOMMIT -CREATE TABLE ac_test (a int); -\set AUTOCOMMIT off -INSERT INTO ac_test VALUES (1); -COMMIT; -SELECT * FROM ac_test; - a ---- - 1 -(1 row) - -COMMIT; -INSERT INTO ac_test VALUES (2); -ROLLBACK; -SELECT * FROM ac_test; - a ---- - 1 -(1 row) - -COMMIT; -BEGIN; -INSERT INTO ac_test VALUES (3); -COMMIT; -SELECT * FROM ac_test; - a ---- - 1 - 3 -(2 rows) - -COMMIT; -BEGIN; -INSERT INTO ac_test VALUES (4); -ROLLBACK; -SELECT * FROM ac_test; - a ---- - 1 - 3 -(2 rows) - -COMMIT; -\set AUTOCOMMIT on -DROP TABLE ac_test; -SELECT * FROM ac_test; -- should be gone now -ERROR: relation "ac_test" does not exist -LINE 1: SELECT * FROM ac_test; - ^ --- ON_ERROR_ROLLBACK -\set ON_ERROR_ROLLBACK on -CREATE TABLE oer_test (a int); -BEGIN; -INSERT INTO oer_test VALUES (1); -INSERT INTO oer_test VALUES ('foo'); -ERROR: invalid input syntax for type integer: "foo" -LINE 1: INSERT INTO oer_test VALUES ('foo'); - ^ -INSERT INTO oer_test VALUES (3); -COMMIT; -SELECT * FROM oer_test; - a ---- - 1 - 3 -(2 rows) - -BEGIN; -INSERT INTO oer_test VALUES (4); -ROLLBACK; -SELECT * FROM oer_test; - a ---- - 1 - 3 -(2 rows) - -BEGIN; -INSERT INTO oer_test VALUES (5); -COMMIT AND 
CHAIN; -INSERT INTO oer_test VALUES (6); -COMMIT; -SELECT * FROM oer_test; - a ---- - 1 - 3 - 5 - 6 -(4 rows) - -DROP TABLE oer_test; -\set ON_ERROR_ROLLBACK off --- ECHO errors -\set ECHO errors -ERROR: relation "notexists" does not exist -LINE 1: SELECT * FROM notexists; - ^ -STATEMENT: SELECT * FROM notexists; --- --- combined queries --- -CREATE FUNCTION warn(msg TEXT) RETURNS BOOLEAN LANGUAGE plpgsql -AS $$ - BEGIN RAISE NOTICE 'warn %', msg ; RETURN TRUE ; END -$$; --- show both -SELECT 1 AS one \; SELECT warn('1.5') \; SELECT 2 AS two ; -NOTICE: warn 1.5 -CONTEXT: PL/pgSQL function warn(text) line 2 at RAISE - one ------ - 1 -(1 row) - - warn ------- - t -(1 row) - - two ------ - 2 -(1 row) - --- \gset applies to last query only -SELECT 3 AS three \; SELECT warn('3.5') \; SELECT 4 AS four \gset -NOTICE: warn 3.5 -CONTEXT: PL/pgSQL function warn(text) line 2 at RAISE - three -------- - 3 -(1 row) - - warn ------- - t -(1 row) - -\echo :three :four -:three 4 --- syntax error stops all processing -SELECT 5 \; SELECT 6 + \; SELECT warn('6.5') \; SELECT 7 ; -ERROR: syntax error at or near ";" -LINE 1: SELECT 5 ; SELECT 6 + ; SELECT warn('6.5') ; SELECT 7 ; - ^ --- with aborted transaction, stop on first error -BEGIN \; SELECT 8 AS eight \; SELECT 9/0 AS nine \; ROLLBACK \; SELECT 10 AS ten ; - eight -------- - 8 -(1 row) - -ERROR: division by zero --- close previously aborted transaction -ROLLBACK; --- miscellaneous SQL commands --- (non SELECT output is sent to stderr, thus is not shown in expected results) -SELECT 'ok' AS "begin" \; -CREATE TABLE psql_comics(s TEXT) \; -INSERT INTO psql_comics VALUES ('Calvin'), ('hobbes') \; -COPY psql_comics FROM STDIN \; -UPDATE psql_comics SET s = 'Hobbes' WHERE s = 'hobbes' \; -DELETE FROM psql_comics WHERE s = 'Moe' \; -COPY psql_comics TO STDOUT \; -TRUNCATE psql_comics \; -DROP TABLE psql_comics \; -SELECT 'ok' AS "done" ; - begin -------- - ok -(1 row) - -Calvin -Susie -Hobbes - done ------- - ok -(1 row) - -\set SHOW_ALL_RESULTS off -SELECT 1 AS one \; SELECT warn('1.5') \; SELECT 2 AS two ; -NOTICE: warn 1.5 -CONTEXT: PL/pgSQL function warn(text) line 2 at RAISE - two ------ - 2 -(1 row) - -\set SHOW_ALL_RESULTS on -DROP FUNCTION warn(TEXT); --- --- \g with file --- -\getenv abs_builddir PG_ABS_BUILDDIR -\set g_out_file :abs_builddir '/results/psql-output1' -CREATE TEMPORARY TABLE reload_output( - lineno int NOT NULL GENERATED ALWAYS AS IDENTITY, - line text -); -SELECT 1 AS a \g :g_out_file -COPY reload_output(line) FROM :'g_out_file'; -SELECT 2 AS b\; SELECT 3 AS c\; SELECT 4 AS d \g :g_out_file -COPY reload_output(line) FROM :'g_out_file'; -COPY (SELECT 'foo') TO STDOUT \; COPY (SELECT 'bar') TO STDOUT \g :g_out_file -COPY reload_output(line) FROM :'g_out_file'; -SELECT line FROM reload_output ORDER BY lineno; - line ---------- - a - --- - 1 - (1 row) - - b - --- - 2 - (1 row) - - c - --- - 3 - (1 row) - - d - --- - 4 - (1 row) - - foo - bar -(22 rows) - -TRUNCATE TABLE reload_output; --- --- \o with file --- -\set o_out_file :abs_builddir '/results/psql-output2' -\o :o_out_file -SELECT max(unique1) FROM onek; -SELECT 1 AS a\; SELECT 2 AS b\; SELECT 3 AS c; --- COPY TO file --- The data goes to :g_out_file and the status to :o_out_file -\set QUIET false -COPY (SELECT unique1 FROM onek ORDER BY unique1 LIMIT 10) TO :'g_out_file'; --- DML command status -UPDATE onek SET unique1 = unique1 WHERE false; -\set QUIET true -\o --- Check the contents of the files generated. 
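A sketch of the combined-query pitfall these tests pin down: with \; several statements travel in one request, but \gset captures variables from the last result set only (db is deliberately left unset here):

SELECT current_database() AS db \; SELECT current_user AS usr \gset
\echo :db :usr    -- prints the literal ':db' followed by the user name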
-COPY reload_output(line) FROM :'g_out_file'; -SELECT line FROM reload_output ORDER BY lineno; - line ------- - 0 - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 -(10 rows) - -TRUNCATE TABLE reload_output; -COPY reload_output(line) FROM :'o_out_file'; -SELECT line FROM reload_output ORDER BY lineno; - line ----------- - max - ----- - 999 - (1 row) - - a - --- - 1 - (1 row) - - b - --- - 2 - (1 row) - - c - --- - 3 - (1 row) - - COPY 10 - UPDATE 0 -(22 rows) - -TRUNCATE TABLE reload_output; --- Multiple COPY TO STDOUT with output file -\o :o_out_file --- The data goes to :o_out_file with no status generated. -COPY (SELECT 'foo1') TO STDOUT \; COPY (SELECT 'bar1') TO STDOUT; --- Combination of \o and \g file with multiple COPY queries. -COPY (SELECT 'foo2') TO STDOUT \; COPY (SELECT 'bar2') TO STDOUT \g :g_out_file -\o --- Check the contents of the files generated. -COPY reload_output(line) FROM :'g_out_file'; -SELECT line FROM reload_output ORDER BY lineno; - line ------- - foo2 - bar2 -(2 rows) - -TRUNCATE TABLE reload_output; -COPY reload_output(line) FROM :'o_out_file'; -SELECT line FROM reload_output ORDER BY lineno; - line ------- - foo1 - bar1 -(2 rows) - -DROP TABLE reload_output; --- --- AUTOCOMMIT and combined queries --- -\set AUTOCOMMIT off -\echo '# AUTOCOMMIT:' :AUTOCOMMIT -# AUTOCOMMIT: off --- BEGIN is now implicit -CREATE TABLE foo(s TEXT) \; -ROLLBACK; -CREATE TABLE foo(s TEXT) \; -INSERT INTO foo(s) VALUES ('hello'), ('world') \; -COMMIT; -DROP TABLE foo \; -ROLLBACK; --- table foo is still there -SELECT * FROM foo ORDER BY 1 \; -DROP TABLE foo \; -COMMIT; - s -------- - hello - world -(2 rows) - -\set AUTOCOMMIT on -\echo '# AUTOCOMMIT:' :AUTOCOMMIT -# AUTOCOMMIT: on --- BEGIN now explicit for multi-statement transactions -BEGIN \; -CREATE TABLE foo(s TEXT) \; -INSERT INTO foo(s) VALUES ('hello'), ('world') \; -COMMIT; -BEGIN \; -DROP TABLE foo \; -ROLLBACK \; --- implicit transactions -SELECT * FROM foo ORDER BY 1 \; -DROP TABLE foo; - s -------- - hello - world -(2 rows) - --- --- test ON_ERROR_ROLLBACK and combined queries --- -CREATE FUNCTION psql_error(msg TEXT) RETURNS BOOLEAN AS $$ - BEGIN - RAISE EXCEPTION 'error %', msg; - END; -$$ LANGUAGE plpgsql; -\set ON_ERROR_ROLLBACK on -\echo '# ON_ERROR_ROLLBACK:' :ON_ERROR_ROLLBACK -# ON_ERROR_ROLLBACK: on -\echo '# AUTOCOMMIT:' :AUTOCOMMIT -# AUTOCOMMIT: on -BEGIN; -CREATE TABLE bla(s NO_SUCH_TYPE); -- fails -ERROR: type "no_such_type" does not exist -LINE 1: CREATE TABLE bla(s NO_SUCH_TYPE); - ^ -CREATE TABLE bla(s TEXT); -- succeeds -SELECT psql_error('oops!'); -- fails -ERROR: error oops! -CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE -INSERT INTO bla VALUES ('Calvin'), ('Hobbes'); -COMMIT; -SELECT * FROM bla ORDER BY 1; - s --------- - Calvin - Hobbes -(2 rows) - -BEGIN; -INSERT INTO bla VALUES ('Susie'); -- succeeds --- now with combined queries -INSERT INTO bla VALUES ('Rosalyn') \; -- will rollback -SELECT 'before error' AS show \; -- will show nevertheless! - SELECT psql_error('boum!') \; -- failure - SELECT 'after error' AS noshow; -- hidden by preceding error - show --------------- - before error -(1 row) - -ERROR: error boum! -CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE -INSERT INTO bla(s) VALUES ('Moe') \; -- will rollback - SELECT psql_error('bam!'); -ERROR: error bam! 
-CONTEXT:  PL/pgSQL function psql_error(text) line 3 at RAISE
-INSERT INTO bla VALUES ('Miss Wormwood'); -- succeeds
-COMMIT;
-SELECT * FROM bla ORDER BY 1;
-       s       
----------------
- Calvin
- Hobbes
- Miss Wormwood
- Susie
-(4 rows)
-
--- some with autocommit off
-\set AUTOCOMMIT off
-\echo '# AUTOCOMMIT:' :AUTOCOMMIT
-# AUTOCOMMIT: off
--- implicit BEGIN
-INSERT INTO bla VALUES ('Dad'); -- succeeds
-SELECT psql_error('bad!'); -- implicit partial rollback
-ERROR:  error bad!
-CONTEXT:  PL/pgSQL function psql_error(text) line 3 at RAISE
-INSERT INTO bla VALUES ('Mum') \; -- will rollback
-SELECT COUNT(*) AS "#mum"
-FROM bla WHERE s = 'Mum' \; -- but be counted here
-SELECT psql_error('bad!'); -- implicit partial rollback
- #mum 
-------
-    1
-(1 row)
-
-ERROR:  error bad!
-CONTEXT:  PL/pgSQL function psql_error(text) line 3 at RAISE
-COMMIT;
-SELECT COUNT(*) AS "#mum"
-FROM bla WHERE s = 'Mum' \; -- no mum here
-SELECT * FROM bla ORDER BY 1;
- #mum 
-------
-    0
-(1 row)
-
-       s       
----------------
- Calvin
- Dad
- Hobbes
- Miss Wormwood
- Susie
-(5 rows)
-
-COMMIT;
--- reset all
-\set AUTOCOMMIT on
-\set ON_ERROR_ROLLBACK off
-\echo '# final ON_ERROR_ROLLBACK:' :ON_ERROR_ROLLBACK
-# final ON_ERROR_ROLLBACK: off
-DROP TABLE bla;
-DROP FUNCTION psql_error;
--- check describing invalid multipart names
-\dA regression.heap
-improper qualified name (too many dotted names): regression.heap
-\dA nonesuch.heap
-improper qualified name (too many dotted names): nonesuch.heap
-\dt host.regression.pg_catalog.pg_class
-improper qualified name (too many dotted names): host.regression.pg_catalog.pg_class
-\dt |.pg_catalog.pg_class
-cross-database references are not implemented: |.pg_catalog.pg_class
-\dt nonesuch.pg_catalog.pg_class
-cross-database references are not implemented: nonesuch.pg_catalog.pg_class
-\da host.regression.pg_catalog.sum
-improper qualified name (too many dotted names): host.regression.pg_catalog.sum
-\da +.pg_catalog.sum
-cross-database references are not implemented: +.pg_catalog.sum
-\da nonesuch.pg_catalog.sum
-cross-database references are not implemented: nonesuch.pg_catalog.sum
-\dAc nonesuch.brin
-improper qualified name (too many dotted names): nonesuch.brin
-\dAc regression.brin
-improper qualified name (too many dotted names): regression.brin
-\dAf nonesuch.brin
-improper qualified name (too many dotted names): nonesuch.brin
-\dAf regression.brin
-improper qualified name (too many dotted names): regression.brin
-\dAo nonesuch.brin
-improper qualified name (too many dotted names): nonesuch.brin
-\dAo regression.brin
-improper qualified name (too many dotted names): regression.brin
-\dAp nonesuch.brin
-improper qualified name (too many dotted names): nonesuch.brin
-\dAp regression.brin
-improper qualified name (too many dotted names): regression.brin
-\db nonesuch.pg_default
-improper qualified name (too many dotted names): nonesuch.pg_default
-\db regression.pg_default
-improper qualified name (too many dotted names): regression.pg_default
-\dc host.regression.public.conversion
-improper qualified name (too many dotted names): host.regression.public.conversion
-\dc (.public.conversion
-cross-database references are not implemented: (.public.conversion
-\dc nonesuch.public.conversion
-cross-database references are not implemented: nonesuch.public.conversion
-\dC host.regression.pg_catalog.int8
-improper qualified name (too many dotted names): host.regression.pg_catalog.int8
-\dC ).pg_catalog.int8
-cross-database references are not implemented: ).pg_catalog.int8
-\dC nonesuch.pg_catalog.int8
-cross-database references are not implemented: nonesuch.pg_catalog.int8
-\dd host.regression.pg_catalog.pg_class
-improper qualified name (too many dotted names): host.regression.pg_catalog.pg_class
-\dd [.pg_catalog.pg_class
-cross-database references are not implemented: [.pg_catalog.pg_class
-\dd nonesuch.pg_catalog.pg_class
-cross-database references are not implemented: nonesuch.pg_catalog.pg_class
-\dD host.regression.public.gtestdomain1
-improper qualified name (too many dotted names): host.regression.public.gtestdomain1
-\dD ].public.gtestdomain1
-cross-database references are not implemented: ].public.gtestdomain1
-\dD nonesuch.public.gtestdomain1
-cross-database references are not implemented: nonesuch.public.gtestdomain1
-\ddp host.regression.pg_catalog.pg_class
-improper qualified name (too many dotted names): host.regression.pg_catalog.pg_class
-\ddp {.pg_catalog.pg_class
-cross-database references are not implemented: {.pg_catalog.pg_class
-\ddp nonesuch.pg_catalog.pg_class
-cross-database references are not implemented: nonesuch.pg_catalog.pg_class
-\dE host.regression.public.ft
-improper qualified name (too many dotted names): host.regression.public.ft
-\dE }.public.ft
-cross-database references are not implemented: }.public.ft
-\dE nonesuch.public.ft
-cross-database references are not implemented: nonesuch.public.ft
-\di host.regression.public.tenk1_hundred
-improper qualified name (too many dotted names): host.regression.public.tenk1_hundred
-\di ..public.tenk1_hundred
-improper qualified name (too many dotted names): ..public.tenk1_hundred
-\di nonesuch.public.tenk1_hundred
-cross-database references are not implemented: nonesuch.public.tenk1_hundred
-\dm host.regression.public.mvtest_bb
-improper qualified name (too many dotted names): host.regression.public.mvtest_bb
-\dm ^.public.mvtest_bb
-cross-database references are not implemented: ^.public.mvtest_bb
-\dm nonesuch.public.mvtest_bb
-cross-database references are not implemented: nonesuch.public.mvtest_bb
-\ds host.regression.public.check_seq
-improper qualified name (too many dotted names): host.regression.public.check_seq
-\ds regression|mydb.public.check_seq
-cross-database references are not implemented: regression|mydb.public.check_seq
-\ds nonesuch.public.check_seq
-cross-database references are not implemented: nonesuch.public.check_seq
-\dt host.regression.public.b_star
-improper qualified name (too many dotted names): host.regression.public.b_star
-\dt regres+ion.public.b_star
-cross-database references are not implemented: regres+ion.public.b_star
-\dt nonesuch.public.b_star
-cross-database references are not implemented: nonesuch.public.b_star
-\dv host.regression.public.shoe
-improper qualified name (too many dotted names): host.regression.public.shoe
-\dv regress(ion).public.shoe
-cross-database references are not implemented: regress(ion).public.shoe
-\dv nonesuch.public.shoe
-cross-database references are not implemented: nonesuch.public.shoe
-\des nonesuch.server
-improper qualified name (too many dotted names): nonesuch.server
-\des regression.server
-improper qualified name (too many dotted names): regression.server
-\des nonesuch.server
-improper qualified name (too many dotted names): nonesuch.server
-\des regression.server
-improper qualified name (too many dotted names): regression.server
-\des nonesuch.username
-improper qualified name (too many dotted names): nonesuch.username
-\des regression.username
-improper qualified name (too many dotted names): regression.username
-\dew nonesuch.fdw
-improper qualified name (too many dotted names): nonesuch.fdw
-\dew regression.fdw
-improper qualified name (too many dotted names): regression.fdw
-\df host.regression.public.namelen
-improper qualified name (too many dotted names): host.regression.public.namelen
-\df regres[qrstuv]ion.public.namelen
-cross-database references are not implemented: regres[qrstuv]ion.public.namelen
-\df nonesuch.public.namelen
-cross-database references are not implemented: nonesuch.public.namelen
-\dF host.regression.pg_catalog.arabic
-improper qualified name (too many dotted names): host.regression.pg_catalog.arabic
-\dF regres{1,2}ion.pg_catalog.arabic
-cross-database references are not implemented: regres{1,2}ion.pg_catalog.arabic
-\dF nonesuch.pg_catalog.arabic
-cross-database references are not implemented: nonesuch.pg_catalog.arabic
-\dFd host.regression.pg_catalog.arabic_stem
-improper qualified name (too many dotted names): host.regression.pg_catalog.arabic_stem
-\dFd regres?ion.pg_catalog.arabic_stem
-cross-database references are not implemented: regres?ion.pg_catalog.arabic_stem
-\dFd nonesuch.pg_catalog.arabic_stem
-cross-database references are not implemented: nonesuch.pg_catalog.arabic_stem
-\dFp host.regression.pg_catalog.default
-improper qualified name (too many dotted names): host.regression.pg_catalog.default
-\dFp ^regression.pg_catalog.default
-cross-database references are not implemented: ^regression.pg_catalog.default
-\dFp nonesuch.pg_catalog.default
-cross-database references are not implemented: nonesuch.pg_catalog.default
-\dFt host.regression.pg_catalog.ispell
-improper qualified name (too many dotted names): host.regression.pg_catalog.ispell
-\dFt regression$.pg_catalog.ispell
-cross-database references are not implemented: regression$.pg_catalog.ispell
-\dFt nonesuch.pg_catalog.ispell
-cross-database references are not implemented: nonesuch.pg_catalog.ispell
-\dg nonesuch.pg_database_owner
-improper qualified name (too many dotted names): nonesuch.pg_database_owner
-\dg regression.pg_database_owner
-improper qualified name (too many dotted names): regression.pg_database_owner
-\dL host.regression.plpgsql
-improper qualified name (too many dotted names): host.regression.plpgsql
-\dL *.plpgsql
-cross-database references are not implemented: *.plpgsql
-\dL nonesuch.plpgsql
-cross-database references are not implemented: nonesuch.plpgsql
-\dn host.regression.public
-improper qualified name (too many dotted names): host.regression.public
-\dn """".public
-cross-database references are not implemented: """".public
-\dn nonesuch.public
-cross-database references are not implemented: nonesuch.public
-\do host.regression.public.!=-
-improper qualified name (too many dotted names): host.regression.public.!=-
-\do "regression|mydb".public.!=-
-cross-database references are not implemented: "regression|mydb".public.!=-
-\do nonesuch.public.!=-
-cross-database references are not implemented: nonesuch.public.!=-
-\dO host.regression.pg_catalog.POSIX
-improper qualified name (too many dotted names): host.regression.pg_catalog.POSIX
-\dO .pg_catalog.POSIX
-cross-database references are not implemented: .pg_catalog.POSIX
-\dO nonesuch.pg_catalog.POSIX
-cross-database references are not implemented: nonesuch.pg_catalog.POSIX
-\dp host.regression.public.a_star
-improper qualified name (too many dotted names): host.regression.public.a_star
-\dp "regres+ion".public.a_star
-cross-database references are not implemented: "regres+ion".public.a_star
-\dp nonesuch.public.a_star
-cross-database references are not implemented: nonesuch.public.a_star
-\dP host.regression.public.mlparted
-improper qualified name (too many dotted names): host.regression.public.mlparted
-\dP "regres(sion)".public.mlparted
-cross-database references are not implemented: "regres(sion)".public.mlparted
-\dP nonesuch.public.mlparted
-cross-database references are not implemented: nonesuch.public.mlparted
-\drds nonesuch.lc_messages
-improper qualified name (too many dotted names): nonesuch.lc_messages
-\drds regression.lc_messages
-improper qualified name (too many dotted names): regression.lc_messages
-\dRp public.mypub
-improper qualified name (too many dotted names): public.mypub
-\dRp regression.mypub
-improper qualified name (too many dotted names): regression.mypub
-\dRs public.mysub
-improper qualified name (too many dotted names): public.mysub
-\dRs regression.mysub
-improper qualified name (too many dotted names): regression.mysub
-\dT host.regression.public.widget
-improper qualified name (too many dotted names): host.regression.public.widget
-\dT "regression{1,2}".public.widget
-cross-database references are not implemented: "regression{1,2}".public.widget
-\dT nonesuch.public.widget
-cross-database references are not implemented: nonesuch.public.widget
-\dx regression.plpgsql
-improper qualified name (too many dotted names): regression.plpgsql
-\dx nonesuch.plpgsql
-improper qualified name (too many dotted names): nonesuch.plpgsql
-\dX host.regression.public.func_deps_stat
-improper qualified name (too many dotted names): host.regression.public.func_deps_stat
-\dX "^regression$".public.func_deps_stat
-cross-database references are not implemented: "^regression$".public.func_deps_stat
-\dX nonesuch.public.func_deps_stat
-cross-database references are not implemented: nonesuch.public.func_deps_stat
-\dy regression.myevt
-improper qualified name (too many dotted names): regression.myevt
-\dy nonesuch.myevt
-improper qualified name (too many dotted names): nonesuch.myevt
--- check that dots within quoted name segments are not counted
-\dA "no.such.access.method"
-List of access methods
- Name | Type 
-------+------
-(0 rows)
-
-\dt "no.such.table.relation"
-        List of relations
- Schema | Name | Type | Owner 
---------+------+------+-------
-(0 rows)
-
-\da "no.such.aggregate.function"
-                  List of aggregate functions
- Schema | Name | Result data type | Argument data types | Description 
---------+------+------------------+---------------------+-------------
-(0 rows)
-
-\dAc "no.such.operator.class"
-              List of operator classes
- AM | Input type | Storage type | Operator class | Default? 
-----+------------+--------------+----------------+----------
-(0 rows)
-
-\dAf "no.such.operator.family"
-       List of operator families
- AM | Operator family | Applicable types 
-----+-----------------+------------------
-(0 rows)
-
-\dAo "no.such.operator.of.operator.family"
-        List of operators of operator families
- AM | Operator family | Operator | Strategy | Purpose 
-----+-----------------+----------+----------+---------
-(0 rows)
-
-\dAp "no.such.operator.support.function.of.operator.family"
-                 List of support functions of operator families
- AM | Operator family | Registered left type | Registered right type | Number | Function 
-----+-----------------+----------------------+-----------------------+--------+----------
-(0 rows)
-
-\db "no.such.tablespace"
-    List of tablespaces
- Name | Owner | Location 
-------+-------+----------
-(0 rows)
-
-\dc "no.such.conversion"
-              List of conversions
- Schema | Name | Source | Destination | Default? 
---------+------+--------+-------------+----------
-(0 rows)
-
-\dC "no.such.cast"
-                List of casts
- Source type | Target type | Function | Implicit? 
--------------+-------------+----------+-----------
-(0 rows)
-
-\dd "no.such.object.description"
-        Object descriptions
- Schema | Name | Object | Description 
---------+------+--------+-------------
-(0 rows)
-
-\dD "no.such.domain"
-                     List of domains
- Schema | Name | Type | Collation | Nullable | Default | Check 
---------+------+------+-----------+----------+---------+-------
-(0 rows)
-
-\ddp "no.such.default.access.privilege"
-       Default access privileges
- Owner | Schema | Type | Access privileges 
--------+--------+------+-------------------
-(0 rows)
-
-\di "no.such.index.relation"
-           List of relations
- Schema | Name | Type | Owner | Table 
---------+------+------+-------+-------
-(0 rows)
-
-\dm "no.such.materialized.view"
-        List of relations
- Schema | Name | Type | Owner 
---------+------+------+-------
-(0 rows)
-
-\ds "no.such.relation"
-        List of relations
- Schema | Name | Type | Owner 
---------+------+------+-------
-(0 rows)
-
-\dt "no.such.relation"
-        List of relations
- Schema | Name | Type | Owner 
---------+------+------+-------
-(0 rows)
-
-\dv "no.such.relation"
-        List of relations
- Schema | Name | Type | Owner 
---------+------+------+-------
-(0 rows)
-
-\des "no.such.foreign.server"
-         List of foreign servers
- Name | Owner | Foreign-data wrapper 
-------+-------+----------------------
-(0 rows)
-
-\dew "no.such.foreign.data.wrapper"
-      List of foreign-data wrappers
- Name | Owner | Handler | Validator 
-------+-------+---------+-----------
-(0 rows)
-
-\df "no.such.function"
-                       List of functions
- Schema | Name | Result data type | Argument data types | Type 
---------+------+------------------+---------------------+------
-(0 rows)
-
-\dF "no.such.text.search.configuration"
-List of text search configurations
- Schema | Name | Description 
---------+------+-------------
-(0 rows)
-
-\dFd "no.such.text.search.dictionary"
-List of text search dictionaries
- Schema | Name | Description 
---------+------+-------------
-(0 rows)
-
-\dFp "no.such.text.search.parser"
- List of text search parsers
- Schema | Name | Description 
---------+------+-------------
-(0 rows)
-
-\dFt "no.such.text.search.template"
-List of text search templates
- Schema | Name | Description 
---------+------+-------------
-(0 rows)
-
-\dg "no.such.role"
-     List of roles
- Role name | Attributes 
------------+------------
-
-\dL "no.such.language"
-         List of languages
- Name | Owner | Trusted | Description 
-------+-------+---------+-------------
-(0 rows)
-
-\dn "no.such.schema"
-List of schemas
- Name | Owner 
-------+-------
-(0 rows)
-
-\do "no.such.operator"
-                           List of operators
- Schema | Name | Left arg type | Right arg type | Result type | Description 
---------+------+---------------+----------------+-------------+-------------
-(0 rows)
-
-\dO "no.such.collation"
-                               List of collations
- Schema | Name | Provider | Collate | Ctype | ICU Locale | ICU Rules | Deterministic? 
---------+------+----------+---------+-------+------------+-----------+----------------
-(0 rows)
-
-\dp "no.such.access.privilege"
-                          Access privileges
- Schema | Name | Type | Access privileges | Column privileges | Policies 
---------+------+------+-------------------+-------------------+----------
-(0 rows)
-
-\dP "no.such.partitioned.relation"
-         List of partitioned relations
- Schema | Name | Owner | Type | Parent name | Table 
---------+------+-------+------+-------------+-------
-(0 rows)
-
-\drds "no.such.setting"
-     List of settings
- Role | Database | Settings 
-------+----------+----------
-(0 rows)
-
-\dRp "no.such.publication"
-                          List of publications
- Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root 
-------+-------+------------+---------+---------+---------+-----------+----------
-(0 rows)
-
-\dRs "no.such.subscription"
-        List of subscriptions
- Name | Owner | Enabled | Publication 
-------+-------+---------+-------------
-(0 rows)
-
-\dT "no.such.data.type"
-   List of data types
- Schema | Name | Description 
---------+------+-------------
-(0 rows)
-
-\dx "no.such.installed.extension"
-      List of installed extensions
- Name | Version | Schema | Description 
-------+---------+--------+-------------
-(0 rows)
-
-\dX "no.such.extended.statistics"
-               List of extended statistics
- Schema | Name | Definition | Ndistinct | Dependencies | MCV 
---------+------+------------+-----------+--------------+-----
-(0 rows)
-
-\dy "no.such.event.trigger"
-            List of event triggers
- Name | Event | Owner | Enabled | Function | Tags 
-------+-------+-------+---------+----------+------
-(0 rows)
-
--- again, but with dotted schema qualifications.
-\dA "no.such.schema"."no.such.access.method"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.access.method"
-\dt "no.such.schema"."no.such.table.relation"
-        List of relations
- Schema | Name | Type | Owner 
---------+------+------+-------
-(0 rows)
-
-\da "no.such.schema"."no.such.aggregate.function"
-                  List of aggregate functions
- Schema | Name | Result data type | Argument data types | Description 
---------+------+------------------+---------------------+-------------
-(0 rows)
-
-\dAc "no.such.schema"."no.such.operator.class"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.operator.class"
-\dAf "no.such.schema"."no.such.operator.family"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.operator.family"
-\dAo "no.such.schema"."no.such.operator.of.operator.family"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.operator.of.operator.family"
-\dAp "no.such.schema"."no.such.operator.support.function.of.operator.family"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.operator.support.function.of.operator.family"
-\db "no.such.schema"."no.such.tablespace"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.tablespace"
-\dc "no.such.schema"."no.such.conversion"
-              List of conversions
- Schema | Name | Source | Destination | Default? 
---------+------+--------+-------------+----------
-(0 rows)
-
-\dC "no.such.schema"."no.such.cast"
-                List of casts
- Source type | Target type | Function | Implicit? 
--------------+-------------+----------+-----------
-(0 rows)
-
-\dd "no.such.schema"."no.such.object.description"
-        Object descriptions
- Schema | Name | Object | Description 
---------+------+--------+-------------
-(0 rows)
-
-\dD "no.such.schema"."no.such.domain"
-                     List of domains
- Schema | Name | Type | Collation | Nullable | Default | Check 
---------+------+------+-----------+----------+---------+-------
-(0 rows)
-
-\ddp "no.such.schema"."no.such.default.access.privilege"
-       Default access privileges
- Owner | Schema | Type | Access privileges 
--------+--------+------+-------------------
-(0 rows)
-
-\di "no.such.schema"."no.such.index.relation"
-           List of relations
- Schema | Name | Type | Owner | Table 
---------+------+------+-------+-------
-(0 rows)
-
-\dm "no.such.schema"."no.such.materialized.view"
-        List of relations
- Schema | Name | Type | Owner 
---------+------+------+-------
-(0 rows)
-
-\ds "no.such.schema"."no.such.relation"
-        List of relations
- Schema | Name | Type | Owner 
---------+------+------+-------
-(0 rows)
-
-\dt "no.such.schema"."no.such.relation"
-        List of relations
- Schema | Name | Type | Owner 
---------+------+------+-------
-(0 rows)
-
-\dv "no.such.schema"."no.such.relation"
-        List of relations
- Schema | Name | Type | Owner 
---------+------+------+-------
-(0 rows)
-
-\des "no.such.schema"."no.such.foreign.server"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.foreign.server"
-\dew "no.such.schema"."no.such.foreign.data.wrapper"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.foreign.data.wrapper"
-\df "no.such.schema"."no.such.function"
-                       List of functions
- Schema | Name | Result data type | Argument data types | Type 
---------+------+------------------+---------------------+------
-(0 rows)
-
-\dF "no.such.schema"."no.such.text.search.configuration"
-List of text search configurations
- Schema | Name | Description 
---------+------+-------------
-(0 rows)
-
-\dFd "no.such.schema"."no.such.text.search.dictionary"
-List of text search dictionaries
- Schema | Name | Description 
---------+------+-------------
-(0 rows)
-
-\dFp "no.such.schema"."no.such.text.search.parser"
- List of text search parsers
- Schema | Name | Description 
---------+------+-------------
-(0 rows)
-
-\dFt "no.such.schema"."no.such.text.search.template"
-List of text search templates
- Schema | Name | Description 
---------+------+-------------
-(0 rows)
-
-\dg "no.such.schema"."no.such.role"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.role"
-\dL "no.such.schema"."no.such.language"
-cross-database references are not implemented: "no.such.schema"."no.such.language"
-\do "no.such.schema"."no.such.operator"
-                           List of operators
- Schema | Name | Left arg type | Right arg type | Result type | Description 
---------+------+---------------+----------------+-------------+-------------
-(0 rows)
-
-\dO "no.such.schema"."no.such.collation"
-                               List of collations
- Schema | Name | Provider | Collate | Ctype | ICU Locale | ICU Rules | Deterministic? 
---------+------+----------+---------+-------+------------+-----------+----------------
-(0 rows)
-
-\dp "no.such.schema"."no.such.access.privilege"
-                          Access privileges
- Schema | Name | Type | Access privileges | Column privileges | Policies 
---------+------+------+-------------------+-------------------+----------
-(0 rows)
-
-\dP "no.such.schema"."no.such.partitioned.relation"
-         List of partitioned relations
- Schema | Name | Owner | Type | Parent name | Table 
---------+------+-------+------+-------------+-------
-(0 rows)
-
-\drds "no.such.schema"."no.such.setting"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.setting"
-\dRp "no.such.schema"."no.such.publication"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.publication"
-\dRs "no.such.schema"."no.such.subscription"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.subscription"
-\dT "no.such.schema"."no.such.data.type"
-   List of data types
- Schema | Name | Description 
---------+------+-------------
-(0 rows)
-
-\dx "no.such.schema"."no.such.installed.extension"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.installed.extension"
-\dX "no.such.schema"."no.such.extended.statistics"
-               List of extended statistics
- Schema | Name | Definition | Ndistinct | Dependencies | MCV 
---------+------+------------+-----------+--------------+-----
-(0 rows)
-
-\dy "no.such.schema"."no.such.event.trigger"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.event.trigger"
--- again, but with current database and dotted schema qualifications.
-\dt regression."no.such.schema"."no.such.table.relation"
-        List of relations
- Schema | Name | Type | Owner 
---------+------+------+-------
-(0 rows)
-
-\da regression."no.such.schema"."no.such.aggregate.function"
-                  List of aggregate functions
- Schema | Name | Result data type | Argument data types | Description 
---------+------+------------------+---------------------+-------------
-(0 rows)
-
-\dc regression."no.such.schema"."no.such.conversion"
-              List of conversions
- Schema | Name | Source | Destination | Default? 
---------+------+--------+-------------+----------
-(0 rows)
-
-\dC regression."no.such.schema"."no.such.cast"
-                List of casts
- Source type | Target type | Function | Implicit? 
--------------+-------------+----------+-----------
-(0 rows)
-
-\dd regression."no.such.schema"."no.such.object.description"
-        Object descriptions
- Schema | Name | Object | Description 
---------+------+--------+-------------
-(0 rows)
-
-\dD regression."no.such.schema"."no.such.domain"
-                     List of domains
- Schema | Name | Type | Collation | Nullable | Default | Check 
---------+------+------+-----------+----------+---------+-------
-(0 rows)
-
-\di regression."no.such.schema"."no.such.index.relation"
-           List of relations
- Schema | Name | Type | Owner | Table 
---------+------+------+-------+-------
-(0 rows)
-
-\dm regression."no.such.schema"."no.such.materialized.view"
-        List of relations
- Schema | Name | Type | Owner 
---------+------+------+-------
-(0 rows)
-
-\ds regression."no.such.schema"."no.such.relation"
-        List of relations
- Schema | Name | Type | Owner 
---------+------+------+-------
-(0 rows)
-
-\dt regression."no.such.schema"."no.such.relation"
-        List of relations
- Schema | Name | Type | Owner 
---------+------+------+-------
-(0 rows)
-
-\dv regression."no.such.schema"."no.such.relation"
-        List of relations
- Schema | Name | Type | Owner 
---------+------+------+-------
-(0 rows)
-
-\df regression."no.such.schema"."no.such.function"
-                       List of functions
- Schema | Name | Result data type | Argument data types | Type 
---------+------+------------------+---------------------+------
-(0 rows)
-
-\dF regression."no.such.schema"."no.such.text.search.configuration"
-List of text search configurations
- Schema | Name | Description 
---------+------+-------------
-(0 rows)
-
-\dFd regression."no.such.schema"."no.such.text.search.dictionary"
-List of text search dictionaries
- Schema | Name | Description 
---------+------+-------------
-(0 rows)
-
-\dFp regression."no.such.schema"."no.such.text.search.parser"
- List of text search parsers
- Schema | Name | Description 
---------+------+-------------
-(0 rows)
-
-\dFt regression."no.such.schema"."no.such.text.search.template"
-List of text search templates
- Schema | Name | Description 
---------+------+-------------
-(0 rows)
-
-\do regression."no.such.schema"."no.such.operator"
-                           List of operators
- Schema | Name | Left arg type | Right arg type | Result type | Description 
---------+------+---------------+----------------+-------------+-------------
-(0 rows)
-
-\dO regression."no.such.schema"."no.such.collation"
-                               List of collations
- Schema | Name | Provider | Collate | Ctype | ICU Locale | ICU Rules | Deterministic? 
---------+------+----------+---------+-------+------------+-----------+----------------
-(0 rows)
-
-\dp regression."no.such.schema"."no.such.access.privilege"
-                          Access privileges
- Schema | Name | Type | Access privileges | Column privileges | Policies 
---------+------+------+-------------------+-------------------+----------
-(0 rows)
-
-\dP regression."no.such.schema"."no.such.partitioned.relation"
-         List of partitioned relations
- Schema | Name | Owner | Type | Parent name | Table 
---------+------+-------+------+-------------+-------
-(0 rows)
-
-\dT regression."no.such.schema"."no.such.data.type"
-   List of data types
- Schema | Name | Description 
---------+------+-------------
-(0 rows)
-
-\dX regression."no.such.schema"."no.such.extended.statistics"
-               List of extended statistics
- Schema | Name | Definition | Ndistinct | Dependencies | MCV 
---------+------+------------+-----------+--------------+-----
-(0 rows)
-
--- again, but with dotted database and dotted schema qualifications.
-\dt "no.such.database"."no.such.schema"."no.such.table.relation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.table.relation"
-\da "no.such.database"."no.such.schema"."no.such.aggregate.function"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.aggregate.function"
-\dc "no.such.database"."no.such.schema"."no.such.conversion"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.conversion"
-\dC "no.such.database"."no.such.schema"."no.such.cast"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.cast"
-\dd "no.such.database"."no.such.schema"."no.such.object.description"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.object.description"
-\dD "no.such.database"."no.such.schema"."no.such.domain"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.domain"
-\ddp "no.such.database"."no.such.schema"."no.such.default.access.privilege"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.default.access.privilege"
-\di "no.such.database"."no.such.schema"."no.such.index.relation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.index.relation"
-\dm "no.such.database"."no.such.schema"."no.such.materialized.view"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.materialized.view"
-\ds "no.such.database"."no.such.schema"."no.such.relation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.relation"
-\dt "no.such.database"."no.such.schema"."no.such.relation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.relation"
-\dv "no.such.database"."no.such.schema"."no.such.relation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.relation"
-\df "no.such.database"."no.such.schema"."no.such.function"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.function"
-\dF "no.such.database"."no.such.schema"."no.such.text.search.configuration"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.text.search.configuration"
-\dFd "no.such.database"."no.such.schema"."no.such.text.search.dictionary"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.text.search.dictionary"
-\dFp "no.such.database"."no.such.schema"."no.such.text.search.parser"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.text.search.parser"
-\dFt "no.such.database"."no.such.schema"."no.such.text.search.template"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.text.search.template"
-\do "no.such.database"."no.such.schema"."no.such.operator"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.operator"
-\dO "no.such.database"."no.such.schema"."no.such.collation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.collation"
-\dp "no.such.database"."no.such.schema"."no.such.access.privilege"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.access.privilege"
-\dP "no.such.database"."no.such.schema"."no.such.partitioned.relation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.partitioned.relation"
-\dT "no.such.database"."no.such.schema"."no.such.data.type"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.data.type"
-\dX "no.such.database"."no.such.schema"."no.such.extended.statistics"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.extended.statistics"
--- check \drg and \du
-CREATE ROLE regress_du_role0;
-CREATE ROLE regress_du_role1;
-CREATE ROLE regress_du_role2;
-CREATE ROLE regress_du_admin;
-GRANT regress_du_role0 TO regress_du_admin WITH ADMIN TRUE;
-GRANT regress_du_role1 TO regress_du_admin WITH ADMIN TRUE;
-GRANT regress_du_role2 TO regress_du_admin WITH ADMIN TRUE;
-GRANT regress_du_role0 TO regress_du_role1 WITH ADMIN TRUE, INHERIT TRUE, SET TRUE GRANTED BY regress_du_admin;
-GRANT regress_du_role0 TO regress_du_role2 WITH ADMIN TRUE, INHERIT FALSE, SET FALSE GRANTED BY regress_du_admin;
-GRANT regress_du_role1 TO regress_du_role2 WITH ADMIN TRUE , INHERIT FALSE, SET TRUE GRANTED BY regress_du_admin;
-GRANT regress_du_role0 TO regress_du_role1 WITH ADMIN FALSE, INHERIT TRUE, SET FALSE GRANTED BY regress_du_role1;
-GRANT regress_du_role0 TO regress_du_role2 WITH ADMIN FALSE, INHERIT TRUE , SET TRUE GRANTED BY regress_du_role1;
-GRANT regress_du_role0 TO regress_du_role1 WITH ADMIN FALSE, INHERIT FALSE, SET TRUE GRANTED BY regress_du_role2;
-GRANT regress_du_role0 TO regress_du_role2 WITH ADMIN FALSE, INHERIT FALSE, SET FALSE GRANTED BY regress_du_role2;
-\drg regress_du_role*
-                             List of role grants
-    Role name     |    Member of     |       Options       |     Grantor      
-------------------+------------------+---------------------+------------------
- regress_du_role1 | regress_du_role0 | ADMIN, INHERIT, SET | regress_du_admin
- regress_du_role1 | regress_du_role0 | INHERIT             | regress_du_role1
- regress_du_role1 | regress_du_role0 | SET                 | regress_du_role2
- regress_du_role2 | regress_du_role0 | ADMIN               | regress_du_admin
- regress_du_role2 | regress_du_role0 | INHERIT, SET        | regress_du_role1
- regress_du_role2 | regress_du_role0 |                     | regress_du_role2
- regress_du_role2 | regress_du_role1 | ADMIN, SET          | regress_du_admin
-(7 rows)
-
-\du regress_du_role*
-           List of roles
-    Role name     |  Attributes  
-------------------+--------------
- regress_du_role0 | Cannot login
- regress_du_role1 | Cannot login
- regress_du_role2 | Cannot login
-
-DROP ROLE regress_du_role0;
-DROP ROLE regress_du_role1;
-DROP ROLE regress_du_role2;
-DROP ROLE regress_du_admin;
--- Test display of empty privileges.
-BEGIN;
--- Create an owner for tested objects because output contains owner name.
-CREATE ROLE regress_zeropriv_owner;
-SET LOCAL ROLE regress_zeropriv_owner;
-CREATE DOMAIN regress_zeropriv_domain AS int;
-REVOKE ALL ON DOMAIN regress_zeropriv_domain FROM CURRENT_USER, PUBLIC;
-\dD+ regress_zeropriv_domain
-                                             List of domains
- Schema |          Name           |  Type   | Collation | Nullable | Default | Check | Access privileges | Description 
---------+-------------------------+---------+-----------+----------+---------+-------+-------------------+-------------
- public | regress_zeropriv_domain | integer |           |          |         |       | (none)            | 
-(1 row)
-
-CREATE PROCEDURE regress_zeropriv_proc() LANGUAGE sql AS '';
-REVOKE ALL ON PROCEDURE regress_zeropriv_proc() FROM CURRENT_USER, PUBLIC;
-\df+ regress_zeropriv_proc
-                                                                        List of functions
- Schema |         Name          | Result data type | Argument data types | Type | Volatility | Parallel |         Owner          | Security | Access privileges | Language | Internal name | Description 
---------+-----------------------+------------------+---------------------+------+------------+----------+------------------------+----------+-------------------+----------+---------------+-------------
- public | regress_zeropriv_proc |                  |                     | proc | volatile   | unsafe   | regress_zeropriv_owner | invoker  | (none)            | sql      |               | 
-(1 row)
-
-CREATE TABLE regress_zeropriv_tbl (a int);
-REVOKE ALL ON TABLE regress_zeropriv_tbl FROM CURRENT_USER;
-\dp regress_zeropriv_tbl
-                                     Access privileges
- Schema |         Name         | Type  | Access privileges | Column privileges | Policies 
---------+----------------------+-------+-------------------+-------------------+----------
- public | regress_zeropriv_tbl | table | (none)            |                   | 
-(1 row)
-
-CREATE TYPE regress_zeropriv_type AS (a int);
-REVOKE ALL ON TYPE regress_zeropriv_type FROM CURRENT_USER, PUBLIC;
-\dT+ regress_zeropriv_type
-                                                  List of data types
- Schema |         Name          |     Internal name     | Size  | Elements |         Owner          | Access privileges | Description 
---------+-----------------------+-----------------------+-------+----------+------------------------+-------------------+-------------
- public | regress_zeropriv_type | regress_zeropriv_type | tuple |          | regress_zeropriv_owner | (none)            | 
-(1 row)
-
-ROLLBACK;
--- Test display of default privileges with \pset null.
-CREATE TABLE defprivs (a int);
-\pset null '(default)'
-\z defprivs
-                               Access privileges
- Schema |   Name   | Type  | Access privileges | Column privileges | Policies 
---------+----------+-------+-------------------+-------------------+----------
- public | defprivs | table | (default)         |                   | 
-(1 row)
-
-\pset null ''
-DROP TABLE defprivs;
+psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL:  the database system is not yet accepting connections
+DETAIL:  Consistent recovery state has not been yet reached.
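(The psql.out hunk above exercises psql's ON_ERROR_ROLLBACK and AUTOCOMMIT
variables. As a reading aid, a minimal sketch of the behavior under test;
the table name t is illustrative, not taken from the test itself:

    \set ON_ERROR_ROLLBACK on
    BEGIN;
    INSERT INTO t VALUES (1);   -- kept
    SELECT 1/0;                 -- fails, but only this statement is rolled back
    INSERT INTO t VALUES (2);   -- still succeeds in the same transaction
    COMMIT;                     -- commits both rows

With ON_ERROR_ROLLBACK enabled, psql issues a savepoint before each statement
of an open transaction and rolls back to it on error, so one failing statement
does not abort the whole transaction.)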
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/psql_crosstab.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/psql_crosstab.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/psql_crosstab.out 2024-03-07 14:25:00.333122000 +0000
+++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/psql_crosstab.out 2024-03-07 14:27:17.154251000 +0000
@@ -1,216 +1,2 @@
---
--- \crosstabview
---
-CREATE TABLE ctv_data (v, h, c, i, d) AS
-VALUES
-   ('v1','h2','foo', 3, '2015-04-01'::date),
-   ('v2','h1','bar', 3, '2015-01-02'),
-   ('v1','h0','baz', NULL, '2015-07-12'),
-   ('v0','h4','qux', 4, '2015-07-15'),
-   ('v0','h4','dbl', -3, '2014-12-15'),
-   ('v0',NULL,'qux', 5, '2014-07-15'),
-   ('v1','h2','quux',7, '2015-04-04');
--- make plans more stable
-ANALYZE ctv_data;
--- running \crosstabview after query uses query in buffer
-SELECT v, EXTRACT(year FROM d), count(*)
- FROM ctv_data
- GROUP BY 1, 2
- ORDER BY 1, 2;
- v  | extract | count 
-----+---------+-------
- v0 |    2014 |     2
- v0 |    2015 |     1
- v1 |    2015 |     3
- v2 |    2015 |     1
-(4 rows)
-
--- basic usage with 3 columns
- \crosstabview
- v  | 2014 | 2015 
-----+------+------
- v0 |    2 |    1
- v1 |      |    3
- v2 |      |    1
-(3 rows)
-
--- ordered months in horizontal header, quoted column name
-SELECT v, to_char(d, 'Mon') AS "month name", EXTRACT(month FROM d) AS num,
- count(*) FROM ctv_data GROUP BY 1,2,3 ORDER BY 1
- \crosstabview v "month name" 4 num
- v  | Jan | Apr | Jul | Dec 
-----+-----+-----+-----+-----
- v0 |     |     |   2 |   1
- v1 |     |   2 |   1 |    
- v2 |   1 |     |     |    
-(3 rows)
-
--- ordered months in vertical header, ordered years in horizontal header
-SELECT EXTRACT(year FROM d) AS year, to_char(d,'Mon') AS """month"" name",
- EXTRACT(month FROM d) AS month,
- format('sum=%s avg=%s', sum(i), avg(i)::numeric(2,1))
- FROM ctv_data
- GROUP BY EXTRACT(year FROM d), to_char(d,'Mon'), EXTRACT(month FROM d)
-ORDER BY month
-\crosstabview """month"" name" year format year
- "month" name |      2014       |      2015      
---------------+-----------------+----------------
- Jan          |                 | sum=3 avg=3.0
- Apr          |                 | sum=10 avg=5.0
- Jul          | sum=5 avg=5.0   | sum=4 avg=4.0
- Dec          | sum=-3 avg=-3.0 | 
-(4 rows)
-
--- combine contents vertically into the same cell (V/H duplicates)
-SELECT v, h, string_agg(c, E'\n') FROM ctv_data GROUP BY v, h ORDER BY 1,2,3
- \crosstabview 1 2 3
- v  | h4  |     | h0  |  h2  | h1  
-----+-----+-----+-----+------+-----
- v0 | qux+| qux |     |      | 
-    | dbl |     |     |      | 
- v1 |     |     | baz | foo +| 
-    |     |     |     | quux | 
- v2 |     |     |     |      | bar
-(3 rows)
-
--- horizontal ASC order from window function
-SELECT v,h, string_agg(c, E'\n') AS c, row_number() OVER(ORDER BY h) AS r
-FROM ctv_data GROUP BY v, h ORDER BY 1,3,2
- \crosstabview v h c r
- v  | h0  | h1  |  h2  | h4  |     
-----+-----+-----+------+-----+-----
- v0 |     |     |      | qux+| qux
-    |     |     |      | dbl | 
- v1 | baz |     | foo +|     | 
-    |     |     | quux |     | 
- v2 |     | bar |      |     | 
-(3 rows)
-
--- horizontal DESC order from window function
-SELECT v, h, string_agg(c, E'\n') AS c, row_number() OVER(ORDER BY h DESC) AS r
-FROM ctv_data GROUP BY v, h ORDER BY 1,3,2
- \crosstabview v h c r
- v  |     | h4  |  h2  | h1  | h0  
-----+-----+-----+------+-----+-----
- v0 | qux | qux+|      |     | 
-    |     | dbl |      |     | 
- v1 |     |     | foo +|     | baz
-    |     |     | quux |     | 
- v2 |     |     |      | bar | 
-(3 rows)
-
--- horizontal ASC order from window function, NULLs pushed rightmost
-SELECT v,h, string_agg(c, E'\n') AS c, row_number() OVER(ORDER BY h NULLS LAST) AS r
-FROM ctv_data GROUP BY v, h ORDER BY 1,3,2
- \crosstabview v h c r
- v  | h0  | h1  |  h2  | h4  |     
-----+-----+-----+------+-----+-----
- v0 |     |     |      | qux+| qux
-    |     |     |      | dbl | 
- v1 | baz |     | foo +|     | 
-    |     |     | quux |     | 
- v2 |     | bar |      |     | 
-(3 rows)
-
--- only null, no column name, 2 columns: error
-SELECT null,null \crosstabview
-\crosstabview: query must return at least three columns
--- only null, no column name, 3 columns: works
-SELECT null,null,null \crosstabview
- ?column? |  
-----------+--
-          | 
-(1 row)
-
--- null display
-\pset null '#null#'
-SELECT v,h, string_agg(i::text, E'\n') AS i FROM ctv_data
-GROUP BY v, h ORDER BY h,v
- \crosstabview v h i
- v  |   h0   | h1 | h2 | h4 | #null# 
-----+--------+----+----+----+--------
- v1 | #null# |    | 3 +|    | 
-    |        |    | 7  |    | 
- v2 |        | 3  |    |    | 
- v0 |        |    |    | 4 +| 5
-    |        |    |    | -3 | 
-(3 rows)
-
-\pset null ''
--- refer to columns by position
-SELECT v,h,string_agg(i::text, E'\n'), string_agg(c, E'\n')
-FROM ctv_data GROUP BY v, h ORDER BY h,v
- \crosstabview 2 1 4
- h  |  v1  | v2  | v0  
-----+------+-----+-----
- h0 | baz  |     | 
- h1 |      | bar | 
- h2 | foo +|     | 
-    | quux |     | 
- h4 |      |     | qux+
-    |      |     | dbl 
-    |      |     | qux
-(5 rows)
-
--- refer to columns by positions and names mixed
-SELECT v,h, string_agg(i::text, E'\n') AS i, string_agg(c, E'\n') AS c
-FROM ctv_data GROUP BY v, h ORDER BY h,v
- \crosstabview 1 "h" 4
- v  | h0  | h1  |  h2  | h4  |     
-----+-----+-----+------+-----+-----
- v1 | baz |     | foo +|     | 
-    |     |     | quux |     | 
- v2 |     | bar |      |     | 
- v0 |     |     |      | qux+| qux
-    |     |     |      | dbl | 
-(3 rows)
-
--- refer to columns by quoted names, check downcasing of unquoted name
-SELECT 1 as "22", 2 as b, 3 as "Foo"
- \crosstabview "22" B "Foo"
- 22 | 2 
-----+---
-  1 | 3
-(1 row)
-
--- error: bad column name
-SELECT v,h,c,i FROM ctv_data
- \crosstabview v h j
-\crosstabview: column name not found: "j"
--- error: need to quote name
-SELECT 1 as "22", 2 as b, 3 as "Foo"
- \crosstabview 1 2 Foo
-\crosstabview: column name not found: "foo"
--- error: need to not quote name
-SELECT 1 as "22", 2 as b, 3 as "Foo"
- \crosstabview 1 "B" "Foo"
-\crosstabview: column name not found: "B"
--- error: bad column number
-SELECT v,h,i,c FROM ctv_data
- \crosstabview 2 1 5
-\crosstabview: column number 5 is out of range 1..4
--- error: same H and V columns
-SELECT v,h,i,c FROM ctv_data
- \crosstabview 2 h 4
-\crosstabview: vertical and horizontal headers must be different columns
--- error: too many columns
-SELECT a,a,1 FROM generate_series(1,3000) AS a
- \crosstabview
-\crosstabview: maximum number of columns (1600) exceeded
--- error: only one column
-SELECT 1 \crosstabview
-\crosstabview: query must return at least three columns
-DROP TABLE ctv_data;
--- check error reporting (bug #14476)
-CREATE TABLE ctv_data (x int, y int, v text);
-INSERT INTO ctv_data SELECT 1, x, '*' || x FROM generate_series(1,10) x;
-SELECT * FROM ctv_data \crosstabview
- x | 1  | 2  | 3  | 4  | 5  | 6  | 7  | 8  | 9  | 10  
----+----+----+----+----+----+----+----+----+----+-----
- 1 | *1 | *2 | *3 | *4 | *5 | *6 | *7 | *8 | *9 | *10
-(1 row)
-
-INSERT INTO ctv_data VALUES (1, 10, '*'); -- duplicate data to cause error
-SELECT * FROM ctv_data \crosstabview
-\crosstabview: query result contains multiple data values for row "1", column "10"
-DROP TABLE ctv_data;
+psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL:  the database system is not yet accepting connections
+DETAIL:  Consistent recovery state has not been yet reached.
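(The psql_crosstab.out hunk above covers psql's \crosstabview pivot display.
A minimal sketch of the feature, against an assumed illustrative table
sales(year int, quarter text, amount numeric) rather than the test's ctv_data:

    SELECT year, quarter, sum(amount)
      FROM sales
     GROUP BY year, quarter
    \crosstabview year quarter sum

The first argument names the vertical header column, the second the column
whose distinct values become the horizontal headers, and the third the column
that fills the cells.)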
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/amutils.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/amutils.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/amutils.out 2024-03-07 14:25:00.329311000 +0000
+++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/amutils.out 2024-03-07 14:27:17.151177000 +0000
@@ -1,254 +1,2 @@
---
--- Test index AM property-reporting functions
---
-select prop,
-       pg_indexam_has_property(a.oid, prop) as "AM",
-       pg_index_has_property('onek_hundred'::regclass, prop) as "Index",
-       pg_index_column_has_property('onek_hundred'::regclass, 1, prop) as "Column"
-  from pg_am a,
-       unnest(array['asc', 'desc', 'nulls_first', 'nulls_last',
-                    'orderable', 'distance_orderable', 'returnable',
-                    'search_array', 'search_nulls',
-                    'clusterable', 'index_scan', 'bitmap_scan',
-                    'backward_scan',
-                    'can_order', 'can_unique', 'can_multi_col',
-                    'can_exclude', 'can_include',
-                    'bogus']::text[])
-         with ordinality as u(prop,ord)
- where a.amname = 'btree'
- order by ord;
-        prop        | AM | Index | Column 
---------------------+----+-------+--------
- asc                |    |       | t
- desc               |    |       | f
- nulls_first        |    |       | f
- nulls_last         |    |       | t
- orderable          |    |       | t
- distance_orderable |    |       | f
- returnable         |    |       | t
- search_array       |    |       | t
- search_nulls       |    |       | t
- clusterable        |    | t     | 
- index_scan         |    | t     | 
- bitmap_scan        |    | t     | 
- backward_scan      |    | t     | 
- can_order          | t  |       | 
- can_unique         | t  |       | 
- can_multi_col      | t  |       | 
- can_exclude        | t  |       | 
- can_include        | t  |       | 
- bogus              |    |       | 
-(19 rows)
-
-select prop,
-       pg_indexam_has_property(a.oid, prop) as "AM",
-       pg_index_has_property('gcircleind'::regclass, prop) as "Index",
-       pg_index_column_has_property('gcircleind'::regclass, 1, prop) as "Column"
-  from pg_am a,
-       unnest(array['asc', 'desc', 'nulls_first', 'nulls_last',
-                    'orderable', 'distance_orderable', 'returnable',
-                    'search_array', 'search_nulls',
-                    'clusterable', 'index_scan', 'bitmap_scan',
-                    'backward_scan',
-                    'can_order', 'can_unique', 'can_multi_col',
-                    'can_exclude', 'can_include',
-                    'bogus']::text[])
-         with ordinality as u(prop,ord)
- where a.amname = 'gist'
- order by ord;
-        prop        | AM | Index | Column 
---------------------+----+-------+--------
- asc                |    |       | f
- desc               |    |       | f
- nulls_first        |    |       | f
- nulls_last         |    |       | f
- orderable          |    |       | f
- distance_orderable |    |       | t
- returnable         |    |       | f
- search_array       |    |       | f
- search_nulls       |    |       | t
- clusterable        |    | t     | 
- index_scan         |    | t     | 
- bitmap_scan        |    | t     | 
- backward_scan      |    | f     | 
- can_order          | f  |       | 
- can_unique         | f  |       | 
- can_multi_col      | t  |       | 
- can_exclude        | t  |       | 
- can_include        | t  |       | 
- bogus              |    |       | 
-(19 rows)
-
-select prop,
-       pg_index_column_has_property('onek_hundred'::regclass, 1, prop) as btree,
-       pg_index_column_has_property('hash_i4_index'::regclass, 1, prop) as hash,
-       pg_index_column_has_property('gcircleind'::regclass, 1, prop) as gist,
-       pg_index_column_has_property('sp_radix_ind'::regclass, 1, prop) as spgist_radix,
-       pg_index_column_has_property('sp_quad_ind'::regclass, 1, prop) as spgist_quad,
-       pg_index_column_has_property('botharrayidx'::regclass, 1, prop) as gin,
-       pg_index_column_has_property('brinidx'::regclass, 1, prop) as brin
-  from unnest(array['asc', 'desc', 'nulls_first', 'nulls_last',
-                    'orderable', 'distance_orderable', 'returnable',
-                    'search_array', 'search_nulls',
-                    'bogus']::text[])
-         with ordinality as u(prop,ord)
- order by ord;
-        prop        | btree | hash | gist | spgist_radix | spgist_quad | gin | brin 
---------------------+-------+------+------+--------------+-------------+-----+------
- asc                | t     | f    | f    | f            | f           | f   | f
- desc               | f     | f    | f    | f            | f           | f   | f
- nulls_first        | f     | f    | f    | f            | f           | f   | f
- nulls_last         | t     | f    | f    | f            | f           | f   | f
- orderable          | t     | f    | f    | f            | f           | f   | f
- distance_orderable | f     | f    | t    | f            | t           | f   | f
- returnable         | t     | f    | f    | t            | t           | f   | f
- search_array       | t     | f    | f    | f            | f           | f   | f
- search_nulls       | t     | f    | t    | t            | t           | f   | t
- bogus              |       |      |      |              |             |     | 
-(10 rows)
-
-select prop,
-       pg_index_has_property('onek_hundred'::regclass, prop) as btree,
-       pg_index_has_property('hash_i4_index'::regclass, prop) as hash,
-       pg_index_has_property('gcircleind'::regclass, prop) as gist,
-       pg_index_has_property('sp_radix_ind'::regclass, prop) as spgist,
-       pg_index_has_property('botharrayidx'::regclass, prop) as gin,
-       pg_index_has_property('brinidx'::regclass, prop) as brin
-  from unnest(array['clusterable', 'index_scan', 'bitmap_scan',
-                    'backward_scan',
-                    'bogus']::text[])
-         with ordinality as u(prop,ord)
- order by ord;
-     prop      | btree | hash | gist | spgist | gin | brin 
----------------+-------+------+------+--------+-----+------
- clusterable   | t     | f    | t    | f      | f   | f
- index_scan    | t     | t    | t    | t      | f   | f
- bitmap_scan   | t     | t    | t    | t      | t   | t
- backward_scan | t     | t    | f    | f      | f   | f
- bogus         |       |      |      |        |     | 
-(5 rows)
-
-select amname, prop, pg_indexam_has_property(a.oid, prop) as p
-  from pg_am a,
-       unnest(array['can_order', 'can_unique', 'can_multi_col',
-                    'can_exclude', 'can_include', 'bogus']::text[])
-         with ordinality as u(prop,ord)
- where amtype = 'i'
- order by amname, ord;
- amname |     prop      | p 
---------+---------------+---
- brin   | can_order     | f
- brin   | can_unique    | f
- brin   | can_multi_col | t
- brin   | can_exclude   | f
- brin   | can_include   | f
- brin   | bogus         | 
- btree  | can_order     | t
- btree  | can_unique    | t
- btree  | can_multi_col | t
- btree  | can_exclude   | t
- btree  | can_include   | t
- btree  | bogus         | 
- gin    | can_order     | f
- gin    | can_unique    | f
- gin    | can_multi_col | t
- gin    | can_exclude   | f
- gin    | can_include   | f
- gin    | bogus         | 
- gist   | can_order     | f
- gist   | can_unique    | f
- gist   | can_multi_col | t
- gist   | can_exclude   | t
- gist   | can_include   | t
- gist   | bogus         | 
- hash   | can_order     | f
- hash   | can_unique    | f
- hash   | can_multi_col | f
- hash   | can_exclude   | t
- hash   | can_include   | f
- hash   | bogus         | 
- spgist | can_order     | f
- spgist | can_unique    | f
- spgist | can_multi_col | f
- spgist | can_exclude   | t
- spgist | can_include   | t
- spgist | bogus         | 
-(36 rows)
-
---
--- additional checks for pg_index_column_has_property
---
-CREATE TEMP TABLE foo (f1 int, f2 int, f3 int, f4 int);
-CREATE INDEX fooindex ON foo (f1 desc, f2 asc, f3 nulls first, f4 nulls last);
-select col, prop, pg_index_column_has_property(o, col, prop)
-  from (values ('fooindex'::regclass)) v1(o),
-       (values (1,'orderable'),(2,'asc'),(3,'desc'),
-               (4,'nulls_first'),(5,'nulls_last'),
-               (6, 'bogus')) v2(idx,prop),
-       generate_series(1,4) col
- order by col, idx;
- col |    prop     | pg_index_column_has_property 
------+-------------+------------------------------
-   1 | orderable   | t
-   1 | asc         | f
-   1 | desc        | t
-   1 | nulls_first | t
-   1 | nulls_last  | f
-   1 | bogus       | 
-   2 | orderable   | t
-   2 | asc         | t
-   2 | desc        | f
-   2 | nulls_first | f
-   2 | nulls_last  | t
-   2 | bogus       | 
-   3 | orderable   | t
-   3 | asc         | t
-   3 | desc        | f
-   3 | nulls_first | t
-   3 | nulls_last  | f
-   3 | bogus       | 
-   4 | orderable   | t
-   4 | asc         | t
-   4 | desc        | f
-   4 | nulls_first | f
-   4 | nulls_last  | t
-   4 | bogus       | 
-(24 rows)
-
-CREATE INDEX foocover ON foo (f1) INCLUDE (f2,f3);
-select col, prop, pg_index_column_has_property(o, col, prop)
-  from (values ('foocover'::regclass)) v1(o),
-       (values (1,'orderable'),(2,'asc'),(3,'desc'),
-               (4,'nulls_first'),(5,'nulls_last'),
-               (6,'distance_orderable'),(7,'returnable'),
-               (8, 'bogus')) v2(idx,prop),
-       generate_series(1,3) col
- order by col, idx;
- col |        prop        | pg_index_column_has_property 
------+--------------------+------------------------------
-   1 | orderable          | t
-   1 | asc                | t
-   1 | desc               | f
-   1 | nulls_first        | f
-   1 | nulls_last         | t
-   1 | distance_orderable | f
-   1 | returnable         | t
-   1 | bogus              | 
-   2 | orderable          | f
-   2 | asc                | 
-   2 | desc               | 
-   2 | nulls_first        | 
-   2 | nulls_last         | 
-   2 | distance_orderable | f
-   2 | returnable         | t
-   2 | bogus              | 
-   3 | orderable          | f
-   3 | asc                | 
-   3 | desc               | 
-   3 | nulls_first        | 
-   3 | nulls_last         | 
-   3 | distance_orderable | f
-   3 | returnable         | t
-   3 | bogus              | 
-(24 rows)
-
+psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL:  the database system is not yet accepting connections
+DETAIL:  Consistent recovery state has not been yet reached.
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/stats_ext.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/stats_ext.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/stats_ext.out 2024-03-07 14:25:00.333976000 +0000
+++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/stats_ext.out 2024-03-07 14:27:17.148585000 +0000
@@ -1,3292 +1,2 @@
--- Generic extended statistics support
---
--- Note: tables for which we check estimated row counts should be created
--- with autovacuum_enabled = off, so that we don't have unstable results
--- from auto-analyze happening when we didn't expect it.
---
--- check the number of estimated/actual rows in the top node
-create function check_estimated_rows(text) returns table (estimated int, actual int)
-language plpgsql as
-$$
-declare
-    ln text;
-    tmp text[];
-    first_row bool := true;
-begin
-    for ln in
-        execute format('explain analyze %s', $1)
-    loop
-        if first_row then
-            first_row := false;
-            tmp := regexp_match(ln, 'rows=(\d*) .* rows=(\d*)');
-            return query select tmp[1]::int, tmp[2]::int;
-        end if;
-    end loop;
-end;
-$$;
--- Verify failures
-CREATE TABLE ext_stats_test (x text, y int, z int);
-CREATE STATISTICS tst;
-ERROR:  syntax error at or near ";"
-LINE 1: CREATE STATISTICS tst;
-                             ^
-CREATE STATISTICS tst ON a, b;
-ERROR:  syntax error at or near ";"
-LINE 1: CREATE STATISTICS tst ON a, b;
-                                     ^
-CREATE STATISTICS tst FROM sometab;
-ERROR:  syntax error at or near "FROM"
-LINE 1: CREATE STATISTICS tst FROM sometab;
-                              ^
-CREATE STATISTICS tst ON a, b FROM nonexistent;
-ERROR:  relation "nonexistent" does not exist
-CREATE STATISTICS tst ON a, b FROM ext_stats_test;
-ERROR:  column "a" does not exist
-CREATE STATISTICS tst ON x, x, y FROM ext_stats_test;
-ERROR:  duplicate column name in statistics definition
-CREATE STATISTICS tst ON x, x, y, x, x, y, x, x, y FROM ext_stats_test;
-ERROR:  cannot have more than 8 columns in statistics
-CREATE STATISTICS tst ON x, x, y, x, x, (x || 'x'), (y + 1), (x || 'x'), (x || 'x'), (y + 1) FROM ext_stats_test;
-ERROR:  cannot have more than 8 columns in statistics
-CREATE STATISTICS tst ON (x || 'x'), (x || 'x'), (y + 1), (x || 'x'), (x || 'x'), (y + 1), (x || 'x'), (x || 'x'), (y + 1) FROM ext_stats_test;
-ERROR:  cannot have more than 8 columns in statistics
-CREATE STATISTICS tst ON (x || 'x'), (x || 'x'), y FROM ext_stats_test;
-ERROR:  duplicate expression in statistics definition
-CREATE STATISTICS tst (unrecognized) ON x, y FROM ext_stats_test;
-ERROR:  unrecognized statistics kind "unrecognized"
kind "unrecognized" --- incorrect expressions -CREATE STATISTICS tst ON (y) FROM ext_stats_test; -- single column reference -ERROR: extended statistics require at least 2 columns -CREATE STATISTICS tst ON y + z FROM ext_stats_test; -- missing parentheses -ERROR: syntax error at or near "+" -LINE 1: CREATE STATISTICS tst ON y + z FROM ext_stats_test; - ^ -CREATE STATISTICS tst ON (x, y) FROM ext_stats_test; -- tuple expression -ERROR: syntax error at or near "," -LINE 1: CREATE STATISTICS tst ON (x, y) FROM ext_stats_test; - ^ -DROP TABLE ext_stats_test; --- Ensure stats are dropped sanely, and test IF NOT EXISTS while at it -CREATE TABLE ab1 (a INTEGER, b INTEGER, c INTEGER); -CREATE STATISTICS IF NOT EXISTS ab1_a_b_stats ON a, b FROM ab1; -COMMENT ON STATISTICS ab1_a_b_stats IS 'new comment'; -CREATE ROLE regress_stats_ext; -SET SESSION AUTHORIZATION regress_stats_ext; -COMMENT ON STATISTICS ab1_a_b_stats IS 'changed comment'; -ERROR: must be owner of statistics object ab1_a_b_stats -DROP STATISTICS ab1_a_b_stats; -ERROR: must be owner of statistics object ab1_a_b_stats -ALTER STATISTICS ab1_a_b_stats RENAME TO ab1_a_b_stats_new; -ERROR: must be owner of statistics object ab1_a_b_stats -RESET SESSION AUTHORIZATION; -DROP ROLE regress_stats_ext; -CREATE STATISTICS IF NOT EXISTS ab1_a_b_stats ON a, b FROM ab1; -NOTICE: statistics object "ab1_a_b_stats" already exists, skipping -DROP STATISTICS ab1_a_b_stats; -CREATE SCHEMA regress_schema_2; -CREATE STATISTICS regress_schema_2.ab1_a_b_stats ON a, b FROM ab1; --- Let's also verify the pg_get_statisticsobjdef output looks sane. -SELECT pg_get_statisticsobjdef(oid) FROM pg_statistic_ext WHERE stxname = 'ab1_a_b_stats'; - pg_get_statisticsobjdef -------------------------------------------------------------------- - CREATE STATISTICS regress_schema_2.ab1_a_b_stats ON a, b FROM ab1 -(1 row) - -DROP STATISTICS regress_schema_2.ab1_a_b_stats; --- Ensure statistics are dropped when columns are -CREATE STATISTICS ab1_b_c_stats ON b, c FROM ab1; -CREATE STATISTICS ab1_a_b_c_stats ON a, b, c FROM ab1; -CREATE STATISTICS ab1_b_a_stats ON b, a FROM ab1; -ALTER TABLE ab1 DROP COLUMN a; -\d ab1 - Table "public.ab1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - b | integer | | | - c | integer | | | -Statistics objects: - "public.ab1_b_c_stats" ON b, c FROM ab1 - --- Ensure statistics are dropped when table is -SELECT stxname FROM pg_statistic_ext WHERE stxname LIKE 'ab1%'; - stxname ---------------- - ab1_b_c_stats -(1 row) - -DROP TABLE ab1; -SELECT stxname FROM pg_statistic_ext WHERE stxname LIKE 'ab1%'; - stxname ---------- -(0 rows) - --- Ensure things work sanely with SET STATISTICS 0 -CREATE TABLE ab1 (a INTEGER, b INTEGER); -ALTER TABLE ab1 ALTER a SET STATISTICS 0; -INSERT INTO ab1 SELECT a, a%23 FROM generate_series(1, 1000) a; -CREATE STATISTICS ab1_a_b_stats ON a, b FROM ab1; -ANALYZE ab1; -WARNING: statistics object "public.ab1_a_b_stats" could not be computed for relation "public.ab1" -ALTER TABLE ab1 ALTER a SET STATISTICS -1; --- setting statistics target 0 skips the statistics, without printing any message, so check catalog -ALTER STATISTICS ab1_a_b_stats SET STATISTICS 0; -\d ab1 - Table "public.ab1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Statistics objects: - "public.ab1_a_b_stats" ON a, b FROM ab1; STATISTICS 0 - -ANALYZE ab1; -SELECT stxname, stxdndistinct, stxddependencies, 
stxdmcv, stxdinherit - FROM pg_statistic_ext s LEFT JOIN pg_statistic_ext_data d ON (d.stxoid = s.oid) - WHERE s.stxname = 'ab1_a_b_stats'; - stxname | stxdndistinct | stxddependencies | stxdmcv | stxdinherit ----------------+---------------+------------------+---------+------------- - ab1_a_b_stats | | | | -(1 row) - -ALTER STATISTICS ab1_a_b_stats SET STATISTICS -1; -\d+ ab1 - Table "public.ab1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - a | integer | | | | plain | | - b | integer | | | | plain | | -Statistics objects: - "public.ab1_a_b_stats" ON a, b FROM ab1 - --- partial analyze doesn't build stats either -ANALYZE ab1 (a); -WARNING: statistics object "public.ab1_a_b_stats" could not be computed for relation "public.ab1" -ANALYZE ab1; -DROP TABLE ab1; -ALTER STATISTICS ab1_a_b_stats SET STATISTICS 0; -ERROR: statistics object "ab1_a_b_stats" does not exist -ALTER STATISTICS IF EXISTS ab1_a_b_stats SET STATISTICS 0; -NOTICE: statistics object "ab1_a_b_stats" does not exist, skipping --- Ensure we can build statistics for tables with inheritance. -CREATE TABLE ab1 (a INTEGER, b INTEGER); -CREATE TABLE ab1c () INHERITS (ab1); -INSERT INTO ab1 VALUES (1,1); -CREATE STATISTICS ab1_a_b_stats ON a, b FROM ab1; -ANALYZE ab1; -DROP TABLE ab1 CASCADE; -NOTICE: drop cascades to table ab1c --- Tests for stats with inheritance -CREATE TABLE stxdinh(a int, b int); -CREATE TABLE stxdinh1() INHERITS(stxdinh); -CREATE TABLE stxdinh2() INHERITS(stxdinh); -INSERT INTO stxdinh SELECT mod(a,50), mod(a,100) FROM generate_series(0, 1999) a; -INSERT INTO stxdinh1 SELECT mod(a,100), mod(a,100) FROM generate_series(0, 999) a; -INSERT INTO stxdinh2 SELECT mod(a,100), mod(a,100) FROM generate_series(0, 999) a; -VACUUM ANALYZE stxdinh, stxdinh1, stxdinh2; --- Ensure non-inherited stats are not applied to inherited query --- Without stats object, it looks like this -SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* GROUP BY 1, 2'); - estimated | actual ------------+-------- - 400 | 150 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* WHERE a = 0 AND b = 0'); - estimated | actual ------------+-------- - 3 | 40 -(1 row) - -CREATE STATISTICS stxdinh ON a, b FROM stxdinh; -VACUUM ANALYZE stxdinh, stxdinh1, stxdinh2; --- See if the extended stats affect the estimates -SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* GROUP BY 1, 2'); - estimated | actual ------------+-------- - 150 | 150 -(1 row) - --- Dependencies are applied at individual relations (within append), so --- this estimate changes a bit because we improve estimates for the parent -SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* WHERE a = 0 AND b = 0'); - estimated | actual ------------+-------- - 22 | 40 -(1 row) - --- Ensure correct (non-inherited) stats are applied to inherited query -SELECT * FROM check_estimated_rows('SELECT a, b FROM ONLY stxdinh GROUP BY 1, 2'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT a, b FROM ONLY stxdinh WHERE a = 0 AND b = 0'); - estimated | actual ------------+-------- - 20 | 20 -(1 row) - -DROP TABLE stxdinh, stxdinh1, stxdinh2; --- Ensure inherited stats ARE applied to inherited query in partitioned table -CREATE TABLE stxdinp(i int, a int, b int) PARTITION BY RANGE (i); -CREATE TABLE stxdinp1 PARTITION OF stxdinp FOR VALUES FROM (1) TO (100); -INSERT INTO 
stxdinp SELECT 1, a/100, a/100 FROM generate_series(1, 999) a; -CREATE STATISTICS stxdinp ON (a + 1), a, b FROM stxdinp; -VACUUM ANALYZE stxdinp; -- partitions are processed recursively -SELECT 1 FROM pg_statistic_ext WHERE stxrelid = 'stxdinp'::regclass; - ?column? ----------- - 1 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinp GROUP BY 1, 2'); - estimated | actual ------------+-------- - 10 | 10 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT a + 1, b FROM ONLY stxdinp GROUP BY 1, 2'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -DROP TABLE stxdinp; --- basic test for statistics on expressions -CREATE TABLE ab1 (a INTEGER, b INTEGER, c TIMESTAMP, d TIMESTAMPTZ); --- expression stats may be built on a single expression column -CREATE STATISTICS ab1_exprstat_1 ON (a+b) FROM ab1; --- with a single expression, we only enable expression statistics -CREATE STATISTICS ab1_exprstat_2 ON (a+b) FROM ab1; -SELECT stxkind FROM pg_statistic_ext WHERE stxname = 'ab1_exprstat_2'; - stxkind ---------- - {e} -(1 row) - --- adding anything to the expression builds all statistics kinds -CREATE STATISTICS ab1_exprstat_3 ON (a+b), a FROM ab1; -SELECT stxkind FROM pg_statistic_ext WHERE stxname = 'ab1_exprstat_3'; - stxkind ------------ - {d,f,m,e} -(1 row) - --- date_trunc on timestamptz is not immutable, but that should not matter -CREATE STATISTICS ab1_exprstat_4 ON date_trunc('day', d) FROM ab1; --- date_trunc on timestamp is immutable -CREATE STATISTICS ab1_exprstat_5 ON date_trunc('day', c) FROM ab1; --- check use of a boolean-returning expression -CREATE STATISTICS ab1_exprstat_6 ON - (case a when 1 then true else false end), b FROM ab1; --- insert some data and run analyze, to test that these cases build properly -INSERT INTO ab1 -SELECT x / 10, x / 3, - '2020-10-01'::timestamp + x * interval '1 day', - '2020-10-01'::timestamptz + x * interval '1 day' -FROM generate_series(1, 100) x; -ANALYZE ab1; --- apply some stats -SELECT * FROM check_estimated_rows('SELECT * FROM ab1 WHERE (case a when 1 then true else false end) AND b=2'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -DROP TABLE ab1; --- Verify supported object types for extended statistics -CREATE schema tststats; -CREATE TABLE tststats.t (a int, b int, c text); -CREATE INDEX ti ON tststats.t (a, b); -CREATE SEQUENCE tststats.s; -CREATE VIEW tststats.v AS SELECT * FROM tststats.t; -CREATE MATERIALIZED VIEW tststats.mv AS SELECT * FROM tststats.t; -CREATE TYPE tststats.ty AS (a int, b int, c text); -CREATE FOREIGN DATA WRAPPER extstats_dummy_fdw; -CREATE SERVER extstats_dummy_srv FOREIGN DATA WRAPPER extstats_dummy_fdw; -CREATE FOREIGN TABLE tststats.f (a int, b int, c text) SERVER extstats_dummy_srv; -CREATE TABLE tststats.pt (a int, b int, c text) PARTITION BY RANGE (a, b); -CREATE TABLE tststats.pt1 PARTITION OF tststats.pt FOR VALUES FROM (-10, -10) TO (10, 10); -CREATE STATISTICS tststats.s1 ON a, b FROM tststats.t; -CREATE STATISTICS tststats.s2 ON a, b FROM tststats.ti; -ERROR: cannot define statistics for relation "ti" -DETAIL: This operation is not supported for indexes. -CREATE STATISTICS tststats.s3 ON a, b FROM tststats.s; -ERROR: cannot define statistics for relation "s" -DETAIL: This operation is not supported for sequences. -CREATE STATISTICS tststats.s4 ON a, b FROM tststats.v; -ERROR: cannot define statistics for relation "v" -DETAIL: This operation is not supported for views. 
-CREATE STATISTICS tststats.s5 ON a, b FROM tststats.mv;
-CREATE STATISTICS tststats.s6 ON a, b FROM tststats.ty;
-ERROR: cannot define statistics for relation "ty"
-DETAIL: This operation is not supported for composite types.
-CREATE STATISTICS tststats.s7 ON a, b FROM tststats.f;
-CREATE STATISTICS tststats.s8 ON a, b FROM tststats.pt;
-CREATE STATISTICS tststats.s9 ON a, b FROM tststats.pt1;
-DO $$
-DECLARE
- relname text := reltoastrelid::regclass FROM pg_class WHERE oid = 'tststats.t'::regclass;
-BEGIN
- EXECUTE 'CREATE STATISTICS tststats.s10 ON a, b FROM ' || relname;
-EXCEPTION WHEN wrong_object_type THEN
- RAISE NOTICE 'stats on toast table not created';
-END;
-$$;
-NOTICE: stats on toast table not created
-DROP SCHEMA tststats CASCADE;
-NOTICE: drop cascades to 7 other objects
-DETAIL: drop cascades to table tststats.t
-drop cascades to sequence tststats.s
-drop cascades to view tststats.v
-drop cascades to materialized view tststats.mv
-drop cascades to type tststats.ty
-drop cascades to foreign table tststats.f
-drop cascades to table tststats.pt
-DROP FOREIGN DATA WRAPPER extstats_dummy_fdw CASCADE;
-NOTICE: drop cascades to server extstats_dummy_srv
--- n-distinct tests
-CREATE TABLE ndistinct (
- filler1 TEXT,
- filler2 NUMERIC,
- a INT,
- b INT,
- filler3 DATE,
- c INT,
- d INT
-)
-WITH (autovacuum_enabled = off);
--- over-estimates when using only per-column statistics
-INSERT INTO ndistinct (a, b, c, filler1)
- SELECT i/100, i/100, i/100, (i/100) || ' dollars and zero cents'
- FROM generate_series(1,1000) s(i);
-ANALYZE ndistinct;
--- Group Aggregate, due to over-estimate of the number of groups
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 100 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c');
- estimated | actual
------------+--------
- 100 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c');
- estimated | actual
------------+--------
- 100 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d');
- estimated | actual
------------+--------
- 200 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d');
- estimated | actual
------------+--------
- 200 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (a+1)');
- estimated | actual
------------+--------
- 100 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)');
- estimated | actual
------------+--------
- 100 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)');
- estimated | actual
------------+--------
- 100 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)');
- estimated | actual
------------+--------
- 100 | 11
-(1 row)
-
--- correct command
-CREATE STATISTICS s10 ON a, b, c FROM ndistinct;
-ANALYZE ndistinct;
-SELECT s.stxkind, d.stxdndistinct
- FROM pg_statistic_ext s, pg_statistic_ext_data d
- WHERE s.stxrelid = 'ndistinct'::regclass
- AND d.stxoid = s.oid;
- stxkind | stxdndistinct
---------+-----------------------------------------------------
- {d,f,m} | {"3, 4": 11, "3, 6": 11, "4, 6": 11, "3, 4, 6": 11}
-(1 row)
-
--- minor improvement, make sure the ctid does not break the matching
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY ctid, a, b');
- estimated | actual
------------+--------
- 1000 | 1000
-(1 row)
-
--- Hash Aggregate, thanks to estimates improved by the statistic
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 11 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c');
- estimated | actual
------------+--------
- 11 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c');
- estimated | actual
------------+--------
- 11 | 11
-(1 row)
-
--- partial improvement (match on attributes)
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (a+1)');
- estimated | actual
------------+--------
- 11 | 11
-(1 row)
-
--- expressions - no improvement
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)');
- estimated | actual
------------+--------
- 11 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)');
- estimated | actual
------------+--------
- 11 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)');
- estimated | actual
------------+--------
- 11 | 11
-(1 row)
-
--- last two plans keep using Group Aggregate, because 'd' is not covered
--- by the statistic and while it's NULL-only we assume 200 values for it
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d');
- estimated | actual
------------+--------
- 200 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d');
- estimated | actual
------------+--------
- 200 | 11
-(1 row)
-
-TRUNCATE TABLE ndistinct;
--- under-estimates when using only per-column statistics
-INSERT INTO ndistinct (a, b, c, filler1)
- SELECT mod(i,13), mod(i,17), mod(i,19),
- mod(i,23) || ' dollars and zero cents'
- FROM generate_series(1,1000) s(i);
-ANALYZE ndistinct;
-SELECT s.stxkind, d.stxdndistinct
- FROM pg_statistic_ext s, pg_statistic_ext_data d
- WHERE s.stxrelid = 'ndistinct'::regclass
- AND d.stxoid = s.oid;
- stxkind | stxdndistinct
---------+----------------------------------------------------------
- {d,f,m} | {"3, 4": 221, "3, 6": 247, "4, 6": 323, "3, 4, 6": 1000}
-(1 row)
-
--- correct estimates
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 221 | 221
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c');
- estimated | actual
------------+--------
- 1000 | 1000
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d');
- estimated | actual
------------+--------
- 1000 | 1000
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d');
- estimated | actual
------------+--------
- 323 | 323
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, d');
- estimated | actual
------------+--------
- 200 | 13
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (a+1)');
- estimated | actual
------------+--------
- 221 | 221
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)');
- estimated | actual
------------+--------
- 221 | 221
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)');
- estimated | actual
------------+--------
- 1000 | 1000
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)');
- estimated | actual
------------+--------
- 221 | 221
-(1 row)
-
-DROP STATISTICS s10;
-SELECT s.stxkind, d.stxdndistinct
- FROM pg_statistic_ext s, pg_statistic_ext_data d
- WHERE s.stxrelid = 'ndistinct'::regclass
- AND d.stxoid = s.oid;
- stxkind | stxdndistinct
---------+---------------
-(0 rows)
-
--- dropping the statistics results in under-estimates
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 100 | 221
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c');
- estimated | actual
------------+--------
- 100 | 1000
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d');
- estimated | actual
------------+--------
- 200 | 1000
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d');
- estimated | actual
------------+--------
- 200 | 323
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, d');
- estimated | actual
------------+--------
- 200 | 13
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (a+1)');
- estimated | actual
------------+--------
- 100 | 221
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)');
- estimated | actual
------------+--------
- 100 | 221
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)');
- estimated | actual
------------+--------
- 100 | 1000
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)');
- estimated | actual
------------+--------
- 100 | 221
-(1 row)
-
--- ndistinct estimates with statistics on expressions
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)');
- estimated | actual
------------+--------
- 100 | 221
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)');
- estimated | actual
------------+--------
- 100 | 1000
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)');
- estimated | actual
------------+--------
- 100 | 221
-(1 row)
-
-CREATE STATISTICS s10 (ndistinct) ON (a+1), (b+100), (2*c) FROM ndistinct;
-ANALYZE ndistinct;
-SELECT s.stxkind, d.stxdndistinct
- FROM pg_statistic_ext s, pg_statistic_ext_data d
- WHERE s.stxrelid = 'ndistinct'::regclass
- AND d.stxoid = s.oid;
- stxkind | stxdndistinct
---------+-------------------------------------------------------------------
- {d,e} | {"-1, -2": 221, "-1, -3": 247, "-2, -3": 323, "-1, -2, -3": 1000}
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)');
- estimated | actual
------------+--------
- 221 | 221
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)');
- estimated | actual
------------+--------
- 1000 | 1000
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)');
- estimated | actual
------------+--------
- 221 | 221
-(1 row)
-
-DROP STATISTICS s10;
--- a mix of attributes and expressions
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 100 | 221
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (2*c)');
- estimated | actual
------------+--------
- 100 | 247
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (2*c)');
- estimated | actual
------------+--------
- 100 | 1000
-(1 row)
-
-CREATE STATISTICS s10 (ndistinct) ON a, b, (2*c) FROM ndistinct;
-ANALYZE ndistinct;
-SELECT s.stxkind, d.stxdndistinct
- FROM pg_statistic_ext s, pg_statistic_ext_data d
- WHERE s.stxrelid = 'ndistinct'::regclass
- AND d.stxoid = s.oid;
- stxkind | stxdndistinct
---------+-------------------------------------------------------------
- {d,e} | {"3, 4": 221, "3, -1": 247, "4, -1": 323, "3, 4, -1": 1000}
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 221 | 221
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (2*c)');
- estimated | actual
------------+--------
- 247 | 247
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (2*c)');
- estimated | actual
------------+--------
- 1000 | 1000
-(1 row)
-
-DROP STATISTICS s10;
--- combination of multiple ndistinct statistics, with/without expressions
-TRUNCATE ndistinct;
--- two mostly independent groups of columns
-INSERT INTO ndistinct (a, b, c, d)
- SELECT mod(i,3), mod(i,9), mod(i,5), mod(i,20)
- FROM generate_series(1,1000) s(i);
-ANALYZE ndistinct;
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 27 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)');
- estimated | actual
------------+--------
- 27 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b');
- estimated | actual
------------+--------
- 27 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)');
- estimated | actual
------------+--------
- 27 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c');
- estimated | actual
------------+--------
- 100 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)');
- estimated | actual
------------+--------
- 100 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)');
- estimated | actual
------------+--------
- 100 | 180
-(1 row)
-
--- basic statistics on both attributes (no expressions)
-CREATE STATISTICS s11 (ndistinct) ON a, b FROM ndistinct;
-CREATE STATISTICS s12 (ndistinct) ON c, d FROM ndistinct;
-ANALYZE ndistinct;
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c');
- estimated | actual
------------+--------
- 45 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)');
- estimated | actual
------------+--------
- 45 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)');
- estimated | actual
------------+--------
- 100 | 180
-(1 row)
-
--- replace the second statistics by statistics on expressions
-DROP STATISTICS s12;
-CREATE STATISTICS s12 (ndistinct) ON (c * 10), (d - 1) FROM ndistinct;
-ANALYZE ndistinct;
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c');
- estimated | actual
------------+--------
- 45 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)');
- estimated | actual
------------+--------
- 45 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)');
- estimated | actual
------------+--------
- 100 | 180
-(1 row)
-
--- replace the second statistics by statistics on both attributes and expressions
-DROP STATISTICS s12;
-CREATE STATISTICS s12 (ndistinct) ON c, d, (c * 10), (d - 1) FROM ndistinct;
-ANALYZE ndistinct;
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c');
- estimated | actual
------------+--------
- 45 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)');
- estimated | actual
------------+--------
- 45 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)');
- estimated | actual
------------+--------
- 100 | 180
-(1 row)
-
--- replace the other statistics by statistics on both attributes and expressions
-DROP STATISTICS s11;
-CREATE STATISTICS s11 (ndistinct) ON a, b, (a*5), (b+1) FROM ndistinct;
-ANALYZE ndistinct;
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c');
- estimated | actual
------------+--------
- 45 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)');
- estimated | actual
------------+--------
- 45 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)');
- estimated | actual
------------+--------
- 100 | 180
-(1 row)
-
--- replace statistics by somewhat overlapping ones (this expected to get worse estimate
--- because the first statistics shall be applied to 3 columns, and the second one can't
--- be really applied)
-DROP STATISTICS s11;
-DROP STATISTICS s12;
-CREATE STATISTICS s11 (ndistinct) ON a, b, (a*5), (b+1) FROM ndistinct;
-CREATE STATISTICS s12 (ndistinct) ON a, (b+1), (c * 10) FROM ndistinct;
-ANALYZE ndistinct;
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c');
- estimated | actual
------------+--------
- 45 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)');
- estimated | actual
------------+--------
- 100 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)');
- estimated | actual
------------+--------
- 100 | 180
-(1 row)
-
-DROP STATISTICS s11;
-DROP STATISTICS s12;
--- functional dependencies tests
-CREATE TABLE functional_dependencies (
- filler1 TEXT,
- filler2 NUMERIC,
- a INT,
- b TEXT,
- filler3 DATE,
- c INT,
- d TEXT
-)
-WITH (autovacuum_enabled = off);
-CREATE INDEX fdeps_ab_idx ON functional_dependencies (a, b);
-CREATE INDEX fdeps_abc_idx ON functional_dependencies (a, b, c);
--- random data (no functional dependencies)
-INSERT INTO functional_dependencies (a, b, c, filler1)
- SELECT mod(i, 5), mod(i, 7), mod(i, 11), i FROM generate_series(1,1000) s(i);
-ANALYZE functional_dependencies;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
- 29 | 29
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
- 3 | 3
-(1 row)
-
--- create statistics
-CREATE STATISTICS func_deps_stat (dependencies) ON a, b, c FROM functional_dependencies;
-ANALYZE functional_dependencies;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
- 29 | 29
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
- 3 | 3
-(1 row)
-
--- a => b, a => c, b => c
-TRUNCATE functional_dependencies;
-DROP STATISTICS func_deps_stat;
--- now do the same thing, but with expressions
-INSERT INTO functional_dependencies (a, b, c, filler1)
- SELECT i, i, i, i FROM generate_series(1,5000) s(i);
-ANALYZE functional_dependencies;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE mod(a, 11) = 1 AND mod(b::int, 13) = 1');
- estimated | actual
------------+--------
- 1 | 35
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE mod(a, 11) = 1 AND mod(b::int, 13) = 1 AND mod(c, 7) = 1');
- estimated | actual
------------+--------
- 1 | 5
-(1 row)
-
--- create statistics
-CREATE STATISTICS func_deps_stat (dependencies) ON (mod(a,11)), (mod(b::int, 13)), (mod(c, 7)) FROM functional_dependencies;
-ANALYZE functional_dependencies;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE mod(a, 11) = 1 AND mod(b::int, 13) = 1');
- estimated | actual
------------+--------
- 35 | 35
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE mod(a, 11) = 1 AND mod(b::int, 13) = 1 AND mod(c, 7) = 1');
- estimated | actual
------------+--------
- 5 | 5
-(1 row)
-
--- a => b, a => c, b => c
-TRUNCATE functional_dependencies;
-DROP STATISTICS func_deps_stat;
-INSERT INTO functional_dependencies (a, b, c, filler1)
- SELECT mod(i,100), mod(i,50), mod(i,25), i FROM generate_series(1,5000) s(i);
-ANALYZE functional_dependencies;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
--- IN
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ''1''');
- estimated | actual
------------+--------
- 2 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b IN (''1'', ''2'')');
- estimated | actual
------------+--------
- 4 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b IN (''1'', ''2'')');
- estimated | actual
------------+--------
- 8 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b = ''1''');
- estimated | actual
------------+--------
- 4 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 26, 51, 76) AND b IN (''1'', ''26'') AND c = 1');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 26, 51, 76) AND b IN (''1'', ''26'') AND c IN (1)');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 26, 27, 51, 52, 76, 77) AND b IN (''1'', ''2'', ''26'', ''27'') AND c IN (1, 2)');
- estimated | actual
------------+--------
- 3 | 400
-(1 row)
-
--- OR clauses referencing the same attribute
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 51) AND b = ''1''');
- estimated | actual
------------+--------
- 2 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 51) AND (b = ''1'' OR b = ''2'')');
- estimated | actual
------------+--------
- 4 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 2 OR a = 51 OR a = 52) AND (b = ''1'' OR b = ''2'')');
- estimated | actual
------------+--------
- 8 | 200
-(1 row)
-
--- OR clauses referencing different attributes
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR b = ''1'') AND b = ''1''');
- estimated | actual
------------+--------
- 3 | 100
-(1 row)
-
--- ANY
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 51]) AND b = ''1''');
- estimated | actual
------------+--------
- 2 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 51]) AND b = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 4 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 8 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 26, 51, 76]) AND b = ANY (ARRAY[''1'', ''26'']) AND c = 1');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 26, 51, 76]) AND b = ANY (ARRAY[''1'', ''26'']) AND c = ANY (ARRAY[1])');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 2, 26, 27, 51, 52, 76, 77]) AND b = ANY (ARRAY[''1'', ''2'', ''26'', ''27'']) AND c = ANY (ARRAY[1, 2])');
- estimated | actual
------------+--------
- 3 | 400
-(1 row)
-
--- ANY with inequalities should not benefit from functional dependencies
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a < ANY (ARRAY[1, 51]) AND b > ''1''');
- estimated | actual
------------+--------
- 2472 | 2400
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a >= ANY (ARRAY[1, 51]) AND b <= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1441 | 1250
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a <= ANY (ARRAY[1, 2, 51, 52]) AND b >= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 3909 | 2550
-(1 row)
-
--- ALL (should not benefit from functional dependencies)
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ALL (ARRAY[''1''])');
- estimated | actual
------------+--------
- 2 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
--- create statistics
-CREATE STATISTICS func_deps_stat (dependencies) ON a, b, c FROM functional_dependencies;
-ANALYZE functional_dependencies;
--- print the detected dependencies
-SELECT dependencies FROM pg_stats_ext WHERE statistics_name = 'func_deps_stat';
- dependencies
------------------------------------------------------------------------------------------------------------
- {"3 => 4": 1.000000, "3 => 6": 1.000000, "4 => 6": 1.000000, "3, 4 => 6": 1.000000, "3, 6 => 4": 1.000000}
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
--- IN
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ''1''');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b IN (''1'', ''2'')');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b IN (''1'', ''2'')');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b = ''1''');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 26, 51, 76) AND b IN (''1'', ''26'') AND c = 1');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 26, 51, 76) AND b IN (''1'', ''26'') AND c IN (1)');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 26, 27, 51, 52, 76, 77) AND b IN (''1'', ''2'', ''26'', ''27'') AND c IN (1, 2)');
- estimated | actual
------------+--------
- 400 | 400
-(1 row)
-
--- OR clauses referencing the same attribute
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 51) AND b = ''1''');
- estimated | actual
------------+--------
- 99 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 51) AND (b = ''1'' OR b = ''2'')');
- estimated | actual
------------+--------
- 99 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 2 OR a = 51 OR a = 52) AND (b = ''1'' OR b = ''2'')');
- estimated | actual
------------+--------
- 197 | 200
-(1 row)
-
--- OR clauses referencing different attributes are incompatible
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR b = ''1'') AND b = ''1''');
- estimated | actual
------------+--------
- 3 | 100
-(1 row)
-
--- ANY
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 51]) AND b = ''1''');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 51]) AND b = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 26, 51, 76]) AND b = ANY (ARRAY[''1'', ''26'']) AND c = 1');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 26, 51, 76]) AND b = ANY (ARRAY[''1'', ''26'']) AND c = ANY (ARRAY[1])');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 2, 26, 27, 51, 52, 76, 77]) AND b = ANY (ARRAY[''1'', ''2'', ''26'', ''27'']) AND c = ANY (ARRAY[1, 2])');
- estimated | actual
------------+--------
- 400 | 400
-(1 row)
-
--- ANY with inequalities should not benefit from functional dependencies
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a < ANY (ARRAY[1, 51]) AND b > ''1''');
- estimated | actual
------------+--------
- 2472 | 2400
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a >= ANY (ARRAY[1, 51]) AND b <= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1441 | 1250
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a <= ANY (ARRAY[1, 2, 51, 52]) AND b >= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 3909 | 2550
-(1 row)
-
--- ALL (should not benefit from functional dependencies)
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ALL (ARRAY[''1''])');
- estimated | actual
------------+--------
- 2 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
--- changing the type of column c causes all its stats to be dropped, reverting
--- to default estimates without any statistics, i.e. 0.5% selectivity for each
--- condition
-ALTER TABLE functional_dependencies ALTER COLUMN c TYPE numeric;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-ANALYZE functional_dependencies;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-DROP STATISTICS func_deps_stat;
--- now try functional dependencies with expressions
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1'' AND (c + 1) = 2');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
--- IN
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) IN (''1'', ''2'')');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) IN (''1'', ''2'')');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 52, 102, 152) AND upper(b) IN (''1'', ''26'') AND (c + 1) = 2');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 52, 102, 152) AND upper(b) IN (''1'', ''26'') AND (c + 1) IN (2)');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 52, 54, 102, 104, 152, 154) AND upper(b) IN (''1'', ''2'', ''26'', ''27'') AND (c + 1) IN (2, 3)');
- estimated | actual
------------+--------
- 1 | 400
-(1 row)
-
--- OR clauses referencing the same attribute
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 102) AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 102) AND (upper(b) = ''1'' OR upper(b) = ''2'')');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 4 OR (a * 2) = 102 OR (a * 2) = 104) AND (upper(b) = ''1'' OR upper(b) = ''2'')');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
--- OR clauses referencing different attributes
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR upper(b) = ''1'') AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
--- ANY
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 102]) AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 102]) AND upper(b) = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 4, 102, 104]) AND upper(b) = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 52, 102, 152]) AND upper(b) = ANY (ARRAY[''1'', ''26'']) AND (c + 1) = 2');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 52, 102, 152]) AND upper(b) = ANY (ARRAY[''1'', ''26'']) AND (c + 1) = ANY (ARRAY[2])');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 4, 52, 54, 102, 104, 152, 154]) AND upper(b) = ANY (ARRAY[''1'', ''2'', ''26'', ''27'']) AND (c + 1) = ANY (ARRAY[2, 3])');
- estimated | actual
------------+--------
- 1 | 400
-(1 row)
-
--- ANY with inequalities should not benefit from functional dependencies
--- the estimates however improve thanks to having expression statistics
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) < ANY (ARRAY[2, 102]) AND upper(b) > ''1''');
- estimated | actual
------------+--------
- 926 | 2400
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) >= ANY (ARRAY[2, 102]) AND upper(b) <= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1543 | 1250
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) <= ANY (ARRAY[2, 4, 102, 104]) AND upper(b) >= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 2229 | 2550
-(1 row)
-
--- ALL (should not benefit from functional dependencies)
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ALL (ARRAY[''1''])');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
--- create statistics on expressions
-CREATE STATISTICS func_deps_stat (dependencies) ON (a * 2), upper(b), (c + 1) FROM functional_dependencies;
-ANALYZE functional_dependencies;
--- print the detected dependencies
-SELECT dependencies FROM pg_stats_ext WHERE statistics_name = 'func_deps_stat';
- dependencies
-------------------------------------------------------------------------------------------------------------------------
- {"-1 => -2": 1.000000, "-1 => -3": 1.000000, "-2 => -3": 1.000000, "-1, -2 => -3": 1.000000, "-1, -3 => -2": 1.000000}
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1'' AND (c + 1) = 2');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
--- IN
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) IN (''1'', ''2'')');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) IN (''1'', ''2'')');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 52, 102, 152) AND upper(b) IN (''1'', ''26'') AND (c + 1) = 2');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 52, 102, 152) AND upper(b) IN (''1'', ''26'') AND (c + 1) IN (2)');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 52, 54, 102, 104, 152, 154) AND upper(b) IN (''1'', ''2'', ''26'', ''27'') AND (c + 1) IN (2, 3)');
- estimated | actual
------------+--------
- 400 | 400
-(1 row)
-
--- OR clauses referencing the same attribute
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 102) AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 99 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 102) AND (upper(b) = ''1'' OR upper(b) = ''2'')');
- estimated | actual
------------+--------
- 99 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 4 OR (a * 2) = 102 OR (a * 2) = 104) AND (upper(b) = ''1'' OR upper(b) = ''2'')');
- estimated | actual
------------+--------
- 197 | 200
-(1 row)
-
--- OR clauses referencing different attributes
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR upper(b) = ''1'') AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 3 | 100
-(1 row)
-
--- ANY
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 102]) AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 102]) AND upper(b) = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 4, 102, 104]) AND upper(b) = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 52, 102, 152]) AND upper(b) = ANY (ARRAY[''1'', ''26'']) AND (c + 1) = 2');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 52, 102, 152]) AND upper(b) = ANY (ARRAY[''1'', ''26'']) AND (c + 1) = ANY (ARRAY[2])');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 4, 52, 54, 102, 104, 152, 154]) AND upper(b) = ANY (ARRAY[''1'', ''2'', ''26'', ''27'']) AND (c + 1) = ANY (ARRAY[2, 3])');
- estimated | actual
------------+--------
- 400 | 400
-(1 row)
-
--- ANY with inequalities should not benefit from functional dependencies
--- the estimates however improve thanks to having expression statistics
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) < ANY (ARRAY[2, 102]) AND upper(b) > ''1''');
- estimated | actual
------------+--------
- 2472 | 2400
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) >= ANY (ARRAY[2, 102]) AND upper(b) <= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1441 | 1250
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) <= ANY (ARRAY[2, 4, 102, 104]) AND upper(b) >= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 3909 | 2550
-(1 row)
-
--- ALL (should not benefit from functional dependencies)
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ALL (ARRAY[''1''])');
- estimated | actual
------------+--------
- 2 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
--- check the ability to use multiple functional dependencies
-CREATE TABLE functional_dependencies_multi (
- a INTEGER,
- b INTEGER,
- c INTEGER,
- d INTEGER
-)
-WITH (autovacuum_enabled = off);
-INSERT INTO functional_dependencies_multi (a, b, c, d)
- SELECT
- mod(i,7),
- mod(i,7),
- mod(i,11),
- mod(i,11)
- FROM generate_series(1,5000) s(i);
-ANALYZE functional_dependencies_multi;
--- estimates without any functional dependencies
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE a = 0 AND b = 0');
- estimated | actual
------------+--------
- 102 | 714
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE 0 = a AND 0 = b');
- estimated | actual
------------+--------
- 102 | 714
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE c = 0 AND d = 0');
- estimated | actual
------------+--------
- 41 | 454
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE a = 0 AND b = 0 AND c = 0 AND d = 0');
- estimated | actual
------------+--------
- 1 | 64
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE 0 = a AND b = 0 AND 0 = c AND d = 0');
- estimated | actual
------------+--------
- 1 | 64
-(1 row)
-
--- create separate functional dependencies
-CREATE STATISTICS functional_dependencies_multi_1 (dependencies) ON a, b FROM functional_dependencies_multi;
-CREATE STATISTICS functional_dependencies_multi_2 (dependencies) ON c, d FROM functional_dependencies_multi;
-ANALYZE functional_dependencies_multi;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE a = 0 AND b = 0');
- estimated | actual
------------+--------
- 714 | 714
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE 0 = a AND 0 = b');
- estimated | actual
------------+--------
- 714 | 714
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE c = 0 AND d = 0');
- estimated | actual
------------+--------
- 454 | 454
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE a = 0 AND b = 0 AND c = 0 AND d = 0');
- estimated | actual
------------+--------
- 65 | 64
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE 0 = a AND b = 0 AND 0 = c AND d = 0');
- estimated | actual
------------+--------
- 65 | 64
-(1 row)
-
-DROP TABLE functional_dependencies_multi;
--- MCV lists
-CREATE TABLE mcv_lists (
- filler1 TEXT,
- filler2 NUMERIC,
- a INT,
- b VARCHAR,
- filler3 DATE,
- c INT,
- d TEXT,
- ia INT[]
-)
-WITH (autovacuum_enabled = off);
--- random data (no MCV list)
-INSERT INTO mcv_lists (a, b, c, filler1)
- SELECT mod(i,37), mod(i,41), mod(i,43), mod(i,47) FROM generate_series(1,5000) s(i);
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
- 3 | 4
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
- 1 | 1
-(1 row)
-
--- create statistics
-CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c FROM mcv_lists;
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
- 3 | 4
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
- 1 | 1
-(1 row)
-
-TRUNCATE mcv_lists;
-DROP STATISTICS mcv_lists_stats;
--- random data (no MCV list), but with expression
-INSERT INTO mcv_lists (a, b, c, filler1)
- SELECT i, i, i, i FROM generate_series(1,1000) s(i);
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 AND mod(b::int,11) = 1');
- estimated | actual
------------+--------
- 1 | 13
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 AND mod(b::int,11) = 1 AND mod(c,13) = 1');
- estimated | actual
------------+--------
- 1 | 1
-(1 row)
-
--- create statistics
-CREATE STATISTICS mcv_lists_stats (mcv) ON (mod(a,7)), (mod(b::int,11)), (mod(c,13)) FROM mcv_lists;
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 AND mod(b::int,11) = 1');
- estimated | actual
------------+--------
- 13 | 13
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 AND mod(b::int,11) = 1 AND mod(c,13) = 1');
- estimated | actual
------------+--------
- 1 | 1
-(1 row)
-
--- 100 distinct combinations, all in the MCV list
-TRUNCATE mcv_lists;
-DROP STATISTICS mcv_lists_stats;
-INSERT INTO mcv_lists (a, b, c, ia, filler1)
- SELECT mod(i,100), mod(i,50), mod(i,25), array[mod(i,25)], i
- FROM generate_series(1,5000) s(i);
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = a AND ''1'' = b');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 1 AND b < ''1''');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > a AND ''1'' > b');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= 0 AND b <= ''0''');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 0 >= a AND ''0'' >= b');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 5 AND b < ''1'' AND c < 5');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 5 AND ''1'' > b AND 5 > c');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= 4 AND b <= ''0'' AND c <= 4');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 4 >= a AND ''0'' >= b AND 4 >= c');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1');
- estimated | actual
------------+--------
- 343 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1 OR d IS NOT NULL');
- estimated | actual
------------+--------
- 343 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (1, 2, 51, 52) AND b IN ( ''1'', ''2'')');
- estimated | actual
------------+--------
- 8 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (1, 2, 51, 52, NULL) AND b IN ( ''1'', ''2'', NULL)');
- estimated | actual
------------+--------
- 8 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 8 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[NULL, 1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2'', NULL])');
- estimated | actual
------------+--------
- 8 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= ANY (ARRAY[1, 2, 3]) AND b IN (''1'', ''2'', ''3'')');
- estimated | actual
------------+--------
- 26 | 150
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= ANY (ARRAY[1, NULL, 2, 3]) AND b IN (''1'', ''2'', NULL, ''3'')');
- estimated | actual
------------+--------
- 26 | 150
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND c > ANY (ARRAY[1, 2, 3])');
- estimated | actual
------------+--------
- 10 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND c > ANY (ARRAY[1, 2, 3, NULL])');
- estimated | actual
------------+--------
- 10 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', ''3'') AND c > ANY (ARRAY[1, 2, 3])');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', NULL, ''3'') AND c > ANY (ARRAY[1, 2, NULL, 3])');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[4,5]) AND 4 = ANY(ia)');
- estimated | actual
------------+--------
- 4 | 50
-(1 row)
-
--- create statistics
-CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c, ia FROM mcv_lists;
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = a AND ''1'' = b');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 1 AND b < ''1''');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > a AND ''1'' > b');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= 0 AND b <= ''0''');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 0 >= a AND ''0'' >= b');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 5 AND b < ''1'' AND c < 5');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 5 AND ''1'' > b AND 5 > c');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= 4 AND b <= ''0'' AND c <= 4');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 4 >= a AND ''0'' >= b AND 4 >= c');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1 OR d IS NOT NULL');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1 OR d IS NOT NULL');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (1, 2, 51, 52) AND b IN ( ''1'', ''2'')');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (1, 2, 51, 52, NULL) AND b IN ( ''1'', ''2'', NULL)');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[NULL, 1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2'', NULL])');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= ANY (ARRAY[1, 2, 3]) AND b IN (''1'', ''2'', ''3'')');
- estimated | actual
------------+--------
- 150 | 150
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= ANY (ARRAY[1, NULL, 2, 3]) AND b IN (''1'', ''2'', NULL, ''3'')');
- estimated | actual
------------+--------
- 150 | 150
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND c > ANY (ARRAY[1, 2, 3])');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND c > ANY (ARRAY[1, 2, 3, NULL])');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', ''3'') AND c > ANY (ARRAY[1, 2, 3])');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', NULL, ''3'') AND c > ANY (ARRAY[1, 2, NULL, 3])');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[4,5]) AND 4 = ANY(ia)');
- estimated | actual
------------+--------
- 4 | 50
-(1 row)
-
--- check change of unrelated column type does not reset the MCV statistics
-ALTER TABLE mcv_lists ALTER COLUMN d TYPE VARCHAR(64);
-SELECT d.stxdmcv IS NOT NULL
- FROM pg_statistic_ext s, pg_statistic_ext_data d
- WHERE s.stxname = 'mcv_lists_stats'
- AND d.stxoid = s.oid;
- ?column?
----------- - t -(1 row) - --- check change of column type resets the MCV statistics -ALTER TABLE mcv_lists ALTER COLUMN c TYPE numeric; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'''); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -ANALYZE mcv_lists; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'''); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - --- 100 distinct combinations, all in the MCV list, but with expressions -TRUNCATE mcv_lists; -DROP STATISTICS mcv_lists_stats; -INSERT INTO mcv_lists (a, b, c, filler1) - SELECT i, i, i, i FROM generate_series(1,1000) s(i); -ANALYZE mcv_lists; --- without any stats on the expressions, we have to use default selectivities, which --- is why the estimates here are different from the pre-computed case above -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = mod(a,20) AND 1 = mod(b::int,10)'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < 1 AND mod(b::int,10) < 1'); - estimated | actual ------------+-------- - 111 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > mod(a,20) AND 1 > mod(b::int,10)'); - estimated | actual ------------+-------- - 111 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1 AND mod(c,5) = 1'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 OR mod(b::int,10) = 1 OR mod(c,25) = 1 OR d IS NOT NULL'); - estimated | actual ------------+-------- - 15 | 120 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) IN (1, 2, 51, 52, NULL) AND mod(b::int,10) IN ( 1, 2, NULL)'); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = ANY (ARRAY[1, 2, 51, 52]) AND mod(b::int,10) = ANY (ARRAY[1, 2])'); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) <= ANY (ARRAY[1, NULL, 2, 3]) AND mod(b::int,10) IN (1, 2, NULL, 3)'); - estimated | actual ------------+-------- - 11 | 150 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < ALL (ARRAY[4, 5]) AND mod(b::int,10) IN (1, 2, 3) AND mod(c,5) > ANY (ARRAY[1, 2, 3])'); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - --- create statistics with expressions only (we create three separate stats, in order not to build more complex extended stats) -CREATE STATISTICS mcv_lists_stats_1 ON (mod(a,20)) FROM mcv_lists; -CREATE STATISTICS mcv_lists_stats_2 ON (mod(b::int,10)) FROM mcv_lists; -CREATE STATISTICS mcv_lists_stats_3 ON (mod(c,5)) FROM mcv_lists; -ANALYZE mcv_lists; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1'); - estimated | actual ------------+-------- - 5 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = mod(a,20) AND 1 = mod(b::int,10)'); - estimated | actual ------------+-------- - 5 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM 
mcv_lists WHERE mod(a,20) < 1 AND mod(b::int,10) < 1'); - estimated | actual ------------+-------- - 5 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > mod(a,20) AND 1 > mod(b::int,10)'); - estimated | actual ------------+-------- - 5 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1 AND mod(c,5) = 1'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 OR mod(b::int,10) = 1 OR mod(c,25) = 1 OR d IS NOT NULL'); - estimated | actual ------------+-------- - 149 | 120 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) IN (1, 2, 51, 52, NULL) AND mod(b::int,10) IN ( 1, 2, NULL)'); - estimated | actual ------------+-------- - 20 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = ANY (ARRAY[1, 2, 51, 52]) AND mod(b::int,10) = ANY (ARRAY[1, 2])'); - estimated | actual ------------+-------- - 20 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) <= ANY (ARRAY[1, NULL, 2, 3]) AND mod(b::int,10) IN (1, 2, NULL, 3)'); - estimated | actual ------------+-------- - 116 | 150 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < ALL (ARRAY[4, 5]) AND mod(b::int,10) IN (1, 2, 3) AND mod(c,5) > ANY (ARRAY[1, 2, 3])'); - estimated | actual ------------+-------- - 12 | 100 -(1 row) - -DROP STATISTICS mcv_lists_stats_1; -DROP STATISTICS mcv_lists_stats_2; -DROP STATISTICS mcv_lists_stats_3; --- create statistics with both MCV and expressions -CREATE STATISTICS mcv_lists_stats (mcv) ON (mod(a,20)), (mod(b::int,10)), (mod(c,5)) FROM mcv_lists; -ANALYZE mcv_lists; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = mod(a,20) AND 1 = mod(b::int,10)'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < 1 AND mod(b::int,10) < 1'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > mod(a,20) AND 1 > mod(b::int,10)'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1 AND mod(c,5) = 1'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 OR mod(b::int,10) = 1 OR mod(c,25) = 1 OR d IS NOT NULL'); - estimated | actual ------------+-------- - 105 | 120 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) IN (1, 2, 51, 52, NULL) AND mod(b::int,10) IN ( 1, 2, NULL)'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = ANY (ARRAY[1, 2, 51, 52]) AND mod(b::int,10) = ANY (ARRAY[1, 2])'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) <= ANY (ARRAY[1, NULL, 2, 3]) AND mod(b::int,10) IN (1, 2, NULL, 3)'); - estimated | actual ------------+-------- - 150 | 150 -(1 row) - -SELECT * FROM 
check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < ALL (ARRAY[4, 5]) AND mod(b::int,10) IN (1, 2, 3) AND mod(c,5) > ANY (ARRAY[1, 2, 3])'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - --- we can't use the statistic for OR clauses that are not fully covered (missing 'd' attribute) -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 OR mod(b::int,10) = 1 OR mod(c,5) = 1 OR d IS NOT NULL'); - estimated | actual ------------+-------- - 200 | 200 -(1 row) - --- 100 distinct combinations with NULL values, all in the MCV list -TRUNCATE mcv_lists; -DROP STATISTICS mcv_lists_stats; -INSERT INTO mcv_lists (a, b, c, filler1) - SELECT - (CASE WHEN mod(i,100) = 1 THEN NULL ELSE mod(i,100) END), - (CASE WHEN mod(i,50) = 1 THEN NULL ELSE mod(i,50) END), - (CASE WHEN mod(i,25) = 1 THEN NULL ELSE mod(i,25) END), - i - FROM generate_series(1,5000) s(i); -ANALYZE mcv_lists; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NULL'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NULL AND c IS NULL'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NOT NULL'); - estimated | actual ------------+-------- - 49 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NOT NULL AND b IS NULL AND c IS NOT NULL'); - estimated | actual ------------+-------- - 95 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (0, 1) AND b IN (''0'', ''1'')'); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - --- create statistics -CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c FROM mcv_lists; -ANALYZE mcv_lists; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NULL'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NULL AND c IS NULL'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NOT NULL'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NOT NULL AND b IS NULL AND c IS NOT NULL'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (0, 1) AND b IN (''0'', ''1'')'); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - --- test pg_mcv_list_items with a very simple (single item) MCV list -TRUNCATE mcv_lists; -INSERT INTO mcv_lists (a, b, c) SELECT 1, 2, 3 FROM generate_series(1,1000) s(i); -ANALYZE mcv_lists; -SELECT m.* - FROM pg_statistic_ext s, pg_statistic_ext_data d, - pg_mcv_list_items(d.stxdmcv) m - WHERE s.stxname = 'mcv_lists_stats' - AND d.stxoid = s.oid; - index | values | nulls | frequency | base_frequency --------+---------+---------+-----------+---------------- - 0 | {1,2,3} | {f,f,f} | 1 | 1 -(1 row) - --- 2 distinct combinations with NULL values, all in the MCV list -TRUNCATE mcv_lists; -DROP STATISTICS mcv_lists_stats; -INSERT INTO mcv_lists (a, b, c, d) - SELECT - NULL, -- always NULL - (CASE WHEN mod(i,2) = 0 THEN NULL ELSE 'x' END), - (CASE WHEN mod(i,2) = 0 THEN NULL ELSE 0 END), - (CASE WHEN mod(i,2) = 0 THEN NULL ELSE 
'x' END) - FROM generate_series(1,5000) s(i); -ANALYZE mcv_lists; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE b = ''x'' OR d = ''x'''); - estimated | actual ------------+-------- - 3750 | 2500 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''x'' OR d = ''x'''); - estimated | actual ------------+-------- - 3750 | 2500 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND (b = ''x'' OR d = ''x'')'); - estimated | actual ------------+-------- - 3750 | 2500 -(1 row) - --- create statistics -CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, d FROM mcv_lists; -ANALYZE mcv_lists; --- test pg_mcv_list_items with MCV list containing variable-length data and NULLs -SELECT m.* - FROM pg_statistic_ext s, pg_statistic_ext_data d, - pg_mcv_list_items(d.stxdmcv) m - WHERE s.stxname = 'mcv_lists_stats' - AND d.stxoid = s.oid; - index | values | nulls | frequency | base_frequency --------+------------------+---------+-----------+---------------- - 0 | {NULL,x,x} | {t,f,f} | 0.5 | 0.25 - 1 | {NULL,NULL,NULL} | {t,t,t} | 0.5 | 0.25 -(2 rows) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE b = ''x'' OR d = ''x'''); - estimated | actual ------------+-------- - 2500 | 2500 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''x'' OR d = ''x'''); - estimated | actual ------------+-------- - 2500 | 2500 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND (b = ''x'' OR d = ''x'')'); - estimated | actual ------------+-------- - 2500 | 2500 -(1 row) - --- mcv with pass-by-ref fixlen types, e.g. uuid -CREATE TABLE mcv_lists_uuid ( - a UUID, - b UUID, - c UUID -) -WITH (autovacuum_enabled = off); -INSERT INTO mcv_lists_uuid (a, b, c) - SELECT - fipshash(mod(i,100)::text)::uuid, - fipshash(mod(i,50)::text)::uuid, - fipshash(mod(i,25)::text)::uuid - FROM generate_series(1,5000) s(i); -ANALYZE mcv_lists_uuid; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'''); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND c = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'''); - estimated | actual ------------+-------- - 1 | 50 -(1 row) - -CREATE STATISTICS mcv_lists_uuid_stats (mcv) ON a, b, c - FROM mcv_lists_uuid; -ANALYZE mcv_lists_uuid; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'''); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND c = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'''); - estimated | actual ------------+-------- - 50 | 50 -(1 row) - -DROP TABLE mcv_lists_uuid; --- mcv with arrays -CREATE TABLE mcv_lists_arrays ( - a TEXT[], - b NUMERIC[], - c INT[] -) -WITH (autovacuum_enabled = off); -INSERT INTO mcv_lists_arrays (a, b, c) - SELECT - ARRAY[fipshash((i/100)::text), fipshash((i/100-1)::text), fipshash((i/100+1)::text)], - ARRAY[(i/100-1)::numeric/1000, (i/100)::numeric/1000, (i/100+1)::numeric/1000], - ARRAY[(i/100-1), i/100, 
(i/100+1)] - FROM generate_series(1,5000) s(i); -CREATE STATISTICS mcv_lists_arrays_stats (mcv) ON a, b, c - FROM mcv_lists_arrays; -ANALYZE mcv_lists_arrays; --- mcv with bool -CREATE TABLE mcv_lists_bool ( - a BOOL, - b BOOL, - c BOOL -) -WITH (autovacuum_enabled = off); -INSERT INTO mcv_lists_bool (a, b, c) - SELECT - (mod(i,2) = 0), (mod(i,4) = 0), (mod(i,8) = 0) - FROM generate_series(1,10000) s(i); -ANALYZE mcv_lists_bool; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE a AND b AND c'); - estimated | actual ------------+-------- - 156 | 1250 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND b AND c'); - estimated | actual ------------+-------- - 156 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND NOT b AND c'); - estimated | actual ------------+-------- - 469 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND b AND NOT c'); - estimated | actual ------------+-------- - 1094 | 0 -(1 row) - -CREATE STATISTICS mcv_lists_bool_stats (mcv) ON a, b, c - FROM mcv_lists_bool; -ANALYZE mcv_lists_bool; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE a AND b AND c'); - estimated | actual ------------+-------- - 1250 | 1250 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND b AND c'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND NOT b AND c'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND b AND NOT c'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - --- mcv covering just a small fraction of data -CREATE TABLE mcv_lists_partial ( - a INT, - b INT, - c INT -); --- 10 frequent groups, each with 100 elements -INSERT INTO mcv_lists_partial (a, b, c) - SELECT - mod(i,10), - mod(i,10), - mod(i,10) - FROM generate_series(0,999) s(i); --- 100 groups that will make it to the MCV list (includes the 10 frequent ones) -INSERT INTO mcv_lists_partial (a, b, c) - SELECT - i, - i, - i - FROM generate_series(0,99) s(i); --- 4000 groups in total, most of which won't make it (just a single item) -INSERT INTO mcv_lists_partial (a, b, c) - SELECT - i, - i, - i - FROM generate_series(0,3999) s(i); -ANALYZE mcv_lists_partial; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 AND b = 0 AND c = 0'); - estimated | actual ------------+-------- - 1 | 102 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 OR b = 0 OR c = 0'); - estimated | actual ------------+-------- - 300 | 102 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 10 AND b = 10 AND c = 10'); - estimated | actual ------------+-------- - 1 | 2 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 10 OR b = 10 OR c = 10'); - estimated | actual ------------+-------- - 6 | 2 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 AND b = 0 AND c = 10'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 OR b = 0 OR c = 10'); - estimated | actual ------------+-------- - 204 | 104 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE 
(a = 0 AND b = 0 AND c = 0) OR (a = 1 AND b = 1 AND c = 1) OR (a = 2 AND b = 2 AND c = 2)'); - estimated | actual ------------+-------- - 1 | 306 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE (a = 0 AND b = 0) OR (a = 0 AND c = 0) OR (b = 0 AND c = 0)'); - estimated | actual ------------+-------- - 6 | 102 -(1 row) - -CREATE STATISTICS mcv_lists_partial_stats (mcv) ON a, b, c - FROM mcv_lists_partial; -ANALYZE mcv_lists_partial; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 AND b = 0 AND c = 0'); - estimated | actual ------------+-------- - 102 | 102 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 OR b = 0 OR c = 0'); - estimated | actual ------------+-------- - 96 | 102 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 10 AND b = 10 AND c = 10'); - estimated | actual ------------+-------- - 2 | 2 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 10 OR b = 10 OR c = 10'); - estimated | actual ------------+-------- - 2 | 2 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 AND b = 0 AND c = 10'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 OR b = 0 OR c = 10'); - estimated | actual ------------+-------- - 102 | 104 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE (a = 0 AND b = 0 AND c = 0) OR (a = 1 AND b = 1 AND c = 1) OR (a = 2 AND b = 2 AND c = 2)'); - estimated | actual ------------+-------- - 306 | 306 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE (a = 0 AND b = 0) OR (a = 0 AND c = 0) OR (b = 0 AND c = 0)'); - estimated | actual ------------+-------- - 108 | 102 -(1 row) - -DROP TABLE mcv_lists_partial; --- check the ability to use multiple MCV lists -CREATE TABLE mcv_lists_multi ( - a INTEGER, - b INTEGER, - c INTEGER, - d INTEGER -) -WITH (autovacuum_enabled = off); -INSERT INTO mcv_lists_multi (a, b, c, d) - SELECT - mod(i,5), - mod(i,5), - mod(i,7), - mod(i,7) - FROM generate_series(1,5000) s(i); -ANALYZE mcv_lists_multi; --- estimates without any mcv statistics -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 AND b = 0'); - estimated | actual ------------+-------- - 200 | 1000 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE c = 0 AND d = 0'); - estimated | actual ------------+-------- - 102 | 714 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE b = 0 AND c = 0'); - estimated | actual ------------+-------- - 143 | 142 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE b = 0 OR c = 0'); - estimated | actual ------------+-------- - 1571 | 1572 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 AND b = 0 AND c = 0 AND d = 0'); - estimated | actual ------------+-------- - 4 | 142 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE (a = 0 AND b = 0) OR (c = 0 AND d = 0)'); - estimated | actual ------------+-------- - 298 | 1572 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 OR b = 0 OR c = 0 OR d = 0'); - estimated | actual ------------+-------- - 2649 | 1572 -(1 row) - --- create separate MCV statistics -CREATE STATISTICS 
mcv_lists_multi_1 (mcv) ON a, b FROM mcv_lists_multi; -CREATE STATISTICS mcv_lists_multi_2 (mcv) ON c, d FROM mcv_lists_multi; -ANALYZE mcv_lists_multi; -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 AND b = 0'); - estimated | actual ------------+-------- - 1000 | 1000 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE c = 0 AND d = 0'); - estimated | actual ------------+-------- - 714 | 714 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE b = 0 AND c = 0'); - estimated | actual ------------+-------- - 143 | 142 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE b = 0 OR c = 0'); - estimated | actual ------------+-------- - 1571 | 1572 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 AND b = 0 AND c = 0 AND d = 0'); - estimated | actual ------------+-------- - 143 | 142 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE (a = 0 AND b = 0) OR (c = 0 AND d = 0)'); - estimated | actual ------------+-------- - 1571 | 1572 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 OR b = 0 OR c = 0 OR d = 0'); - estimated | actual ------------+-------- - 1571 | 1572 -(1 row) - -DROP TABLE mcv_lists_multi; --- statistics on integer expressions -CREATE TABLE expr_stats (a int, b int, c int); -INSERT INTO expr_stats SELECT mod(i,10), mod(i,10), mod(i,10) FROM generate_series(1,1000) s(i); -ANALYZE expr_stats; -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE (2*a) = 0 AND (3*b) = 0'); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE (a+b) = 0 AND (a-b) = 0'); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -CREATE STATISTICS expr_stats_1 (mcv) ON (a+b), (a-b), (2*a), (3*b) FROM expr_stats; -ANALYZE expr_stats; -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE (2*a) = 0 AND (3*b) = 0'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE (a+b) = 0 AND (a-b) = 0'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -DROP STATISTICS expr_stats_1; -DROP TABLE expr_stats; --- statistics on a mix columns and expressions -CREATE TABLE expr_stats (a int, b int, c int); -INSERT INTO expr_stats SELECT mod(i,10), mod(i,10), mod(i,10) FROM generate_series(1,1000) s(i); -ANALYZE expr_stats; -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (2*a) = 0 AND (3*b) = 0'); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 3 AND b = 3 AND (a-b) = 0'); - estimated | actual ------------+-------- - 1 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND b = 1 AND (a-b) = 0'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -CREATE STATISTICS expr_stats_1 (mcv) ON a, b, (2*a), (3*b), (a+b), (a-b) FROM expr_stats; -ANALYZE expr_stats; -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (2*a) = 0 AND (3*b) = 0'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 3 AND b = 3 AND (a-b) = 0'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM 
check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND b = 1 AND (a-b) = 0'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -DROP TABLE expr_stats; --- statistics on expressions with different data types -CREATE TABLE expr_stats (a int, b name, c text); -INSERT INTO expr_stats SELECT mod(i,10), fipshash(mod(i,10)::text), fipshash(mod(i,10)::text) FROM generate_series(1,1000) s(i); -ANALYZE expr_stats; -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (b || c) <= ''z'' AND (c || b) >= ''0'''); - estimated | actual ------------+-------- - 11 | 100 -(1 row) - -CREATE STATISTICS expr_stats_1 (mcv) ON a, b, (b || c), (c || b) FROM expr_stats; -ANALYZE expr_stats; -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (b || c) <= ''z'' AND (c || b) >= ''0'''); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -DROP TABLE expr_stats; --- test handling of a mix of compatible and incompatible expressions -CREATE TABLE expr_stats_incompatible_test ( - c0 double precision, - c1 boolean NOT NULL -); -CREATE STATISTICS expr_stat_comp_1 ON c0, c1 FROM expr_stats_incompatible_test; -INSERT INTO expr_stats_incompatible_test VALUES (1234,false), (5678,true); -ANALYZE expr_stats_incompatible_test; -SELECT c0 FROM ONLY expr_stats_incompatible_test WHERE -( - upper('x') LIKE ('x'||('[0,1]'::int4range)) - AND - (c0 IN (0, 1) OR c1) -); - c0 ----- -(0 rows) - -DROP TABLE expr_stats_incompatible_test; --- Permission tests. Users should not be able to see specific data values in --- the extended statistics, if they lack permission to see those values in --- the underlying table. --- --- Currently this is only relevant for MCV stats. -CREATE SCHEMA tststats; -CREATE TABLE tststats.priv_test_tbl ( - a int, - b int -); -INSERT INTO tststats.priv_test_tbl - SELECT mod(i,5), mod(i,10) FROM generate_series(1,100) s(i); -CREATE STATISTICS tststats.priv_test_stats (mcv) ON a, b - FROM tststats.priv_test_tbl; -ANALYZE tststats.priv_test_tbl; --- Check printing info about extended statistics by \dX -create table stts_t1 (a int, b int); -create statistics (ndistinct) on a, b from stts_t1; -create statistics (ndistinct, dependencies) on a, b from stts_t1; -create statistics (ndistinct, dependencies, mcv) on a, b from stts_t1; -create table stts_t2 (a int, b int, c int); -create statistics on b, c from stts_t2; -create table stts_t3 (col1 int, col2 int, col3 int); -create statistics stts_hoge on col1, col2, col3 from stts_t3; -create schema stts_s1; -create schema stts_s2; -create statistics stts_s1.stts_foo on col1, col2 from stts_t3; -create statistics stts_s2.stts_yama (dependencies, mcv) on col1, col3 from stts_t3; -insert into stts_t1 select i,i from generate_series(1,100) i; -analyze stts_t1; -set search_path to public, stts_s1, stts_s2, tststats; -\dX - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV -----------+------------------------+------------------------------------------------------------------+-----------+--------------+--------- - public | func_deps_stat | (a * 2), upper(b), (c + 1::numeric) FROM functional_dependencies | | defined | - public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined - public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined - public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined - public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined - public | stts_t1_a_b_stat | a, b FROM 
stts_t1 | defined | | - public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | - public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined - public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined - stts_s1 | stts_foo | col1, col2 FROM stts_t3 | defined | defined | defined - stts_s2 | stts_yama | col1, col3 FROM stts_t3 | | defined | defined - tststats | priv_test_stats | a, b FROM priv_test_tbl | | | defined -(12 rows) - -\dX stts_t* - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+-------------------+-------------------+-----------+--------------+--------- - public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | | - public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | - public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined - public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined -(4 rows) - -\dX *stts_hoge - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+-----------+-------------------------------+-----------+--------------+--------- - public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined -(1 row) - -\dX+ - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV -----------+------------------------+------------------------------------------------------------------+-----------+--------------+--------- - public | func_deps_stat | (a * 2), upper(b), (c + 1::numeric) FROM functional_dependencies | | defined | - public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined - public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined - public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined - public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined - public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | | - public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | - public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined - public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined - stts_s1 | stts_foo | col1, col2 FROM stts_t3 | defined | defined | defined - stts_s2 | stts_yama | col1, col3 FROM stts_t3 | | defined | defined - tststats | priv_test_stats | a, b FROM priv_test_tbl | | | defined -(12 rows) - -\dX+ stts_t* - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+-------------------+-------------------+-----------+--------------+--------- - public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | | - public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | - public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined - public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined -(4 rows) - -\dX+ *stts_hoge - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+-----------+-------------------------------+-----------+--------------+--------- - public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined -(1 row) - -\dX+ stts_s2.stts_yama - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ----------+-----------+-------------------------+-----------+--------------+--------- - stts_s2 | stts_yama | col1, col3 FROM stts_t3 | | defined | defined -(1 row) - -create statistics (mcv) ON a, b, (a+b), (a-b) 
FROM stts_t1; -create statistics (mcv) ON a, b, (a+b), (a-b) FROM stts_t1; -create statistics (mcv) ON (a+b), (a-b) FROM stts_t1; -\dX stts_t*expr* - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+-----------------------------+-------------------------------------+-----------+--------------+--------- - public | stts_t1_a_b_expr_expr_stat | a, b, (a + b), (a - b) FROM stts_t1 | | | defined - public | stts_t1_a_b_expr_expr_stat1 | a, b, (a + b), (a - b) FROM stts_t1 | | | defined - public | stts_t1_expr_expr_stat | (a + b), (a - b) FROM stts_t1 | | | defined -(3 rows) - -drop statistics stts_t1_a_b_expr_expr_stat; -drop statistics stts_t1_a_b_expr_expr_stat1; -drop statistics stts_t1_expr_expr_stat; -set search_path to public, stts_s1; -\dX - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ----------+------------------------+------------------------------------------------------------------+-----------+--------------+--------- - public | func_deps_stat | (a * 2), upper(b), (c + 1::numeric) FROM functional_dependencies | | defined | - public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined - public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined - public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined - public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined - public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | | - public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | - public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined - public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined - stts_s1 | stts_foo | col1, col2 FROM stts_t3 | defined | defined | defined -(10 rows) - -create role regress_stats_ext nosuperuser; -set role regress_stats_ext; -\dX - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+------------------------+------------------------------------------------------------------+-----------+--------------+--------- - public | func_deps_stat | (a * 2), upper(b), (c + 1::numeric) FROM functional_dependencies | | defined | - public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined - public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined - public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined - public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined - public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | | - public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | - public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined - public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined -(9 rows) - -reset role; -drop table stts_t1, stts_t2, stts_t3; -drop schema stts_s1, stts_s2 cascade; -drop user regress_stats_ext; -reset search_path; --- User with no access -CREATE USER regress_stats_user1; -GRANT USAGE ON SCHEMA tststats TO regress_stats_user1; -SET SESSION AUTHORIZATION regress_stats_user1; -SELECT * FROM tststats.priv_test_tbl; -- Permission denied -ERROR: permission denied for table priv_test_tbl --- Check individual columns if we don't have table privilege -SELECT * FROM tststats.priv_test_tbl - WHERE a = 1 and tststats.priv_test_tbl.* > (1, 1) is not null; -ERROR: permission denied for table priv_test_tbl --- Attempt to gain access using a leaky operator -CREATE FUNCTION 
op_leak(int, int) RETURNS bool - AS 'BEGIN RAISE NOTICE ''op_leak => %, %'', $1, $2; RETURN $1 < $2; END' - LANGUAGE plpgsql; -CREATE OPERATOR <<< (procedure = op_leak, leftarg = int, rightarg = int, - restrict = scalarltsel); -SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied -ERROR: permission denied for table priv_test_tbl -DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied -ERROR: permission denied for table priv_test_tbl --- Grant access via a security barrier view, but hide all data -RESET SESSION AUTHORIZATION; -CREATE VIEW tststats.priv_test_view WITH (security_barrier=true) - AS SELECT * FROM tststats.priv_test_tbl WHERE false; -GRANT SELECT, DELETE ON tststats.priv_test_view TO regress_stats_user1; --- Should now have access via the view, but see nothing and leak nothing -SET SESSION AUTHORIZATION regress_stats_user1; -SELECT * FROM tststats.priv_test_view WHERE a <<< 0 AND b <<< 0; -- Should not leak - a | b ----+--- -(0 rows) - -DELETE FROM tststats.priv_test_view WHERE a <<< 0 AND b <<< 0; -- Should not leak --- Grant table access, but hide all data with RLS -RESET SESSION AUTHORIZATION; -ALTER TABLE tststats.priv_test_tbl ENABLE ROW LEVEL SECURITY; -GRANT SELECT, DELETE ON tststats.priv_test_tbl TO regress_stats_user1; --- Should now have direct table access, but see nothing and leak nothing -SET SESSION AUTHORIZATION regress_stats_user1; -SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak - a | b ----+--- -(0 rows) - -DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak --- Tidy up -DROP OPERATOR <<< (int, int); -DROP FUNCTION op_leak(int, int); -RESET SESSION AUTHORIZATION; -DROP SCHEMA tststats CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table tststats.priv_test_tbl -drop cascades to view tststats.priv_test_view -DROP USER regress_stats_user1; +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/collate.linux.utf8_1.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/collate.linux.utf8.out --- /tmp/cirrus-ci-build/src/test/regress/expected/collate.linux.utf8_1.out 2024-03-07 14:25:00.329609000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/collate.linux.utf8.out 2024-03-07 14:27:17.151298000 +0000 @@ -1,11 +1,2 @@ -/* - * This test is for Linux/glibc systems and assumes that a full set of - * locales is installed. It must be run in a database with UTF-8 encoding, - * because other encodings don't support all the characters used. - */ -SELECT getdatabaseencoding() <> 'UTF8' OR - (SELECT count(*) FROM pg_collation WHERE collname IN ('de_DE', 'en_US', 'sv_SE', 'tr_TR') AND collencoding = pg_char_to_encoding('UTF8')) <> 4 OR - version() !~ 'linux-gnu' - AS skip_test \gset -\if :skip_test -\quit +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
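Aside on the two collation suites above and below: each protects itself with psql's \gset/\if guard, quitting before any platform-dependent statement runs. A minimal sketch of that guard follows; the condition is illustrative only (the real tests also check the database encoding and the installed collations):

SELECT version() !~ 'linux-gnu' AS skip_test \gset
\if :skip_test
\quit
\endif
-- platform-specific test statements would follow here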
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/collate.windows.win1252_1.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/collate.windows.win1252.out --- /tmp/cirrus-ci-build/src/test/regress/expected/collate.windows.win1252_1.out 2024-03-07 14:25:00.329653000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/collate.windows.win1252.out 2024-03-07 14:27:17.150262000 +0000 @@ -1,13 +1,2 @@ -/* - * This test is meant to run on Windows systems that has successfully - * run pg_import_system_collations(). Also, the database must have - * WIN1252 encoding, because of the locales' own encodings. Because - * of this, some test are lost from UTF-8 version, such as Turkish - * dotted and undotted 'i'. - */ -SELECT getdatabaseencoding() <> 'WIN1252' OR - (SELECT count(*) FROM pg_collation WHERE collname IN ('de_DE', 'en_US', 'sv_SE') AND collencoding = pg_char_to_encoding('WIN1252')) <> 3 OR - (version() !~ 'Visual C\+\+' AND version() !~ 'mingw32' AND version() !~ 'windows') - AS skip_test \gset -\if :skip_test -\quit +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/select_parallel.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/select_parallel.out --- /tmp/cirrus-ci-build/src/test/regress/expected/select_parallel.out 2024-03-07 14:25:00.333654000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/select_parallel.out 2024-03-07 14:27:17.281092000 +0000 @@ -1,1274 +1,2 @@ --- --- PARALLEL --- -create function sp_parallel_restricted(int) returns int as - $$begin return $1; end$$ language plpgsql parallel restricted; -begin; --- encourage use of parallel plans -set parallel_setup_cost=0; -set parallel_tuple_cost=0; -set min_parallel_table_scan_size=0; -set max_parallel_workers_per_gather=4; --- Parallel Append with partial-subplans -explain (costs off) - select round(avg(aa)), sum(aa) from a_star; - QUERY PLAN --------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 3 - -> Partial Aggregate - -> Parallel Append - -> Parallel Seq Scan on d_star a_star_4 - -> Parallel Seq Scan on f_star a_star_6 - -> Parallel Seq Scan on e_star a_star_5 - -> Parallel Seq Scan on b_star a_star_2 - -> Parallel Seq Scan on c_star a_star_3 - -> Parallel Seq Scan on a_star a_star_1 -(11 rows) - -select round(avg(aa)), sum(aa) from a_star a1; - round | sum --------+----- - 14 | 355 -(1 row) - --- Parallel Append with both partial and non-partial subplans -alter table c_star set (parallel_workers = 0); -alter table d_star set (parallel_workers = 0); -explain (costs off) - select round(avg(aa)), sum(aa) from a_star; - QUERY PLAN --------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 3 - -> Partial Aggregate - -> Parallel Append - -> Seq Scan on d_star a_star_4 - -> Seq Scan on c_star a_star_3 - -> Parallel Seq Scan on f_star a_star_6 - -> Parallel Seq Scan on e_star a_star_5 - -> Parallel Seq Scan on b_star a_star_2 - -> Parallel Seq Scan on a_star a_star_1 -(11 rows) - -select round(avg(aa)), sum(aa) from a_star a2; - round | sum --------+----- - 14 | 355 -(1 row) - --- Parallel Append with only non-partial subplans -alter table a_star set (parallel_workers = 0); -alter table b_star set (parallel_workers = 0); -alter 
table e_star set (parallel_workers = 0); -alter table f_star set (parallel_workers = 0); -explain (costs off) - select round(avg(aa)), sum(aa) from a_star; - QUERY PLAN ------------------------------------------------------ - Finalize Aggregate - -> Gather - Workers Planned: 3 - -> Partial Aggregate - -> Parallel Append - -> Seq Scan on d_star a_star_4 - -> Seq Scan on f_star a_star_6 - -> Seq Scan on e_star a_star_5 - -> Seq Scan on b_star a_star_2 - -> Seq Scan on c_star a_star_3 - -> Seq Scan on a_star a_star_1 -(11 rows) - -select round(avg(aa)), sum(aa) from a_star a3; - round | sum --------+----- - 14 | 355 -(1 row) - --- Disable Parallel Append -alter table a_star reset (parallel_workers); -alter table b_star reset (parallel_workers); -alter table c_star reset (parallel_workers); -alter table d_star reset (parallel_workers); -alter table e_star reset (parallel_workers); -alter table f_star reset (parallel_workers); -set enable_parallel_append to off; -explain (costs off) - select round(avg(aa)), sum(aa) from a_star; - QUERY PLAN --------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 1 - -> Partial Aggregate - -> Append - -> Parallel Seq Scan on a_star a_star_1 - -> Parallel Seq Scan on b_star a_star_2 - -> Parallel Seq Scan on c_star a_star_3 - -> Parallel Seq Scan on d_star a_star_4 - -> Parallel Seq Scan on e_star a_star_5 - -> Parallel Seq Scan on f_star a_star_6 -(11 rows) - -select round(avg(aa)), sum(aa) from a_star a4; - round | sum --------+----- - 14 | 355 -(1 row) - -reset enable_parallel_append; --- Parallel Append that runs serially -create function sp_test_func() returns setof text as -$$ select 'foo'::varchar union all select 'bar'::varchar $$ -language sql stable; -select sp_test_func() order by 1; - sp_test_func --------------- - bar - foo -(2 rows) - --- Parallel Append is not to be used when the subpath depends on the outer param -create table part_pa_test(a int, b int) partition by range(a); -create table part_pa_test_p1 partition of part_pa_test for values from (minvalue) to (0); -create table part_pa_test_p2 partition of part_pa_test for values from (0) to (maxvalue); -explain (costs off) - select (select max((select pa1.b from part_pa_test pa1 where pa1.a = pa2.a))) - from part_pa_test pa2; - QUERY PLAN --------------------------------------------------------------- - Aggregate - -> Gather - Workers Planned: 3 - -> Parallel Append - -> Parallel Seq Scan on part_pa_test_p1 pa2_1 - -> Parallel Seq Scan on part_pa_test_p2 pa2_2 - SubPlan 2 - -> Result - SubPlan 1 - -> Append - -> Seq Scan on part_pa_test_p1 pa1_1 - Filter: (a = pa2.a) - -> Seq Scan on part_pa_test_p2 pa1_2 - Filter: (a = pa2.a) -(14 rows) - -drop table part_pa_test; --- test with leader participation disabled -set parallel_leader_participation = off; -explain (costs off) - select count(*) from tenk1 where stringu1 = 'GRAAAA'; - QUERY PLAN ---------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Seq Scan on tenk1 - Filter: (stringu1 = 'GRAAAA'::name) -(6 rows) - -select count(*) from tenk1 where stringu1 = 'GRAAAA'; - count -------- - 15 -(1 row) - --- test with leader participation disabled, but no workers available (so --- the leader will have to run the plan despite the setting) -set max_parallel_workers = 0; -explain (costs off) - select count(*) from tenk1 where stringu1 = 'GRAAAA'; - QUERY PLAN 
---------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Seq Scan on tenk1 - Filter: (stringu1 = 'GRAAAA'::name) -(6 rows) - -select count(*) from tenk1 where stringu1 = 'GRAAAA'; - count -------- - 15 -(1 row) - -reset max_parallel_workers; -reset parallel_leader_participation; --- test that parallel_restricted function doesn't run in worker -alter table tenk1 set (parallel_workers = 4); -explain (verbose, costs off) -select sp_parallel_restricted(unique1) from tenk1 - where stringu1 = 'GRAAAA' order by 1; - QUERY PLAN ---------------------------------------------------------- - Sort - Output: (sp_parallel_restricted(unique1)) - Sort Key: (sp_parallel_restricted(tenk1.unique1)) - -> Gather - Output: sp_parallel_restricted(unique1) - Workers Planned: 4 - -> Parallel Seq Scan on public.tenk1 - Output: unique1 - Filter: (tenk1.stringu1 = 'GRAAAA'::name) -(9 rows) - --- test parallel plan when group by expression is in target list. -explain (costs off) - select length(stringu1) from tenk1 group by length(stringu1); - QUERY PLAN ---------------------------------------------------- - Finalize HashAggregate - Group Key: (length((stringu1)::text)) - -> Gather - Workers Planned: 4 - -> Partial HashAggregate - Group Key: length((stringu1)::text) - -> Parallel Seq Scan on tenk1 -(7 rows) - -select length(stringu1) from tenk1 group by length(stringu1); - length --------- - 6 -(1 row) - -explain (costs off) - select stringu1, count(*) from tenk1 group by stringu1 order by stringu1; - QUERY PLAN ----------------------------------------------------- - Sort - Sort Key: stringu1 - -> Finalize HashAggregate - Group Key: stringu1 - -> Gather - Workers Planned: 4 - -> Partial HashAggregate - Group Key: stringu1 - -> Parallel Seq Scan on tenk1 -(9 rows) - --- test that parallel plan for aggregates is not selected when --- target list contains parallel restricted clause. -explain (costs off) - select sum(sp_parallel_restricted(unique1)) from tenk1 - group by(sp_parallel_restricted(unique1)); - QUERY PLAN -------------------------------------------------------------------- - HashAggregate - Group Key: sp_parallel_restricted(unique1) - -> Gather - Workers Planned: 4 - -> Parallel Index Only Scan using tenk1_unique1 on tenk1 -(5 rows) - --- test prepared statement -prepare tenk1_count(integer) As select count((unique1)) from tenk1 where hundred > $1; -explain (costs off) execute tenk1_count(1); - QUERY PLAN ----------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Seq Scan on tenk1 - Filter: (hundred > 1) -(6 rows) - -execute tenk1_count(1); - count -------- - 9800 -(1 row) - -deallocate tenk1_count; --- test parallel plans for queries containing un-correlated subplans. 
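Aside before the subplan tests below: an uncorrelated NOT IN is typically planned as a hashed SubPlan, which remains parallel-safe as long as its test expression is (the log later shows random() in the test expression defeating this). A minimal sketch of the plan shape, using assumed throwaway tables rather than the regression tables:

-- illustration only; big_t/small_t are assumed names
CREATE TEMP TABLE big_t (x int);
CREATE TEMP TABLE small_t (y int);
EXPLAIN (COSTS OFF)
SELECT count(*) FROM big_t WHERE x NOT IN (SELECT y FROM small_t);
-- typically shows: Filter: (NOT (hashed SubPlan 1))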
-alter table tenk2 set (parallel_workers = 0); -explain (costs off) - select count(*) from tenk1 where (two, four) not in - (select hundred, thousand from tenk2 where thousand > 100); - QUERY PLAN ------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Seq Scan on tenk1 - Filter: (NOT (hashed SubPlan 1)) - SubPlan 1 - -> Seq Scan on tenk2 - Filter: (thousand > 100) -(9 rows) - -select count(*) from tenk1 where (two, four) not in - (select hundred, thousand from tenk2 where thousand > 100); - count -------- - 10000 -(1 row) - --- this is not parallel-safe due to use of random() within SubLink's testexpr: -explain (costs off) - select * from tenk1 where (unique1 + random())::integer not in - (select ten from tenk2); - QUERY PLAN ------------------------------------- - Seq Scan on tenk1 - Filter: (NOT (hashed SubPlan 1)) - SubPlan 1 - -> Seq Scan on tenk2 -(4 rows) - -alter table tenk2 reset (parallel_workers); --- test parallel plan for a query containing initplan. -set enable_indexscan = off; -set enable_indexonlyscan = off; -set enable_bitmapscan = off; -alter table tenk2 set (parallel_workers = 2); -explain (costs off) - select count(*) from tenk1 - where tenk1.unique1 = (Select max(tenk2.unique1) from tenk2); - QUERY PLAN ------------------------------------------------------- - Aggregate - InitPlan 1 (returns $2) - -> Finalize Aggregate - -> Gather - Workers Planned: 2 - -> Partial Aggregate - -> Parallel Seq Scan on tenk2 - -> Gather - Workers Planned: 4 - Params Evaluated: $2 - -> Parallel Seq Scan on tenk1 - Filter: (unique1 = $2) -(12 rows) - -select count(*) from tenk1 - where tenk1.unique1 = (Select max(tenk2.unique1) from tenk2); - count -------- - 1 -(1 row) - -reset enable_indexscan; -reset enable_indexonlyscan; -reset enable_bitmapscan; -alter table tenk2 reset (parallel_workers); --- test parallel index scans. -set enable_seqscan to off; -set enable_bitmapscan to off; -explain (costs off) - select count((unique1)) from tenk1 where hundred > 1; - QUERY PLAN --------------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Index Scan using tenk1_hundred on tenk1 - Index Cond: (hundred > 1) -(6 rows) - -select count((unique1)) from tenk1 where hundred > 1; - count -------- - 9800 -(1 row) - --- test parallel index-only scans. 
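Aside on the index-only scans tested next: besides the enable_* settings used above, an Index Only Scan requires an index covering every referenced column, and it avoids heap fetches only for pages the visibility map records as all-visible. A quick sketch with assumed names:

-- illustration only; some_table/indexed_col are assumed names
VACUUM (ANALYZE) some_table;  -- sets visibility-map bits, enabling heap-free fetches
EXPLAIN (COSTS OFF)
SELECT indexed_col FROM some_table WHERE indexed_col > 95;
-- "Index Only Scan" appears only when a covering index exists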
-explain (costs off) - select count(*) from tenk1 where thousand > 95; - QUERY PLAN --------------------------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Index Only Scan using tenk1_thous_tenthous on tenk1 - Index Cond: (thousand > 95) -(6 rows) - -select count(*) from tenk1 where thousand > 95; - count -------- - 9040 -(1 row) - --- test rescan cases too -set enable_material = false; -explain (costs off) -select * from - (select count(unique1) from tenk1 where hundred > 10) ss - right join (values (1),(2),(3)) v(x) on true; - QUERY PLAN --------------------------------------------------------------------------- - Nested Loop Left Join - -> Values Scan on "*VALUES*" - -> Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Index Scan using tenk1_hundred on tenk1 - Index Cond: (hundred > 10) -(8 rows) - -select * from - (select count(unique1) from tenk1 where hundred > 10) ss - right join (values (1),(2),(3)) v(x) on true; - count | x --------+--- - 8900 | 1 - 8900 | 2 - 8900 | 3 -(3 rows) - -explain (costs off) -select * from - (select count(*) from tenk1 where thousand > 99) ss - right join (values (1),(2),(3)) v(x) on true; - QUERY PLAN --------------------------------------------------------------------------------------- - Nested Loop Left Join - -> Values Scan on "*VALUES*" - -> Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Index Only Scan using tenk1_thous_tenthous on tenk1 - Index Cond: (thousand > 99) -(8 rows) - -select * from - (select count(*) from tenk1 where thousand > 99) ss - right join (values (1),(2),(3)) v(x) on true; - count | x --------+--- - 9000 | 1 - 9000 | 2 - 9000 | 3 -(3 rows) - --- test rescans for a Limit node with a parallel node beneath it. -reset enable_seqscan; -set enable_indexonlyscan to off; -set enable_indexscan to off; -alter table tenk1 set (parallel_workers = 0); -alter table tenk2 set (parallel_workers = 1); -explain (costs off) -select count(*) from tenk1 - left join (select tenk2.unique1 from tenk2 order by 1 limit 1000) ss - on tenk1.unique1 < ss.unique1 + 1 - where tenk1.unique1 < 2; - QUERY PLAN ------------------------------------------------------------- - Aggregate - -> Nested Loop Left Join - Join Filter: (tenk1.unique1 < (tenk2.unique1 + 1)) - -> Seq Scan on tenk1 - Filter: (unique1 < 2) - -> Limit - -> Gather Merge - Workers Planned: 1 - -> Sort - Sort Key: tenk2.unique1 - -> Parallel Seq Scan on tenk2 -(11 rows) - -select count(*) from tenk1 - left join (select tenk2.unique1 from tenk2 order by 1 limit 1000) ss - on tenk1.unique1 < ss.unique1 + 1 - where tenk1.unique1 < 2; - count -------- - 1999 -(1 row) - ---reset the value of workers for each table as it was before this test. -alter table tenk1 set (parallel_workers = 4); -alter table tenk2 reset (parallel_workers); -reset enable_material; -reset enable_bitmapscan; -reset enable_indexonlyscan; -reset enable_indexscan; --- test parallel bitmap heap scan. 
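Aside on the bitmap tests that follow: in a Parallel Bitmap Heap Scan only the heap stage is divided among workers; the TID bitmap itself is built by a single process. Cost settings like the ones this suite uses make such plans easy to provoke on small data; a sketch with an assumed table:

-- illustration only; some_table is assumed to have an index on col
SET parallel_setup_cost = 0;
SET parallel_tuple_cost = 0;
SET min_parallel_table_scan_size = 0;
EXPLAIN (COSTS OFF) SELECT count(*) FROM some_table WHERE col > 1;
RESET parallel_setup_cost;
RESET parallel_tuple_cost;
RESET min_parallel_table_scan_size;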
-set enable_seqscan to off; -set enable_indexscan to off; -set enable_hashjoin to off; -set enable_mergejoin to off; -set enable_material to off; --- test prefetching, if the platform allows it -DO $$ -BEGIN - SET effective_io_concurrency = 50; -EXCEPTION WHEN invalid_parameter_value THEN -END $$; -set work_mem='64kB'; --set small work mem to force lossy pages -explain (costs off) - select count(*) from tenk1, tenk2 where tenk1.hundred > 1 and tenk2.thousand=0; - QUERY PLAN ------------------------------------------------------------- - Aggregate - -> Nested Loop - -> Seq Scan on tenk2 - Filter: (thousand = 0) - -> Gather - Workers Planned: 4 - -> Parallel Bitmap Heap Scan on tenk1 - Recheck Cond: (hundred > 1) - -> Bitmap Index Scan on tenk1_hundred - Index Cond: (hundred > 1) -(10 rows) - -select count(*) from tenk1, tenk2 where tenk1.hundred > 1 and tenk2.thousand=0; - count -------- - 98000 -(1 row) - -create table bmscantest (a int, t text); -insert into bmscantest select r, 'fooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo' FROM generate_series(1,100000) r; -create index i_bmtest ON bmscantest(a); -select count(*) from bmscantest where a>1; - count -------- - 99999 -(1 row) - --- test accumulation of stats for parallel nodes -reset enable_seqscan; -alter table tenk2 set (parallel_workers = 0); -explain (analyze, timing off, summary off, costs off) - select count(*) from tenk1, tenk2 where tenk1.hundred > 1 - and tenk2.thousand=0; - QUERY PLAN --------------------------------------------------------------------------- - Aggregate (actual rows=1 loops=1) - -> Nested Loop (actual rows=98000 loops=1) - -> Seq Scan on tenk2 (actual rows=10 loops=1) - Filter: (thousand = 0) - Rows Removed by Filter: 9990 - -> Gather (actual rows=9800 loops=10) - Workers Planned: 4 - Workers Launched: 4 - -> Parallel Seq Scan on tenk1 (actual rows=1960 loops=50) - Filter: (hundred > 1) - Rows Removed by Filter: 40 -(11 rows) - -alter table tenk2 reset (parallel_workers); -reset work_mem; -create function explain_parallel_sort_stats() returns setof text -language plpgsql as -$$ -declare ln text; -begin - for ln in - explain (analyze, timing off, summary off, costs off) - select * from - (select ten from tenk1 where ten < 100 order by ten) ss - right join (values (1),(2),(3)) v(x) on true - loop - ln := regexp_replace(ln, 'Memory: \S*', 'Memory: xxx'); - return next ln; - end loop; -end; -$$; -select * from explain_parallel_sort_stats(); - explain_parallel_sort_stats --------------------------------------------------------------------------- - Nested Loop Left Join (actual rows=30000 loops=1) - -> Values Scan on "*VALUES*" (actual rows=3 loops=1) - -> Gather Merge (actual rows=10000 loops=3) - Workers Planned: 4 - Workers Launched: 4 - -> Sort (actual rows=2000 loops=15) - Sort Key: tenk1.ten - Sort Method: quicksort Memory: xxx - Worker 0: Sort Method: quicksort Memory: xxx - Worker 1: Sort Method: quicksort Memory: xxx - Worker 2: Sort Method: quicksort Memory: xxx - Worker 3: Sort Method: quicksort Memory: xxx - -> Parallel Seq Scan on tenk1 (actual rows=2000 loops=15) - Filter: (ten < 100) -(14 rows) - -reset enable_indexscan; -reset enable_hashjoin; -reset enable_mergejoin; -reset enable_material; -reset effective_io_concurrency; -drop table bmscantest; -drop function explain_parallel_sort_stats(); --- test parallel merge join path. 
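Aside on the merge-join tests below: in a parallel merge join each worker merges its share of the outer relation against a full scan of the inner one, which is why the plans pair a Parallel Index Only Scan with a plain Index Only Scan. Forcing that shape on assumed tables mirrors the settings the test itself uses:

-- illustration only; t1/t2 are assumed tables with btree indexes on id
SET enable_hashjoin = off;
SET enable_nestloop = off;
EXPLAIN (COSTS OFF) SELECT count(*) FROM t1, t2 WHERE t1.id = t2.id;
RESET enable_hashjoin;
RESET enable_nestloop;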
-set enable_hashjoin to off;
-set enable_nestloop to off;
-explain (costs off)
-  select count(*) from tenk1, tenk2 where tenk1.unique1 = tenk2.unique1;
-                                  QUERY PLAN
--------------------------------------------------------------------------------
- Finalize Aggregate
-   ->  Gather
-         Workers Planned: 4
-         ->  Partial Aggregate
-               ->  Merge Join
-                     Merge Cond: (tenk1.unique1 = tenk2.unique1)
-                     ->  Parallel Index Only Scan using tenk1_unique1 on tenk1
-                     ->  Index Only Scan using tenk2_unique1 on tenk2
-(8 rows)
-
-select count(*) from tenk1, tenk2 where tenk1.unique1 = tenk2.unique1;
- count
--------
- 10000
-(1 row)
-
-reset enable_hashjoin;
-reset enable_nestloop;
--- test gather merge
-set enable_hashagg = false;
-explain (costs off)
-  select count(*) from tenk1 group by twenty;
-                     QUERY PLAN
-----------------------------------------------------
- Finalize GroupAggregate
-   Group Key: twenty
-   ->  Gather Merge
-         Workers Planned: 4
-         ->  Partial GroupAggregate
-               Group Key: twenty
-               ->  Sort
-                     Sort Key: twenty
-                     ->  Parallel Seq Scan on tenk1
-(9 rows)
-
-select count(*) from tenk1 group by twenty;
- count
--------
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-(20 rows)
-
--- test that expressions in the targetlist are pushed down for gather merge
-create function sp_simple_func(var1 integer) returns integer
-as $$
-begin
-    return var1 + 10;
-end;
-$$ language plpgsql PARALLEL SAFE;
-explain (costs off, verbose)
-  select ten, sp_simple_func(ten) from tenk1 where ten < 100 order by ten;
-                     QUERY PLAN
------------------------------------------------------
- Gather Merge
-   Output: ten, (sp_simple_func(ten))
-   Workers Planned: 4
-   ->  Result
-         Output: ten, sp_simple_func(ten)
-         ->  Sort
-               Output: ten
-               Sort Key: tenk1.ten
-               ->  Parallel Seq Scan on public.tenk1
-                     Output: ten
-                     Filter: (tenk1.ten < 100)
-(11 rows)
-
-drop function sp_simple_func(integer);
--- test handling of SRFs in targetlist (bug in 10.0)
-explain (costs off)
-  select count(*), generate_series(1,2) from tenk1 group by twenty;
-                        QUERY PLAN
-----------------------------------------------------------
- ProjectSet
-   ->  Finalize GroupAggregate
-         Group Key: twenty
-         ->  Gather Merge
-               Workers Planned: 4
-               ->  Partial GroupAggregate
-                     Group Key: twenty
-                     ->  Sort
-                           Sort Key: twenty
-                           ->  Parallel Seq Scan on tenk1
-(10 rows)
-
-select count(*), generate_series(1,2) from tenk1 group by twenty;
- count | generate_series
--------+-----------------
-   500 |               1
-   500 |               2
-   500 |               1
-   500 |               2
-   500 |               1
-   500 |               2
-   500 |               1
-   500 |               2
-   500 |               1
-   500 |               2
-   500 |               1
-   500 |               2
-   500 |               1
-   500 |               2
-   500 |               1
-   500 |               2
-   500 |               1
-   500 |               2
-   500 |               1
-   500 |               2
-   500 |               1
-   500 |               2
-   500 |               1
-   500 |               2
-   500 |               1
-   500 |               2
-   500 |               1
-   500 |               2
-   500 |               1
-   500 |               2
-   500 |               1
-   500 |               2
-   500 |               1
-   500 |               2
-   500 |               1
-   500 |               2
-   500 |               1
-   500 |               2
-   500 |               1
-   500 |               2
-(40 rows)
-
--- test gather merge with parallel leader participation disabled
-set parallel_leader_participation = off;
-explain (costs off)
-  select count(*) from tenk1 group by twenty;
-                     QUERY PLAN
-----------------------------------------------------
- Finalize GroupAggregate
-   Group Key: twenty
-   ->  Gather Merge
-         Workers Planned: 4
-         ->  Partial GroupAggregate
-               Group Key: twenty
-               ->  Sort
-                     Sort Key: twenty
-                     ->  Parallel Seq Scan on tenk1
-(9 rows)
-
-select count(*) from tenk1 group by twenty;
- count
--------
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-   500
-(20 rows)
-
-reset parallel_leader_participation;
--- test rescan behavior of gather merge
-set enable_material = false;
-explain (costs off)
-select * from
-  (select string4, count(unique2)
-   from tenk1 group by string4 order by string4) ss
-  right join (values (1),(2),(3)) v(x) on true;
-                        QUERY PLAN
-----------------------------------------------------------
- Nested Loop Left Join
-   ->  Values Scan on "*VALUES*"
-   ->  Finalize GroupAggregate
-         Group Key: tenk1.string4
-         ->  Gather Merge
-               Workers Planned: 4
-               ->  Partial GroupAggregate
-                     Group Key: tenk1.string4
-                     ->  Sort
-                           Sort Key: tenk1.string4
-                           ->  Parallel Seq Scan on tenk1
-(11 rows)
-
-select * from
-  (select string4, count(unique2)
-   from tenk1 group by string4 order by string4) ss
-  right join (values (1),(2),(3)) v(x) on true;
- string4 | count | x
----------+-------+---
- AAAAxx  |  2500 | 1
- HHHHxx  |  2500 | 1
- OOOOxx  |  2500 | 1
- VVVVxx  |  2500 | 1
- AAAAxx  |  2500 | 2
- HHHHxx  |  2500 | 2
- OOOOxx  |  2500 | 2
- VVVVxx  |  2500 | 2
- AAAAxx  |  2500 | 3
- HHHHxx  |  2500 | 3
- OOOOxx  |  2500 | 3
- VVVVxx  |  2500 | 3
-(12 rows)
-
-reset enable_material;
-reset enable_hashagg;
--- check parallelized int8 aggregate (bug #14897)
-explain (costs off)
-select avg(unique1::int8) from tenk1;
-                                QUERY PLAN
---------------------------------------------------------------------------
- Finalize Aggregate
-   ->  Gather
-         Workers Planned: 4
-         ->  Partial Aggregate
-               ->  Parallel Index Only Scan using tenk1_unique1 on tenk1
-(5 rows)
-
-select avg(unique1::int8) from tenk1;
-          avg
------------------------
- 4999.5000000000000000
-(1 row)
-
--- gather merge test with a LIMIT
-explain (costs off)
-  select fivethous from tenk1 order by fivethous limit 4;
-                  QUERY PLAN
-----------------------------------------------
- Limit
-   ->  Gather Merge
-         Workers Planned: 4
-         ->  Sort
-               Sort Key: fivethous
-               ->  Parallel Seq Scan on tenk1
-(6 rows)
-
-select fivethous from tenk1 order by fivethous limit 4;
- fivethous
------------
-         0
-         0
-         1
-         1
-(4 rows)
-
--- gather merge test with 0 workers
-set max_parallel_workers = 0;
-explain (costs off)
-  select string4 from tenk1 order by string4 limit 5;
-                  QUERY PLAN
-----------------------------------------------
- Limit
-   ->  Gather Merge
-         Workers Planned: 4
-         ->  Sort
-               Sort Key: string4
-               ->  Parallel Seq Scan on tenk1
-(6 rows)
-
-select string4 from tenk1 order by string4 limit 5;
- string4
----------
- AAAAxx
- AAAAxx
- AAAAxx
- AAAAxx
- AAAAxx
-(5 rows)
-
--- gather merge test with 0 workers, with parallel leader
--- participation disabled (the leader will have to run the plan
--- despite the setting)
-set parallel_leader_participation = off;
-explain (costs off)
-  select string4 from tenk1 order by string4 limit 5;
-                  QUERY PLAN
-----------------------------------------------
- Limit
-   ->  Gather Merge
-         Workers Planned: 4
-         ->  Sort
-               Sort Key: string4
-               ->  Parallel Seq Scan on tenk1
-(6 rows)
-
-select string4 from tenk1 order by string4 limit 5;
- string4
----------
- AAAAxx
- AAAAxx
- AAAAxx
- AAAAxx
- AAAAxx
-(5 rows)
-
-reset parallel_leader_participation;
-reset max_parallel_workers;
-create function parallel_safe_volatile(a int) returns int as
-  $$ begin return a; end; $$ parallel safe volatile language plpgsql;
--- Test gather merge atop a sort of a partial path
-explain (costs off)
-select * from tenk1 where four = 2
-order by four, hundred, parallel_safe_volatile(thousand);
-                           QUERY PLAN
----------------------------------------------------------------
- Gather Merge
-   Workers Planned: 4
-   ->  Sort
-         Sort Key: hundred, (parallel_safe_volatile(thousand))
-         ->  Parallel Seq Scan on tenk1
-               Filter: (four = 2)
-(6 rows)
-
--- Test gather merge atop an incremental sort of a partial path
-set min_parallel_index_scan_size = 0;
-set enable_seqscan = off;
-explain (costs off)
-select * from tenk1 where four = 2
-order by four, hundred, parallel_safe_volatile(thousand);
-                           QUERY PLAN
----------------------------------------------------------------
- Gather Merge
-   Workers Planned: 4
-   ->  Incremental Sort
-         Sort Key: hundred, (parallel_safe_volatile(thousand))
-         Presorted Key: hundred
-         ->  Parallel Index Scan using tenk1_hundred on tenk1
-               Filter: (four = 2)
-(7 rows)
-
-reset min_parallel_index_scan_size;
-reset enable_seqscan;
--- Test GROUP BY with a gather merge path atop a sort of a partial path
-explain (costs off)
-select count(*) from tenk1
-group by twenty, parallel_safe_volatile(two);
-                             QUERY PLAN
---------------------------------------------------------------------
- Finalize GroupAggregate
-   Group Key: twenty, (parallel_safe_volatile(two))
-   ->  Gather Merge
-         Workers Planned: 4
-         ->  Sort
-               Sort Key: twenty, (parallel_safe_volatile(two))
-               ->  Partial HashAggregate
-                     Group Key: twenty, parallel_safe_volatile(two)
-                     ->  Parallel Seq Scan on tenk1
-(9 rows)
-
-drop function parallel_safe_volatile(int);
-SAVEPOINT settings;
-SET LOCAL debug_parallel_query = 1;
-explain (costs off)
-  select stringu1::int2 from tenk1 where unique1 = 1;
-                  QUERY PLAN
------------------------------------------------
- Gather
-   Workers Planned: 1
-   Single Copy: true
-   ->  Index Scan using tenk1_unique1 on tenk1
-         Index Cond: (unique1 = 1)
-(5 rows)
-
-ROLLBACK TO SAVEPOINT settings;
--- exercise record typmod remapping between backends
-CREATE FUNCTION make_record(n int)
-  RETURNS RECORD LANGUAGE plpgsql PARALLEL SAFE AS
-$$
-BEGIN
-  RETURN CASE n
-           WHEN 1 THEN ROW(1)
-           WHEN 2 THEN ROW(1, 2)
-           WHEN 3 THEN ROW(1, 2, 3)
-           WHEN 4 THEN ROW(1, 2, 3, 4)
-           ELSE ROW(1, 2, 3, 4, 5)
-         END;
-END;
-$$;
-SAVEPOINT settings;
-SET LOCAL debug_parallel_query = 1;
-SELECT make_record(x) FROM (SELECT generate_series(1, 5) x) ss ORDER BY x;
- make_record
--------------
- (1)
- (1,2)
- (1,2,3)
- (1,2,3,4)
- (1,2,3,4,5)
-(5 rows)
-
-ROLLBACK TO SAVEPOINT settings;
-DROP function make_record(n int);
--- test the sanity of parallel query after the active role is dropped.
-drop role if exists regress_parallel_worker;
-NOTICE:  role "regress_parallel_worker" does not exist, skipping
-create role regress_parallel_worker;
-set role regress_parallel_worker;
-reset session authorization;
-drop role regress_parallel_worker;
-set debug_parallel_query = 1;
-select count(*) from tenk1;
- count
--------
- 10000
-(1 row)
-
-reset debug_parallel_query;
-reset role;
--- Window function calculation can't be pushed to workers.
-explain (costs off, verbose)
-  select count(*) from tenk1 a where (unique1, two) in
-    (select unique1, row_number() over() from tenk1 b);
-                                          QUERY PLAN
-----------------------------------------------------------------------------------------------
- Aggregate
-   Output: count(*)
-   ->  Hash Semi Join
-         Hash Cond: ((a.unique1 = b.unique1) AND (a.two = (row_number() OVER (?))))
-         ->  Gather
-               Output: a.unique1, a.two
-               Workers Planned: 4
-               ->  Parallel Seq Scan on public.tenk1 a
-                     Output: a.unique1, a.two
-         ->  Hash
-               Output: b.unique1, (row_number() OVER (?))
-               ->  WindowAgg
-                     Output: b.unique1, row_number() OVER (?)
- -> Gather - Output: b.unique1 - Workers Planned: 4 - -> Parallel Index Only Scan using tenk1_unique1 on public.tenk1 b - Output: b.unique1 -(18 rows) - --- LIMIT/OFFSET within sub-selects can't be pushed to workers. -explain (costs off) - select * from tenk1 a where two in - (select two from tenk1 b where stringu1 like '%AAAA' limit 3); - QUERY PLAN ---------------------------------------------------------------- - Hash Semi Join - Hash Cond: (a.two = b.two) - -> Gather - Workers Planned: 4 - -> Parallel Seq Scan on tenk1 a - -> Hash - -> Limit - -> Gather - Workers Planned: 4 - -> Parallel Seq Scan on tenk1 b - Filter: (stringu1 ~~ '%AAAA'::text) -(11 rows) - --- to increase the parallel query test coverage -SAVEPOINT settings; -SET LOCAL debug_parallel_query = 1; -EXPLAIN (analyze, timing off, summary off, costs off) SELECT * FROM tenk1; - QUERY PLAN -------------------------------------------------------------- - Gather (actual rows=10000 loops=1) - Workers Planned: 4 - Workers Launched: 4 - -> Parallel Seq Scan on tenk1 (actual rows=2000 loops=5) -(4 rows) - -ROLLBACK TO SAVEPOINT settings; --- provoke error in worker --- (make the error message long enough to require multiple bufferloads) -SAVEPOINT settings; -SET LOCAL debug_parallel_query = 1; -select (stringu1 || repeat('abcd', 5000))::int2 from tenk1 where unique1 = 1; -ERROR: invalid input syntax for type smallint: "BAAAAAabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd
abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabc
dabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab
cdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcda
bcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd
abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabc
dabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
-CONTEXT:  parallel worker
-ROLLBACK TO SAVEPOINT settings;
--- test interaction with set-returning functions
-SAVEPOINT settings;
--- multiple subqueries under a single Gather node
--- must set parallel_setup_cost > 0 to discourage multiple Gather nodes
-SET LOCAL parallel_setup_cost = 10;
-EXPLAIN (COSTS OFF)
-SELECT unique1 FROM tenk1 WHERE fivethous = tenthous + 1
-UNION ALL
-SELECT unique1 FROM tenk1 WHERE fivethous = tenthous + 1;
-                     QUERY PLAN
-----------------------------------------------------
- Gather
-   Workers Planned: 4
-   ->  Parallel Append
-         ->  Parallel Seq Scan on tenk1
-               Filter: (fivethous = (tenthous + 1))
-         ->  Parallel Seq Scan on tenk1 tenk1_1
-               Filter: (fivethous = (tenthous + 1))
-(7 rows)
-
-ROLLBACK TO SAVEPOINT settings;
--- can't use multiple subqueries under a single Gather node due to initPlans
-EXPLAIN (COSTS OFF)
-SELECT unique1 FROM tenk1 WHERE fivethous =
-  (SELECT unique1 FROM tenk1 WHERE fivethous = 1 LIMIT 1)
-UNION ALL
-SELECT unique1 FROM tenk1 WHERE fivethous =
-  (SELECT unique2 FROM tenk1 WHERE fivethous = 1 LIMIT 1)
-ORDER BY 1;
-                             QUERY PLAN
---------------------------------------------------------------------
- Sort
-   Sort Key: tenk1.unique1
-   ->  Append
-         ->  Gather
-               Workers Planned: 4
-               Params Evaluated: $1
-               InitPlan 1 (returns $1)
-                 ->  Limit
-                       ->  Gather
-                             Workers Planned: 4
-                             ->  Parallel Seq Scan on tenk1 tenk1_2
-                                   Filter: (fivethous = 1)
-               ->  Parallel Seq Scan on tenk1
-                     Filter: (fivethous = $1)
-         ->  Gather
-               Workers Planned: 4
-               Params Evaluated: $3
-               InitPlan 2 (returns $3)
-                 ->  Limit
-                       ->  Gather
-                             Workers Planned: 4
-                             ->  Parallel Seq Scan on tenk1 tenk1_3
-                                   Filter: (fivethous = 1)
-               ->  Parallel Seq Scan on tenk1 tenk1_1
-                     Filter: (fivethous = $3)
-(25 rows)
-
--- test interaction with SRFs
-SELECT * FROM information_schema.foreign_data_wrapper_options
-ORDER BY 1, 2, 3;
- foreign_data_wrapper_catalog | foreign_data_wrapper_name | option_name | option_value
-------------------------------+---------------------------+-------------+--------------
-(0 rows)
-
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT generate_series(1, two), array(select generate_series(1, two))
-  FROM tenk1 ORDER BY tenthous;
-                              QUERY PLAN
-----------------------------------------------------------------------
- ProjectSet
-   Output: generate_series(1, tenk1.two), (SubPlan 1), tenk1.tenthous
-   ->  Gather Merge
-         Output: tenk1.two, tenk1.tenthous
-         Workers Planned: 4
-         ->  Result
-               Output: tenk1.two, tenk1.tenthous
-               ->  Sort
-                     Output: tenk1.tenthous, tenk1.two
-                     Sort Key: tenk1.tenthous
-                     ->  Parallel Seq Scan on public.tenk1
-                           Output: tenk1.tenthous, tenk1.two
-   SubPlan 1
-     ->  ProjectSet
-           Output: generate_series(1, tenk1.two)
-           ->  Result
-(16 rows)
-
--- must disallow pushing sort below gather when pathkey contains an SRF
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT unnest(ARRAY[]::integer[]) + 1 AS pathkey
-  FROM tenk1 t1 JOIN tenk1 t2 ON TRUE
-  ORDER BY pathkey;
-                                          QUERY PLAN
------------------------------------------------------------------------------------------------------
- Sort
-   Output: (((unnest('{}'::integer[])) + 1))
-   Sort Key: (((unnest('{}'::integer[])) + 1))
-   ->  Result
-         Output: ((unnest('{}'::integer[])) + 1)
-         ->  ProjectSet
-               Output: unnest('{}'::integer[])
-               ->  Nested Loop
-                     ->  Gather
-                           Workers Planned: 4
-                           ->  Parallel Index Only Scan using tenk1_hundred on public.tenk1 t1
-                     ->  Materialize
-                           ->  Gather
-                                 Workers Planned: 4
-                                 ->  Parallel Index Only Scan using tenk1_hundred on public.tenk1 t2
-(15 rows)
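These plan shapes all depend on cost settings that force parallelism. A minimal sketch of an equivalent interactive session setup, mirroring the write_parallel preamble further down (tenk1 is the standard regression table; running this outside the regression database is an assumption, not part of the suite):

begin;
set parallel_setup_cost = 0;             -- make Gather look free
set parallel_tuple_cost = 0;
set min_parallel_table_scan_size = 0;    -- any table qualifies
set max_parallel_workers_per_gather = 4;
explain (costs off) select count(*) from tenk1;  -- expect a Gather node
rollback;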
-
--- test passing expanded-value representations to workers
-CREATE FUNCTION make_some_array(int,int) returns int[] as
-$$declare x int[];
-  begin
-    x[1] := $1;
-    x[2] := $2;
-    return x;
-  end$$ language plpgsql parallel safe;
-CREATE TABLE fooarr(f1 text, f2 int[], f3 text);
-INSERT INTO fooarr VALUES('1', ARRAY[1,2], 'one');
-PREPARE pstmt(text, int[]) AS SELECT * FROM fooarr WHERE f1 = $1 AND f2 = $2;
-EXPLAIN (COSTS OFF) EXECUTE pstmt('1', make_some_array(1,2));
-                            QUERY PLAN
--------------------------------------------------------------------
- Gather
-   Workers Planned: 3
-   ->  Parallel Seq Scan on fooarr
-         Filter: ((f1 = '1'::text) AND (f2 = '{1,2}'::integer[]))
-(4 rows)
-
-EXECUTE pstmt('1', make_some_array(1,2));
- f1 |  f2   | f3
-----+-------+-----
- 1  | {1,2} | one
-(1 row)
-
-DEALLOCATE pstmt;
--- test interaction between subquery and partial_paths
-CREATE VIEW tenk1_vw_sec WITH (security_barrier) AS SELECT * FROM tenk1;
-EXPLAIN (COSTS OFF)
-SELECT 1 FROM tenk1_vw_sec
-  WHERE (SELECT sum(f1) FROM int4_tbl WHERE f1 < unique1) < 100;
-                             QUERY PLAN
---------------------------------------------------------------------
- Subquery Scan on tenk1_vw_sec
-   Filter: ((SubPlan 1) < 100)
-   ->  Gather
-         Workers Planned: 4
-         ->  Parallel Index Only Scan using tenk1_unique1 on tenk1
-   SubPlan 1
-     ->  Aggregate
-           ->  Seq Scan on int4_tbl
-                 Filter: (f1 < tenk1_vw_sec.unique1)
-(9 rows)
-
-rollback;
+psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL:  the database system is not yet accepting connections
+DETAIL:  Consistent recovery state has not been yet reached.
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/write_parallel.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/write_parallel.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/write_parallel.out 2024-03-07 14:25:00.334839000 +0000
+++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/write_parallel.out 2024-03-07 14:27:17.290341000 +0000
@@ -1,80 +1,2 @@
---
--- PARALLEL
---
-begin;
--- encourage use of parallel plans
-set parallel_setup_cost=0;
-set parallel_tuple_cost=0;
-set min_parallel_table_scan_size=0;
-set max_parallel_workers_per_gather=4;
---
--- Test write operations that have an underlying query that is eligible
--- for parallel plans
---
-explain (costs off) create table parallel_write as
-    select length(stringu1) from tenk1 group by length(stringu1);
-                     QUERY PLAN
------------------------------------------------------
- Finalize HashAggregate
-   Group Key: (length((stringu1)::text))
-   ->  Gather
-         Workers Planned: 4
-         ->  Partial HashAggregate
-               Group Key: length((stringu1)::text)
-               ->  Parallel Seq Scan on tenk1
-(7 rows)
-
-create table parallel_write as
-    select length(stringu1) from tenk1 group by length(stringu1);
-drop table parallel_write;
-explain (costs off) select length(stringu1) into parallel_write
-    from tenk1 group by length(stringu1);
-                     QUERY PLAN
------------------------------------------------------
- Finalize HashAggregate
-   Group Key: (length((stringu1)::text))
-   ->  Gather
-         Workers Planned: 4
-         ->  Partial HashAggregate
-               Group Key: length((stringu1)::text)
-               ->  Parallel Seq Scan on tenk1
-(7 rows)
-
-select length(stringu1) into parallel_write
-    from tenk1 group by length(stringu1);
-drop table parallel_write;
-explain (costs off) create materialized view parallel_mat_view as
-    select length(stringu1) from tenk1 group by length(stringu1);
-                     QUERY PLAN
------------------------------------------------------
- Finalize HashAggregate
-   Group Key: (length((stringu1)::text))
-   ->  Gather
-         Workers Planned: 4
-         ->  Partial HashAggregate
-               Group Key: length((stringu1)::text)
-               ->  Parallel Seq Scan on tenk1
-(7 rows)
-
-create materialized view parallel_mat_view as
-    select length(stringu1) from tenk1 group by length(stringu1);
-create unique index on parallel_mat_view(length);
-refresh materialized view parallel_mat_view;
-refresh materialized view concurrently parallel_mat_view;
-drop materialized view parallel_mat_view;
-prepare prep_stmt as select length(stringu1) from tenk1 group by length(stringu1);
-explain (costs off) create table parallel_write as execute prep_stmt;
-                     QUERY PLAN
------------------------------------------------------
- Finalize HashAggregate
-   Group Key: (length((stringu1)::text))
-   ->  Gather
-         Workers Planned: 4
-         ->  Partial HashAggregate
-               Group Key: length((stringu1)::text)
-               ->  Parallel Seq Scan on tenk1
-(7 rows)
-
-create table parallel_write as execute prep_stmt;
-drop table parallel_write;
-rollback;
+psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL:  the database system is not yet accepting connections
+DETAIL:  Consistent recovery state has not been yet reached.
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/vacuum_parallel.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/vacuum_parallel.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/vacuum_parallel.out 2024-03-07 14:25:00.334676000 +0000
+++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/vacuum_parallel.out 2024-03-07 14:27:17.296752000 +0000
@@ -1,49 +1,2 @@
-SET max_parallel_maintenance_workers TO 4;
-SET min_parallel_index_scan_size TO '128kB';
--- Bug #17245: Make sure that we don't totally fail to VACUUM individual indexes that
--- happen to be below min_parallel_index_scan_size during parallel VACUUM:
-CREATE TABLE parallel_vacuum_table (a int) WITH (autovacuum_enabled = off);
-INSERT INTO parallel_vacuum_table SELECT i from generate_series(1, 10000) i;
--- Parallel VACUUM will never be used unless there are at least two indexes
--- that exceed min_parallel_index_scan_size.  Create two such indexes, and
--- a third index that is smaller than min_parallel_index_scan_size.
-CREATE INDEX regular_sized_index ON parallel_vacuum_table(a);
-CREATE INDEX typically_sized_index ON parallel_vacuum_table(a);
--- Note: vacuum_in_leader_small_index can apply deduplication, making it ~3x
--- smaller than the other indexes
-CREATE INDEX vacuum_in_leader_small_index ON parallel_vacuum_table((1));
--- Verify (as best we can) that the cost model for parallel VACUUM
--- will make our VACUUM run in parallel, while always leaving it up to the
--- parallel leader to handle the vacuum_in_leader_small_index index:
-SELECT EXISTS (
-SELECT 1
-FROM pg_class
-WHERE oid = 'vacuum_in_leader_small_index'::regclass AND
-  pg_relation_size(oid) <
-  pg_size_bytes(current_setting('min_parallel_index_scan_size'))
-) as leader_will_handle_small_index;
- leader_will_handle_small_index
---------------------------------
- t
-(1 row)
-
-SELECT count(*) as trigger_parallel_vacuum_nindexes
-FROM pg_class
-WHERE oid in ('regular_sized_index'::regclass, 'typically_sized_index'::regclass) AND
-  pg_relation_size(oid) >=
-  pg_size_bytes(current_setting('min_parallel_index_scan_size'));
- trigger_parallel_vacuum_nindexes
-----------------------------------
-                                2
-(1 row)
-
--- Parallel VACUUM with B-Tree page deletions, ambulkdelete calls:
-DELETE FROM parallel_vacuum_table;
-VACUUM (PARALLEL 4, INDEX_CLEANUP ON) parallel_vacuum_table;
--- Since vacuum_in_leader_small_index uses deduplication, we expect an
--- assertion failure with bug #17245 (in the absence of bugfix):
-INSERT INTO parallel_vacuum_table SELECT i FROM generate_series(1, 10000) i;
-RESET max_parallel_maintenance_workers;
-RESET min_parallel_index_scan_size;
--- Deliberately don't drop table, to get further coverage from tools like
--- pg_amcheck in some testing scenarios
+psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL:  the database system is not yet accepting connections
+DETAIL:  Consistent recovery state has not been yet reached.
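The two catalog queries in the expected output above encode the parallel VACUUM cost model's rule of thumb: indexes below min_parallel_index_scan_size stay with the leader, and at least two larger ones are needed to go parallel at all. A hedged sketch of the same check against arbitrary objects (my_index and my_table are placeholders, not objects from the suite):

-- Would this index be left to the leader during a parallel VACUUM?
-- Same size predicate the test queries use.
SELECT pg_relation_size('my_index'::regclass) <
       pg_size_bytes(current_setting('min_parallel_index_scan_size'))
       AS leader_will_handle_it;
-- Request parallel index processing explicitly, as the test does:
VACUUM (PARALLEL 4, INDEX_CLEANUP ON) my_table;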
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/publication.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/publication.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/publication.out 2024-03-07 14:25:00.333170000 +0000
+++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/publication.out 2024-03-07 14:27:17.303616000 +0000
@@ -1,1743 +1,2 @@
---
--- PUBLICATION
---
-CREATE ROLE regress_publication_user LOGIN SUPERUSER;
-CREATE ROLE regress_publication_user2;
-CREATE ROLE regress_publication_user_dummy LOGIN NOSUPERUSER;
-SET SESSION AUTHORIZATION 'regress_publication_user';
--- suppress warning that depends on wal_level
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_default;
-RESET client_min_messages;
-COMMENT ON PUBLICATION testpub_default IS 'test publication';
-SELECT obj_description(p.oid, 'pg_publication') FROM pg_publication p;
- obj_description
-------------------
- test publication
-(1 row)
-
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpib_ins_trunct WITH (publish = insert);
-RESET client_min_messages;
-ALTER PUBLICATION testpub_default SET (publish = update);
--- error cases
-CREATE PUBLICATION testpub_xxx WITH (foo);
-ERROR:  unrecognized publication parameter: "foo"
-CREATE PUBLICATION testpub_xxx WITH (publish = 'cluster, vacuum');
-ERROR:  unrecognized value for publication option "publish": "cluster"
-CREATE PUBLICATION testpub_xxx WITH (publish_via_partition_root = 'true', publish_via_partition_root = '0');
-ERROR:  conflicting or redundant options
-LINE 1: ...ub_xxx WITH (publish_via_partition_root = 'true', publish_vi...
-                                                             ^
-\dRp
-                                              List of publications
-        Name        |          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Via root
---------------------+--------------------------+------------+---------+---------+---------+-----------+----------
- testpib_ins_trunct | regress_publication_user | f          | t       | f       | f       | f         | f
- testpub_default    | regress_publication_user | f          | f       | t       | f       | f         | f
-(2 rows)
-
-ALTER PUBLICATION testpub_default SET (publish = 'insert, update, delete');
-\dRp
-                                              List of publications
-        Name        |          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Via root
---------------------+--------------------------+------------+---------+---------+---------+-----------+----------
- testpib_ins_trunct | regress_publication_user | f          | t       | f       | f       | f         | f
- testpub_default    | regress_publication_user | f          | t       | t       | t       | f         | f
-(2 rows)
-
---- adding tables
-CREATE SCHEMA pub_test;
-CREATE TABLE testpub_tbl1 (id serial primary key, data text);
-CREATE TABLE pub_test.testpub_nopk (foo int, bar int);
-CREATE VIEW testpub_view AS SELECT 1;
-CREATE TABLE testpub_parted (a int) PARTITION BY LIST (a);
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_foralltables FOR ALL TABLES WITH (publish = 'insert');
-RESET client_min_messages;
-ALTER PUBLICATION testpub_foralltables SET (publish = 'insert, update');
-CREATE TABLE testpub_tbl2 (id serial primary key, data text);
--- fail - can't add to for all tables publication
-ALTER PUBLICATION testpub_foralltables ADD TABLE testpub_tbl2;
-ERROR:  publication "testpub_foralltables" is defined as FOR ALL TABLES
-DETAIL:  Tables cannot be added to or dropped from FOR ALL TABLES publications.
--- fail - can't drop from all tables publication
-ALTER PUBLICATION testpub_foralltables DROP TABLE testpub_tbl2;
-ERROR:  publication "testpub_foralltables" is defined as FOR ALL TABLES
-DETAIL:  Tables cannot be added to or dropped from FOR ALL TABLES publications.
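The failures above (and the SET TABLE / TABLES IN SCHEMA variants that follow) are by design: membership of a FOR ALL TABLES publication is implicit, so per-table changes require a publication created FOR TABLE. A sketch of the accepted forms (p_sketch is a placeholder name; the tables are the ones created in the test above):

CREATE PUBLICATION p_sketch FOR TABLE testpub_tbl1;
ALTER PUBLICATION p_sketch ADD TABLE testpub_tbl2;   -- accepted here
ALTER PUBLICATION p_sketch DROP TABLE testpub_tbl2;
DROP PUBLICATION p_sketch;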
--- fail - can't add to for all tables publication
-ALTER PUBLICATION testpub_foralltables SET TABLE pub_test.testpub_nopk;
-ERROR:  publication "testpub_foralltables" is defined as FOR ALL TABLES
-DETAIL:  Tables cannot be added to or dropped from FOR ALL TABLES publications.
--- fail - can't add schema to 'FOR ALL TABLES' publication
-ALTER PUBLICATION testpub_foralltables ADD TABLES IN SCHEMA pub_test;
-ERROR:  publication "testpub_foralltables" is defined as FOR ALL TABLES
-DETAIL:  Schemas cannot be added to or dropped from FOR ALL TABLES publications.
--- fail - can't drop schema from 'FOR ALL TABLES' publication
-ALTER PUBLICATION testpub_foralltables DROP TABLES IN SCHEMA pub_test;
-ERROR:  publication "testpub_foralltables" is defined as FOR ALL TABLES
-DETAIL:  Schemas cannot be added to or dropped from FOR ALL TABLES publications.
--- fail - can't set schema to 'FOR ALL TABLES' publication
-ALTER PUBLICATION testpub_foralltables SET TABLES IN SCHEMA pub_test;
-ERROR:  publication "testpub_foralltables" is defined as FOR ALL TABLES
-DETAIL:  Schemas cannot be added to or dropped from FOR ALL TABLES publications.
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_fortable FOR TABLE testpub_tbl1;
-RESET client_min_messages;
--- should be able to add schema to 'FOR TABLE' publication
-ALTER PUBLICATION testpub_fortable ADD TABLES IN SCHEMA pub_test;
-\dRp+ testpub_fortable
-                              Publication testpub_fortable
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Via root
---------------------------+------------+---------+---------+---------+-----------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f
-Tables:
-    "public.testpub_tbl1"
-Tables from schemas:
-    "pub_test"
-
--- should be able to drop schema from 'FOR TABLE' publication
-ALTER PUBLICATION testpub_fortable DROP TABLES IN SCHEMA pub_test;
-\dRp+ testpub_fortable
-                              Publication testpub_fortable
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Via root
---------------------------+------------+---------+---------+---------+-----------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f
-Tables:
-    "public.testpub_tbl1"
-
--- should be able to set schema to 'FOR TABLE' publication
-ALTER PUBLICATION testpub_fortable SET TABLES IN SCHEMA pub_test;
-\dRp+ testpub_fortable
-                              Publication testpub_fortable
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Via root
---------------------------+------------+---------+---------+---------+-----------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f
-Tables from schemas:
-    "pub_test"
-
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_forschema FOR TABLES IN SCHEMA pub_test;
--- should be able to create publication with schema and table of the same
--- schema
-CREATE PUBLICATION testpub_for_tbl_schema FOR TABLES IN SCHEMA pub_test, TABLE pub_test.testpub_nopk;
-RESET client_min_messages;
-\dRp+ testpub_for_tbl_schema
-                           Publication testpub_for_tbl_schema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Via root
---------------------------+------------+---------+---------+---------+-----------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f
-Tables:
-    "pub_test.testpub_nopk"
-Tables from schemas:
-    "pub_test"
-
--- weird parser corner case
-CREATE PUBLICATION testpub_parsertst FOR TABLE pub_test.testpub_nopk, CURRENT_SCHEMA;
-ERROR:  invalid table name
-LINE 1: ...estpub_parsertst FOR TABLE pub_test.testpub_nopk, CURRENT_SC...
-                                                             ^
-CREATE PUBLICATION testpub_parsertst FOR TABLES IN SCHEMA foo, test.foo;
-ERROR:  invalid schema name
-LINE 1: ...CATION testpub_parsertst FOR TABLES IN SCHEMA foo, test.foo;
-                                                              ^
--- should be able to add a table of the same schema to the schema publication
-ALTER PUBLICATION testpub_forschema ADD TABLE pub_test.testpub_nopk;
-\dRp+ testpub_forschema
-                             Publication testpub_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Via root
---------------------------+------------+---------+---------+---------+-----------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f
-Tables:
-    "pub_test.testpub_nopk"
-Tables from schemas:
-    "pub_test"
-
--- should be able to drop the table
-ALTER PUBLICATION testpub_forschema DROP TABLE pub_test.testpub_nopk;
-\dRp+ testpub_forschema
-                             Publication testpub_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Via root
---------------------------+------------+---------+---------+---------+-----------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f
-Tables from schemas:
-    "pub_test"
-
--- fail - can't drop a table from the schema publication which isn't in the
--- publication
-ALTER PUBLICATION testpub_forschema DROP TABLE pub_test.testpub_nopk;
-ERROR:  relation "testpub_nopk" is not part of the publication
--- should be able to set table to schema publication
-ALTER PUBLICATION testpub_forschema SET TABLE pub_test.testpub_nopk;
-\dRp+ testpub_forschema
-                             Publication testpub_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Via root
---------------------------+------------+---------+---------+---------+-----------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f
-Tables:
-    "pub_test.testpub_nopk"
-
-SELECT pubname, puballtables FROM pg_publication WHERE pubname = 'testpub_foralltables';
-       pubname        | puballtables
-----------------------+--------------
- testpub_foralltables | t
-(1 row)
-
-\d+ testpub_tbl2
-                                                   Table "public.testpub_tbl2"
- Column |  Type   | Collation | Nullable |                 Default                  | Storage  | Stats target | Description
---------+---------+-----------+----------+------------------------------------------+----------+--------------+-------------
- id     | integer |           | not null | nextval('testpub_tbl2_id_seq'::regclass) | plain    |              |
- data   | text    |           |          |                                          | extended |              |
-Indexes:
-    "testpub_tbl2_pkey" PRIMARY KEY, btree (id)
-Publications:
-    "testpub_foralltables"
-Not-null constraints:
-    "testpub_tbl2_id_not_null" NOT NULL "id"
-
-\dRp+ testpub_foralltables
-                            Publication testpub_foralltables
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Via root
---------------------------+------------+---------+---------+---------+-----------+----------
- regress_publication_user | t          | t       | t       | f       | f         | f
-(1 row)
-
-DROP TABLE testpub_tbl2;
-DROP PUBLICATION testpub_foralltables, testpub_fortable, testpub_forschema, testpub_for_tbl_schema;
-CREATE TABLE testpub_tbl3 (a int);
-CREATE TABLE testpub_tbl3a (b text) INHERITS (testpub_tbl3);
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub3 FOR TABLE testpub_tbl3;
-CREATE PUBLICATION testpub4 FOR TABLE ONLY testpub_tbl3;
-RESET client_min_messages;
-\dRp+ testpub3
-                                 Publication testpub3
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Via root
---------------------------+------------+---------+---------+---------+-----------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f
-Tables:
-    "public.testpub_tbl3"
-    "public.testpub_tbl3a"
-
-\dRp+ testpub4
-                                 Publication testpub4
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Via root
---------------------------+------------+---------+---------+---------+-----------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f
-Tables:
-    "public.testpub_tbl3"
-
-DROP TABLE testpub_tbl3, testpub_tbl3a;
-DROP PUBLICATION testpub3, testpub4;
--- Tests for partitioned tables
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_forparted;
-CREATE PUBLICATION testpub_forparted1;
-RESET client_min_messages;
-CREATE TABLE testpub_parted1 (LIKE testpub_parted);
-CREATE TABLE testpub_parted2 (LIKE testpub_parted);
-ALTER PUBLICATION testpub_forparted1 SET (publish='insert');
-ALTER TABLE testpub_parted ATTACH PARTITION testpub_parted1 FOR VALUES IN (1);
-ALTER TABLE testpub_parted ATTACH PARTITION testpub_parted2 FOR VALUES IN (2);
--- works despite missing REPLICA IDENTITY, because updates are not replicated
-UPDATE testpub_parted1 SET a = 1;
--- only parent is listed as being in publication, not the partition
-ALTER PUBLICATION testpub_forparted ADD TABLE testpub_parted;
-\dRp+ testpub_forparted
-                             Publication testpub_forparted
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Via root
---------------------------+------------+---------+---------+---------+-----------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f
-Tables:
-    "public.testpub_parted"
-
--- works despite missing REPLICA IDENTITY, because no actual update happened
-UPDATE testpub_parted SET a = 1 WHERE false;
--- should now fail, because parent's publication replicates updates
-UPDATE testpub_parted1 SET a = 1;
-ERROR:  cannot update table "testpub_parted1" because it does not have a replica identity and publishes updates
-HINT:  To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-ALTER TABLE testpub_parted DETACH PARTITION testpub_parted1;
--- works again, because parent's publication is no longer considered
-UPDATE testpub_parted1 SET a = 1;
-ALTER PUBLICATION testpub_forparted SET (publish_via_partition_root = true);
-\dRp+ testpub_forparted
-                             Publication testpub_forparted
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Via root
---------------------------+------------+---------+---------+---------+-----------+----------
- regress_publication_user | f          | t       | t       | t       | t         | t
-Tables:
-    "public.testpub_parted"
-
--- still fail, because parent's publication replicates updates
-UPDATE testpub_parted2 SET a = 2;
-ERROR:  cannot update table "testpub_parted2" because it does not have a replica identity and publishes updates
-HINT:  To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-ALTER PUBLICATION testpub_forparted DROP TABLE testpub_parted;
--- works again, because update is no longer replicated
-UPDATE testpub_parted2 SET a = 2;
-DROP TABLE testpub_parted1, testpub_parted2;
-DROP PUBLICATION testpub_forparted, testpub_forparted1;
--- Tests for row filters
-CREATE TABLE testpub_rf_tbl1 (a integer, b text);
-CREATE TABLE testpub_rf_tbl2 (c text, d integer);
-CREATE TABLE testpub_rf_tbl3 (e integer);
-CREATE TABLE testpub_rf_tbl4 (g text);
-CREATE TABLE testpub_rf_tbl5 (a xml);
-CREATE SCHEMA testpub_rf_schema1;
-CREATE TABLE testpub_rf_schema1.testpub_rf_tbl5 (h integer);
-CREATE SCHEMA testpub_rf_schema2;
-CREATE TABLE testpub_rf_schema2.testpub_rf_tbl6 (i integer);
-SET client_min_messages = 'ERROR';
--- Firstly, test using the option publish='insert' because the row filter
--- validation of referenced columns is less strict than for delete/update.
-CREATE PUBLICATION testpub5 FOR TABLE testpub_rf_tbl1, testpub_rf_tbl2 WHERE (c <> 'test' AND d < 5) WITH (publish = 'insert'); -RESET client_min_messages; -\dRp+ testpub5 - Publication testpub5 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | f | f | f | f -Tables: - "public.testpub_rf_tbl1" - "public.testpub_rf_tbl2" WHERE ((c <> 'test'::text) AND (d < 5)) - -\d testpub_rf_tbl3 - Table "public.testpub_rf_tbl3" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - e | integer | | | - -ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl3 WHERE (e > 1000 AND e < 2000); -\dRp+ testpub5 - Publication testpub5 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | f | f | f | f -Tables: - "public.testpub_rf_tbl1" - "public.testpub_rf_tbl2" WHERE ((c <> 'test'::text) AND (d < 5)) - "public.testpub_rf_tbl3" WHERE ((e > 1000) AND (e < 2000)) - -\d testpub_rf_tbl3 - Table "public.testpub_rf_tbl3" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - e | integer | | | -Publications: - "testpub5" WHERE ((e > 1000) AND (e < 2000)) - -ALTER PUBLICATION testpub5 DROP TABLE testpub_rf_tbl2; -\dRp+ testpub5 - Publication testpub5 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | f | f | f | f -Tables: - "public.testpub_rf_tbl1" - "public.testpub_rf_tbl3" WHERE ((e > 1000) AND (e < 2000)) - --- remove testpub_rf_tbl1 and add testpub_rf_tbl3 again (another WHERE expression) -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e > 300 AND e < 500); -\dRp+ testpub5 - Publication testpub5 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | f | f | f | f -Tables: - "public.testpub_rf_tbl3" WHERE ((e > 300) AND (e < 500)) - -\d testpub_rf_tbl3 - Table "public.testpub_rf_tbl3" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - e | integer | | | -Publications: - "testpub5" WHERE ((e > 300) AND (e < 500)) - --- test \d (now it displays filter information) -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_rf_yes FOR TABLE testpub_rf_tbl1 WHERE (a > 1) WITH (publish = 'insert'); -CREATE PUBLICATION testpub_rf_no FOR TABLE testpub_rf_tbl1; -RESET client_min_messages; -\d testpub_rf_tbl1 - Table "public.testpub_rf_tbl1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | text | | | -Publications: - "testpub_rf_no" - "testpub_rf_yes" WHERE (a > 1) - -DROP PUBLICATION testpub_rf_yes, testpub_rf_no; --- some more syntax tests to exercise other parser pathways -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_syntax1 FOR TABLE testpub_rf_tbl1, ONLY testpub_rf_tbl3 WHERE (e < 999) WITH (publish = 'insert'); -RESET client_min_messages; -\dRp+ testpub_syntax1 - Publication testpub_syntax1 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root 
---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | f | f | f | f -Tables: - "public.testpub_rf_tbl1" - "public.testpub_rf_tbl3" WHERE (e < 999) - -DROP PUBLICATION testpub_syntax1; -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_syntax2 FOR TABLE testpub_rf_tbl1, testpub_rf_schema1.testpub_rf_tbl5 WHERE (h < 999) WITH (publish = 'insert'); -RESET client_min_messages; -\dRp+ testpub_syntax2 - Publication testpub_syntax2 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | f | f | f | f -Tables: - "public.testpub_rf_tbl1" - "testpub_rf_schema1.testpub_rf_tbl5" WHERE (h < 999) - -DROP PUBLICATION testpub_syntax2; --- fail - schemas don't allow WHERE clause -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_syntax3 FOR TABLES IN SCHEMA testpub_rf_schema1 WHERE (a = 123); -ERROR: syntax error at or near "WHERE" -LINE 1: ...b_syntax3 FOR TABLES IN SCHEMA testpub_rf_schema1 WHERE (a =... - ^ -CREATE PUBLICATION testpub_syntax3 FOR TABLES IN SCHEMA testpub_rf_schema1, testpub_rf_schema1 WHERE (a = 123); -ERROR: WHERE clause not allowed for schema -LINE 1: ..._syntax3 FOR TABLES IN SCHEMA testpub_rf_schema1, testpub_rf... - ^ -RESET client_min_messages; --- fail - duplicate tables are not allowed if that table has any WHERE clause -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_dups FOR TABLE testpub_rf_tbl1 WHERE (a = 1), testpub_rf_tbl1 WITH (publish = 'insert'); -ERROR: conflicting or redundant WHERE clauses for table "testpub_rf_tbl1" -CREATE PUBLICATION testpub_dups FOR TABLE testpub_rf_tbl1, testpub_rf_tbl1 WHERE (a = 2) WITH (publish = 'insert'); -ERROR: conflicting or redundant WHERE clauses for table "testpub_rf_tbl1" -RESET client_min_messages; --- fail - publication WHERE clause must be boolean -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (1234); -ERROR: argument of PUBLICATION WHERE must be type boolean, not type integer -LINE 1: ...PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (1234); - ^ --- fail - aggregate functions not allowed in WHERE clause -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e < AVG(e)); -ERROR: aggregate functions are not allowed in WHERE -LINE 1: ...ATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e < AVG(e)); - ^ --- fail - user-defined operators are not allowed -CREATE FUNCTION testpub_rf_func1(integer, integer) RETURNS boolean AS $$ SELECT hashint4($1) > $2 $$ LANGUAGE SQL; -CREATE OPERATOR =#> (PROCEDURE = testpub_rf_func1, LEFTARG = integer, RIGHTARG = integer); -CREATE PUBLICATION testpub6 FOR TABLE testpub_rf_tbl3 WHERE (e =#> 27); -ERROR: invalid publication WHERE expression -LINE 1: ...ICATION testpub6 FOR TABLE testpub_rf_tbl3 WHERE (e =#> 27); - ^ -DETAIL: User-defined operators are not allowed. --- fail - user-defined functions are not allowed -CREATE FUNCTION testpub_rf_func2() RETURNS integer AS $$ BEGIN RETURN 123; END; $$ LANGUAGE plpgsql; -ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a >= testpub_rf_func2()); -ERROR: invalid publication WHERE expression -LINE 1: ...ON testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a >= testpub_rf... - ^ -DETAIL: User-defined or built-in mutable functions are not allowed. --- fail - non-immutable functions are not allowed. random() is volatile. 
-ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a < random()); -ERROR: invalid publication WHERE expression -LINE 1: ...ION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a < random()); - ^ -DETAIL: User-defined or built-in mutable functions are not allowed. --- fail - user-defined collations are not allowed -CREATE COLLATION user_collation FROM "C"; -ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (b < '2' COLLATE user_collation); -ERROR: invalid publication WHERE expression -LINE 1: ...ICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (b < '2' CO... - ^ -DETAIL: User-defined collations are not allowed. --- ok - NULLIF is allowed -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (NULLIF(1,2) = a); --- ok - built-in operators are allowed -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (a IS NULL); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE ((a > 5) IS FALSE); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (a IS DISTINCT FROM 5); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE ((a, a + 1) < (2, 3)); --- ok - built-in type coercions between two binary compatible datatypes are allowed -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (b::varchar < '2'); --- ok - immutable built-in functions are allowed -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl4 WHERE (length(g) < 6); --- fail - user-defined types are not allowed -CREATE TYPE rf_bug_status AS ENUM ('new', 'open', 'closed'); -CREATE TABLE rf_bug (id serial, description text, status rf_bug_status); -CREATE PUBLICATION testpub6 FOR TABLE rf_bug WHERE (status = 'open') WITH (publish = 'insert'); -ERROR: invalid publication WHERE expression -LINE 1: ...EATE PUBLICATION testpub6 FOR TABLE rf_bug WHERE (status = '... - ^ -DETAIL: User-defined types are not allowed. -DROP TABLE rf_bug; -DROP TYPE rf_bug_status; --- fail - row filter expression is not simple -CREATE PUBLICATION testpub6 FOR TABLE testpub_rf_tbl1 WHERE (a IN (SELECT generate_series(1,5))); -ERROR: invalid publication WHERE expression -LINE 1: ...ICATION testpub6 FOR TABLE testpub_rf_tbl1 WHERE (a IN (SELE... - ^ -DETAIL: Only columns, constants, built-in operators, built-in data types, built-in collations, and immutable built-in functions are allowed. --- fail - system columns are not allowed -CREATE PUBLICATION testpub6 FOR TABLE testpub_rf_tbl1 WHERE ('(0,1)'::tid = ctid); -ERROR: invalid publication WHERE expression -LINE 1: ...tpub6 FOR TABLE testpub_rf_tbl1 WHERE ('(0,1)'::tid = ctid); - ^ -DETAIL: System columns are not allowed. 
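The failures above all reduce to the rule in the final DETAIL line: a row filter may reference only the table's own user columns, combined with built-in immutable operators, functions, data types, and collations. A minimal sketch of that boundary, using hypothetical object names (rf_demo, rf_demo_pub) that are not part of the test suite:

    CREATE TABLE rf_demo (id int PRIMARY KEY, note text);
    -- accepted: immutable built-ins over plain user columns
    CREATE PUBLICATION rf_demo_pub FOR TABLE rf_demo
        WHERE (id > 0 AND length(note) < 10);
    -- each of these variants would instead raise "invalid publication WHERE expression":
    --   WHERE (id < random() * 100)    -- volatile built-in function
    --   WHERE (id IN (SELECT 1))       -- subquery, not a simple expression
    --   WHERE ('(0,1)'::tid = ctid)    -- system column
    DROP PUBLICATION rf_demo_pub;
    DROP TABLE rf_demo;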
--- ok - conditional expressions are allowed -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl5 WHERE (a IS DOCUMENT); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl5 WHERE (xmlexists('//foo[text() = ''bar'']' PASSING BY VALUE a)); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (NULLIF(1, 2) = a); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (CASE a WHEN 5 THEN true ELSE false END); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (COALESCE(b, 'foo') = 'foo'); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (GREATEST(a, 10) > 10); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (a IN (2, 4, 6)); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (ARRAY[a] <@ ARRAY[2, 4, 6]); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (ROW(a, 2) IS NULL); --- fail - WHERE not allowed in DROP -ALTER PUBLICATION testpub5 DROP TABLE testpub_rf_tbl1 WHERE (e < 27); -ERROR: cannot use a WHERE clause when removing a table from a publication --- fail - cannot ALTER SET table which is a member of a pre-existing schema -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub6 FOR TABLES IN SCHEMA testpub_rf_schema2; --- should be able to set publication with schema and table of the same schema -ALTER PUBLICATION testpub6 SET TABLES IN SCHEMA testpub_rf_schema2, TABLE testpub_rf_schema2.testpub_rf_tbl6 WHERE (i < 99); -RESET client_min_messages; -\dRp+ testpub6 - Publication testpub6 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables: - "testpub_rf_schema2.testpub_rf_tbl6" WHERE (i < 99) -Tables from schemas: - "testpub_rf_schema2" - -DROP TABLE testpub_rf_tbl1; -DROP TABLE testpub_rf_tbl2; -DROP TABLE testpub_rf_tbl3; -DROP TABLE testpub_rf_tbl4; -DROP TABLE testpub_rf_tbl5; -DROP TABLE testpub_rf_schema1.testpub_rf_tbl5; -DROP TABLE testpub_rf_schema2.testpub_rf_tbl6; -DROP SCHEMA testpub_rf_schema1; -DROP SCHEMA testpub_rf_schema2; -DROP PUBLICATION testpub5; -DROP PUBLICATION testpub6; -DROP OPERATOR =#>(integer, integer); -DROP FUNCTION testpub_rf_func1(integer, integer); -DROP FUNCTION testpub_rf_func2(); -DROP COLLATION user_collation; --- ====================================================== --- More row filter tests for validating column references -CREATE TABLE rf_tbl_abcd_nopk(a int, b int, c int, d int); -CREATE TABLE rf_tbl_abcd_pk(a int, b int, c int, d int, PRIMARY KEY(a,b)); -CREATE TABLE rf_tbl_abcd_part_pk (a int PRIMARY KEY, b int) PARTITION by RANGE (a); -CREATE TABLE rf_tbl_abcd_part_pk_1 (b int, a int PRIMARY KEY); -ALTER TABLE rf_tbl_abcd_part_pk ATTACH PARTITION rf_tbl_abcd_part_pk_1 FOR VALUES FROM (1) TO (10); --- Case 1. REPLICA IDENTITY DEFAULT (means use primary key or nothing) --- 1a. REPLICA IDENTITY is DEFAULT and table has a PK. 
-SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub6 FOR TABLE rf_tbl_abcd_pk WHERE (a > 99); -RESET client_min_messages; --- ok - "a" is a PK col -UPDATE rf_tbl_abcd_pk SET a = 1; -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (b > 99); --- ok - "b" is a PK col -UPDATE rf_tbl_abcd_pk SET a = 1; -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99); --- fail - "c" is not part of the PK -UPDATE rf_tbl_abcd_pk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_pk" -DETAIL: Column used in the publication WHERE expression is not part of the replica identity. -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (d > 99); --- fail - "d" is not part of the PK -UPDATE rf_tbl_abcd_pk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_pk" -DETAIL: Column used in the publication WHERE expression is not part of the replica identity. --- 1b. REPLICA IDENTITY is DEFAULT and table has no PK -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99); --- fail - "a" is not part of REPLICA IDENTITY -UPDATE rf_tbl_abcd_nopk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_nopk" -DETAIL: Column used in the publication WHERE expression is not part of the replica identity. --- Case 2. REPLICA IDENTITY FULL -ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY FULL; -ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY FULL; -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99); --- ok - "c" is in REPLICA IDENTITY now even though not in PK -UPDATE rf_tbl_abcd_pk SET a = 1; -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99); --- ok - "a" is in REPLICA IDENTITY now -UPDATE rf_tbl_abcd_nopk SET a = 1; --- Case 3. REPLICA IDENTITY NOTHING -ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY NOTHING; -ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY NOTHING; -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (a > 99); --- fail - "a" is in PK but it is not part of REPLICA IDENTITY NOTHING -UPDATE rf_tbl_abcd_pk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_pk" -DETAIL: Column used in the publication WHERE expression is not part of the replica identity. -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99); --- fail - "c" is not in PK and not in REPLICA IDENTITY NOTHING -UPDATE rf_tbl_abcd_pk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_pk" -DETAIL: Column used in the publication WHERE expression is not part of the replica identity. -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99); --- fail - "a" is not in REPLICA IDENTITY NOTHING -UPDATE rf_tbl_abcd_nopk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_nopk" -DETAIL: Column used in the publication WHERE expression is not part of the replica identity. --- Case 4. REPLICA IDENTITY INDEX -ALTER TABLE rf_tbl_abcd_pk ALTER COLUMN c SET NOT NULL; -CREATE UNIQUE INDEX idx_abcd_pk_c ON rf_tbl_abcd_pk(c); -ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY USING INDEX idx_abcd_pk_c; -ALTER TABLE rf_tbl_abcd_nopk ALTER COLUMN c SET NOT NULL; -CREATE UNIQUE INDEX idx_abcd_nopk_c ON rf_tbl_abcd_nopk(c); -ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY USING INDEX idx_abcd_nopk_c; -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (a > 99); --- fail - "a" is in PK but it is not part of REPLICA IDENTITY INDEX -UPDATE rf_tbl_abcd_pk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_pk" -DETAIL: Column used in the publication WHERE expression is not part of the replica identity. 
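A pattern worth noting across Cases 1-4 in this block: ALTER PUBLICATION accepts any well-formed filter, and the replica-identity coverage check only fires once an UPDATE (or DELETE) is attempted on the published table. A condensed sketch of that sequencing, with hypothetical names (ri_demo, ri_demo_pub):

    CREATE TABLE ri_demo (a int PRIMARY KEY, b int);
    -- accepted at DDL time even though "b" lies outside the replica identity
    CREATE PUBLICATION ri_demo_pub FOR TABLE ri_demo WHERE (b > 0);
    UPDATE ri_demo SET a = 1;                  -- fails: "b" is not in the replica identity
    ALTER TABLE ri_demo REPLICA IDENTITY FULL;
    UPDATE ri_demo SET a = 1;                  -- ok: every column is now part of the identity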
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99); --- ok - "c" is not in PK but it is part of REPLICA IDENTITY INDEX -UPDATE rf_tbl_abcd_pk SET a = 1; -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99); --- fail - "a" is not in REPLICA IDENTITY INDEX -UPDATE rf_tbl_abcd_nopk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_nopk" -DETAIL: Column used in the publication WHERE expression is not part of the replica identity. -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (c > 99); --- ok - "c" is part of REPLICA IDENTITY INDEX -UPDATE rf_tbl_abcd_nopk SET a = 1; --- Tests for partitioned table --- set PUBLISH_VIA_PARTITION_ROOT to false and test row filter for partitioned --- table -ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); --- fail - cannot use row filter for partitioned table -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk WHERE (a > 99); -ERROR: cannot use publication WHERE clause for relation "rf_tbl_abcd_part_pk" -DETAIL: WHERE clause cannot be used for a partitioned table when publish_via_partition_root is false. --- ok - can use row filter for partition -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk_1 WHERE (a > 99); --- ok - "a" is a PK col -UPDATE rf_tbl_abcd_part_pk SET a = 1; --- set PUBLISH_VIA_PARTITION_ROOT to true and test row filter for partitioned --- table -ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=1); --- ok - can use row filter for partitioned table -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk WHERE (a > 99); --- ok - "a" is a PK col -UPDATE rf_tbl_abcd_part_pk SET a = 1; --- fail - cannot set PUBLISH_VIA_PARTITION_ROOT to false if any row filter is --- used for partitioned table -ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); -ERROR: cannot set parameter "publish_via_partition_root" to false for publication "testpub6" -DETAIL: The publication contains a WHERE clause for partitioned table "rf_tbl_abcd_part_pk", which is not allowed when "publish_via_partition_root" is false. --- remove partitioned table's row filter -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk; --- ok - we don't have row filter for partitioned table. -ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); --- Now change the root filter to use a column "b" --- (which is not in the replica identity) -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk_1 WHERE (b > 99); --- ok - we don't have row filter for partitioned table. -ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); --- fail - "b" is not in REPLICA IDENTITY INDEX -UPDATE rf_tbl_abcd_part_pk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_part_pk_1" -DETAIL: Column used in the publication WHERE expression is not part of the replica identity. --- set PUBLISH_VIA_PARTITION_ROOT to true --- can use row filter for partitioned table -ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=1); --- ok - can use row filter for partitioned table -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk WHERE (b > 99); --- fail - "b" is not in REPLICA IDENTITY INDEX -UPDATE rf_tbl_abcd_part_pk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_part_pk_1" -DETAIL: Column used in the publication WHERE expression is not part of the replica identity. 
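The partitioned-table checks above form a two-way interlock: a row filter on the partitioned table itself is only legal while publish_via_partition_root is true, and the parameter cannot be flipped back to false while such a filter remains. A short sketch with hypothetical names (pvr_demo, pvr_pub):

    CREATE TABLE pvr_demo (a int PRIMARY KEY) PARTITION BY RANGE (a);
    CREATE TABLE pvr_demo_1 PARTITION OF pvr_demo FOR VALUES FROM (1) TO (10);
    CREATE PUBLICATION pvr_pub FOR TABLE pvr_demo WHERE (a > 5)
        WITH (publish_via_partition_root = true);               -- ok
    ALTER PUBLICATION pvr_pub SET (publish_via_partition_root = false);
    -- fails while the WHERE clause on pvr_demo is still in place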
-DROP PUBLICATION testpub6; -DROP TABLE rf_tbl_abcd_pk; -DROP TABLE rf_tbl_abcd_nopk; -DROP TABLE rf_tbl_abcd_part_pk; --- ====================================================== --- fail - duplicate tables are not allowed if that table has any column lists -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_dups FOR TABLE testpub_tbl1 (a), testpub_tbl1 WITH (publish = 'insert'); -ERROR: conflicting or redundant column lists for table "testpub_tbl1" -CREATE PUBLICATION testpub_dups FOR TABLE testpub_tbl1, testpub_tbl1 (a) WITH (publish = 'insert'); -ERROR: conflicting or redundant column lists for table "testpub_tbl1" -RESET client_min_messages; --- test for column lists -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_fortable FOR TABLE testpub_tbl1; -CREATE PUBLICATION testpub_fortable_insert WITH (publish = 'insert'); -RESET client_min_messages; -CREATE TABLE testpub_tbl5 (a int PRIMARY KEY, b text, c text, - d int generated always as (a + length(b)) stored); --- error: column "x" does not exist -ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, x); -ERROR: column "x" of relation "testpub_tbl5" does not exist --- error: replica identity "a" not included in the column list -ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (b, c); -UPDATE testpub_tbl5 SET a = 1; -ERROR: cannot update table "testpub_tbl5" -DETAIL: Column list used by the publication does not cover the replica identity. -ALTER PUBLICATION testpub_fortable DROP TABLE testpub_tbl5; --- error: generated column "d" can't be in list -ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, d); -ERROR: cannot use generated column "d" in publication column list --- error: system attributes "ctid" not allowed in column list -ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, ctid); -ERROR: cannot use system column "ctid" in publication column list --- ok -ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, c); -ALTER TABLE testpub_tbl5 DROP COLUMN c; -- no dice -ERROR: cannot drop column c of table testpub_tbl5 because other objects depend on it -DETAIL: publication of table testpub_tbl5 in publication testpub_fortable depends on column c of table testpub_tbl5 -HINT: Use DROP ... CASCADE to drop the dependent objects too. --- ok: for insert-only publication, any column list is acceptable -ALTER PUBLICATION testpub_fortable_insert ADD TABLE testpub_tbl5 (b, c); -/* not all replica identities are good enough */ -CREATE UNIQUE INDEX testpub_tbl5_b_key ON testpub_tbl5 (b, c); -ALTER TABLE testpub_tbl5 ALTER b SET NOT NULL, ALTER c SET NOT NULL; -ALTER TABLE testpub_tbl5 REPLICA IDENTITY USING INDEX testpub_tbl5_b_key; --- error: replica identity (b,c) is not covered by column list (a, c) -UPDATE testpub_tbl5 SET a = 1; -ERROR: cannot update table "testpub_tbl5" -DETAIL: Column list used by the publication does not cover the replica identity. -ALTER PUBLICATION testpub_fortable DROP TABLE testpub_tbl5; --- error: change the replica identity to "b", and column list to (a, c) --- then update fails, because (a, c) does not cover replica identity -ALTER TABLE testpub_tbl5 REPLICA IDENTITY USING INDEX testpub_tbl5_b_key; -ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, c); -UPDATE testpub_tbl5 SET a = 1; -ERROR: cannot update table "testpub_tbl5" -DETAIL: Column list used by the publication does not cover the replica identity. 
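Unlike the row-filter checks, two of the column-list rules above are enforced immediately at DDL time (generated columns and system columns are rejected outright), while replica-identity coverage is again deferred to the first published UPDATE. A sketch of the DDL-time rejections, hypothetical names (gen_demo, gen_pub):

    CREATE TABLE gen_demo (a int PRIMARY KEY, b int,
                           c int GENERATED ALWAYS AS (a + b) STORED);
    CREATE PUBLICATION gen_pub FOR TABLE gen_demo (a, b);    -- ok: covers the PK
    ALTER PUBLICATION gen_pub SET TABLE gen_demo (a, c);
    -- fails at once: cannot use generated column "c" in publication column list
    ALTER PUBLICATION gen_pub SET TABLE gen_demo (a, ctid);
    -- fails at once: cannot use system column "ctid" in publication column list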
-/* But if upd/del are not published, it works OK */ -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_table_ins WITH (publish = 'insert, truncate'); -RESET client_min_messages; -ALTER PUBLICATION testpub_table_ins ADD TABLE testpub_tbl5 (a); -- ok -\dRp+ testpub_table_ins - Publication testpub_table_ins - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | f | f | t | f -Tables: - "public.testpub_tbl5" (a) - --- tests with REPLICA IDENTITY FULL -CREATE TABLE testpub_tbl6 (a int, b text, c text); -ALTER TABLE testpub_tbl6 REPLICA IDENTITY FULL; -ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl6 (a, b, c); -UPDATE testpub_tbl6 SET a = 1; -ERROR: cannot update table "testpub_tbl6" -DETAIL: Column list used by the publication does not cover the replica identity. -ALTER PUBLICATION testpub_fortable DROP TABLE testpub_tbl6; -ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl6; -- ok -UPDATE testpub_tbl6 SET a = 1; --- make sure changing the column list is propagated to the catalog -CREATE TABLE testpub_tbl7 (a int primary key, b text, c text); -ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl7 (a, b); -\d+ testpub_tbl7 - Table "public.testpub_tbl7" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - a | integer | | not null | | plain | | - b | text | | | | extended | | - c | text | | | | extended | | -Indexes: - "testpub_tbl7_pkey" PRIMARY KEY, btree (a) -Publications: - "testpub_fortable" (a, b) - --- ok: the column list is the same, we should skip this table (or at least not fail) -ALTER PUBLICATION testpub_fortable SET TABLE testpub_tbl7 (a, b); -\d+ testpub_tbl7 - Table "public.testpub_tbl7" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - a | integer | | not null | | plain | | - b | text | | | | extended | | - c | text | | | | extended | | -Indexes: - "testpub_tbl7_pkey" PRIMARY KEY, btree (a) -Publications: - "testpub_fortable" (a, b) - --- ok: the column list changes, make sure the catalog gets updated -ALTER PUBLICATION testpub_fortable SET TABLE testpub_tbl7 (a, c); -\d+ testpub_tbl7 - Table "public.testpub_tbl7" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - a | integer | | not null | | plain | | - b | text | | | | extended | | - c | text | | | | extended | | -Indexes: - "testpub_tbl7_pkey" PRIMARY KEY, btree (a) -Publications: - "testpub_fortable" (a, c) - --- column list for partitioned tables has to cover replica identities for --- all child relations -CREATE TABLE testpub_tbl8 (a int, b text, c text) PARTITION BY HASH (a); --- first partition has replica identity "a" -CREATE TABLE testpub_tbl8_0 PARTITION OF testpub_tbl8 FOR VALUES WITH (modulus 2, remainder 0); -ALTER TABLE testpub_tbl8_0 ADD PRIMARY KEY (a); -ALTER TABLE testpub_tbl8_0 REPLICA IDENTITY USING INDEX testpub_tbl8_0_pkey; --- second partition has replica identity "b" -CREATE TABLE testpub_tbl8_1 PARTITION OF testpub_tbl8 FOR VALUES WITH (modulus 2, remainder 1); -ALTER TABLE testpub_tbl8_1 ADD PRIMARY KEY (b); -ALTER TABLE testpub_tbl8_1 
REPLICA IDENTITY USING INDEX testpub_tbl8_1_pkey; --- ok: column list covers both "a" and "b" -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_col_list FOR TABLE testpub_tbl8 (a, b) WITH (publish_via_partition_root = 'true'); -RESET client_min_messages; --- ok: the same thing, but try plain ADD TABLE -ALTER PUBLICATION testpub_col_list DROP TABLE testpub_tbl8; -ALTER PUBLICATION testpub_col_list ADD TABLE testpub_tbl8 (a, b); -UPDATE testpub_tbl8 SET a = 1; --- failure: column list does not cover replica identity for the second partition -ALTER PUBLICATION testpub_col_list DROP TABLE testpub_tbl8; -ALTER PUBLICATION testpub_col_list ADD TABLE testpub_tbl8 (a, c); -UPDATE testpub_tbl8 SET a = 1; -ERROR: cannot update table "testpub_tbl8_1" -DETAIL: Column list used by the publication does not cover the replica identity. -ALTER PUBLICATION testpub_col_list DROP TABLE testpub_tbl8; --- failure: one of the partitions has REPLICA IDENTITY FULL -ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY FULL; -ALTER PUBLICATION testpub_col_list ADD TABLE testpub_tbl8 (a, c); -UPDATE testpub_tbl8 SET a = 1; -ERROR: cannot update table "testpub_tbl8_1" -DETAIL: Column list used by the publication does not cover the replica identity. -ALTER PUBLICATION testpub_col_list DROP TABLE testpub_tbl8; --- add table and then try changing replica identity -ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY USING INDEX testpub_tbl8_1_pkey; -ALTER PUBLICATION testpub_col_list ADD TABLE testpub_tbl8 (a, b); --- failure: replica identity full can't be used with a column list -ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY FULL; -UPDATE testpub_tbl8 SET a = 1; -ERROR: cannot update table "testpub_tbl8_1" -DETAIL: Column list used by the publication does not cover the replica identity. --- failure: replica identity has to be covered by the column list -ALTER TABLE testpub_tbl8_1 DROP CONSTRAINT testpub_tbl8_1_pkey; -ALTER TABLE testpub_tbl8_1 ADD PRIMARY KEY (c); -ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY USING INDEX testpub_tbl8_1_pkey; -UPDATE testpub_tbl8 SET a = 1; -ERROR: cannot update table "testpub_tbl8_1" -DETAIL: Column list used by the publication does not cover the replica identity. -DROP TABLE testpub_tbl8; --- column list for partitioned tables has to cover replica identities for --- all child relations -CREATE TABLE testpub_tbl8 (a int, b text, c text) PARTITION BY HASH (a); -ALTER PUBLICATION testpub_col_list ADD TABLE testpub_tbl8 (a, b); --- first partition has replica identity "a" -CREATE TABLE testpub_tbl8_0 (a int, b text, c text); -ALTER TABLE testpub_tbl8_0 ADD PRIMARY KEY (a); -ALTER TABLE testpub_tbl8_0 REPLICA IDENTITY USING INDEX testpub_tbl8_0_pkey; --- second partition has replica identity "c" -CREATE TABLE testpub_tbl8_1 (a int, b text, c text); -ALTER TABLE testpub_tbl8_1 ADD PRIMARY KEY (c); -ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY USING INDEX testpub_tbl8_1_pkey; --- ok: attaching first partition works, because (a) is in column list -ALTER TABLE testpub_tbl8 ATTACH PARTITION testpub_tbl8_0 FOR VALUES WITH (modulus 2, remainder 0); --- failure: second partition has replica identity (c), which is not in column list -ALTER TABLE testpub_tbl8 ATTACH PARTITION testpub_tbl8_1 FOR VALUES WITH (modulus 2, remainder 1); -UPDATE testpub_tbl8 SET a = 1; -ERROR: cannot update table "testpub_tbl8_1" -DETAIL: Column list used by the publication does not cover the replica identity.
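Note the asymmetry the last failure demonstrates: attaching a partition whose replica identity is not covered by the parent's column list is not itself rejected; the error only surfaces on the next published UPDATE. A reduced sketch, hypothetical names (cl_demo, cl_pub):

    CREATE TABLE cl_demo (a int, b text) PARTITION BY LIST (a);
    CREATE TABLE cl_demo_1 (a int, b text);
    ALTER TABLE cl_demo_1 ADD PRIMARY KEY (b);       -- this partition's identity is "b"
    CREATE PUBLICATION cl_pub FOR TABLE cl_demo (a)
        WITH (publish_via_partition_root = true);
    ALTER TABLE cl_demo ATTACH PARTITION cl_demo_1 FOR VALUES IN (1);  -- succeeds
    UPDATE cl_demo SET a = 1;    -- fails: list (a) does not cover identity (b)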
--- failure: changing replica identity to FULL for partition fails, because --- of the column list on the parent -ALTER TABLE testpub_tbl8_0 REPLICA IDENTITY FULL; -UPDATE testpub_tbl8 SET a = 1; -ERROR: cannot update table "testpub_tbl8_0" -DETAIL: Column list used by the publication does not cover the replica identity. --- test that using column list for table is disallowed if any schemas are --- part of the publication -SET client_min_messages = 'ERROR'; --- failure - cannot use column list and schema together -CREATE PUBLICATION testpub_tbl9 FOR TABLES IN SCHEMA public, TABLE public.testpub_tbl7(a); -ERROR: cannot use column list for relation "public.testpub_tbl7" in publication "testpub_tbl9" -DETAIL: Column lists cannot be specified in publications containing FOR TABLES IN SCHEMA elements. --- ok - only publish schema -CREATE PUBLICATION testpub_tbl9 FOR TABLES IN SCHEMA public; --- failure - add a table with column list when there is already a schema in the --- publication -ALTER PUBLICATION testpub_tbl9 ADD TABLE public.testpub_tbl7(a); -ERROR: cannot use column list for relation "public.testpub_tbl7" in publication "testpub_tbl9" -DETAIL: Column lists cannot be specified in publications containing FOR TABLES IN SCHEMA elements. --- ok - only publish table with column list -ALTER PUBLICATION testpub_tbl9 SET TABLE public.testpub_tbl7(a); --- failure - specify a schema when there is already a column list in the --- publication -ALTER PUBLICATION testpub_tbl9 ADD TABLES IN SCHEMA public; -ERROR: cannot add schema to publication "testpub_tbl9" -DETAIL: Schemas cannot be added if any tables that specify a column list are already part of the publication. --- failure - cannot SET column list and schema together -ALTER PUBLICATION testpub_tbl9 SET TABLES IN SCHEMA public, TABLE public.testpub_tbl7(a); -ERROR: cannot use column list for relation "public.testpub_tbl7" in publication "testpub_tbl9" -DETAIL: Column lists cannot be specified in publications containing FOR TABLES IN SCHEMA elements. --- ok - drop table -ALTER PUBLICATION testpub_tbl9 DROP TABLE public.testpub_tbl7; --- failure - cannot ADD column list and schema together -ALTER PUBLICATION testpub_tbl9 ADD TABLES IN SCHEMA public, TABLE public.testpub_tbl7(a); -ERROR: cannot use column list for relation "public.testpub_tbl7" in publication "testpub_tbl9" -DETAIL: Column lists cannot be specified in publications containing FOR TABLES IN SCHEMA elements. 
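All five errors above are one mutual exclusion reached through different DDL paths: a publication may carry column lists or TABLES IN SCHEMA entries, never both, whichever side arrives first. Minimal sketch, hypothetical names (mix_tbl, mix_pub):

    CREATE TABLE mix_tbl (a int, b int);
    CREATE PUBLICATION mix_pub FOR TABLE mix_tbl (a);
    ALTER PUBLICATION mix_pub ADD TABLES IN SCHEMA public;
    -- fails: schemas cannot be added while a column list is present
    ALTER PUBLICATION mix_pub SET TABLE mix_tbl;            -- shed the column list
    ALTER PUBLICATION mix_pub ADD TABLES IN SCHEMA public;  -- now accepted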
-RESET client_min_messages; -DROP TABLE testpub_tbl5, testpub_tbl6, testpub_tbl7, testpub_tbl8, testpub_tbl8_1; -DROP PUBLICATION testpub_table_ins, testpub_fortable, testpub_fortable_insert, testpub_col_list, testpub_tbl9; --- ====================================================== --- Test combination of column list and row filter -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_both_filters; -RESET client_min_messages; -CREATE TABLE testpub_tbl_both_filters (a int, b int, c int, PRIMARY KEY (a,c)); -ALTER TABLE testpub_tbl_both_filters REPLICA IDENTITY USING INDEX testpub_tbl_both_filters_pkey; -ALTER PUBLICATION testpub_both_filters ADD TABLE testpub_tbl_both_filters (a,c) WHERE (c != 1); -\dRp+ testpub_both_filters - Publication testpub_both_filters - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables: - "public.testpub_tbl_both_filters" (a, c) WHERE (c <> 1) - -\d+ testpub_tbl_both_filters - Table "public.testpub_tbl_both_filters" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - a | integer | | not null | | plain | | - b | integer | | | | plain | | - c | integer | | not null | | plain | | -Indexes: - "testpub_tbl_both_filters_pkey" PRIMARY KEY, btree (a, c) REPLICA IDENTITY -Publications: - "testpub_both_filters" (a, c) WHERE (c <> 1) - -DROP TABLE testpub_tbl_both_filters; -DROP PUBLICATION testpub_both_filters; --- ====================================================== --- More column list tests for validating column references -CREATE TABLE rf_tbl_abcd_nopk(a int, b int, c int, d int); -CREATE TABLE rf_tbl_abcd_pk(a int, b int, c int, d int, PRIMARY KEY(a,b)); -CREATE TABLE rf_tbl_abcd_part_pk (a int PRIMARY KEY, b int) PARTITION by RANGE (a); -CREATE TABLE rf_tbl_abcd_part_pk_1 (b int, a int PRIMARY KEY); -ALTER TABLE rf_tbl_abcd_part_pk ATTACH PARTITION rf_tbl_abcd_part_pk_1 FOR VALUES FROM (1) TO (10); --- Case 1. REPLICA IDENTITY DEFAULT (means use primary key or nothing) --- 1a. REPLICA IDENTITY is DEFAULT and table has a PK. -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub6 FOR TABLE rf_tbl_abcd_pk (a, b); -RESET client_min_messages; --- ok - (a,b) covers all PK cols -UPDATE rf_tbl_abcd_pk SET a = 1; -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a, b, c); --- ok - (a,b,c) covers all PK cols -UPDATE rf_tbl_abcd_pk SET a = 1; -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a); --- fail - "b" is missing from the column list -UPDATE rf_tbl_abcd_pk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_pk" -DETAIL: Column list used by the publication does not cover the replica identity. -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (b); --- fail - "a" is missing from the column list -UPDATE rf_tbl_abcd_pk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_pk" -DETAIL: Column list used by the publication does not cover the replica identity. --- 1b.
REPLICA IDENTITY is DEFAULT and table has no PK -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk (a); --- ok - there's no replica identity, so any column list works --- note: it fails anyway, just a bit later because UPDATE requires RI -UPDATE rf_tbl_abcd_nopk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_nopk" because it does not have a replica identity and publishes updates -HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. --- Case 2. REPLICA IDENTITY FULL -ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY FULL; -ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY FULL; -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (c); --- fail - with REPLICA IDENTITY FULL no column list is allowed -UPDATE rf_tbl_abcd_pk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_pk" -DETAIL: Column list used by the publication does not cover the replica identity. -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk (a, b, c, d); --- fail - with REPLICA IDENTITY FULL no column list is allowed -UPDATE rf_tbl_abcd_nopk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_nopk" -DETAIL: Column list used by the publication does not cover the replica identity. --- Case 3. REPLICA IDENTITY NOTHING -ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY NOTHING; -ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY NOTHING; -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a); --- ok - REPLICA IDENTITY NOTHING means all column lists are valid --- it still fails later because without RI we can't replicate updates -UPDATE rf_tbl_abcd_pk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_pk" because it does not have a replica identity and publishes updates -HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a, b, c, d); --- ok - REPLICA IDENTITY NOTHING means all column lists are valid --- it still fails later because without RI we can't replicate updates -UPDATE rf_tbl_abcd_pk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_pk" because it does not have a replica identity and publishes updates -HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk (d); --- ok - REPLICA IDENTITY NOTHING means all column lists are valid --- it still fails later because without RI we can't replicate updates -UPDATE rf_tbl_abcd_nopk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_nopk" because it does not have a replica identity and publishes updates -HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. --- Case 4. REPLICA IDENTITY INDEX -ALTER TABLE rf_tbl_abcd_pk ALTER COLUMN c SET NOT NULL; -CREATE UNIQUE INDEX idx_abcd_pk_c ON rf_tbl_abcd_pk(c); -ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY USING INDEX idx_abcd_pk_c; -ALTER TABLE rf_tbl_abcd_nopk ALTER COLUMN c SET NOT NULL; -CREATE UNIQUE INDEX idx_abcd_nopk_c ON rf_tbl_abcd_nopk(c); -ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY USING INDEX idx_abcd_nopk_c; -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a); --- fail - column list "a" does not cover the REPLICA IDENTITY INDEX on "c" -UPDATE rf_tbl_abcd_pk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_pk" -DETAIL: Column list used by the publication does not cover the replica identity. 
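Case 2 above is stricter than the coverage wording suggests: with REPLICA IDENTITY FULL no column list is usable at all, not even one naming every column, because FULL promises the complete old row to subscribers. Sketch with hypothetical names (full_demo, full_pub):

    CREATE TABLE full_demo (a int, b int);
    ALTER TABLE full_demo REPLICA IDENTITY FULL;
    CREATE PUBLICATION full_pub FOR TABLE full_demo (a, b);  -- accepted at DDL time
    UPDATE full_demo SET a = 1;
    -- fails anyway: FULL is incompatible with any column list on the table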
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (c); --- ok - column list "c" does cover the REPLICA IDENTITY INDEX on "c" -UPDATE rf_tbl_abcd_pk SET a = 1; -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk (a); --- fail - column list "a" does not cover the REPLICA IDENTITY INDEX on "c" -UPDATE rf_tbl_abcd_nopk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_nopk" -DETAIL: Column list used by the publication does not cover the replica identity. -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk (c); --- ok - column list "c" does cover the REPLICA IDENTITY INDEX on "c" -UPDATE rf_tbl_abcd_nopk SET a = 1; --- Tests for partitioned table --- set PUBLISH_VIA_PARTITION_ROOT to false and test column list for partitioned --- table -ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); --- fail - cannot use column list for partitioned table -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk (a); -ERROR: cannot use column list for relation "public.rf_tbl_abcd_part_pk" in publication "testpub6" -DETAIL: Column lists cannot be specified for partitioned tables when publish_via_partition_root is false. --- ok - can use column list for partition -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk_1 (a); --- ok - "a" is a PK col -UPDATE rf_tbl_abcd_part_pk SET a = 1; --- set PUBLISH_VIA_PARTITION_ROOT to true and test column list for partitioned --- table -ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=1); --- ok - can use column list for partitioned table -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk (a); --- ok - "a" is a PK col -UPDATE rf_tbl_abcd_part_pk SET a = 1; --- fail - cannot set PUBLISH_VIA_PARTITION_ROOT to false if any column list is --- used for partitioned table -ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); -ERROR: cannot set parameter "publish_via_partition_root" to false for publication "testpub6" -DETAIL: The publication contains a column list for partitioned table "rf_tbl_abcd_part_pk", which is not allowed when "publish_via_partition_root" is false. --- remove partitioned table's column list -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk; --- ok - we don't have column list for partitioned table. -ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); --- Now change the root column list to use a column "b" --- (which is not in the replica identity) -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk_1 (b); --- ok - we don't have column list for partitioned table. -ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); --- fail - "b" is not in REPLICA IDENTITY INDEX -UPDATE rf_tbl_abcd_part_pk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_part_pk_1" -DETAIL: Column list used by the publication does not cover the replica identity. --- set PUBLISH_VIA_PARTITION_ROOT to true --- can use column list for partitioned table -ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=1); --- ok - can use column list for partitioned table -ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk (b); --- fail - "b" is not in REPLICA IDENTITY INDEX -UPDATE rf_tbl_abcd_part_pk SET a = 1; -ERROR: cannot update table "rf_tbl_abcd_part_pk_1" -DETAIL: Column list used by the publication does not cover the replica identity. 
-DROP PUBLICATION testpub6; -DROP TABLE rf_tbl_abcd_pk; -DROP TABLE rf_tbl_abcd_nopk; -DROP TABLE rf_tbl_abcd_part_pk; --- ====================================================== --- Test cache invalidation for a FOR ALL TABLES publication -SET client_min_messages = 'ERROR'; -CREATE TABLE testpub_tbl4(a int); -INSERT INTO testpub_tbl4 values(1); -UPDATE testpub_tbl4 set a = 2; -CREATE PUBLICATION testpub_foralltables FOR ALL TABLES; -RESET client_min_messages; --- fail - missing REPLICA IDENTITY -UPDATE testpub_tbl4 set a = 3; -ERROR: cannot update table "testpub_tbl4" because it does not have a replica identity and publishes updates -HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. -DROP PUBLICATION testpub_foralltables; --- should pass after dropping the publication -UPDATE testpub_tbl4 set a = 3; -DROP TABLE testpub_tbl4; --- fail - view -CREATE PUBLICATION testpub_fortbl FOR TABLE testpub_view; -ERROR: cannot add relation "testpub_view" to publication -DETAIL: This operation is not supported for views. -CREATE TEMPORARY TABLE testpub_temptbl(a int); --- fail - temporary table -CREATE PUBLICATION testpub_fortemptbl FOR TABLE testpub_temptbl; -ERROR: cannot add relation "testpub_temptbl" to publication -DETAIL: This operation is not supported for temporary tables. -DROP TABLE testpub_temptbl; -CREATE UNLOGGED TABLE testpub_unloggedtbl(a int); --- fail - unlogged table -CREATE PUBLICATION testpub_forunloggedtbl FOR TABLE testpub_unloggedtbl; -ERROR: cannot add relation "testpub_unloggedtbl" to publication -DETAIL: This operation is not supported for unlogged tables. -DROP TABLE testpub_unloggedtbl; --- fail - system table -CREATE PUBLICATION testpub_forsystemtbl FOR TABLE pg_publication; -ERROR: cannot add relation "pg_publication" to publication -DETAIL: This operation is not supported for system tables. -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_fortbl FOR TABLE testpub_tbl1, pub_test.testpub_nopk; -RESET client_min_messages; --- fail - already added -ALTER PUBLICATION testpub_fortbl ADD TABLE testpub_tbl1; -ERROR: relation "testpub_tbl1" is already member of publication "testpub_fortbl" --- fail - publication already exists -CREATE PUBLICATION testpub_fortbl FOR TABLE testpub_tbl1; -ERROR: publication "testpub_fortbl" already exists -\dRp+ testpub_fortbl - Publication testpub_fortbl - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables: - "pub_test.testpub_nopk" - "public.testpub_tbl1" - --- fail - view -ALTER PUBLICATION testpub_default ADD TABLE testpub_view; -ERROR: cannot add relation "testpub_view" to publication -DETAIL: This operation is not supported for views.
-ALTER PUBLICATION testpub_default ADD TABLE testpub_tbl1; -ALTER PUBLICATION testpub_default SET TABLE testpub_tbl1; -ALTER PUBLICATION testpub_default ADD TABLE pub_test.testpub_nopk; -ALTER PUBLICATION testpib_ins_trunct ADD TABLE pub_test.testpub_nopk, testpub_tbl1; -\d+ pub_test.testpub_nopk - Table "pub_test.testpub_nopk" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - foo | integer | | | | plain | | - bar | integer | | | | plain | | -Publications: - "testpib_ins_trunct" - "testpub_default" - "testpub_fortbl" - -\d+ testpub_tbl1 - Table "public.testpub_tbl1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+------------------------------------------+----------+--------------+------------- - id | integer | | not null | nextval('testpub_tbl1_id_seq'::regclass) | plain | | - data | text | | | | extended | | -Indexes: - "testpub_tbl1_pkey" PRIMARY KEY, btree (id) -Publications: - "testpib_ins_trunct" - "testpub_default" - "testpub_fortbl" -Not-null constraints: - "testpub_tbl1_id_not_null" NOT NULL "id" - -\dRp+ testpub_default - Publication testpub_default - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | f | f -Tables: - "pub_test.testpub_nopk" - "public.testpub_tbl1" - -ALTER PUBLICATION testpub_default DROP TABLE testpub_tbl1, pub_test.testpub_nopk; --- fail - nonexistent -ALTER PUBLICATION testpub_default DROP TABLE pub_test.testpub_nopk; -ERROR: relation "testpub_nopk" is not part of the publication -\d+ testpub_tbl1 - Table "public.testpub_tbl1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+------------------------------------------+----------+--------------+------------- - id | integer | | not null | nextval('testpub_tbl1_id_seq'::regclass) | plain | | - data | text | | | | extended | | -Indexes: - "testpub_tbl1_pkey" PRIMARY KEY, btree (id) -Publications: - "testpib_ins_trunct" - "testpub_fortbl" -Not-null constraints: - "testpub_tbl1_id_not_null" NOT NULL "id" - --- verify relation cache invalidation when a primary key is added using --- an existing index -CREATE TABLE pub_test.testpub_addpk (id int not null, data int); -ALTER PUBLICATION testpub_default ADD TABLE pub_test.testpub_addpk; -INSERT INTO pub_test.testpub_addpk VALUES(1, 11); -CREATE UNIQUE INDEX testpub_addpk_id_idx ON pub_test.testpub_addpk(id); --- fail: -UPDATE pub_test.testpub_addpk SET id = 2; -ERROR: cannot update table "testpub_addpk" because it does not have a replica identity and publishes updates -HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. 
-ALTER TABLE pub_test.testpub_addpk ADD PRIMARY KEY USING INDEX testpub_addpk_id_idx; --- now it should work: -UPDATE pub_test.testpub_addpk SET id = 2; -DROP TABLE pub_test.testpub_addpk; --- permissions -SET ROLE regress_publication_user2; -CREATE PUBLICATION testpub2; -- fail -ERROR: permission denied for database regression -SET ROLE regress_publication_user; -GRANT CREATE ON DATABASE regression TO regress_publication_user2; -SET ROLE regress_publication_user2; -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub2; -- ok -CREATE PUBLICATION testpub3 FOR TABLES IN SCHEMA pub_test; -- fail -ERROR: must be superuser to create FOR TABLES IN SCHEMA publication -CREATE PUBLICATION testpub3; -- ok -RESET client_min_messages; -ALTER PUBLICATION testpub2 ADD TABLE testpub_tbl1; -- fail -ERROR: must be owner of table testpub_tbl1 -ALTER PUBLICATION testpub3 ADD TABLES IN SCHEMA pub_test; -- fail -ERROR: must be superuser to add or set schemas -SET ROLE regress_publication_user; -GRANT regress_publication_user TO regress_publication_user2; -SET ROLE regress_publication_user2; -ALTER PUBLICATION testpub2 ADD TABLE testpub_tbl1; -- ok -DROP PUBLICATION testpub2; -DROP PUBLICATION testpub3; -SET ROLE regress_publication_user; -CREATE ROLE regress_publication_user3; -GRANT regress_publication_user2 TO regress_publication_user3; -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub4 FOR TABLES IN SCHEMA pub_test; -RESET client_min_messages; -ALTER PUBLICATION testpub4 OWNER TO regress_publication_user3; -SET ROLE regress_publication_user3; --- fail - new owner must be superuser -ALTER PUBLICATION testpub4 owner to regress_publication_user2; -- fail -ERROR: permission denied to change owner of publication "testpub4" -HINT: The owner of a FOR TABLES IN SCHEMA publication must be a superuser. 
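The permission checks above split cleanly by object kind: a plain publication needs only CREATE on the database, while anything involving TABLES IN SCHEMA is reserved to superusers, including being handed ownership by a non-superuser owner. Sketch, assuming a scratch database named scratch_db and a hypothetical role pub_demo_user:

    CREATE ROLE pub_demo_user LOGIN;
    GRANT CREATE ON DATABASE scratch_db TO pub_demo_user;
    SET ROLE pub_demo_user;
    CREATE PUBLICATION plain_pub;                               -- ok
    CREATE PUBLICATION schema_pub FOR TABLES IN SCHEMA public;
    -- fails: must be superuser to create FOR TABLES IN SCHEMA publication
    RESET ROLE;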
-ALTER PUBLICATION testpub4 owner to regress_publication_user; -- ok -SET ROLE regress_publication_user; -DROP PUBLICATION testpub4; -DROP ROLE regress_publication_user3; -REVOKE CREATE ON DATABASE regression FROM regress_publication_user2; -DROP TABLE testpub_parted; -DROP TABLE testpub_tbl1; -\dRp+ testpub_default - Publication testpub_default - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | f | f -(1 row) - --- fail - must be owner of publication -SET ROLE regress_publication_user_dummy; -ALTER PUBLICATION testpub_default RENAME TO testpub_dummy; -ERROR: must be owner of publication testpub_default -RESET ROLE; -ALTER PUBLICATION testpub_default RENAME TO testpub_foo; -\dRp testpub_foo - List of publications - Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root --------------+--------------------------+------------+---------+---------+---------+-----------+---------- - testpub_foo | regress_publication_user | f | t | t | t | f | f -(1 row) - --- rename back to keep the rest simple -ALTER PUBLICATION testpub_foo RENAME TO testpub_default; -ALTER PUBLICATION testpub_default OWNER TO regress_publication_user2; -\dRp testpub_default - List of publications - Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ------------------+---------------------------+------------+---------+---------+---------+-----------+---------- - testpub_default | regress_publication_user2 | f | t | t | t | f | f -(1 row) - --- adding schemas and tables -CREATE SCHEMA pub_test1; -CREATE SCHEMA pub_test2; -CREATE SCHEMA pub_test3; -CREATE SCHEMA "CURRENT_SCHEMA"; -CREATE TABLE pub_test1.tbl (id int, data text); -CREATE TABLE pub_test1.tbl1 (id serial primary key, data text); -CREATE TABLE pub_test2.tbl1 (id serial primary key, data text); -CREATE TABLE "CURRENT_SCHEMA"."CURRENT_SCHEMA"(id int); --- suppress warning that depends on wal_level -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub1_forschema FOR TABLES IN SCHEMA pub_test1; -\dRp+ testpub1_forschema - Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables from schemas: - "pub_test1" - -CREATE PUBLICATION testpub2_forschema FOR TABLES IN SCHEMA pub_test1, pub_test2, pub_test3; -\dRp+ testpub2_forschema - Publication testpub2_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables from schemas: - "pub_test1" - "pub_test2" - "pub_test3" - --- check create publication on CURRENT_SCHEMA -CREATE PUBLICATION testpub3_forschema FOR TABLES IN SCHEMA CURRENT_SCHEMA; -CREATE PUBLICATION testpub4_forschema FOR TABLES IN SCHEMA "CURRENT_SCHEMA"; -CREATE PUBLICATION testpub5_forschema FOR TABLES IN SCHEMA CURRENT_SCHEMA, "CURRENT_SCHEMA"; -CREATE PUBLICATION testpub6_forschema FOR TABLES IN SCHEMA "CURRENT_SCHEMA", CURRENT_SCHEMA; -CREATE PUBLICATION testpub_fortable FOR TABLE "CURRENT_SCHEMA"."CURRENT_SCHEMA"; -RESET client_min_messages; -\dRp+ testpub3_forschema - Publication testpub3_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root 
---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables from schemas: - "public" - -\dRp+ testpub4_forschema - Publication testpub4_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables from schemas: - "CURRENT_SCHEMA" - -\dRp+ testpub5_forschema - Publication testpub5_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables from schemas: - "CURRENT_SCHEMA" - "public" - -\dRp+ testpub6_forschema - Publication testpub6_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables from schemas: - "CURRENT_SCHEMA" - "public" - -\dRp+ testpub_fortable - Publication testpub_fortable - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables: - "CURRENT_SCHEMA.CURRENT_SCHEMA" - --- check create publication on CURRENT_SCHEMA where search_path is not set -SET SEARCH_PATH=''; -CREATE PUBLICATION testpub_forschema FOR TABLES IN SCHEMA CURRENT_SCHEMA; -ERROR: no schema has been selected for CURRENT_SCHEMA -RESET SEARCH_PATH; --- check create publication on CURRENT_SCHEMA where TABLE/TABLES in SCHEMA --- is not specified -CREATE PUBLICATION testpub_forschema1 FOR CURRENT_SCHEMA; -ERROR: invalid publication object list -LINE 1: CREATE PUBLICATION testpub_forschema1 FOR CURRENT_SCHEMA; - ^ -DETAIL: One of TABLE or TABLES IN SCHEMA must be specified before a standalone table or schema name. --- check create publication on CURRENT_SCHEMA along with FOR TABLE -CREATE PUBLICATION testpub_forschema1 FOR TABLE CURRENT_SCHEMA; -ERROR: syntax error at or near "CURRENT_SCHEMA" -LINE 1: CREATE PUBLICATION testpub_forschema1 FOR TABLE CURRENT_SCHE... - ^ --- check create publication on a schema that does not exist -CREATE PUBLICATION testpub_forschema FOR TABLES IN SCHEMA non_existent_schema; -ERROR: schema "non_existent_schema" does not exist --- check create publication on a system schema -CREATE PUBLICATION testpub_forschema FOR TABLES IN SCHEMA pg_catalog; -ERROR: cannot add schema "pg_catalog" to publication -DETAIL: This operation is not supported for system schemas. 
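The CURRENT_SCHEMA cases above hinge on name resolution time: the bare keyword resolves through the first valid entry of search_path when the DDL runs, and the schema is stored by OID from then on, while the quoted "CURRENT_SCHEMA" is just an ordinary identifier. A sketch against the pub_test1 schema created earlier (the publication name csp_pub is hypothetical):

    SET search_path = pub_test1, public;
    CREATE PUBLICATION csp_pub FOR TABLES IN SCHEMA CURRENT_SCHEMA;
    -- csp_pub now lists pub_test1 permanently, regardless of later search_path changes
    DROP PUBLICATION csp_pub;
    RESET search_path;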
--- check create publication on an object which is not a schema -CREATE PUBLICATION testpub1_forschema1 FOR TABLES IN SCHEMA testpub_view; -ERROR: schema "testpub_view" does not exist --- dropping the schema should reflect the change in publication -DROP SCHEMA pub_test3; -\dRp+ testpub2_forschema - Publication testpub2_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables from schemas: - "pub_test1" - "pub_test2" - --- renaming the schema should reflect the change in publication -ALTER SCHEMA pub_test1 RENAME to pub_test1_renamed; -\dRp+ testpub2_forschema - Publication testpub2_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables from schemas: - "pub_test1_renamed" - "pub_test2" - -ALTER SCHEMA pub_test1_renamed RENAME to pub_test1; -\dRp+ testpub2_forschema - Publication testpub2_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables from schemas: - "pub_test1" - "pub_test2" - --- alter publication add schema -ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA pub_test2; -\dRp+ testpub1_forschema - Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables from schemas: - "pub_test1" - "pub_test2" - --- add a non-existent schema -ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA non_existent_schema; -ERROR: schema "non_existent_schema" does not exist -\dRp+ testpub1_forschema - Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables from schemas: - "pub_test1" - "pub_test2" - --- add a schema which is already added to the publication -ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA pub_test1; -ERROR: schema "pub_test1" is already member of publication "testpub1_forschema" -\dRp+ testpub1_forschema - Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables from schemas: - "pub_test1" - "pub_test2" - --- alter publication drop schema -ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test2; -\dRp+ testpub1_forschema - Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables from schemas: - "pub_test1" - --- drop a schema that is not present in the publication -ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test2; -ERROR: tables from schema "pub_test2" are not part of the publication -\dRp+ testpub1_forschema -
Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables from schemas: - "pub_test1" - --- drop a schema that does not exist in the system -ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA non_existent_schema; -ERROR: schema "non_existent_schema" does not exist -\dRp+ testpub1_forschema - Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables from schemas: - "pub_test1" - --- drop all schemas -ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test1; -\dRp+ testpub1_forschema - Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -(1 row) - --- alter publication set multiple schema -ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA pub_test1, pub_test2; -\dRp+ testpub1_forschema - Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables from schemas: - "pub_test1" - "pub_test2" - --- alter publication set non-existent schema -ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA non_existent_schema; -ERROR: schema "non_existent_schema" does not exist -\dRp+ testpub1_forschema - Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables from schemas: - "pub_test1" - "pub_test2" - --- alter publication set it duplicate schemas should set the schemas after --- removing the duplicate schemas -ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA pub_test1, pub_test1; -\dRp+ testpub1_forschema - Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f -Tables from schemas: - "pub_test1" - --- Verify that it fails to add a schema with a column specification -ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA foo (a, b); -ERROR: syntax error at or near "(" -LINE 1: ...LICATION testpub1_forschema ADD TABLES IN SCHEMA foo (a, b); - ^ -ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA foo, bar (a, b); -ERROR: column specification not allowed for schema -LINE 1: ...TION testpub1_forschema ADD TABLES IN SCHEMA foo, bar (a, b)... 
- ^
--- cleanup pub_test1 schema for invalidation tests
-ALTER PUBLICATION testpub2_forschema DROP TABLES IN SCHEMA pub_test1;
-DROP PUBLICATION testpub3_forschema, testpub4_forschema, testpub5_forschema, testpub6_forschema, testpub_fortable;
-DROP SCHEMA "CURRENT_SCHEMA" CASCADE;
-NOTICE: drop cascades to table "CURRENT_SCHEMA"."CURRENT_SCHEMA"
--- verify relation cache invalidations through update statement for the
--- default REPLICA IDENTITY on the relation, if schema is part of the
--- publication then update will fail because relation's relreplident
--- option will be set, if schema is not part of the publication then update
--- will be successful.
-INSERT INTO pub_test1.tbl VALUES(1, 'test');
--- fail
-UPDATE pub_test1.tbl SET id = 2;
-ERROR: cannot update table "tbl" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test1;
--- success
-UPDATE pub_test1.tbl SET id = 2;
-ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA pub_test1;
--- fail
-UPDATE pub_test1.tbl SET id = 2;
-ERROR: cannot update table "tbl" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
--- verify invalidation of partition table having parent and child tables in
--- different schema
-CREATE SCHEMA pub_testpart1;
-CREATE SCHEMA pub_testpart2;
-CREATE TABLE pub_testpart1.parent1 (a int) partition by list (a);
-CREATE TABLE pub_testpart2.child_parent1 partition of pub_testpart1.parent1 for values in (1);
-INSERT INTO pub_testpart2.child_parent1 values(1);
-UPDATE pub_testpart2.child_parent1 set a = 1;
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpubpart_forschema FOR TABLES IN SCHEMA pub_testpart1;
-RESET client_min_messages;
--- fail
-UPDATE pub_testpart1.parent1 set a = 1;
-ERROR: cannot update table "child_parent1" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-UPDATE pub_testpart2.child_parent1 set a = 1;
-ERROR: cannot update table "child_parent1" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-DROP PUBLICATION testpubpart_forschema;
--- verify invalidation of partition tables for schema publication that has
--- parent and child tables of different partition hierarchies
-CREATE TABLE pub_testpart2.parent2 (a int) partition by list (a);
-CREATE TABLE pub_testpart1.child_parent2 partition of pub_testpart2.parent2 for values in (1);
-INSERT INTO pub_testpart1.child_parent2 values(1);
-UPDATE pub_testpart1.child_parent2 set a = 1;
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpubpart_forschema FOR TABLES IN SCHEMA pub_testpart2;
-RESET client_min_messages;
--- fail
-UPDATE pub_testpart2.child_parent1 set a = 1;
-ERROR: cannot update table "child_parent1" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-UPDATE pub_testpart2.parent2 set a = 1;
-ERROR: cannot update table "child_parent2" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-UPDATE pub_testpart1.child_parent2 set a = 1;
-ERROR: cannot update table "child_parent2" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
--- alter publication set 'TABLES IN SCHEMA' on an empty publication.
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub3_forschema;
-RESET client_min_messages;
-\dRp+ testpub3_forschema
- Publication testpub3_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root
---------------------------+------------+---------+---------+---------+-----------+----------
- regress_publication_user | f | t | t | t | t | f
-(1 row)
-
-ALTER PUBLICATION testpub3_forschema SET TABLES IN SCHEMA pub_test1;
-\dRp+ testpub3_forschema
- Publication testpub3_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root
---------------------------+------------+---------+---------+---------+-----------+----------
- regress_publication_user | f | t | t | t | t | f
-Tables from schemas:
- "pub_test1"
-
--- create publication including both 'FOR TABLE' and 'FOR TABLES IN SCHEMA'
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_forschema_fortable FOR TABLES IN SCHEMA pub_test1, TABLE pub_test2.tbl1;
-CREATE PUBLICATION testpub_fortable_forschema FOR TABLE pub_test2.tbl1, TABLES IN SCHEMA pub_test1;
-RESET client_min_messages;
-\dRp+ testpub_forschema_fortable
- Publication testpub_forschema_fortable
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root
---------------------------+------------+---------+---------+---------+-----------+----------
- regress_publication_user | f | t | t | t | t | f
-Tables:
- "pub_test2.tbl1"
-Tables from schemas:
- "pub_test1"
-
-\dRp+ testpub_fortable_forschema
- Publication testpub_fortable_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root
---------------------------+------------+---------+---------+---------+-----------+----------
- regress_publication_user | f | t | t | t | t | f
-Tables:
- "pub_test2.tbl1"
-Tables from schemas:
- "pub_test1"
-
--- fail specifying table without any of 'FOR TABLES IN SCHEMA' or
---'FOR TABLE' or 'FOR ALL TABLES'
-CREATE PUBLICATION testpub_error FOR pub_test2.tbl1;
-ERROR: invalid publication object list
-LINE 1: CREATE PUBLICATION testpub_error FOR pub_test2.tbl1;
- ^
-DETAIL: One of TABLE or TABLES IN SCHEMA must be specified before a standalone table or schema name.
-DROP VIEW testpub_view;
-DROP PUBLICATION testpub_default;
-DROP PUBLICATION testpib_ins_trunct;
-DROP PUBLICATION testpub_fortbl;
-DROP PUBLICATION testpub1_forschema;
-DROP PUBLICATION testpub2_forschema;
-DROP PUBLICATION testpub3_forschema;
-DROP PUBLICATION testpub_forschema_fortable;
-DROP PUBLICATION testpub_fortable_forschema;
-DROP PUBLICATION testpubpart_forschema;
-DROP SCHEMA pub_test CASCADE;
-NOTICE: drop cascades to table pub_test.testpub_nopk
-DROP SCHEMA pub_test1 CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to table pub_test1.tbl
-drop cascades to table pub_test1.tbl1
-DROP SCHEMA pub_test2 CASCADE;
-NOTICE: drop cascades to table pub_test2.tbl1
-DROP SCHEMA pub_testpart1 CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to table pub_testpart1.parent1
-drop cascades to table pub_testpart1.child_parent2
-DROP SCHEMA pub_testpart2 CASCADE;
-NOTICE: drop cascades to table pub_testpart2.parent2
--- Test the list of partitions published with or without
--- 'PUBLISH_VIA_PARTITION_ROOT' parameter
-SET client_min_messages = 'ERROR';
-CREATE SCHEMA sch1;
-CREATE SCHEMA sch2;
-CREATE TABLE sch1.tbl1 (a int) PARTITION BY RANGE(a);
-CREATE TABLE sch2.tbl1_part1 PARTITION OF sch1.tbl1 FOR VALUES FROM (1) to (10);
--- Schema publication that does not include the schema that has the parent table
-CREATE PUBLICATION pub FOR TABLES IN SCHEMA sch2 WITH (PUBLISH_VIA_PARTITION_ROOT=1);
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+------------+----------+-----------
- pub | sch2 | tbl1_part1 | {a} |
-(1 row)
-
-DROP PUBLICATION pub;
--- Table publication that does not include the parent table
-CREATE PUBLICATION pub FOR TABLE sch2.tbl1_part1 WITH (PUBLISH_VIA_PARTITION_ROOT=1);
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+------------+----------+-----------
- pub | sch2 | tbl1_part1 | {a} |
-(1 row)
-
--- Table publication that includes both the parent table and the child table
-ALTER PUBLICATION pub ADD TABLE sch1.tbl1;
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+-----------+----------+-----------
- pub | sch1 | tbl1 | {a} |
-(1 row)
-
-DROP PUBLICATION pub;
--- Schema publication that does not include the schema that has the parent table
-CREATE PUBLICATION pub FOR TABLES IN SCHEMA sch2 WITH (PUBLISH_VIA_PARTITION_ROOT=0);
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+------------+----------+-----------
- pub | sch2 | tbl1_part1 | {a} |
-(1 row)
-
-DROP PUBLICATION pub;
--- Table publication that does not include the parent table
-CREATE PUBLICATION pub FOR TABLE sch2.tbl1_part1 WITH (PUBLISH_VIA_PARTITION_ROOT=0);
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+------------+----------+-----------
- pub | sch2 | tbl1_part1 | {a} |
-(1 row)
-
--- Table publication that includes both the parent table and the child table
-ALTER PUBLICATION pub ADD TABLE sch1.tbl1;
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+------------+----------+-----------
- pub | sch2 | tbl1_part1 | {a} |
-(1 row)
-
-DROP PUBLICATION pub;
-DROP TABLE sch2.tbl1_part1;
-DROP TABLE sch1.tbl1;
-CREATE TABLE sch1.tbl1 (a int) PARTITION BY RANGE(a);
-CREATE TABLE sch1.tbl1_part1 PARTITION OF sch1.tbl1 FOR VALUES FROM (1) to (10);
-CREATE TABLE sch1.tbl1_part2 PARTITION OF sch1.tbl1 FOR VALUES FROM (10) to (20);
-CREATE TABLE sch1.tbl1_part3 (a int) PARTITION BY RANGE(a);
-ALTER TABLE sch1.tbl1 ATTACH PARTITION sch1.tbl1_part3 FOR VALUES FROM (20) to (30);
-CREATE PUBLICATION pub FOR TABLES IN SCHEMA sch1 WITH (PUBLISH_VIA_PARTITION_ROOT=1);
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+-----------+----------+-----------
- pub | sch1 | tbl1 | {a} |
-(1 row)
-
-RESET client_min_messages;
-DROP PUBLICATION pub;
-DROP TABLE sch1.tbl1;
-DROP SCHEMA sch1 cascade;
-DROP SCHEMA sch2 cascade;
-RESET SESSION AUTHORIZATION;
-DROP ROLE regress_publication_user, regress_publication_user2;
-DROP ROLE regress_publication_user_dummy;
+psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections
+DETAIL: Consistent recovery state has not been yet reached.
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/subscription.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/subscription.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/subscription.out 2024-03-07 14:25:00.334027000 +0000
+++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/subscription.out 2024-03-07 14:27:17.308531000 +0000
@@ -1,482 +1,2 @@
---
--- SUBSCRIPTION
---
-CREATE ROLE regress_subscription_user LOGIN SUPERUSER;
-CREATE ROLE regress_subscription_user2;
-CREATE ROLE regress_subscription_user3 IN ROLE pg_create_subscription;
-CREATE ROLE regress_subscription_user_dummy LOGIN NOSUPERUSER;
-SET SESSION AUTHORIZATION 'regress_subscription_user';
--- fail - no publications
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'foo';
-ERROR: syntax error at or near ";"
-LINE 1: CREATE SUBSCRIPTION regress_testsub CONNECTION 'foo';
- ^
--- fail - no connection
-CREATE SUBSCRIPTION regress_testsub PUBLICATION foo;
-ERROR: syntax error at or near "PUBLICATION"
-LINE 1: CREATE SUBSCRIPTION regress_testsub PUBLICATION foo;
- ^
--- fail - cannot do CREATE SUBSCRIPTION CREATE SLOT inside transaction block
-BEGIN;
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'testconn' PUBLICATION testpub WITH (create_slot);
-ERROR: CREATE SUBSCRIPTION ... WITH (create_slot = true) cannot run inside a transaction block
-COMMIT;
--- fail - invalid connection string
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'testconn' PUBLICATION testpub;
-ERROR: invalid connection string syntax: missing "=" after "testconn" in connection info string
-
--- fail - duplicate publications
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION foo, testpub, foo WITH (connect = false);
-ERROR: publication name "foo" used more than once
--- ok
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-COMMENT ON SUBSCRIPTION regress_testsub IS 'test subscription';
-SELECT obj_description(s.oid, 'pg_subscription') FROM pg_subscription s;
- obj_description
-------------------
- test subscription
-(1 row)
-
--- Check if the subscription stats are created and stats_reset is updated
--- by pg_stat_reset_subscription_stats().
-SELECT subname, stats_reset IS NULL stats_reset_is_null FROM pg_stat_subscription_stats WHERE subname = 'regress_testsub';
- subname | stats_reset_is_null
------------------+---------------------
- regress_testsub | t
-(1 row)
-
-SELECT pg_stat_reset_subscription_stats(oid) FROM pg_subscription WHERE subname = 'regress_testsub';
- pg_stat_reset_subscription_stats
-----------------------------------
-
-(1 row)
-
-SELECT subname, stats_reset IS NULL stats_reset_is_null FROM pg_stat_subscription_stats WHERE subname = 'regress_testsub';
- subname | stats_reset_is_null
------------------+---------------------
- regress_testsub | f
-(1 row)
-
--- Reset the stats again and check if the new reset_stats is updated.
-SELECT stats_reset as prev_stats_reset FROM pg_stat_subscription_stats WHERE subname = 'regress_testsub' \gset
-SELECT pg_stat_reset_subscription_stats(oid) FROM pg_subscription WHERE subname = 'regress_testsub';
- pg_stat_reset_subscription_stats
-----------------------------------
-
-(1 row)
-
-SELECT :'prev_stats_reset' < stats_reset FROM pg_stat_subscription_stats WHERE subname = 'regress_testsub';
- ?column?
----------
- t
-(1 row)
-
--- fail - name already exists
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false);
-ERROR: subscription "regress_testsub" already exists
--- fail - must be superuser
-SET SESSION AUTHORIZATION 'regress_subscription_user2';
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION foo WITH (connect = false);
-ERROR: permission denied to create subscription
-DETAIL: Only roles with privileges of the "pg_create_subscription" role may create subscriptions.
-SET SESSION AUTHORIZATION 'regress_subscription_user';
--- fail - invalid option combinations
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, copy_data = true);
-ERROR: connect = false and copy_data = true are mutually exclusive options
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, enabled = true);
-ERROR: connect = false and enabled = true are mutually exclusive options
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, create_slot = true);
-ERROR: connect = false and create_slot = true are mutually exclusive options
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, failover = true);
-ERROR: connect = false and failover = true are mutually exclusive options
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, enabled = true);
-ERROR: slot_name = NONE and enabled = true are mutually exclusive options
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, enabled = false, create_slot = true);
-ERROR: slot_name = NONE and create_slot = true are mutually exclusive options
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE);
-ERROR: subscription with slot_name = NONE must also set enabled = false
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, enabled = false);
-ERROR: subscription with slot_name = NONE must also set create_slot = false
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, create_slot = false);
-ERROR: subscription with slot_name = NONE must also set enabled = false
--- ok - with slot_name = NONE
-CREATE SUBSCRIPTION regress_testsub3 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, connect = false);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
--- fail
-ALTER SUBSCRIPTION regress_testsub3 ENABLE;
-ERROR: cannot enable subscription that does not have a slot name
-ALTER SUBSCRIPTION regress_testsub3 REFRESH PUBLICATION;
-ERROR: ALTER SUBSCRIPTION ... REFRESH is not allowed for disabled subscriptions
--- fail - origin must be either none or any
-CREATE SUBSCRIPTION regress_testsub4 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, connect = false, origin = foo);
-ERROR: unrecognized origin value: "foo"
--- now it works
-CREATE SUBSCRIPTION regress_testsub4 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, connect = false, origin = none);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-\dRs+ regress_testsub4
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
-------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub4 | regress_subscription_user | f | {testpub} | f | off | d | f | none | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub4 SET (origin = any);
-\dRs+ regress_testsub4
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
-------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub4 | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-DROP SUBSCRIPTION regress_testsub3;
-DROP SUBSCRIPTION regress_testsub4;
--- fail, connection string does not parse
-CREATE SUBSCRIPTION regress_testsub5 CONNECTION 'i_dont_exist=param' PUBLICATION testpub;
-ERROR: invalid connection string syntax: invalid connection option "i_dont_exist"
-
--- fail, connection string parses, but doesn't work (and does so without
--- connecting, so this is reliable and safe)
-CREATE SUBSCRIPTION regress_testsub5 CONNECTION 'port=-1' PUBLICATION testpub;
-ERROR: could not connect to the publisher: invalid port number: "-1"
--- fail - invalid connection string during ALTER
-ALTER SUBSCRIPTION regress_testsub CONNECTION 'foobar';
-ERROR: invalid connection string syntax: missing "=" after "foobar" in connection info string
-
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET PUBLICATION testpub2, testpub3 WITH (refresh = false);
-ALTER SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist2';
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = 'newname');
-ALTER SUBSCRIPTION regress_testsub SET (password_required = false);
-ALTER SUBSCRIPTION regress_testsub SET (run_as_owner = true);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | off | d | f | any | f | t | f | off | dbname=regress_doesnotexist2 | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (password_required = true);
-ALTER SUBSCRIPTION regress_testsub SET (run_as_owner = false);
--- fail
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = '');
-ERROR: replication slot name "" is too short
--- fail
-ALTER SUBSCRIPTION regress_doesnotexist CONNECTION 'dbname=regress_doesnotexist2';
-ERROR: subscription "regress_doesnotexist" does not exist
-ALTER SUBSCRIPTION regress_testsub SET (create_slot = false);
-ERROR: unrecognized subscription parameter: "create_slot"
--- ok
-ALTER SUBSCRIPTION regress_testsub SKIP (lsn = '0/12345');
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | off | d | f | any | t | f | f | off | dbname=regress_doesnotexist2 | 0/12345
-(1 row)
-
--- ok - with lsn = NONE
-ALTER SUBSCRIPTION regress_testsub SKIP (lsn = NONE);
--- fail
-ALTER SUBSCRIPTION regress_testsub SKIP (lsn = '0/0');
-ERROR: invalid WAL location (LSN): 0/0
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | off | d | f | any | t | f | f | off | dbname=regress_doesnotexist2 | 0/0
-(1 row)
-
-BEGIN;
-ALTER SUBSCRIPTION regress_testsub ENABLE;
-\dRs
- List of subscriptions
- Name | Owner | Enabled | Publication
------------------+---------------------------+---------+---------------------
- regress_testsub | regress_subscription_user | t | {testpub2,testpub3}
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub DISABLE;
-\dRs
- List of subscriptions
- Name | Owner | Enabled | Publication
------------------+---------------------------+---------+---------------------
- regress_testsub | regress_subscription_user | f | {testpub2,testpub3}
-(1 row)
-
-COMMIT;
--- fail - must be owner of subscription
-SET ROLE regress_subscription_user_dummy;
-ALTER SUBSCRIPTION regress_testsub RENAME TO regress_testsub_dummy;
-ERROR: must be owner of subscription regress_testsub
-RESET ROLE;
-ALTER SUBSCRIPTION regress_testsub RENAME TO regress_testsub_foo;
-ALTER SUBSCRIPTION regress_testsub_foo SET (synchronous_commit = local);
-ALTER SUBSCRIPTION regress_testsub_foo SET (synchronous_commit = foobar);
-ERROR: invalid value for parameter "synchronous_commit": "foobar"
-HINT: Available values: local, remote_write, remote_apply, on, off.
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
----------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------------+----------
- regress_testsub_foo | regress_subscription_user | f | {testpub2,testpub3} | f | off | d | f | any | t | f | f | local | dbname=regress_doesnotexist2 | 0/0
-(1 row)
-
--- rename back to keep the rest simple
-ALTER SUBSCRIPTION regress_testsub_foo RENAME TO regress_testsub;
--- ok, we're a superuser
-ALTER SUBSCRIPTION regress_testsub OWNER TO regress_subscription_user2;
--- fail - cannot do DROP SUBSCRIPTION inside transaction block with slot name
-BEGIN;
-DROP SUBSCRIPTION regress_testsub;
-ERROR: DROP SUBSCRIPTION cannot run inside a transaction block
-COMMIT;
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
--- now it works
-BEGIN;
-DROP SUBSCRIPTION regress_testsub;
-COMMIT;
-DROP SUBSCRIPTION IF EXISTS regress_testsub;
-NOTICE: subscription "regress_testsub" does not exist, skipping
-DROP SUBSCRIPTION regress_testsub; -- fail
-ERROR: subscription "regress_testsub" does not exist
--- fail - binary must be boolean
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, binary = foo);
-ERROR: binary requires a Boolean value
--- now it works
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, binary = true);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | t | off | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (binary = false);
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-DROP SUBSCRIPTION regress_testsub;
--- fail - streaming must be boolean or 'parallel'
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, streaming = foo);
-ERROR: streaming requires a Boolean value or "parallel"
--- now it works
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, streaming = true);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | on | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (streaming = parallel);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (streaming = false);
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
--- fail - publication already exists
-ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub WITH (refresh = false);
-ERROR: publication "testpub" is already in subscription "regress_testsub"
--- fail - publication used more than once
-ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub1, testpub1 WITH (refresh = false);
-ERROR: publication name "testpub1" used more than once
--- ok - add two publications into subscription
-ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub1, testpub2 WITH (refresh = false);
--- fail - publications already exist
-ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub1, testpub2 WITH (refresh = false);
-ERROR: publication "testpub1" is already in subscription "regress_testsub"
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-----------------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub,testpub1,testpub2} | f | off | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
--- fail - publication used more than once
-ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub1, testpub1 WITH (refresh = false);
-ERROR: publication name "testpub1" used more than once
--- fail - all publications are deleted
-ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub, testpub1, testpub2 WITH (refresh = false);
-ERROR: cannot drop all the publications from a subscription
--- fail - publication does not exist in subscription
-ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub3 WITH (refresh = false);
-ERROR: publication "testpub3" is not in subscription "regress_testsub"
--- ok - delete publications
-ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub1, testpub2 WITH (refresh = false);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-DROP SUBSCRIPTION regress_testsub;
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION mypub
- WITH (connect = false, create_slot = false, copy_data = false);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-ALTER SUBSCRIPTION regress_testsub ENABLE;
--- fail - ALTER SUBSCRIPTION with refresh is not allowed in a transaction
--- block or function
-BEGIN;
-ALTER SUBSCRIPTION regress_testsub SET PUBLICATION mypub WITH (refresh = true);
-ERROR: ALTER SUBSCRIPTION with refresh cannot run inside a transaction block
-END;
-BEGIN;
-ALTER SUBSCRIPTION regress_testsub REFRESH PUBLICATION;
-ERROR: ALTER SUBSCRIPTION ... REFRESH cannot run inside a transaction block
-END;
-CREATE FUNCTION func() RETURNS VOID AS
-$$ ALTER SUBSCRIPTION regress_testsub SET PUBLICATION mypub WITH (refresh = true) $$ LANGUAGE SQL;
-SELECT func();
-ERROR: ALTER SUBSCRIPTION with refresh cannot be executed from a function
-CONTEXT: SQL function "func" statement 1
-ALTER SUBSCRIPTION regress_testsub DISABLE;
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-DROP SUBSCRIPTION regress_testsub;
-DROP FUNCTION func;
--- fail - two_phase must be boolean
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, two_phase = foo);
-ERROR: two_phase requires a Boolean value
--- now it works
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, two_phase = true);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | off | p | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
---fail - alter of two_phase option not supported.
-ALTER SUBSCRIPTION regress_testsub SET (two_phase = false);
-ERROR: unrecognized subscription parameter: "two_phase"
--- but can alter streaming when two_phase enabled
-ALTER SUBSCRIPTION regress_testsub SET (streaming = true);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | on | p | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-DROP SUBSCRIPTION regress_testsub;
--- two_phase and streaming are compatible.
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, streaming = true, two_phase = true);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | on | p | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-DROP SUBSCRIPTION regress_testsub;
--- fail - disable_on_error must be boolean
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, disable_on_error = foo);
-ERROR: disable_on_error requires a Boolean value
--- now it works
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, disable_on_error = false);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (disable_on_error = true);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | t | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-DROP SUBSCRIPTION regress_testsub;
--- let's do some tests with pg_create_subscription rather than superuser
-SET SESSION AUTHORIZATION regress_subscription_user3;
--- fail, not enough privileges
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false);
-ERROR: permission denied for database regression
--- fail, must specify password
-RESET SESSION AUTHORIZATION;
-GRANT CREATE ON DATABASE REGRESSION TO regress_subscription_user3;
-SET SESSION AUTHORIZATION regress_subscription_user3;
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false);
-ERROR: password is required
-DETAIL: Non-superusers must provide a password in the connection string.
--- fail, can't set password_required=false
-RESET SESSION AUTHORIZATION;
-GRANT CREATE ON DATABASE REGRESSION TO regress_subscription_user3;
-SET SESSION AUTHORIZATION regress_subscription_user3;
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, password_required = false);
-ERROR: password_required=false is superuser-only
-HINT: Subscriptions with the password_required option set to false may only be created or modified by the superuser.
--- ok
-RESET SESSION AUTHORIZATION;
-GRANT CREATE ON DATABASE REGRESSION TO regress_subscription_user3;
-SET SESSION AUTHORIZATION regress_subscription_user3;
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist password=regress_fakepassword' PUBLICATION testpub WITH (connect = false);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
--- we cannot give the subscription away to some random user
-ALTER SUBSCRIPTION regress_testsub OWNER TO regress_subscription_user;
-ERROR: must be able to SET ROLE "regress_subscription_user"
--- but we can rename the subscription we just created
-ALTER SUBSCRIPTION regress_testsub RENAME TO regress_testsub2;
--- ok, even after losing pg_create_subscription we can still rename it
-RESET SESSION AUTHORIZATION;
-REVOKE pg_create_subscription FROM regress_subscription_user3;
-SET SESSION AUTHORIZATION regress_subscription_user3;
-ALTER SUBSCRIPTION regress_testsub2 RENAME TO regress_testsub;
--- fail, after losing CREATE on the database we can't rename it any more
-RESET SESSION AUTHORIZATION;
-REVOKE CREATE ON DATABASE REGRESSION FROM regress_subscription_user3;
-SET SESSION AUTHORIZATION regress_subscription_user3;
-ALTER SUBSCRIPTION regress_testsub RENAME TO regress_testsub2;
-ERROR: permission denied for database regression
--- ok, owning it is enough for this stuff
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-DROP SUBSCRIPTION regress_testsub;
-RESET SESSION AUTHORIZATION;
-DROP ROLE regress_subscription_user;
-DROP ROLE regress_subscription_user2;
-DROP ROLE regress_subscription_user3;
-DROP ROLE regress_subscription_user_dummy;
+psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections
+DETAIL: Consistent recovery state has not been yet reached.
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/select_views.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/select_views.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/select_views.out 2024-03-07 14:25:00.333858000 +0000
+++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/select_views.out 2024-03-07 14:27:17.335546000 +0000
@@ -1,1552 +1,2 @@
---
--- SELECT_VIEWS
--- test the views defined in CREATE_VIEWS
---
-SELECT * FROM street;
- name | thepath | cname
-------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------
- Access Rd 25 | [(-121.9283,37.894),(-121.9283,37.9)] | Oakland
- Ada St | [(-122.2487,37.398),(-122.2496,37.401)] | Lafayette
- Agua Fria Creek | [(-121.9254,37.922),(-121.9281,37.889)] | Oakland
- Allen Ct | [(-122.0131,37.602),(-122.0117,37.597)] | Berkeley
- Alvarado Niles Road | [(-122.0325,37.903),(-122.0316,37.9)] | Berkeley
- Andrea Cir | [(-121.733218,37.88641),(-121.733286,37.90617)] | Oakland
- Apricot Lane | [(-121.9471,37.401),(-121.9456,37.392)] | Oakland
- Apricot Lane | [(-121.9471,37.401),(-121.9456,37.392)] | Oakland
- Arden Road | [(-122.0978,37.177),(-122.1,37.177)] | Oakland
- Arizona St | [(-122.0381,37.901),(-122.0367,37.898)] | Berkeley
- Arlington Dr | [(-121.8802,37.408),(-121.8807,37.394)] | Oakland
- Arlington Dr | [(-121.8802,37.408),(-121.8807,37.394)] | Oakland
- Arlington Road | [(-121.7957,37.898),(-121.7956,37.906)] | Oakland
- Arroyo Las Positas | [(-121.7973,37.997),(-121.7957,37.005)] | Oakland
- Arroyo Las Positas | [(-121.7973,37.997),(-121.7957,37.005)] | Oakland
- Arroyo Seco | [(-121.7073,37.766),(-121.6997,37.729)] | Oakland
- Ash St | [(-122.0408,37.31),(-122.04,37.292)] | Oakland
- Avenue 134th | [(-122.1823,37.002),(-122.1851,37.992)] | Oakland
- Avenue 134th | [(-122.1823,37.002),(-122.1851,37.992)] | Berkeley
- Avenue 140th | [(-122.1656,37.003),(-122.1691,37.988)] | Oakland
- Avenue 140th | [(-122.1656,37.003),(-122.1691,37.988)] | Berkeley
- Avenue D | [(-122.298,37.848),(-122.3024,37.849)] | Berkeley
- B St | [(-122.1749,37.451),(-122.1743,37.443)] | Oakland
- Bancroft Ave | [(-122.15714,37.4242),(-122.156,37.409)] | Oakland
- Bancroft Ave | [(-122.1643,37.523),(-122.1631,37.508),(-122.1621,37.493)] | Oakland
- Birch St | [(-122.1617,37.425),(-122.1614,37.417)] | Oakland
- Birch St | [(-122.1673,37.509),(-122.1661,37.492)] | Oakland
- Blacow Road | [(-122.0179,37.469),(-122.0167,37.465)] | Oakland
- Bridgepointe Dr | [(-122.0514,37.305),(-122.0509,37.299)] | Oakland
- Broadmore Ave | [(-122.095,37.522),(-122.0936,37.497)] | Oakland
- Broadway | [(-122.2409,37.586),(-122.2395,37.601)] | Berkeley
- Buckingham Blvd | [(-122.2231,37.59),(-122.2214,37.606)] | Berkeley
- Butterfield Dr | [(-122.0838,37.002),(-122.0834,37.987)] | Oakland
- Butterfield Dr | [(-122.0838,37.002),(-122.0834,37.987)] | Oakland
- Butterfield Dr | [(-122.0838,37.002),(-122.0834,37.987)] | Berkeley
- C St | [(-122.1768,37.46),(-122.1749,37.435)] | Oakland
- Calaveras Creek | [(-121.8203,37.035),(-121.8207,37.931)] | Oakland
- Calaveras Creek | [(-121.8203,37.035),(-121.8207,37.931)] | Oakland
- California St | [(-122.2032,37.005),(-122.2016,37.996)] | Berkeley
- California St | [(-122.2032,37.005),(-122.2016,37.996)] | Lafayette
- Cameron Ave | [(-122.1316,37.502),(-122.1327,37.481)] | Oakland
- Campus Dr | [(-122.1704,37.905),(-122.1678,37.868),(-122.1671,37.865)] | Berkeley
- Capricorn Ave | [(-122.2176,37.404),(-122.2164,37.384)] | Lafayette
- Carson St | [(-122.1846,37.9),(-122.1843,37.901)] | Berkeley
- Cedar Blvd | [(-122.0282,37.446),(-122.0265,37.43)] | Oakland
- Cedar St | [(-122.3011,37.737),(-122.2999,37.739)] | Berkeley
- Celia St | [(-122.0611,37.3),(-122.0616,37.299)] | Oakland
- Central Ave | [(-122.2343,37.602),(-122.2331,37.595)] | Berkeley
- Chambers Dr | [(-122.2004,37.352),(-122.1972,37.368)] | Lafayette
- Chambers Lane | [(-122.2001,37.359),(-122.1975,37.371)] | Lafayette
- Champion St | [(-122.214,37.991),(-122.2147,37.002)] | Berkeley
- Champion St | [(-122.214,37.991),(-122.2147,37.002)] | Lafayette
- Chapman Dr | [(-122.0421,37.504),(-122.0414,37.498)] | Oakland
- Charles St | [(-122.0255,37.505),(-122.0252,37.499)] | Oakland
- Cherry St | [(-122.0437,37.42),(-122.0434,37.413)] | Oakland
- Claremont Pl | [(-122.0542,37.995),(-122.0542,37.008)] | Oakland
- Claremont Pl | [(-122.0542,37.995),(-122.0542,37.008)] | Oakland
- Claremont Pl | [(-122.0542,37.995),(-122.0542,37.008)] | Berkeley
- Coliseum Way | [(-122.2001,37.47),(-122.1978,37.516)] | Oakland
- Coliseum Way | [(-122.2113,37.626),(-122.2085,37.592),(-122.2063,37.568)] | Berkeley
- Coolidge Ave | [(-122.2007,37.058),(-122.1992,37.06)] | Lafayette
- Cornell Ave | [(-122.2956,37.925),(-122.2949,37.906),(-122.2939,37.875)] | Berkeley
- Corriea Way | [(-121.9501,37.402),(-121.9505,37.398)] | Oakland
- Corriea Way | [(-121.9501,37.402),(-121.9505,37.398)] | Oakland
- Cowing Road | [(-122.0002,37.934),(-121.9772,37.782)] | Oakland
- Creston Road | [(-122.2639,37.002),(-122.2613,37.986),(-122.2602,37.978),(-122.2598,37.973)] | Berkeley
- Creston Road | [(-122.2639,37.002),(-122.2613,37.986),(-122.2602,37.978),(-122.2598,37.973)] | Lafayette
- Crow Canyon Creek | [(-122.043,37.905),(-122.0368,37.71)] | Berkeley
- Crystaline Dr | [(-121.925856,37),(-121.925869,37.00527)] | Oakland
- Cull Canyon Road | [(-122.0536,37.435),(-122.0499,37.315)] | Oakland
- Cull Creek | [(-122.0624,37.875),(-122.0582,37.527)] | Berkeley
- D St | [(-122.1811,37.505),(-122.1805,37.497)] | Oakland
- Decoto Road | [(-122.0159,37.006),(-122.016,37.002),(-122.0164,37.993)] | Oakland
- Decoto Road | [(-122.0159,37.006),(-122.016,37.002),(-122.0164,37.993)] | Oakland
- Decoto Road | [(-122.0159,37.006),(-122.016,37.002),(-122.0164,37.993)] | Berkeley
- Deering St | [(-122.2146,37.904),(-122.2126,37.897)] | Berkeley
- Dimond Ave | [(-122.2167,37.994),(-122.2162,37.006)] | Berkeley
- Dimond Ave | [(-122.2167,37.994),(-122.2162,37.006)] | Lafayette
- Donna Way | [(-122.1333,37.606),(-122.1316,37.599)] | Berkeley
- Driftwood Dr | [(-122.0109,37.482),(-122.0113,37.477)] | Oakland
- Driscoll Road | [(-121.9482,37.403),(-121.948451,37.39995)] | Oakland
- Driscoll Road | [(-121.9482,37.403),(-121.948451,37.39995)] | Oakland
- E St | [(-122.1832,37.505),(-122.1826,37.498),(-122.182,37.49)] | Oakland
- Eden Ave | [(-122.1143,37.505),(-122.1142,37.491)] | Oakland
- Eden Creek | [(-122.022037,37.00675),(-122.0221,37.998)] | Oakland
- Eden Creek | [(-122.022037,37.00675),(-122.0221,37.998)] | Oakland
- Eden Creek | [(-122.022037,37.00675),(-122.0221,37.998)] | Berkeley
- Edgewater Dr | [(-122.201,37.379),(-122.2042,37.41)] | Lafayette
- Enos Way | [(-121.7677,37.896),(-121.7673,37.91)] | Oakland
- Euclid Ave | [(-122.2671,37.009),(-122.2666,37.987)] | Berkeley
- Euclid Ave | [(-122.2671,37.009),(-122.2666,37.987)] | Lafayette
- Fairview Ave | [(-121.999,37.428),(-121.9863,37.351)] | Oakland
- Fairview Ave | [(-121.999,37.428),(-121.9863,37.351)] | Oakland
- Foothill Blvd | [(-122.2414,37.9),(-122.2403,37.893)] | Berkeley
- Fountain St | [(-122.2306,37.593),(-122.2293,37.605)] | Berkeley
- Gading Road | [(-122.0801,37.343),(-122.08,37.336)] | Oakland
- Grizzly Peak Blvd | [(-122.2213,37.638),(-122.2127,37.581)] | Berkeley
- Grove Way | [(-122.0643,37.884),(-122.062679,37.89162),(-122.061796,37.89578),(-122.0609,37.9)] | Berkeley
- Harris Road | [(-122.0659,37.372),(-122.0675,37.363)] | Oakland
- Heartwood Dr | [(-122.2006,37.341),(-122.1992,37.338)] | Lafayette
- Hegenberger Exwy | [(-122.1946,37.52),(-122.1947,37.497)] | Oakland
- Herrier St | [(-122.1943,37.006),(-122.1936,37.998)] | Oakland
- Herrier St | [(-122.1943,37.006),(-122.1936,37.998)] | Berkeley
- Hesperian Blvd | [(-122.097,37.333),(-122.0956,37.31),(-122.0946,37.293)] | Oakland
- Hesperian Blvd | [(-122.097,37.333),(-122.0956,37.31),(-122.0946,37.293)] | Oakland
- Hesperian Blvd | [(-122.1132,37.6),(-122.1123,37.586)] | Berkeley
- Hollis St | [(-122.2885,37.397),(-122.289,37.414)] | Lafayette
- I- 580 | [(-121.727,37.074),(-121.7229,37.093),(-121.722301,37.09522),(-121.721001,37.10005),(-121.7194,37.106),(-121.7188,37.109),(-121.7168,37.12),(-121.7163,37.123),(-121.7145,37.127),(-121.7096,37.148),(-121.707731,37.1568),(-121.7058,37.166),(-121.7055,37.168),(-121.7044,37.174),(-121.7038,37.172),(-121.7037,37.172),(-121.7027,37.175),(-121.7001,37.181),(-121.6957,37.191),(-121.6948,37.192),(-121.6897,37.204),(-121.6697,37.185)] | Oakland
- I- 580 | [(-121.9322,37.989),(-121.9243,37.006),(-121.9217,37.014)] | Oakland
- I- 580 | [(-121.9322,37.989),(-121.9243,37.006),(-121.9217,37.014)] | Oakland
- I- 580 | [(-122.018,37.019),(-122.0009,37.032),(-121.9787,37.983),(-121.958,37.984),(-121.9571,37.986)] | Oakland
- I- 580 | [(-122.018,37.019),(-122.0009,37.032),(-121.9787,37.983),(-121.958,37.984),(-121.9571,37.986)] | Oakland
- I- 580 | [(-122.1108,37.023),(-122.1101,37.02),(-122.108103,37.00764),(-122.108,37.007),(-122.1069,37.998),(-122.1064,37.994),(-122.1053,37.982),(-122.1048,37.977),(-122.1032,37.958),(-122.1026,37.953),(-122.1013,37.938),(-122.0989,37.911),(-122.0984,37.91),(-122.098,37.908)] | Oakland
- I- 580 | [(-122.1108,37.023),(-122.1101,37.02),(-122.108103,37.00764),(-122.108,37.007),(-122.1069,37.998),(-122.1064,37.994),(-122.1053,37.982),(-122.1048,37.977),(-122.1032,37.958),(-122.1026,37.953),(-122.1013,37.938),(-122.0989,37.911),(-122.0984,37.91),(-122.098,37.908)] | Berkeley
- I- 580 | [(-122.1543,37.703),(-122.1535,37.694),(-122.1512,37.655),(-122.1475,37.603),(-122.1468,37.583),(-122.1472,37.569),(-122.149044,37.54874),(-122.1493,37.546),(-122.1501,37.532),(-122.1506,37.509),(-122.1495,37.482),(-122.1487,37.467),(-122.1477,37.447),(-122.1414,37.383),(-122.1404,37.376),(-122.1398,37.372),(-122.139,37.356),(-122.1388,37.353),(-122.1385,37.34),(-122.1382,37.33),(-122.1378,37.316)] | Oakland
- I- 580 | [(-122.1543,37.703),(-122.1535,37.694),(-122.1512,37.655),(-122.1475,37.603),(-122.1468,37.583),(-122.1472,37.569),(-122.149044,37.54874),(-122.1493,37.546),(-122.1501,37.532),(-122.1506,37.509),(-122.1495,37.482),(-122.1487,37.467),(-122.1477,37.447),(-122.1414,37.383),(-122.1404,37.376),(-122.1398,37.372),(-122.139,37.356),(-122.1388,37.353),(-122.1385,37.34),(-122.1382,37.33),(-122.1378,37.316)] | Berkeley
- I- 580 | [(-122.2197,37.99),(-122.22,37.99),(-122.222092,37.99523),(-122.2232,37.998),(-122.224146,37.99963),(-122.2261,37.003),(-122.2278,37.007),(-122.2302,37.026),(-122.2323,37.043),(-122.2344,37.059),(-122.235405,37.06427),(-122.2365,37.07)] | Berkeley
- I- 580 | [(-122.2197,37.99),(-122.22,37.99),(-122.222092,37.99523),(-122.2232,37.998),(-122.224146,37.99963),(-122.2261,37.003),(-122.2278,37.007),(-122.2302,37.026),(-122.2323,37.043),(-122.2344,37.059),(-122.235405,37.06427),(-122.2365,37.07)] | Lafayette
- I- 580 Ramp | [(-121.8521,37.011),(-121.8479,37.999),(-121.8476,37.999),(-121.8456,37.01),(-121.8455,37.011)] | Oakland
- I- 580 Ramp | [(-121.8521,37.011),(-121.8479,37.999),(-121.8476,37.999),(-121.8456,37.01),(-121.8455,37.011)] | Oakland
- I- 580 Ramp | [(-121.8743,37.014),(-121.8722,37.999),(-121.8714,37.999)] | Oakland
- I- 580 Ramp | [(-121.8743,37.014),(-121.8722,37.999),(-121.8714,37.999)] | Oakland
- I- 580 Ramp | [(-121.9043,37.998),(-121.9036,37.013),(-121.902632,37.0174),(-121.9025,37.018)] | Oakland
- I- 580 Ramp | [(-121.9043,37.998),(-121.9036,37.013),(-121.902632,37.0174),(-121.9025,37.018)] | Oakland
- I- 580 Ramp | [(-121.9368,37.986),(-121.936483,37.98832),(-121.9353,37.997),(-121.93504,37.00035),(-121.9346,37.006),(-121.933764,37.00031),(-121.9333,37.997),(-121.9322,37.989)] | Oakland
- I- 580 Ramp | [(-121.9368,37.986),(-121.936483,37.98832),(-121.9353,37.997),(-121.93504,37.00035),(-121.9346,37.006),(-121.933764,37.00031),(-121.9333,37.997),(-121.9322,37.989)] | Oakland
- I- 580 Ramp | [(-122.093241,37.90351),(-122.09364,37.89634),(-122.093788,37.89212)] | Berkeley
- I- 580 Ramp | [(-122.0934,37.896),(-122.09257,37.89961),(-122.0911,37.906)] | Berkeley
- I- 580 Ramp | [(-122.0941,37.897),(-122.0943,37.902)] | Berkeley
- I- 580 Ramp | [(-122.096,37.888),(-122.0962,37.891),(-122.0964,37.9)] | Berkeley
- I- 580 Ramp | [(-122.101,37.898),(-122.1005,37.902),(-122.0989,37.911)] | Berkeley
- I- 580 Ramp | [(-122.1086,37.003),(-122.1068,37.993),(-122.1066,37.992),(-122.1053,37.982)] | Oakland
- I- 580 Ramp | [(-122.1086,37.003),(-122.1068,37.993),(-122.1066,37.992),(-122.1053,37.982)] | Berkeley
- I- 580 Ramp | [(-122.1414,37.383),(-122.1407,37.376),(-122.1403,37.372),(-122.139,37.356)] | Oakland
- I- 580/I-680 Ramp | ((-121.9207,37.988),(-121.9192,37.016)) | Oakland
- I- 580/I-680 Ramp | ((-121.9207,37.988),(-121.9192,37.016)) | Oakland
- I- 680 | ((-121.939,37.15),(-121.9387,37.145),(-121.9373,37.125),(-121.934242,37.07643),(-121.933886,37.0709),(-121.9337,37.068),(-121.933122,37.06139),(-121.932736,37.05698),(-121.93222,37.05108),(-121.931844,37.04678),(-121.930113,37.027),(-121.926829,37),(-121.9265,37.998),(-121.9217,37.96),(-121.9203,37.949),(-121.9184,37.934)) | Oakland
- I- 680 | ((-121.939,37.15),(-121.9387,37.145),(-121.9373,37.125),(-121.934242,37.07643),(-121.933886,37.0709),(-121.9337,37.068),(-121.933122,37.06139),(-121.932736,37.05698),(-121.93222,37.05108),(-121.931844,37.04678),(-121.930113,37.027),(-121.926829,37),(-121.9265,37.998),(-121.9217,37.96),(-121.9203,37.949),(-121.9184,37.934)) | Oakland
- I- 680 | [(-121.9101,37.715),(-121.911269,37.74682),(-121.9119,37.764),(-121.9124,37.776),(-121.9174,37.905),(-121.9194,37.957),(-121.9207,37.988)] | Oakland
- I- 680 | [(-121.9184,37.934),(-121.917,37.913),(-121.9122,37.83),(-121.9052,37.702)] | Oakland
- I- 680 Ramp | [(-121.8833,37.376),(-121.8833,37.392),(-121.883,37.4),(-121.8835,37.402),(-121.8852,37.422)] | Oakland
- I- 680 Ramp | [(-121.8833,37.376),(-121.8833,37.392),(-121.883,37.4),(-121.8835,37.402),(-121.8852,37.422)] | Oakland
- I- 680 Ramp | [(-121.92,37.438),(-121.9218,37.424),(-121.9238,37.408),(-121.9252,37.392)] | Oakland
- I- 680 Ramp | [(-121.92,37.438),(-121.9218,37.424),(-121.9238,37.408),(-121.9252,37.392)] | Oakland
- I- 680 Ramp | [(-121.9238,37.402),(-121.9234,37.395),(-121.923,37.399)] | Oakland
- I- 680 Ramp | [(-121.9238,37.402),(-121.9234,37.395),(-121.923,37.399)] | Oakland
- I- 80 | ((-122.2937,37.277),(-122.3016,37.262)) | Lafayette
- I- 80 | ((-122.2962,37.273),(-122.3004,37.264)) | Lafayette
- I- 80 Ramp | [(-122.2962,37.413),(-122.2959,37.382),(-122.2951,37.372)] | Lafayette
- I- 880 | ((-121.9669,37.075),(-121.9663,37.071),(-121.9656,37.065),(-121.9618,37.037),(-121.95689,37),(-121.948,37.933)) | Oakland
- I- 880 | ((-121.9669,37.075),(-121.9663,37.071),(-121.9656,37.065),(-121.9618,37.037),(-121.95689,37),(-121.948,37.933)) | Oakland
- I- 880 | [(-121.948,37.933),(-121.9471,37.925),(-121.9467,37.923),(-121.946,37.918),(-121.9452,37.912),(-121.937,37.852)] | Oakland
- I- 880 | [(-122.0219,37.466),(-122.0205,37.447),(-122.020331,37.44447),(-122.020008,37.43962),(-122.0195,37.432),(-122.0193,37.429),(-122.0164,37.393),(-122.010219,37.34771),(-122.0041,37.313)] | Oakland
- I- 880 | [(-122.0375,37.632),(-122.0359,37.619),(-122.0358,37.616),(-122.034514,37.60409),(-122.031876,37.57965),(-122.031193,37.57332),(-122.03016,37.56375),(-122.02943,37.55698),(-122.028689,37.54929),(-122.027833,37.53908),(-122.025979,37.51698),(-122.0238,37.491)] | Oakland
- I- 880 | [(-122.0375,37.632),(-122.0359,37.619),(-122.0358,37.616),(-122.034514,37.60409),(-122.031876,37.57965),(-122.031193,37.57332),(-122.03016,37.56375),(-122.02943,37.55698),(-122.028689,37.54929),(-122.027833,37.53908),(-122.025979,37.51698),(-122.0238,37.491)] | Berkeley
- I- 880 | [(-122.0612,37.003),(-122.0604,37.991),(-122.0596,37.982),(-122.0585,37.967),(-122.0583,37.961),(-122.0553,37.918),(-122.053635,37.89475),(-122.050759,37.8546),(-122.05,37.844),(-122.0485,37.817),(-122.0483,37.813),(-122.0482,37.811)] | Oakland
- I- 880 | [(-122.0612,37.003),(-122.0604,37.991),(-122.0596,37.982),(-122.0585,37.967),(-122.0583,37.961),(-122.0553,37.918),(-122.053635,37.89475),(-122.050759,37.8546),(-122.05,37.844),(-122.0485,37.817),(-122.0483,37.813),(-122.0482,37.811)] | Oakland
- I- 880 | [(-122.0612,37.003),(-122.0604,37.991),(-122.0596,37.982),(-122.0585,37.967),(-122.0583,37.961),(-122.0553,37.918),(-122.053635,37.89475),(-122.050759,37.8546),(-122.05,37.844),(-122.0485,37.817),(-122.0483,37.813),(-122.0482,37.811)] | Berkeley
- I- 880 | [(-122.0831,37.312),(-122.0819,37.296),(-122.081,37.285),(-122.0786,37.248),(-122.078,37.24),(-122.077642,37.23496),(-122.076983,37.22567),(-122.076599,37.22026),(-122.076229,37.21505),(-122.0758,37.209)] | Oakland
- I- 880 | [(-122.0978,37.528),(-122.096,37.496),(-122.0931,37.453),(-122.09277,37.4496),(-122.090189,37.41442),(-122.0896,37.405),(-122.085,37.34)] | Oakland
- I- 880 | [(-122.1365,37.902),(-122.1358,37.898),(-122.1333,37.881),(-122.1323,37.874),(-122.1311,37.866),(-122.1308,37.865),(-122.1307,37.864),(-122.1289,37.851),(-122.1277,37.843),(-122.1264,37.834),(-122.1231,37.812),(-122.1165,37.766),(-122.1104,37.72),(-122.109695,37.71094),(-122.109,37.702),(-122.108312,37.69168),(-122.1076,37.681)] | Berkeley
- I- 880 | [(-122.1755,37.185),(-122.1747,37.178),(-122.1742,37.173),(-122.1692,37.126),(-122.167792,37.11594),(-122.16757,37.11435),(-122.1671,37.111),(-122.1655,37.1),(-122.165169,37.09811),(-122.1641,37.092),(-122.1596,37.061),(-122.158381,37.05275),(-122.155991,37.03657),(-122.1531,37.017),(-122.1478,37.98),(-122.1407,37.932),(-122.1394,37.924),(-122.1389,37.92),(-122.1376,37.91)] | Oakland
- I- 880 | [(-122.1755,37.185),(-122.1747,37.178),(-122.1742,37.173),(-122.1692,37.126),(-122.167792,37.11594),(-122.16757,37.11435),(-122.1671,37.111),(-122.1655,37.1),(-122.165169,37.09811),(-122.1641,37.092),(-122.1596,37.061),(-122.158381,37.05275),(-122.155991,37.03657),(-122.1531,37.017),(-122.1478,37.98),(-122.1407,37.932),(-122.1394,37.924),(-122.1389,37.92),(-122.1376,37.91)] | Berkeley
- I- 880 | [(-122.2214,37.711),(-122.2202,37.699),(-122.2199,37.695),(-122.219,37.682),(-122.2184,37.672),(-122.2173,37.652),(-122.2159,37.638),(-122.2144,37.616),(-122.2138,37.612),(-122.2135,37.609),(-122.212,37.592),(-122.2116,37.586),(-122.2111,37.581)] | Berkeley
- I- 880 | [(-122.2707,37.975),(-122.2693,37.972),(-122.2681,37.966),(-122.267,37.962),(-122.2659,37.957),(-122.2648,37.952),(-122.2636,37.946),(-122.2625,37.935),(-122.2617,37.927),(-122.2607,37.921),(-122.2593,37.916),(-122.258,37.911),(-122.2536,37.898),(-122.2432,37.858),(-122.2408,37.845),(-122.2386,37.827),(-122.2374,37.811)] | Berkeley
- I- 880 Ramp | [(-122.0019,37.301),(-122.002,37.293)] | Oakland
- I- 880 Ramp | [(-122.0041,37.313),(-122.0018,37.315),(-122.0007,37.315),(-122.0005,37.313),(-122.0002,37.308),(-121.9995,37.289)] | Oakland
- I- 880 Ramp | [(-122.0041,37.313),(-122.0038,37.308),(-122.0039,37.284),(-122.0013,37.287),(-121.9995,37.289)] | Oakland
- I- 880 Ramp | [(-122.0236,37.488),(-122.0231,37.458),(-122.0227,37.458),(-122.0223,37.452),(-122.0205,37.447)] | Oakland
- I- 880 Ramp | [(-122.0238,37.491),(-122.0215,37.483),(-122.0211,37.477),(-122.0205,37.447)] | Oakland
- I- 880 Ramp | [(-122.059,37.982),(-122.0577,37.984),(-122.0612,37.003)] | Oakland
- I-
880 Ramp | [(-122.059,37.982),(-122.0577,37.984),(-122.0612,37.003)] | Oakland - I- 880 Ramp | [(-122.059,37.982),(-122.0577,37.984),(-122.0612,37.003)] | Berkeley - I- 880 Ramp | [(-122.0618,37.011),(-122.0631,37.982),(-122.0585,37.967)] | Oakland - I- 880 Ramp | [(-122.0618,37.011),(-122.0631,37.982),(-122.0585,37.967)] | Oakland - I- 880 Ramp | [(-122.0618,37.011),(-122.0631,37.982),(-122.0585,37.967)] | Berkeley - I- 880 Ramp | [(-122.085,37.34),(-122.0801,37.316),(-122.081,37.285)] | Oakland - I- 880 Ramp | [(-122.085,37.34),(-122.0801,37.316),(-122.081,37.285)] | Oakland - I- 880 Ramp | [(-122.085,37.34),(-122.0866,37.316),(-122.0819,37.296)] | Oakland - I- 880 Ramp | [(-122.085,37.34),(-122.0866,37.316),(-122.0819,37.296)] | Oakland - I- 880 Ramp | [(-122.1029,37.61),(-122.1013,37.587),(-122.0999,37.569)] | Berkeley - I- 880 Ramp | [(-122.1379,37.891),(-122.1383,37.897),(-122.1377,37.902)] | Berkeley - I- 880 Ramp | [(-122.1379,37.931),(-122.137597,37.92736),(-122.1374,37.925),(-122.1373,37.924),(-122.1369,37.914),(-122.1358,37.905),(-122.1365,37.908),(-122.1358,37.898)] | Berkeley - I- 880 Ramp | [(-122.2536,37.898),(-122.254,37.902)] | Berkeley - I- 880 Ramp | [(-122.2771,37.002),(-122.278,37)] | Lafayette - Indian Way | [(-122.2066,37.398),(-122.2045,37.411)] | Lafayette - Jackson St | [(-122.0845,37.6),(-122.0842,37.606)] | Berkeley - Johnson Dr | [(-121.9145,37.901),(-121.915,37.877)] | Oakland - Joyce St | [(-122.0792,37.604),(-122.0774,37.581)] | Berkeley - Juniper St | [(-121.7823,37.897),(-121.7815,37.9)] | Oakland - Kaiser Dr | [(-122.067163,37.47821),(-122.060402,37.51961)] | Oakland - Keeler Ave | [(-122.2578,37.906),(-122.2579,37.899)] | Berkeley - Kildare Road | [(-122.0968,37.016),(-122.0959,37)] | Oakland - La Playa Dr | [(-122.1039,37.545),(-122.101,37.493)] | Oakland - Laguna Ave | [(-122.2099,37.989),(-122.2089,37)] | Berkeley - Laguna Ave | [(-122.2099,37.989),(-122.2089,37)] | Lafayette - Lakehurst Cir | [(-122.284729,37.89025),(-122.286096,37.90364)] | Berkeley - Lakeshore Ave | [(-122.2586,37.99),(-122.2556,37.006)] | Berkeley - Lakeshore Ave | [(-122.2586,37.99),(-122.2556,37.006)] | Lafayette - Las Positas Road | [(-121.764488,37.99199),(-121.75569,37.02022)] | Oakland - Las Positas Road | [(-121.764488,37.99199),(-121.75569,37.02022)] | Oakland - Linden St | [(-122.2867,37.998),(-122.2864,37.008)] | Berkeley - Linden St | [(-122.2867,37.998),(-122.2864,37.008)] | Lafayette - Livermore Ave | [(-121.7687,37.448),(-121.769,37.375)] | Oakland - Livermore Ave | [(-121.7687,37.448),(-121.769,37.375)] | Oakland - Livermore Ave | [(-121.772719,37.99085),(-121.7728,37.001)] | Oakland - Livermore Ave | [(-121.772719,37.99085),(-121.7728,37.001)] | Oakland - Locust St | [(-122.1606,37.007),(-122.1593,37.987)] | Oakland - Locust St | [(-122.1606,37.007),(-122.1593,37.987)] | Berkeley - Logan Ct | [(-122.0053,37.492),(-122.0061,37.484)] | Oakland - Magnolia St | [(-122.0971,37.5),(-122.0962,37.484)] | Oakland - Mandalay Road | [(-122.2322,37.397),(-122.2321,37.403)] | Lafayette - Marin Ave | [(-122.2741,37.894),(-122.272,37.901)] | Berkeley - Martin Luther King Jr Way | [(-122.2712,37.608),(-122.2711,37.599)] | Berkeley - Mattos Dr | [(-122.0005,37.502),(-122.000898,37.49683)] | Oakland - Maubert Ave | [(-122.1114,37.009),(-122.1096,37.995)] | Oakland - Maubert Ave | [(-122.1114,37.009),(-122.1096,37.995)] | Berkeley - McClure Ave | [(-122.1431,37.001),(-122.1436,37.998)] | Oakland - McClure Ave | [(-122.1431,37.001),(-122.1436,37.998)] | Berkeley - Medlar Dr | 
[(-122.0627,37.378),(-122.0625,37.375)] | Oakland - Mildred Ct | [(-122.0002,37.388),(-121.9998,37.386)] | Oakland - Miller Road | [(-122.0902,37.645),(-122.0865,37.545)] | Berkeley - Miramar Ave | [(-122.1009,37.025),(-122.099089,37.03209)] | Oakland - Mission Blvd | [(-121.918886,37),(-121.9194,37.976),(-121.9198,37.975)] | Oakland - Mission Blvd | [(-121.918886,37),(-121.9194,37.976),(-121.9198,37.975)] | Oakland - Mission Blvd | [(-122.0006,37.896),(-121.9989,37.88)] | Oakland - Mission Blvd | [(-122.0006,37.896),(-121.9989,37.88)] | Berkeley - Moores Ave | [(-122.0087,37.301),(-122.0094,37.292)] | Oakland - National Ave | [(-122.1192,37.5),(-122.1281,37.489)] | Oakland - Navajo Ct | [(-121.8779,37.901),(-121.8783,37.9)] | Oakland - Newark Blvd | [(-122.0352,37.438),(-122.0341,37.423)] | Oakland - Oakland Inner Harbor | [(-122.2625,37.913),(-122.260016,37.89484)] | Berkeley - Oakridge Road | [(-121.8316,37.049),(-121.828382,37)] | Oakland - Oneil Ave | [(-122.076754,37.62476),(-122.0745,37.595)] | Berkeley - Parkridge Dr | [(-122.1438,37.884),(-122.1428,37.9)] | Berkeley - Parkside Dr | [(-122.0475,37.603),(-122.0443,37.596)] | Berkeley - Paseo Padre Pkwy | [(-121.9143,37.005),(-121.913522,37)] | Oakland - Paseo Padre Pkwy | [(-122.0021,37.639),(-121.996,37.628)] | Oakland - Paseo Padre Pkwy | [(-122.0021,37.639),(-121.996,37.628)] | Berkeley - Pearl St | [(-122.2383,37.594),(-122.2366,37.615)] | Berkeley - Periwinkle Road | [(-122.0451,37.301),(-122.044758,37.29844)] | Oakland - Pimlico Dr | [(-121.8616,37.998),(-121.8618,37.008)] | Oakland - Pimlico Dr | [(-121.8616,37.998),(-121.8618,37.008)] | Oakland - Portsmouth Ave | [(-122.1064,37.315),(-122.1064,37.308)] | Oakland - Proctor Ave | [(-122.2267,37.406),(-122.2251,37.386)] | Lafayette - Railroad Ave | [(-122.0245,37.013),(-122.0234,37.003),(-122.0223,37.993)] | Oakland - Railroad Ave | [(-122.0245,37.013),(-122.0234,37.003),(-122.0223,37.993)] | Oakland - Railroad Ave | [(-122.0245,37.013),(-122.0234,37.003),(-122.0223,37.993)] | Berkeley - Ranspot Dr | [(-122.0972,37.999),(-122.0959,37)] | Oakland - Ranspot Dr | [(-122.0972,37.999),(-122.0959,37)] | Oakland - Ranspot Dr | [(-122.0972,37.999),(-122.0959,37)] | Berkeley - Redding St | [(-122.1978,37.901),(-122.1975,37.895)] | Berkeley - Redwood Road | [(-122.1493,37.98),(-122.1437,37.001)] | Oakland - Redwood Road | [(-122.1493,37.98),(-122.1437,37.001)] | Berkeley - Roca Dr | [(-122.0335,37.609),(-122.0314,37.599)] | Berkeley - Rosedale Ct | [(-121.9232,37.9),(-121.924,37.897)] | Oakland - Sacramento St | [(-122.2799,37.606),(-122.2797,37.597)] | Berkeley - Saddle Brook Dr | [(-122.1478,37.909),(-122.1454,37.904),(-122.1451,37.888)] | Berkeley - Saginaw Ct | [(-121.8803,37.898),(-121.8806,37.901)] | Oakland - San Andreas Dr | [(-122.0609,37.9),(-122.0614,37.895)] | Berkeley - Santa Maria Ave | [(-122.0773,37),(-122.0773,37.98)] | Oakland - Santa Maria Ave | [(-122.0773,37),(-122.0773,37.98)] | Oakland - Santa Maria Ave | [(-122.0773,37),(-122.0773,37.98)] | Berkeley - Shattuck Ave | [(-122.2686,37.904),(-122.2686,37.897)] | Berkeley - Sheridan Road | [(-122.2279,37.425),(-122.2253,37.411),(-122.2223,37.377)] | Lafayette - Shoreline Dr | [(-122.2657,37.603),(-122.2648,37.6)] | Berkeley - Skyline Blvd | [(-122.1738,37.01),(-122.1714,37.996)] | Oakland - Skyline Blvd | [(-122.1738,37.01),(-122.1714,37.996)] | Berkeley - Skyline Dr | [(-122.0277,37.5),(-122.0284,37.498)] | Oakland - Skywest Dr | [(-122.1161,37.62),(-122.1123,37.586)] | Berkeley - Southern Pacific Railroad | 
[(-122.3002,37.674),(-122.2999,37.661)] | Berkeley - Sp Railroad | [(-121.893564,37.99009),(-121.897,37.016)] | Oakland - Sp Railroad | [(-121.893564,37.99009),(-121.897,37.016)] | Oakland - Sp Railroad | [(-121.9565,37.898),(-121.9562,37.9)] | Oakland - Sp Railroad | [(-122.0734,37.001),(-122.0734,37.997)] | Oakland - Sp Railroad | [(-122.0734,37.001),(-122.0734,37.997)] | Oakland - Sp Railroad | [(-122.0734,37.001),(-122.0734,37.997)] | Berkeley - Sp Railroad | [(-122.0914,37.601),(-122.087,37.56),(-122.086408,37.5551)] | Berkeley - Sp Railroad | [(-122.137792,37.003),(-122.1365,37.992),(-122.131257,37.94612)] | Oakland - Sp Railroad | [(-122.137792,37.003),(-122.1365,37.992),(-122.131257,37.94612)] | Berkeley - Sp Railroad | [(-122.1947,37.497),(-122.193328,37.4848)] | Oakland - Stanton Ave | [(-122.100392,37.0697),(-122.099513,37.06052)] | Oakland - State Hwy 123 | [(-122.3004,37.986),(-122.2998,37.969),(-122.2995,37.962),(-122.2992,37.952),(-122.299,37.942),(-122.2987,37.935),(-122.2984,37.924),(-122.2982,37.92),(-122.2976,37.904),(-122.297,37.88),(-122.2966,37.869),(-122.2959,37.848),(-122.2961,37.843)] | Berkeley - State Hwy 13 | [(-122.1797,37.943),(-122.179871,37.91849),(-122.18,37.9),(-122.179023,37.86615),(-122.1787,37.862),(-122.1781,37.851),(-122.1777,37.845),(-122.1773,37.839),(-122.177,37.833)] | Berkeley - State Hwy 13 | [(-122.2049,37.2),(-122.20328,37.17975),(-122.1989,37.125),(-122.198078,37.11641),(-122.1975,37.11)] | Lafayette - State Hwy 13 Ramp | [(-122.2244,37.427),(-122.223,37.414),(-122.2214,37.396),(-122.2213,37.388)] | Lafayette - State Hwy 238 | ((-122.098,37.908),(-122.0983,37.907),(-122.099,37.905),(-122.101,37.898),(-122.101535,37.89711),(-122.103173,37.89438),(-122.1046,37.892),(-122.106,37.89)) | Berkeley - State Hwy 238 Ramp | [(-122.1288,37.9),(-122.1293,37.895),(-122.1296,37.906)] | Berkeley - State Hwy 24 | [(-122.2674,37.246),(-122.2673,37.248),(-122.267,37.261),(-122.2668,37.271),(-122.2663,37.298),(-122.2659,37.315),(-122.2655,37.336),(-122.265007,37.35882),(-122.264443,37.37286),(-122.2641,37.381),(-122.2638,37.388),(-122.2631,37.396),(-122.2617,37.405),(-122.2615,37.407),(-122.2605,37.412)] | Lafayette - State Hwy 84 | [(-121.9565,37.898),(-121.956589,37.89911),(-121.9569,37.903),(-121.956,37.91),(-121.9553,37.919)] | Oakland - State Hwy 84 | [(-122.0671,37.426),(-122.07,37.402),(-122.074,37.37),(-122.0773,37.338)] | Oakland - State Hwy 92 | [(-122.1085,37.326),(-122.1095,37.322),(-122.1111,37.316),(-122.1119,37.313),(-122.1125,37.311),(-122.1131,37.308),(-122.1167,37.292),(-122.1187,37.285),(-122.12,37.28)] | Oakland - State Hwy 92 Ramp | [(-122.1086,37.321),(-122.1089,37.315),(-122.1111,37.316)] | Oakland - Stuart St | [(-122.2518,37.6),(-122.2507,37.601),(-122.2491,37.606)] | Berkeley - Sunol Ridge Trl | [(-121.9419,37.455),(-121.9345,37.38)] | Oakland - Sunol Ridge Trl | [(-121.9419,37.455),(-121.9345,37.38)] | Oakland - Tassajara Creek | [(-121.87866,37.98898),(-121.8782,37.015)] | Oakland - Tassajara Creek | [(-121.87866,37.98898),(-121.8782,37.015)] | Oakland - Taurus Ave | [(-122.2159,37.416),(-122.2128,37.389)] | Lafayette - Tennyson Road | [(-122.0891,37.317),(-122.0927,37.317)] | Oakland - Thackeray Ave | [(-122.072,37.305),(-122.0715,37.298)] | Oakland - Theresa Way | [(-121.7289,37.906),(-121.728,37.899)] | Oakland - Tissiack Way | [(-121.920364,37),(-121.9208,37.995)] | Oakland - Tissiack Way | [(-121.920364,37),(-121.9208,37.995)] | Oakland - Tupelo Ter | [(-122.059087,37.6113),(-122.057021,37.59942)] | Berkeley - Vallecitos Road | 
[(-121.8699,37.916),(-121.8703,37.891)] | Oakland - Warm Springs Blvd | [(-121.933956,37),(-121.9343,37.97)] | Oakland - Warm Springs Blvd | [(-121.933956,37),(-121.9343,37.97)] | Oakland - Welch Creek Road | [(-121.7695,37.386),(-121.7737,37.413)] | Oakland - Welch Creek Road | [(-121.7695,37.386),(-121.7737,37.413)] | Oakland - West Loop Road | [(-122.0576,37.604),(-122.0602,37.586)] | Berkeley - Western Pacific Railroad Spur | [(-122.0394,37.018),(-122.0394,37.961)] | Oakland - Western Pacific Railroad Spur | [(-122.0394,37.018),(-122.0394,37.961)] | Oakland - Western Pacific Railroad Spur | [(-122.0394,37.018),(-122.0394,37.961)] | Berkeley - Whitlock Creek | [(-121.74683,37.91276),(-121.733107,37)] | Oakland - Whitlock Creek | [(-121.74683,37.91276),(-121.733107,37)] | Oakland - Willimet Way | [(-122.0964,37.517),(-122.0949,37.493)] | Oakland - Wisconsin St | [(-122.1994,37.017),(-122.1975,37.998),(-122.1971,37.994)] | Oakland - Wisconsin St | [(-122.1994,37.017),(-122.1975,37.998),(-122.1971,37.994)] | Berkeley - Wp Railroad | [(-122.254,37.902),(-122.2506,37.891)] | Berkeley - 100th Ave | [(-122.1657,37.429),(-122.1647,37.432)] | Oakland - 107th Ave | [(-122.1555,37.403),(-122.1531,37.41)] | Oakland - 14th St | [(-122.299,37.147),(-122.3,37.148)] | Lafayette - 19th Ave | [(-122.2366,37.897),(-122.2359,37.905)] | Berkeley - 1st St | [(-121.75508,37.89294),(-121.753581,37.90031)] | Oakland - 5th St | [(-122.278,37),(-122.2792,37.005),(-122.2803,37.009)] | Lafayette - 5th St | [(-122.296,37.615),(-122.2953,37.598)] | Berkeley - 82nd Ave | [(-122.1695,37.596),(-122.1681,37.603)] | Berkeley - 85th Ave | [(-122.1877,37.466),(-122.186,37.476)] | Oakland - 89th Ave | [(-122.1822,37.459),(-122.1803,37.471)] | Oakland - 98th Ave | [(-122.1568,37.498),(-122.1558,37.502)] | Oakland - 98th Ave | [(-122.1693,37.438),(-122.1682,37.444)] | Oakland - 98th Ave | [(-122.2001,37.258),(-122.1974,37.27)] | Lafayette -(333 rows) - -SELECT name, #thepath FROM iexit ORDER BY name COLLATE "C", 2; - name | ?column? 
-                name                | ?column? 
-------------------------------------+----------
[... 896 rows of interstate name / path point-count output elided ...]
-(896 rows)
-
-SELECT * FROM toyemp WHERE name = 'sharon';
-  name  | age | location | annualsal 
---------+-----+----------+-----------
- sharon |  25 | (15,12)  |     12000
-(1 row)
-
---
--- Test for Leaky view scenario
---
-CREATE ROLE regress_alice;
-CREATE FUNCTION f_leak (text)
-       RETURNS bool LANGUAGE 'plpgsql' COST 0.0000001
-       AS 'BEGIN RAISE NOTICE ''f_leak => %'', $1; RETURN true; END';
[... the rest of the expected "leaky view" output elided: the customer/credit_card/credit_usage tables and test data, the my_property_* and my_credit_card_* views with and without security_barrier, the scenario queries whose EXPLAIN plans show where f_leak() may be pushed down, the prepared-statement test that flips security_barrier between rewrite and plan time, and the cleanup (DROP ROLE regress_alice) ...]
+psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL:  the database system is not yet accepting connections
+DETAIL:  Consistent recovery state has not been yet reached.
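(Aside, for readers without the regress sources at hand: the elided select_views hunk above is the "leaky view" coverage. The behavior under test is that a user-supplied function with an artificially tiny COST can be scheduled by the planner ahead of a plain view's own qualifier, letting it observe rows the view is meant to hide, whereas a view created WITH (security_barrier) keeps its filter ahead of such functions. A minimal sketch of that behavior -- object names here are illustrative, not the regress suite's:

CREATE FUNCTION peek(text) RETURNS bool LANGUAGE plpgsql COST 0.0000001 AS
$$BEGIN RAISE NOTICE 'peek => %', $1; RETURN true; END$$;
CREATE TABLE accounts (id int, secret text);
INSERT INTO accounts VALUES (1, 'visible'), (42, 'hidden');
CREATE VIEW acct_plain AS
  SELECT * FROM accounts WHERE id < 10;
CREATE VIEW acct_secure WITH (security_barrier) AS
  SELECT * FROM accounts WHERE id < 10;
-- Against acct_plain the near-zero-cost peek() filter may be evaluated on
-- the base-table scan, so the NOTICE can fire for 'hidden' as well; against
-- acct_secure the view's id < 10 qualifier is applied first.
SELECT * FROM acct_plain  WHERE peek(secret);
SELECT * FROM acct_secure WHERE peek(secret);

In EXPLAIN output this typically shows up as the filter sitting directly on the base-table scan for the plain view, versus above a Subquery Scan for the security_barrier one.)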
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/portals_p2.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/portals_p2.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/portals_p2.out	2024-03-07 14:25:00.332936000 +0000
+++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/portals_p2.out	2024-03-07 14:27:17.347994000 +0000
@@ -1,122 +1,2 @@
---
--- PORTALS_P2
---
-BEGIN;
-DECLARE foo13 CURSOR FOR
-    SELECT * FROM onek WHERE unique1 = 50;
[... DECLARE of cursors foo14 .. foo25 (over onek and onek2), the FETCH ALL output for each of the thirteen cursors, and the matching CLOSE statements elided ...]
-END;
+psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL:  the database system is not yet accepting connections
connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/foreign_key.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/foreign_key.out --- /tmp/cirrus-ci-build/src/test/regress/expected/foreign_key.out 2024-03-07 14:25:00.330629000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/foreign_key.out 2024-03-07 14:27:17.338471000 +0000 @@ -1,2931 +1,2 @@ --- --- FOREIGN KEY --- --- MATCH FULL --- --- First test, check and cascade --- -CREATE TABLE PKTABLE ( ptest1 int PRIMARY KEY, ptest2 text ); -CREATE TABLE FKTABLE ( ftest1 int REFERENCES PKTABLE MATCH FULL ON DELETE CASCADE ON UPDATE CASCADE, ftest2 int ); --- Insert test data into PKTABLE -INSERT INTO PKTABLE VALUES (1, 'Test1'); -INSERT INTO PKTABLE VALUES (2, 'Test2'); -INSERT INTO PKTABLE VALUES (3, 'Test3'); -INSERT INTO PKTABLE VALUES (4, 'Test4'); -INSERT INTO PKTABLE VALUES (5, 'Test5'); --- Insert successful rows into FK TABLE -INSERT INTO FKTABLE VALUES (1, 2); -INSERT INTO FKTABLE VALUES (2, 3); -INSERT INTO FKTABLE VALUES (3, 4); -INSERT INTO FKTABLE VALUES (NULL, 1); --- Insert a failed row into FK TABLE -INSERT INTO FKTABLE VALUES (100, 2); -ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey" -DETAIL: Key (ftest1)=(100) is not present in table "pktable". --- Check FKTABLE -SELECT * FROM FKTABLE; - ftest1 | ftest2 ---------+-------- - 1 | 2 - 2 | 3 - 3 | 4 - | 1 -(4 rows) - --- Delete a row from PK TABLE -DELETE FROM PKTABLE WHERE ptest1=1; --- Check FKTABLE for removal of matched row -SELECT * FROM FKTABLE; - ftest1 | ftest2 ---------+-------- - 2 | 3 - 3 | 4 - | 1 -(3 rows) - --- Update a row from PK TABLE -UPDATE PKTABLE SET ptest1=1 WHERE ptest1=2; --- Check FKTABLE for update of matched row -SELECT * FROM FKTABLE; - ftest1 | ftest2 ---------+-------- - 3 | 4 - | 1 - 1 | 3 -(3 rows) - -DROP TABLE FKTABLE; -DROP TABLE PKTABLE; --- --- check set NULL and table constraint on multiple columns --- -CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 text, PRIMARY KEY(ptest1, ptest2) ); -CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, CONSTRAINT constrname FOREIGN KEY(ftest1, ftest2) - REFERENCES PKTABLE MATCH FULL ON DELETE SET NULL ON UPDATE SET NULL); --- Test comments -COMMENT ON CONSTRAINT constrname_wrong ON FKTABLE IS 'fk constraint comment'; -ERROR: constraint "constrname_wrong" for table "fktable" does not exist -COMMENT ON CONSTRAINT constrname ON FKTABLE IS 'fk constraint comment'; -COMMENT ON CONSTRAINT constrname ON FKTABLE IS NULL; --- Insert test data into PKTABLE -INSERT INTO PKTABLE VALUES (1, 2, 'Test1'); -INSERT INTO PKTABLE VALUES (1, 3, 'Test1-2'); -INSERT INTO PKTABLE VALUES (2, 4, 'Test2'); -INSERT INTO PKTABLE VALUES (3, 6, 'Test3'); -INSERT INTO PKTABLE VALUES (4, 8, 'Test4'); -INSERT INTO PKTABLE VALUES (5, 10, 'Test5'); --- Insert successful rows into FK TABLE -INSERT INTO FKTABLE VALUES (1, 2, 4); -INSERT INTO FKTABLE VALUES (1, 3, 5); -INSERT INTO FKTABLE VALUES (2, 4, 8); -INSERT INTO FKTABLE VALUES (3, 6, 12); -INSERT INTO FKTABLE VALUES (NULL, NULL, 0); --- Insert failed rows into FK TABLE -INSERT INTO FKTABLE VALUES (100, 2, 4); -ERROR: insert or update on table "fktable" violates foreign key constraint "constrname" -DETAIL: Key (ftest1, ftest2)=(100, 2) is not present in table "pktable". 
-INSERT INTO FKTABLE VALUES (2, 2, 4); -ERROR: insert or update on table "fktable" violates foreign key constraint "constrname" -DETAIL: Key (ftest1, ftest2)=(2, 2) is not present in table "pktable". -INSERT INTO FKTABLE VALUES (NULL, 2, 4); -ERROR: insert or update on table "fktable" violates foreign key constraint "constrname" -DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. -INSERT INTO FKTABLE VALUES (1, NULL, 4); -ERROR: insert or update on table "fktable" violates foreign key constraint "constrname" -DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. --- Check FKTABLE -SELECT * FROM FKTABLE; - ftest1 | ftest2 | ftest3 ---------+--------+-------- - 1 | 2 | 4 - 1 | 3 | 5 - 2 | 4 | 8 - 3 | 6 | 12 - | | 0 -(5 rows) - --- Delete a row from PK TABLE -DELETE FROM PKTABLE WHERE ptest1=1 and ptest2=2; --- Check FKTABLE for removal of matched row -SELECT * FROM FKTABLE; - ftest1 | ftest2 | ftest3 ---------+--------+-------- - 1 | 3 | 5 - 2 | 4 | 8 - 3 | 6 | 12 - | | 0 - | | 4 -(5 rows) - --- Delete another row from PK TABLE -DELETE FROM PKTABLE WHERE ptest1=5 and ptest2=10; --- Check FKTABLE (should be no change) -SELECT * FROM FKTABLE; - ftest1 | ftest2 | ftest3 ---------+--------+-------- - 1 | 3 | 5 - 2 | 4 | 8 - 3 | 6 | 12 - | | 0 - | | 4 -(5 rows) - --- Update a row from PK TABLE -UPDATE PKTABLE SET ptest1=1 WHERE ptest1=2; --- Check FKTABLE for update of matched row -SELECT * FROM FKTABLE; - ftest1 | ftest2 | ftest3 ---------+--------+-------- - 1 | 3 | 5 - 3 | 6 | 12 - | | 0 - | | 4 - | | 8 -(5 rows) - --- Check update with part of key null -UPDATE FKTABLE SET ftest1 = NULL WHERE ftest1 = 1; -ERROR: insert or update on table "fktable" violates foreign key constraint "constrname" -DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. 
--- Check update with old and new key values equal
-UPDATE FKTABLE SET ftest1 = 1 WHERE ftest1 = 1;
--- Try altering the column type where foreign keys are involved
-ALTER TABLE PKTABLE ALTER COLUMN ptest1 TYPE bigint;
-ALTER TABLE FKTABLE ALTER COLUMN ftest1 TYPE bigint;
-SELECT * FROM PKTABLE;
- ptest1 | ptest2 | ptest3  
---------+--------+---------
-      1 |      3 | Test1-2
-      3 |      6 | Test3
-      4 |      8 | Test4
-      1 |      4 | Test2
-(4 rows)
-
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3 
---------+--------+--------
-      3 |      6 |     12
-        |        |      0
-        |        |      4
-        |        |      8
-      1 |      3 |      5
-(5 rows)
-
-DROP TABLE PKTABLE CASCADE;
-NOTICE: drop cascades to constraint constrname on table fktable
-DROP TABLE FKTABLE;
---
--- check set default and table constraint on multiple columns
---
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 text, PRIMARY KEY(ptest1, ptest2) );
-CREATE TABLE FKTABLE ( ftest1 int DEFAULT -1, ftest2 int DEFAULT -2, ftest3 int, CONSTRAINT constrname2 FOREIGN KEY(ftest1, ftest2)
-                       REFERENCES PKTABLE MATCH FULL ON DELETE SET DEFAULT ON UPDATE SET DEFAULT);
--- Insert a value in PKTABLE for default
-INSERT INTO PKTABLE VALUES (-1, -2, 'The Default!');
--- Insert test data into PKTABLE
-INSERT INTO PKTABLE VALUES (1, 2, 'Test1');
-INSERT INTO PKTABLE VALUES (1, 3, 'Test1-2');
-INSERT INTO PKTABLE VALUES (2, 4, 'Test2');
-INSERT INTO PKTABLE VALUES (3, 6, 'Test3');
-INSERT INTO PKTABLE VALUES (4, 8, 'Test4');
-INSERT INTO PKTABLE VALUES (5, 10, 'Test5');
--- Insert successful rows into FK TABLE
-INSERT INTO FKTABLE VALUES (1, 2, 4);
-INSERT INTO FKTABLE VALUES (1, 3, 5);
-INSERT INTO FKTABLE VALUES (2, 4, 8);
-INSERT INTO FKTABLE VALUES (3, 6, 12);
-INSERT INTO FKTABLE VALUES (NULL, NULL, 0);
--- Insert failed rows into FK TABLE
-INSERT INTO FKTABLE VALUES (100, 2, 4);
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname2"
-DETAIL: Key (ftest1, ftest2)=(100, 2) is not present in table "pktable".
-INSERT INTO FKTABLE VALUES (2, 2, 4);
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname2"
-DETAIL: Key (ftest1, ftest2)=(2, 2) is not present in table "pktable".
-INSERT INTO FKTABLE VALUES (NULL, 2, 4);
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname2"
-DETAIL: MATCH FULL does not allow mixing of null and nonnull key values.
-INSERT INTO FKTABLE VALUES (1, NULL, 4);
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname2"
-DETAIL: MATCH FULL does not allow mixing of null and nonnull key values.
--- Check FKTABLE
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3 
---------+--------+--------
-      1 |      2 |      4
-      1 |      3 |      5
-      2 |      4 |      8
-      3 |      6 |     12
-        |        |      0
-(5 rows)
-
--- Delete a row from PK TABLE
-DELETE FROM PKTABLE WHERE ptest1=1 and ptest2=2;
--- Check FKTABLE to check for removal
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3 
---------+--------+--------
-      1 |      3 |      5
-      2 |      4 |      8
-      3 |      6 |     12
-        |        |      0
-     -1 |     -2 |      4
-(5 rows)
-
--- Delete another row from PK TABLE
-DELETE FROM PKTABLE WHERE ptest1=5 and ptest2=10;
--- Check FKTABLE (should be no change)
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3 
---------+--------+--------
-      1 |      3 |      5
-      2 |      4 |      8
-      3 |      6 |     12
-        |        |      0
-     -1 |     -2 |      4
-(5 rows)
-
--- Update a row from PK TABLE
-UPDATE PKTABLE SET ptest1=1 WHERE ptest1=2;
--- Check FKTABLE for update of matched row
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3 
---------+--------+--------
-      1 |      3 |      5
-      3 |      6 |     12
-        |        |      0
-     -1 |     -2 |      4
-     -1 |     -2 |      8
-(5 rows)
-
--- this should fail for lack of CASCADE
-DROP TABLE PKTABLE;
-ERROR: cannot drop table pktable because other objects depend on it
-DETAIL: constraint constrname2 on table fktable depends on table pktable
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP TABLE PKTABLE CASCADE;
-NOTICE: drop cascades to constraint constrname2 on table fktable
-DROP TABLE FKTABLE;
---
--- First test, check with no on delete or on update
---
-CREATE TABLE PKTABLE ( ptest1 int PRIMARY KEY, ptest2 text );
-CREATE TABLE FKTABLE ( ftest1 int REFERENCES PKTABLE MATCH FULL, ftest2 int );
--- Insert test data into PKTABLE
-INSERT INTO PKTABLE VALUES (1, 'Test1');
-INSERT INTO PKTABLE VALUES (2, 'Test2');
-INSERT INTO PKTABLE VALUES (3, 'Test3');
-INSERT INTO PKTABLE VALUES (4, 'Test4');
-INSERT INTO PKTABLE VALUES (5, 'Test5');
--- Insert successful rows into FK TABLE
-INSERT INTO FKTABLE VALUES (1, 2);
-INSERT INTO FKTABLE VALUES (2, 3);
-INSERT INTO FKTABLE VALUES (3, 4);
-INSERT INTO FKTABLE VALUES (NULL, 1);
--- Insert a failed row into FK TABLE
-INSERT INTO FKTABLE VALUES (100, 2);
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL: Key (ftest1)=(100) is not present in table "pktable".
--- Check FKTABLE
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 
---------+--------
-      1 |      2
-      2 |      3
-      3 |      4
-        |      1
-(4 rows)
-
--- Check PKTABLE
-SELECT * FROM PKTABLE;
- ptest1 | ptest2 
---------+--------
-      1 | Test1
-      2 | Test2
-      3 | Test3
-      4 | Test4
-      5 | Test5
-(5 rows)
-
--- Delete a row from PK TABLE (should fail)
-DELETE FROM PKTABLE WHERE ptest1=1;
-ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_fkey" on table "fktable"
-DETAIL: Key (ptest1)=(1) is still referenced from table "fktable".
--- Delete a row from PK TABLE (should succeed)
-DELETE FROM PKTABLE WHERE ptest1=5;
--- Check PKTABLE for deletes
-SELECT * FROM PKTABLE;
- ptest1 | ptest2 
---------+--------
-      1 | Test1
-      2 | Test2
-      3 | Test3
-      4 | Test4
-(4 rows)
-
--- Update a row from PK TABLE (should fail)
-UPDATE PKTABLE SET ptest1=0 WHERE ptest1=2;
-ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_fkey" on table "fktable"
-DETAIL: Key (ptest1)=(2) is still referenced from table "fktable".
--- Update a row from PK TABLE (should succeed)
-UPDATE PKTABLE SET ptest1=0 WHERE ptest1=4;
--- Check PKTABLE for updates
-SELECT * FROM PKTABLE;
- ptest1 | ptest2 
---------+--------
-      1 | Test1
-      2 | Test2
-      3 | Test3
-      0 | Test4
-(4 rows)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
---
--- Check initial check upon ALTER TABLE
---
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, PRIMARY KEY(ptest1, ptest2) );
-CREATE TABLE FKTABLE ( ftest1 int, ftest2 int );
-INSERT INTO PKTABLE VALUES (1, 2);
-INSERT INTO FKTABLE VALUES (1, NULL);
-ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2) REFERENCES PKTABLE MATCH FULL;
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_ftest2_fkey"
-DETAIL: MATCH FULL does not allow mixing of null and nonnull key values.
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- MATCH SIMPLE
--- Base test restricting update/delete
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) );
-CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3
-                       FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE);
--- Insert Primary Key values
-INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
-INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2');
-INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3');
-INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4');
--- Insert Foreign Key values
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
-INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2);
-INSERT INTO FKTABLE VALUES (2, NULL, 3, 3);
-INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4);
-INSERT INTO FKTABLE VALUES (NULL, 3, 4, 5);
--- Insert a failed value
-INSERT INTO FKTABLE VALUES (1, 2, 7, 6);
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname3"
-DETAIL: Key (ftest1, ftest2, ftest3)=(1, 2, 7) is not present in table "pktable".
--- Show FKTABLE
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-      1 |      2 |      3 |      1
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-(5 rows)
-
--- Try to update something that should fail
-UPDATE PKTABLE set ptest2=5 where ptest2=2;
-ERROR: update or delete on table "pktable" violates foreign key constraint "constrname3" on table "fktable"
-DETAIL: Key (ptest1, ptest2, ptest3)=(1, 2, 3) is still referenced from table "fktable".
--- Try to update something that should succeed
-UPDATE PKTABLE set ptest1=1 WHERE ptest2=3;
--- Try to delete something that should fail
-DELETE FROM PKTABLE where ptest1=1 and ptest2=2 and ptest3=3;
-ERROR: update or delete on table "pktable" violates foreign key constraint "constrname3" on table "fktable"
-DETAIL: Key (ptest1, ptest2, ptest3)=(1, 2, 3) is still referenced from table "fktable".
--- Try to delete something that should work
-DELETE FROM PKTABLE where ptest1=2;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      1 |      2 |      3 | test1
-      1 |      3 |      3 | test2
-      1 |      3 |      4 | test3
-(3 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-      1 |      2 |      3 |      1
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-(5 rows)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- restrict with null values
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, UNIQUE(ptest1, ptest2, ptest3) );
-CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3
-                       FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE (ptest1, ptest2, ptest3));
-INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
-INSERT INTO PKTABLE VALUES (1, 3, NULL, 'test2');
-INSERT INTO PKTABLE VALUES (2, NULL, 4, 'test3');
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
-DELETE FROM PKTABLE WHERE ptest1 = 2;
-SELECT * FROM PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      1 |      2 |      3 | test1
-      1 |      3 |        | test2
-(2 rows)
-
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-      1 |      2 |      3 |      1
-(1 row)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- cascade update/delete
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) );
-CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3
-                       FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE
-                       ON DELETE CASCADE ON UPDATE CASCADE);
--- Insert Primary Key values
-INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
-INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2');
-INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3');
-INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4');
--- Insert Foreign Key values
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
-INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2);
-INSERT INTO FKTABLE VALUES (2, NULL, 3, 3);
-INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4);
-INSERT INTO FKTABLE VALUES (NULL, 3, 4, 5);
--- Insert a failed value
-INSERT INTO FKTABLE VALUES (1, 2, 7, 6);
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname3"
-DETAIL: Key (ftest1, ftest2, ftest3)=(1, 2, 7) is not present in table "pktable".
--- Show FKTABLE
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-      1 |      2 |      3 |      1
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-(5 rows)
-
--- Try to update something that will cascade
-UPDATE PKTABLE set ptest2=5 where ptest2=2;
--- Try to update something that should not cascade
-UPDATE PKTABLE set ptest1=1 WHERE ptest2=3;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      2 |      4 |      5 | test4
-      1 |      5 |      3 | test1
-      1 |      3 |      3 | test2
-      1 |      3 |      4 | test3
-(4 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-      1 |      5 |      3 |      1
-(5 rows)
-
--- Try to delete something that should cascade
-DELETE FROM PKTABLE where ptest1=1 and ptest2=5 and ptest3=3;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      2 |      4 |      5 | test4
-      1 |      3 |      3 | test2
-      1 |      3 |      4 | test3
-(3 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-(4 rows)
-
--- Try to delete something that should not have a cascade
-DELETE FROM PKTABLE where ptest1=2;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      1 |      3 |      3 | test2
-      1 |      3 |      4 | test3
-(2 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-(4 rows)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- set null update / set default delete
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) );
-CREATE TABLE FKTABLE ( ftest1 int DEFAULT 0, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3
-                       FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE
-                       ON DELETE SET DEFAULT ON UPDATE SET NULL);
--- Insert Primary Key values
-INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
-INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2');
-INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3');
-INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4');
--- Insert Foreign Key values
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
-INSERT INTO FKTABLE VALUES (2, 3, 4, 1);
-INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2);
-INSERT INTO FKTABLE VALUES (2, NULL, 3, 3);
-INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4);
-INSERT INTO FKTABLE VALUES (NULL, 3, 4, 5);
--- Insert a failed value
-INSERT INTO FKTABLE VALUES (1, 2, 7, 6);
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname3"
-DETAIL: Key (ftest1, ftest2, ftest3)=(1, 2, 7) is not present in table "pktable".
--- Show FKTABLE
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-      1 |      2 |      3 |      1
-      2 |      3 |      4 |      1
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-(6 rows)
-
--- Try to update something that will set null
-UPDATE PKTABLE set ptest2=5 where ptest2=2;
--- Try to update something that should not set null
-UPDATE PKTABLE set ptest2=2 WHERE ptest2=3 and ptest1=1;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      2 |      3 |      4 | test3
-      2 |      4 |      5 | test4
-      1 |      5 |      3 | test1
-      1 |      2 |      3 | test2
-(4 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-      2 |      3 |      4 |      1
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-        |        |        |      1
-(6 rows)
-
--- Try to delete something that should set default
-DELETE FROM PKTABLE where ptest1=2 and ptest2=3 and ptest3=4;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      2 |      4 |      5 | test4
-      1 |      5 |      3 | test1
-      1 |      2 |      3 | test2
-(3 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-        |        |        |      1
-      0 |        |        |      1
-(6 rows)
-
--- Try to delete something that should not set default
-DELETE FROM PKTABLE where ptest2=5;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      2 |      4 |      5 | test4
-      1 |      2 |      3 | test2
-(2 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-        |        |        |      1
-      0 |        |        |      1
-(6 rows)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- set default update / set null delete
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) );
-CREATE TABLE FKTABLE ( ftest1 int DEFAULT 0, ftest2 int DEFAULT -1, ftest3 int DEFAULT -2, ftest4 int, CONSTRAINT constrname3
-                       FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE
-                       ON DELETE SET NULL ON UPDATE SET DEFAULT);
--- Insert Primary Key values
-INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
-INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2');
-INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3');
-INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4');
-INSERT INTO PKTABLE VALUES (2, -1, 5, 'test5');
--- Insert Foreign Key values
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
-INSERT INTO FKTABLE VALUES (2, 3, 4, 1);
-INSERT INTO FKTABLE VALUES (2, 4, 5, 1);
-INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2);
-INSERT INTO FKTABLE VALUES (2, NULL, 3, 3);
-INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4);
-INSERT INTO FKTABLE VALUES (NULL, 3, 4, 5);
--- Insert a failed value
-INSERT INTO FKTABLE VALUES (1, 2, 7, 6);
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname3"
-DETAIL: Key (ftest1, ftest2, ftest3)=(1, 2, 7) is not present in table "pktable".
--- Show FKTABLE
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-      1 |      2 |      3 |      1
-      2 |      3 |      4 |      1
-      2 |      4 |      5 |      1
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-(7 rows)
-
--- Try to update something that will fail
-UPDATE PKTABLE set ptest2=5 where ptest2=2;
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname3"
-DETAIL: Key (ftest1, ftest2, ftest3)=(0, -1, -2) is not present in table "pktable".
--- Try to update something that will set default
-UPDATE PKTABLE set ptest1=0, ptest2=-1, ptest3=-2 where ptest2=2;
-UPDATE PKTABLE set ptest2=10 where ptest2=4;
--- Try to update something that should not set default
-UPDATE PKTABLE set ptest2=2 WHERE ptest2=3 and ptest1=1;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      2 |      3 |      4 | test3
-      2 |     -1 |      5 | test5
-      0 |     -1 |     -2 | test1
-      2 |     10 |      5 | test4
-      1 |      2 |      3 | test2
-(5 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-      2 |      3 |      4 |      1
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-      0 |     -1 |     -2 |      1
-      0 |     -1 |     -2 |      1
-(7 rows)
-
--- Try to delete something that should set null
-DELETE FROM PKTABLE where ptest1=2 and ptest2=3 and ptest3=4;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      2 |     -1 |      5 | test5
-      0 |     -1 |     -2 | test1
-      2 |     10 |      5 | test4
-      1 |      2 |      3 | test2
-(4 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-      0 |     -1 |     -2 |      1
-      0 |     -1 |     -2 |      1
-        |        |        |      1
-(7 rows)
-
--- Try to delete something that should not set null
-DELETE FROM PKTABLE where ptest2=-1 and ptest3=5;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      0 |     -1 |     -2 | test1
-      2 |     10 |      5 | test4
-      1 |      2 |      3 | test2
-(3 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-      0 |     -1 |     -2 |      1
-      0 |     -1 |     -2 |      1
-        |        |        |      1
-(7 rows)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- Test for ON DELETE SET NULL/DEFAULT (column_list);
-CREATE TABLE PKTABLE (tid int, id int, PRIMARY KEY (tid, id));
-CREATE TABLE FKTABLE (tid int, id int, foo int, FOREIGN KEY (tid, id) REFERENCES PKTABLE ON DELETE SET NULL (bar));
-ERROR: column "bar" referenced in foreign key constraint does not exist
-CREATE TABLE FKTABLE (tid int, id int, foo int, FOREIGN KEY (tid, id) REFERENCES PKTABLE ON DELETE SET NULL (foo));
-ERROR: column "foo" referenced in ON DELETE SET action must be part of foreign key
-CREATE TABLE FKTABLE (tid int, id int, foo int, FOREIGN KEY (tid, foo) REFERENCES PKTABLE ON UPDATE SET NULL (foo));
-ERROR: a column list with SET NULL is only supported for ON DELETE actions
-LINE 1: ...oo int, FOREIGN KEY (tid, foo) REFERENCES PKTABLE ON UPDATE ...
-                                                             ^
-CREATE TABLE FKTABLE (
-  tid int, id int,
-  fk_id_del_set_null int,
-  fk_id_del_set_default int DEFAULT 0,
-  FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES PKTABLE ON DELETE SET NULL (fk_id_del_set_null),
-  FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES PKTABLE ON DELETE SET DEFAULT (fk_id_del_set_default)
-);
-SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conrelid = 'fktable'::regclass::oid ORDER BY oid;
-                                                pg_get_constraintdef                                                 
----------------------------------------------------------------------------------------------------------------------
- FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES pktable(tid, id) ON DELETE SET NULL (fk_id_del_set_null)
- FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES pktable(tid, id) ON DELETE SET DEFAULT (fk_id_del_set_default)
-(2 rows)
-
-INSERT INTO PKTABLE VALUES (1, 0), (1, 1), (1, 2);
-INSERT INTO FKTABLE VALUES
-  (1, 1, 1, NULL),
-  (1, 2, NULL, 2);
-DELETE FROM PKTABLE WHERE id = 1 OR id = 2;
-SELECT * FROM FKTABLE ORDER BY id;
- tid | id | fk_id_del_set_null | fk_id_del_set_default 
------+----+--------------------+-----------------------
-   1 |  1 |                    | 
-   1 |  2 |                    |                     0
-(2 rows)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- Test some invalid FK definitions
-CREATE TABLE PKTABLE (ptest1 int PRIMARY KEY, someoid oid);
-CREATE TABLE FKTABLE_FAIL1 ( ftest1 int, CONSTRAINT fkfail1 FOREIGN KEY (ftest2) REFERENCES PKTABLE);
-ERROR: column "ftest2" referenced in foreign key constraint does not exist
-CREATE TABLE FKTABLE_FAIL2 ( ftest1 int, CONSTRAINT fkfail1 FOREIGN KEY (ftest1) REFERENCES PKTABLE(ptest2));
-ERROR: column "ptest2" referenced in foreign key constraint does not exist
-CREATE TABLE FKTABLE_FAIL3 ( ftest1 int, CONSTRAINT fkfail1 FOREIGN KEY (tableoid) REFERENCES PKTABLE(someoid));
-ERROR: system columns cannot be used in foreign keys
-CREATE TABLE FKTABLE_FAIL4 ( ftest1 oid, CONSTRAINT fkfail1 FOREIGN KEY (ftest1) REFERENCES PKTABLE(tableoid));
-ERROR: system columns cannot be used in foreign keys
-DROP TABLE PKTABLE;
--- Test for referencing column number smaller than referenced constraint
-CREATE TABLE PKTABLE (ptest1 int, ptest2 int, UNIQUE(ptest1, ptest2));
-CREATE TABLE FKTABLE_FAIL1 (ftest1 int REFERENCES pktable(ptest1));
-ERROR: there is no unique constraint matching given keys for referenced table "pktable"
-DROP TABLE FKTABLE_FAIL1;
-ERROR: table "fktable_fail1" does not exist
-DROP TABLE PKTABLE;
---
--- Tests for mismatched types
---
--- Basic one column, two table setup
-CREATE TABLE PKTABLE (ptest1 int PRIMARY KEY);
-INSERT INTO PKTABLE VALUES(42);
--- This next should fail, because int=inet does not exist
-CREATE TABLE FKTABLE (ftest1 inet REFERENCES pktable);
-ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: inet and integer.
--- This should also fail for the same reason, but here we
--- give the column name
-CREATE TABLE FKTABLE (ftest1 inet REFERENCES pktable(ptest1));
-ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: inet and integer.
--- This should succeed, even though they are different types,
--- because int=int8 exists and is a member of the integer opfamily
-CREATE TABLE FKTABLE (ftest1 int8 REFERENCES pktable);
--- Check it actually works
-INSERT INTO FKTABLE VALUES(42); -- should succeed
-INSERT INTO FKTABLE VALUES(43); -- should fail
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL: Key (ftest1)=(43) is not present in table "pktable".
-UPDATE FKTABLE SET ftest1 = ftest1; -- should succeed
-UPDATE FKTABLE SET ftest1 = ftest1 + 1; -- should fail
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL: Key (ftest1)=(43) is not present in table "pktable".
-DROP TABLE FKTABLE;
--- This should fail, because we'd have to cast numeric to int which is
--- not an implicit coercion (or use numeric=numeric, but that's not part
--- of the integer opfamily)
-CREATE TABLE FKTABLE (ftest1 numeric REFERENCES pktable);
-ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: numeric and integer.
-DROP TABLE PKTABLE;
--- On the other hand, this should work because int implicitly promotes to
--- numeric, and we allow promotion on the FK side
-CREATE TABLE PKTABLE (ptest1 numeric PRIMARY KEY);
-INSERT INTO PKTABLE VALUES(42);
-CREATE TABLE FKTABLE (ftest1 int REFERENCES pktable);
--- Check it actually works
-INSERT INTO FKTABLE VALUES(42); -- should succeed
-INSERT INTO FKTABLE VALUES(43); -- should fail
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL: Key (ftest1)=(43) is not present in table "pktable".
-UPDATE FKTABLE SET ftest1 = ftest1; -- should succeed
-UPDATE FKTABLE SET ftest1 = ftest1 + 1; -- should fail
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL: Key (ftest1)=(43) is not present in table "pktable".
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- Two columns, two tables
-CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, PRIMARY KEY(ptest1, ptest2));
--- This should fail, because we just chose really odd types
-CREATE TABLE FKTABLE (ftest1 cidr, ftest2 timestamp, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable);
-ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: cidr and integer.
--- Again, so should this...
-CREATE TABLE FKTABLE (ftest1 cidr, ftest2 timestamp, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable(ptest1, ptest2));
-ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: cidr and integer.
--- This fails because we mixed up the column ordering
-CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest2, ftest1) REFERENCES pktable);
-ERROR: foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented
-DETAIL: Key columns "ftest2" and "ptest1" are of incompatible types: inet and integer.
--- As does this...
-CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest2, ftest1) REFERENCES pktable(ptest1, ptest2));
-ERROR: foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented
-DETAIL: Key columns "ftest2" and "ptest1" are of incompatible types: inet and integer.
--- And again..
-CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable(ptest2, ptest1));
-ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" and "ptest2" are of incompatible types: integer and inet.
--- This works...
-CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest2, ftest1) REFERENCES pktable(ptest2, ptest1));
-DROP TABLE FKTABLE;
--- As does this
-CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable(ptest1, ptest2));
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- Two columns, same table
--- Make sure this still works...
-CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3,
-ptest4) REFERENCES pktable(ptest1, ptest2));
-DROP TABLE PKTABLE;
--- And this,
-CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3,
-ptest4) REFERENCES pktable);
-DROP TABLE PKTABLE;
--- This shouldn't (mixed up columns)
-CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3,
-ptest4) REFERENCES pktable(ptest2, ptest1));
-ERROR: foreign key constraint "pktable_ptest3_ptest4_fkey" cannot be implemented
-DETAIL: Key columns "ptest3" and "ptest2" are of incompatible types: integer and inet.
--- Nor should this... (same reason, we have 4,3 referencing 1,2 which mismatches types)
-CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest4,
-ptest3) REFERENCES pktable(ptest1, ptest2));
-ERROR: foreign key constraint "pktable_ptest4_ptest3_fkey" cannot be implemented
-DETAIL: Key columns "ptest4" and "ptest1" are of incompatible types: inet and integer.
--- Not this one either... Same as the last one except we didn't define the columns being referenced.
-CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest4,
-ptest3) REFERENCES pktable);
-ERROR: foreign key constraint "pktable_ptest4_ptest3_fkey" cannot be implemented
-DETAIL: Key columns "ptest4" and "ptest1" are of incompatible types: inet and integer.
---
--- Now some cases with inheritance
--- Basic 2 table case: 1 column of matching types.
-create table pktable_base (base1 int not null);
-create table pktable (ptest1 int, primary key(base1), unique(base1, ptest1)) inherits (pktable_base);
-create table fktable (ftest1 int references pktable(base1));
--- now some ins, upd, del
-insert into pktable(base1) values (1);
-insert into pktable(base1) values (2);
--- let's insert a non-existent fktable value
-insert into fktable(ftest1) values (3);
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL: Key (ftest1)=(3) is not present in table "pktable".
--- let's make a valid row for that
-insert into pktable(base1) values (3);
-insert into fktable(ftest1) values (3);
--- let's try removing a row that should fail from pktable
-delete from pktable where base1>2;
-ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_fkey" on table "fktable"
-DETAIL: Key (base1)=(3) is still referenced from table "fktable".
--- okay, let's try updating all of the base1 values to *4
--- which should fail.
-update pktable set base1=base1*4;
-ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_fkey" on table "fktable"
-DETAIL: Key (base1)=(3) is still referenced from table "fktable".
--- okay, let's try an update that should work.
-update pktable set base1=base1*4 where base1<3;
--- and a delete that should work
-delete from pktable where base1>3;
--- cleanup
-drop table fktable;
-delete from pktable;
--- Now 2 columns 2 tables, matching types
-create table fktable (ftest1 int, ftest2 int, foreign key(ftest1, ftest2) references pktable(base1, ptest1));
--- now some ins, upd, del
-insert into pktable(base1, ptest1) values (1, 1);
-insert into pktable(base1, ptest1) values (2, 2);
--- let's insert a non-existent fktable value
-insert into fktable(ftest1, ftest2) values (3, 1);
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_ftest2_fkey"
-DETAIL: Key (ftest1, ftest2)=(3, 1) is not present in table "pktable".
--- let's make a valid row for that
-insert into pktable(base1,ptest1) values (3, 1);
-insert into fktable(ftest1, ftest2) values (3, 1);
--- let's try removing a row that should fail from pktable
-delete from pktable where base1>2;
-ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_ftest2_fkey" on table "fktable"
-DETAIL: Key (base1, ptest1)=(3, 1) is still referenced from table "fktable".
--- okay, let's try updating all of the base1 values to *4
--- which should fail.
-update pktable set base1=base1*4;
-ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_ftest2_fkey" on table "fktable"
-DETAIL: Key (base1, ptest1)=(3, 1) is still referenced from table "fktable".
--- okay, let's try an update that should work.
-update pktable set base1=base1*4 where base1<3;
--- and a delete that should work
-delete from pktable where base1>3;
--- cleanup
-drop table fktable;
-drop table pktable;
-drop table pktable_base;
--- Now we'll do one all in 1 table with 2 columns of matching types
-create table pktable_base(base1 int not null, base2 int);
-create table pktable(ptest1 int, ptest2 int, primary key(base1, ptest1), foreign key(base2, ptest2) references
-  pktable(base1, ptest1)) inherits (pktable_base);
-insert into pktable (base1, ptest1, base2, ptest2) values (1, 1, 1, 1);
-insert into pktable (base1, ptest1, base2, ptest2) values (2, 1, 1, 1);
-insert into pktable (base1, ptest1, base2, ptest2) values (2, 2, 2, 1);
-insert into pktable (base1, ptest1, base2, ptest2) values (1, 3, 2, 2);
--- fails (3,2) isn't in base1, ptest1
-insert into pktable (base1, ptest1, base2, ptest2) values (2, 3, 3, 2);
-ERROR: insert or update on table "pktable" violates foreign key constraint "pktable_base2_ptest2_fkey"
-DETAIL: Key (base2, ptest2)=(3, 2) is not present in table "pktable".
--- fails (2,2) is being referenced
-delete from pktable where base1=2;
-ERROR: update or delete on table "pktable" violates foreign key constraint "pktable_base2_ptest2_fkey" on table "pktable"
-DETAIL: Key (base1, ptest1)=(2, 2) is still referenced from table "pktable".
--- fails (1,1) is being referenced (twice)
-update pktable set base1=3 where base1=1;
-ERROR: update or delete on table "pktable" violates foreign key constraint "pktable_base2_ptest2_fkey" on table "pktable"
-DETAIL: Key (base1, ptest1)=(1, 1) is still referenced from table "pktable".
--- this sequence of two deletes will work, since after the first there will be no (2,*) references
-delete from pktable where base2=2;
-delete from pktable where base1=2;
-drop table pktable;
-drop table pktable_base;
--- 2 columns (2 tables), mismatched types
-create table pktable_base(base1 int not null);
-create table pktable(ptest1 inet, primary key(base1, ptest1)) inherits (pktable_base);
--- just generally bad types (with and without column references on the referenced table)
-create table fktable(ftest1 cidr, ftest2 int[], foreign key (ftest1, ftest2) references pktable);
-ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" and "base1" are of incompatible types: cidr and integer.
-create table fktable(ftest1 cidr, ftest2 int[], foreign key (ftest1, ftest2) references pktable(base1, ptest1));
-ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" and "base1" are of incompatible types: cidr and integer.
--- let's mix up which columns reference which
-create table fktable(ftest1 int, ftest2 inet, foreign key(ftest2, ftest1) references pktable);
-ERROR: foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented
-DETAIL: Key columns "ftest2" and "base1" are of incompatible types: inet and integer.
-create table fktable(ftest1 int, ftest2 inet, foreign key(ftest2, ftest1) references pktable(base1, ptest1));
-ERROR: foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented
-DETAIL: Key columns "ftest2" and "base1" are of incompatible types: inet and integer.
-create table fktable(ftest1 int, ftest2 inet, foreign key(ftest1, ftest2) references pktable(ptest1, base1));
-ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: integer and inet.
-drop table pktable;
-drop table pktable_base;
--- 2 columns (1 table), mismatched types
-create table pktable_base(base1 int not null, base2 int);
-create table pktable(ptest1 inet, ptest2 inet[], primary key(base1, ptest1), foreign key(base2, ptest2) references
-  pktable(base1, ptest1)) inherits (pktable_base);
-ERROR: foreign key constraint "pktable_base2_ptest2_fkey" cannot be implemented
-DETAIL: Key columns "ptest2" and "ptest1" are of incompatible types: inet[] and inet.
-create table pktable(ptest1 inet, ptest2 inet, primary key(base1, ptest1), foreign key(base2, ptest2) references
-  pktable(ptest1, base1)) inherits (pktable_base);
-ERROR: foreign key constraint "pktable_base2_ptest2_fkey" cannot be implemented
-DETAIL: Key columns "base2" and "ptest1" are of incompatible types: integer and inet.
-create table pktable(ptest1 inet, ptest2 inet, primary key(base1, ptest1), foreign key(ptest2, base2) references
-  pktable(base1, ptest1)) inherits (pktable_base);
-ERROR: foreign key constraint "pktable_ptest2_base2_fkey" cannot be implemented
-DETAIL: Key columns "ptest2" and "base1" are of incompatible types: inet and integer.
-create table pktable(ptest1 inet, ptest2 inet, primary key(base1, ptest1), foreign key(ptest2, base2) references
-  pktable(base1, ptest1)) inherits (pktable_base);
-ERROR: foreign key constraint "pktable_ptest2_base2_fkey" cannot be implemented
-DETAIL: Key columns "ptest2" and "base1" are of incompatible types: inet and integer.
-drop table pktable;
-ERROR: table "pktable" does not exist
-drop table pktable_base;
---
--- Deferrable constraints
---
--- deferrable, explicitly deferred
-CREATE TABLE pktable (
-    id INT4 PRIMARY KEY,
-    other INT4
-);
-CREATE TABLE fktable (
-    id INT4 PRIMARY KEY,
-    fk INT4 REFERENCES pktable DEFERRABLE
-);
--- default to immediate: should fail
-INSERT INTO fktable VALUES (5, 10);
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL: Key (fk)=(10) is not present in table "pktable".
--- explicitly defer the constraint
-BEGIN;
-SET CONSTRAINTS ALL DEFERRED;
-INSERT INTO fktable VALUES (10, 15);
-INSERT INTO pktable VALUES (15, 0); -- make the FK insert valid
-COMMIT;
-DROP TABLE fktable, pktable;
--- deferrable, initially deferred
-CREATE TABLE pktable (
-    id INT4 PRIMARY KEY,
-    other INT4
-);
-CREATE TABLE fktable (
-    id INT4 PRIMARY KEY,
-    fk INT4 REFERENCES pktable DEFERRABLE INITIALLY DEFERRED
-);
--- default to deferred, should succeed
-BEGIN;
-INSERT INTO fktable VALUES (100, 200);
-INSERT INTO pktable VALUES (200, 500); -- make the FK insert valid
-COMMIT;
--- default to deferred, explicitly make immediate
-BEGIN;
-SET CONSTRAINTS ALL IMMEDIATE;
--- should fail
-INSERT INTO fktable VALUES (500, 1000);
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL: Key (fk)=(1000) is not present in table "pktable".
-COMMIT;
-DROP TABLE fktable, pktable;
--- tricky behavior: according to SQL99, if a deferred constraint is set
--- to 'immediate' mode, it should be checked for validity *immediately*,
--- not when the current transaction commits (i.e. the mode change applies
--- retroactively)
-CREATE TABLE pktable (
-    id INT4 PRIMARY KEY,
-    other INT4
-);
-CREATE TABLE fktable (
-    id INT4 PRIMARY KEY,
-    fk INT4 REFERENCES pktable DEFERRABLE
-);
-BEGIN;
-SET CONSTRAINTS ALL DEFERRED;
--- should succeed, for now
-INSERT INTO fktable VALUES (1000, 2000);
--- should cause transaction abort, due to preceding error
-SET CONSTRAINTS ALL IMMEDIATE;
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL: Key (fk)=(2000) is not present in table "pktable".
-INSERT INTO pktable VALUES (2000, 3); -- too late
-ERROR: current transaction is aborted, commands ignored until end of transaction block
-COMMIT;
-DROP TABLE fktable, pktable;
--- deferrable, initially deferred
-CREATE TABLE pktable (
-    id INT4 PRIMARY KEY,
-    other INT4
-);
-CREATE TABLE fktable (
-    id INT4 PRIMARY KEY,
-    fk INT4 REFERENCES pktable DEFERRABLE INITIALLY DEFERRED
-);
-BEGIN;
--- no error here
-INSERT INTO fktable VALUES (100, 200);
--- error here on commit
-COMMIT;
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL: Key (fk)=(200) is not present in table "pktable".
-DROP TABLE pktable, fktable;
--- test notice about expensive referential integrity checks,
--- where the index cannot be used because of type incompatibilities.
-CREATE TEMP TABLE pktable (
-    id1 INT4 PRIMARY KEY,
-    id2 VARCHAR(4) UNIQUE,
-    id3 REAL UNIQUE,
-    UNIQUE(id1, id2, id3)
-);
-CREATE TEMP TABLE fktable (
-    x1 INT4 REFERENCES pktable(id1),
-    x2 VARCHAR(4) REFERENCES pktable(id2),
-    x3 REAL REFERENCES pktable(id3),
-    x4 TEXT,
-    x5 INT2
-);
--- check individual constraints with alter table.
--- should fail
--- varchar does not promote to real
-ALTER TABLE fktable ADD CONSTRAINT fk_2_3
-FOREIGN KEY (x2) REFERENCES pktable(id3);
-ERROR: foreign key constraint "fk_2_3" cannot be implemented
-DETAIL: Key columns "x2" and "id3" are of incompatible types: character varying and real.
--- nor to int4
-ALTER TABLE fktable ADD CONSTRAINT fk_2_1
-FOREIGN KEY (x2) REFERENCES pktable(id1);
-ERROR: foreign key constraint "fk_2_1" cannot be implemented
-DETAIL: Key columns "x2" and "id1" are of incompatible types: character varying and integer.
--- real does not promote to int4
-ALTER TABLE fktable ADD CONSTRAINT fk_3_1
-FOREIGN KEY (x3) REFERENCES pktable(id1);
-ERROR: foreign key constraint "fk_3_1" cannot be implemented
-DETAIL: Key columns "x3" and "id1" are of incompatible types: real and integer.
--- int4 does not promote to text
-ALTER TABLE fktable ADD CONSTRAINT fk_1_2
-FOREIGN KEY (x1) REFERENCES pktable(id2);
-ERROR: foreign key constraint "fk_1_2" cannot be implemented
-DETAIL: Key columns "x1" and "id2" are of incompatible types: integer and character varying.
--- should succeed
--- int4 promotes to real
-ALTER TABLE fktable ADD CONSTRAINT fk_1_3
-FOREIGN KEY (x1) REFERENCES pktable(id3);
--- text is compatible with varchar
-ALTER TABLE fktable ADD CONSTRAINT fk_4_2
-FOREIGN KEY (x4) REFERENCES pktable(id2);
--- int2 is part of integer opfamily as of 8.0
-ALTER TABLE fktable ADD CONSTRAINT fk_5_1
-FOREIGN KEY (x5) REFERENCES pktable(id1);
--- check multikey cases, especially out-of-order column lists
--- these should work
-ALTER TABLE fktable ADD CONSTRAINT fk_123_123
-FOREIGN KEY (x1,x2,x3) REFERENCES pktable(id1,id2,id3);
-ALTER TABLE fktable ADD CONSTRAINT fk_213_213
-FOREIGN KEY (x2,x1,x3) REFERENCES pktable(id2,id1,id3);
-ALTER TABLE fktable ADD CONSTRAINT fk_253_213
-FOREIGN KEY (x2,x5,x3) REFERENCES pktable(id2,id1,id3);
--- these should fail
-ALTER TABLE fktable ADD CONSTRAINT fk_123_231
-FOREIGN KEY (x1,x2,x3) REFERENCES pktable(id2,id3,id1);
-ERROR: foreign key constraint "fk_123_231" cannot be implemented
-DETAIL: Key columns "x1" and "id2" are of incompatible types: integer and character varying.
-ALTER TABLE fktable ADD CONSTRAINT fk_241_132
-FOREIGN KEY (x2,x4,x1) REFERENCES pktable(id1,id3,id2);
-ERROR: foreign key constraint "fk_241_132" cannot be implemented
-DETAIL: Key columns "x2" and "id1" are of incompatible types: character varying and integer.
-DROP TABLE pktable, fktable;
--- test a tricky case: we can elide firing the FK check trigger during
--- an UPDATE if the UPDATE did not change the foreign key
--- field. However, we can't do this if our transaction was the one that
--- created the updated row and the trigger is deferred, since our UPDATE
--- will have invalidated the original newly-inserted tuple, and therefore
--- cause the on-INSERT RI trigger not to be fired.
-CREATE TEMP TABLE pktable (
-    id int primary key,
-    other int
-);
-CREATE TEMP TABLE fktable (
-    id int primary key,
-    fk int references pktable deferrable initially deferred
-);
-INSERT INTO pktable VALUES (5, 10);
-BEGIN;
--- doesn't match PK, but no error yet
-INSERT INTO fktable VALUES (0, 20);
--- don't change FK
-UPDATE fktable SET id = id + 1;
--- should catch error from initial INSERT
-COMMIT;
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL: Key (fk)=(20) is not present in table "pktable".
--- check same case when insert is in a different subtransaction than update
-BEGIN;
--- doesn't match PK, but no error yet
-INSERT INTO fktable VALUES (0, 20);
--- UPDATE will be in a subxact
-SAVEPOINT savept1;
--- don't change FK
-UPDATE fktable SET id = id + 1;
--- should catch error from initial INSERT
-COMMIT;
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL: Key (fk)=(20) is not present in table "pktable".
-BEGIN;
--- INSERT will be in a subxact
-SAVEPOINT savept1;
--- doesn't match PK, but no error yet
-INSERT INTO fktable VALUES (0, 20);
-RELEASE SAVEPOINT savept1;
--- don't change FK
-UPDATE fktable SET id = id + 1;
--- should catch error from initial INSERT
-COMMIT;
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL: Key (fk)=(20) is not present in table "pktable".
-BEGIN;
--- doesn't match PK, but no error yet
-INSERT INTO fktable VALUES (0, 20);
--- UPDATE will be in a subxact
-SAVEPOINT savept1;
--- don't change FK
-UPDATE fktable SET id = id + 1;
--- Roll back the UPDATE
-ROLLBACK TO savept1;
--- should catch error from initial INSERT
-COMMIT;
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL: Key (fk)=(20) is not present in table "pktable".
---
--- check ALTER CONSTRAINT
---
-INSERT INTO fktable VALUES (1, 5);
-ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey DEFERRABLE INITIALLY IMMEDIATE;
-BEGIN;
--- doesn't match FK, should throw error now
-UPDATE pktable SET id = 10 WHERE id = 5;
-ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_fk_fkey" on table "fktable"
-DETAIL: Key (id)=(5) is still referenced from table "fktable".
-COMMIT;
-BEGIN;
--- doesn't match PK, should throw error now
-INSERT INTO fktable VALUES (0, 20);
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL: Key (fk)=(20) is not present in table "pktable".
-COMMIT;
--- try additional syntax
-ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NOT DEFERRABLE;
--- illegal option
-ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NOT DEFERRABLE INITIALLY DEFERRED;
-ERROR: constraint declared INITIALLY DEFERRED must be DEFERRABLE
-LINE 1: ...e ALTER CONSTRAINT fktable_fk_fkey NOT DEFERRABLE INITIALLY ...
-                                                             ^
--- test order of firing of FK triggers when several RI-induced changes need to
--- be made to the same row. This was broken by subtransaction-related
--- changes in 8.0.
-CREATE TEMP TABLE users (
-    id INT PRIMARY KEY,
-    name VARCHAR NOT NULL
-);
-INSERT INTO users VALUES (1, 'Jozko');
-INSERT INTO users VALUES (2, 'Ferko');
-INSERT INTO users VALUES (3, 'Samko');
-CREATE TEMP TABLE tasks (
-    id INT PRIMARY KEY,
-    owner INT REFERENCES users ON UPDATE CASCADE ON DELETE SET NULL,
-    worker INT REFERENCES users ON UPDATE CASCADE ON DELETE SET NULL,
-    checked_by INT REFERENCES users ON UPDATE CASCADE ON DELETE SET NULL
-);
-INSERT INTO tasks VALUES (1,1,NULL,NULL);
-INSERT INTO tasks VALUES (2,2,2,NULL);
-INSERT INTO tasks VALUES (3,3,3,3);
-SELECT * FROM tasks;
- id | owner | worker | checked_by 
-----+-------+--------+------------
-  1 |     1 |        | 
-  2 |     2 |      2 | 
-  3 |     3 |      3 |          3
-(3 rows)
-
-UPDATE users SET id = 4 WHERE id = 3;
-SELECT * FROM tasks;
- id | owner | worker | checked_by 
-----+-------+--------+------------
-  1 |     1 |        | 
-  2 |     2 |      2 | 
-  3 |     4 |      4 |          4
-(3 rows)
-
-DELETE FROM users WHERE id = 4;
-SELECT * FROM tasks;
- id | owner | worker | checked_by 
-----+-------+--------+------------
-  1 |     1 |        | 
-  2 |     2 |      2 | 
-  3 |       |        | 
-(3 rows)
-
--- could fail with only 2 changes to make, if row was already updated
-BEGIN;
-UPDATE tasks set id=id WHERE id=2;
-SELECT * FROM tasks;
- id | owner | worker | checked_by 
-----+-------+--------+------------
-  1 |     1 |        | 
-  3 |       |        | 
-  2 |     2 |      2 | 
-(3 rows)
-
-DELETE FROM users WHERE id = 2;
-SELECT * FROM tasks;
- id | owner | worker | checked_by 
-----+-------+--------+------------
-  1 |     1 |        | 
-  3 |       |        | 
-  2 |       |        | 
-(3 rows)
-
-COMMIT;
---
--- Test self-referential FK with CASCADE (bug #6268)
---
-create temp table selfref (
-    a int primary key,
-    b int,
-    foreign key (b) references selfref (a)
-        on update cascade on delete cascade
-);
-insert into selfref (a, b)
-values
-    (0, 0),
-    (1, 1);
-begin;
-    update selfref set a = 123 where a = 0;
-    select a, b from selfref;
-  a  |  b  
------+-----
-   1 |   1
- 123 | 123
-(2 rows)
-
-    update selfref set a = 456 where a = 123;
-    select a, b from selfref;
-  a  |  b  
------+-----
-   1 |   1
- 456 | 456
-(2 rows)
-
-commit;
---
--- Test that SET DEFAULT actions recognize updates to default values
---
-create temp table defp (f1 int primary key);
-create temp table defc (f1 int default 0
-  references defp on delete set default);
-insert into defp values (0), (1), (2);
-insert into defc values (2);
-select * from defc;
- f1 
-----
-  2
-(1 row)
-
-delete from defp where f1 = 2;
-select * from defc;
- f1 
-----
-  0
-(1 row)
-
-delete from defp where f1 = 0; -- fail
-ERROR: update or delete on table "defp" violates foreign key constraint "defc_f1_fkey" on table "defc"
-DETAIL: Key (f1)=(0) is still referenced from table "defc".
-alter table defc alter column f1 set default 1;
-delete from defp where f1 = 0;
-select * from defc;
- f1 
-----
-  1
-(1 row)
-
-delete from defp where f1 = 1; -- fail
-ERROR: update or delete on table "defp" violates foreign key constraint "defc_f1_fkey" on table "defc"
-DETAIL: Key (f1)=(1) is still referenced from table "defc".
---
--- Test the difference between NO ACTION and RESTRICT
---
-create temp table pp (f1 int primary key);
-create temp table cc (f1 int references pp on update no action on delete no action);
-insert into pp values(12);
-insert into pp values(11);
-update pp set f1=f1+1;
-insert into cc values(13);
-update pp set f1=f1+1;
-update pp set f1=f1+1; -- fail
-ERROR: update or delete on table "pp" violates foreign key constraint "cc_f1_fkey" on table "cc"
-DETAIL: Key (f1)=(13) is still referenced from table "cc".
-delete from pp where f1 = 13; -- fail
-ERROR: update or delete on table "pp" violates foreign key constraint "cc_f1_fkey" on table "cc"
-DETAIL: Key (f1)=(13) is still referenced from table "cc".
-drop table pp, cc;
-create temp table pp (f1 int primary key);
-create temp table cc (f1 int references pp on update restrict on delete restrict);
-insert into pp values(12);
-insert into pp values(11);
-update pp set f1=f1+1;
-insert into cc values(13);
-update pp set f1=f1+1; -- fail
-ERROR: update or delete on table "pp" violates foreign key constraint "cc_f1_fkey" on table "cc"
-DETAIL: Key (f1)=(13) is still referenced from table "cc".
-delete from pp where f1 = 13; -- fail
-ERROR: update or delete on table "pp" violates foreign key constraint "cc_f1_fkey" on table "cc"
-DETAIL: Key (f1)=(13) is still referenced from table "cc".
-drop table pp, cc;
---
--- Test interaction of foreign-key optimization with rules (bug #14219)
---
-create temp table t1 (a integer primary key, b text);
-create temp table t2 (a integer primary key, b integer references t1);
-create rule r1 as on delete to t1 do delete from t2 where t2.b = old.a;
-explain (costs off) delete from t1 where a = 1;
-                 QUERY PLAN                  
----------------------------------------------
- Delete on t2
-   ->  Nested Loop
-         ->  Index Scan using t1_pkey on t1
-               Index Cond: (a = 1)
-         ->  Seq Scan on t2
-               Filter: (b = 1)
- 
- Delete on t1
-   ->  Index Scan using t1_pkey on t1
-         Index Cond: (a = 1)
-(10 rows)
-
-delete from t1 where a = 1;
--- Test a primary key with attributes located in later attnum positions
--- compared to the fk attributes.
-create table pktable2 (a int, b int, c int, d int, e int, primary key (d, e));
-create table fktable2 (d int, e int, foreign key (d, e) references pktable2);
-insert into pktable2 values (1, 2, 3, 4, 5);
-insert into fktable2 values (4, 5);
-delete from pktable2;
-ERROR: update or delete on table "pktable2" violates foreign key constraint "fktable2_d_e_fkey" on table "fktable2"
-DETAIL: Key (d, e)=(4, 5) is still referenced from table "fktable2".
-update pktable2 set d = 5;
-ERROR: update or delete on table "pktable2" violates foreign key constraint "fktable2_d_e_fkey" on table "fktable2"
-DETAIL: Key (d, e)=(4, 5) is still referenced from table "fktable2".
-drop table pktable2, fktable2;
--- Test truncation of long foreign key names
-create table pktable1 (a int primary key);
-create table pktable2 (a int, b int, primary key (a, b));
-create table fktable2 (
-  a int,
-  b int,
-  very_very_long_column_name_to_exceed_63_characters int,
-  foreign key (very_very_long_column_name_to_exceed_63_characters) references pktable1,
-  foreign key (a, very_very_long_column_name_to_exceed_63_characters) references pktable2,
-  foreign key (a, very_very_long_column_name_to_exceed_63_characters) references pktable2
-);
-select conname from pg_constraint where conrelid = 'fktable2'::regclass order by conname;
-                             conname                              
-------------------------------------------------------------------
- fktable2_a_very_very_long_column_name_to_exceed_63_charac_fkey1
- fktable2_a_very_very_long_column_name_to_exceed_63_charact_fkey
- fktable2_very_very_long_column_name_to_exceed_63_character_fkey
-(3 rows)
-
-drop table pktable1, pktable2, fktable2;
---
--- Test deferred FK check on a tuple deleted by a rolled-back subtransaction
---
-create table pktable2(f1 int primary key);
-create table fktable2(f1 int references pktable2 deferrable initially deferred);
-insert into pktable2 values(1);
-begin;
-insert into fktable2 values(1);
-savepoint x;
-delete from fktable2;
-rollback to x;
-commit;
-begin;
-insert into fktable2 values(2);
-savepoint x;
-delete from fktable2;
-rollback to x;
-commit; -- fail
-ERROR: insert or update on table "fktable2" violates foreign key constraint "fktable2_f1_fkey"
-DETAIL: Key (f1)=(2) is not present in table "pktable2".
---
--- Test that we prevent dropping FK constraint with pending trigger events
---
-begin;
-insert into fktable2 values(2);
-alter table fktable2 drop constraint fktable2_f1_fkey;
-ERROR: cannot ALTER TABLE "fktable2" because it has pending trigger events
-commit;
-begin;
-delete from pktable2 where f1 = 1;
-alter table fktable2 drop constraint fktable2_f1_fkey;
-ERROR: cannot ALTER TABLE "pktable2" because it has pending trigger events
-commit;
-drop table pktable2, fktable2;
---
--- Test keys that "look" different but compare as equal
---
-create table pktable2 (a float8, b float8, primary key (a, b));
-create table fktable2 (x float8, y float8, foreign key (x, y) references pktable2 (a, b) on update cascade);
-insert into pktable2 values ('-0', '-0');
-insert into fktable2 values ('-0', '-0');
-select * from pktable2;
- a  | b  
-----+----
- -0 | -0
-(1 row)
-
-select * from fktable2;
- x  | y  
-----+----
- -0 | -0
-(1 row)
-
-update pktable2 set a = '0' where a = '-0';
-select * from pktable2;
- a | b  
----+----
- 0 | -0
-(1 row)
-
--- should have updated fktable2.x
-select * from fktable2;
- x | y  
----+----
- 0 | -0
-(1 row)
-
-drop table pktable2, fktable2;
---
--- Foreign keys and partitioned tables
---
--- Creation of a partitioned hierarchy with irregular definitions
-CREATE TABLE fk_notpartitioned_pk (fdrop1 int, a int, fdrop2 int, b int,
-  PRIMARY KEY (a, b));
-ALTER TABLE fk_notpartitioned_pk DROP COLUMN fdrop1, DROP COLUMN fdrop2;
-CREATE TABLE fk_partitioned_fk (b int, fdrop1 int, a int) PARTITION BY RANGE (a, b);
-ALTER TABLE fk_partitioned_fk DROP COLUMN fdrop1;
-CREATE TABLE fk_partitioned_fk_1 (fdrop1 int, fdrop2 int, a int, fdrop3 int, b int);
-ALTER TABLE fk_partitioned_fk_1 DROP COLUMN fdrop1, DROP COLUMN fdrop2, DROP COLUMN fdrop3;
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_1 FOR VALUES FROM (0,0) TO (1000,1000);
-ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) REFERENCES
-fk_notpartitioned_pk;
-CREATE TABLE fk_partitioned_fk_2 (b int, fdrop1 int, fdrop2 int, a int);
-ALTER TABLE fk_partitioned_fk_2 DROP COLUMN fdrop1, DROP COLUMN fdrop2;
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES FROM (1000,1000) TO (2000,2000);
-CREATE TABLE fk_partitioned_fk_3 (fdrop1 int, fdrop2 int, fdrop3 int, fdrop4 int, b int, a int)
-  PARTITION BY HASH (a);
-ALTER TABLE fk_partitioned_fk_3 DROP COLUMN fdrop1, DROP COLUMN fdrop2,
-  DROP COLUMN fdrop3, DROP COLUMN fdrop4;
-CREATE TABLE fk_partitioned_fk_3_0 PARTITION OF fk_partitioned_fk_3 FOR VALUES WITH (MODULUS 5, REMAINDER 0);
-CREATE TABLE fk_partitioned_fk_3_1 PARTITION OF fk_partitioned_fk_3 FOR VALUES WITH (MODULUS 5, REMAINDER 1);
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_3
-  FOR VALUES FROM (2000,2000) TO (3000,3000);
--- Creating a foreign key with ONLY on a partitioned table referencing
--- a non-partitioned table fails.
-ALTER TABLE ONLY fk_partitioned_fk ADD FOREIGN KEY (a, b)
-  REFERENCES fk_notpartitioned_pk;
-ERROR: cannot use ONLY for foreign key on partitioned table "fk_partitioned_fk" referencing relation "fk_notpartitioned_pk"
--- Adding a NOT VALID foreign key on a partitioned table referencing
--- a non-partitioned table fails.
-ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b)
-  REFERENCES fk_notpartitioned_pk NOT VALID;
-ERROR: cannot add NOT VALID foreign key on partitioned table "fk_partitioned_fk" referencing relation "fk_notpartitioned_pk"
-DETAIL: This feature is not yet supported on partitioned tables.
--- these inserts, targeting both the partition directly as well as the
--- partitioned table, should all fail
-INSERT INTO fk_partitioned_fk (a,b) VALUES (500, 501);
-ERROR: insert or update on table "fk_partitioned_fk_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(500, 501) is not present in table "fk_notpartitioned_pk".
-INSERT INTO fk_partitioned_fk_1 (a,b) VALUES (500, 501);
-ERROR: insert or update on table "fk_partitioned_fk_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(500, 501) is not present in table "fk_notpartitioned_pk".
-INSERT INTO fk_partitioned_fk (a,b) VALUES (1500, 1501);
-ERROR: insert or update on table "fk_partitioned_fk_2" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(1500, 1501) is not present in table "fk_notpartitioned_pk".
-INSERT INTO fk_partitioned_fk_2 (a,b) VALUES (1500, 1501);
-ERROR: insert or update on table "fk_partitioned_fk_2" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(1500, 1501) is not present in table "fk_notpartitioned_pk".
-INSERT INTO fk_partitioned_fk (a,b) VALUES (2500, 2502);
-ERROR: insert or update on table "fk_partitioned_fk_3_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(2500, 2502) is not present in table "fk_notpartitioned_pk".
-INSERT INTO fk_partitioned_fk_3 (a,b) VALUES (2500, 2502);
-ERROR: insert or update on table "fk_partitioned_fk_3_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(2500, 2502) is not present in table "fk_notpartitioned_pk".
-INSERT INTO fk_partitioned_fk (a,b) VALUES (2501, 2503);
-ERROR: insert or update on table "fk_partitioned_fk_3_0" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(2501, 2503) is not present in table "fk_notpartitioned_pk".
-INSERT INTO fk_partitioned_fk_3 (a,b) VALUES (2501, 2503); -ERROR: insert or update on table "fk_partitioned_fk_3_0" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" -DETAIL: Key (a, b)=(2501, 2503) is not present in table "fk_notpartitioned_pk". --- but if we insert the values that make them valid, then they work -INSERT INTO fk_notpartitioned_pk VALUES (500, 501), (1500, 1501), - (2500, 2502), (2501, 2503); -INSERT INTO fk_partitioned_fk (a,b) VALUES (500, 501); -INSERT INTO fk_partitioned_fk (a,b) VALUES (1500, 1501); -INSERT INTO fk_partitioned_fk (a,b) VALUES (2500, 2502); -INSERT INTO fk_partitioned_fk (a,b) VALUES (2501, 2503); --- this update fails because there is no referenced row -UPDATE fk_partitioned_fk SET a = a + 1 WHERE a = 2501; -ERROR: insert or update on table "fk_partitioned_fk_3_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" -DETAIL: Key (a, b)=(2502, 2503) is not present in table "fk_notpartitioned_pk". --- but we can fix it thusly: -INSERT INTO fk_notpartitioned_pk (a,b) VALUES (2502, 2503); -UPDATE fk_partitioned_fk SET a = a + 1 WHERE a = 2501; --- these updates would leave lingering rows in the referencing table; disallow -UPDATE fk_notpartitioned_pk SET b = 502 WHERE a = 500; -ERROR: update or delete on table "fk_notpartitioned_pk" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" on table "fk_partitioned_fk" -DETAIL: Key (a, b)=(500, 501) is still referenced from table "fk_partitioned_fk". -UPDATE fk_notpartitioned_pk SET b = 1502 WHERE a = 1500; -ERROR: update or delete on table "fk_notpartitioned_pk" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" on table "fk_partitioned_fk" -DETAIL: Key (a, b)=(1500, 1501) is still referenced from table "fk_partitioned_fk". -UPDATE fk_notpartitioned_pk SET b = 2504 WHERE a = 2500; -ERROR: update or delete on table "fk_notpartitioned_pk" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" on table "fk_partitioned_fk" -DETAIL: Key (a, b)=(2500, 2502) is still referenced from table "fk_partitioned_fk". --- check psql behavior -\d fk_notpartitioned_pk - Table "public.fk_notpartitioned_pk" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | not null | - b | integer | | not null | -Indexes: - "fk_notpartitioned_pk_pkey" PRIMARY KEY, btree (a, b) -Referenced by: - TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) - -ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_b_fkey; --- done. -DROP TABLE fk_notpartitioned_pk, fk_partitioned_fk; --- Altering a type referenced by a foreign key needs to drop/recreate the FK. --- Ensure that works. -CREATE TABLE fk_notpartitioned_pk (a INT, PRIMARY KEY(a), CHECK (a > 0)); -CREATE TABLE fk_partitioned_fk (a INT REFERENCES fk_notpartitioned_pk(a) PRIMARY KEY) PARTITION BY RANGE(a); -CREATE TABLE fk_partitioned_fk_1 PARTITION OF fk_partitioned_fk FOR VALUES FROM (MINVALUE) TO (MAXVALUE); -INSERT INTO fk_notpartitioned_pk VALUES (1); -INSERT INTO fk_partitioned_fk VALUES (1); -ALTER TABLE fk_notpartitioned_pk ALTER COLUMN a TYPE bigint; -DELETE FROM fk_notpartitioned_pk WHERE a = 1; -ERROR: update or delete on table "fk_notpartitioned_pk" violates foreign key constraint "fk_partitioned_fk_a_fkey" on table "fk_partitioned_fk" -DETAIL: Key (a)=(1) is still referenced from table "fk_partitioned_fk". 
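-- Aside: a self-contained sketch of the property just exercised above. Altering
-- the type of a referenced column forces the foreign key to be dropped and
-- recreated internally, and enforcement must survive that round trip. Table
-- names (t_pk, t_fk) are illustrative only, not part of the regression test.
CREATE TABLE t_pk (a int PRIMARY KEY);
CREATE TABLE t_fk (a int REFERENCES t_pk);
INSERT INTO t_pk VALUES (1);
INSERT INTO t_fk VALUES (1);
ALTER TABLE t_pk ALTER COLUMN a TYPE bigint;  -- FK is recreated behind the scenes
DELETE FROM t_pk WHERE a = 1;                 -- still rejected: row is referenced
DROP TABLE t_fk, t_pk;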
-DROP TABLE fk_notpartitioned_pk, fk_partitioned_fk; --- Test some other exotic foreign key features: MATCH SIMPLE, ON UPDATE/DELETE --- actions -CREATE TABLE fk_notpartitioned_pk (a int, b int, primary key (a, b)); -CREATE TABLE fk_partitioned_fk (a int default 2501, b int default 142857) PARTITION BY LIST (a); -CREATE TABLE fk_partitioned_fk_1 PARTITION OF fk_partitioned_fk FOR VALUES IN (NULL,500,501,502); -ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) - REFERENCES fk_notpartitioned_pk MATCH SIMPLE - ON DELETE SET NULL ON UPDATE SET NULL; -CREATE TABLE fk_partitioned_fk_2 PARTITION OF fk_partitioned_fk FOR VALUES IN (1500,1502); -CREATE TABLE fk_partitioned_fk_3 (a int, b int); -ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_3 FOR VALUES IN (2500,2501,2502,2503); --- this insert fails -INSERT INTO fk_partitioned_fk (a, b) VALUES (2502, 2503); -ERROR: insert or update on table "fk_partitioned_fk_3" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" -DETAIL: Key (a, b)=(2502, 2503) is not present in table "fk_notpartitioned_pk". -INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, 2503); -ERROR: insert or update on table "fk_partitioned_fk_3" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" -DETAIL: Key (a, b)=(2502, 2503) is not present in table "fk_notpartitioned_pk". --- but since the FK is MATCH SIMPLE, this one doesn't -INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, NULL); --- now create the referenced row ... -INSERT INTO fk_notpartitioned_pk VALUES (2502, 2503); ---- and now the same insert works -INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, 2503); --- this always works -INSERT INTO fk_partitioned_fk (a,b) VALUES (NULL, NULL); --- MATCH FULL -INSERT INTO fk_notpartitioned_pk VALUES (1, 2); -CREATE TABLE fk_partitioned_fk_full (x int, y int) PARTITION BY RANGE (x); -CREATE TABLE fk_partitioned_fk_full_1 PARTITION OF fk_partitioned_fk_full DEFAULT; -INSERT INTO fk_partitioned_fk_full VALUES (1, NULL); -ALTER TABLE fk_partitioned_fk_full ADD FOREIGN KEY (x, y) REFERENCES fk_notpartitioned_pk MATCH FULL; -- fails -ERROR: insert or update on table "fk_partitioned_fk_full_1" violates foreign key constraint "fk_partitioned_fk_full_x_y_fkey" -DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. -TRUNCATE fk_partitioned_fk_full; -ALTER TABLE fk_partitioned_fk_full ADD FOREIGN KEY (x, y) REFERENCES fk_notpartitioned_pk MATCH FULL; -INSERT INTO fk_partitioned_fk_full VALUES (1, NULL); -- fails -ERROR: insert or update on table "fk_partitioned_fk_full_1" violates foreign key constraint "fk_partitioned_fk_full_x_y_fkey" -DETAIL: MATCH FULL does not allow mixing of null and nonnull key values.
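-- Aside: the MATCH SIMPLE / MATCH FULL contrast shown above, reduced to a
-- standalone sketch (m_* names are illustrative). MATCH SIMPLE, the default,
-- skips the check as soon as any referencing column is NULL; MATCH FULL
-- accepts only all-NULL or all-non-NULL keys.
CREATE TABLE m_pk (a int, b int, PRIMARY KEY (a, b));
CREATE TABLE m_simple (x int, y int, FOREIGN KEY (x, y) REFERENCES m_pk);
CREATE TABLE m_full (x int, y int, FOREIGN KEY (x, y) REFERENCES m_pk MATCH FULL);
INSERT INTO m_simple VALUES (1, NULL);   -- ok: one NULL disables the check
INSERT INTO m_full VALUES (NULL, NULL);  -- ok: all key columns NULL
INSERT INTO m_full VALUES (1, NULL);     -- fails: mixed NULL and non-NULL
DROP TABLE m_simple, m_full, m_pk;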
-DROP TABLE fk_partitioned_fk_full; --- ON UPDATE SET NULL -SELECT tableoid::regclass, a, b FROM fk_partitioned_fk WHERE b IS NULL ORDER BY a; - tableoid | a | b ----------------------+------+--- - fk_partitioned_fk_3 | 2502 | - fk_partitioned_fk_1 | | -(2 rows) - -UPDATE fk_notpartitioned_pk SET a = a + 1 WHERE a = 2502; -SELECT tableoid::regclass, a, b FROM fk_partitioned_fk WHERE b IS NULL ORDER BY a; - tableoid | a | b ----------------------+------+--- - fk_partitioned_fk_3 | 2502 | - fk_partitioned_fk_1 | | - fk_partitioned_fk_1 | | -(3 rows) - --- ON DELETE SET NULL -INSERT INTO fk_partitioned_fk VALUES (2503, 2503); -SELECT count(*) FROM fk_partitioned_fk WHERE a IS NULL; - count -------- - 2 -(1 row) - -DELETE FROM fk_notpartitioned_pk; -SELECT count(*) FROM fk_partitioned_fk WHERE a IS NULL; - count -------- - 3 -(1 row) - --- ON UPDATE/DELETE SET DEFAULT -ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_b_fkey; -ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) - REFERENCES fk_notpartitioned_pk - ON DELETE SET DEFAULT ON UPDATE SET DEFAULT; -INSERT INTO fk_notpartitioned_pk VALUES (2502, 2503); -INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, 2503); --- this fails, because the defaults for the referencing table are not present --- in the referenced table: -UPDATE fk_notpartitioned_pk SET a = 1500 WHERE a = 2502; -ERROR: insert or update on table "fk_partitioned_fk_3" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" -DETAIL: Key (a, b)=(2501, 142857) is not present in table "fk_notpartitioned_pk". --- but inserting the row we can make it work: -INSERT INTO fk_notpartitioned_pk VALUES (2501, 142857); -UPDATE fk_notpartitioned_pk SET a = 1500 WHERE a = 2502; -SELECT * FROM fk_partitioned_fk WHERE b = 142857; - a | b -------+-------- - 2501 | 142857 -(1 row) - --- ON DELETE SET NULL column_list -ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_b_fkey; -ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) - REFERENCES fk_notpartitioned_pk - ON DELETE SET NULL (a); -BEGIN; -DELETE FROM fk_notpartitioned_pk WHERE b = 142857; -SELECT * FROM fk_partitioned_fk WHERE a IS NOT NULL OR b IS NOT NULL ORDER BY a NULLS LAST; - a | b -------+-------- - 2502 | - | 142857 -(2 rows) - -ROLLBACK; --- ON DELETE SET DEFAULT column_list -ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_b_fkey; -ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) - REFERENCES fk_notpartitioned_pk - ON DELETE SET DEFAULT (a); -BEGIN; -DELETE FROM fk_partitioned_fk; -DELETE FROM fk_notpartitioned_pk; -INSERT INTO fk_notpartitioned_pk VALUES (500, 100000), (2501, 100000); -INSERT INTO fk_partitioned_fk VALUES (500, 100000); -DELETE FROM fk_notpartitioned_pk WHERE a = 500; -SELECT * FROM fk_partitioned_fk ORDER BY a; - a | b -------+-------- - 2501 | 100000 -(1 row) - -ROLLBACK; --- ON UPDATE/DELETE CASCADE -ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_b_fkey; -ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) - REFERENCES fk_notpartitioned_pk - ON DELETE CASCADE ON UPDATE CASCADE; -UPDATE fk_notpartitioned_pk SET a = 2502 WHERE a = 2501; -SELECT * FROM fk_partitioned_fk WHERE b = 142857; - a | b -------+-------- - 2502 | 142857 -(1 row) - --- Now you see it ... -SELECT * FROM fk_partitioned_fk WHERE b = 142857; - a | b -------+-------- - 2502 | 142857 -(1 row) - -DELETE FROM fk_notpartitioned_pk WHERE b = 142857; --- now you don't. 
-SELECT * FROM fk_partitioned_fk WHERE a = 142857; - a | b ----+--- -(0 rows) - --- verify that DROP works -DROP TABLE fk_partitioned_fk_2; --- Test behavior of the constraint together with attaching and detaching --- partitions. -CREATE TABLE fk_partitioned_fk_2 PARTITION OF fk_partitioned_fk FOR VALUES IN (1500,1502); -ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_2; -BEGIN; -DROP TABLE fk_partitioned_fk; --- constraint should still be there -\d fk_partitioned_fk_2; - Table "public.fk_partitioned_fk_2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | 2501 - b | integer | | | 142857 -Foreign-key constraints: - "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE - -ROLLBACK; -ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES IN (1500,1502); -DROP TABLE fk_partitioned_fk_2; -CREATE TABLE fk_partitioned_fk_2 (b int, c text, a int, - FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk ON UPDATE CASCADE ON DELETE CASCADE); -ALTER TABLE fk_partitioned_fk_2 DROP COLUMN c; -ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES IN (1500,1502); --- should have only one constraint -\d fk_partitioned_fk_2 - Table "public.fk_partitioned_fk_2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - b | integer | | | - a | integer | | | -Partition of: fk_partitioned_fk FOR VALUES IN (1500, 1502) -Foreign-key constraints: - TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE - -DROP TABLE fk_partitioned_fk_2; -CREATE TABLE fk_partitioned_fk_4 (a int, b int, FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE) PARTITION BY RANGE (b, a); -CREATE TABLE fk_partitioned_fk_4_1 PARTITION OF fk_partitioned_fk_4 FOR VALUES FROM (1,1) TO (100,100); -CREATE TABLE fk_partitioned_fk_4_2 (a int, b int, FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE SET NULL); -ALTER TABLE fk_partitioned_fk_4 ATTACH PARTITION fk_partitioned_fk_4_2 FOR VALUES FROM (100,100) TO (1000,1000); -ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_4 FOR VALUES IN (3500,3502); -ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_4; -ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_4 FOR VALUES IN (3500,3502); --- should only have one constraint -\d fk_partitioned_fk_4 - Partitioned table "public.fk_partitioned_fk_4" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Partition of: fk_partitioned_fk FOR VALUES IN (3500, 3502) -Partition key: RANGE (b, a) -Foreign-key constraints: - TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE -Number of partitions: 2 (Use \d+ to list them.) 
- -\d fk_partitioned_fk_4_1 - Table "public.fk_partitioned_fk_4_1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Partition of: fk_partitioned_fk_4 FOR VALUES FROM (1, 1) TO (100, 100) -Foreign-key constraints: - TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE - --- this one has an FK with mismatched properties -\d fk_partitioned_fk_4_2 - Table "public.fk_partitioned_fk_4_2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Partition of: fk_partitioned_fk_4 FOR VALUES FROM (100, 100) TO (1000, 1000) -Foreign-key constraints: - "fk_partitioned_fk_4_2_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE SET NULL - TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE - -CREATE TABLE fk_partitioned_fk_5 (a int, b int, - FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE, - FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) MATCH FULL ON UPDATE CASCADE ON DELETE CASCADE) - PARTITION BY RANGE (a); -CREATE TABLE fk_partitioned_fk_5_1 (a int, b int, FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk); -ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_5 FOR VALUES IN (4500); -ALTER TABLE fk_partitioned_fk_5 ATTACH PARTITION fk_partitioned_fk_5_1 FOR VALUES FROM (0) TO (10); -ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_5; -ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_5 FOR VALUES IN (4500); --- this one has two constraints, similar but not quite the one in the parent, --- so it gets a new one -\d fk_partitioned_fk_5 - Partitioned table "public.fk_partitioned_fk_5" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Partition of: fk_partitioned_fk FOR VALUES IN (4500) -Partition key: RANGE (a) -Foreign-key constraints: - "fk_partitioned_fk_5_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE - "fk_partitioned_fk_5_a_b_fkey1" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) MATCH FULL ON UPDATE CASCADE ON DELETE CASCADE - TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE -Number of partitions: 1 (Use \d+ to list them.) 
- --- verify that reattaching a child with multiple candidate --- constraints works -ALTER TABLE fk_partitioned_fk_5 DETACH PARTITION fk_partitioned_fk_5_1; -ALTER TABLE fk_partitioned_fk_5 ATTACH PARTITION fk_partitioned_fk_5_1 FOR VALUES FROM (0) TO (10); -\d fk_partitioned_fk_5_1 - Table "public.fk_partitioned_fk_5_1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Partition of: fk_partitioned_fk_5 FOR VALUES FROM (0) TO (10) -Foreign-key constraints: - "fk_partitioned_fk_5_1_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) - TABLE "fk_partitioned_fk_5" CONSTRAINT "fk_partitioned_fk_5_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE - TABLE "fk_partitioned_fk_5" CONSTRAINT "fk_partitioned_fk_5_a_b_fkey1" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) MATCH FULL ON UPDATE CASCADE ON DELETE CASCADE - TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE - --- verify that attaching a table checks that the existing data satisfies the --- constraint -CREATE TABLE fk_partitioned_fk_2 (a int, b int) PARTITION BY RANGE (b); -CREATE TABLE fk_partitioned_fk_2_1 PARTITION OF fk_partitioned_fk_2 FOR VALUES FROM (0) TO (1000); -CREATE TABLE fk_partitioned_fk_2_2 PARTITION OF fk_partitioned_fk_2 FOR VALUES FROM (1000) TO (2000); -INSERT INTO fk_partitioned_fk_2 VALUES (1600, 601), (1600, 1601); -ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 - FOR VALUES IN (1600); -ERROR: insert or update on table "fk_partitioned_fk_2_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" -DETAIL: Key (a, b)=(1600, 601) is not present in table "fk_notpartitioned_pk". -INSERT INTO fk_notpartitioned_pk VALUES (1600, 601), (1600, 1601); -ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 - FOR VALUES IN (1600); --- leave these tables around intentionally --- test the case when the referenced table is owned by a different user -create role regress_other_partitioned_fk_owner; -grant references on fk_notpartitioned_pk to regress_other_partitioned_fk_owner; -set role regress_other_partitioned_fk_owner; -create table other_partitioned_fk(a int, b int) partition by list (a); -create table other_partitioned_fk_1 partition of other_partitioned_fk - for values in (2048); -insert into other_partitioned_fk - select 2048, x from generate_series(1,10) x; --- this should fail -alter table other_partitioned_fk add foreign key (a, b) - references fk_notpartitioned_pk(a, b); -ERROR: insert or update on table "other_partitioned_fk_1" violates foreign key constraint "other_partitioned_fk_a_b_fkey" -DETAIL: Key (a, b)=(2048, 1) is not present in table "fk_notpartitioned_pk". --- add the missing keys and retry -reset role; -insert into fk_notpartitioned_pk (a, b) - select 2048, x from generate_series(1,10) x; -set role regress_other_partitioned_fk_owner; -alter table other_partitioned_fk add foreign key (a, b) - references fk_notpartitioned_pk(a, b); --- clean up -drop table other_partitioned_fk; -reset role; -revoke all on fk_notpartitioned_pk from regress_other_partitioned_fk_owner; -drop role regress_other_partitioned_fk_owner; --- --- Test self-referencing foreign key with partition.
--- This should create only one fk constraint per partition --- -CREATE TABLE parted_self_fk ( - id bigint NOT NULL PRIMARY KEY, - id_abc bigint, - FOREIGN KEY (id_abc) REFERENCES parted_self_fk(id) -) -PARTITION BY RANGE (id); -CREATE TABLE part1_self_fk ( - id bigint NOT NULL PRIMARY KEY, - id_abc bigint -); -ALTER TABLE parted_self_fk ATTACH PARTITION part1_self_fk FOR VALUES FROM (0) TO (10); -CREATE TABLE part2_self_fk PARTITION OF parted_self_fk FOR VALUES FROM (10) TO (20); -CREATE TABLE part3_self_fk ( -- a partitioned partition - id bigint NOT NULL PRIMARY KEY, - id_abc bigint -) PARTITION BY RANGE (id); -CREATE TABLE part32_self_fk PARTITION OF part3_self_fk FOR VALUES FROM (20) TO (30); -ALTER TABLE parted_self_fk ATTACH PARTITION part3_self_fk FOR VALUES FROM (20) TO (40); -CREATE TABLE part33_self_fk ( - id bigint NOT NULL PRIMARY KEY, - id_abc bigint -); -ALTER TABLE part3_self_fk ATTACH PARTITION part33_self_fk FOR VALUES FROM (30) TO (40); -SELECT cr.relname, co.conname, co.contype, co.convalidated, - p.conname AS conparent, p.convalidated, cf.relname AS foreignrel -FROM pg_constraint co -JOIN pg_class cr ON cr.oid = co.conrelid -LEFT JOIN pg_class cf ON cf.oid = co.confrelid -LEFT JOIN pg_constraint p ON p.oid = co.conparentid -WHERE cr.oid IN (SELECT relid FROM pg_partition_tree('parted_self_fk')) -ORDER BY co.contype, cr.relname, co.conname, p.conname; - relname | conname | contype | convalidated | conparent | convalidated | foreignrel -----------------+----------------------------+---------+--------------+----------------------------+--------------+---------------- - part1_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk - part2_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk - part32_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk - part33_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk - part3_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk - parted_self_fk | parted_self_fk_id_abc_fkey | f | t | | | parted_self_fk - part1_self_fk | part1_self_fk_id_not_null | n | t | | | - part2_self_fk | parted_self_fk_id_not_null | n | t | | | - part32_self_fk | part3_self_fk_id_not_null | n | t | | | - part33_self_fk | part33_self_fk_id_not_null | n | t | | | - part3_self_fk | part3_self_fk_id_not_null | n | t | | | - parted_self_fk | parted_self_fk_id_not_null | n | t | | | - part1_self_fk | part1_self_fk_pkey | p | t | parted_self_fk_pkey | t | - part2_self_fk | part2_self_fk_pkey | p | t | parted_self_fk_pkey | t | - part32_self_fk | part32_self_fk_pkey | p | t | part3_self_fk_pkey | t | - part33_self_fk | part33_self_fk_pkey | p | t | part3_self_fk_pkey | t | - part3_self_fk | part3_self_fk_pkey | p | t | parted_self_fk_pkey | t | - parted_self_fk | parted_self_fk_pkey | p | t | | | -(18 rows) - --- detach and re-attach multiple times just to ensure everything is kosher -ALTER TABLE parted_self_fk DETACH PARTITION part2_self_fk; -ALTER TABLE parted_self_fk ATTACH PARTITION part2_self_fk FOR VALUES FROM (10) TO (20); -ALTER TABLE parted_self_fk DETACH PARTITION part2_self_fk; -ALTER TABLE parted_self_fk ATTACH PARTITION part2_self_fk FOR VALUES FROM (10) TO (20); -SELECT cr.relname, co.conname, co.contype, co.convalidated, - p.conname AS conparent, p.convalidated, cf.relname AS foreignrel -FROM pg_constraint co -JOIN pg_class cr ON cr.oid = 
co.conrelid -LEFT JOIN pg_class cf ON cf.oid = co.confrelid -LEFT JOIN pg_constraint p ON p.oid = co.conparentid -WHERE cr.oid IN (SELECT relid FROM pg_partition_tree('parted_self_fk')) -ORDER BY co.contype, cr.relname, co.conname, p.conname; - relname | conname | contype | convalidated | conparent | convalidated | foreignrel -----------------+----------------------------+---------+--------------+----------------------------+--------------+---------------- - part1_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk - part2_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk - part32_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk - part33_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk - part3_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk - parted_self_fk | parted_self_fk_id_abc_fkey | f | t | | | parted_self_fk - part1_self_fk | part1_self_fk_id_not_null | n | t | | | - part2_self_fk | parted_self_fk_id_not_null | n | t | | | - part32_self_fk | part3_self_fk_id_not_null | n | t | | | - part33_self_fk | part33_self_fk_id_not_null | n | t | | | - part3_self_fk | part3_self_fk_id_not_null | n | t | | | - parted_self_fk | parted_self_fk_id_not_null | n | t | | | - part1_self_fk | part1_self_fk_pkey | p | t | parted_self_fk_pkey | t | - part2_self_fk | part2_self_fk_pkey | p | t | parted_self_fk_pkey | t | - part32_self_fk | part32_self_fk_pkey | p | t | part3_self_fk_pkey | t | - part33_self_fk | part33_self_fk_pkey | p | t | part3_self_fk_pkey | t | - part3_self_fk | part3_self_fk_pkey | p | t | parted_self_fk_pkey | t | - parted_self_fk | parted_self_fk_pkey | p | t | | | -(18 rows) - --- Leave this table around, for pg_upgrade/pg_dump tests --- Test creating a constraint at the parent that already exists in partitions. --- There should be no duplicated constraints, and attempts to drop the --- constraint in partitions should raise appropriate errors. 
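-- Aside: a reduced sketch of the merge behavior the fkpart0 block below
-- exercises (s_* names are illustrative, not part of the test). A foreign key
-- declared on a partition is absorbed rather than duplicated when an identical
-- foreign key is later added at the parent, and the partition can then no
-- longer drop it independently.
CREATE TABLE s_pk (a int PRIMARY KEY);
CREATE TABLE s_parent (a int) PARTITION BY LIST (a);
CREATE TABLE s_child PARTITION OF s_parent
    (FOREIGN KEY (a) REFERENCES s_pk) FOR VALUES IN (1);
ALTER TABLE s_parent ADD FOREIGN KEY (a) REFERENCES s_pk;
ALTER TABLE s_child DROP CONSTRAINT s_child_a_fkey;  -- fails: now inherited
DROP TABLE s_parent, s_pk;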
-create schema fkpart0 - create table pkey (a int primary key) - create table fk_part (a int) partition by list (a) - create table fk_part_1 partition of fk_part - (foreign key (a) references fkpart0.pkey) for values in (1) - create table fk_part_23 partition of fk_part - (foreign key (a) references fkpart0.pkey) for values in (2, 3) - partition by list (a) - create table fk_part_23_2 partition of fk_part_23 for values in (2); -alter table fkpart0.fk_part add foreign key (a) references fkpart0.pkey; -\d fkpart0.fk_part_1 \\ -- should have only one FK - Table "fkpart0.fk_part_1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | -Partition of: fkpart0.fk_part FOR VALUES IN (1) -Foreign-key constraints: - TABLE "fkpart0.fk_part" CONSTRAINT "fk_part_a_fkey" FOREIGN KEY (a) REFERENCES fkpart0.pkey(a) - -alter table fkpart0.fk_part_1 drop constraint fk_part_1_a_fkey; -ERROR: cannot drop inherited constraint "fk_part_1_a_fkey" of relation "fk_part_1" -\d fkpart0.fk_part_23 \\ -- should have only one FK - Partitioned table "fkpart0.fk_part_23" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | -Partition of: fkpart0.fk_part FOR VALUES IN (2, 3) -Partition key: LIST (a) -Foreign-key constraints: - TABLE "fkpart0.fk_part" CONSTRAINT "fk_part_a_fkey" FOREIGN KEY (a) REFERENCES fkpart0.pkey(a) -Number of partitions: 1 (Use \d+ to list them.) - -\d fkpart0.fk_part_23_2 \\ -- should have only one FK - Table "fkpart0.fk_part_23_2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | -Partition of: fkpart0.fk_part_23 FOR VALUES IN (2) -Foreign-key constraints: - TABLE "fkpart0.fk_part" CONSTRAINT "fk_part_a_fkey" FOREIGN KEY (a) REFERENCES fkpart0.pkey(a) - -alter table fkpart0.fk_part_23 drop constraint fk_part_23_a_fkey; -ERROR: cannot drop inherited constraint "fk_part_23_a_fkey" of relation "fk_part_23" -alter table fkpart0.fk_part_23_2 drop constraint fk_part_23_a_fkey; -ERROR: cannot drop inherited constraint "fk_part_23_a_fkey" of relation "fk_part_23_2" -create table fkpart0.fk_part_4 partition of fkpart0.fk_part for values in (4); -\d fkpart0.fk_part_4 - Table "fkpart0.fk_part_4" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | -Partition of: fkpart0.fk_part FOR VALUES IN (4) -Foreign-key constraints: - TABLE "fkpart0.fk_part" CONSTRAINT "fk_part_a_fkey" FOREIGN KEY (a) REFERENCES fkpart0.pkey(a) - -alter table fkpart0.fk_part_4 drop constraint fk_part_a_fkey; -ERROR: cannot drop inherited constraint "fk_part_a_fkey" of relation "fk_part_4" -create table fkpart0.fk_part_56 partition of fkpart0.fk_part - for values in (5,6) partition by list (a); -create table fkpart0.fk_part_56_5 partition of fkpart0.fk_part_56 - for values in (5); -\d fkpart0.fk_part_56 - Partitioned table "fkpart0.fk_part_56" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | -Partition of: fkpart0.fk_part FOR VALUES IN (5, 6) -Partition key: LIST (a) -Foreign-key constraints: - TABLE "fkpart0.fk_part" CONSTRAINT "fk_part_a_fkey" FOREIGN KEY (a) REFERENCES fkpart0.pkey(a) -Number of partitions: 1 (Use \d+ to list them.) 
- -alter table fkpart0.fk_part_56 drop constraint fk_part_a_fkey; -ERROR: cannot drop inherited constraint "fk_part_a_fkey" of relation "fk_part_56" -alter table fkpart0.fk_part_56_5 drop constraint fk_part_a_fkey; -ERROR: cannot drop inherited constraint "fk_part_a_fkey" of relation "fk_part_56_5" --- verify that attaching and detaching partitions maintains the right set of --- triggers -create schema fkpart1 - create table pkey (a int primary key) - create table fk_part (a int) partition by list (a) - create table fk_part_1 partition of fk_part for values in (1) partition by list (a) - create table fk_part_1_1 partition of fk_part_1 for values in (1); -alter table fkpart1.fk_part add foreign key (a) references fkpart1.pkey; -insert into fkpart1.fk_part values (1); -- should fail -ERROR: insert or update on table "fk_part_1_1" violates foreign key constraint "fk_part_a_fkey" -DETAIL: Key (a)=(1) is not present in table "pkey". -insert into fkpart1.pkey values (1); -insert into fkpart1.fk_part values (1); -delete from fkpart1.pkey where a = 1; -- should fail -ERROR: update or delete on table "pkey" violates foreign key constraint "fk_part_a_fkey" on table "fk_part" -DETAIL: Key (a)=(1) is still referenced from table "fk_part". -alter table fkpart1.fk_part detach partition fkpart1.fk_part_1; -create table fkpart1.fk_part_1_2 partition of fkpart1.fk_part_1 for values in (2); -insert into fkpart1.fk_part_1 values (2); -- should fail -ERROR: insert or update on table "fk_part_1_2" violates foreign key constraint "fk_part_a_fkey" -DETAIL: Key (a)=(2) is not present in table "pkey". -delete from fkpart1.pkey where a = 1; -ERROR: update or delete on table "pkey" violates foreign key constraint "fk_part_a_fkey" on table "fk_part_1" -DETAIL: Key (a)=(1) is still referenced from table "fk_part_1". 
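-- Aside: the property exercised by the fkpart1 block above, as a standalone
-- sketch (d_* names are illustrative): when a partition is detached, it keeps
-- its own copy of the inherited foreign key, and that copy keeps firing.
CREATE TABLE d_pk (a int PRIMARY KEY);
CREATE TABLE d_fk (a int) PARTITION BY LIST (a);
CREATE TABLE d_fk_1 PARTITION OF d_fk FOR VALUES IN (1);
ALTER TABLE d_fk ADD FOREIGN KEY (a) REFERENCES d_pk;
ALTER TABLE d_fk DETACH PARTITION d_fk_1;
INSERT INTO d_fk_1 VALUES (1);  -- still fails: the FK survived the detach
DROP TABLE d_fk_1, d_fk, d_pk;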
--- verify that attaching and detaching partitions manipulates the inheritance --- properties of their FK constraints correctly -create schema fkpart2 - create table pkey (a int primary key) - create table fk_part (a int, constraint fkey foreign key (a) references fkpart2.pkey) partition by list (a) - create table fk_part_1 partition of fkpart2.fk_part for values in (1) partition by list (a) - create table fk_part_1_1 (a int, constraint my_fkey foreign key (a) references fkpart2.pkey); -alter table fkpart2.fk_part_1 attach partition fkpart2.fk_part_1_1 for values in (1); -alter table fkpart2.fk_part_1 drop constraint fkey; -- should fail -ERROR: cannot drop inherited constraint "fkey" of relation "fk_part_1" -alter table fkpart2.fk_part_1_1 drop constraint my_fkey; -- should fail -ERROR: cannot drop inherited constraint "my_fkey" of relation "fk_part_1_1" -alter table fkpart2.fk_part detach partition fkpart2.fk_part_1; -alter table fkpart2.fk_part_1 drop constraint fkey; -- ok -alter table fkpart2.fk_part_1_1 drop constraint my_fkey; -- doesn't exist -ERROR: constraint "my_fkey" of relation "fk_part_1_1" does not exist --- verify constraint deferrability -create schema fkpart3 - create table pkey (a int primary key) - create table fk_part (a int, constraint fkey foreign key (a) references fkpart3.pkey deferrable initially immediate) partition by list (a) - create table fk_part_1 partition of fkpart3.fk_part for values in (1) partition by list (a) - create table fk_part_1_1 partition of fkpart3.fk_part_1 for values in (1) - create table fk_part_2 partition of fkpart3.fk_part for values in (2); -begin; -set constraints fkpart3.fkey deferred; -insert into fkpart3.fk_part values (1); -insert into fkpart3.pkey values (1); -commit; -begin; -set constraints fkpart3.fkey deferred; -delete from fkpart3.pkey; -delete from fkpart3.fk_part; -commit; -drop schema fkpart0, fkpart1, fkpart2, fkpart3 cascade; -NOTICE: drop cascades to 10 other objects -DETAIL: drop cascades to table fkpart3.pkey -drop cascades to table fkpart3.fk_part -drop cascades to table fkpart2.pkey -drop cascades to table fkpart2.fk_part -drop cascades to table fkpart2.fk_part_1 -drop cascades to table fkpart1.pkey -drop cascades to table fkpart1.fk_part -drop cascades to table fkpart1.fk_part_1 -drop cascades to table fkpart0.pkey -drop cascades to table fkpart0.fk_part --- Test a partitioned table as referenced table. --- Verify basic functionality with a regular partition creation and a partition --- with a different column layout, as well as partitions added (created and --- attached) after creating the foreign key. 
-CREATE SCHEMA fkpart3; -SET search_path TO fkpart3; -CREATE TABLE pk (a int PRIMARY KEY) PARTITION BY RANGE (a); -CREATE TABLE pk1 PARTITION OF pk FOR VALUES FROM (0) TO (1000); -CREATE TABLE pk2 (b int, a int); -ALTER TABLE pk2 DROP COLUMN b; -ALTER TABLE pk2 ALTER a SET NOT NULL; -ALTER TABLE pk ATTACH PARTITION pk2 FOR VALUES FROM (1000) TO (2000); -CREATE TABLE fk (a int) PARTITION BY RANGE (a); -CREATE TABLE fk1 PARTITION OF fk FOR VALUES FROM (0) TO (750); -ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk; -CREATE TABLE fk2 (b int, a int) ; -ALTER TABLE fk2 DROP COLUMN b; -ALTER TABLE fk ATTACH PARTITION fk2 FOR VALUES FROM (750) TO (3500); -CREATE TABLE pk3 PARTITION OF pk FOR VALUES FROM (2000) TO (3000); -CREATE TABLE pk4 (LIKE pk); -ALTER TABLE pk ATTACH PARTITION pk4 FOR VALUES FROM (3000) TO (4000); -CREATE TABLE pk5 (c int, b int, a int NOT NULL) PARTITION BY RANGE (a); -ALTER TABLE pk5 DROP COLUMN b, DROP COLUMN c; -CREATE TABLE pk51 PARTITION OF pk5 FOR VALUES FROM (4000) TO (4500); -CREATE TABLE pk52 PARTITION OF pk5 FOR VALUES FROM (4500) TO (5000); -ALTER TABLE pk ATTACH PARTITION pk5 FOR VALUES FROM (4000) TO (5000); -CREATE TABLE fk3 PARTITION OF fk FOR VALUES FROM (3500) TO (5000); --- these should fail: referenced value not present -INSERT into fk VALUES (1); -ERROR: insert or update on table "fk1" violates foreign key constraint "fk_a_fkey" -DETAIL: Key (a)=(1) is not present in table "pk". -INSERT into fk VALUES (1000); -ERROR: insert or update on table "fk2" violates foreign key constraint "fk_a_fkey" -DETAIL: Key (a)=(1000) is not present in table "pk". -INSERT into fk VALUES (2000); -ERROR: insert or update on table "fk2" violates foreign key constraint "fk_a_fkey" -DETAIL: Key (a)=(2000) is not present in table "pk". -INSERT into fk VALUES (3000); -ERROR: insert or update on table "fk2" violates foreign key constraint "fk_a_fkey" -DETAIL: Key (a)=(3000) is not present in table "pk". -INSERT into fk VALUES (4000); -ERROR: insert or update on table "fk3" violates foreign key constraint "fk_a_fkey" -DETAIL: Key (a)=(4000) is not present in table "pk". -INSERT into fk VALUES (4500); -ERROR: insert or update on table "fk3" violates foreign key constraint "fk_a_fkey" -DETAIL: Key (a)=(4500) is not present in table "pk". --- insert into the referenced table, now they should work -INSERT into pk VALUES (1), (1000), (2000), (3000), (4000), (4500); -INSERT into fk VALUES (1), (1000), (2000), (3000), (4000), (4500); --- should fail: referencing value present -DELETE FROM pk WHERE a = 1; -ERROR: update or delete on table "pk1" violates foreign key constraint "fk_a_fkey1" on table "fk" -DETAIL: Key (a)=(1) is still referenced from table "fk". -DELETE FROM pk WHERE a = 1000; -ERROR: update or delete on table "pk2" violates foreign key constraint "fk_a_fkey2" on table "fk" -DETAIL: Key (a)=(1000) is still referenced from table "fk". -DELETE FROM pk WHERE a = 2000; -ERROR: update or delete on table "pk3" violates foreign key constraint "fk_a_fkey3" on table "fk" -DETAIL: Key (a)=(2000) is still referenced from table "fk". -DELETE FROM pk WHERE a = 3000; -ERROR: update or delete on table "pk4" violates foreign key constraint "fk_a_fkey4" on table "fk" -DETAIL: Key (a)=(3000) is still referenced from table "fk". -DELETE FROM pk WHERE a = 4000; -ERROR: update or delete on table "pk51" violates foreign key constraint "fk_a_fkey6" on table "fk" -DETAIL: Key (a)=(4000) is still referenced from table "fk". 
-DELETE FROM pk WHERE a = 4500; -ERROR: update or delete on table "pk52" violates foreign key constraint "fk_a_fkey7" on table "fk" -DETAIL: Key (a)=(4500) is still referenced from table "fk". -UPDATE pk SET a = 2 WHERE a = 1; -ERROR: update or delete on table "pk1" violates foreign key constraint "fk_a_fkey1" on table "fk" -DETAIL: Key (a)=(1) is still referenced from table "fk". -UPDATE pk SET a = 1002 WHERE a = 1000; -ERROR: update or delete on table "pk2" violates foreign key constraint "fk_a_fkey2" on table "fk" -DETAIL: Key (a)=(1000) is still referenced from table "fk". -UPDATE pk SET a = 2002 WHERE a = 2000; -ERROR: update or delete on table "pk3" violates foreign key constraint "fk_a_fkey3" on table "fk" -DETAIL: Key (a)=(2000) is still referenced from table "fk". -UPDATE pk SET a = 3002 WHERE a = 3000; -ERROR: update or delete on table "pk4" violates foreign key constraint "fk_a_fkey4" on table "fk" -DETAIL: Key (a)=(3000) is still referenced from table "fk". -UPDATE pk SET a = 4002 WHERE a = 4000; -ERROR: update or delete on table "pk51" violates foreign key constraint "fk_a_fkey6" on table "fk" -DETAIL: Key (a)=(4000) is still referenced from table "fk". -UPDATE pk SET a = 4502 WHERE a = 4500; -ERROR: update or delete on table "pk52" violates foreign key constraint "fk_a_fkey7" on table "fk" -DETAIL: Key (a)=(4500) is still referenced from table "fk". --- now they should work -DELETE FROM fk; -UPDATE pk SET a = 2 WHERE a = 1; -DELETE FROM pk WHERE a = 2; -UPDATE pk SET a = 1002 WHERE a = 1000; -DELETE FROM pk WHERE a = 1002; -UPDATE pk SET a = 2002 WHERE a = 2000; -DELETE FROM pk WHERE a = 2002; -UPDATE pk SET a = 3002 WHERE a = 3000; -DELETE FROM pk WHERE a = 3002; -UPDATE pk SET a = 4002 WHERE a = 4000; -DELETE FROM pk WHERE a = 4002; -UPDATE pk SET a = 4502 WHERE a = 4500; -DELETE FROM pk WHERE a = 4502; -CREATE SCHEMA fkpart4; -SET search_path TO fkpart4; --- dropping/detaching PARTITIONs is prevented if that would break --- a foreign key's existing data -CREATE TABLE droppk (a int PRIMARY KEY) PARTITION BY RANGE (a); -CREATE TABLE droppk1 PARTITION OF droppk FOR VALUES FROM (0) TO (1000); -CREATE TABLE droppk_d PARTITION OF droppk DEFAULT; -CREATE TABLE droppk2 PARTITION OF droppk FOR VALUES FROM (1000) TO (2000) - PARTITION BY RANGE (a); -CREATE TABLE droppk21 PARTITION OF droppk2 FOR VALUES FROM (1000) TO (1400); -CREATE TABLE droppk2_d PARTITION OF droppk2 DEFAULT; -INSERT into droppk VALUES (1), (1000), (1500), (2000); -CREATE TABLE dropfk (a int REFERENCES droppk); -INSERT into dropfk VALUES (1), (1000), (1500), (2000); --- these should all fail -ALTER TABLE droppk DETACH PARTITION droppk_d; -ERROR: removing partition "droppk_d" violates foreign key constraint "dropfk_a_fkey5" -DETAIL: Key (a)=(2000) is still referenced from table "dropfk". -ALTER TABLE droppk2 DETACH PARTITION droppk2_d; -ERROR: removing partition "droppk2_d" violates foreign key constraint "dropfk_a_fkey4" -DETAIL: Key (a)=(1500) is still referenced from table "dropfk". -ALTER TABLE droppk DETACH PARTITION droppk1; -ERROR: removing partition "droppk1" violates foreign key constraint "dropfk_a_fkey1" -DETAIL: Key (a)=(1) is still referenced from table "dropfk". -ALTER TABLE droppk DETACH PARTITION droppk2; -ERROR: removing partition "droppk2" violates foreign key constraint "dropfk_a_fkey2" -DETAIL: Key (a)=(1000) is still referenced from table "dropfk". 
-ALTER TABLE droppk2 DETACH PARTITION droppk21; -ERROR: removing partition "droppk21" violates foreign key constraint "dropfk_a_fkey3" -DETAIL: Key (a)=(1000) is still referenced from table "dropfk". --- dropping partitions is disallowed -DROP TABLE droppk_d; -ERROR: cannot drop table droppk_d because other objects depend on it -DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk_d -HINT: Use DROP ... CASCADE to drop the dependent objects too. -DROP TABLE droppk2_d; -ERROR: cannot drop table droppk2_d because other objects depend on it -DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk2_d -HINT: Use DROP ... CASCADE to drop the dependent objects too. -DROP TABLE droppk1; -ERROR: cannot drop table droppk1 because other objects depend on it -DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk1 -HINT: Use DROP ... CASCADE to drop the dependent objects too. -DROP TABLE droppk2; -ERROR: cannot drop table droppk2 because other objects depend on it -DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk2 -HINT: Use DROP ... CASCADE to drop the dependent objects too. -DROP TABLE droppk21; -ERROR: cannot drop table droppk21 because other objects depend on it -DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk21 -HINT: Use DROP ... CASCADE to drop the dependent objects too. -DELETE FROM dropfk; --- dropping partitions is disallowed, even when no referencing values -DROP TABLE droppk_d; -ERROR: cannot drop table droppk_d because other objects depend on it -DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk_d -HINT: Use DROP ... CASCADE to drop the dependent objects too. -DROP TABLE droppk2_d; -ERROR: cannot drop table droppk2_d because other objects depend on it -DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk2_d -HINT: Use DROP ... CASCADE to drop the dependent objects too. -DROP TABLE droppk1; -ERROR: cannot drop table droppk1 because other objects depend on it -DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk1 -HINT: Use DROP ... CASCADE to drop the dependent objects too. --- but DETACH is allowed, and DROP afterwards works -ALTER TABLE droppk2 DETACH PARTITION droppk21; -DROP TABLE droppk2; -ERROR: cannot drop table droppk2 because other objects depend on it -DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk2 -HINT: Use DROP ... CASCADE to drop the dependent objects too. 
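-- Aside: the rule the fkpart4 block demonstrates, reduced to a sketch
-- (r_* names are illustrative): dropping a partition that backs a foreign
-- key is refused outright, while DETACH is allowed after a check that no
-- referencing rows remain, and the detached table can then be dropped.
CREATE TABLE r_pk (a int PRIMARY KEY) PARTITION BY RANGE (a);
CREATE TABLE r_pk1 PARTITION OF r_pk FOR VALUES FROM (0) TO (10);
CREATE TABLE r_fk (a int REFERENCES r_pk);
DROP TABLE r_pk1;                         -- fails: the FK depends on it
ALTER TABLE r_pk DETACH PARTITION r_pk1;  -- ok: no rows reference it
DROP TABLE r_pk1;                         -- ok once detached
DROP TABLE r_fk, r_pk;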
--- Verify that initial constraint creation and cloning behave correctly -CREATE SCHEMA fkpart5; -SET search_path TO fkpart5; -CREATE TABLE pk (a int PRIMARY KEY) PARTITION BY LIST (a); -CREATE TABLE pk1 PARTITION OF pk FOR VALUES IN (1) PARTITION BY LIST (a); -CREATE TABLE pk11 PARTITION OF pk1 FOR VALUES IN (1); -CREATE TABLE fk (a int) PARTITION BY LIST (a); -CREATE TABLE fk1 PARTITION OF fk FOR VALUES IN (1) PARTITION BY LIST (a); -CREATE TABLE fk11 PARTITION OF fk1 FOR VALUES IN (1); -ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk; -CREATE TABLE pk2 PARTITION OF pk FOR VALUES IN (2); -CREATE TABLE pk3 (a int NOT NULL) PARTITION BY LIST (a); -CREATE TABLE pk31 PARTITION OF pk3 FOR VALUES IN (31); -CREATE TABLE pk32 (b int, a int NOT NULL); -ALTER TABLE pk32 DROP COLUMN b; -ALTER TABLE pk3 ATTACH PARTITION pk32 FOR VALUES IN (32); -ALTER TABLE pk ATTACH PARTITION pk3 FOR VALUES IN (31, 32); -CREATE TABLE fk2 PARTITION OF fk FOR VALUES IN (2); -CREATE TABLE fk3 (b int, a int); -ALTER TABLE fk3 DROP COLUMN b; -ALTER TABLE fk ATTACH PARTITION fk3 FOR VALUES IN (3); -SELECT pg_describe_object('pg_constraint'::regclass, oid, 0), confrelid::regclass, - CASE WHEN conparentid <> 0 THEN pg_describe_object('pg_constraint'::regclass, conparentid, 0) ELSE 'TOP' END -FROM pg_catalog.pg_constraint -WHERE conrelid IN (SELECT relid FROM pg_partition_tree('fk')) -ORDER BY conrelid::regclass::text, conname; - pg_describe_object | confrelid | case -------------------------------------+-----------+----------------------------------- - constraint fk_a_fkey on table fk | pk | TOP - constraint fk_a_fkey1 on table fk | pk1 | constraint fk_a_fkey on table fk - constraint fk_a_fkey2 on table fk | pk11 | constraint fk_a_fkey1 on table fk - constraint fk_a_fkey3 on table fk | pk2 | constraint fk_a_fkey on table fk - constraint fk_a_fkey4 on table fk | pk3 | constraint fk_a_fkey on table fk - constraint fk_a_fkey5 on table fk | pk31 | constraint fk_a_fkey4 on table fk - constraint fk_a_fkey6 on table fk | pk32 | constraint fk_a_fkey4 on table fk - constraint fk_a_fkey on table fk1 | pk | constraint fk_a_fkey on table fk - constraint fk_a_fkey on table fk11 | pk | constraint fk_a_fkey on table fk1 - constraint fk_a_fkey on table fk2 | pk | constraint fk_a_fkey on table fk - constraint fk_a_fkey on table fk3 | pk | constraint fk_a_fkey on table fk -(11 rows) - -CREATE TABLE fk4 (LIKE fk); -INSERT INTO fk4 VALUES (50); -ALTER TABLE fk ATTACH PARTITION fk4 FOR VALUES IN (50); -ERROR: insert or update on table "fk4" violates foreign key constraint "fk_a_fkey" -DETAIL: Key (a)=(50) is not present in table "pk". --- Verify constraint deferrability -CREATE SCHEMA fkpart9; -SET search_path TO fkpart9; -CREATE TABLE pk (a int PRIMARY KEY) PARTITION BY LIST (a); -CREATE TABLE pk1 PARTITION OF pk FOR VALUES IN (1, 2) PARTITION BY LIST (a); -CREATE TABLE pk11 PARTITION OF pk1 FOR VALUES IN (1); -CREATE TABLE pk3 PARTITION OF pk FOR VALUES IN (3); -CREATE TABLE fk (a int REFERENCES pk DEFERRABLE INITIALLY IMMEDIATE); -INSERT INTO fk VALUES (1); -- should fail -ERROR: insert or update on table "fk" violates foreign key constraint "fk_a_fkey" -DETAIL: Key (a)=(1) is not present in table "pk". -BEGIN; -SET CONSTRAINTS fk_a_fkey DEFERRED; -INSERT INTO fk VALUES (1); -COMMIT; -- should fail -ERROR: insert or update on table "fk" violates foreign key constraint "fk_a_fkey" -DETAIL: Key (a)=(1) is not present in table "pk". 
-BEGIN; -SET CONSTRAINTS fk_a_fkey DEFERRED; -INSERT INTO fk VALUES (1); -INSERT INTO pk VALUES (1); -COMMIT; -- OK -BEGIN; -SET CONSTRAINTS fk_a_fkey DEFERRED; -DELETE FROM pk WHERE a = 1; -DELETE FROM fk WHERE a = 1; -COMMIT; -- OK --- Verify constraint deferrability when changed by ALTER --- Partitioned table at referencing end -CREATE TABLE pt(f1 int, f2 int, f3 int, PRIMARY KEY(f1,f2)); -CREATE TABLE ref(f1 int, f2 int, f3 int) - PARTITION BY list(f1); -CREATE TABLE ref1 PARTITION OF ref FOR VALUES IN (1); -CREATE TABLE ref2 PARTITION OF ref FOR VALUES in (2); -ALTER TABLE ref ADD FOREIGN KEY(f1,f2) REFERENCES pt; -ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey - DEFERRABLE INITIALLY DEFERRED; -INSERT INTO pt VALUES(1,2,3); -INSERT INTO ref VALUES(1,2,3); -BEGIN; -DELETE FROM pt; -DELETE FROM ref; -ABORT; -DROP TABLE pt, ref; --- Multi-level partitioning at referencing end -CREATE TABLE pt(f1 int, f2 int, f3 int, PRIMARY KEY(f1,f2)); -CREATE TABLE ref(f1 int, f2 int, f3 int) - PARTITION BY list(f1); -CREATE TABLE ref1_2 PARTITION OF ref FOR VALUES IN (1, 2) PARTITION BY list (f2); -CREATE TABLE ref1 PARTITION OF ref1_2 FOR VALUES IN (1); -CREATE TABLE ref2 PARTITION OF ref1_2 FOR VALUES IN (2) PARTITION BY list (f2); -CREATE TABLE ref22 PARTITION OF ref2 FOR VALUES IN (2); -ALTER TABLE ref ADD FOREIGN KEY(f1,f2) REFERENCES pt; -INSERT INTO pt VALUES(1,2,3); -INSERT INTO ref VALUES(1,2,3); -ALTER TABLE ref22 ALTER CONSTRAINT ref_f1_f2_fkey - DEFERRABLE INITIALLY IMMEDIATE; -- fails -ERROR: cannot alter constraint "ref_f1_f2_fkey" on relation "ref22" -DETAIL: Constraint "ref_f1_f2_fkey" is derived from constraint "ref_f1_f2_fkey" of relation "ref". -HINT: You may alter the constraint it derives from instead. -ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey - DEFERRABLE INITIALLY DEFERRED; -BEGIN; -DELETE FROM pt; -DELETE FROM ref; -ABORT; -DROP TABLE pt, ref; --- Partitioned table at referenced end -CREATE TABLE pt(f1 int, f2 int, f3 int, PRIMARY KEY(f1,f2)) - PARTITION BY LIST(f1); -CREATE TABLE pt1 PARTITION OF pt FOR VALUES IN (1); -CREATE TABLE pt2 PARTITION OF pt FOR VALUES IN (2); -CREATE TABLE ref(f1 int, f2 int, f3 int); -ALTER TABLE ref ADD FOREIGN KEY(f1,f2) REFERENCES pt; -ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey - DEFERRABLE INITIALLY DEFERRED; -INSERT INTO pt VALUES(1,2,3); -INSERT INTO ref VALUES(1,2,3); -BEGIN; -DELETE FROM pt; -DELETE FROM ref; -ABORT; -DROP TABLE pt, ref; --- Multi-level partitioning at referenced end -CREATE TABLE pt(f1 int, f2 int, f3 int, PRIMARY KEY(f1,f2)) - PARTITION BY LIST(f1); -CREATE TABLE pt1_2 PARTITION OF pt FOR VALUES IN (1, 2) PARTITION BY LIST (f1); -CREATE TABLE pt1 PARTITION OF pt1_2 FOR VALUES IN (1); -CREATE TABLE pt2 PARTITION OF pt1_2 FOR VALUES IN (2); -CREATE TABLE ref(f1 int, f2 int, f3 int); -ALTER TABLE ref ADD FOREIGN KEY(f1,f2) REFERENCES pt; -ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey1 - DEFERRABLE INITIALLY DEFERRED; -- fails -ERROR: cannot alter constraint "ref_f1_f2_fkey1" on relation "ref" -DETAIL: Constraint "ref_f1_f2_fkey1" is derived from constraint "ref_f1_f2_fkey" of relation "ref". -HINT: You may alter the constraint it derives from instead. 
-ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey - DEFERRABLE INITIALLY DEFERRED; -INSERT INTO pt VALUES(1,2,3); -INSERT INTO ref VALUES(1,2,3); -BEGIN; -DELETE FROM pt; -DELETE FROM ref; -ABORT; -DROP TABLE pt, ref; -DROP SCHEMA fkpart9 CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table pk -drop cascades to table fk --- Verify ON UPDATE/DELETE behavior -CREATE SCHEMA fkpart6; -SET search_path TO fkpart6; -CREATE TABLE pk (a int PRIMARY KEY) PARTITION BY RANGE (a); -CREATE TABLE pk1 PARTITION OF pk FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a); -CREATE TABLE pk11 PARTITION OF pk1 FOR VALUES FROM (1) TO (50); -CREATE TABLE pk12 PARTITION OF pk1 FOR VALUES FROM (50) TO (100); -CREATE TABLE fk (a int) PARTITION BY RANGE (a); -CREATE TABLE fk1 PARTITION OF fk FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a); -CREATE TABLE fk11 PARTITION OF fk1 FOR VALUES FROM (1) TO (10); -CREATE TABLE fk12 PARTITION OF fk1 FOR VALUES FROM (10) TO (100); -ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk ON UPDATE CASCADE ON DELETE CASCADE; -CREATE TABLE fk_d PARTITION OF fk DEFAULT; -INSERT INTO pk VALUES (1); -INSERT INTO fk VALUES (1); -UPDATE pk SET a = 20; -SELECT tableoid::regclass, * FROM fk; - tableoid | a -----------+---- - fk12 | 20 -(1 row) - -DELETE FROM pk WHERE a = 20; -SELECT tableoid::regclass, * FROM fk; - tableoid | a -----------+--- -(0 rows) - -DROP TABLE fk; -TRUNCATE TABLE pk; -INSERT INTO pk VALUES (20), (50); -CREATE TABLE fk (a int) PARTITION BY RANGE (a); -CREATE TABLE fk1 PARTITION OF fk FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a); -CREATE TABLE fk11 PARTITION OF fk1 FOR VALUES FROM (1) TO (10); -CREATE TABLE fk12 PARTITION OF fk1 FOR VALUES FROM (10) TO (100); -ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk ON UPDATE SET NULL ON DELETE SET NULL; -CREATE TABLE fk_d PARTITION OF fk DEFAULT; -INSERT INTO fk VALUES (20), (50); -UPDATE pk SET a = 21 WHERE a = 20; -DELETE FROM pk WHERE a = 50; -SELECT tableoid::regclass, * FROM fk; - tableoid | a -----------+--- - fk_d | - fk_d | -(2 rows) - -DROP TABLE fk; -TRUNCATE TABLE pk; -INSERT INTO pk VALUES (20), (30), (50); -CREATE TABLE fk (id int, a int DEFAULT 50) PARTITION BY RANGE (a); -CREATE TABLE fk1 PARTITION OF fk FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a); -CREATE TABLE fk11 PARTITION OF fk1 FOR VALUES FROM (1) TO (10); -CREATE TABLE fk12 PARTITION OF fk1 FOR VALUES FROM (10) TO (100); -ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk ON UPDATE SET DEFAULT ON DELETE SET DEFAULT; -CREATE TABLE fk_d PARTITION OF fk DEFAULT; -INSERT INTO fk VALUES (1, 20), (2, 30); -DELETE FROM pk WHERE a = 20 RETURNING *; - a ----- - 20 -(1 row) - -UPDATE pk SET a = 90 WHERE a = 30 RETURNING *; - a ----- - 90 -(1 row) - -SELECT tableoid::regclass, * FROM fk; - tableoid | id | a -----------+----+---- - fk12 | 1 | 50 - fk12 | 2 | 50 -(2 rows) - -DROP TABLE fk; -TRUNCATE TABLE pk; -INSERT INTO pk VALUES (20), (30); -CREATE TABLE fk (a int DEFAULT 50) PARTITION BY RANGE (a); -CREATE TABLE fk1 PARTITION OF fk FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a); -CREATE TABLE fk11 PARTITION OF fk1 FOR VALUES FROM (1) TO (10); -CREATE TABLE fk12 PARTITION OF fk1 FOR VALUES FROM (10) TO (100); -ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk ON UPDATE RESTRICT ON DELETE RESTRICT; -CREATE TABLE fk_d PARTITION OF fk DEFAULT; -INSERT INTO fk VALUES (20), (30); -DELETE FROM pk WHERE a = 20; -ERROR: update or delete on table "pk11" violates foreign key constraint "fk_a_fkey2" on table "fk" -DETAIL: Key 
(a)=(20) is still referenced from table "fk". -UPDATE pk SET a = 90 WHERE a = 30; -ERROR: update or delete on table "pk" violates foreign key constraint "fk_a_fkey" on table "fk" -DETAIL: Key (a)=(30) is still referenced from table "fk". -SELECT tableoid::regclass, * FROM fk; - tableoid | a -----------+---- - fk12 | 20 - fk12 | 30 -(2 rows) - -DROP TABLE fk; --- test for reported bug: relispartition not set --- https://postgr.es/m/CA+HiwqHMsRtRYRWYTWavKJ8x14AFsv7bmAV46mYwnfD3vy8goQ@mail.gmail.com -CREATE SCHEMA fkpart7 - CREATE TABLE pkpart (a int) PARTITION BY LIST (a) - CREATE TABLE pkpart1 PARTITION OF pkpart FOR VALUES IN (1); -ALTER TABLE fkpart7.pkpart1 ADD PRIMARY KEY (a); -ALTER TABLE fkpart7.pkpart ADD PRIMARY KEY (a); -CREATE TABLE fkpart7.fk (a int REFERENCES fkpart7.pkpart); -DROP SCHEMA fkpart7 CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table fkpart7.pkpart -drop cascades to table fkpart7.fk --- ensure we check partitions are "not used" when dropping constraints -CREATE SCHEMA fkpart8 - CREATE TABLE tbl1(f1 int PRIMARY KEY) - CREATE TABLE tbl2(f1 int REFERENCES tbl1 DEFERRABLE INITIALLY DEFERRED) PARTITION BY RANGE(f1) - CREATE TABLE tbl2_p1 PARTITION OF tbl2 FOR VALUES FROM (minvalue) TO (maxvalue); -INSERT INTO fkpart8.tbl1 VALUES(1); -BEGIN; -INSERT INTO fkpart8.tbl2 VALUES(1); -ALTER TABLE fkpart8.tbl2 DROP CONSTRAINT tbl2_f1_fkey; -ERROR: cannot ALTER TABLE "tbl2_p1" because it has pending trigger events -COMMIT; -DROP SCHEMA fkpart8 CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table fkpart8.tbl1 -drop cascades to table fkpart8.tbl2 --- ensure FKs referencing a multi-level partitioned table --- enforce references to sub-children. -CREATE SCHEMA fkpart9 - CREATE TABLE pk (a INT PRIMARY KEY) PARTITION BY RANGE (a) - CREATE TABLE fk ( - fk_a INT REFERENCES pk(a) ON DELETE CASCADE - ) - CREATE TABLE pk1 PARTITION OF pk FOR VALUES FROM (30) TO (50) PARTITION BY RANGE (a) - CREATE TABLE pk11 PARTITION OF pk1 FOR VALUES FROM (30) TO (40); -INSERT INTO fkpart9.pk VALUES (35); -INSERT INTO fkpart9.fk VALUES (35); -DELETE FROM fkpart9.pk WHERE a=35; -SELECT * FROM fkpart9.pk; - a --- -(0 rows) - -SELECT * FROM fkpart9.fk; - fk_a ------ -(0 rows) - -DROP SCHEMA fkpart9 CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table fkpart9.pk -drop cascades to table fkpart9.fk --- test that ri_Check_Pk_Match() scans the correct partition for a deferred --- ON DELETE/UPDATE NO ACTION constraint -CREATE SCHEMA fkpart10 - CREATE TABLE tbl1(f1 int PRIMARY KEY) PARTITION BY RANGE(f1) - CREATE TABLE tbl1_p1 PARTITION OF tbl1 FOR VALUES FROM (minvalue) TO (1) - CREATE TABLE tbl1_p2 PARTITION OF tbl1 FOR VALUES FROM (1) TO (maxvalue) - CREATE TABLE tbl2(f1 int REFERENCES tbl1 DEFERRABLE INITIALLY DEFERRED) - CREATE TABLE tbl3(f1 int PRIMARY KEY) PARTITION BY RANGE(f1) - CREATE TABLE tbl3_p1 PARTITION OF tbl3 FOR VALUES FROM (minvalue) TO (1) - CREATE TABLE tbl3_p2 PARTITION OF tbl3 FOR VALUES FROM (1) TO (maxvalue) - CREATE TABLE tbl4(f1 int REFERENCES tbl3 DEFERRABLE INITIALLY DEFERRED); -INSERT INTO fkpart10.tbl1 VALUES (0), (1); -INSERT INTO fkpart10.tbl2 VALUES (0), (1); -INSERT INTO fkpart10.tbl3 VALUES (-2), (-1), (0); -INSERT INTO fkpart10.tbl4 VALUES (-2), (-1); -BEGIN; -DELETE FROM fkpart10.tbl1 WHERE f1 = 0; -UPDATE fkpart10.tbl1 SET f1 = 2 WHERE f1 = 1; -INSERT INTO fkpart10.tbl1 VALUES (0), (1); -COMMIT; --- test that cross-partition updates correctly enforce the foreign key --- restriction
(specifically testing INITIALLY DEFERRED) -BEGIN; -UPDATE fkpart10.tbl1 SET f1 = 3 WHERE f1 = 0; -UPDATE fkpart10.tbl3 SET f1 = f1 * -1; -INSERT INTO fkpart10.tbl1 VALUES (4); -COMMIT; -ERROR: update or delete on table "tbl1" violates foreign key constraint "tbl2_f1_fkey" on table "tbl2" -DETAIL: Key (f1)=(0) is still referenced from table "tbl2". -BEGIN; -UPDATE fkpart10.tbl3 SET f1 = f1 * -1; -UPDATE fkpart10.tbl3 SET f1 = f1 + 3; -UPDATE fkpart10.tbl1 SET f1 = 3 WHERE f1 = 0; -INSERT INTO fkpart10.tbl1 VALUES (0); -COMMIT; -ERROR: update or delete on table "tbl3" violates foreign key constraint "tbl4_f1_fkey" on table "tbl4" -DETAIL: Key (f1)=(-2) is still referenced from table "tbl4". -BEGIN; -UPDATE fkpart10.tbl3 SET f1 = f1 * -1; -UPDATE fkpart10.tbl1 SET f1 = 3 WHERE f1 = 0; -INSERT INTO fkpart10.tbl1 VALUES (0); -INSERT INTO fkpart10.tbl3 VALUES (-2), (-1); -COMMIT; --- test where the updated table now has both an IMMEDIATE and a DEFERRED --- constraint pointing into it -CREATE TABLE fkpart10.tbl5(f1 int REFERENCES fkpart10.tbl3); -INSERT INTO fkpart10.tbl5 VALUES (-2), (-1); -BEGIN; -UPDATE fkpart10.tbl3 SET f1 = f1 * -3; -ERROR: update or delete on table "tbl3" violates foreign key constraint "tbl5_f1_fkey" on table "tbl5" -DETAIL: Key (f1)=(-2) is still referenced from table "tbl5". -COMMIT; --- Now test where the row referenced from the table with an IMMEDIATE --- constraint stays in place, while those referenced from the table with a --- DEFERRED constraint don't. -DELETE FROM fkpart10.tbl5; -INSERT INTO fkpart10.tbl5 VALUES (0); -BEGIN; -UPDATE fkpart10.tbl3 SET f1 = f1 * -3; -COMMIT; -ERROR: update or delete on table "tbl3" violates foreign key constraint "tbl4_f1_fkey" on table "tbl4" -DETAIL: Key (f1)=(-2) is still referenced from table "tbl4".
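-- Aside: the deferred-check pattern the fkpart10 block relies on, as a
-- standalone sketch (q_* names are illustrative): with DEFERRABLE INITIALLY
-- DEFERRED, NO ACTION checks run at COMMIT against the rows' final locations,
-- so PK rows may move across partitions mid-transaction as long as every
-- referenced key exists again by commit time.
CREATE TABLE q_pk (a int PRIMARY KEY) PARTITION BY RANGE (a);
CREATE TABLE q_pk1 PARTITION OF q_pk FOR VALUES FROM (0) TO (10);
CREATE TABLE q_pk2 PARTITION OF q_pk FOR VALUES FROM (10) TO (20);
CREATE TABLE q_fk (a int REFERENCES q_pk DEFERRABLE INITIALLY DEFERRED);
INSERT INTO q_pk VALUES (1);
INSERT INTO q_fk VALUES (1);
BEGIN;
UPDATE q_pk SET a = 11 WHERE a = 1;  -- cross-partition move; check is deferred
INSERT INTO q_pk VALUES (1);         -- referenced key restored
COMMIT;                              -- passes: (1) exists at commit time
DROP TABLE q_fk, q_pk;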
-DROP SCHEMA fkpart10 CASCADE; -NOTICE: drop cascades to 5 other objects -DETAIL: drop cascades to table fkpart10.tbl1 -drop cascades to table fkpart10.tbl2 -drop cascades to table fkpart10.tbl3 -drop cascades to table fkpart10.tbl4 -drop cascades to table fkpart10.tbl5 --- verify foreign keys are enforced during cross-partition updates, --- especially on the PK side -CREATE SCHEMA fkpart11 - CREATE TABLE pk (a INT PRIMARY KEY, b text) PARTITION BY LIST (a) - CREATE TABLE fk ( - a INT, - CONSTRAINT fkey FOREIGN KEY (a) REFERENCES pk(a) ON UPDATE CASCADE ON DELETE CASCADE - ) - CREATE TABLE fk_parted ( - a INT PRIMARY KEY, - CONSTRAINT fkey FOREIGN KEY (a) REFERENCES pk(a) ON UPDATE CASCADE ON DELETE CASCADE - ) PARTITION BY LIST (a) - CREATE TABLE fk_another ( - a INT, - CONSTRAINT fkey FOREIGN KEY (a) REFERENCES fk_parted (a) ON UPDATE CASCADE ON DELETE CASCADE - ) - CREATE TABLE pk1 PARTITION OF pk FOR VALUES IN (1, 2) PARTITION BY LIST (a) - CREATE TABLE pk2 PARTITION OF pk FOR VALUES IN (3) - CREATE TABLE pk3 PARTITION OF pk FOR VALUES IN (4) - CREATE TABLE fk1 PARTITION OF fk_parted FOR VALUES IN (1, 2) - CREATE TABLE fk2 PARTITION OF fk_parted FOR VALUES IN (3) - CREATE TABLE fk3 PARTITION OF fk_parted FOR VALUES IN (4); -CREATE TABLE fkpart11.pk11 (b text, a int NOT NULL); -ALTER TABLE fkpart11.pk1 ATTACH PARTITION fkpart11.pk11 FOR VALUES IN (1); -CREATE TABLE fkpart11.pk12 (b text, c int, a int NOT NULL); -ALTER TABLE fkpart11.pk12 DROP c; -ALTER TABLE fkpart11.pk1 ATTACH PARTITION fkpart11.pk12 FOR VALUES IN (2); -INSERT INTO fkpart11.pk VALUES (1, 'xxx'), (3, 'yyy'); -INSERT INTO fkpart11.fk VALUES (1), (3); -INSERT INTO fkpart11.fk_parted VALUES (1), (3); -INSERT INTO fkpart11.fk_another VALUES (1), (3); --- moves 2 rows from one leaf partition to another, with both updates being --- cascaded to fk and fk_parted. Updates of fk_parted, of which one is --- cross-partition (3 -> 4), are further cascaded to fk_another. -UPDATE fkpart11.pk SET a = a + 1 RETURNING tableoid::pg_catalog.regclass, *; - tableoid | a | b ----------------+---+----- - fkpart11.pk12 | 2 | xxx - fkpart11.pk3 | 4 | yyy -(2 rows) - -SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk; - tableoid | a --------------+--- - fkpart11.fk | 2 - fkpart11.fk | 4 -(2 rows) - -SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk_parted; - tableoid | a ---------------+--- - fkpart11.fk1 | 2 - fkpart11.fk3 | 4 -(2 rows) - -SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk_another; - tableoid | a ----------------------+--- - fkpart11.fk_another | 2 - fkpart11.fk_another | 4 -(2 rows) - --- let's try with the foreign key pointing at tables in the partition tree --- that are not the same as the query's target table --- 1. foreign key pointing into a non-root ancestor --- --- A cross-partition update on the root table will fail, because we currently --- can't enforce the foreign keys pointing into a non-leaf partition -ALTER TABLE fkpart11.fk DROP CONSTRAINT fkey; -DELETE FROM fkpart11.fk WHERE a = 4; -ALTER TABLE fkpart11.fk ADD CONSTRAINT fkey FOREIGN KEY (a) REFERENCES fkpart11.pk1 (a) ON UPDATE CASCADE ON DELETE CASCADE; -UPDATE fkpart11.pk SET a = a - 1; -ERROR: cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key -DETAIL: A foreign key points to ancestor "pk1" but not the root ancestor "pk". -HINT: Consider defining the foreign key on table "pk". 
--- it's okay though if the non-leaf partition is updated directly -UPDATE fkpart11.pk1 SET a = a - 1; -SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.pk; - tableoid | a | b ----------------+---+----- - fkpart11.pk11 | 1 | xxx - fkpart11.pk3 | 4 | yyy -(2 rows) - -SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk; - tableoid | a --------------+--- - fkpart11.fk | 1 -(1 row) - -SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk_parted; - tableoid | a ---------------+--- - fkpart11.fk1 | 1 - fkpart11.fk3 | 4 -(2 rows) - -SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk_another; - tableoid | a ----------------------+--- - fkpart11.fk_another | 4 - fkpart11.fk_another | 1 -(2 rows) - --- 2. foreign key pointing into a single leaf partition --- --- A cross-partition update that deletes from the pointed-to leaf partition --- is allowed to succeed -ALTER TABLE fkpart11.fk DROP CONSTRAINT fkey; -ALTER TABLE fkpart11.fk ADD CONSTRAINT fkey FOREIGN KEY (a) REFERENCES fkpart11.pk11 (a) ON UPDATE CASCADE ON DELETE CASCADE; --- will delete (1) from p11 which is cascaded to fk -UPDATE fkpart11.pk SET a = a + 1 WHERE a = 1; -SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk; - tableoid | a -----------+--- -(0 rows) - -DROP TABLE fkpart11.fk; --- check that regular and deferrable AR triggers on the PK tables --- still work as expected -CREATE FUNCTION fkpart11.print_row () RETURNS TRIGGER LANGUAGE plpgsql AS $$ - BEGIN - RAISE NOTICE 'TABLE: %, OP: %, OLD: %, NEW: %', TG_RELNAME, TG_OP, OLD, NEW; - RETURN NULL; - END; -$$; -CREATE TRIGGER trig_upd_pk AFTER UPDATE ON fkpart11.pk FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); -CREATE TRIGGER trig_del_pk AFTER DELETE ON fkpart11.pk FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); -CREATE TRIGGER trig_ins_pk AFTER INSERT ON fkpart11.pk FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); -CREATE CONSTRAINT TRIGGER trig_upd_fk_parted AFTER UPDATE ON fkpart11.fk_parted INITIALLY DEFERRED FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); -CREATE CONSTRAINT TRIGGER trig_del_fk_parted AFTER DELETE ON fkpart11.fk_parted INITIALLY DEFERRED FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); -CREATE CONSTRAINT TRIGGER trig_ins_fk_parted AFTER INSERT ON fkpart11.fk_parted INITIALLY DEFERRED FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); -UPDATE fkpart11.pk SET a = 3 WHERE a = 4; -NOTICE: TABLE: pk3, OP: DELETE, OLD: (4,yyy), NEW: -NOTICE: TABLE: pk2, OP: INSERT, OLD: , NEW: (3,yyy) -NOTICE: TABLE: fk3, OP: DELETE, OLD: (4), NEW: -NOTICE: TABLE: fk2, OP: INSERT, OLD: , NEW: (3) -UPDATE fkpart11.pk SET a = 1 WHERE a = 2; -NOTICE: TABLE: pk12, OP: DELETE, OLD: (xxx,2), NEW: -NOTICE: TABLE: pk11, OP: INSERT, OLD: , NEW: (xxx,1) -NOTICE: TABLE: fk1, OP: UPDATE, OLD: (2), NEW: (1) -DROP SCHEMA fkpart11 CASCADE; -NOTICE: drop cascades to 4 other objects -DETAIL: drop cascades to table fkpart11.pk -drop cascades to table fkpart11.fk_parted -drop cascades to table fkpart11.fk_another -drop cascades to function fkpart11.print_row() +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
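Everything removed from this foreign-key section is coverage of cross-partition UPDATEs: when an UPDATE moves a row between partitions it is executed internally as a DELETE plus an INSERT, and the lost tests verify that referential-integrity checks (immediate and deferred) still fire correctly around that row movement. A minimal sketch of the behavior under test; the demo_* names are illustrative, not taken from the suite:

CREATE TABLE demo_pk (a int PRIMARY KEY) PARTITION BY LIST (a);
CREATE TABLE demo_pk1 PARTITION OF demo_pk FOR VALUES IN (1);
CREATE TABLE demo_pk2 PARTITION OF demo_pk FOR VALUES IN (2);
CREATE TABLE demo_fk (a int REFERENCES demo_pk
                        ON UPDATE CASCADE ON DELETE CASCADE);
INSERT INTO demo_pk VALUES (1);
INSERT INTO demo_fk VALUES (1);
-- Moves the row from demo_pk1 to demo_pk2; internally a DELETE plus an
-- INSERT, but it must cascade to demo_fk as an update, not as a delete.
UPDATE demo_pk SET a = 2 WHERE a = 1;
SELECT * FROM demo_fk;  -- expected: one row with a = 2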
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/cluster.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/cluster.out --- /tmp/cirrus-ci-build/src/test/regress/expected/cluster.out 2024-03-07 14:25:00.329549000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/cluster.out 2024-03-07 14:27:17.337484000 +0000 @@ -1,668 +1,2 @@ --- --- CLUSTER --- -CREATE TABLE clstr_tst_s (rf_a SERIAL PRIMARY KEY, - b INT); -CREATE TABLE clstr_tst (a SERIAL PRIMARY KEY, - b INT, - c TEXT, - d TEXT, - CONSTRAINT clstr_tst_con FOREIGN KEY (b) REFERENCES clstr_tst_s); -CREATE INDEX clstr_tst_b ON clstr_tst (b); -CREATE INDEX clstr_tst_c ON clstr_tst (c); -CREATE INDEX clstr_tst_c_b ON clstr_tst (c,b); -CREATE INDEX clstr_tst_b_c ON clstr_tst (b,c); -INSERT INTO clstr_tst_s (b) VALUES (0); -INSERT INTO clstr_tst_s (b) SELECT b FROM clstr_tst_s; -INSERT INTO clstr_tst_s (b) SELECT b FROM clstr_tst_s; -INSERT INTO clstr_tst_s (b) SELECT b FROM clstr_tst_s; -INSERT INTO clstr_tst_s (b) SELECT b FROM clstr_tst_s; -INSERT INTO clstr_tst_s (b) SELECT b FROM clstr_tst_s; -CREATE TABLE clstr_tst_inh () INHERITS (clstr_tst); -INSERT INTO clstr_tst (b, c) VALUES (11, 'once'); -INSERT INTO clstr_tst (b, c) VALUES (10, 'diez'); -INSERT INTO clstr_tst (b, c) VALUES (31, 'treinta y uno'); -INSERT INTO clstr_tst (b, c) VALUES (22, 'veintidos'); -INSERT INTO clstr_tst (b, c) VALUES (3, 'tres'); -INSERT INTO clstr_tst (b, c) VALUES (20, 'veinte'); -INSERT INTO clstr_tst (b, c) VALUES (23, 'veintitres'); -INSERT INTO clstr_tst (b, c) VALUES (21, 'veintiuno'); -INSERT INTO clstr_tst (b, c) VALUES (4, 'cuatro'); -INSERT INTO clstr_tst (b, c) VALUES (14, 'catorce'); -INSERT INTO clstr_tst (b, c) VALUES (2, 'dos'); -INSERT INTO clstr_tst (b, c) VALUES (18, 'dieciocho'); -INSERT INTO clstr_tst (b, c) VALUES (27, 'veintisiete'); -INSERT INTO clstr_tst (b, c) VALUES (25, 'veinticinco'); -INSERT INTO clstr_tst (b, c) VALUES (13, 'trece'); -INSERT INTO clstr_tst (b, c) VALUES (28, 'veintiocho'); -INSERT INTO clstr_tst (b, c) VALUES (32, 'treinta y dos'); -INSERT INTO clstr_tst (b, c) VALUES (5, 'cinco'); -INSERT INTO clstr_tst (b, c) VALUES (29, 'veintinueve'); -INSERT INTO clstr_tst (b, c) VALUES (1, 'uno'); -INSERT INTO clstr_tst (b, c) VALUES (24, 'veinticuatro'); -INSERT INTO clstr_tst (b, c) VALUES (30, 'treinta'); -INSERT INTO clstr_tst (b, c) VALUES (12, 'doce'); -INSERT INTO clstr_tst (b, c) VALUES (17, 'diecisiete'); -INSERT INTO clstr_tst (b, c) VALUES (9, 'nueve'); -INSERT INTO clstr_tst (b, c) VALUES (19, 'diecinueve'); -INSERT INTO clstr_tst (b, c) VALUES (26, 'veintiseis'); -INSERT INTO clstr_tst (b, c) VALUES (15, 'quince'); -INSERT INTO clstr_tst (b, c) VALUES (7, 'siete'); -INSERT INTO clstr_tst (b, c) VALUES (16, 'dieciseis'); -INSERT INTO clstr_tst (b, c) VALUES (8, 'ocho'); --- This entry is needed to test that TOASTED values are copied correctly. 
-INSERT INTO clstr_tst (b, c, d) VALUES (6, 'seis', repeat('xyzzy', 100000)); -CLUSTER clstr_tst_c ON clstr_tst; -SELECT a,b,c,substring(d for 30), length(d) from clstr_tst; - a | b | c | substring | length -----+----+---------------+--------------------------------+-------- - 10 | 14 | catorce | | - 18 | 5 | cinco | | - 9 | 4 | cuatro | | - 26 | 19 | diecinueve | | - 12 | 18 | dieciocho | | - 30 | 16 | dieciseis | | - 24 | 17 | diecisiete | | - 2 | 10 | diez | | - 23 | 12 | doce | | - 11 | 2 | dos | | - 25 | 9 | nueve | | - 31 | 8 | ocho | | - 1 | 11 | once | | - 28 | 15 | quince | | - 32 | 6 | seis | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzy | 500000 - 29 | 7 | siete | | - 15 | 13 | trece | | - 22 | 30 | treinta | | - 17 | 32 | treinta y dos | | - 3 | 31 | treinta y uno | | - 5 | 3 | tres | | - 20 | 1 | uno | | - 6 | 20 | veinte | | - 14 | 25 | veinticinco | | - 21 | 24 | veinticuatro | | - 4 | 22 | veintidos | | - 19 | 29 | veintinueve | | - 16 | 28 | veintiocho | | - 27 | 26 | veintiseis | | - 13 | 27 | veintisiete | | - 7 | 23 | veintitres | | - 8 | 21 | veintiuno | | -(32 rows) - -SELECT a,b,c,substring(d for 30), length(d) from clstr_tst ORDER BY a; - a | b | c | substring | length -----+----+---------------+--------------------------------+-------- - 1 | 11 | once | | - 2 | 10 | diez | | - 3 | 31 | treinta y uno | | - 4 | 22 | veintidos | | - 5 | 3 | tres | | - 6 | 20 | veinte | | - 7 | 23 | veintitres | | - 8 | 21 | veintiuno | | - 9 | 4 | cuatro | | - 10 | 14 | catorce | | - 11 | 2 | dos | | - 12 | 18 | dieciocho | | - 13 | 27 | veintisiete | | - 14 | 25 | veinticinco | | - 15 | 13 | trece | | - 16 | 28 | veintiocho | | - 17 | 32 | treinta y dos | | - 18 | 5 | cinco | | - 19 | 29 | veintinueve | | - 20 | 1 | uno | | - 21 | 24 | veinticuatro | | - 22 | 30 | treinta | | - 23 | 12 | doce | | - 24 | 17 | diecisiete | | - 25 | 9 | nueve | | - 26 | 19 | diecinueve | | - 27 | 26 | veintiseis | | - 28 | 15 | quince | | - 29 | 7 | siete | | - 30 | 16 | dieciseis | | - 31 | 8 | ocho | | - 32 | 6 | seis | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzy | 500000 -(32 rows) - -SELECT a,b,c,substring(d for 30), length(d) from clstr_tst ORDER BY b; - a | b | c | substring | length -----+----+---------------+--------------------------------+-------- - 20 | 1 | uno | | - 11 | 2 | dos | | - 5 | 3 | tres | | - 9 | 4 | cuatro | | - 18 | 5 | cinco | | - 32 | 6 | seis | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzy | 500000 - 29 | 7 | siete | | - 31 | 8 | ocho | | - 25 | 9 | nueve | | - 2 | 10 | diez | | - 1 | 11 | once | | - 23 | 12 | doce | | - 15 | 13 | trece | | - 10 | 14 | catorce | | - 28 | 15 | quince | | - 30 | 16 | dieciseis | | - 24 | 17 | diecisiete | | - 12 | 18 | dieciocho | | - 26 | 19 | diecinueve | | - 6 | 20 | veinte | | - 8 | 21 | veintiuno | | - 4 | 22 | veintidos | | - 7 | 23 | veintitres | | - 21 | 24 | veinticuatro | | - 14 | 25 | veinticinco | | - 27 | 26 | veintiseis | | - 13 | 27 | veintisiete | | - 16 | 28 | veintiocho | | - 19 | 29 | veintinueve | | - 22 | 30 | treinta | | - 3 | 31 | treinta y uno | | - 17 | 32 | treinta y dos | | -(32 rows) - -SELECT a,b,c,substring(d for 30), length(d) from clstr_tst ORDER BY c; - a | b | c | substring | length -----+----+---------------+--------------------------------+-------- - 10 | 14 | catorce | | - 18 | 5 | cinco | | - 9 | 4 | cuatro | | - 26 | 19 | diecinueve | | - 12 | 18 | dieciocho | | - 30 | 16 | dieciseis | | - 24 | 17 | diecisiete | | - 2 | 10 | diez | | - 23 | 12 | doce | | - 11 | 2 | dos | | - 25 | 9 | nueve | | - 31 | 8 | ocho | | - 1 | 11 | once | | - 28 | 15 | 
quince | | - 32 | 6 | seis | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzy | 500000 - 29 | 7 | siete | | - 15 | 13 | trece | | - 22 | 30 | treinta | | - 17 | 32 | treinta y dos | | - 3 | 31 | treinta y uno | | - 5 | 3 | tres | | - 20 | 1 | uno | | - 6 | 20 | veinte | | - 14 | 25 | veinticinco | | - 21 | 24 | veinticuatro | | - 4 | 22 | veintidos | | - 19 | 29 | veintinueve | | - 16 | 28 | veintiocho | | - 27 | 26 | veintiseis | | - 13 | 27 | veintisiete | | - 7 | 23 | veintitres | | - 8 | 21 | veintiuno | | -(32 rows) - --- Verify that inheritance link still works -INSERT INTO clstr_tst_inh VALUES (0, 100, 'in child table'); -SELECT a,b,c,substring(d for 30), length(d) from clstr_tst; - a | b | c | substring | length -----+-----+----------------+--------------------------------+-------- - 10 | 14 | catorce | | - 18 | 5 | cinco | | - 9 | 4 | cuatro | | - 26 | 19 | diecinueve | | - 12 | 18 | dieciocho | | - 30 | 16 | dieciseis | | - 24 | 17 | diecisiete | | - 2 | 10 | diez | | - 23 | 12 | doce | | - 11 | 2 | dos | | - 25 | 9 | nueve | | - 31 | 8 | ocho | | - 1 | 11 | once | | - 28 | 15 | quince | | - 32 | 6 | seis | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzy | 500000 - 29 | 7 | siete | | - 15 | 13 | trece | | - 22 | 30 | treinta | | - 17 | 32 | treinta y dos | | - 3 | 31 | treinta y uno | | - 5 | 3 | tres | | - 20 | 1 | uno | | - 6 | 20 | veinte | | - 14 | 25 | veinticinco | | - 21 | 24 | veinticuatro | | - 4 | 22 | veintidos | | - 19 | 29 | veintinueve | | - 16 | 28 | veintiocho | | - 27 | 26 | veintiseis | | - 13 | 27 | veintisiete | | - 7 | 23 | veintitres | | - 8 | 21 | veintiuno | | - 0 | 100 | in child table | | -(33 rows) - --- Verify that foreign key link still works -INSERT INTO clstr_tst (b, c) VALUES (1111, 'this should fail'); -ERROR: insert or update on table "clstr_tst" violates foreign key constraint "clstr_tst_con" -DETAIL: Key (b)=(1111) is not present in table "clstr_tst_s". 
-SELECT conname FROM pg_constraint WHERE conrelid = 'clstr_tst'::regclass -ORDER BY 1; - conname ----------------------- - clstr_tst_a_not_null - clstr_tst_con - clstr_tst_pkey -(3 rows) - -SELECT relname, relkind, - EXISTS(SELECT 1 FROM pg_class WHERE oid = c.reltoastrelid) AS hastoast -FROM pg_class c WHERE relname LIKE 'clstr_tst%' ORDER BY relname; - relname | relkind | hastoast -----------------------+---------+---------- - clstr_tst | r | t - clstr_tst_a_seq | S | f - clstr_tst_b | i | f - clstr_tst_b_c | i | f - clstr_tst_c | i | f - clstr_tst_c_b | i | f - clstr_tst_inh | r | t - clstr_tst_pkey | i | f - clstr_tst_s | r | f - clstr_tst_s_pkey | i | f - clstr_tst_s_rf_a_seq | S | f -(11 rows) - --- Verify that indisclustered is correctly set -SELECT pg_class.relname FROM pg_index, pg_class, pg_class AS pg_class_2 -WHERE pg_class.oid=indexrelid - AND indrelid=pg_class_2.oid - AND pg_class_2.relname = 'clstr_tst' - AND indisclustered; - relname -------------- - clstr_tst_c -(1 row) - --- Try changing indisclustered -ALTER TABLE clstr_tst CLUSTER ON clstr_tst_b_c; -SELECT pg_class.relname FROM pg_index, pg_class, pg_class AS pg_class_2 -WHERE pg_class.oid=indexrelid - AND indrelid=pg_class_2.oid - AND pg_class_2.relname = 'clstr_tst' - AND indisclustered; - relname ---------------- - clstr_tst_b_c -(1 row) - --- Try turning off all clustering -ALTER TABLE clstr_tst SET WITHOUT CLUSTER; -SELECT pg_class.relname FROM pg_index, pg_class, pg_class AS pg_class_2 -WHERE pg_class.oid=indexrelid - AND indrelid=pg_class_2.oid - AND pg_class_2.relname = 'clstr_tst' - AND indisclustered; - relname ---------- -(0 rows) - --- Verify that toast tables are clusterable -CLUSTER pg_toast.pg_toast_826 USING pg_toast_826_index; --- Verify that clustering all tables does in fact cluster the right ones -CREATE USER regress_clstr_user; -CREATE TABLE clstr_1 (a INT PRIMARY KEY); -CREATE TABLE clstr_2 (a INT PRIMARY KEY); -CREATE TABLE clstr_3 (a INT PRIMARY KEY); -ALTER TABLE clstr_1 OWNER TO regress_clstr_user; -ALTER TABLE clstr_3 OWNER TO regress_clstr_user; -GRANT SELECT ON clstr_2 TO regress_clstr_user; -INSERT INTO clstr_1 VALUES (2); -INSERT INTO clstr_1 VALUES (1); -INSERT INTO clstr_2 VALUES (2); -INSERT INTO clstr_2 VALUES (1); -INSERT INTO clstr_3 VALUES (2); -INSERT INTO clstr_3 VALUES (1); --- "CLUSTER <tablename>" on a table that hasn't been clustered -CLUSTER clstr_2; -ERROR: there is no previously clustered index for table "clstr_2" -CLUSTER clstr_1_pkey ON clstr_1; -CLUSTER clstr_2 USING clstr_2_pkey; -SELECT * FROM clstr_1 UNION ALL - SELECT * FROM clstr_2 UNION ALL - SELECT * FROM clstr_3; - a ---- - 1 - 2 - 1 - 2 - 2 - 1 -(6 rows) - --- revert to the original state -DELETE FROM clstr_1; -DELETE FROM clstr_2; -DELETE FROM clstr_3; -INSERT INTO clstr_1 VALUES (2); -INSERT INTO clstr_1 VALUES (1); -INSERT INTO clstr_2 VALUES (2); -INSERT INTO clstr_2 VALUES (1); -INSERT INTO clstr_3 VALUES (2); -INSERT INTO clstr_3 VALUES (1); --- this user can only cluster clstr_1 and clstr_3, but the latter --- has not been clustered -SET SESSION AUTHORIZATION regress_clstr_user; -CLUSTER; -SELECT * FROM clstr_1 UNION ALL - SELECT * FROM clstr_2 UNION ALL - SELECT * FROM clstr_3; - a ---- - 1 - 2 - 2 - 1 - 2 - 1 -(6 rows) - --- cluster a single table using the indisclustered bit previously set -DELETE FROM clstr_1; -INSERT INTO clstr_1 VALUES (2); -INSERT INTO clstr_1 VALUES (1); -CLUSTER clstr_1; -SELECT * FROM clstr_1; - a ---- - 1 - 2 -(2 rows) - --- Test MVCC-safety of cluster.
There isn't much we can do to verify the --- results with a single backend... -CREATE TABLE clustertest (key int PRIMARY KEY); -INSERT INTO clustertest VALUES (10); -INSERT INTO clustertest VALUES (20); -INSERT INTO clustertest VALUES (30); -INSERT INTO clustertest VALUES (40); -INSERT INTO clustertest VALUES (50); --- Use a transaction so that updates are not committed when CLUSTER sees 'em -BEGIN; --- Test update where the old row version is found first in the scan -UPDATE clustertest SET key = 100 WHERE key = 10; --- Test update where the new row version is found first in the scan -UPDATE clustertest SET key = 35 WHERE key = 40; --- Test longer update chain -UPDATE clustertest SET key = 60 WHERE key = 50; -UPDATE clustertest SET key = 70 WHERE key = 60; -UPDATE clustertest SET key = 80 WHERE key = 70; -SELECT * FROM clustertest; - key ------ - 20 - 30 - 100 - 35 - 80 -(5 rows) - -CLUSTER clustertest_pkey ON clustertest; -SELECT * FROM clustertest; - key ------ - 20 - 30 - 35 - 80 - 100 -(5 rows) - -COMMIT; -SELECT * FROM clustertest; - key ------ - 20 - 30 - 35 - 80 - 100 -(5 rows) - --- check that temp tables can be clustered -create temp table clstr_temp (col1 int primary key, col2 text); -insert into clstr_temp values (2, 'two'), (1, 'one'); -cluster clstr_temp using clstr_temp_pkey; -select * from clstr_temp; - col1 | col2 -------+------ - 1 | one - 2 | two -(2 rows) - -drop table clstr_temp; -RESET SESSION AUTHORIZATION; --- check clustering an empty table -DROP TABLE clustertest; -CREATE TABLE clustertest (f1 int PRIMARY KEY); -CLUSTER clustertest USING clustertest_pkey; -CLUSTER clustertest; --- Check that partitioned tables can be clustered -CREATE TABLE clstrpart (a int) PARTITION BY RANGE (a); -CREATE TABLE clstrpart1 PARTITION OF clstrpart FOR VALUES FROM (1) TO (10) PARTITION BY RANGE (a); -CREATE TABLE clstrpart11 PARTITION OF clstrpart1 FOR VALUES FROM (1) TO (5); -CREATE TABLE clstrpart12 PARTITION OF clstrpart1 FOR VALUES FROM (5) TO (10) PARTITION BY RANGE (a); -CREATE TABLE clstrpart2 PARTITION OF clstrpart FOR VALUES FROM (10) TO (20); -CREATE TABLE clstrpart3 PARTITION OF clstrpart DEFAULT PARTITION BY RANGE (a); -CREATE TABLE clstrpart33 PARTITION OF clstrpart3 DEFAULT; -CREATE INDEX clstrpart_only_idx ON ONLY clstrpart (a); -CLUSTER clstrpart USING clstrpart_only_idx; -- fails -ERROR: cannot cluster on invalid index "clstrpart_only_idx" -DROP INDEX clstrpart_only_idx; -CREATE INDEX clstrpart_idx ON clstrpart (a); --- Check that clustering sets new relfilenodes: -CREATE TEMP TABLE old_cluster_info AS SELECT relname, level, relfilenode, relkind FROM pg_partition_tree('clstrpart'::regclass) AS tree JOIN pg_class c ON c.oid=tree.relid ; -CLUSTER clstrpart USING clstrpart_idx; -CREATE TEMP TABLE new_cluster_info AS SELECT relname, level, relfilenode, relkind FROM pg_partition_tree('clstrpart'::regclass) AS tree JOIN pg_class c ON c.oid=tree.relid ; -SELECT relname, old.level, old.relkind, old.relfilenode = new.relfilenode FROM old_cluster_info AS old JOIN new_cluster_info AS new USING (relname) ORDER BY relname COLLATE "C"; - relname | level | relkind | ?column? 
--------------+-------+---------+---------- - clstrpart | 0 | p | t - clstrpart1 | 1 | p | t - clstrpart11 | 2 | r | f - clstrpart12 | 2 | p | t - clstrpart2 | 1 | r | f - clstrpart3 | 1 | p | t - clstrpart33 | 2 | r | f -(7 rows) - --- Partitioned indexes aren't and can't be marked un/clustered: -\d clstrpart - Partitioned table "public.clstrpart" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | -Partition key: RANGE (a) -Indexes: - "clstrpart_idx" btree (a) -Number of partitions: 3 (Use \d+ to list them.) - -CLUSTER clstrpart; -ERROR: there is no previously clustered index for table "clstrpart" -ALTER TABLE clstrpart SET WITHOUT CLUSTER; -ERROR: cannot mark index clustered in partitioned table -ALTER TABLE clstrpart CLUSTER ON clstrpart_idx; -ERROR: cannot mark index clustered in partitioned table -DROP TABLE clstrpart; --- Ownership of partitions is checked -CREATE TABLE ptnowner(i int unique) PARTITION BY LIST (i); -CREATE INDEX ptnowner_i_idx ON ptnowner(i); -CREATE TABLE ptnowner1 PARTITION OF ptnowner FOR VALUES IN (1); -CREATE ROLE regress_ptnowner; -CREATE TABLE ptnowner2 PARTITION OF ptnowner FOR VALUES IN (2); -ALTER TABLE ptnowner1 OWNER TO regress_ptnowner; -ALTER TABLE ptnowner OWNER TO regress_ptnowner; -CREATE TEMP TABLE ptnowner_oldnodes AS - SELECT oid, relname, relfilenode FROM pg_partition_tree('ptnowner') AS tree - JOIN pg_class AS c ON c.oid=tree.relid; -SET SESSION AUTHORIZATION regress_ptnowner; -CLUSTER ptnowner USING ptnowner_i_idx; -RESET SESSION AUTHORIZATION; -SELECT a.relname, a.relfilenode=b.relfilenode FROM pg_class a - JOIN ptnowner_oldnodes b USING (oid) ORDER BY a.relname COLLATE "C"; - relname | ?column? ------------+---------- - ptnowner | t - ptnowner1 | f - ptnowner2 | t -(3 rows) - -DROP TABLE ptnowner; -DROP ROLE regress_ptnowner; --- Test CLUSTER with external tuplesorting -create table clstr_4 as select * from tenk1; -create index cluster_sort on clstr_4 (hundred, thousand, tenthous); --- ensure we don't use the index in CLUSTER nor the checking SELECTs -set enable_indexscan = off; --- Use external sort: -set maintenance_work_mem = '1MB'; -cluster clstr_4 using cluster_sort; -select * from -(select hundred, lag(hundred) over () as lhundred, - thousand, lag(thousand) over () as lthousand, - tenthous, lag(tenthous) over () as ltenthous from clstr_4) ss -where row(hundred, thousand, tenthous) <= row(lhundred, lthousand, ltenthous); - hundred | lhundred | thousand | lthousand | tenthous | ltenthous ----------+----------+----------+-----------+----------+----------- -(0 rows) - -reset enable_indexscan; -reset maintenance_work_mem; --- test CLUSTER on expression index -CREATE TABLE clstr_expression(id serial primary key, a int, b text COLLATE "C"); -INSERT INTO clstr_expression(a, b) SELECT g.i % 42, 'prefix'||g.i FROM generate_series(1, 133) g(i); -CREATE INDEX clstr_expression_minus_a ON clstr_expression ((-a), b); -CREATE INDEX clstr_expression_upper_b ON clstr_expression ((upper(b))); --- verify indexes work before cluster -BEGIN; -SET LOCAL enable_seqscan = false; -EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3'; - QUERY PLAN ---------------------------------------------------------------- - Index Scan using clstr_expression_upper_b on clstr_expression - Index Cond: (upper(b) = 'PREFIX3'::text) -(2 rows) - -SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3'; - id | a | b -----+---+--------- - 3 | 3 | prefix3 -(1 row) - -EXPLAIN (COSTS OFF) 
SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b; - QUERY PLAN ---------------------------------------------------------------- - Index Scan using clstr_expression_minus_a on clstr_expression - Index Cond: ((- a) = '-3'::integer) -(2 rows) - -SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b; - id | a | b ------+---+----------- - 129 | 3 | prefix129 - 3 | 3 | prefix3 - 45 | 3 | prefix45 - 87 | 3 | prefix87 -(4 rows) - -COMMIT; --- and after clustering on clstr_expression_minus_a -CLUSTER clstr_expression USING clstr_expression_minus_a; -WITH rows AS - (SELECT ctid, lag(a) OVER (ORDER BY ctid) AS la, a FROM clstr_expression) -SELECT * FROM rows WHERE la < a; - ctid | la | a -------+----+--- -(0 rows) - -BEGIN; -SET LOCAL enable_seqscan = false; -EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3'; - QUERY PLAN ---------------------------------------------------------------- - Index Scan using clstr_expression_upper_b on clstr_expression - Index Cond: (upper(b) = 'PREFIX3'::text) -(2 rows) - -SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3'; - id | a | b -----+---+--------- - 3 | 3 | prefix3 -(1 row) - -EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b; - QUERY PLAN ---------------------------------------------------------------- - Index Scan using clstr_expression_minus_a on clstr_expression - Index Cond: ((- a) = '-3'::integer) -(2 rows) - -SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b; - id | a | b ------+---+----------- - 129 | 3 | prefix129 - 3 | 3 | prefix3 - 45 | 3 | prefix45 - 87 | 3 | prefix87 -(4 rows) - -COMMIT; --- and after clustering on clstr_expression_upper_b -CLUSTER clstr_expression USING clstr_expression_upper_b; -WITH rows AS - (SELECT ctid, lag(b) OVER (ORDER BY ctid) AS lb, b FROM clstr_expression) -SELECT * FROM rows WHERE upper(lb) > upper(b); - ctid | lb | b -------+----+--- -(0 rows) - -BEGIN; -SET LOCAL enable_seqscan = false; -EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3'; - QUERY PLAN ---------------------------------------------------------------- - Index Scan using clstr_expression_upper_b on clstr_expression - Index Cond: (upper(b) = 'PREFIX3'::text) -(2 rows) - -SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3'; - id | a | b -----+---+--------- - 3 | 3 | prefix3 -(1 row) - -EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b; - QUERY PLAN ---------------------------------------------------------------- - Index Scan using clstr_expression_minus_a on clstr_expression - Index Cond: ((- a) = '-3'::integer) -(2 rows) - -SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b; - id | a | b ------+---+----------- - 129 | 3 | prefix129 - 3 | 3 | prefix3 - 45 | 3 | prefix45 - 87 | 3 | prefix87 -(4 rows) - -COMMIT; --- clean up -DROP TABLE clustertest; -DROP TABLE clstr_1; -DROP TABLE clstr_2; -DROP TABLE clstr_3; -DROP TABLE clstr_4; -DROP TABLE clstr_expression; -DROP USER regress_clstr_user; +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
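The whole cluster test is lost the same way: the session could not connect while the server was still in recovery. What the missing output pins down is CLUSTER's basic contract: the heap is rewritten in the order of the chosen index, pg_index.indisclustered remembers that choice so a bare CLUSTER can reuse it, and ALTER TABLE can set or clear it. A compressed sketch of that contract (demo_clstr is an illustrative name):

CREATE TABLE demo_clstr (k int PRIMARY KEY, v text);
INSERT INTO demo_clstr VALUES (3, 'c'), (1, 'a'), (2, 'b');
CLUSTER demo_clstr USING demo_clstr_pkey;   -- rewrites the heap in pkey order
SELECT indexrelid::regclass FROM pg_index   -- the choice is recorded...
  WHERE indrelid = 'demo_clstr'::regclass AND indisclustered;
CLUSTER demo_clstr;                         -- ...so a bare CLUSTER reuses it
ALTER TABLE demo_clstr CLUSTER ON demo_clstr_pkey;  -- set it without rewriting
ALTER TABLE demo_clstr SET WITHOUT CLUSTER;         -- clear it again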
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/dependency.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/dependency.out --- /tmp/cirrus-ci-build/src/test/regress/expected/dependency.out 2024-03-07 14:25:00.330144000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/dependency.out 2024-03-07 14:27:17.340892000 +0000 @@ -1,153 +1,2 @@ --- --- DEPENDENCIES --- -CREATE USER regress_dep_user; -CREATE USER regress_dep_user2; -CREATE USER regress_dep_user3; -CREATE GROUP regress_dep_group; -CREATE TABLE deptest (f1 serial primary key, f2 text); -GRANT SELECT ON TABLE deptest TO GROUP regress_dep_group; -GRANT ALL ON TABLE deptest TO regress_dep_user, regress_dep_user2; --- can't drop either one because they have privileges somewhere -DROP USER regress_dep_user; -ERROR: role "regress_dep_user" cannot be dropped because some objects depend on it -DETAIL: privileges for table deptest -DROP GROUP regress_dep_group; -ERROR: role "regress_dep_group" cannot be dropped because some objects depend on it -DETAIL: privileges for table deptest --- if we revoke the privileges we can drop the group -REVOKE SELECT ON deptest FROM GROUP regress_dep_group; -DROP GROUP regress_dep_group; --- can't drop the user if we revoke the privileges partially -REVOKE SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES ON deptest FROM regress_dep_user; -DROP USER regress_dep_user; -ERROR: role "regress_dep_user" cannot be dropped because some objects depend on it -DETAIL: privileges for table deptest --- now we are OK to drop the user -REVOKE TRIGGER ON deptest FROM regress_dep_user; -DROP USER regress_dep_user; --- we are OK too if we drop the privileges all at once -REVOKE ALL ON deptest FROM regress_dep_user2; -DROP USER regress_dep_user2; --- can't drop the owner of an object --- the error message detail here would include a pg_toast_nnn name that --- is not constant, so suppress it -\set VERBOSITY terse -ALTER TABLE deptest OWNER TO regress_dep_user3; -DROP USER regress_dep_user3; -ERROR: role "regress_dep_user3" cannot be dropped because some objects depend on it -\set VERBOSITY default --- if we drop the object, we can drop the user too -DROP TABLE deptest; -DROP USER regress_dep_user3; --- Test DROP OWNED -CREATE USER regress_dep_user0; -CREATE USER regress_dep_user1; -CREATE USER regress_dep_user2; -SET SESSION AUTHORIZATION regress_dep_user0; --- permission denied -DROP OWNED BY regress_dep_user1; -ERROR: permission denied to drop objects -DETAIL: Only roles with privileges of role "regress_dep_user1" may drop objects owned by it. -DROP OWNED BY regress_dep_user0, regress_dep_user2; -ERROR: permission denied to drop objects -DETAIL: Only roles with privileges of role "regress_dep_user2" may drop objects owned by it. -REASSIGN OWNED BY regress_dep_user0 TO regress_dep_user1; -ERROR: permission denied to reassign objects -DETAIL: Only roles with privileges of role "regress_dep_user1" may reassign objects to it. -REASSIGN OWNED BY regress_dep_user1 TO regress_dep_user0; -ERROR: permission denied to reassign objects -DETAIL: Only roles with privileges of role "regress_dep_user1" may reassign objects owned by it.
--- this one is allowed -DROP OWNED BY regress_dep_user0; -CREATE TABLE deptest1 (f1 int unique); -GRANT ALL ON deptest1 TO regress_dep_user1 WITH GRANT OPTION; -SET SESSION AUTHORIZATION regress_dep_user1; -CREATE TABLE deptest (a serial primary key, b text); -GRANT ALL ON deptest1 TO regress_dep_user2; -RESET SESSION AUTHORIZATION; -\z deptest1 - Access privileges - Schema | Name | Type | Access privileges | Column privileges | Policies ---------+----------+-------+----------------------------------------------------+-------------------+---------- - public | deptest1 | table | regress_dep_user0=arwdDxt/regress_dep_user0 +| | - | | | regress_dep_user1=a*r*w*d*D*x*t*/regress_dep_user0+| | - | | | regress_dep_user2=arwdDxt/regress_dep_user1 | | -(1 row) - -DROP OWNED BY regress_dep_user1; --- all grants revoked -\z deptest1 - Access privileges - Schema | Name | Type | Access privileges | Column privileges | Policies ---------+----------+-------+---------------------------------------------+-------------------+---------- - public | deptest1 | table | regress_dep_user0=arwdDxt/regress_dep_user0 | | -(1 row) - --- table was dropped -\d deptest --- Test REASSIGN OWNED -GRANT ALL ON deptest1 TO regress_dep_user1; -GRANT CREATE ON DATABASE regression TO regress_dep_user1; -SET SESSION AUTHORIZATION regress_dep_user1; -CREATE SCHEMA deptest; -CREATE TABLE deptest (a serial primary key, b text); -ALTER DEFAULT PRIVILEGES FOR ROLE regress_dep_user1 IN SCHEMA deptest - GRANT ALL ON TABLES TO regress_dep_user2; -CREATE FUNCTION deptest_func() RETURNS void LANGUAGE plpgsql - AS $$ BEGIN END; $$; -CREATE TYPE deptest_enum AS ENUM ('red'); -CREATE TYPE deptest_range AS RANGE (SUBTYPE = int4); -CREATE TABLE deptest2 (f1 int); --- make a serial column the hard way -CREATE SEQUENCE ss1; -ALTER TABLE deptest2 ALTER f1 SET DEFAULT nextval('ss1'); -ALTER SEQUENCE ss1 OWNED BY deptest2.f1; --- When reassigning ownership of a composite type, its pg_class entry --- should match -CREATE TYPE deptest_t AS (a int); -SELECT typowner = relowner -FROM pg_type JOIN pg_class c ON typrelid = c.oid WHERE typname = 'deptest_t'; - ?column? ----------- - t -(1 row) - -RESET SESSION AUTHORIZATION; -REASSIGN OWNED BY regress_dep_user1 TO regress_dep_user2; -\dt deptest - List of relations - Schema | Name | Type | Owner ---------+---------+-------+------------------- - public | deptest | table | regress_dep_user2 -(1 row) - -SELECT typowner = relowner -FROM pg_type JOIN pg_class c ON typrelid = c.oid WHERE typname = 'deptest_t'; - ?column? 
----------- - t -(1 row) - --- doesn't work: grant still exists -DROP USER regress_dep_user1; -ERROR: role "regress_dep_user1" cannot be dropped because some objects depend on it -DETAIL: privileges for database regression -privileges for table deptest1 -owner of default privileges on new relations belonging to role regress_dep_user1 in schema deptest -DROP OWNED BY regress_dep_user1; -DROP USER regress_dep_user1; -DROP USER regress_dep_user2; -ERROR: role "regress_dep_user2" cannot be dropped because some objects depend on it -DETAIL: owner of schema deptest -owner of sequence deptest_a_seq -owner of table deptest -owner of function deptest_func() -owner of type deptest_enum -owner of type deptest_range -owner of table deptest2 -owner of sequence ss1 -owner of type deptest_t -DROP OWNED BY regress_dep_user2, regress_dep_user0; -DROP USER regress_dep_user2; -DROP USER regress_dep_user0; +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/guc.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/guc.out --- /tmp/cirrus-ci-build/src/test/regress/expected/guc.out 2024-03-07 14:25:00.330807000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/guc.out 2024-03-07 14:27:17.337895000 +0000 @@ -1,890 +1,2 @@ --- pg_regress should ensure that this default value applies; however --- we can't rely on any specific default value of vacuum_cost_delay -SHOW datestyle; - DateStyle ---------------- - Postgres, MDY -(1 row) - --- SET to some nondefault value -SET vacuum_cost_delay TO 40; -SET datestyle = 'ISO, YMD'; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - --- SET LOCAL has no effect outside of a transaction -SET LOCAL vacuum_cost_delay TO 50; -WARNING: SET LOCAL can only be used in transaction blocks -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SET LOCAL datestyle = 'SQL'; -WARNING: SET LOCAL can only be used in transaction blocks -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - --- SET LOCAL within a transaction that commits -BEGIN; -SET LOCAL vacuum_cost_delay TO 50; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 50ms -(1 row) - -SET LOCAL datestyle = 'SQL'; -SHOW datestyle; - DateStyle ------------ - SQL, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz -------------------------- - 08/13/2006 12:34:56 PDT -(1 row) - -COMMIT; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - --- SET should be reverted after ROLLBACK -BEGIN; -SET vacuum_cost_delay TO 60; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 60ms -(1 row) - -SET datestyle = 'German'; -SHOW datestyle; - DateStyle -------------- - German, DMY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz 
-------------------------- - 13.08.2006 12:34:56 PDT -(1 row) - -ROLLBACK; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - --- Some tests with subtransactions -BEGIN; -SET vacuum_cost_delay TO 70; -SET datestyle = 'MDY'; -SHOW datestyle; - DateStyle ------------ - ISO, MDY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - -SAVEPOINT first_sp; -SET vacuum_cost_delay TO 80.1; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 80100us -(1 row) - -SET datestyle = 'German, DMY'; -SHOW datestyle; - DateStyle -------------- - German, DMY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz -------------------------- - 13.08.2006 12:34:56 PDT -(1 row) - -ROLLBACK TO first_sp; -SHOW datestyle; - DateStyle ------------ - ISO, MDY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - -SAVEPOINT second_sp; -SET vacuum_cost_delay TO '900us'; -SET datestyle = 'SQL, YMD'; -SHOW datestyle; - DateStyle ------------ - SQL, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz -------------------------- - 08/13/2006 12:34:56 PDT -(1 row) - -SAVEPOINT third_sp; -SET vacuum_cost_delay TO 100; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 100ms -(1 row) - -SET datestyle = 'Postgres, MDY'; -SHOW datestyle; - DateStyle ---------------- - Postgres, MDY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------------- - Sun Aug 13 12:34:56 2006 PDT -(1 row) - -ROLLBACK TO third_sp; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 900us -(1 row) - -SHOW datestyle; - DateStyle ------------ - SQL, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz -------------------------- - 08/13/2006 12:34:56 PDT -(1 row) - -ROLLBACK TO second_sp; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 70ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, MDY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - -ROLLBACK; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - --- SET LOCAL with Savepoints -BEGIN; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - -SAVEPOINT sp; -SET LOCAL vacuum_cost_delay TO 30; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 30ms -(1 row) - -SET LOCAL datestyle = 'Postgres, MDY'; -SHOW datestyle; - DateStyle ---------------- - Postgres, MDY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------------- - Sun Aug 13 12:34:56 2006 PDT -(1 row) - -ROLLBACK TO sp; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 
row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - -ROLLBACK; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - --- SET LOCAL persists through RELEASE (which was not true in 8.0-8.2) -BEGIN; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - -SAVEPOINT sp; -SET LOCAL vacuum_cost_delay TO 30; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 30ms -(1 row) - -SET LOCAL datestyle = 'Postgres, MDY'; -SHOW datestyle; - DateStyle ---------------- - Postgres, MDY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------------- - Sun Aug 13 12:34:56 2006 PDT -(1 row) - -RELEASE SAVEPOINT sp; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 30ms -(1 row) - -SHOW datestyle; - DateStyle ---------------- - Postgres, MDY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------------- - Sun Aug 13 12:34:56 2006 PDT -(1 row) - -ROLLBACK; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - --- SET followed by SET LOCAL -BEGIN; -SET vacuum_cost_delay TO 40; -SET LOCAL vacuum_cost_delay TO 50; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 50ms -(1 row) - -SET datestyle = 'ISO, DMY'; -SET LOCAL datestyle = 'Postgres, MDY'; -SHOW datestyle; - DateStyle ---------------- - Postgres, MDY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------------- - Sun Aug 13 12:34:56 2006 PDT -(1 row) - -COMMIT; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, DMY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - --- --- Test RESET. We use datestyle because the reset value is forced by --- pg_regress, so it doesn't depend on the installation's configuration. --- -SET datestyle = iso, ymd; -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - -RESET datestyle; -SHOW datestyle; - DateStyle ---------------- - Postgres, MDY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------------- - Sun Aug 13 12:34:56 2006 PDT -(1 row) - --- Test some simple error cases -SET seq_page_cost TO 'NaN'; -ERROR: invalid value for parameter "seq_page_cost": "NaN" -SET vacuum_cost_delay TO '10s'; -ERROR: 10000 ms is outside the valid range for parameter "vacuum_cost_delay" (0 .. 100) -SET no_such_variable TO 42; -ERROR: unrecognized configuration parameter "no_such_variable" --- Test "custom" GUCs created on the fly (which aren't really an --- intended feature, but many people use them). 
-SHOW custom.my_guc; -- error, not known yet -ERROR: unrecognized configuration parameter "custom.my_guc" -SET custom.my_guc = 42; -SHOW custom.my_guc; - custom.my_guc ---------------- - 42 -(1 row) - -RESET custom.my_guc; -- this makes it go to empty, not become unknown again -SHOW custom.my_guc; - custom.my_guc ---------------- - -(1 row) - -SET custom.my.qualified.guc = 'foo'; -SHOW custom.my.qualified.guc; - custom.my.qualified.guc -------------------------- - foo -(1 row) - -SET custom."bad-guc" = 42; -- disallowed because -c cannot set this name -ERROR: invalid configuration parameter name "custom.bad-guc" -DETAIL: Custom parameter names must be two or more simple identifiers separated by dots. -SHOW custom."bad-guc"; -ERROR: unrecognized configuration parameter "custom.bad-guc" -SET special."weird name" = 'foo'; -- could be allowed, but we choose not to -ERROR: invalid configuration parameter name "special.weird name" -DETAIL: Custom parameter names must be two or more simple identifiers separated by dots. -SHOW special."weird name"; -ERROR: unrecognized configuration parameter "special.weird name" --- Check what happens when you try to set a "custom" GUC within the --- namespace of an extension. -SET plpgsql.extra_foo_warnings = true; -- allowed if plpgsql is not loaded yet -LOAD 'plpgsql'; -- this will throw a warning and delete the variable -WARNING: invalid configuration parameter name "plpgsql.extra_foo_warnings", removing it -DETAIL: "plpgsql" is now a reserved prefix. -SET plpgsql.extra_foo_warnings = true; -- now, it's an error -ERROR: invalid configuration parameter name "plpgsql.extra_foo_warnings" -DETAIL: "plpgsql" is a reserved prefix. -SHOW plpgsql.extra_foo_warnings; -ERROR: unrecognized configuration parameter "plpgsql.extra_foo_warnings" --- --- Test DISCARD TEMP --- -CREATE TEMP TABLE reset_test ( data text ) ON COMMIT DELETE ROWS; -SELECT relname FROM pg_class WHERE relname = 'reset_test'; - relname ------------- - reset_test -(1 row) - -DISCARD TEMP; -SELECT relname FROM pg_class WHERE relname = 'reset_test'; - relname ---------- -(0 rows) - --- --- Test DISCARD ALL --- --- do changes -DECLARE foo CURSOR WITH HOLD FOR SELECT 1; -PREPARE foo AS SELECT 1; -LISTEN foo_event; -SET vacuum_cost_delay = 13; -CREATE TEMP TABLE tmp_foo (data text) ON COMMIT DELETE ROWS; -CREATE ROLE regress_guc_user; -SET SESSION AUTHORIZATION regress_guc_user; --- look changes -SELECT pg_listening_channels(); - pg_listening_channels ------------------------ - foo_event -(1 row) - -SELECT name FROM pg_prepared_statements; - name ------- - foo -(1 row) - -SELECT name FROM pg_cursors; - name ------- - foo -(1 row) - -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 13ms -(1 row) - -SELECT relname from pg_class where relname = 'tmp_foo'; - relname ---------- - tmp_foo -(1 row) - -SELECT current_user = 'regress_guc_user'; - ?column? ----------- - t -(1 row) - --- discard everything -DISCARD ALL; --- look again -SELECT pg_listening_channels(); - pg_listening_channels ------------------------ -(0 rows) - -SELECT name FROM pg_prepared_statements; - name ------- -(0 rows) - -SELECT name FROM pg_cursors; - name ------- -(0 rows) - -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 0 -(1 row) - -SELECT relname from pg_class where relname = 'tmp_foo'; - relname ---------- -(0 rows) - -SELECT current_user = 'regress_guc_user'; - ?column? 
----------- - f -(1 row) - -DROP ROLE regress_guc_user; --- --- search_path should react to changes in pg_namespace --- -set search_path = foo, public, not_there_initially; -select current_schemas(false); - current_schemas ------------------ - {public} -(1 row) - -create schema not_there_initially; -select current_schemas(false); - current_schemas ------------------------------- - {public,not_there_initially} -(1 row) - -drop schema not_there_initially; -select current_schemas(false); - current_schemas ------------------ - {public} -(1 row) - -reset search_path; --- --- Tests for function-local GUC settings --- -set work_mem = '3MB'; -create function report_guc(text) returns text as -$$ select current_setting($1) $$ language sql -set work_mem = '1MB'; -select report_guc('work_mem'), current_setting('work_mem'); - report_guc | current_setting -------------+----------------- - 1MB | 3MB -(1 row) - -alter function report_guc(text) set work_mem = '2MB'; -select report_guc('work_mem'), current_setting('work_mem'); - report_guc | current_setting -------------+----------------- - 2MB | 3MB -(1 row) - -alter function report_guc(text) reset all; -select report_guc('work_mem'), current_setting('work_mem'); - report_guc | current_setting -------------+----------------- - 3MB | 3MB -(1 row) - --- SET LOCAL is restricted by a function SET option -create or replace function myfunc(int) returns text as $$ -begin - set local work_mem = '2MB'; - return current_setting('work_mem'); -end $$ -language plpgsql -set work_mem = '1MB'; -select myfunc(0), current_setting('work_mem'); - myfunc | current_setting ---------+----------------- - 2MB | 3MB -(1 row) - -alter function myfunc(int) reset all; -select myfunc(0), current_setting('work_mem'); - myfunc | current_setting ---------+----------------- - 2MB | 2MB -(1 row) - -set work_mem = '3MB'; --- but SET isn't -create or replace function myfunc(int) returns text as $$ -begin - set work_mem = '2MB'; - return current_setting('work_mem'); -end $$ -language plpgsql -set work_mem = '1MB'; -select myfunc(0), current_setting('work_mem'); - myfunc | current_setting ---------+----------------- - 2MB | 2MB -(1 row) - -set work_mem = '3MB'; --- it should roll back on error, though -create or replace function myfunc(int) returns text as $$ -begin - set work_mem = '2MB'; - perform 1/$1; - return current_setting('work_mem'); -end $$ -language plpgsql -set work_mem = '1MB'; -select myfunc(0); -ERROR: division by zero -CONTEXT: SQL statement "SELECT 1/$1" -PL/pgSQL function myfunc(integer) line 4 at PERFORM -select current_setting('work_mem'); - current_setting ------------------ - 3MB -(1 row) - -select myfunc(1), current_setting('work_mem'); - myfunc | current_setting ---------+----------------- - 2MB | 2MB -(1 row) - --- check current_setting()'s behavior with invalid setting name -select current_setting('nosuch.setting'); -- FAIL -ERROR: unrecognized configuration parameter "nosuch.setting" -select current_setting('nosuch.setting', false); -- FAIL -ERROR: unrecognized configuration parameter "nosuch.setting" -select current_setting('nosuch.setting', true) is null; - ?column? 
----------- - t -(1 row) - --- after this, all three cases should yield 'nada' -set nosuch.setting = 'nada'; -select current_setting('nosuch.setting'); - current_setting ------------------ - nada -(1 row) - -select current_setting('nosuch.setting', false); - current_setting ------------------ - nada -(1 row) - -select current_setting('nosuch.setting', true); - current_setting ------------------ - nada -(1 row) - --- Normally, CREATE FUNCTION should complain about invalid values in --- function SET options; but not if check_function_bodies is off, --- because that creates ordering hazards for pg_dump -create function func_with_bad_set() returns int as $$ select 1 $$ -language sql -set default_text_search_config = no_such_config; -NOTICE: text search configuration "no_such_config" does not exist -ERROR: invalid value for parameter "default_text_search_config": "no_such_config" -set check_function_bodies = off; -create function func_with_bad_set() returns int as $$ select 1 $$ -language sql -set default_text_search_config = no_such_config; -NOTICE: text search configuration "no_such_config" does not exist -select func_with_bad_set(); -ERROR: invalid value for parameter "default_text_search_config": "no_such_config" -reset check_function_bodies; -set default_with_oids to f; --- Should not allow to set it to true. -set default_with_oids to t; -ERROR: tables declared WITH OIDS are not supported --- Test GUC categories and flag patterns -SELECT pg_settings_get_flags(NULL); - pg_settings_get_flags ------------------------ - -(1 row) - -SELECT pg_settings_get_flags('does_not_exist'); - pg_settings_get_flags ------------------------ - -(1 row) - -CREATE TABLE tab_settings_flags AS SELECT name, category, - 'EXPLAIN' = ANY(flags) AS explain, - 'NO_RESET' = ANY(flags) AS no_reset, - 'NO_RESET_ALL' = ANY(flags) AS no_reset_all, - 'NOT_IN_SAMPLE' = ANY(flags) AS not_in_sample, - 'RUNTIME_COMPUTED' = ANY(flags) AS runtime_computed - FROM pg_show_all_settings() AS psas, - pg_settings_get_flags(psas.name) AS flags; --- Developer GUCs should be flagged with GUC_NOT_IN_SAMPLE: -SELECT name FROM tab_settings_flags - WHERE category = 'Developer Options' AND NOT not_in_sample - ORDER BY 1; - name ------- -(0 rows) - --- Most query-tuning GUCs are flagged as valid for EXPLAIN. --- default_statistics_target is an exception. -SELECT name FROM tab_settings_flags - WHERE category ~ '^Query Tuning' AND NOT explain - ORDER BY 1; - name ---------------------------- - default_statistics_target -(1 row) - --- Runtime-computed GUCs should be part of the preset category. -SELECT name FROM tab_settings_flags - WHERE NOT category = 'Preset Options' AND runtime_computed - ORDER BY 1; - name ------- -(0 rows) - --- Preset GUCs are flagged as NOT_IN_SAMPLE. -SELECT name FROM tab_settings_flags - WHERE category = 'Preset Options' AND NOT not_in_sample - ORDER BY 1; - name ------- -(0 rows) - --- NO_RESET implies NO_RESET_ALL. -SELECT name FROM tab_settings_flags - WHERE no_reset AND NOT no_reset_all - ORDER BY 1; - name ------- -(0 rows) - -DROP TABLE tab_settings_flags; +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
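The guc test output above is dominated by one theme: scoping. A plain SET persists past COMMIT but is undone by ROLLBACK; SET LOCAL reverts at transaction end either way; when both appear in one transaction, the plain SET's value is what survives commit; and a function's SET clause saves and restores the value around each call. A minimal sketch of the transaction-scoping rules, using work_mem purely as an example parameter:

SHOW work_mem;               -- session default
BEGIN;
SET work_mem = '8MB';        -- kept on COMMIT, undone on ROLLBACK
SET LOCAL work_mem = '4MB';  -- reverted at transaction end either way
SHOW work_mem;               -- 4MB while the transaction is open
COMMIT;
SHOW work_mem;               -- 8MB: the plain SET survived the commit
RESET work_mem;              -- back to the session default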
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/bitmapops.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/bitmapops.out --- /tmp/cirrus-ci-build/src/test/regress/expected/bitmapops.out 2024-03-07 14:25:00.329390000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/bitmapops.out 2024-03-07 14:27:17.346426000 +0000 @@ -1,38 +1,2 @@ --- Test bitmap AND and OR --- Generate enough data that we can test the lossy bitmaps. --- There's 55 tuples per page in the table. 53 is just --- below 55, so that an index scan with qual a = constant --- will return at least one hit per page. 59 is just above --- 55, so that an index scan with qual b = constant will return --- hits on most but not all pages. 53 and 59 are prime, so that --- there's a maximum number of a,b combinations in the table. --- That allows us to test all the different combinations of --- lossy and non-lossy pages with the minimum amount of data -CREATE TABLE bmscantest (a int, b int, t text); -INSERT INTO bmscantest - SELECT (r%53), (r%59), 'foooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo' - FROM generate_series(1,70000) r; -CREATE INDEX i_bmtest_a ON bmscantest(a); -CREATE INDEX i_bmtest_b ON bmscantest(b); --- We want to use bitmapscans. With default settings, the planner currently --- chooses a bitmap scan for the queries below anyway, but let's make sure. -set enable_indexscan=false; -set enable_seqscan=false; --- Lower work_mem to trigger use of lossy bitmaps -set work_mem = 64; --- Test bitmap-and. -SELECT count(*) FROM bmscantest WHERE a = 1 AND b = 1; - count -------- - 23 -(1 row) - --- Test bitmap-or. -SELECT count(*) FROM bmscantest WHERE a = 1 OR b = 1; - count -------- - 2485 -(1 row) - --- clean up -DROP TABLE bmscantest; +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
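bitmapops exercises the BitmapAnd and BitmapOr executor nodes: two single-column indexes are scanned into TID bitmaps, the bitmaps are combined, and the deliberately low work_mem forces parts of them to degrade into lossy page-level entries that the heap scan must recheck. A sketch of how such a plan can be provoked; demo_bm is an illustrative name, and the enable_* settings only steer the planner rather than guarantee the plan:

CREATE TABLE demo_bm (a int, b int);
INSERT INTO demo_bm SELECT i % 53, i % 59 FROM generate_series(1, 70000) i;
CREATE INDEX ON demo_bm (a);
CREATE INDEX ON demo_bm (b);
SET enable_indexscan = off;  -- discourage plain index and sequential scans
SET enable_seqscan = off;
SET work_mem = '64kB';       -- small enough that the bitmaps go lossy
EXPLAIN (COSTS OFF) SELECT count(*) FROM demo_bm WHERE a = 1 AND b = 1;
-- expect a Bitmap Heap Scan (with a Recheck Cond) over a BitmapAnd of the
-- two Bitmap Index Scans; an OR condition yields a BitmapOr the same way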
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/combocid.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/combocid.out --- /tmp/cirrus-ci-build/src/test/regress/expected/combocid.out 2024-03-07 14:25:00.329664000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/combocid.out 2024-03-07 14:27:17.336098000 +0000 @@ -1,169 +1,2 @@ --- --- Tests for some likely failure cases with combo cmin/cmax mechanism --- -CREATE TEMP TABLE combocidtest (foobar int); -BEGIN; --- a few dummy ops to push up the CommandId counter -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest VALUES (1); -INSERT INTO combocidtest VALUES (2); -SELECT ctid,cmin,* FROM combocidtest; - ctid | cmin | foobar --------+------+-------- - (0,1) | 10 | 1 - (0,2) | 11 | 2 -(2 rows) - -SAVEPOINT s1; -UPDATE combocidtest SET foobar = foobar + 10; --- here we should see only updated tuples -SELECT ctid,cmin,* FROM combocidtest; - ctid | cmin | foobar --------+------+-------- - (0,3) | 12 | 11 - (0,4) | 12 | 12 -(2 rows) - -ROLLBACK TO s1; --- now we should see old tuples, but with combo CIDs starting at 0 -SELECT ctid,cmin,* FROM combocidtest; - ctid | cmin | foobar --------+------+-------- - (0,1) | 0 | 1 - (0,2) | 1 | 2 -(2 rows) - -COMMIT; --- combo data is not there anymore, but should still see tuples -SELECT ctid,cmin,* FROM combocidtest; - ctid | cmin | foobar --------+------+-------- - (0,1) | 0 | 1 - (0,2) | 1 | 2 -(2 rows) - --- Test combo CIDs with portals -BEGIN; -INSERT INTO combocidtest VALUES (333); -DECLARE c CURSOR FOR SELECT ctid,cmin,* FROM combocidtest; -DELETE FROM combocidtest; -FETCH ALL FROM c; - ctid | cmin | foobar --------+------+-------- - (0,1) | 1 | 1 - (0,2) | 1 | 2 - (0,5) | 0 | 333 -(3 rows) - -ROLLBACK; -SELECT ctid,cmin,* FROM combocidtest; - ctid | cmin | foobar --------+------+-------- - (0,1) | 1 | 1 - (0,2) | 1 | 2 -(2 rows) - --- check behavior with locked tuples -BEGIN; --- a few dummy ops to push up the CommandId counter -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest VALUES (444); -SELECT ctid,cmin,* FROM combocidtest; - ctid | cmin | foobar --------+------+-------- - (0,1) | 1 | 1 - (0,2) | 1 | 2 - (0,6) | 10 | 444 -(3 rows) - -SAVEPOINT s1; --- this doesn't affect cmin -SELECT ctid,cmin,* FROM combocidtest FOR UPDATE; - ctid | cmin | foobar --------+------+-------- - (0,1) | 1 | 1 - (0,2) | 1 | 2 - (0,6) | 10 | 444 -(3 rows) - -SELECT ctid,cmin,* FROM combocidtest; - ctid | cmin | foobar --------+------+-------- - (0,1) | 1 | 1 - (0,2) | 1 | 2 - (0,6) | 10 | 444 -(3 rows) - --- but this does -UPDATE combocidtest SET foobar = foobar + 10; -SELECT ctid,cmin,* FROM combocidtest; - ctid | cmin | foobar --------+------+-------- - (0,7) | 12 
| 11 - (0,8) | 12 | 12 - (0,9) | 12 | 454 -(3 rows) - -ROLLBACK TO s1; -SELECT ctid,cmin,* FROM combocidtest; - ctid | cmin | foobar --------+------+-------- - (0,1) | 12 | 1 - (0,2) | 12 | 2 - (0,6) | 0 | 444 -(3 rows) - -COMMIT; -SELECT ctid,cmin,* FROM combocidtest; - ctid | cmin | foobar --------+------+-------- - (0,1) | 12 | 1 - (0,2) | 12 | 2 - (0,6) | 0 | 444 -(3 rows) - --- test for bug reported in --- CABRT9RC81YUf1=jsmWopcKJEro=VoeG2ou6sPwyOUTx_qteRsg@mail.gmail.com -CREATE TABLE IF NOT EXISTS testcase( - id int PRIMARY KEY, - balance numeric -); -INSERT INTO testcase VALUES (1, 0); -BEGIN; -SELECT * FROM testcase WHERE testcase.id = 1 FOR UPDATE; - id | balance -----+--------- - 1 | 0 -(1 row) - -UPDATE testcase SET balance = balance + 400 WHERE id=1; -SAVEPOINT subxact; -UPDATE testcase SET balance = balance - 100 WHERE id=1; -ROLLBACK TO SAVEPOINT subxact; --- should return one tuple -SELECT * FROM testcase WHERE id = 1 FOR UPDATE; - id | balance -----+--------- - 1 | 400 -(1 row) - -ROLLBACK; -DROP TABLE testcase; +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/tsearch.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tsearch.out --- /tmp/cirrus-ci-build/src/test/regress/expected/tsearch.out 2024-03-07 14:25:00.334420000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tsearch.out 2024-03-07 14:27:17.341264000 +0000 @@ -1,3007 +1,2 @@ --- directory paths are passed to us in environment variables -\getenv abs_srcdir PG_ABS_SRCDIR --- --- Sanity checks for text search catalogs --- --- NB: we assume the oidjoins test will have caught any dangling links, --- that is OID or REGPROC fields that are not zero and do not match some --- row in the linked-to table. However, if we want to enforce that a link --- field can't be 0, we have to check it here. 
--- Find unexpected zero link entries -SELECT oid, prsname -FROM pg_ts_parser -WHERE prsnamespace = 0 OR prsstart = 0 OR prstoken = 0 OR prsend = 0 OR - -- prsheadline is optional - prslextype = 0; - oid | prsname ------+--------- -(0 rows) - -SELECT oid, dictname -FROM pg_ts_dict -WHERE dictnamespace = 0 OR dictowner = 0 OR dicttemplate = 0; - oid | dictname ------+---------- -(0 rows) - -SELECT oid, tmplname -FROM pg_ts_template -WHERE tmplnamespace = 0 OR tmpllexize = 0; -- tmplinit is optional - oid | tmplname ------+---------- -(0 rows) - -SELECT oid, cfgname -FROM pg_ts_config -WHERE cfgnamespace = 0 OR cfgowner = 0 OR cfgparser = 0; - oid | cfgname ------+--------- -(0 rows) - -SELECT mapcfg, maptokentype, mapseqno -FROM pg_ts_config_map -WHERE mapcfg = 0 OR mapdict = 0; - mapcfg | maptokentype | mapseqno ---------+--------------+---------- -(0 rows) - --- Look for pg_ts_config_map entries that aren't one of parser's token types -SELECT * FROM - ( SELECT oid AS cfgid, (ts_token_type(cfgparser)).tokid AS tokid - FROM pg_ts_config ) AS tt -RIGHT JOIN pg_ts_config_map AS m - ON (tt.cfgid=m.mapcfg AND tt.tokid=m.maptokentype) -WHERE - tt.cfgid IS NULL OR tt.tokid IS NULL; - cfgid | tokid | mapcfg | maptokentype | mapseqno | mapdict --------+-------+--------+--------------+----------+--------- -(0 rows) - --- Load some test data -CREATE TABLE test_tsvector( - t text, - a tsvector -); -\set filename :abs_srcdir '/data/tsearch.data' -COPY test_tsvector FROM :'filename'; -ANALYZE test_tsvector; --- test basic text search behavior without indexes, then with -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh'; - count -------- - 17 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt'; - count -------- - 98 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)'; - count -------- - 23 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)'; - count -------- - 39 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; - count -------- - 494 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}'); - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh'; - count -------- - 432 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A'; - count -------- - 56 -(1 row) - 
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D'; - count -------- - 58 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A'; - count -------- - 452 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D'; - count -------- - 450 -(1 row) - -create index wowidx on test_tsvector using gist (a); -SET enable_seqscan=OFF; -SET enable_indexscan=ON; -SET enable_bitmapscan=OFF; -explain (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - QUERY PLAN -------------------------------------------------------- - Aggregate - -> Index Scan using wowidx on test_tsvector - Index Cond: (a @@ '''wr'' | ''qh'''::tsquery) -(3 rows) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh'; - count -------- - 17 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt'; - count -------- - 98 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)'; - count -------- - 23 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)'; - count -------- - 39 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; - count -------- - 494 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}'); - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh'; - count -------- - 432 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A'; - count -------- - 56 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D'; - count -------- - 58 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A'; - count -------- - 452 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D'; - count -------- - 450 -(1 row) - -SET enable_indexscan=OFF; -SET enable_bitmapscan=ON; -explain (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - QUERY PLAN -------------------------------------------------------------- - Aggregate - -> Bitmap Heap Scan on test_tsvector - Recheck Cond: (a @@ '''wr'' | ''qh'''::tsquery) - -> Bitmap Index Scan on wowidx - Index Cond: (a @@ '''wr'' | ''qh'''::tsquery) -(5 rows) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh'; - count -------- - 17 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt'; - count 
-------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt'; - count -------- - 98 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)'; - count -------- - 23 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)'; - count -------- - 39 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; - count -------- - 494 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}'); - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh'; - count -------- - 432 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A'; - count -------- - 56 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D'; - count -------- - 58 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A'; - count -------- - 452 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D'; - count -------- - 450 -(1 row) - --- Test siglen parameter of GiST tsvector_ops -CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(foo=1)); -ERROR: unrecognized parameter "foo" -CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(siglen=0)); -ERROR: value 0 out of bounds for option "siglen" -DETAIL: Valid values are between "1" and "2024". -CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(siglen=2048)); -ERROR: value 2048 out of bounds for option "siglen" -DETAIL: Valid values are between "1" and "2024". 
-CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(siglen=100,foo='bar')); -ERROR: unrecognized parameter "foo" -CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(siglen=100, siglen = 200)); -ERROR: parameter "siglen" specified more than once -CREATE INDEX wowidx2 ON test_tsvector USING gist (a tsvector_ops(siglen=1)); -\d test_tsvector - Table "public.test_tsvector" - Column | Type | Collation | Nullable | Default ---------+----------+-----------+----------+--------- - t | text | | | - a | tsvector | | | -Indexes: - "wowidx" gist (a) - "wowidx2" gist (a tsvector_ops (siglen='1')) - -DROP INDEX wowidx; -EXPLAIN (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - QUERY PLAN -------------------------------------------------------------- - Aggregate - -> Bitmap Heap Scan on test_tsvector - Recheck Cond: (a @@ '''wr'' | ''qh'''::tsquery) - -> Bitmap Index Scan on wowidx2 - Index Cond: (a @@ '''wr'' | ''qh'''::tsquery) -(5 rows) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh'; - count -------- - 17 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt'; - count -------- - 98 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)'; - count -------- - 23 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)'; - count -------- - 39 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; - count -------- - 494 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}'); - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh'; - count -------- - 432 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A'; - count -------- - 56 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D'; - count -------- - 58 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A'; - count -------- - 452 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D'; - count -------- - 450 -(1 row) - -DROP INDEX wowidx2; -CREATE INDEX wowidx ON test_tsvector USING gist (a tsvector_ops(siglen=484)); -\d test_tsvector - Table "public.test_tsvector" - Column | Type | Collation | Nullable | Default ---------+----------+-----------+----------+--------- - t | text | | | - a | tsvector | | | -Indexes: - "wowidx" gist (a 
tsvector_ops (siglen='484')) - -EXPLAIN (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - QUERY PLAN -------------------------------------------------------------- - Aggregate - -> Bitmap Heap Scan on test_tsvector - Recheck Cond: (a @@ '''wr'' | ''qh'''::tsquery) - -> Bitmap Index Scan on wowidx - Index Cond: (a @@ '''wr'' | ''qh'''::tsquery) -(5 rows) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh'; - count -------- - 17 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt'; - count -------- - 98 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)'; - count -------- - 23 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)'; - count -------- - 39 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; - count -------- - 494 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}'); - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh'; - count -------- - 432 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A'; - count -------- - 56 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D'; - count -------- - 58 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A'; - count -------- - 452 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D'; - count -------- - 450 -(1 row) - -RESET enable_seqscan; -RESET enable_indexscan; -RESET enable_bitmapscan; -DROP INDEX wowidx; -CREATE INDEX wowidx ON test_tsvector USING gin (a); -SET enable_seqscan=OFF; --- GIN only supports bitmapscan, so no need to test plain indexscan -explain (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - QUERY PLAN -------------------------------------------------------------- - Aggregate - -> Bitmap Heap Scan on test_tsvector - Recheck Cond: (a @@ '''wr'' | ''qh'''::tsquery) - -> Bitmap Index Scan on wowidx - Index Cond: (a @@ '''wr'' | ''qh'''::tsquery) -(5 rows) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh'; - count -------- - 17 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt'; - count -------- - 98 -(1 
row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)'; - count -------- - 23 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)'; - count -------- - 39 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; - count -------- - 494 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}'); - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh'; - count -------- - 432 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A'; - count -------- - 56 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D'; - count -------- - 58 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A'; - count -------- - 452 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D'; - count -------- - 450 -(1 row) - --- Test optimization of non-empty GIN_SEARCH_MODE_ALL queries -EXPLAIN (COSTS OFF) -SELECT count(*) FROM test_tsvector WHERE a @@ '!qh'; - QUERY PLAN ------------------------------------------------------ - Aggregate - -> Bitmap Heap Scan on test_tsvector - Recheck Cond: (a @@ '!''qh'''::tsquery) - -> Bitmap Index Scan on wowidx - Index Cond: (a @@ '!''qh'''::tsquery) -(5 rows) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!qh'; - count -------- - 410 -(1 row) - -EXPLAIN (COSTS OFF) -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr' AND a @@ '!qh'; - QUERY PLAN ------------------------------------------------------------------------------------- - Aggregate - -> Bitmap Heap Scan on test_tsvector - Recheck Cond: ((a @@ '''wr'''::tsquery) AND (a @@ '!''qh'''::tsquery)) - -> Bitmap Index Scan on wowidx - Index Cond: ((a @@ '''wr'''::tsquery) AND (a @@ '!''qh'''::tsquery)) -(5 rows) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr' AND a @@ '!qh'; - count -------- - 60 -(1 row) - -RESET enable_seqscan; -INSERT INTO test_tsvector VALUES ('???', 'DFG:1A,2B,6C,10 FGH'); -SELECT * FROM ts_stat('SELECT a FROM test_tsvector') ORDER BY ndoc DESC, nentry DESC, word LIMIT 10; - word | ndoc | nentry -------+------+-------- - qq | 108 | 108 - qt | 102 | 102 - qe | 100 | 101 - qh | 98 | 99 - qw | 98 | 98 - qa | 97 | 97 - ql | 94 | 94 - qs | 94 | 94 - qr | 92 | 93 - qi | 92 | 92 -(10 rows) - -SELECT * FROM ts_stat('SELECT a FROM test_tsvector', 'AB') ORDER BY ndoc DESC, nentry DESC, word; - word | ndoc | nentry -------+------+-------- - DFG | 1 | 2 -(1 row) - ---dictionaries and to_tsvector -SELECT ts_lexize('english_stem', 'skies'); - 
ts_lexize ------------ - {sky} -(1 row) - -SELECT ts_lexize('english_stem', 'identity'); - ts_lexize ------------ - {ident} -(1 row) - -SELECT * FROM ts_token_type('default'); - tokid | alias | description --------+-----------------+------------------------------------------ - 1 | asciiword | Word, all ASCII - 2 | word | Word, all letters - 3 | numword | Word, letters and digits - 4 | email | Email address - 5 | url | URL - 6 | host | Host - 7 | sfloat | Scientific notation - 8 | version | Version number - 9 | hword_numpart | Hyphenated word part, letters and digits - 10 | hword_part | Hyphenated word part, all letters - 11 | hword_asciipart | Hyphenated word part, all ASCII - 12 | blank | Space symbols - 13 | tag | XML tag - 14 | protocol | Protocol head - 15 | numhword | Hyphenated word, letters and digits - 16 | asciihword | Hyphenated word, all ASCII - 17 | hword | Hyphenated word, all letters - 18 | url_path | URL path - 19 | file | File or path name - 20 | float | Decimal notation - 21 | int | Signed integer - 22 | uint | Unsigned integer - 23 | entity | XML entity -(23 rows) - -SELECT * FROM ts_parse('default', '345 qwe@efd.r '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/? ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005 teodor@stack.net teodor@123-stack.net 123_teodor@stack.net 123-teodor@stack.net qwe-wer asdf qwer jf sdjk ewr1> ewri2 -/usr/local/fff /awdf/dwqe/4325 rewt/ewr wefjn /wqe-324/ewr gist.h gist.h.c gist.c. readline 4.2 4.2. 4.2, readline-4.2 readline-4.2. 234 - wow < jqw <> qwerty'); - tokid | token --------+-------------------------------------- - 22 | 345 - 12 | - 1 | qwe - 12 | @ - 19 | efd.r - 12 | ' - 14 | http:// - 6 | www.com - 12 | / - 14 | http:// - 5 | aew.werc.ewr/?ad=qwe&dw - 6 | aew.werc.ewr - 18 | /?ad=qwe&dw - 12 | - 5 | 1aew.werc.ewr/?ad=qwe&dw - 6 | 1aew.werc.ewr - 18 | /?ad=qwe&dw - 12 | - 6 | 2aew.werc.ewr - 12 | - 14 | http:// - 5 | 3aew.werc.ewr/?ad=qwe&dw - 6 | 3aew.werc.ewr - 18 | /?ad=qwe&dw - 12 | - 14 | http:// - 6 | 4aew.werc.ewr - 12 | - 14 | http:// - 5 | 5aew.werc.ewr:8100/? - 6 | 5aew.werc.ewr:8100 - 18 | /? - 12 | - 1 | ad - 12 | = - 1 | qwe - 12 | & - 1 | dw - 12 | - 5 | 6aew.werc.ewr:8100/?ad=qwe&dw - 6 | 6aew.werc.ewr:8100 - 18 | /?ad=qwe&dw - 12 | - 5 | 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 - 6 | 7aew.werc.ewr:8100 - 18 | /?ad=qwe&dw=%20%32 - 12 | - 7 | +4.0e-10 - 12 | - 1 | qwe - 12 | - 1 | qwe - 12 | - 1 | qwqwe - 12 | - 20 | 234.435 - 12 | - 22 | 455 - 12 | - 20 | 5.005 - 12 | - 4 | teodor@stack.net - 12 | - 4 | teodor@123-stack.net - 12 | - 4 | 123_teodor@stack.net - 12 | - 4 | 123-teodor@stack.net - 12 | - 16 | qwe-wer - 11 | qwe - 12 | - - 11 | wer - 12 | - 1 | asdf - 12 | - 13 | - 1 | qwer - 12 | - 1 | jf - 12 | - 1 | sdjk - 12 | < - 1 | we - 12 | - 1 | hjwer - 12 | - 13 | - 12 | - 3 | ewr1 - 12 | > - 3 | ewri2 - 12 | - 13 | - 12 | + - | - 19 | /usr/local/fff - 12 | - 19 | /awdf/dwqe/4325 - 12 | - 19 | rewt/ewr - 12 | - 1 | wefjn - 12 | - 19 | /wqe-324/ewr - 12 | - 19 | gist.h - 12 | - 19 | gist.h.c - 12 | - 19 | gist.c - 12 | . - 1 | readline - 12 | - 20 | 4.2 - 12 | - 20 | 4.2 - 12 | . - 20 | 4.2 - 12 | , - 1 | readline - 20 | -4.2 - 12 | - 1 | readline - 20 | -4.2 - 12 | . 
- 22 | 234 - 12 | + - | - 12 | < - 1 | i - 12 | - 13 | - 12 | - 1 | wow - 12 | - 12 | < - 1 | jqw - 12 | - 12 | <> - 1 | qwerty -(139 rows) - -SELECT to_tsvector('english', '345 qwe@efd.r '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/? ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005 teodor@stack.net teodor@123-stack.net 123_teodor@stack.net 123-teodor@stack.net qwe-wer asdf qwer jf sdjk ewr1> ewri2 -/usr/local/fff /awdf/dwqe/4325 rewt/ewr wefjn /wqe-324/ewr gist.h gist.h.c gist.c. readline 4.2 4.2. 4.2, readline-4.2 readline-4.2. 234 - wow < jqw <> qwerty'); - to_tsvector ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - '+4.0e-10':28 '-4.2':63,65 '/?':18 '/?ad=qwe&dw':7,10,14,24 '/?ad=qwe&dw=%20%32':27 '/awdf/dwqe/4325':51 '/usr/local/fff':50 '/wqe-324/ewr':54 '123-teodor@stack.net':38 '123_teodor@stack.net':37 '1aew.werc.ewr':9 '1aew.werc.ewr/?ad=qwe&dw':8 '234':66 '234.435':32 '2aew.werc.ewr':11 '345':1 '3aew.werc.ewr':13 '3aew.werc.ewr/?ad=qwe&dw':12 '4.2':59,60,61 '455':33 '4aew.werc.ewr':15 '5.005':34 '5aew.werc.ewr:8100':17 '5aew.werc.ewr:8100/?':16 '6aew.werc.ewr:8100':23 '6aew.werc.ewr:8100/?ad=qwe&dw':22 '7aew.werc.ewr:8100':26 '7aew.werc.ewr:8100/?ad=qwe&dw=%20%32':25 'ad':19 'aew.werc.ewr':6 'aew.werc.ewr/?ad=qwe&dw':5 'asdf':42 'dw':21 'efd.r':3 'ewr1':48 'ewri2':49 'gist.c':57 'gist.h':55 'gist.h.c':56 'hjwer':47 'jf':44 'jqw':69 'qwe':2,20,29,30,40 'qwe-wer':39 'qwer':43 'qwerti':70 'qwqwe':31 'readlin':58,62,64 'rewt/ewr':52 'sdjk':45 'teodor@123-stack.net':36 'teodor@stack.net':35 'wefjn':53 'wer':41 'wow':68 'www.com':4 -(1 row) - -SELECT length(to_tsvector('english', '345 qwe@efd.r '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/? ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005 teodor@stack.net teodor@123-stack.net 123_teodor@stack.net 123-teodor@stack.net qwe-wer asdf qwer jf sdjk ewr1> ewri2 -/usr/local/fff /awdf/dwqe/4325 rewt/ewr wefjn /wqe-324/ewr gist.h gist.h.c gist.c. readline 4.2 4.2. 4.2, readline-4.2 readline-4.2. 
234 - wow < jqw <> qwerty')); - length --------- - 56 -(1 row) - --- ts_debug -SELECT * from ts_debug('english', 'abc&nm1;def©ghiõjkl'); - alias | description | token | dictionaries | dictionary | lexemes ------------+-----------------+----------------------------+----------------+--------------+--------- - tag | XML tag | | {} | | - asciiword | Word, all ASCII | abc | {english_stem} | english_stem | {abc} - entity | XML entity | &nm1; | {} | | - asciiword | Word, all ASCII | def | {english_stem} | english_stem | {def} - entity | XML entity | © | {} | | - asciiword | Word, all ASCII | ghi | {english_stem} | english_stem | {ghi} - entity | XML entity | õ | {} | | - asciiword | Word, all ASCII | jkl | {english_stem} | english_stem | {jkl} - tag | XML tag | | {} | | -(9 rows) - --- check parsing of URLs -SELECT * from ts_debug('english', 'http://www.harewoodsolutions.co.uk/press.aspx'); - alias | description | token | dictionaries | dictionary | lexemes -----------+---------------+----------------------------------------+--------------+------------+------------------------------------------ - protocol | Protocol head | http:// | {} | | - url | URL | www.harewoodsolutions.co.uk/press.aspx | {simple} | simple | {www.harewoodsolutions.co.uk/press.aspx} - host | Host | www.harewoodsolutions.co.uk | {simple} | simple | {www.harewoodsolutions.co.uk} - url_path | URL path | /press.aspx | {simple} | simple | {/press.aspx} - tag | XML tag | | {} | | -(5 rows) - -SELECT * from ts_debug('english', 'http://aew.wer0c.ewr/id?ad=qwe&dw'); - alias | description | token | dictionaries | dictionary | lexemes -----------+---------------+----------------------------+--------------+------------+------------------------------ - protocol | Protocol head | http:// | {} | | - url | URL | aew.wer0c.ewr/id?ad=qwe&dw | {simple} | simple | {aew.wer0c.ewr/id?ad=qwe&dw} - host | Host | aew.wer0c.ewr | {simple} | simple | {aew.wer0c.ewr} - url_path | URL path | /id?ad=qwe&dw | {simple} | simple | {/id?ad=qwe&dw} - tag | XML tag | | {} | | -(5 rows) - -SELECT * from ts_debug('english', 'http://5aew.werc.ewr:8100/?'); - alias | description | token | dictionaries | dictionary | lexemes -----------+---------------+----------------------+--------------+------------+------------------------ - protocol | Protocol head | http:// | {} | | - url | URL | 5aew.werc.ewr:8100/? | {simple} | simple | {5aew.werc.ewr:8100/?} - host | Host | 5aew.werc.ewr:8100 | {simple} | simple | {5aew.werc.ewr:8100} - url_path | URL path | /? 
| {simple} | simple | {/?} -(4 rows) - -SELECT * from ts_debug('english', '5aew.werc.ewr:8100/?xx'); - alias | description | token | dictionaries | dictionary | lexemes -----------+-------------+------------------------+--------------+------------+-------------------------- - url | URL | 5aew.werc.ewr:8100/?xx | {simple} | simple | {5aew.werc.ewr:8100/?xx} - host | Host | 5aew.werc.ewr:8100 | {simple} | simple | {5aew.werc.ewr:8100} - url_path | URL path | /?xx | {simple} | simple | {/?xx} -(3 rows) - -SELECT token, alias, - dictionaries, dictionaries is null as dnull, array_dims(dictionaries) as ddims, - lexemes, lexemes is null as lnull, array_dims(lexemes) as ldims -from ts_debug('english', 'a title'); - token | alias | dictionaries | dnull | ddims | lexemes | lnull | ldims --------+-----------+----------------+-------+-------+---------+-------+------- - a | asciiword | {english_stem} | f | [1:1] | {} | f | - | blank | {} | f | | | t | - title | asciiword | {english_stem} | f | [1:1] | {titl} | f | [1:1] -(3 rows) - --- to_tsquery -SELECT to_tsquery('english', 'qwe & sKies '); - to_tsquery ---------------- - 'qwe' & 'sky' -(1 row) - -SELECT to_tsquery('simple', 'qwe & sKies '); - to_tsquery ------------------ - 'qwe' & 'skies' -(1 row) - -SELECT to_tsquery('english', '''the wether'':dc & '' sKies '':BC '); - to_tsquery ------------------------- - 'wether':CD & 'sky':BC -(1 row) - -SELECT to_tsquery('english', 'asd&(and|fghj)'); - to_tsquery ----------------- - 'asd' & 'fghj' -(1 row) - -SELECT to_tsquery('english', '(asd&and)|fghj'); - to_tsquery ----------------- - 'asd' | 'fghj' -(1 row) - -SELECT to_tsquery('english', '(asd&!and)|fghj'); - to_tsquery ----------------- - 'asd' | 'fghj' -(1 row) - -SELECT to_tsquery('english', '(the|and&(i&1))&fghj'); - to_tsquery --------------- - '1' & 'fghj' -(1 row) - -SELECT plainto_tsquery('english', 'the and z 1))& fghj'); - plainto_tsquery --------------------- - 'z' & '1' & 'fghj' -(1 row) - -SELECT plainto_tsquery('english', 'foo bar') && plainto_tsquery('english', 'asd'); - ?column? ------------------------ - 'foo' & 'bar' & 'asd' -(1 row) - -SELECT plainto_tsquery('english', 'foo bar') || plainto_tsquery('english', 'asd fg'); - ?column? ------------------------------- - 'foo' & 'bar' | 'asd' & 'fg' -(1 row) - -SELECT plainto_tsquery('english', 'foo bar') || !!plainto_tsquery('english', 'asd fg'); - ?column? ------------------------------------ - 'foo' & 'bar' | !( 'asd' & 'fg' ) -(1 row) - -SELECT plainto_tsquery('english', 'foo bar') && 'asd | fg'; - ?column? 
----------------------------------- - 'foo' & 'bar' & ( 'asd' | 'fg' ) -(1 row) - --- Check stop word deletion, a and s are stop-words -SELECT to_tsquery('english', '!(a & !b) & c'); - to_tsquery -------------- - !!'b' & 'c' -(1 row) - -SELECT to_tsquery('english', '!(a & !b)'); - to_tsquery ------------- - !!'b' -(1 row) - -SELECT to_tsquery('english', '(1 <-> 2) <-> a'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', '(1 <-> a) <-> 2'); - to_tsquery -------------- - '1' <2> '2' -(1 row) - -SELECT to_tsquery('english', '(a <-> 1) <-> 2'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', 'a <-> (1 <-> 2)'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', '1 <-> (a <-> 2)'); - to_tsquery -------------- - '1' <2> '2' -(1 row) - -SELECT to_tsquery('english', '1 <-> (2 <-> a)'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', '(1 <-> 2) <3> a'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', '(1 <-> a) <3> 2'); - to_tsquery -------------- - '1' <4> '2' -(1 row) - -SELECT to_tsquery('english', '(a <-> 1) <3> 2'); - to_tsquery -------------- - '1' <3> '2' -(1 row) - -SELECT to_tsquery('english', 'a <3> (1 <-> 2)'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', '1 <3> (a <-> 2)'); - to_tsquery -------------- - '1' <4> '2' -(1 row) - -SELECT to_tsquery('english', '1 <3> (2 <-> a)'); - to_tsquery -------------- - '1' <3> '2' -(1 row) - -SELECT to_tsquery('english', '(1 <3> 2) <-> a'); - to_tsquery -------------- - '1' <3> '2' -(1 row) - -SELECT to_tsquery('english', '(1 <3> a) <-> 2'); - to_tsquery -------------- - '1' <4> '2' -(1 row) - -SELECT to_tsquery('english', '(a <3> 1) <-> 2'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', 'a <-> (1 <3> 2)'); - to_tsquery -------------- - '1' <3> '2' -(1 row) - -SELECT to_tsquery('english', '1 <-> (a <3> 2)'); - to_tsquery -------------- - '1' <4> '2' -(1 row) - -SELECT to_tsquery('english', '1 <-> (2 <3> a)'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', '((a <-> 1) <-> 2) <-> s'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', '(2 <-> (a <-> 1)) <-> s'); - to_tsquery -------------- - '2' <2> '1' -(1 row) - -SELECT to_tsquery('english', '((1 <-> a) <-> 2) <-> s'); - to_tsquery -------------- - '1' <2> '2' -(1 row) - -SELECT to_tsquery('english', '(2 <-> (1 <-> a)) <-> s'); - to_tsquery -------------- - '2' <-> '1' -(1 row) - -SELECT to_tsquery('english', 's <-> ((a <-> 1) <-> 2)'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', 's <-> (2 <-> (a <-> 1))'); - to_tsquery -------------- - '2' <2> '1' -(1 row) - -SELECT to_tsquery('english', 's <-> ((1 <-> a) <-> 2)'); - to_tsquery -------------- - '1' <2> '2' -(1 row) - -SELECT to_tsquery('english', 's <-> (2 <-> (1 <-> a))'); - to_tsquery -------------- - '2' <-> '1' -(1 row) - -SELECT to_tsquery('english', '((a <-> 1) <-> s) <-> 2'); - to_tsquery -------------- - '1' <2> '2' -(1 row) - -SELECT to_tsquery('english', '(s <-> (a <-> 1)) <-> 2'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', '((1 <-> a) <-> s) <-> 2'); - to_tsquery -------------- - '1' <3> '2' -(1 row) - -SELECT to_tsquery('english', '(s <-> (1 <-> a)) <-> 2'); - to_tsquery -------------- - '1' <2> '2' -(1 row) - -SELECT to_tsquery('english', '2 <-> ((a 
<-> 1) <-> s)'); - to_tsquery -------------- - '2' <2> '1' -(1 row) - -SELECT to_tsquery('english', '2 <-> (s <-> (a <-> 1))'); - to_tsquery -------------- - '2' <3> '1' -(1 row) - -SELECT to_tsquery('english', '2 <-> ((1 <-> a) <-> s)'); - to_tsquery -------------- - '2' <-> '1' -(1 row) - -SELECT to_tsquery('english', '2 <-> (s <-> (1 <-> a))'); - to_tsquery -------------- - '2' <2> '1' -(1 row) - -SELECT to_tsquery('english', 'foo <-> (a <-> (the <-> bar))'); - to_tsquery ------------------ - 'foo' <3> 'bar' -(1 row) - -SELECT to_tsquery('english', '((foo <-> a) <-> the) <-> bar'); - to_tsquery ------------------ - 'foo' <3> 'bar' -(1 row) - -SELECT to_tsquery('english', 'foo <-> a <-> the <-> bar'); - to_tsquery ------------------ - 'foo' <3> 'bar' -(1 row) - -SELECT phraseto_tsquery('english', 'PostgreSQL can be extended by the user in many ways'); - phraseto_tsquery ------------------------------------------------------------ - 'postgresql' <3> 'extend' <3> 'user' <2> 'mani' <-> 'way' -(1 row) - -SELECT ts_rank_cd(to_tsvector('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -'), to_tsquery('english', 'paint&water')); - ts_rank_cd ------------- - 0.05 -(1 row) - -SELECT ts_rank_cd(to_tsvector('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -'), to_tsquery('english', 'breath&motion&water')); - ts_rank_cd -------------- - 0.008333334 -(1 row) - -SELECT ts_rank_cd(to_tsvector('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -'), to_tsquery('english', 'ocean')); - ts_rank_cd ------------- - 0.1 -(1 row) - -SELECT ts_rank_cd(to_tsvector('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -'), to_tsquery('english', 'painted <-> Ship')); - ts_rank_cd ------------- - 0.1 -(1 row) - -SELECT ts_rank_cd(strip(to_tsvector('both stripped')), - to_tsquery('both & stripped')); - ts_rank_cd ------------- - 0 -(1 row) - -SELECT ts_rank_cd(to_tsvector('unstripped') || strip(to_tsvector('stripped')), - to_tsquery('unstripped & stripped')); - ts_rank_cd ------------- - 0 -(1 row) - ---headline tests -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'paint&water')); - ts_headline ------------------------------------------ - painted Ocean. 
+ - Water, water, every where+ - And all the boards did shrink; + - Water, water, every -(1 row) - -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'breath&motion&water')); - ts_headline ----------------------------------- - breath nor motion,+ - As idle as a painted Ship + - Upon a painted Ocean. + - Water, water -(1 row) - -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'ocean')); - ts_headline ----------------------------------- - Ocean. + - Water, water, every where + - And all the boards did shrink;+ - Water, water, every where -(1 row) - -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'day & drink')); - ts_headline ------------------------------------- - day, + - We stuck, nor breath nor motion,+ - As idle as a painted Ship + - Upon a painted Ocean. + - Water, water, every where + - And all the boards did shrink; + - Water, water, every where, + - Nor any drop -(1 row) - -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'day | drink')); - ts_headline ------------------------------------------------------------ - Day after day, day after day,+ - We stuck, nor breath nor motion, + - As idle as a painted -(1 row) - -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'day | !drink')); - ts_headline ------------------------------------------------------------ - Day after day, day after day,+ - We stuck, nor breath nor motion, + - As idle as a painted -(1 row) - -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'painted <-> Ship & drink')); - ts_headline ----------------------------------- - painted Ship + - Upon a painted Ocean. + - Water, water, every where + - And all the boards did shrink;+ - Water, water, every where, + - Nor any drop to drink -(1 row) - -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. 
-Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'painted <-> Ship | drink')); - ts_headline ---------------------------------- - painted Ship + - Upon a painted Ocean. + - Water, water, every where + - And all the boards did shrink -(1 row) - -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'painted <-> Ship | !drink')); - ts_headline ---------------------------------- - painted Ship + - Upon a painted Ocean. + - Water, water, every where + - And all the boards did shrink -(1 row) - -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', phraseto_tsquery('english', 'painted Ocean')); - ts_headline ----------------------------------- - painted Ocean. + - Water, water, every where + - And all the boards did shrink;+ - Water, water, every -(1 row) - -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', phraseto_tsquery('english', 'idle as a painted Ship')); - ts_headline ---------------------------------------------- - idle as a painted Ship+ - Upon a painted Ocean. + - Water, water, every where + - And all the boards -(1 row) - -SELECT ts_headline('english', -'Lorem ipsum urna. Nullam nullam ullamcorper urna.', -to_tsquery('english','Lorem') && phraseto_tsquery('english','ullamcorper urna'), -'MaxWords=100, MinWords=1'); - ts_headline -------------------------------------------------------------------------------- - Lorem ipsum urna. Nullam nullam ullamcorper urna -(1 row) - -SELECT ts_headline('english', -'Lorem ipsum urna. Nullam nullam ullamcorper urna.', -phraseto_tsquery('english','ullamcorper urna'), -'MaxWords=100, MinWords=5'); - ts_headline -------------------------------------------------------------- - urna. Nullam nullam ullamcorper urna. -(1 row) - -SELECT ts_headline('english', ' - - - -Sea view wow foo bar qq -YES   -ff-bg - - -', -to_tsquery('english', 'sea&foo'), 'HighlightAll=true'); - ts_headline ------------------------------------------------------------------------------ - + - + - + - + - Sea view wow foo bar qq + - YES  + - ff-bg + - + - + - -(1 row) - -SELECT ts_headline('simple', '1 2 3 1 3'::text, '1 <-> 3', 'MaxWords=2, MinWords=1'); - ts_headline -------------------- - 1 3 -(1 row) - -SELECT ts_headline('simple', '1 2 3 1 3'::text, '1 & 3', 'MaxWords=4, MinWords=1'); - ts_headline ---------------------- - 1 2 3 -(1 row) - -SELECT ts_headline('simple', '1 2 3 1 3'::text, '1 <-> 3', 'MaxWords=4, MinWords=1'); - ts_headline -------------------- - 1 3 -(1 row) - ---Check if headline fragments work -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. 
-Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'ocean'), 'MaxFragments=1'); - ts_headline ------------------------------------- - after day, + - We stuck, nor breath nor motion,+ - As idle as a painted Ship + - Upon a painted Ocean. + - Water, water, every where + - And all the boards did shrink; + - Water, water, every where, + - Nor any drop -(1 row) - ---Check if more than one fragments are displayed -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'Coleridge & stuck'), 'MaxFragments=2'); - ts_headline ----------------------------------------------- - after day, day after day, + - We stuck, nor breath nor motion, + - As idle as a painted Ship + - Upon a painted Ocean. + - Water, water, every where + - And all the boards did shrink; + - Water, water, every where ... drop to drink.+ - S. T. Coleridge -(1 row) - ---Fragments when there all query words are not in the document -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'ocean & seahorse'), 'MaxFragments=1'); - ts_headline ------------------------------------- - + - Day after day, day after day, + - We stuck, nor breath nor motion,+ - As idle as -(1 row) - ---FragmentDelimiter option -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'Coleridge & stuck'), 'MaxFragments=2,FragmentDelimiter=***'); - ts_headline --------------------------------------------- - after day, day after day, + - We stuck, nor breath nor motion, + - As idle as a painted Ship + - Upon a painted Ocean. + - Water, water, every where + - And all the boards did shrink; + - Water, water, every where***drop to drink.+ - S. T. Coleridge -(1 row) - ---Fragments with phrase search -SELECT ts_headline('english', -'Lorem ipsum urna. Nullam nullam ullamcorper urna.', -to_tsquery('english','Lorem') && phraseto_tsquery('english','ullamcorper urna'), -'MaxFragments=100, MaxWords=100, MinWords=1'); - ts_headline -------------------------------------------------------------------------------- - Lorem ipsum urna. 
Nullam nullam ullamcorper urna -(1 row) - --- Edge cases with empty query -SELECT ts_headline('english', -'', to_tsquery('english', '')); -NOTICE: text-search query doesn't contain lexemes: "" - ts_headline -------------- - -(1 row) - -SELECT ts_headline('english', -'foo bar', to_tsquery('english', '')); -NOTICE: text-search query doesn't contain lexemes: "" - ts_headline -------------- - foo bar -(1 row) - ---Rewrite sub system -CREATE TABLE test_tsquery (txtkeyword TEXT, txtsample TEXT); -\set ECHO none -ALTER TABLE test_tsquery ADD COLUMN keyword tsquery; -UPDATE test_tsquery SET keyword = to_tsquery('english', txtkeyword); -ALTER TABLE test_tsquery ADD COLUMN sample tsquery; -UPDATE test_tsquery SET sample = to_tsquery('english', txtsample::text); -SELECT COUNT(*) FROM test_tsquery WHERE keyword < 'new <-> york'; - count -------- - 2 -(1 row) - -SELECT COUNT(*) FROM test_tsquery WHERE keyword <= 'new <-> york'; - count -------- - 3 -(1 row) - -SELECT COUNT(*) FROM test_tsquery WHERE keyword = 'new <-> york'; - count -------- - 1 -(1 row) - -SELECT COUNT(*) FROM test_tsquery WHERE keyword >= 'new <-> york'; - count -------- - 4 -(1 row) - -SELECT COUNT(*) FROM test_tsquery WHERE keyword > 'new <-> york'; - count -------- - 3 -(1 row) - -CREATE UNIQUE INDEX bt_tsq ON test_tsquery (keyword); -SET enable_seqscan=OFF; -SELECT COUNT(*) FROM test_tsquery WHERE keyword < 'new <-> york'; - count -------- - 2 -(1 row) - -SELECT COUNT(*) FROM test_tsquery WHERE keyword <= 'new <-> york'; - count -------- - 3 -(1 row) - -SELECT COUNT(*) FROM test_tsquery WHERE keyword = 'new <-> york'; - count -------- - 1 -(1 row) - -SELECT COUNT(*) FROM test_tsquery WHERE keyword >= 'new <-> york'; - count -------- - 4 -(1 row) - -SELECT COUNT(*) FROM test_tsquery WHERE keyword > 'new <-> york'; - count -------- - 3 -(1 row) - -RESET enable_seqscan; -SELECT ts_rewrite('foo & bar & qq & new & york', 'new & york'::tsquery, 'big & apple | nyc | new & york & city'); - ts_rewrite ------------------------------------------------------------------------------- - 'foo' & 'bar' & 'qq' & ( 'city' & 'new' & 'york' | 'nyc' | 'big' & 'apple' ) -(1 row) - -SELECT ts_rewrite(ts_rewrite('new & !york ', 'york', '!jersey'), - 'jersey', 'mexico'); - ts_rewrite --------------------- - 'new' & !!'mexico' -(1 row) - -SELECT ts_rewrite('moscow', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite ---------------------- - 'moskva' | 'moscow' -(1 row) - -SELECT ts_rewrite('moscow & hotel', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite ------------------------------------ - 'hotel' & ( 'moskva' | 'moscow' ) -(1 row) - -SELECT ts_rewrite('bar & qq & foo & (new <-> york)', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite -------------------------------------------------------------------------------------- - 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' ) -(1 row) - -SELECT ts_rewrite( 'moscow', 'SELECT keyword, sample FROM test_tsquery'); - ts_rewrite ---------------------- - 'moskva' | 'moscow' -(1 row) - -SELECT ts_rewrite( 'moscow & hotel', 'SELECT keyword, sample FROM test_tsquery'); - ts_rewrite ------------------------------------ - 'hotel' & ( 'moskva' | 'moscow' ) -(1 row) - -SELECT ts_rewrite( 'bar & qq & foo & (new <-> york)', 'SELECT keyword, sample FROM test_tsquery'); - ts_rewrite -------------------------------------------------------------------------------------- - 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 
'york' ) -(1 row) - -SELECT ts_rewrite('1 & (2 <-> 3)', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite -------------- - '2' <-> '4' -(1 row) - -SELECT ts_rewrite('1 & (2 <2> 3)', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite -------------------- - '1' & '2' <2> '3' -(1 row) - -SELECT ts_rewrite('5 <-> (1 & (2 <-> 3))', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite -------------------------- - '5' <-> ( '2' <-> '4' ) -(1 row) - -SELECT ts_rewrite('5 <-> (6 | 8)', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite ------------------------ - '5' <-> ( '6' | '8' ) -(1 row) - --- Check empty substitution -SELECT ts_rewrite(to_tsquery('5 & (6 | 5)'), to_tsquery('5'), to_tsquery('')); -NOTICE: text-search query doesn't contain lexemes: "" - ts_rewrite ------------- - '6' -(1 row) - -SELECT ts_rewrite(to_tsquery('!5'), to_tsquery('5'), to_tsquery('')); -NOTICE: text-search query doesn't contain lexemes: "" - ts_rewrite ------------- - -(1 row) - -SELECT keyword FROM test_tsquery WHERE keyword @> 'new'; - keyword ------------------- - 'new' <-> 'york' -(1 row) - -SELECT keyword FROM test_tsquery WHERE keyword @> 'moscow'; - keyword ----------- - 'moscow' -(1 row) - -SELECT keyword FROM test_tsquery WHERE keyword <@ 'new'; - keyword ---------- -(0 rows) - -SELECT keyword FROM test_tsquery WHERE keyword <@ 'moscow'; - keyword ----------- - 'moscow' -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow') AS query; - ts_rewrite ---------------------- - 'moskva' | 'moscow' -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow & hotel') AS query; - ts_rewrite ------------------------------------ - 'hotel' & ( 'moskva' | 'moscow' ) -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'bar & qq & foo & (new <-> york)') AS query; - ts_rewrite -------------------------------------------------------------------------------------- - 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' ) -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow') AS query; - ts_rewrite ---------------------- - 'moskva' | 'moscow' -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow & hotel') AS query; - ts_rewrite ------------------------------------ - 'hotel' & ( 'moskva' | 'moscow' ) -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'bar & qq & foo & (new <-> york)') AS query; - ts_rewrite -------------------------------------------------------------------------------------- - 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' ) -(1 row) - -CREATE INDEX qq ON test_tsquery USING gist (keyword tsquery_ops); -SET enable_seqscan=OFF; -SELECT keyword FROM test_tsquery WHERE keyword @> 'new'; - keyword ------------------- - 'new' <-> 'york' -(1 row) - -SELECT keyword FROM test_tsquery WHERE keyword @> 'moscow'; - keyword ----------- - 'moscow' -(1 row) - -SELECT keyword FROM test_tsquery WHERE keyword <@ 'new'; - keyword ---------- -(0 rows) - -SELECT keyword FROM test_tsquery WHERE keyword <@ 'moscow'; - keyword ----------- - 'moscow' -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM 
to_tsquery('english', 'moscow') AS query; - ts_rewrite ---------------------- - 'moskva' | 'moscow' -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow & hotel') AS query; - ts_rewrite ------------------------------------ - 'hotel' & ( 'moskva' | 'moscow' ) -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'bar & qq & foo & (new <-> york)') AS query; - ts_rewrite -------------------------------------------------------------------------------------- - 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' ) -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow') AS query; - ts_rewrite ---------------------- - 'moskva' | 'moscow' -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow & hotel') AS query; - ts_rewrite ------------------------------------ - 'hotel' & ( 'moskva' | 'moscow' ) -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'bar & qq & foo & (new <-> york)') AS query; - ts_rewrite -------------------------------------------------------------------------------------- - 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' ) -(1 row) - -SELECT ts_rewrite(tsquery_phrase('foo', 'foo'), 'foo', 'bar | baz'); - ts_rewrite ------------------------------------------ - ( 'bar' | 'baz' ) <-> ( 'bar' | 'baz' ) -(1 row) - -SELECT to_tsvector('foo bar') @@ - ts_rewrite(tsquery_phrase('foo', 'foo'), 'foo', 'bar | baz'); - ?column? ----------- - f -(1 row) - -SELECT to_tsvector('bar baz') @@ - ts_rewrite(tsquery_phrase('foo', 'foo'), 'foo', 'bar | baz'); - ?column? 
----------- - t -(1 row) - -RESET enable_seqscan; ---test GUC -SET default_text_search_config=simple; -SELECT to_tsvector('SKIES My booKs'); - to_tsvector ----------------------------- - 'books':3 'my':2 'skies':1 -(1 row) - -SELECT plainto_tsquery('SKIES My booKs'); - plainto_tsquery --------------------------- - 'skies' & 'my' & 'books' -(1 row) - -SELECT to_tsquery('SKIES & My | booKs'); - to_tsquery --------------------------- - 'skies' & 'my' | 'books' -(1 row) - -SET default_text_search_config=english; -SELECT to_tsvector('SKIES My booKs'); - to_tsvector ------------------- - 'book':3 'sky':1 -(1 row) - -SELECT plainto_tsquery('SKIES My booKs'); - plainto_tsquery ------------------ - 'sky' & 'book' -(1 row) - -SELECT to_tsquery('SKIES & My | booKs'); - to_tsquery ----------------- - 'sky' | 'book' -(1 row) - ---trigger -CREATE TRIGGER tsvectorupdate -BEFORE UPDATE OR INSERT ON test_tsvector -FOR EACH ROW EXECUTE PROCEDURE tsvector_update_trigger(a, 'pg_catalog.english', t); -SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); - count -------- - 0 -(1 row) - -INSERT INTO test_tsvector (t) VALUES ('345 qwerty'); -SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); - count -------- - 1 -(1 row) - -UPDATE test_tsvector SET t = null WHERE t = '345 qwerty'; -SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); - count -------- - 0 -(1 row) - -INSERT INTO test_tsvector (t) VALUES ('345 qwerty'); -SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); - count -------- - 1 -(1 row) - --- Test inlining of immutable constant functions --- to_tsquery(text) is not immutable, so it won't be inlined -explain (costs off) -select * from test_tsquery, to_tsquery('new') q where txtsample @@ q; - QUERY PLAN ------------------------------------------------- - Nested Loop - Join Filter: (test_tsquery.txtsample @@ q.q) - -> Function Scan on to_tsquery q - -> Seq Scan on test_tsquery -(4 rows) - --- to_tsquery(regconfig, text) is an immutable function. --- That allows us to get rid of using function scan and join at all. 
-explain (costs off) -select * from test_tsquery, to_tsquery('english', 'new') q where txtsample @@ q; - QUERY PLAN ---------------------------------------------- - Seq Scan on test_tsquery - Filter: (txtsample @@ '''new'''::tsquery) -(2 rows) - --- test finding items in GIN's pending list -create temp table pendtest (ts tsvector); -create index pendtest_idx on pendtest using gin(ts); -insert into pendtest values (to_tsvector('Lore ipsam')); -insert into pendtest values (to_tsvector('Lore ipsum')); -select * from pendtest where 'ipsu:*'::tsquery @@ ts; - ts --------------------- - 'ipsum':2 'lore':1 -(1 row) - -select * from pendtest where 'ipsa:*'::tsquery @@ ts; - ts --------------------- - 'ipsam':2 'lore':1 -(1 row) - -select * from pendtest where 'ips:*'::tsquery @@ ts; - ts --------------------- - 'ipsam':2 'lore':1 - 'ipsum':2 'lore':1 -(2 rows) - -select * from pendtest where 'ipt:*'::tsquery @@ ts; - ts ----- -(0 rows) - -select * from pendtest where 'ipi:*'::tsquery @@ ts; - ts ----- -(0 rows) - ---check OP_PHRASE on index -create temp table phrase_index_test(fts tsvector); -insert into phrase_index_test values ('A fat cat has just eaten a rat.'); -insert into phrase_index_test values (to_tsvector('english', 'A fat cat has just eaten a rat.')); -create index phrase_index_test_idx on phrase_index_test using gin(fts); -set enable_seqscan = off; -select * from phrase_index_test where fts @@ phraseto_tsquery('english', 'fat cat'); - fts ------------------------------------ - 'cat':3 'eaten':6 'fat':2 'rat':8 -(1 row) - -set enable_seqscan = on; --- test websearch_to_tsquery function -select websearch_to_tsquery('simple', 'I have a fat:*ABCD cat'); - websearch_to_tsquery ---------------------------------------------- - 'i' & 'have' & 'a' & 'fat' & 'abcd' & 'cat' -(1 row) - -select websearch_to_tsquery('simple', 'orange:**AABBCCDD'); - websearch_to_tsquery ------------------------ - 'orange' & 'aabbccdd' -(1 row) - -select websearch_to_tsquery('simple', 'fat:A!cat:B|rat:C<'); - websearch_to_tsquery ------------------------------------------ - 'fat' & 'a' & 'cat' & 'b' & 'rat' & 'c' -(1 row) - -select websearch_to_tsquery('simple', 'fat:A : cat:B'); - websearch_to_tsquery ---------------------------- - 'fat' & 'a' & 'cat' & 'b' -(1 row) - -select websearch_to_tsquery('simple', 'fat*rat'); - websearch_to_tsquery ----------------------- - 'fat' <-> 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'fat-rat'); - websearch_to_tsquery -------------------------------- - 'fat-rat' <-> 'fat' <-> 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'fat_rat'); - websearch_to_tsquery ----------------------- - 'fat' <-> 'rat' -(1 row) - --- weights are completely ignored -select websearch_to_tsquery('simple', 'abc : def'); - websearch_to_tsquery ----------------------- - 'abc' & 'def' -(1 row) - -select websearch_to_tsquery('simple', 'abc:def'); - websearch_to_tsquery ----------------------- - 'abc' & 'def' -(1 row) - -select websearch_to_tsquery('simple', 'a:::b'); - websearch_to_tsquery ----------------------- - 'a' & 'b' -(1 row) - -select websearch_to_tsquery('simple', 'abc:d'); - websearch_to_tsquery ----------------------- - 'abc' & 'd' -(1 row) - -select websearch_to_tsquery('simple', ':'); -NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored - websearch_to_tsquery ----------------------- - -(1 row) - --- these operators are ignored -select websearch_to_tsquery('simple', 'abc & def'); - websearch_to_tsquery ----------------------- - 'abc' & 'def' 
-(1 row) - -select websearch_to_tsquery('simple', 'abc | def'); - websearch_to_tsquery ----------------------- - 'abc' & 'def' -(1 row) - -select websearch_to_tsquery('simple', 'abc <-> def'); - websearch_to_tsquery ----------------------- - 'abc' & 'def' -(1 row) - -select websearch_to_tsquery('simple', 'abc (pg or class)'); - websearch_to_tsquery ------------------------- - 'abc' & 'pg' | 'class' -(1 row) - --- NOT is ignored in quotes -select websearch_to_tsquery('english', 'My brand new smartphone'); - websearch_to_tsquery -------------------------------- - 'brand' & 'new' & 'smartphon' -(1 row) - -select websearch_to_tsquery('english', 'My brand "new smartphone"'); - websearch_to_tsquery ---------------------------------- - 'brand' & 'new' <-> 'smartphon' -(1 row) - -select websearch_to_tsquery('english', 'My brand "new -smartphone"'); - websearch_to_tsquery ---------------------------------- - 'brand' & 'new' <-> 'smartphon' -(1 row) - --- test OR operator -select websearch_to_tsquery('simple', 'cat or rat'); - websearch_to_tsquery ----------------------- - 'cat' | 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'cat OR rat'); - websearch_to_tsquery ----------------------- - 'cat' | 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'cat "OR" rat'); - websearch_to_tsquery ----------------------- - 'cat' & 'or' & 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'cat OR'); - websearch_to_tsquery ----------------------- - 'cat' & 'or' -(1 row) - -select websearch_to_tsquery('simple', 'OR rat'); - websearch_to_tsquery ----------------------- - 'or' & 'rat' -(1 row) - -select websearch_to_tsquery('simple', '"fat cat OR rat"'); - websearch_to_tsquery ------------------------------------- - 'fat' <-> 'cat' <-> 'or' <-> 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'fat (cat OR rat'); - websearch_to_tsquery ------------------------ - 'fat' & 'cat' | 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'or OR or'); - websearch_to_tsquery ----------------------- - 'or' | 'or' -(1 row) - --- OR is an operator here ... -select websearch_to_tsquery('simple', '"fat cat"or"fat rat"'); - websearch_to_tsquery ------------------------------------ - 'fat' <-> 'cat' | 'fat' <-> 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'fat or(rat'); - websearch_to_tsquery ----------------------- - 'fat' | 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'fat or)rat'); - websearch_to_tsquery ----------------------- - 'fat' | 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'fat or&rat'); - websearch_to_tsquery ----------------------- - 'fat' | 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'fat or|rat'); - websearch_to_tsquery ----------------------- - 'fat' | 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'fat or!rat'); - websearch_to_tsquery ----------------------- - 'fat' | 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'fat orrat'); - websearch_to_tsquery ----------------------- - 'fat' | 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'fat or '); - websearch_to_tsquery ----------------------- - 'fat' & 'or' -(1 row) - --- ... 
but not here -select websearch_to_tsquery('simple', 'abc orange'); - websearch_to_tsquery ----------------------- - 'abc' & 'orange' -(1 row) - -select websearch_to_tsquery('simple', 'abc OR1234'); - websearch_to_tsquery ----------------------- - 'abc' & 'or1234' -(1 row) - -select websearch_to_tsquery('simple', 'abc or-abc'); - websearch_to_tsquery -------------------------------------- - 'abc' & 'or-abc' <-> 'or' <-> 'abc' -(1 row) - -select websearch_to_tsquery('simple', 'abc OR_abc'); - websearch_to_tsquery ------------------------- - 'abc' & 'or' <-> 'abc' -(1 row) - --- test quotes -select websearch_to_tsquery('english', '"pg_class pg'); - websearch_to_tsquery ---------------------------- - 'pg' <-> 'class' <-> 'pg' -(1 row) - -select websearch_to_tsquery('english', 'pg_class pg"'); - websearch_to_tsquery -------------------------- - 'pg' <-> 'class' & 'pg' -(1 row) - -select websearch_to_tsquery('english', '"pg_class pg"'); - websearch_to_tsquery ---------------------------- - 'pg' <-> 'class' <-> 'pg' -(1 row) - -select websearch_to_tsquery('english', '"pg_class : pg"'); - websearch_to_tsquery ---------------------------- - 'pg' <-> 'class' <-> 'pg' -(1 row) - -select websearch_to_tsquery('english', 'abc "pg_class pg"'); - websearch_to_tsquery ------------------------------------ - 'abc' & 'pg' <-> 'class' <-> 'pg' -(1 row) - -select websearch_to_tsquery('english', '"pg_class pg" def'); - websearch_to_tsquery ------------------------------------ - 'pg' <-> 'class' <-> 'pg' & 'def' -(1 row) - -select websearch_to_tsquery('english', 'abc "pg pg_class pg" def'); - websearch_to_tsquery ----------------------------------------------------- - 'abc' & 'pg' <-> 'pg' <-> 'class' <-> 'pg' & 'def' -(1 row) - -select websearch_to_tsquery('english', ' or "pg pg_class pg" or '); - websearch_to_tsquery ------------------------------------- - 'pg' <-> 'pg' <-> 'class' <-> 'pg' -(1 row) - -select websearch_to_tsquery('english', '""pg pg_class pg""'); - websearch_to_tsquery --------------------------------- - 'pg' & 'pg' <-> 'class' & 'pg' -(1 row) - -select websearch_to_tsquery('english', 'abc """"" def'); - websearch_to_tsquery ----------------------- - 'abc' & 'def' -(1 row) - -select websearch_to_tsquery('english', 'cat -"fat rat"'); - websearch_to_tsquery ------------------------------- - 'cat' & !( 'fat' <-> 'rat' ) -(1 row) - -select websearch_to_tsquery('english', 'cat -"fat rat" cheese'); - websearch_to_tsquery ----------------------------------------- - 'cat' & !( 'fat' <-> 'rat' ) & 'chees' -(1 row) - -select websearch_to_tsquery('english', 'abc "def -"'); - websearch_to_tsquery ----------------------- - 'abc' & 'def' -(1 row) - -select websearch_to_tsquery('english', 'abc "def :"'); - websearch_to_tsquery ----------------------- - 'abc' & 'def' -(1 row) - -select websearch_to_tsquery('english', '"A fat cat" has just eaten a -rat.'); - websearch_to_tsquery ------------------------------------- - 'fat' <-> 'cat' & 'eaten' & !'rat' -(1 row) - -select websearch_to_tsquery('english', '"A fat cat" has just eaten OR !rat.'); - websearch_to_tsquery ------------------------------------ - 'fat' <-> 'cat' & 'eaten' | 'rat' -(1 row) - -select websearch_to_tsquery('english', '"A fat cat" has just (+eaten OR -rat)'); - websearch_to_tsquery ------------------------------------- - 'fat' <-> 'cat' & 'eaten' | !'rat' -(1 row) - -select websearch_to_tsquery('english', 'this is ----fine'); - websearch_to_tsquery ----------------------- - !!!!'fine' -(1 row) - -select websearch_to_tsquery('english', '(()) 
)))) this ||| is && -fine, "dear friend" OR good'); - websearch_to_tsquery ----------------------------------------- - !'fine' & 'dear' <-> 'friend' | 'good' -(1 row) - -select websearch_to_tsquery('english', 'an old <-> cat " is fine &&& too'); - websearch_to_tsquery ------------------------- - 'old' & 'cat' & 'fine' -(1 row) - -select websearch_to_tsquery('english', '"A the" OR just on'); -NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored - websearch_to_tsquery ----------------------- - -(1 row) - -select websearch_to_tsquery('english', '"a fat cat" ate a rat'); - websearch_to_tsquery ---------------------------------- - 'fat' <-> 'cat' & 'ate' & 'rat' -(1 row) - -select to_tsvector('english', 'A fat cat ate a rat') @@ - websearch_to_tsquery('english', '"a fat cat" ate a rat'); - ?column? ----------- - t -(1 row) - -select to_tsvector('english', 'A fat grey cat ate a rat') @@ - websearch_to_tsquery('english', '"a fat cat" ate a rat'); - ?column? ----------- - f -(1 row) - --- cases handled by gettoken_tsvector() -select websearch_to_tsquery(''''); -NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored - websearch_to_tsquery ----------------------- - -(1 row) - -select websearch_to_tsquery('''abc''''def'''); - websearch_to_tsquery ----------------------- - 'abc' <-> 'def' -(1 row) - -select websearch_to_tsquery('\abc'); - websearch_to_tsquery ----------------------- - 'abc' -(1 row) - -select websearch_to_tsquery('\'); -NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored - websearch_to_tsquery ----------------------- - -(1 row) - +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
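The tsearch coverage lost above centres on ts_rewrite() driven by a substitution table: each (keyword, sample) pair returned by the SELECT is applied to the input query. A minimal sketch of the same pattern, under an illustrative table name rather than the regression schema:

    -- hypothetical rewrite table, same (tsquery, tsquery) shape as the
    -- keyword/sample columns of test_tsquery above
    CREATE TABLE rewrite_rules (keyword tsquery, sample tsquery);
    INSERT INTO rewrite_rules
      VALUES (to_tsquery('english', 'moscow'), to_tsquery('english', 'moskva | moscow'));
    -- matching subexpressions of the input query are replaced by the sample
    SELECT ts_rewrite(to_tsquery('english', 'moscow & hotel'),
                      'SELECT keyword, sample FROM rewrite_rules');
    -- expected shape, per the expected output above: 'hotel' & ( 'moskva' | 'moscow' )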
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/tsdicts.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tsdicts.out --- /tmp/cirrus-ci-build/src/test/regress/expected/tsdicts.out 2024-03-07 14:25:00.334387000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/tsdicts.out 2024-03-07 14:27:17.336510000 +0000 @@ -1,723 +1,2 @@ ---Test text search dictionaries and configurations --- Test ISpell dictionary with ispell affix file -CREATE TEXT SEARCH DICTIONARY ispell ( - Template=ispell, - DictFile=ispell_sample, - AffFile=ispell_sample -); -SELECT ts_lexize('ispell', 'skies'); - ts_lexize ------------ - {sky} -(1 row) - -SELECT ts_lexize('ispell', 'bookings'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('ispell', 'booking'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('ispell', 'foot'); - ts_lexize ------------ - {foot} -(1 row) - -SELECT ts_lexize('ispell', 'foots'); - ts_lexize ------------ - {foot} -(1 row) - -SELECT ts_lexize('ispell', 'rebookings'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('ispell', 'rebooking'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('ispell', 'rebook'); - ts_lexize ------------ - -(1 row) - -SELECT ts_lexize('ispell', 'unbookings'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('ispell', 'unbooking'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('ispell', 'unbook'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('ispell', 'footklubber'); - ts_lexize ----------------- - {foot,klubber} -(1 row) - -SELECT ts_lexize('ispell', 'footballklubber'); - ts_lexize ------------------------------------------------------- - {footballklubber,foot,ball,klubber,football,klubber} -(1 row) - -SELECT ts_lexize('ispell', 'ballyklubber'); - ts_lexize ----------------- - {ball,klubber} -(1 row) - -SELECT ts_lexize('ispell', 'footballyklubber'); - ts_lexize ---------------------- - {foot,ball,klubber} -(1 row) - --- Test ISpell dictionary with hunspell affix file -CREATE TEXT SEARCH DICTIONARY hunspell ( - Template=ispell, - DictFile=ispell_sample, - AffFile=hunspell_sample -); -SELECT ts_lexize('hunspell', 'skies'); - ts_lexize ------------ - {sky} -(1 row) - -SELECT ts_lexize('hunspell', 'bookings'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell', 'booking'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell', 'foot'); - ts_lexize ------------ - {foot} -(1 row) - -SELECT ts_lexize('hunspell', 'foots'); - ts_lexize ------------ - {foot} -(1 row) - -SELECT ts_lexize('hunspell', 'rebookings'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell', 'rebooking'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell', 'rebook'); - ts_lexize ------------ - -(1 row) - -SELECT ts_lexize('hunspell', 'unbookings'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('hunspell', 'unbooking'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('hunspell', 'unbook'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('hunspell', 'footklubber'); - ts_lexize ----------------- - {foot,klubber} -(1 row) - -SELECT ts_lexize('hunspell', 'footballklubber'); - ts_lexize ------------------------------------------------------- - {footballklubber,foot,ball,klubber,football,klubber} -(1 row) - -SELECT 
ts_lexize('hunspell', 'ballyklubber'); - ts_lexize ----------------- - {ball,klubber} -(1 row) - -SELECT ts_lexize('hunspell', 'footballyklubber'); - ts_lexize ---------------------- - {foot,ball,klubber} -(1 row) - --- Test ISpell dictionary with hunspell affix file with FLAG long parameter -CREATE TEXT SEARCH DICTIONARY hunspell_long ( - Template=ispell, - DictFile=hunspell_sample_long, - AffFile=hunspell_sample_long -); -SELECT ts_lexize('hunspell_long', 'skies'); - ts_lexize ------------ - {sky} -(1 row) - -SELECT ts_lexize('hunspell_long', 'bookings'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell_long', 'booking'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell_long', 'foot'); - ts_lexize ------------ - {foot} -(1 row) - -SELECT ts_lexize('hunspell_long', 'foots'); - ts_lexize ------------ - {foot} -(1 row) - -SELECT ts_lexize('hunspell_long', 'rebookings'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell_long', 'rebooking'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell_long', 'rebook'); - ts_lexize ------------ - -(1 row) - -SELECT ts_lexize('hunspell_long', 'unbookings'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('hunspell_long', 'unbooking'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('hunspell_long', 'unbook'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('hunspell_long', 'booked'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('hunspell_long', 'footklubber'); - ts_lexize ----------------- - {foot,klubber} -(1 row) - -SELECT ts_lexize('hunspell_long', 'footballklubber'); - ts_lexize ------------------------------------------------------- - {footballklubber,foot,ball,klubber,football,klubber} -(1 row) - -SELECT ts_lexize('hunspell_long', 'ballyklubber'); - ts_lexize ----------------- - {ball,klubber} -(1 row) - -SELECT ts_lexize('hunspell_long', 'ballsklubber'); - ts_lexize ----------------- - {ball,klubber} -(1 row) - -SELECT ts_lexize('hunspell_long', 'footballyklubber'); - ts_lexize ---------------------- - {foot,ball,klubber} -(1 row) - -SELECT ts_lexize('hunspell_long', 'ex-machina'); - ts_lexize ---------------- - {ex-,machina} -(1 row) - --- Test ISpell dictionary with hunspell affix file with FLAG num parameter -CREATE TEXT SEARCH DICTIONARY hunspell_num ( - Template=ispell, - DictFile=hunspell_sample_num, - AffFile=hunspell_sample_num -); -SELECT ts_lexize('hunspell_num', 'skies'); - ts_lexize ------------ - {sky} -(1 row) - -SELECT ts_lexize('hunspell_num', 'sk'); - ts_lexize ------------ - {sky} -(1 row) - -SELECT ts_lexize('hunspell_num', 'bookings'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell_num', 'booking'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell_num', 'foot'); - ts_lexize ------------ - {foot} -(1 row) - -SELECT ts_lexize('hunspell_num', 'foots'); - ts_lexize ------------ - {foot} -(1 row) - -SELECT ts_lexize('hunspell_num', 'rebookings'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell_num', 'rebooking'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell_num', 'rebook'); - ts_lexize ------------ - -(1 row) - -SELECT ts_lexize('hunspell_num', 'unbookings'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('hunspell_num', 'unbooking'); - ts_lexize ------------ - 
{book} -(1 row) - -SELECT ts_lexize('hunspell_num', 'unbook'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('hunspell_num', 'booked'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('hunspell_num', 'footklubber'); - ts_lexize ----------------- - {foot,klubber} -(1 row) - -SELECT ts_lexize('hunspell_num', 'footballklubber'); - ts_lexize ------------------------------------------------------- - {footballklubber,foot,ball,klubber,football,klubber} -(1 row) - -SELECT ts_lexize('hunspell_num', 'ballyklubber'); - ts_lexize ----------------- - {ball,klubber} -(1 row) - -SELECT ts_lexize('hunspell_num', 'footballyklubber'); - ts_lexize ---------------------- - {foot,ball,klubber} -(1 row) - --- Test suitability of affix and dict files -CREATE TEXT SEARCH DICTIONARY hunspell_err ( - Template=ispell, - DictFile=ispell_sample, - AffFile=hunspell_sample_long -); -ERROR: invalid affix alias "GJUS" -CREATE TEXT SEARCH DICTIONARY hunspell_err ( - Template=ispell, - DictFile=ispell_sample, - AffFile=hunspell_sample_num -); -ERROR: invalid affix flag "SZ\" -CREATE TEXT SEARCH DICTIONARY hunspell_invalid_1 ( - Template=ispell, - DictFile=hunspell_sample_long, - AffFile=ispell_sample -); -CREATE TEXT SEARCH DICTIONARY hunspell_invalid_2 ( - Template=ispell, - DictFile=hunspell_sample_long, - AffFile=hunspell_sample_num -); -CREATE TEXT SEARCH DICTIONARY hunspell_invalid_3 ( - Template=ispell, - DictFile=hunspell_sample_num, - AffFile=ispell_sample -); -CREATE TEXT SEARCH DICTIONARY hunspell_err ( - Template=ispell, - DictFile=hunspell_sample_num, - AffFile=hunspell_sample_long -); -ERROR: invalid affix alias "302,301,202,303" --- Synonym dictionary -CREATE TEXT SEARCH DICTIONARY synonym ( - Template=synonym, - Synonyms=synonym_sample -); -SELECT ts_lexize('synonym', 'PoStGrEs'); - ts_lexize ------------ - {pgsql} -(1 row) - -SELECT ts_lexize('synonym', 'Gogle'); - ts_lexize ------------ - {googl} -(1 row) - -SELECT ts_lexize('synonym', 'indices'); - ts_lexize ------------ - {index} -(1 row) - --- test altering boolean parameters -SELECT dictinitoption FROM pg_ts_dict WHERE dictname = 'synonym'; - dictinitoption ------------------------------ - synonyms = 'synonym_sample' -(1 row) - -ALTER TEXT SEARCH DICTIONARY synonym (CaseSensitive = 1); -SELECT ts_lexize('synonym', 'PoStGrEs'); - ts_lexize ------------ - -(1 row) - -SELECT dictinitoption FROM pg_ts_dict WHERE dictname = 'synonym'; - dictinitoption ------------------------------------------------- - synonyms = 'synonym_sample', casesensitive = 1 -(1 row) - -ALTER TEXT SEARCH DICTIONARY synonym (CaseSensitive = 2); -- fail -ERROR: casesensitive requires a Boolean value -ALTER TEXT SEARCH DICTIONARY synonym (CaseSensitive = off); -SELECT ts_lexize('synonym', 'PoStGrEs'); - ts_lexize ------------ - {pgsql} -(1 row) - -SELECT dictinitoption FROM pg_ts_dict WHERE dictname = 'synonym'; - dictinitoption ----------------------------------------------------- - synonyms = 'synonym_sample', casesensitive = 'off' -(1 row) - --- Create and simple test thesaurus dictionary --- More tests in configuration checks because ts_lexize() --- cannot pass more than one word to thesaurus. 
-CREATE TEXT SEARCH DICTIONARY thesaurus ( - Template=thesaurus, - DictFile=thesaurus_sample, - Dictionary=english_stem -); -SELECT ts_lexize('thesaurus', 'one'); - ts_lexize ------------ - {1} -(1 row) - --- Test ispell dictionary in configuration -CREATE TEXT SEARCH CONFIGURATION ispell_tst ( - COPY=english -); -ALTER TEXT SEARCH CONFIGURATION ispell_tst ALTER MAPPING FOR - word, numword, asciiword, hword, numhword, asciihword, hword_part, hword_numpart, hword_asciipart - WITH ispell, english_stem; -SELECT to_tsvector('ispell_tst', 'Booking the skies after rebookings for footballklubber from a foot'); - to_tsvector ----------------------------------------------------------------------------------------------------- - 'ball':7 'book':1,5 'booking':1,5 'foot':7,10 'football':7 'footballklubber':7 'klubber':7 'sky':3 -(1 row) - -SELECT to_tsquery('ispell_tst', 'footballklubber'); - to_tsquery --------------------------------------------------------------------------- - 'footballklubber' | 'foot' & 'ball' & 'klubber' | 'football' & 'klubber' -(1 row) - -SELECT to_tsquery('ispell_tst', 'footballyklubber:b & rebookings:A & sky'); - to_tsquery ------------------------------------------------------------------------- - 'foot':B & 'ball':B & 'klubber':B & ( 'booking':A | 'book':A ) & 'sky' -(1 row) - --- Test ispell dictionary with hunspell affix in configuration -CREATE TEXT SEARCH CONFIGURATION hunspell_tst ( - COPY=ispell_tst -); -ALTER TEXT SEARCH CONFIGURATION hunspell_tst ALTER MAPPING - REPLACE ispell WITH hunspell; -SELECT to_tsvector('hunspell_tst', 'Booking the skies after rebookings for footballklubber from a foot'); - to_tsvector ----------------------------------------------------------------------------------------------------- - 'ball':7 'book':1,5 'booking':1,5 'foot':7,10 'football':7 'footballklubber':7 'klubber':7 'sky':3 -(1 row) - -SELECT to_tsquery('hunspell_tst', 'footballklubber'); - to_tsquery --------------------------------------------------------------------------- - 'footballklubber' | 'foot' & 'ball' & 'klubber' | 'football' & 'klubber' -(1 row) - -SELECT to_tsquery('hunspell_tst', 'footballyklubber:b & rebookings:A & sky'); - to_tsquery ------------------------------------------------------------------------- - 'foot':B & 'ball':B & 'klubber':B & ( 'booking':A | 'book':A ) & 'sky' -(1 row) - -SELECT to_tsquery('hunspell_tst', 'footballyklubber:b <-> sky'); - to_tsquery -------------------------------------------------- - ( 'foot':B & 'ball':B & 'klubber':B ) <-> 'sky' -(1 row) - -SELECT phraseto_tsquery('hunspell_tst', 'footballyklubber sky'); - phraseto_tsquery -------------------------------------------- - ( 'foot' & 'ball' & 'klubber' ) <-> 'sky' -(1 row) - --- Test ispell dictionary with hunspell affix with FLAG long in configuration -ALTER TEXT SEARCH CONFIGURATION hunspell_tst ALTER MAPPING - REPLACE hunspell WITH hunspell_long; -SELECT to_tsvector('hunspell_tst', 'Booking the skies after rebookings for footballklubber from a foot'); - to_tsvector ----------------------------------------------------------------------------------------------------- - 'ball':7 'book':1,5 'booking':1,5 'foot':7,10 'football':7 'footballklubber':7 'klubber':7 'sky':3 -(1 row) - -SELECT to_tsquery('hunspell_tst', 'footballklubber'); - to_tsquery --------------------------------------------------------------------------- - 'footballklubber' | 'foot' & 'ball' & 'klubber' | 'football' & 'klubber' -(1 row) - -SELECT to_tsquery('hunspell_tst', 'footballyklubber:b & rebookings:A & sky'); 
- to_tsquery ------------------------------------------------------------------------- - 'foot':B & 'ball':B & 'klubber':B & ( 'booking':A | 'book':A ) & 'sky' -(1 row) - --- Test ispell dictionary with hunspell affix with FLAG num in configuration -ALTER TEXT SEARCH CONFIGURATION hunspell_tst ALTER MAPPING - REPLACE hunspell_long WITH hunspell_num; -SELECT to_tsvector('hunspell_tst', 'Booking the skies after rebookings for footballklubber from a foot'); - to_tsvector ----------------------------------------------------------------------------------------------------- - 'ball':7 'book':1,5 'booking':1,5 'foot':7,10 'football':7 'footballklubber':7 'klubber':7 'sky':3 -(1 row) - -SELECT to_tsquery('hunspell_tst', 'footballklubber'); - to_tsquery --------------------------------------------------------------------------- - 'footballklubber' | 'foot' & 'ball' & 'klubber' | 'football' & 'klubber' -(1 row) - -SELECT to_tsquery('hunspell_tst', 'footballyklubber:b & rebookings:A & sky'); - to_tsquery ------------------------------------------------------------------------- - 'foot':B & 'ball':B & 'klubber':B & ( 'booking':A | 'book':A ) & 'sky' -(1 row) - --- Test synonym dictionary in configuration -CREATE TEXT SEARCH CONFIGURATION synonym_tst ( - COPY=english -); -ALTER TEXT SEARCH CONFIGURATION synonym_tst ALTER MAPPING FOR - asciiword, hword_asciipart, asciihword - WITH synonym, english_stem; -SELECT to_tsvector('synonym_tst', 'Postgresql is often called as postgres or pgsql and pronounced as postgre'); - to_tsvector ---------------------------------------------------- - 'call':4 'often':3 'pgsql':1,6,8,12 'pronounc':10 -(1 row) - -SELECT to_tsvector('synonym_tst', 'Most common mistake is to write Gogle instead of Google'); - to_tsvector ----------------------------------------------------------- - 'common':2 'googl':7,10 'instead':8 'mistak':3 'write':6 -(1 row) - -SELECT to_tsvector('synonym_tst', 'Indexes or indices - Which is right plural form of index?'); - to_tsvector ----------------------------------------------- - 'form':8 'index':1,3,10 'plural':7 'right':6 -(1 row) - -SELECT to_tsquery('synonym_tst', 'Index & indices'); - to_tsquery ---------------------- - 'index' & 'index':* -(1 row) - --- test thesaurus in configuration --- see thesaurus_sample.ths to understand 'odd' resulting tsvector -CREATE TEXT SEARCH CONFIGURATION thesaurus_tst ( - COPY=synonym_tst -); -ALTER TEXT SEARCH CONFIGURATION thesaurus_tst ALTER MAPPING FOR - asciiword, hword_asciipart, asciihword - WITH synonym, thesaurus, english_stem; -SELECT to_tsvector('thesaurus_tst', 'one postgres one two one two three one'); - to_tsvector ----------------------------------- - '1':1,5 '12':3 '123':4 'pgsql':2 -(1 row) - -SELECT to_tsvector('thesaurus_tst', 'Supernovae star is very new star and usually called supernovae (abbreviation SN)'); - to_tsvector --------------------------------------------------------------- - 'abbrevi':10 'call':8 'new':4 'sn':1,9,11 'star':5 'usual':7 -(1 row) - -SELECT to_tsvector('thesaurus_tst', 'Booking tickets is looking like a booking a tickets'); - to_tsvector -------------------------------------------------------- - 'card':3,10 'invit':2,9 'like':6 'look':5 'order':1,8 -(1 row) - --- invalid: non-lowercase quoted identifiers -CREATE TEXT SEARCH DICTIONARY tsdict_case -( - Template = ispell, - "DictFile" = ispell_sample, - "AffFile" = ispell_sample -); -ERROR: unrecognized Ispell parameter: "DictFile" --- Test grammar for configurations -CREATE TEXT SEARCH CONFIGURATION dummy_tst 
(COPY=english); --- Overriden mapping change with duplicated tokens. -ALTER TEXT SEARCH CONFIGURATION dummy_tst - ALTER MAPPING FOR word, word WITH ispell; --- Not a token supported by the configuration's parser, fails. -ALTER TEXT SEARCH CONFIGURATION dummy_tst - DROP MAPPING FOR not_a_token, not_a_token; -ERROR: token type "not_a_token" does not exist --- Not a token supported by the configuration's parser, fails even --- with IF EXISTS. -ALTER TEXT SEARCH CONFIGURATION dummy_tst - DROP MAPPING IF EXISTS FOR not_a_token, not_a_token; -ERROR: token type "not_a_token" does not exist --- Token supported by the configuration's parser, succeeds. -ALTER TEXT SEARCH CONFIGURATION dummy_tst - DROP MAPPING FOR word, word; --- No mapping for token supported by the configuration's parser, fails. -ALTER TEXT SEARCH CONFIGURATION dummy_tst - DROP MAPPING FOR word; -ERROR: mapping for token type "word" does not exist --- Token supported by the configuration's parser, cannot be found, --- succeeds with IF EXISTS. -ALTER TEXT SEARCH CONFIGURATION dummy_tst - DROP MAPPING IF EXISTS FOR word, word; -NOTICE: mapping for token type "word" does not exist, skipping --- Re-add mapping, with duplicated tokens supported by the parser. -ALTER TEXT SEARCH CONFIGURATION dummy_tst - ADD MAPPING FOR word, word WITH ispell; --- Not a token supported by the configuration's parser, fails. -ALTER TEXT SEARCH CONFIGURATION dummy_tst - ADD MAPPING FOR not_a_token WITH ispell; -ERROR: token type "not_a_token" does not exist -DROP TEXT SEARCH CONFIGURATION dummy_tst; +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/foreign_data.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/foreign_data.out --- /tmp/cirrus-ci-build/src/test/regress/expected/foreign_data.out 2024-03-07 14:25:00.330579000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/foreign_data.out 2024-03-07 14:27:17.340767000 +0000 @@ -1,2203 +1,2 @@ --- --- Test foreign-data wrapper and server management. --- --- directory paths and dlsuffix are passed to us in environment variables -\getenv libdir PG_LIBDIR -\getenv dlsuffix PG_DLSUFFIX -\set regresslib :libdir '/regress' :dlsuffix -CREATE FUNCTION test_fdw_handler() - RETURNS fdw_handler - AS :'regresslib', 'test_fdw_handler' - LANGUAGE C; --- Clean up in case a prior regression run failed --- Suppress NOTICE messages when roles don't exist -SET client_min_messages TO 'warning'; -DROP ROLE IF EXISTS regress_foreign_data_user, regress_test_role, regress_test_role2, regress_test_role_super, regress_test_indirect, regress_unprivileged_role; -RESET client_min_messages; -CREATE ROLE regress_foreign_data_user LOGIN SUPERUSER; -SET SESSION AUTHORIZATION 'regress_foreign_data_user'; -CREATE ROLE regress_test_role; -CREATE ROLE regress_test_role2; -CREATE ROLE regress_test_role_super SUPERUSER; -CREATE ROLE regress_test_indirect; -CREATE ROLE regress_unprivileged_role; -CREATE FOREIGN DATA WRAPPER dummy; -COMMENT ON FOREIGN DATA WRAPPER dummy IS 'useless'; -CREATE FOREIGN DATA WRAPPER postgresql VALIDATOR postgresql_fdw_validator; --- At this point we should have 2 built-in wrappers and no servers. 
-SELECT fdwname, fdwhandler::regproc, fdwvalidator::regproc, fdwoptions FROM pg_foreign_data_wrapper ORDER BY 1, 2, 3; - fdwname | fdwhandler | fdwvalidator | fdwoptions -------------+------------+--------------------------+------------ - dummy | - | - | - postgresql | - | postgresql_fdw_validator | -(2 rows) - -SELECT srvname, srvoptions FROM pg_foreign_server; - srvname | srvoptions ----------+------------ -(0 rows) - -SELECT * FROM pg_user_mapping; - oid | umuser | umserver | umoptions ------+--------+----------+----------- -(0 rows) - --- CREATE FOREIGN DATA WRAPPER -CREATE FOREIGN DATA WRAPPER foo VALIDATOR bar; -- ERROR -ERROR: function bar(text[], oid) does not exist -CREATE FOREIGN DATA WRAPPER foo; -\dew - List of foreign-data wrappers - Name | Owner | Handler | Validator -------------+---------------------------+---------+-------------------------- - dummy | regress_foreign_data_user | - | - - foo | regress_foreign_data_user | - | - - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator -(3 rows) - -CREATE FOREIGN DATA WRAPPER foo; -- duplicate -ERROR: foreign-data wrapper "foo" already exists -DROP FOREIGN DATA WRAPPER foo; -CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1'); -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+---------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_foreign_data_user | - | - | | (testing '1') | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -DROP FOREIGN DATA WRAPPER foo; -CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1', testing '2'); -- ERROR -ERROR: option "testing" provided more than once -CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1', another '2'); -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+----------------------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_foreign_data_user | - | - | | (testing '1', another '2') | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -DROP FOREIGN DATA WRAPPER foo; -SET ROLE regress_test_role; -CREATE FOREIGN DATA WRAPPER foo; -- ERROR -ERROR: permission denied to create foreign-data wrapper "foo" -HINT: Must be superuser to create a foreign-data wrapper. 
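The cases above all follow one rule: wrappers are created by superusers, carry free-form key/value options, and reject a key repeated within a single statement. A hedged sketch of that lifecycle, with illustrative names:

    -- run as superuser; 'my_fdw' and its option are illustrative, not from the test
    CREATE FOREIGN DATA WRAPPER my_fdw OPTIONS (debug 'true');
    -- options are stored in pg_foreign_data_wrapper.fdwoptions as key=value text
    SELECT fdwname, fdwoptions FROM pg_foreign_data_wrapper WHERE fdwname = 'my_fdw';
    DROP FOREIGN DATA WRAPPER my_fdw;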
-RESET ROLE; -CREATE FOREIGN DATA WRAPPER foo VALIDATOR postgresql_fdw_validator; -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+-------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_foreign_data_user | - | postgresql_fdw_validator | | | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - --- HANDLER related checks -CREATE FUNCTION invalid_fdw_handler() RETURNS int LANGUAGE SQL AS 'SELECT 1;'; -CREATE FOREIGN DATA WRAPPER test_fdw HANDLER invalid_fdw_handler; -- ERROR -ERROR: function invalid_fdw_handler must return type fdw_handler -CREATE FOREIGN DATA WRAPPER test_fdw HANDLER test_fdw_handler HANDLER invalid_fdw_handler; -- ERROR -ERROR: conflicting or redundant options -LINE 1: ...GN DATA WRAPPER test_fdw HANDLER test_fdw_handler HANDLER in... - ^ -CREATE FOREIGN DATA WRAPPER test_fdw HANDLER test_fdw_handler; -DROP FOREIGN DATA WRAPPER test_fdw; --- ALTER FOREIGN DATA WRAPPER -ALTER FOREIGN DATA WRAPPER foo OPTIONS (nonexistent 'fdw'); -- ERROR -ERROR: invalid option "nonexistent" -HINT: There are no valid options in this context. -ALTER FOREIGN DATA WRAPPER foo; -- ERROR -ERROR: syntax error at or near ";" -LINE 1: ALTER FOREIGN DATA WRAPPER foo; - ^ -ALTER FOREIGN DATA WRAPPER foo VALIDATOR bar; -- ERROR -ERROR: function bar(text[], oid) does not exist -ALTER FOREIGN DATA WRAPPER foo NO VALIDATOR; -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+-------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_foreign_data_user | - | - | | | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -ALTER FOREIGN DATA WRAPPER foo OPTIONS (a '1', b '2'); -ALTER FOREIGN DATA WRAPPER foo OPTIONS (SET c '4'); -- ERROR -ERROR: option "c" not found -ALTER FOREIGN DATA WRAPPER foo OPTIONS (DROP c); -- ERROR -ERROR: option "c" not found -ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD x '1', DROP x); -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+----------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_foreign_data_user | - | - | | (a '1', b '2') | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -ALTER FOREIGN DATA WRAPPER foo OPTIONS (DROP a, SET b '3', ADD c '4'); -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+----------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_foreign_data_user | - | - | | (b '3', c '4') | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -ALTER FOREIGN DATA WRAPPER foo OPTIONS (a '2'); -ALTER FOREIGN DATA WRAPPER foo OPTIONS (b '4'); -- ERROR -ERROR: option "b" provided more than once -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | 
Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+-----------------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_foreign_data_user | - | - | | (b '3', c '4', a '2') | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -SET ROLE regress_test_role; -ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD d '5'); -- ERROR -ERROR: permission denied to alter foreign-data wrapper "foo" -HINT: Must be superuser to alter a foreign-data wrapper. -SET ROLE regress_test_role_super; -ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD d '5'); -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+------------------------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_foreign_data_user | - | - | | (b '3', c '4', a '2', d '5') | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -ALTER FOREIGN DATA WRAPPER foo OWNER TO regress_test_role; -- ERROR -ERROR: permission denied to change owner of foreign-data wrapper "foo" -HINT: The owner of a foreign-data wrapper must be a superuser. -ALTER FOREIGN DATA WRAPPER foo OWNER TO regress_test_role_super; -ALTER ROLE regress_test_role_super NOSUPERUSER; -SET ROLE regress_test_role_super; -ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD e '6'); -- ERROR -ERROR: permission denied to alter foreign-data wrapper "foo" -HINT: Must be superuser to alter a foreign-data wrapper. -RESET ROLE; -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+------------------------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_test_role_super | - | - | | (b '3', c '4', a '2', d '5') | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -ALTER FOREIGN DATA WRAPPER foo RENAME TO foo1; -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+------------------------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo1 | regress_test_role_super | - | - | | (b '3', c '4', a '2', d '5') | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -ALTER FOREIGN DATA WRAPPER foo1 RENAME TO foo; --- HANDLER related checks -ALTER FOREIGN DATA WRAPPER foo HANDLER invalid_fdw_handler; -- ERROR -ERROR: function invalid_fdw_handler must return type fdw_handler -ALTER FOREIGN DATA WRAPPER foo HANDLER test_fdw_handler HANDLER anything; -- ERROR -ERROR: conflicting or redundant options -LINE 1: ...FOREIGN DATA WRAPPER foo HANDLER test_fdw_handler HANDLER an... 
- ^ -ALTER FOREIGN DATA WRAPPER foo HANDLER test_fdw_handler; -WARNING: changing the foreign-data wrapper handler can change behavior of existing foreign tables -DROP FUNCTION invalid_fdw_handler(); --- DROP FOREIGN DATA WRAPPER -DROP FOREIGN DATA WRAPPER nonexistent; -- ERROR -ERROR: foreign-data wrapper "nonexistent" does not exist -DROP FOREIGN DATA WRAPPER IF EXISTS nonexistent; -NOTICE: foreign-data wrapper "nonexistent" does not exist, skipping -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+------------------+--------------------------+-------------------+------------------------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_test_role_super | test_fdw_handler | - | | (b '3', c '4', a '2', d '5') | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -DROP ROLE regress_test_role_super; -- ERROR -ERROR: role "regress_test_role_super" cannot be dropped because some objects depend on it -DETAIL: owner of foreign-data wrapper foo -SET ROLE regress_test_role_super; -DROP FOREIGN DATA WRAPPER foo; -RESET ROLE; -DROP ROLE regress_test_role_super; -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+-------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(2 rows) - -CREATE FOREIGN DATA WRAPPER foo; -CREATE SERVER s1 FOREIGN DATA WRAPPER foo; -COMMENT ON SERVER s1 IS 'foreign server'; -CREATE USER MAPPING FOR current_user SERVER s1; -CREATE USER MAPPING FOR current_user SERVER s1; -- ERROR -ERROR: user mapping for "regress_foreign_data_user" already exists for server "s1" -CREATE USER MAPPING IF NOT EXISTS FOR current_user SERVER s1; -- NOTICE -NOTICE: user mapping for "regress_foreign_data_user" already exists for server "s1", skipping -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+-------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_foreign_data_user | - | - | | | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -\des+ - List of foreign servers - Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description -------+---------------------------+----------------------+-------------------+------+---------+-------------+---------------- - s1 | regress_foreign_data_user | foo | | | | | foreign server -(1 row) - -\deu+ - List of user mappings - Server | User name | FDW options ---------+---------------------------+------------- - s1 | regress_foreign_data_user | -(1 row) - -DROP FOREIGN DATA WRAPPER foo; -- ERROR -ERROR: cannot drop foreign-data wrapper foo because other objects depend on it -DETAIL: server s1 depends on foreign-data wrapper foo -user mapping for regress_foreign_data_user on server s1 depends on server s1 -HINT: Use DROP ... CASCADE to drop the dependent objects too. 
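The HINT above reflects the dependency chain wrapper -> server -> user mapping, which is why the CASCADE retried below removes two extra objects. A minimal sketch of the same chain under hypothetical names:

    CREATE FOREIGN DATA WRAPPER demo_fdw;
    CREATE SERVER demo_srv FOREIGN DATA WRAPPER demo_fdw;
    CREATE USER MAPPING FOR current_user SERVER demo_srv;
    DROP FOREIGN DATA WRAPPER demo_fdw;          -- fails: demo_srv depends on it
    DROP FOREIGN DATA WRAPPER demo_fdw CASCADE;  -- also drops demo_srv and the mapping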
-SET ROLE regress_test_role; -DROP FOREIGN DATA WRAPPER foo CASCADE; -- ERROR -ERROR: must be owner of foreign-data wrapper foo -RESET ROLE; -DROP FOREIGN DATA WRAPPER foo CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to server s1 -drop cascades to user mapping for regress_foreign_data_user on server s1 -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+-------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(2 rows) - -\des+ - List of foreign servers - Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description -------+-------+----------------------+-------------------+------+---------+-------------+------------- -(0 rows) - -\deu+ - List of user mappings - Server | User name | FDW options ---------+-----------+------------- -(0 rows) - --- exercise CREATE SERVER -CREATE SERVER s1 FOREIGN DATA WRAPPER foo; -- ERROR -ERROR: foreign-data wrapper "foo" does not exist -CREATE FOREIGN DATA WRAPPER foo OPTIONS ("test wrapper" 'true'); -CREATE SERVER s1 FOREIGN DATA WRAPPER foo; -CREATE SERVER s1 FOREIGN DATA WRAPPER foo; -- ERROR -ERROR: server "s1" already exists -CREATE SERVER IF NOT EXISTS s1 FOREIGN DATA WRAPPER foo; -- No ERROR, just NOTICE -NOTICE: server "s1" already exists, skipping -CREATE SERVER s2 FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b'); -CREATE SERVER s3 TYPE 'oracle' FOREIGN DATA WRAPPER foo; -CREATE SERVER s4 TYPE 'oracle' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b'); -CREATE SERVER s5 VERSION '15.0' FOREIGN DATA WRAPPER foo; -CREATE SERVER s6 VERSION '16.0' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b'); -CREATE SERVER s7 TYPE 'oracle' VERSION '17.0' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b'); -CREATE SERVER s8 FOREIGN DATA WRAPPER postgresql OPTIONS (foo '1'); -- ERROR -ERROR: invalid option "foo" -CREATE SERVER s8 FOREIGN DATA WRAPPER postgresql OPTIONS (host 'localhost', dbname 's8db'); -\des+ - List of foreign servers - Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description -------+---------------------------+----------------------+-------------------+--------+---------+-----------------------------------+------------- - s1 | regress_foreign_data_user | foo | | | | | - s2 | regress_foreign_data_user | foo | | | | (host 'a', dbname 'b') | - s3 | regress_foreign_data_user | foo | | oracle | | | - s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') | - s5 | regress_foreign_data_user | foo | | | 15.0 | | - s6 | regress_foreign_data_user | foo | | | 16.0 | (host 'a', dbname 'b') | - s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') | - s8 | regress_foreign_data_user | postgresql | | | | (host 'localhost', dbname 's8db') | -(8 rows) - -SET ROLE regress_test_role; -CREATE SERVER t1 FOREIGN DATA WRAPPER foo; -- ERROR: no usage on FDW -ERROR: permission denied for foreign-data wrapper foo -RESET ROLE; -GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role; -SET ROLE regress_test_role; -CREATE SERVER t1 FOREIGN DATA WRAPPER foo; -RESET ROLE; -\des+ - List of foreign servers - Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description 
-------+---------------------------+----------------------+-------------------+--------+---------+-----------------------------------+------------- - s1 | regress_foreign_data_user | foo | | | | | - s2 | regress_foreign_data_user | foo | | | | (host 'a', dbname 'b') | - s3 | regress_foreign_data_user | foo | | oracle | | | - s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') | - s5 | regress_foreign_data_user | foo | | | 15.0 | | - s6 | regress_foreign_data_user | foo | | | 16.0 | (host 'a', dbname 'b') | - s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') | - s8 | regress_foreign_data_user | postgresql | | | | (host 'localhost', dbname 's8db') | - t1 | regress_test_role | foo | | | | | -(9 rows) - -REVOKE USAGE ON FOREIGN DATA WRAPPER foo FROM regress_test_role; -GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_indirect; -SET ROLE regress_test_role; -CREATE SERVER t2 FOREIGN DATA WRAPPER foo; -- ERROR -ERROR: permission denied for foreign-data wrapper foo -RESET ROLE; -GRANT regress_test_indirect TO regress_test_role; -SET ROLE regress_test_role; -CREATE SERVER t2 FOREIGN DATA WRAPPER foo; -\des+ - List of foreign servers - Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description -------+---------------------------+----------------------+-------------------+--------+---------+-----------------------------------+------------- - s1 | regress_foreign_data_user | foo | | | | | - s2 | regress_foreign_data_user | foo | | | | (host 'a', dbname 'b') | - s3 | regress_foreign_data_user | foo | | oracle | | | - s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') | - s5 | regress_foreign_data_user | foo | | | 15.0 | | - s6 | regress_foreign_data_user | foo | | | 16.0 | (host 'a', dbname 'b') | - s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') | - s8 | regress_foreign_data_user | postgresql | | | | (host 'localhost', dbname 's8db') | - t1 | regress_test_role | foo | | | | | - t2 | regress_test_role | foo | | | | | -(10 rows) - -RESET ROLE; -REVOKE regress_test_indirect FROM regress_test_role; --- ALTER SERVER -ALTER SERVER s0; -- ERROR -ERROR: syntax error at or near ";" -LINE 1: ALTER SERVER s0; - ^ -ALTER SERVER s0 OPTIONS (a '1'); -- ERROR -ERROR: server "s0" does not exist -ALTER SERVER s1 VERSION '1.0' OPTIONS (servername 's1'); -ALTER SERVER s2 VERSION '1.1'; -ALTER SERVER s3 OPTIONS ("tns name" 'orcl', port '1521'); -GRANT USAGE ON FOREIGN SERVER s1 TO regress_test_role; -GRANT USAGE ON FOREIGN SERVER s6 TO regress_test_role2 WITH GRANT OPTION; -\des+ - List of foreign servers - Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description -------+---------------------------+----------------------+-------------------------------------------------------+--------+---------+-----------------------------------+------------- - s1 | regress_foreign_data_user | foo | regress_foreign_data_user=U/regress_foreign_data_user+| | 1.0 | (servername 's1') | - | | | regress_test_role=U/regress_foreign_data_user | | | | - s2 | regress_foreign_data_user | foo | | | 1.1 | (host 'a', dbname 'b') | - s3 | regress_foreign_data_user | foo | | oracle | | ("tns name" 'orcl', port '1521') | - s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') | - s5 | regress_foreign_data_user | foo | | | 15.0 | | - s6 | regress_foreign_data_user | foo | regress_foreign_data_user=U/regress_foreign_data_user+| | 16.0 | 
(host 'a', dbname 'b') | - | | | regress_test_role2=U*/regress_foreign_data_user | | | | - s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') | - s8 | regress_foreign_data_user | postgresql | | | | (host 'localhost', dbname 's8db') | - t1 | regress_test_role | foo | | | | | - t2 | regress_test_role | foo | | | | | -(10 rows) - -SET ROLE regress_test_role; -ALTER SERVER s1 VERSION '1.1'; -- ERROR -ERROR: must be owner of foreign server s1 -ALTER SERVER s1 OWNER TO regress_test_role; -- ERROR -ERROR: must be owner of foreign server s1 -RESET ROLE; -ALTER SERVER s1 OWNER TO regress_test_role; -GRANT regress_test_role2 TO regress_test_role; -SET ROLE regress_test_role; -ALTER SERVER s1 VERSION '1.1'; -ALTER SERVER s1 OWNER TO regress_test_role2; -- ERROR -ERROR: permission denied for foreign-data wrapper foo -RESET ROLE; -ALTER SERVER s8 OPTIONS (foo '1'); -- ERROR option validation -ERROR: invalid option "foo" -ALTER SERVER s8 OPTIONS (connect_timeout '30', SET dbname 'db1', DROP host); -SET ROLE regress_test_role; -ALTER SERVER s1 OWNER TO regress_test_indirect; -- ERROR -ERROR: must be able to SET ROLE "regress_test_indirect" -RESET ROLE; -GRANT regress_test_indirect TO regress_test_role; -SET ROLE regress_test_role; -ALTER SERVER s1 OWNER TO regress_test_indirect; -RESET ROLE; -GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_indirect; -SET ROLE regress_test_role; -ALTER SERVER s1 OWNER TO regress_test_indirect; -RESET ROLE; -DROP ROLE regress_test_indirect; -- ERROR -ERROR: role "regress_test_indirect" cannot be dropped because some objects depend on it -DETAIL: privileges for foreign-data wrapper foo -owner of server s1 -\des+ - List of foreign servers - Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description -------+---------------------------+----------------------+-------------------------------------------------------+--------+---------+--------------------------------------+------------- - s1 | regress_test_indirect | foo | regress_test_indirect=U/regress_test_indirect | | 1.1 | (servername 's1') | - s2 | regress_foreign_data_user | foo | | | 1.1 | (host 'a', dbname 'b') | - s3 | regress_foreign_data_user | foo | | oracle | | ("tns name" 'orcl', port '1521') | - s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') | - s5 | regress_foreign_data_user | foo | | | 15.0 | | - s6 | regress_foreign_data_user | foo | regress_foreign_data_user=U/regress_foreign_data_user+| | 16.0 | (host 'a', dbname 'b') | - | | | regress_test_role2=U*/regress_foreign_data_user | | | | - s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') | - s8 | regress_foreign_data_user | postgresql | | | | (dbname 'db1', connect_timeout '30') | - t1 | regress_test_role | foo | | | | | - t2 | regress_test_role | foo | | | | | -(10 rows) - -ALTER SERVER s8 RENAME to s8new; -\des+ - List of foreign servers - Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description --------+---------------------------+----------------------+-------------------------------------------------------+--------+---------+--------------------------------------+------------- - s1 | regress_test_indirect | foo | regress_test_indirect=U/regress_test_indirect | | 1.1 | (servername 's1') | - s2 | regress_foreign_data_user | foo | | | 1.1 | (host 'a', dbname 'b') | - s3 | regress_foreign_data_user | foo | | oracle | | ("tns name" 'orcl', port '1521') | - s4 | 
regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') | - s5 | regress_foreign_data_user | foo | | | 15.0 | | - s6 | regress_foreign_data_user | foo | regress_foreign_data_user=U/regress_foreign_data_user+| | 16.0 | (host 'a', dbname 'b') | - | | | regress_test_role2=U*/regress_foreign_data_user | | | | - s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') | - s8new | regress_foreign_data_user | postgresql | | | | (dbname 'db1', connect_timeout '30') | - t1 | regress_test_role | foo | | | | | - t2 | regress_test_role | foo | | | | | -(10 rows) - -ALTER SERVER s8new RENAME to s8; --- DROP SERVER -DROP SERVER nonexistent; -- ERROR -ERROR: server "nonexistent" does not exist -DROP SERVER IF EXISTS nonexistent; -NOTICE: server "nonexistent" does not exist, skipping -\des - List of foreign servers - Name | Owner | Foreign-data wrapper -------+---------------------------+---------------------- - s1 | regress_test_indirect | foo - s2 | regress_foreign_data_user | foo - s3 | regress_foreign_data_user | foo - s4 | regress_foreign_data_user | foo - s5 | regress_foreign_data_user | foo - s6 | regress_foreign_data_user | foo - s7 | regress_foreign_data_user | foo - s8 | regress_foreign_data_user | postgresql - t1 | regress_test_role | foo - t2 | regress_test_role | foo -(10 rows) - -SET ROLE regress_test_role; -DROP SERVER s2; -- ERROR -ERROR: must be owner of foreign server s2 -DROP SERVER s1; -RESET ROLE; -\des - List of foreign servers - Name | Owner | Foreign-data wrapper -------+---------------------------+---------------------- - s2 | regress_foreign_data_user | foo - s3 | regress_foreign_data_user | foo - s4 | regress_foreign_data_user | foo - s5 | regress_foreign_data_user | foo - s6 | regress_foreign_data_user | foo - s7 | regress_foreign_data_user | foo - s8 | regress_foreign_data_user | postgresql - t1 | regress_test_role | foo - t2 | regress_test_role | foo -(9 rows) - -ALTER SERVER s2 OWNER TO regress_test_role; -SET ROLE regress_test_role; -DROP SERVER s2; -RESET ROLE; -\des - List of foreign servers - Name | Owner | Foreign-data wrapper -------+---------------------------+---------------------- - s3 | regress_foreign_data_user | foo - s4 | regress_foreign_data_user | foo - s5 | regress_foreign_data_user | foo - s6 | regress_foreign_data_user | foo - s7 | regress_foreign_data_user | foo - s8 | regress_foreign_data_user | postgresql - t1 | regress_test_role | foo - t2 | regress_test_role | foo -(8 rows) - -CREATE USER MAPPING FOR current_user SERVER s3; -\deu - List of user mappings - Server | User name ---------+--------------------------- - s3 | regress_foreign_data_user -(1 row) - -DROP SERVER s3; -- ERROR -ERROR: cannot drop server s3 because other objects depend on it -DETAIL: user mapping for regress_foreign_data_user on server s3 depends on server s3 -HINT: Use DROP ... CASCADE to drop the dependent objects too. 
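[Aside, not part of the captured diff: a minimal sketch of how the dependency that blocks the plain DROP SERVER can be inspected beforehand, using the stock pg_user_mappings view.]

    -- list the user mappings that a DROP SERVER ... CASCADE would take with it
    SELECT srvname, usename
    FROM pg_user_mappings
    WHERE srvname = 's3';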
-DROP SERVER s3 CASCADE; -NOTICE: drop cascades to user mapping for regress_foreign_data_user on server s3 -\des - List of foreign servers - Name | Owner | Foreign-data wrapper -------+---------------------------+---------------------- - s4 | regress_foreign_data_user | foo - s5 | regress_foreign_data_user | foo - s6 | regress_foreign_data_user | foo - s7 | regress_foreign_data_user | foo - s8 | regress_foreign_data_user | postgresql - t1 | regress_test_role | foo - t2 | regress_test_role | foo -(7 rows) - -\deu -List of user mappings - Server | User name ---------+----------- -(0 rows) - --- CREATE USER MAPPING -CREATE USER MAPPING FOR regress_test_missing_role SERVER s1; -- ERROR -ERROR: role "regress_test_missing_role" does not exist -CREATE USER MAPPING FOR current_user SERVER s1; -- ERROR -ERROR: server "s1" does not exist -CREATE USER MAPPING FOR current_user SERVER s4; -CREATE USER MAPPING FOR user SERVER s4; -- ERROR duplicate -ERROR: user mapping for "regress_foreign_data_user" already exists for server "s4" -CREATE USER MAPPING FOR public SERVER s4 OPTIONS ("this mapping" 'is public'); -CREATE USER MAPPING FOR user SERVER s8 OPTIONS (username 'test', password 'secret'); -- ERROR -ERROR: invalid option "username" -HINT: Perhaps you meant the option "user". -CREATE USER MAPPING FOR user SERVER s8 OPTIONS (user 'test', password 'secret'); -ALTER SERVER s5 OWNER TO regress_test_role; -ALTER SERVER s6 OWNER TO regress_test_indirect; -SET ROLE regress_test_role; -CREATE USER MAPPING FOR current_user SERVER s5; -CREATE USER MAPPING FOR current_user SERVER s6 OPTIONS (username 'test'); -CREATE USER MAPPING FOR current_user SERVER s7; -- ERROR -ERROR: permission denied for foreign server s7 -CREATE USER MAPPING FOR public SERVER s8; -- ERROR -ERROR: must be owner of foreign server s8 -RESET ROLE; -ALTER SERVER t1 OWNER TO regress_test_indirect; -SET ROLE regress_test_role; -CREATE USER MAPPING FOR current_user SERVER t1 OPTIONS (username 'bob', password 'boo'); -CREATE USER MAPPING FOR public SERVER t1; -RESET ROLE; -\deu - List of user mappings - Server | User name ---------+--------------------------- - s4 | public - s4 | regress_foreign_data_user - s5 | regress_test_role - s6 | regress_test_role - s8 | regress_foreign_data_user - t1 | public - t1 | regress_test_role -(7 rows) - --- ALTER USER MAPPING -ALTER USER MAPPING FOR regress_test_missing_role SERVER s4 OPTIONS (gotcha 'true'); -- ERROR -ERROR: role "regress_test_missing_role" does not exist -ALTER USER MAPPING FOR user SERVER ss4 OPTIONS (gotcha 'true'); -- ERROR -ERROR: server "ss4" does not exist -ALTER USER MAPPING FOR public SERVER s5 OPTIONS (gotcha 'true'); -- ERROR -ERROR: user mapping for "public" does not exist for server "s5" -ALTER USER MAPPING FOR current_user SERVER s8 OPTIONS (username 'test'); -- ERROR -ERROR: invalid option "username" -HINT: Perhaps you meant the option "user". 
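[Aside, not part of the captured diff: the "invalid option" errors above come from the wrapper's validator (here postgresql_fdw_validator, which knows libpq's "user" but not "username"); a wrapper created without a validator accepts any option. A sketch for checking which wrappers validate at all:]

    -- wrappers with fdwvalidator = 0 ("-") perform no option checking
    SELECT fdwname, fdwvalidator::regproc
    FROM pg_foreign_data_wrapper;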
-ALTER USER MAPPING FOR current_user SERVER s8 OPTIONS (DROP user, SET password 'public'); -SET ROLE regress_test_role; -ALTER USER MAPPING FOR current_user SERVER s5 OPTIONS (ADD modified '1'); -ALTER USER MAPPING FOR public SERVER s4 OPTIONS (ADD modified '1'); -- ERROR -ERROR: must be owner of foreign server s4 -ALTER USER MAPPING FOR public SERVER t1 OPTIONS (ADD modified '1'); -RESET ROLE; -\deu+ - List of user mappings - Server | User name | FDW options ---------+---------------------------+---------------------------------- - s4 | public | ("this mapping" 'is public') - s4 | regress_foreign_data_user | - s5 | regress_test_role | (modified '1') - s6 | regress_test_role | (username 'test') - s8 | regress_foreign_data_user | (password 'public') - t1 | public | (modified '1') - t1 | regress_test_role | (username 'bob', password 'boo') -(7 rows) - --- DROP USER MAPPING -DROP USER MAPPING FOR regress_test_missing_role SERVER s4; -- ERROR -ERROR: role "regress_test_missing_role" does not exist -DROP USER MAPPING FOR user SERVER ss4; -ERROR: server "ss4" does not exist -DROP USER MAPPING FOR public SERVER s7; -- ERROR -ERROR: user mapping for "public" does not exist for server "s7" -DROP USER MAPPING IF EXISTS FOR regress_test_missing_role SERVER s4; -NOTICE: role "regress_test_missing_role" does not exist, skipping -DROP USER MAPPING IF EXISTS FOR user SERVER ss4; -NOTICE: server "ss4" does not exist, skipping -DROP USER MAPPING IF EXISTS FOR public SERVER s7; -NOTICE: user mapping for "public" does not exist for server "s7", skipping -CREATE USER MAPPING FOR public SERVER s8; -SET ROLE regress_test_role; -DROP USER MAPPING FOR public SERVER s8; -- ERROR -ERROR: must be owner of foreign server s8 -RESET ROLE; -DROP SERVER s7; -\deu - List of user mappings - Server | User name ---------+--------------------------- - s4 | public - s4 | regress_foreign_data_user - s5 | regress_test_role - s6 | regress_test_role - s8 | public - s8 | regress_foreign_data_user - t1 | public - t1 | regress_test_role -(8 rows) - --- CREATE FOREIGN TABLE -CREATE SCHEMA foreign_schema; -CREATE SERVER s0 FOREIGN DATA WRAPPER dummy; -CREATE FOREIGN TABLE ft1 (); -- ERROR -ERROR: syntax error at or near ";" -LINE 1: CREATE FOREIGN TABLE ft1 (); - ^ -CREATE FOREIGN TABLE ft1 () SERVER no_server; -- ERROR -ERROR: server "no_server" does not exist -CREATE FOREIGN TABLE ft1 ( - c1 integer OPTIONS ("param 1" 'val1') PRIMARY KEY, - c2 text OPTIONS (param2 'val2', param3 'val3'), - c3 date -) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -- ERROR -ERROR: primary key constraints are not supported on foreign tables -LINE 2: c1 integer OPTIONS ("param 1" 'val1') PRIMARY KEY, - ^ -CREATE TABLE ref_table (id integer PRIMARY KEY); -CREATE FOREIGN TABLE ft1 ( - c1 integer OPTIONS ("param 1" 'val1') REFERENCES ref_table (id), - c2 text OPTIONS (param2 'val2', param3 'val3'), - c3 date -) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -- ERROR -ERROR: foreign key constraints are not supported on foreign tables -LINE 2: c1 integer OPTIONS ("param 1" 'val1') REFERENCES ref_table ... 
- ^ -DROP TABLE ref_table; -CREATE FOREIGN TABLE ft1 ( - c1 integer OPTIONS ("param 1" 'val1') NOT NULL, - c2 text OPTIONS (param2 'val2', param3 'val3'), - c3 date, - UNIQUE (c3) -) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -- ERROR -ERROR: unique constraints are not supported on foreign tables -LINE 5: UNIQUE (c3) - ^ -CREATE FOREIGN TABLE ft1 ( - c1 integer OPTIONS ("param 1" 'val1') NOT NULL, - c2 text OPTIONS (param2 'val2', param3 'val3') CHECK (c2 <> ''), - c3 date, - CHECK (c3 BETWEEN '1994-01-01'::date AND '1994-01-31'::date) -) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -COMMENT ON FOREIGN TABLE ft1 IS 'ft1'; -COMMENT ON COLUMN ft1.c1 IS 'ft1.c1'; -\d+ ft1 - Foreign table "public.ft1" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+--------------------------------+----------+--------------+------------- - c1 | integer | | not null | | ("param 1" 'val1') | plain | | ft1.c1 - c2 | text | | | | (param2 'val2', param3 'val3') | extended | | - c3 | date | | | | | plain | | -Check constraints: - "ft1_c2_check" CHECK (c2 <> ''::text) - "ft1_c3_check" CHECK (c3 >= '01-01-1994'::date AND c3 <= '01-31-1994'::date) -Not-null constraints: - "ft1_c1_not_null" NOT NULL "c1" -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') - -\det+ - List of foreign tables - Schema | Table | Server | FDW options | Description ---------+-------+--------+-------------------------------------------------+------------- - public | ft1 | s0 | (delimiter ',', quote '"', "be quoted" 'value') | ft1 -(1 row) - -CREATE INDEX id_ft1_c2 ON ft1 (c2); -- ERROR -ERROR: cannot create index on relation "ft1" -DETAIL: This operation is not supported for foreign tables. -SELECT * FROM ft1; -- ERROR -ERROR: foreign-data wrapper "dummy" has no handler -EXPLAIN SELECT * FROM ft1; -- ERROR -ERROR: foreign-data wrapper "dummy" has no handler -CREATE TABLE lt1 (a INT) PARTITION BY RANGE (a); -CREATE FOREIGN TABLE ft_part1 - PARTITION OF lt1 FOR VALUES FROM (0) TO (1000) SERVER s0; -CREATE INDEX ON lt1 (a); -- skips partition -CREATE UNIQUE INDEX ON lt1 (a); -- ERROR -ERROR: cannot create unique index on partitioned table "lt1" -DETAIL: Table "lt1" contains partitions that are foreign tables. -ALTER TABLE lt1 ADD PRIMARY KEY (a); -- ERROR -ERROR: cannot create unique index on partitioned table "lt1" -DETAIL: Table "lt1" contains partitions that are foreign tables. -DROP TABLE lt1; -CREATE TABLE lt1 (a INT) PARTITION BY RANGE (a); -CREATE INDEX ON lt1 (a); -CREATE FOREIGN TABLE ft_part1 - PARTITION OF lt1 FOR VALUES FROM (0) TO (1000) SERVER s0; -CREATE FOREIGN TABLE ft_part2 (a INT) SERVER s0; -ALTER TABLE lt1 ATTACH PARTITION ft_part2 FOR VALUES FROM (1000) TO (2000); -DROP FOREIGN TABLE ft_part1, ft_part2; -CREATE UNIQUE INDEX ON lt1 (a); -ALTER TABLE lt1 ADD PRIMARY KEY (a); -CREATE FOREIGN TABLE ft_part1 - PARTITION OF lt1 FOR VALUES FROM (0) TO (1000) SERVER s0; -- ERROR -ERROR: cannot create foreign partition of partitioned table "lt1" -DETAIL: Table "lt1" contains indexes that are unique. -CREATE FOREIGN TABLE ft_part2 (a INT NOT NULL) SERVER s0; -ALTER TABLE lt1 ATTACH PARTITION ft_part2 - FOR VALUES FROM (1000) TO (2000); -- ERROR -ERROR: cannot attach foreign table "ft_part2" as partition of partitioned table "lt1" -DETAIL: Partitioned table "lt1" contains unique indexes. 
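[Aside, not part of the captured diff: a hedged sketch of why both orders fail above. A partitioned unique index must be backed by a physical index on every partition, and a foreign table cannot carry one; with only local partitions (hypothetical names lp, lp1) the same index is legal.]

    CREATE TABLE lp (a int) PARTITION BY RANGE (a);
    CREATE TABLE lp1 PARTITION OF lp FOR VALUES FROM (0) TO (1000);
    CREATE UNIQUE INDEX ON lp (a);  -- allowed: every partition is local
    DROP TABLE lp;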
-DROP TABLE lt1; -DROP FOREIGN TABLE ft_part2; -CREATE TABLE lt1 (a INT) PARTITION BY RANGE (a); -CREATE INDEX ON lt1 (a); -CREATE TABLE lt1_part1 - PARTITION OF lt1 FOR VALUES FROM (0) TO (1000) - PARTITION BY RANGE (a); -CREATE FOREIGN TABLE ft_part_1_1 - PARTITION OF lt1_part1 FOR VALUES FROM (0) TO (100) SERVER s0; -CREATE FOREIGN TABLE ft_part_1_2 (a INT) SERVER s0; -ALTER TABLE lt1_part1 ATTACH PARTITION ft_part_1_2 FOR VALUES FROM (100) TO (200); -CREATE UNIQUE INDEX ON lt1 (a); -ERROR: cannot create unique index on partitioned table "lt1" -DETAIL: Table "lt1" contains partitions that are foreign tables. -ALTER TABLE lt1 ADD PRIMARY KEY (a); -ERROR: cannot create unique index on partitioned table "lt1_part1" -DETAIL: Table "lt1_part1" contains partitions that are foreign tables. -DROP FOREIGN TABLE ft_part_1_1, ft_part_1_2; -CREATE UNIQUE INDEX ON lt1 (a); -ALTER TABLE lt1 ADD PRIMARY KEY (a); -CREATE FOREIGN TABLE ft_part_1_1 - PARTITION OF lt1_part1 FOR VALUES FROM (0) TO (100) SERVER s0; -ERROR: cannot create foreign partition of partitioned table "lt1_part1" -DETAIL: Table "lt1_part1" contains indexes that are unique. -CREATE FOREIGN TABLE ft_part_1_2 (a INT NOT NULL) SERVER s0; -ALTER TABLE lt1_part1 ATTACH PARTITION ft_part_1_2 FOR VALUES FROM (100) TO (200); -ERROR: cannot attach foreign table "ft_part_1_2" as partition of partitioned table "lt1_part1" -DETAIL: Partitioned table "lt1_part1" contains unique indexes. -DROP TABLE lt1; -DROP FOREIGN TABLE ft_part_1_2; --- ALTER FOREIGN TABLE -COMMENT ON FOREIGN TABLE ft1 IS 'foreign table'; -COMMENT ON FOREIGN TABLE ft1 IS NULL; -COMMENT ON COLUMN ft1.c1 IS 'foreign column'; -COMMENT ON COLUMN ft1.c1 IS NULL; -ALTER FOREIGN TABLE ft1 ADD COLUMN c4 integer; -ALTER FOREIGN TABLE ft1 ADD COLUMN c5 integer DEFAULT 0; -ALTER FOREIGN TABLE ft1 ADD COLUMN c6 integer; -ALTER FOREIGN TABLE ft1 ADD COLUMN c7 integer NOT NULL; -ALTER FOREIGN TABLE ft1 ADD COLUMN c8 integer; -ALTER FOREIGN TABLE ft1 ADD COLUMN c9 integer; -ALTER FOREIGN TABLE ft1 ADD COLUMN c10 integer OPTIONS (p1 'v1'); -ALTER FOREIGN TABLE ft1 ALTER COLUMN c4 SET DEFAULT 0; -ALTER FOREIGN TABLE ft1 ALTER COLUMN c5 DROP DEFAULT; -ALTER FOREIGN TABLE ft1 ALTER COLUMN c6 SET NOT NULL; -ALTER FOREIGN TABLE ft1 ALTER COLUMN c7 DROP NOT NULL; -ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE char(10) USING '0'; -- ERROR -ERROR: "ft1" is not a table -ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE char(10); -ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET DATA TYPE text; -ALTER FOREIGN TABLE ft1 ALTER COLUMN xmin OPTIONS (ADD p1 'v1'); -- ERROR -ERROR: cannot alter system column "xmin" -ALTER FOREIGN TABLE ft1 ALTER COLUMN c7 OPTIONS (ADD p1 'v1', ADD p2 'v2'), - ALTER COLUMN c8 OPTIONS (ADD p1 'v1', ADD p2 'v2'); -ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 OPTIONS (SET p2 'V2', DROP p1); -ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 SET STATISTICS 10000; -ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 SET (n_distinct = 100); -ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET STATISTICS -1; -ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET STORAGE PLAIN; -\d+ ft1 - Foreign table "public.ft1" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+--------------------------------+----------+--------------+------------- - c1 | integer | | not null | | ("param 1" 'val1') | plain | 10000 | - c2 | text | | | | (param2 'val2', param3 'val3') | extended | | - c3 | date | | | | | plain | | - c4 | integer | | | 0 | | plain | | 
- c5 | integer | | | | | plain | | - c6 | integer | | not null | | | plain | | - c7 | integer | | | | (p1 'v1', p2 'v2') | plain | | - c8 | text | | | | (p2 'V2') | plain | | - c9 | integer | | | | | plain | | - c10 | integer | | | | (p1 'v1') | plain | | -Check constraints: - "ft1_c2_check" CHECK (c2 <> ''::text) - "ft1_c3_check" CHECK (c3 >= '01-01-1994'::date AND c3 <= '01-31-1994'::date) -Not-null constraints: - "ft1_c1_not_null" NOT NULL "c1" - "ft1_c6_not_null" NOT NULL "c6" -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') - --- can't change the column type if it's used elsewhere -CREATE TABLE use_ft1_column_type (x ft1); -ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET DATA TYPE integer; -- ERROR -ERROR: cannot alter foreign table "ft1" because column "use_ft1_column_type.x" uses its row type -DROP TABLE use_ft1_column_type; -ALTER FOREIGN TABLE ft1 ADD PRIMARY KEY (c7); -- ERROR -ERROR: primary key constraints are not supported on foreign tables -LINE 1: ALTER FOREIGN TABLE ft1 ADD PRIMARY KEY (c7); - ^ -ALTER FOREIGN TABLE ft1 ADD CONSTRAINT ft1_c9_check CHECK (c9 < 0) NOT VALID; -ALTER FOREIGN TABLE ft1 ALTER CONSTRAINT ft1_c9_check DEFERRABLE; -- ERROR -ERROR: ALTER action ALTER CONSTRAINT cannot be performed on relation "ft1" -DETAIL: This operation is not supported for foreign tables. -ALTER FOREIGN TABLE ft1 DROP CONSTRAINT ft1_c9_check; -ALTER FOREIGN TABLE ft1 DROP CONSTRAINT no_const; -- ERROR -ERROR: constraint "no_const" of relation "ft1" does not exist -ALTER FOREIGN TABLE ft1 DROP CONSTRAINT IF EXISTS no_const; -NOTICE: constraint "no_const" of relation "ft1" does not exist, skipping -ALTER FOREIGN TABLE ft1 OWNER TO regress_test_role; -ALTER FOREIGN TABLE ft1 OPTIONS (DROP delimiter, SET quote '~', ADD escape '@'); -ALTER FOREIGN TABLE ft1 DROP COLUMN no_column; -- ERROR -ERROR: column "no_column" of relation "ft1" does not exist -ALTER FOREIGN TABLE ft1 DROP COLUMN IF EXISTS no_column; -NOTICE: column "no_column" of relation "ft1" does not exist, skipping -ALTER FOREIGN TABLE ft1 DROP COLUMN c9; -ALTER FOREIGN TABLE ft1 SET SCHEMA foreign_schema; -ALTER FOREIGN TABLE ft1 SET TABLESPACE ts; -- ERROR -ERROR: relation "ft1" does not exist -ALTER FOREIGN TABLE foreign_schema.ft1 RENAME c1 TO foreign_column_1; -ALTER FOREIGN TABLE foreign_schema.ft1 RENAME TO foreign_table_1; -\d foreign_schema.foreign_table_1 - Foreign table "foreign_schema.foreign_table_1" - Column | Type | Collation | Nullable | Default | FDW options -------------------+---------+-----------+----------+---------+-------------------------------- - foreign_column_1 | integer | | not null | | ("param 1" 'val1') - c2 | text | | | | (param2 'val2', param3 'val3') - c3 | date | | | | - c4 | integer | | | 0 | - c5 | integer | | | | - c6 | integer | | not null | | - c7 | integer | | | | (p1 'v1', p2 'v2') - c8 | text | | | | (p2 'V2') - c10 | integer | | | | (p1 'v1') -Check constraints: - "ft1_c2_check" CHECK (c2 <> ''::text) - "ft1_c3_check" CHECK (c3 >= '01-01-1994'::date AND c3 <= '01-31-1994'::date) -Server: s0 -FDW options: (quote '~', "be quoted" 'value', escape '@') - --- alter noexisting table -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c4 integer; -NOTICE: relation "doesnt_exist_ft1" does not exist, skipping -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c6 integer; -NOTICE: relation "doesnt_exist_ft1" does not exist, skipping -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c7 integer NOT NULL; -NOTICE: relation "doesnt_exist_ft1" does not 
exist, skipping -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c8 integer; -NOTICE: relation "doesnt_exist_ft1" does not exist, skipping -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c9 integer; -NOTICE: relation "doesnt_exist_ft1" does not exist, skipping -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c10 integer OPTIONS (p1 'v1'); -NOTICE: relation "doesnt_exist_ft1" does not exist, skipping -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c6 SET NOT NULL; -NOTICE: relation "doesnt_exist_ft1" does not exist, skipping -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c7 DROP NOT NULL; -NOTICE: relation "doesnt_exist_ft1" does not exist, skipping -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c8 TYPE char(10); -NOTICE: relation "doesnt_exist_ft1" does not exist, skipping -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c8 SET DATA TYPE text; -NOTICE: relation "doesnt_exist_ft1" does not exist, skipping -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c7 OPTIONS (ADD p1 'v1', ADD p2 'v2'), - ALTER COLUMN c8 OPTIONS (ADD p1 'v1', ADD p2 'v2'); -NOTICE: relation "doesnt_exist_ft1" does not exist, skipping -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c8 OPTIONS (SET p2 'V2', DROP p1); -NOTICE: relation "doesnt_exist_ft1" does not exist, skipping -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP CONSTRAINT IF EXISTS no_const; -NOTICE: relation "doesnt_exist_ft1" does not exist, skipping -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP CONSTRAINT ft1_c1_check; -NOTICE: relation "doesnt_exist_ft1" does not exist, skipping -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 OWNER TO regress_test_role; -NOTICE: relation "doesnt_exist_ft1" does not exist, skipping -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 OPTIONS (DROP delimiter, SET quote '~', ADD escape '@'); -NOTICE: relation "doesnt_exist_ft1" does not exist, skipping -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP COLUMN IF EXISTS no_column; -NOTICE: relation "doesnt_exist_ft1" does not exist, skipping -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP COLUMN c9; -NOTICE: relation "doesnt_exist_ft1" does not exist, skipping -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 SET SCHEMA foreign_schema; -NOTICE: relation "doesnt_exist_ft1" does not exist, skipping -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 RENAME c1 TO foreign_column_1; -NOTICE: relation "doesnt_exist_ft1" does not exist, skipping -ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 RENAME TO foreign_table_1; -NOTICE: relation "doesnt_exist_ft1" does not exist, skipping --- Information schema -SELECT * FROM information_schema.foreign_data_wrappers ORDER BY 1, 2; - foreign_data_wrapper_catalog | foreign_data_wrapper_name | authorization_identifier | library_name | foreign_data_wrapper_language -------------------------------+---------------------------+---------------------------+--------------+------------------------------- - regression | dummy | regress_foreign_data_user | | c - regression | foo | regress_foreign_data_user | | c - regression | postgresql | regress_foreign_data_user | | c -(3 rows) - -SELECT * FROM information_schema.foreign_data_wrapper_options ORDER BY 1, 2, 3; - foreign_data_wrapper_catalog | foreign_data_wrapper_name | option_name | option_value -------------------------------+---------------------------+--------------+-------------- - regression | foo | test wrapper | true -(1 row) - -SELECT * FROM information_schema.foreign_servers 
ORDER BY 1, 2; - foreign_server_catalog | foreign_server_name | foreign_data_wrapper_catalog | foreign_data_wrapper_name | foreign_server_type | foreign_server_version | authorization_identifier -------------------------+---------------------+------------------------------+---------------------------+---------------------+------------------------+--------------------------- - regression | s0 | regression | dummy | | | regress_foreign_data_user - regression | s4 | regression | foo | oracle | | regress_foreign_data_user - regression | s5 | regression | foo | | 15.0 | regress_test_role - regression | s6 | regression | foo | | 16.0 | regress_test_indirect - regression | s8 | regression | postgresql | | | regress_foreign_data_user - regression | t1 | regression | foo | | | regress_test_indirect - regression | t2 | regression | foo | | | regress_test_role -(7 rows) - -SELECT * FROM information_schema.foreign_server_options ORDER BY 1, 2, 3; - foreign_server_catalog | foreign_server_name | option_name | option_value -------------------------+---------------------+-----------------+-------------- - regression | s4 | dbname | b - regression | s4 | host | a - regression | s6 | dbname | b - regression | s6 | host | a - regression | s8 | connect_timeout | 30 - regression | s8 | dbname | db1 -(6 rows) - -SELECT * FROM information_schema.user_mappings ORDER BY lower(authorization_identifier), 2, 3; - authorization_identifier | foreign_server_catalog | foreign_server_name ----------------------------+------------------------+--------------------- - PUBLIC | regression | s4 - PUBLIC | regression | s8 - PUBLIC | regression | t1 - regress_foreign_data_user | regression | s4 - regress_foreign_data_user | regression | s8 - regress_test_role | regression | s5 - regress_test_role | regression | s6 - regress_test_role | regression | t1 -(8 rows) - -SELECT * FROM information_schema.user_mapping_options ORDER BY lower(authorization_identifier), 2, 3, 4; - authorization_identifier | foreign_server_catalog | foreign_server_name | option_name | option_value ----------------------------+------------------------+---------------------+--------------+-------------- - PUBLIC | regression | s4 | this mapping | is public - PUBLIC | regression | t1 | modified | 1 - regress_foreign_data_user | regression | s8 | password | public - regress_test_role | regression | s5 | modified | 1 - regress_test_role | regression | s6 | username | test - regress_test_role | regression | t1 | password | boo - regress_test_role | regression | t1 | username | bob -(7 rows) - -SELECT * FROM information_schema.usage_privileges WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5; - grantor | grantee | object_catalog | object_schema | object_name | object_type | privilege_type | is_grantable ----------------------------+---------------------------+----------------+---------------+-------------+----------------------+----------------+-------------- - regress_foreign_data_user | regress_foreign_data_user | regression | | foo | FOREIGN DATA WRAPPER | USAGE | YES - regress_foreign_data_user | regress_test_indirect | regression | | foo | FOREIGN DATA WRAPPER | USAGE | NO - regress_test_indirect | regress_test_indirect | regression | | s6 | FOREIGN SERVER | USAGE | YES - regress_test_indirect | regress_test_role2 | regression | | s6 | FOREIGN SERVER | USAGE | YES -(4 rows) - -SELECT * FROM information_schema.role_usage_grants WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5; - 
grantor | grantee | object_catalog | object_schema | object_name | object_type | privilege_type | is_grantable ----------------------------+---------------------------+----------------+---------------+-------------+----------------------+----------------+-------------- - regress_foreign_data_user | regress_foreign_data_user | regression | | foo | FOREIGN DATA WRAPPER | USAGE | YES - regress_foreign_data_user | regress_test_indirect | regression | | foo | FOREIGN DATA WRAPPER | USAGE | NO - regress_test_indirect | regress_test_indirect | regression | | s6 | FOREIGN SERVER | USAGE | YES - regress_test_indirect | regress_test_role2 | regression | | s6 | FOREIGN SERVER | USAGE | YES -(4 rows) - -SELECT * FROM information_schema.foreign_tables ORDER BY 1, 2, 3; - foreign_table_catalog | foreign_table_schema | foreign_table_name | foreign_server_catalog | foreign_server_name ------------------------+----------------------+--------------------+------------------------+--------------------- - regression | foreign_schema | foreign_table_1 | regression | s0 -(1 row) - -SELECT * FROM information_schema.foreign_table_options ORDER BY 1, 2, 3, 4; - foreign_table_catalog | foreign_table_schema | foreign_table_name | option_name | option_value ------------------------+----------------------+--------------------+-------------+-------------- - regression | foreign_schema | foreign_table_1 | be quoted | value - regression | foreign_schema | foreign_table_1 | escape | @ - regression | foreign_schema | foreign_table_1 | quote | ~ -(3 rows) - -SET ROLE regress_test_role; -SELECT * FROM information_schema.user_mapping_options ORDER BY 1, 2, 3, 4; - authorization_identifier | foreign_server_catalog | foreign_server_name | option_name | option_value ---------------------------+------------------------+---------------------+-------------+-------------- - PUBLIC | regression | t1 | modified | 1 - regress_test_role | regression | s5 | modified | 1 - regress_test_role | regression | s6 | username | test - regress_test_role | regression | t1 | password | boo - regress_test_role | regression | t1 | username | bob -(5 rows) - -SELECT * FROM information_schema.usage_privileges WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5; - grantor | grantee | object_catalog | object_schema | object_name | object_type | privilege_type | is_grantable ----------------------------+-----------------------+----------------+---------------+-------------+----------------------+----------------+-------------- - regress_foreign_data_user | regress_test_indirect | regression | | foo | FOREIGN DATA WRAPPER | USAGE | NO - regress_test_indirect | regress_test_indirect | regression | | s6 | FOREIGN SERVER | USAGE | YES - regress_test_indirect | regress_test_role2 | regression | | s6 | FOREIGN SERVER | USAGE | YES -(3 rows) - -SELECT * FROM information_schema.role_usage_grants WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5; - grantor | grantee | object_catalog | object_schema | object_name | object_type | privilege_type | is_grantable ----------------------------+-----------------------+----------------+---------------+-------------+----------------------+----------------+-------------- - regress_foreign_data_user | regress_test_indirect | regression | | foo | FOREIGN DATA WRAPPER | USAGE | NO - regress_test_indirect | regress_test_indirect | regression | | s6 | FOREIGN SERVER | USAGE | YES - regress_test_indirect | regress_test_role2 | regression | | s6 | FOREIGN 
SERVER | USAGE | YES -(3 rows) - -DROP USER MAPPING FOR current_user SERVER t1; -SET ROLE regress_test_role2; -SELECT * FROM information_schema.user_mapping_options ORDER BY 1, 2, 3, 4; - authorization_identifier | foreign_server_catalog | foreign_server_name | option_name | option_value ---------------------------+------------------------+---------------------+-------------+-------------- - regress_test_role | regression | s6 | username | -(1 row) - -RESET ROLE; --- has_foreign_data_wrapper_privilege -SELECT has_foreign_data_wrapper_privilege('regress_test_role', - (SELECT oid FROM pg_foreign_data_wrapper WHERE fdwname='foo'), 'USAGE'); - has_foreign_data_wrapper_privilege ------------------------------------- - t -(1 row) - -SELECT has_foreign_data_wrapper_privilege('regress_test_role', 'foo', 'USAGE'); - has_foreign_data_wrapper_privilege ------------------------------------- - t -(1 row) - -SELECT has_foreign_data_wrapper_privilege( - (SELECT oid FROM pg_roles WHERE rolname='regress_test_role'), - (SELECT oid FROM pg_foreign_data_wrapper WHERE fdwname='foo'), 'USAGE'); - has_foreign_data_wrapper_privilege ------------------------------------- - t -(1 row) - -SELECT has_foreign_data_wrapper_privilege( - (SELECT oid FROM pg_foreign_data_wrapper WHERE fdwname='foo'), 'USAGE'); - has_foreign_data_wrapper_privilege ------------------------------------- - t -(1 row) - -SELECT has_foreign_data_wrapper_privilege( - (SELECT oid FROM pg_roles WHERE rolname='regress_test_role'), 'foo', 'USAGE'); - has_foreign_data_wrapper_privilege ------------------------------------- - t -(1 row) - -SELECT has_foreign_data_wrapper_privilege('foo', 'USAGE'); - has_foreign_data_wrapper_privilege ------------------------------------- - t -(1 row) - -GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role; -SELECT has_foreign_data_wrapper_privilege('regress_test_role', 'foo', 'USAGE'); - has_foreign_data_wrapper_privilege ------------------------------------- - t -(1 row) - --- has_server_privilege -SELECT has_server_privilege('regress_test_role', - (SELECT oid FROM pg_foreign_server WHERE srvname='s8'), 'USAGE'); - has_server_privilege ----------------------- - f -(1 row) - -SELECT has_server_privilege('regress_test_role', 's8', 'USAGE'); - has_server_privilege ----------------------- - f -(1 row) - -SELECT has_server_privilege( - (SELECT oid FROM pg_roles WHERE rolname='regress_test_role'), - (SELECT oid FROM pg_foreign_server WHERE srvname='s8'), 'USAGE'); - has_server_privilege ----------------------- - f -(1 row) - -SELECT has_server_privilege( - (SELECT oid FROM pg_foreign_server WHERE srvname='s8'), 'USAGE'); - has_server_privilege ----------------------- - t -(1 row) - -SELECT has_server_privilege( - (SELECT oid FROM pg_roles WHERE rolname='regress_test_role'), 's8', 'USAGE'); - has_server_privilege ----------------------- - f -(1 row) - -SELECT has_server_privilege('s8', 'USAGE'); - has_server_privilege ----------------------- - t -(1 row) - -GRANT USAGE ON FOREIGN SERVER s8 TO regress_test_role; -SELECT has_server_privilege('regress_test_role', 's8', 'USAGE'); - has_server_privilege ----------------------- - t -(1 row) - -REVOKE USAGE ON FOREIGN SERVER s8 FROM regress_test_role; -GRANT USAGE ON FOREIGN SERVER s4 TO regress_test_role; -DROP USER MAPPING FOR public SERVER s4; -ALTER SERVER s6 OPTIONS (DROP host, DROP dbname); -ALTER USER MAPPING FOR regress_test_role SERVER s6 OPTIONS (DROP username); -ALTER FOREIGN DATA WRAPPER foo VALIDATOR postgresql_fdw_validator; -WARNING: changing the 
foreign-data wrapper validator can cause the options for dependent objects to become invalid --- Privileges -SET ROLE regress_unprivileged_role; -CREATE FOREIGN DATA WRAPPER foobar; -- ERROR -ERROR: permission denied to create foreign-data wrapper "foobar" -HINT: Must be superuser to create a foreign-data wrapper. -ALTER FOREIGN DATA WRAPPER foo OPTIONS (gotcha 'true'); -- ERROR -ERROR: permission denied to alter foreign-data wrapper "foo" -HINT: Must be superuser to alter a foreign-data wrapper. -ALTER FOREIGN DATA WRAPPER foo OWNER TO regress_unprivileged_role; -- ERROR -ERROR: permission denied to change owner of foreign-data wrapper "foo" -HINT: Must be superuser to change owner of a foreign-data wrapper. -DROP FOREIGN DATA WRAPPER foo; -- ERROR -ERROR: must be owner of foreign-data wrapper foo -GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role; -- ERROR -ERROR: permission denied for foreign-data wrapper foo -CREATE SERVER s9 FOREIGN DATA WRAPPER foo; -- ERROR -ERROR: permission denied for foreign-data wrapper foo -ALTER SERVER s4 VERSION '0.5'; -- ERROR -ERROR: must be owner of foreign server s4 -ALTER SERVER s4 OWNER TO regress_unprivileged_role; -- ERROR -ERROR: must be owner of foreign server s4 -DROP SERVER s4; -- ERROR -ERROR: must be owner of foreign server s4 -GRANT USAGE ON FOREIGN SERVER s4 TO regress_test_role; -- ERROR -ERROR: permission denied for foreign server s4 -CREATE USER MAPPING FOR public SERVER s4; -- ERROR -ERROR: must be owner of foreign server s4 -ALTER USER MAPPING FOR regress_test_role SERVER s6 OPTIONS (gotcha 'true'); -- ERROR -ERROR: must be owner of foreign server s6 -DROP USER MAPPING FOR regress_test_role SERVER s6; -- ERROR -ERROR: must be owner of foreign server s6 -RESET ROLE; -GRANT USAGE ON FOREIGN DATA WRAPPER postgresql TO regress_unprivileged_role; -GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_unprivileged_role WITH GRANT OPTION; -SET ROLE regress_unprivileged_role; -CREATE FOREIGN DATA WRAPPER foobar; -- ERROR -ERROR: permission denied to create foreign-data wrapper "foobar" -HINT: Must be superuser to create a foreign-data wrapper. -ALTER FOREIGN DATA WRAPPER foo OPTIONS (gotcha 'true'); -- ERROR -ERROR: permission denied to alter foreign-data wrapper "foo" -HINT: Must be superuser to alter a foreign-data wrapper. -DROP FOREIGN DATA WRAPPER foo; -- ERROR -ERROR: must be owner of foreign-data wrapper foo -GRANT USAGE ON FOREIGN DATA WRAPPER postgresql TO regress_test_role; -- WARNING -WARNING: no privileges were granted for "postgresql" -GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role; -CREATE SERVER s9 FOREIGN DATA WRAPPER postgresql; -ALTER SERVER s6 VERSION '0.5'; -- ERROR -ERROR: must be owner of foreign server s6 -DROP SERVER s6; -- ERROR -ERROR: must be owner of foreign server s6 -GRANT USAGE ON FOREIGN SERVER s6 TO regress_test_role; -- ERROR -ERROR: permission denied for foreign server s6 -GRANT USAGE ON FOREIGN SERVER s9 TO regress_test_role; -CREATE USER MAPPING FOR public SERVER s6; -- ERROR -ERROR: must be owner of foreign server s6 -CREATE USER MAPPING FOR public SERVER s9; -ALTER USER MAPPING FOR regress_test_role SERVER s6 OPTIONS (gotcha 'true'); -- ERROR -ERROR: must be owner of foreign server s6 -DROP USER MAPPING FOR regress_test_role SERVER s6; -- ERROR -ERROR: must be owner of foreign server s6 -RESET ROLE; -REVOKE USAGE ON FOREIGN DATA WRAPPER foo FROM regress_unprivileged_role; -- ERROR -ERROR: dependent privileges exist -HINT: Use CASCADE to revoke them too. 
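[Aside, not part of the captured diff: the "dependent privileges exist" error arises because the USAGE grant was made WITH GRANT OPTION and re-granted onward. A sketch against the stock catalogs for seeing the grant chain that forces CASCADE:]

    -- who granted USAGE on wrapper foo to whom, and whether it is regrantable
    SELECT a.grantor::regrole, a.grantee::regrole, a.privilege_type, a.is_grantable
    FROM pg_foreign_data_wrapper AS w, aclexplode(w.fdwacl) AS a
    WHERE w.fdwname = 'foo';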
-REVOKE USAGE ON FOREIGN DATA WRAPPER foo FROM regress_unprivileged_role CASCADE; -SET ROLE regress_unprivileged_role; -GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role; -- ERROR -ERROR: permission denied for foreign-data wrapper foo -CREATE SERVER s10 FOREIGN DATA WRAPPER foo; -- ERROR -ERROR: permission denied for foreign-data wrapper foo -ALTER SERVER s9 VERSION '1.1'; -GRANT USAGE ON FOREIGN SERVER s9 TO regress_test_role; -CREATE USER MAPPING FOR current_user SERVER s9; -DROP SERVER s9 CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to user mapping for public on server s9 -drop cascades to user mapping for regress_unprivileged_role on server s9 -RESET ROLE; -CREATE SERVER s9 FOREIGN DATA WRAPPER foo; -GRANT USAGE ON FOREIGN SERVER s9 TO regress_unprivileged_role; -SET ROLE regress_unprivileged_role; -ALTER SERVER s9 VERSION '1.2'; -- ERROR -ERROR: must be owner of foreign server s9 -GRANT USAGE ON FOREIGN SERVER s9 TO regress_test_role; -- WARNING -WARNING: no privileges were granted for "s9" -CREATE USER MAPPING FOR current_user SERVER s9; -DROP SERVER s9 CASCADE; -- ERROR -ERROR: must be owner of foreign server s9 --- Check visibility of user mapping data -SET ROLE regress_test_role; -CREATE SERVER s10 FOREIGN DATA WRAPPER foo; -CREATE USER MAPPING FOR public SERVER s10 OPTIONS (user 'secret'); -CREATE USER MAPPING FOR regress_unprivileged_role SERVER s10 OPTIONS (user 'secret'); --- owner of server can see some option fields -\deu+ - List of user mappings - Server | User name | FDW options ---------+---------------------------+------------------- - s10 | public | ("user" 'secret') - s10 | regress_unprivileged_role | - s4 | regress_foreign_data_user | - s5 | regress_test_role | (modified '1') - s6 | regress_test_role | - s8 | public | - s8 | regress_foreign_data_user | - s9 | regress_unprivileged_role | - t1 | public | (modified '1') -(9 rows) - -RESET ROLE; --- superuser can see all option fields -\deu+ - List of user mappings - Server | User name | FDW options ---------+---------------------------+--------------------- - s10 | public | ("user" 'secret') - s10 | regress_unprivileged_role | ("user" 'secret') - s4 | regress_foreign_data_user | - s5 | regress_test_role | (modified '1') - s6 | regress_test_role | - s8 | public | - s8 | regress_foreign_data_user | (password 'public') - s9 | regress_unprivileged_role | - t1 | public | (modified '1') -(9 rows) - --- unprivileged user cannot see any option field -SET ROLE regress_unprivileged_role; -\deu+ - List of user mappings - Server | User name | FDW options ---------+---------------------------+------------- - s10 | public | - s10 | regress_unprivileged_role | - s4 | regress_foreign_data_user | - s5 | regress_test_role | - s6 | regress_test_role | - s8 | public | - s8 | regress_foreign_data_user | - s9 | regress_unprivileged_role | - t1 | public | -(9 rows) - -RESET ROLE; -DROP SERVER s10 CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to user mapping for public on server s10 -drop cascades to user mapping for regress_unprivileged_role on server s10 --- Triggers -CREATE FUNCTION dummy_trigger() RETURNS TRIGGER AS $$ - BEGIN - RETURN NULL; - END -$$ language plpgsql; -CREATE TRIGGER trigtest_before_stmt BEFORE INSERT OR UPDATE OR DELETE -ON foreign_schema.foreign_table_1 -FOR EACH STATEMENT -EXECUTE PROCEDURE dummy_trigger(); -CREATE TRIGGER trigtest_after_stmt AFTER INSERT OR UPDATE OR DELETE -ON foreign_schema.foreign_table_1 -FOR EACH STATEMENT -EXECUTE PROCEDURE 
dummy_trigger(); -CREATE TRIGGER trigtest_after_stmt_tt AFTER INSERT OR UPDATE OR DELETE -- ERROR -ON foreign_schema.foreign_table_1 -REFERENCING NEW TABLE AS new_table -FOR EACH STATEMENT -EXECUTE PROCEDURE dummy_trigger(); -ERROR: "foreign_table_1" is a foreign table -DETAIL: Triggers on foreign tables cannot have transition tables. -CREATE TRIGGER trigtest_before_row BEFORE INSERT OR UPDATE OR DELETE -ON foreign_schema.foreign_table_1 -FOR EACH ROW -EXECUTE PROCEDURE dummy_trigger(); -CREATE TRIGGER trigtest_after_row AFTER INSERT OR UPDATE OR DELETE -ON foreign_schema.foreign_table_1 -FOR EACH ROW -EXECUTE PROCEDURE dummy_trigger(); -CREATE CONSTRAINT TRIGGER trigtest_constraint AFTER INSERT OR UPDATE OR DELETE -ON foreign_schema.foreign_table_1 -FOR EACH ROW -EXECUTE PROCEDURE dummy_trigger(); -ERROR: "foreign_table_1" is a foreign table -DETAIL: Foreign tables cannot have constraint triggers. -ALTER FOREIGN TABLE foreign_schema.foreign_table_1 - DISABLE TRIGGER trigtest_before_stmt; -ALTER FOREIGN TABLE foreign_schema.foreign_table_1 - ENABLE TRIGGER trigtest_before_stmt; -DROP TRIGGER trigtest_before_stmt ON foreign_schema.foreign_table_1; -DROP TRIGGER trigtest_before_row ON foreign_schema.foreign_table_1; -DROP TRIGGER trigtest_after_stmt ON foreign_schema.foreign_table_1; -DROP TRIGGER trigtest_after_row ON foreign_schema.foreign_table_1; -DROP FUNCTION dummy_trigger(); --- Table inheritance -CREATE TABLE fd_pt1 ( - c1 integer NOT NULL, - c2 text, - c3 date -); -CREATE FOREIGN TABLE ft2 () INHERITS (fd_pt1) - SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -\d+ fd_pt1 - Table "public.fd_pt1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | | - c2 | text | | | | extended | | - c3 | date | | | | plain | | -Not-null constraints: - "fd_pt1_c1_not_null" NOT NULL "c1" -Child tables: ft2, FOREIGN - -\d+ ft2 - Foreign table "public.ft2" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | -Not-null constraints: - "fd_pt1_c1_not_null" NOT NULL "c1" (inherited) -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: fd_pt1 - -DROP FOREIGN TABLE ft2; -\d+ fd_pt1 - Table "public.fd_pt1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | | - c2 | text | | | | extended | | - c3 | date | | | | plain | | -Not-null constraints: - "fd_pt1_c1_not_null" NOT NULL "c1" - -CREATE FOREIGN TABLE ft2 ( - c1 integer NOT NULL, - c2 text, - c3 date -) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -\d+ ft2 - Foreign table "public.ft2" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | -Not-null constraints: - "ft2_c1_not_null" NOT NULL "c1" -Server: s0 -FDW options: (delimiter 
',', quote '"', "be quoted" 'value') - -ALTER FOREIGN TABLE ft2 INHERIT fd_pt1; -\d+ fd_pt1 - Table "public.fd_pt1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | | - c2 | text | | | | extended | | - c3 | date | | | | plain | | -Not-null constraints: - "fd_pt1_c1_not_null" NOT NULL "c1" -Child tables: ft2, FOREIGN - -\d+ ft2 - Foreign table "public.ft2" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | -Not-null constraints: - "ft2_c1_not_null" NOT NULL "c1" (local, inherited) -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: fd_pt1 - -CREATE TABLE ct3() INHERITS(ft2); -CREATE FOREIGN TABLE ft3 ( - c1 integer NOT NULL, - c2 text, - c3 date -) INHERITS(ft2) - SERVER s0; -NOTICE: merging column "c1" with inherited definition -NOTICE: merging column "c2" with inherited definition -NOTICE: merging column "c3" with inherited definition -\d+ ft2 - Foreign table "public.ft2" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | -Not-null constraints: - "ft2_c1_not_null" NOT NULL "c1" (local, inherited) -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: fd_pt1 -Child tables: ct3, - ft3, FOREIGN - -\d+ ct3 - Table "public.ct3" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | | - c2 | text | | | | extended | | - c3 | date | | | | plain | | -Not-null constraints: - "ft2_c1_not_null" NOT NULL "c1" (inherited) -Inherits: ft2 - -\d+ ft3 - Foreign table "public.ft3" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | -Not-null constraints: - "ft3_c1_not_null" NOT NULL "c1" (local, inherited) -Server: s0 -Inherits: ft2 - --- add attributes recursively -ALTER TABLE fd_pt1 ADD COLUMN c4 integer; -ALTER TABLE fd_pt1 ADD COLUMN c5 integer DEFAULT 0; -ALTER TABLE fd_pt1 ADD COLUMN c6 integer; -ALTER TABLE fd_pt1 ADD COLUMN c7 integer NOT NULL; -ALTER TABLE fd_pt1 ADD COLUMN c8 integer; -\d+ fd_pt1 - Table "public.fd_pt1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | | - c2 | text | | | | extended | | - c3 | date | | | | plain | | - c4 | integer | | | | plain | | - c5 | integer | | | 0 | plain | | - c6 | integer | | | | plain | | - c7 | integer | | not null | | plain | | - c8 | integer | | | | plain | | -Not-null constraints: - 
"fd_pt1_c1_not_null" NOT NULL "c1" - "fd_pt1_c7_not_null" NOT NULL "c7" -Child tables: ft2, FOREIGN - -\d+ ft2 - Foreign table "public.ft2" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | - c4 | integer | | | | | plain | | - c5 | integer | | | 0 | | plain | | - c6 | integer | | | | | plain | | - c7 | integer | | not null | | | plain | | - c8 | integer | | | | | plain | | -Not-null constraints: - "ft2_c1_not_null" NOT NULL "c1" (local, inherited) - "fd_pt1_c7_not_null" NOT NULL "c7" (inherited) -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: fd_pt1 -Child tables: ct3, - ft3, FOREIGN - -\d+ ct3 - Table "public.ct3" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | | - c2 | text | | | | extended | | - c3 | date | | | | plain | | - c4 | integer | | | | plain | | - c5 | integer | | | 0 | plain | | - c6 | integer | | | | plain | | - c7 | integer | | not null | | plain | | - c8 | integer | | | | plain | | -Not-null constraints: - "ft2_c1_not_null" NOT NULL "c1" (inherited) - "fd_pt1_c7_not_null" NOT NULL "c7" (inherited) -Inherits: ft2 - -\d+ ft3 - Foreign table "public.ft3" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | - c4 | integer | | | | | plain | | - c5 | integer | | | 0 | | plain | | - c6 | integer | | | | | plain | | - c7 | integer | | not null | | | plain | | - c8 | integer | | | | | plain | | -Not-null constraints: - "ft3_c1_not_null" NOT NULL "c1" (local, inherited) - "fd_pt1_c7_not_null" NOT NULL "c7" (inherited) -Server: s0 -Inherits: ft2 - --- alter attributes recursively -ALTER TABLE fd_pt1 ALTER COLUMN c4 SET DEFAULT 0; -ALTER TABLE fd_pt1 ALTER COLUMN c5 DROP DEFAULT; -ALTER TABLE fd_pt1 ALTER COLUMN c6 SET NOT NULL; -ALTER TABLE fd_pt1 ALTER COLUMN c7 DROP NOT NULL; -ALTER TABLE fd_pt1 ALTER COLUMN c8 TYPE char(10) USING '0'; -- ERROR -ERROR: "ft2" is not a table -ALTER TABLE fd_pt1 ALTER COLUMN c8 TYPE char(10); -ALTER TABLE fd_pt1 ALTER COLUMN c8 SET DATA TYPE text; -ALTER TABLE fd_pt1 ALTER COLUMN c1 SET STATISTICS 10000; -ALTER TABLE fd_pt1 ALTER COLUMN c1 SET (n_distinct = 100); -ALTER TABLE fd_pt1 ALTER COLUMN c8 SET STATISTICS -1; -ALTER TABLE fd_pt1 ALTER COLUMN c8 SET STORAGE EXTERNAL; -\d+ fd_pt1 - Table "public.fd_pt1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | 10000 | - c2 | text | | | | extended | | - c3 | date | | | | plain | | - c4 | integer | | | 0 | plain | | - c5 | integer | | | | plain | | - c6 | integer | | not null | | plain | | - c7 | integer | | | | plain | | - c8 | text | | | | external | | -Not-null constraints: - "fd_pt1_c1_not_null" NOT NULL "c1" - "fd_pt1_c6_not_null" NOT NULL "c6" -Child tables: ft2, FOREIGN - -\d+ ft2 - Foreign table 
"public.ft2" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | 10000 | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | - c4 | integer | | | 0 | | plain | | - c5 | integer | | | | | plain | | - c6 | integer | | not null | | | plain | | - c7 | integer | | | | | plain | | - c8 | text | | | | | external | | -Not-null constraints: - "ft2_c1_not_null" NOT NULL "c1" (local, inherited) - "fd_pt1_c6_not_null" NOT NULL "c6" (inherited) -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: fd_pt1 -Child tables: ct3, - ft3, FOREIGN - --- drop attributes recursively -ALTER TABLE fd_pt1 DROP COLUMN c4; -ALTER TABLE fd_pt1 DROP COLUMN c5; -ALTER TABLE fd_pt1 DROP COLUMN c6; -ALTER TABLE fd_pt1 DROP COLUMN c7; -ALTER TABLE fd_pt1 DROP COLUMN c8; -\d+ fd_pt1 - Table "public.fd_pt1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | 10000 | - c2 | text | | | | extended | | - c3 | date | | | | plain | | -Not-null constraints: - "fd_pt1_c1_not_null" NOT NULL "c1" -Child tables: ft2, FOREIGN - -\d+ ft2 - Foreign table "public.ft2" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | 10000 | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | -Not-null constraints: - "ft2_c1_not_null" NOT NULL "c1" (local, inherited) -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: fd_pt1 -Child tables: ct3, - ft3, FOREIGN - --- add constraints recursively -ALTER TABLE fd_pt1 ADD CONSTRAINT fd_pt1chk1 CHECK (c1 > 0) NO INHERIT; -ALTER TABLE fd_pt1 ADD CONSTRAINT fd_pt1chk2 CHECK (c2 <> ''); --- connoinherit should be true for NO INHERIT constraint -SELECT relname, conname, contype, conislocal, coninhcount, connoinherit - FROM pg_class AS pc JOIN pg_constraint AS pgc ON (conrelid = pc.oid) - WHERE pc.relname = 'fd_pt1' - ORDER BY 1,2; - relname | conname | contype | conislocal | coninhcount | connoinherit ----------+--------------------+---------+------------+-------------+-------------- - fd_pt1 | fd_pt1_c1_not_null | n | t | 0 | f - fd_pt1 | fd_pt1chk1 | c | t | 0 | t - fd_pt1 | fd_pt1chk2 | c | t | 0 | f -(3 rows) - --- child does not inherit NO INHERIT constraints -\d+ fd_pt1 - Table "public.fd_pt1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | 10000 | - c2 | text | | | | extended | | - c3 | date | | | | plain | | -Check constraints: - "fd_pt1chk1" CHECK (c1 > 0) NO INHERIT - "fd_pt1chk2" CHECK (c2 <> ''::text) -Not-null constraints: - "fd_pt1_c1_not_null" NOT NULL "c1" -Child tables: ft2, FOREIGN - -\d+ ft2 - Foreign table "public.ft2" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | 10000 | - c2 
| text | | | | | extended | | - c3 | date | | | | | plain | | -Check constraints: - "fd_pt1chk2" CHECK (c2 <> ''::text) -Not-null constraints: - "ft2_c1_not_null" NOT NULL "c1" (local, inherited) -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: fd_pt1 -Child tables: ct3, - ft3, FOREIGN - -DROP FOREIGN TABLE ft2; -- ERROR -ERROR: cannot drop foreign table ft2 because other objects depend on it -DETAIL: table ct3 depends on foreign table ft2 -foreign table ft3 depends on foreign table ft2 -HINT: Use DROP ... CASCADE to drop the dependent objects too. -DROP FOREIGN TABLE ft2 CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table ct3 -drop cascades to foreign table ft3 -CREATE FOREIGN TABLE ft2 ( - c1 integer NOT NULL, - c2 text, - c3 date -) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); --- child must have parent's INHERIT constraints -ALTER FOREIGN TABLE ft2 INHERIT fd_pt1; -- ERROR -ERROR: child table is missing constraint "fd_pt1chk2" -ALTER FOREIGN TABLE ft2 ADD CONSTRAINT fd_pt1chk2 CHECK (c2 <> ''); -ALTER FOREIGN TABLE ft2 INHERIT fd_pt1; --- child does not inherit NO INHERIT constraints -\d+ fd_pt1 - Table "public.fd_pt1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | 10000 | - c2 | text | | | | extended | | - c3 | date | | | | plain | | -Check constraints: - "fd_pt1chk1" CHECK (c1 > 0) NO INHERIT - "fd_pt1chk2" CHECK (c2 <> ''::text) -Not-null constraints: - "fd_pt1_c1_not_null" NOT NULL "c1" -Child tables: ft2, FOREIGN - -\d+ ft2 - Foreign table "public.ft2" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | -Check constraints: - "fd_pt1chk2" CHECK (c2 <> ''::text) -Not-null constraints: - "ft2_c1_not_null" NOT NULL "c1" (local, inherited) -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: fd_pt1 - --- drop constraints recursively -ALTER TABLE fd_pt1 DROP CONSTRAINT fd_pt1chk1 CASCADE; -ALTER TABLE fd_pt1 DROP CONSTRAINT fd_pt1chk2 CASCADE; --- NOT VALID case -INSERT INTO fd_pt1 VALUES (1, 'fd_pt1'::text, '1994-01-01'::date); -ALTER TABLE fd_pt1 ADD CONSTRAINT fd_pt1chk3 CHECK (c2 <> '') NOT VALID; -\d+ fd_pt1 - Table "public.fd_pt1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | 10000 | - c2 | text | | | | extended | | - c3 | date | | | | plain | | -Check constraints: - "fd_pt1chk3" CHECK (c2 <> ''::text) NOT VALID -Not-null constraints: - "fd_pt1_c1_not_null" NOT NULL "c1" -Child tables: ft2, FOREIGN - -\d+ ft2 - Foreign table "public.ft2" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | -Check constraints: - "fd_pt1chk2" CHECK (c2 <> ''::text) - "fd_pt1chk3" CHECK (c2 <> ''::text) NOT VALID 
-Not-null constraints: - "ft2_c1_not_null" NOT NULL "c1" (local, inherited) -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: fd_pt1 - --- VALIDATE CONSTRAINT need do nothing on foreign tables -ALTER TABLE fd_pt1 VALIDATE CONSTRAINT fd_pt1chk3; -\d+ fd_pt1 - Table "public.fd_pt1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | 10000 | - c2 | text | | | | extended | | - c3 | date | | | | plain | | -Check constraints: - "fd_pt1chk3" CHECK (c2 <> ''::text) -Not-null constraints: - "fd_pt1_c1_not_null" NOT NULL "c1" -Child tables: ft2, FOREIGN - -\d+ ft2 - Foreign table "public.ft2" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | -Check constraints: - "fd_pt1chk2" CHECK (c2 <> ''::text) - "fd_pt1chk3" CHECK (c2 <> ''::text) -Not-null constraints: - "ft2_c1_not_null" NOT NULL "c1" (local, inherited) -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: fd_pt1 - --- changes name of an attribute recursively -ALTER TABLE fd_pt1 RENAME COLUMN c1 TO f1; -ALTER TABLE fd_pt1 RENAME COLUMN c2 TO f2; -ALTER TABLE fd_pt1 RENAME COLUMN c3 TO f3; --- changes name of a constraint recursively -ALTER TABLE fd_pt1 RENAME CONSTRAINT fd_pt1chk3 TO f2_check; -\d+ fd_pt1 - Table "public.fd_pt1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - f1 | integer | | not null | | plain | 10000 | - f2 | text | | | | extended | | - f3 | date | | | | plain | | -Check constraints: - "f2_check" CHECK (f2 <> ''::text) -Not-null constraints: - "fd_pt1_c1_not_null" NOT NULL "f1" -Child tables: ft2, FOREIGN - -\d+ ft2 - Foreign table "public.ft2" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - f1 | integer | | not null | | | plain | | - f2 | text | | | | | extended | | - f3 | date | | | | | plain | | -Check constraints: - "f2_check" CHECK (f2 <> ''::text) - "fd_pt1chk2" CHECK (f2 <> ''::text) -Not-null constraints: - "ft2_c1_not_null" NOT NULL "f1" (local, inherited) -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: fd_pt1 - -DROP TABLE fd_pt1 CASCADE; -NOTICE: drop cascades to foreign table ft2 --- IMPORT FOREIGN SCHEMA -IMPORT FOREIGN SCHEMA s1 FROM SERVER s9 INTO public; -- ERROR -ERROR: foreign-data wrapper "foo" has no handler -IMPORT FOREIGN SCHEMA s1 LIMIT TO (t1) FROM SERVER s9 INTO public; --ERROR -ERROR: foreign-data wrapper "foo" has no handler -IMPORT FOREIGN SCHEMA s1 EXCEPT (t1) FROM SERVER s9 INTO public; -- ERROR -ERROR: foreign-data wrapper "foo" has no handler -IMPORT FOREIGN SCHEMA s1 EXCEPT (t1, t2) FROM SERVER s9 INTO public -OPTIONS (option1 'value1', option2 'value2'); -- ERROR -ERROR: foreign-data wrapper "foo" has no handler --- DROP FOREIGN TABLE -DROP FOREIGN TABLE no_table; -- ERROR -ERROR: foreign table "no_table" does not exist -DROP FOREIGN TABLE IF EXISTS no_table; 
-NOTICE: foreign table "no_table" does not exist, skipping -DROP FOREIGN TABLE foreign_schema.foreign_table_1; --- REASSIGN OWNED/DROP OWNED of foreign objects -REASSIGN OWNED BY regress_test_role TO regress_test_role2; -DROP OWNED BY regress_test_role2; -ERROR: cannot drop desired object(s) because other objects depend on them -DETAIL: user mapping for regress_test_role on server s5 depends on server s5 -HINT: Use DROP ... CASCADE to drop the dependent objects too. -DROP OWNED BY regress_test_role2 CASCADE; -NOTICE: drop cascades to user mapping for regress_test_role on server s5 --- Foreign partition DDL stuff -CREATE TABLE fd_pt2 ( - c1 integer NOT NULL, - c2 text, - c3 date -) PARTITION BY LIST (c1); -CREATE FOREIGN TABLE fd_pt2_1 PARTITION OF fd_pt2 FOR VALUES IN (1) - SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -\d+ fd_pt2 - Partitioned table "public.fd_pt2" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | | - c2 | text | | | | extended | | - c3 | date | | | | plain | | -Partition key: LIST (c1) -Not-null constraints: - "fd_pt2_c1_not_null" NOT NULL "c1" -Partitions: fd_pt2_1 FOR VALUES IN (1), FOREIGN - -\d+ fd_pt2_1 - Foreign table "public.fd_pt2_1" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | -Partition of: fd_pt2 FOR VALUES IN (1) -Partition constraint: ((c1 IS NOT NULL) AND (c1 = 1)) -Not-null constraints: - "fd_pt2_c1_not_null" NOT NULL "c1" (inherited) -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') - --- partition cannot have additional columns -DROP FOREIGN TABLE fd_pt2_1; -CREATE FOREIGN TABLE fd_pt2_1 ( - c1 integer NOT NULL, - c2 text, - c3 date, - c4 char -) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -\d+ fd_pt2_1 - Foreign table "public.fd_pt2_1" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+--------------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | - c4 | character(1) | | | | | extended | | -Not-null constraints: - "fd_pt2_1_c1_not_null" NOT NULL "c1" -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') - -ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR -ERROR: table "fd_pt2_1" contains column "c4" not found in parent "fd_pt2" -DETAIL: The new partition may contain only the columns present in parent. 
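
The ATTACH failure above is the general column-matching rule for partitions, foreign or local: a partition may not carry columns beyond the parent's set. A minimal stand-alone sketch of the rule, with illustrative names and a handler-less wrapper so the DDL runs without a real remote (CREATE FOREIGN DATA WRAPPER requires superuser):

CREATE FOREIGN DATA WRAPPER sketch_fdw;  -- no handler: DDL works, actual scans would fail
CREATE SERVER sketch_srv FOREIGN DATA WRAPPER sketch_fdw;
CREATE TABLE sketch_pt (a int) PARTITION BY LIST (a);
CREATE FOREIGN TABLE sketch_bad (a int, extra text) SERVER sketch_srv;
ALTER TABLE sketch_pt ATTACH PARTITION sketch_bad FOR VALUES IN (1);  -- fails: "extra" not in parent
CREATE FOREIGN TABLE sketch_ok (a int) SERVER sketch_srv;
ALTER TABLE sketch_pt ATTACH PARTITION sketch_ok FOR VALUES IN (1);   -- succeeds, and (as the test
                                                                      -- below shows) with no
                                                                      -- validation scan of remote data
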
-DROP FOREIGN TABLE fd_pt2_1; -\d+ fd_pt2 - Partitioned table "public.fd_pt2" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | | - c2 | text | | | | extended | | - c3 | date | | | | plain | | -Partition key: LIST (c1) -Not-null constraints: - "fd_pt2_c1_not_null" NOT NULL "c1" -Number of partitions: 0 - -CREATE FOREIGN TABLE fd_pt2_1 ( - c1 integer NOT NULL, - c2 text, - c3 date -) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -\d+ fd_pt2_1 - Foreign table "public.fd_pt2_1" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | -Not-null constraints: - "fd_pt2_1_c1_not_null" NOT NULL "c1" -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') - --- no attach partition validation occurs for foreign tables -ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -\d+ fd_pt2 - Partitioned table "public.fd_pt2" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | | - c2 | text | | | | extended | | - c3 | date | | | | plain | | -Partition key: LIST (c1) -Not-null constraints: - "fd_pt2_c1_not_null" NOT NULL "c1" -Partitions: fd_pt2_1 FOR VALUES IN (1), FOREIGN - -\d+ fd_pt2_1 - Foreign table "public.fd_pt2_1" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | -Partition of: fd_pt2 FOR VALUES IN (1) -Partition constraint: ((c1 IS NOT NULL) AND (c1 = 1)) -Not-null constraints: - "fd_pt2_1_c1_not_null" NOT NULL "c1" (inherited) -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') - --- cannot add column to a partition -ALTER TABLE fd_pt2_1 ADD c4 char; -ERROR: cannot add column to a partition --- ok to have a partition's own constraints though -ALTER TABLE fd_pt2_1 ALTER c3 SET NOT NULL; -ALTER TABLE fd_pt2_1 ADD CONSTRAINT p21chk CHECK (c2 <> ''); -\d+ fd_pt2 - Partitioned table "public.fd_pt2" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | | - c2 | text | | | | extended | | - c3 | date | | | | plain | | -Partition key: LIST (c1) -Not-null constraints: - "fd_pt2_c1_not_null" NOT NULL "c1" -Partitions: fd_pt2_1 FOR VALUES IN (1), FOREIGN - -\d+ fd_pt2_1 - Foreign table "public.fd_pt2_1" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | not null | | | plain | | -Partition of: fd_pt2 FOR VALUES IN (1) -Partition constraint: ((c1 IS NOT NULL) AND (c1 = 1)) 
-Check constraints: - "p21chk" CHECK (c2 <> ''::text) -Not-null constraints: - "fd_pt2_1_c1_not_null" NOT NULL "c1" (inherited) - "fd_pt2_1_c3_not_null" NOT NULL "c3" -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') - --- cannot drop inherited NOT NULL constraint from a partition -ALTER TABLE fd_pt2_1 ALTER c1 DROP NOT NULL; -ERROR: column "c1" is marked NOT NULL in parent table --- partition must have parent's constraints -ALTER TABLE fd_pt2 DETACH PARTITION fd_pt2_1; -ALTER TABLE fd_pt2 ALTER c2 SET NOT NULL; -\d+ fd_pt2 - Partitioned table "public.fd_pt2" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | | - c2 | text | | not null | | extended | | - c3 | date | | | | plain | | -Partition key: LIST (c1) -Not-null constraints: - "fd_pt2_c1_not_null" NOT NULL "c1" - "fd_pt2_c2_not_null" NOT NULL "c2" -Number of partitions: 0 - -\d+ fd_pt2_1 - Foreign table "public.fd_pt2_1" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | not null | | | plain | | -Check constraints: - "p21chk" CHECK (c2 <> ''::text) -Not-null constraints: - "fd_pt2_1_c1_not_null" NOT NULL "c1" - "fd_pt2_1_c3_not_null" NOT NULL "c3" -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') - -ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR -ERROR: column "c2" in child table must be marked NOT NULL -ALTER FOREIGN TABLE fd_pt2_1 ALTER c2 SET NOT NULL; -ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -ALTER TABLE fd_pt2 DETACH PARTITION fd_pt2_1; -ALTER TABLE fd_pt2 ADD CONSTRAINT fd_pt2chk1 CHECK (c1 > 0); -\d+ fd_pt2 - Partitioned table "public.fd_pt2" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | | - c2 | text | | not null | | extended | | - c3 | date | | | | plain | | -Partition key: LIST (c1) -Check constraints: - "fd_pt2chk1" CHECK (c1 > 0) -Not-null constraints: - "fd_pt2_c1_not_null" NOT NULL "c1" - "fd_pt2_c2_not_null" NOT NULL "c2" -Number of partitions: 0 - -\d+ fd_pt2_1 - Foreign table "public.fd_pt2_1" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | not null | | | extended | | - c3 | date | | not null | | | plain | | -Check constraints: - "p21chk" CHECK (c2 <> ''::text) -Not-null constraints: - "fd_pt2_1_c1_not_null" NOT NULL "c1" - "fd_pt2_1_c2_not_null" NOT NULL "c2" - "fd_pt2_1_c3_not_null" NOT NULL "c3" -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') - -ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR -ERROR: child table is missing constraint "fd_pt2chk1" -ALTER FOREIGN TABLE fd_pt2_1 ADD CONSTRAINT fd_pt2chk1 CHECK (c1 > 0); -ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -DROP FOREIGN TABLE fd_pt2_1; -DROP TABLE fd_pt2; --- foreign table cannot be part of 
partition tree made of temporary --- relations. -CREATE TEMP TABLE temp_parted (a int) PARTITION BY LIST (a); -CREATE FOREIGN TABLE foreign_part PARTITION OF temp_parted DEFAULT - SERVER s0; -- ERROR -ERROR: cannot create a permanent relation as partition of temporary relation "temp_parted" -CREATE FOREIGN TABLE foreign_part (a int) SERVER s0; -ALTER TABLE temp_parted ATTACH PARTITION foreign_part DEFAULT; -- ERROR -ERROR: cannot attach a permanent relation as partition of temporary relation "temp_parted" -DROP FOREIGN TABLE foreign_part; -DROP TABLE temp_parted; --- Cleanup -DROP SCHEMA foreign_schema CASCADE; -DROP ROLE regress_test_role; -- ERROR -ERROR: role "regress_test_role" cannot be dropped because some objects depend on it -DETAIL: privileges for foreign-data wrapper foo -privileges for server s4 -owner of user mapping for regress_test_role on server s6 -DROP SERVER t1 CASCADE; -NOTICE: drop cascades to user mapping for public on server t1 -DROP USER MAPPING FOR regress_test_role SERVER s6; -DROP FOREIGN DATA WRAPPER foo CASCADE; -NOTICE: drop cascades to 5 other objects -DETAIL: drop cascades to server s4 -drop cascades to user mapping for regress_foreign_data_user on server s4 -drop cascades to server s6 -drop cascades to server s9 -drop cascades to user mapping for regress_unprivileged_role on server s9 -DROP SERVER s8 CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to user mapping for regress_foreign_data_user on server s8 -drop cascades to user mapping for public on server s8 -DROP ROLE regress_test_indirect; -DROP ROLE regress_test_role; -DROP ROLE regress_unprivileged_role; -- ERROR -ERROR: role "regress_unprivileged_role" cannot be dropped because some objects depend on it -DETAIL: privileges for foreign-data wrapper postgresql -REVOKE ALL ON FOREIGN DATA WRAPPER postgresql FROM regress_unprivileged_role; -DROP ROLE regress_unprivileged_role; -DROP ROLE regress_test_role2; -DROP FOREIGN DATA WRAPPER postgresql CASCADE; -DROP FOREIGN DATA WRAPPER dummy CASCADE; -NOTICE: drop cascades to server s0 -\c -DROP ROLE regress_foreign_data_user; --- At this point we should have no wrappers, no servers, and no mappings. -SELECT fdwname, fdwhandler, fdwvalidator, fdwoptions FROM pg_foreign_data_wrapper; - fdwname | fdwhandler | fdwvalidator | fdwoptions ----------+------------+--------------+------------ -(0 rows) - -SELECT srvname, srvoptions FROM pg_foreign_server; - srvname | srvoptions ----------+------------ -(0 rows) - -SELECT * FROM pg_user_mapping; - oid | umuser | umserver | umoptions ------+--------+----------+----------- -(0 rows) - +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
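
The cleanup sequence above follows the canonical order for retiring a role that owns or holds privileges on FDW objects: reassign what can change hands, drop what remains, revoke wrapper-level grants, then drop the role. A minimal sketch with illustrative role names:

REASSIGN OWNED BY app_role TO admin_role;  -- hand over ownable objects first
DROP OWNED BY app_role CASCADE;            -- then drop leftover privileges and objects;
                                           -- CASCADE reaches dependent user mappings, as above
DROP ROLE app_role;                        -- only now does this succeed

REASSIGN OWNED and DROP OWNED act per database, so the first two steps must be repeated in every database holding such objects before DROP ROLE can succeed cluster-wide.
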
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/window.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/window.out --- /tmp/cirrus-ci-build/src/test/regress/expected/window.out 2024-03-07 14:25:00.334767000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/window.out 2024-03-07 14:27:17.351014000 +0000 @@ -1,5377 +1,2 @@ --- --- WINDOW FUNCTIONS --- -CREATE TEMPORARY TABLE empsalary ( - depname varchar, - empno bigint, - salary int, - enroll_date date -); -INSERT INTO empsalary VALUES -('develop', 10, 5200, '2007-08-01'), -('sales', 1, 5000, '2006-10-01'), -('personnel', 5, 3500, '2007-12-10'), -('sales', 4, 4800, '2007-08-08'), -('personnel', 2, 3900, '2006-12-23'), -('develop', 7, 4200, '2008-01-01'), -('develop', 9, 4500, '2008-01-01'), -('sales', 3, 4800, '2007-08-01'), -('develop', 8, 6000, '2006-10-01'), -('develop', 11, 5200, '2007-08-15'); -SELECT depname, empno, salary, sum(salary) OVER (PARTITION BY depname) FROM empsalary ORDER BY depname, salary; - depname | empno | salary | sum ------------+-------+--------+------- - develop | 7 | 4200 | 25100 - develop | 9 | 4500 | 25100 - develop | 11 | 5200 | 25100 - develop | 10 | 5200 | 25100 - develop | 8 | 6000 | 25100 - personnel | 5 | 3500 | 7400 - personnel | 2 | 3900 | 7400 - sales | 3 | 4800 | 14600 - sales | 4 | 4800 | 14600 - sales | 1 | 5000 | 14600 -(10 rows) - -SELECT depname, empno, salary, rank() OVER (PARTITION BY depname ORDER BY salary) FROM empsalary; - depname | empno | salary | rank ------------+-------+--------+------ - develop | 7 | 4200 | 1 - develop | 9 | 4500 | 2 - develop | 11 | 5200 | 3 - develop | 10 | 5200 | 3 - develop | 8 | 6000 | 5 - personnel | 5 | 3500 | 1 - personnel | 2 | 3900 | 2 - sales | 3 | 4800 | 1 - sales | 4 | 4800 | 1 - sales | 1 | 5000 | 3 -(10 rows) - --- with GROUP BY -SELECT four, ten, SUM(SUM(four)) OVER (PARTITION BY four), AVG(ten) FROM tenk1 -GROUP BY four, ten ORDER BY four, ten; - four | ten | sum | avg -------+-----+------+------------------------ - 0 | 0 | 0 | 0.00000000000000000000 - 0 | 2 | 0 | 2.0000000000000000 - 0 | 4 | 0 | 4.0000000000000000 - 0 | 6 | 0 | 6.0000000000000000 - 0 | 8 | 0 | 8.0000000000000000 - 1 | 1 | 2500 | 1.00000000000000000000 - 1 | 3 | 2500 | 3.0000000000000000 - 1 | 5 | 2500 | 5.0000000000000000 - 1 | 7 | 2500 | 7.0000000000000000 - 1 | 9 | 2500 | 9.0000000000000000 - 2 | 0 | 5000 | 0.00000000000000000000 - 2 | 2 | 5000 | 2.0000000000000000 - 2 | 4 | 5000 | 4.0000000000000000 - 2 | 6 | 5000 | 6.0000000000000000 - 2 | 8 | 5000 | 8.0000000000000000 - 3 | 1 | 7500 | 1.00000000000000000000 - 3 | 3 | 7500 | 3.0000000000000000 - 3 | 5 | 7500 | 5.0000000000000000 - 3 | 7 | 7500 | 7.0000000000000000 - 3 | 9 | 7500 | 9.0000000000000000 -(20 rows) - -SELECT depname, empno, salary, sum(salary) OVER w FROM empsalary WINDOW w AS (PARTITION BY depname); - depname | empno | salary | sum ------------+-------+--------+------- - develop | 11 | 5200 | 25100 - develop | 7 | 4200 | 25100 - develop | 9 | 4500 | 25100 - develop | 8 | 6000 | 25100 - develop | 10 | 5200 | 25100 - personnel | 5 | 3500 | 7400 - personnel | 2 | 3900 | 7400 - sales | 3 | 4800 | 14600 - sales | 1 | 5000 | 14600 - sales | 4 | 4800 | 14600 -(10 rows) - -SELECT depname, empno, salary, rank() OVER w FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY salary) ORDER BY rank() OVER w; - depname | empno | salary | rank ------------+-------+--------+------ - develop | 7 | 4200 | 1 - personnel | 5 | 3500 | 1 - sales | 3 | 4800 | 1 - sales | 4 | 4800 | 1 - personnel | 
2 | 3900 | 2 - develop | 9 | 4500 | 2 - sales | 1 | 5000 | 3 - develop | 11 | 5200 | 3 - develop | 10 | 5200 | 3 - develop | 8 | 6000 | 5 -(10 rows) - --- empty window specification -SELECT COUNT(*) OVER () FROM tenk1 WHERE unique2 < 10; - count -------- - 10 - 10 - 10 - 10 - 10 - 10 - 10 - 10 - 10 - 10 -(10 rows) - -SELECT COUNT(*) OVER w FROM tenk1 WHERE unique2 < 10 WINDOW w AS (); - count -------- - 10 - 10 - 10 - 10 - 10 - 10 - 10 - 10 - 10 - 10 -(10 rows) - --- no window operation -SELECT four FROM tenk1 WHERE FALSE WINDOW w AS (PARTITION BY ten); - four ------- -(0 rows) - --- cumulative aggregate -SELECT sum(four) OVER (PARTITION BY ten ORDER BY unique2) AS sum_1, ten, four FROM tenk1 WHERE unique2 < 10; - sum_1 | ten | four --------+-----+------ - 0 | 0 | 0 - 0 | 0 | 0 - 2 | 0 | 2 - 3 | 1 | 3 - 4 | 1 | 1 - 5 | 1 | 1 - 3 | 3 | 3 - 0 | 4 | 0 - 1 | 7 | 1 - 1 | 9 | 1 -(10 rows) - -SELECT row_number() OVER (ORDER BY unique2) FROM tenk1 WHERE unique2 < 10; - row_number ------------- - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 -(10 rows) - -SELECT rank() OVER (PARTITION BY four ORDER BY ten) AS rank_1, ten, four FROM tenk1 WHERE unique2 < 10; - rank_1 | ten | four ---------+-----+------ - 1 | 0 | 0 - 1 | 0 | 0 - 3 | 4 | 0 - 1 | 1 | 1 - 1 | 1 | 1 - 3 | 7 | 1 - 4 | 9 | 1 - 1 | 0 | 2 - 1 | 1 | 3 - 2 | 3 | 3 -(10 rows) - -SELECT dense_rank() OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - dense_rank | ten | four -------------+-----+------ - 1 | 0 | 0 - 1 | 0 | 0 - 2 | 4 | 0 - 1 | 1 | 1 - 1 | 1 | 1 - 2 | 7 | 1 - 3 | 9 | 1 - 1 | 0 | 2 - 1 | 1 | 3 - 2 | 3 | 3 -(10 rows) - -SELECT percent_rank() OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - percent_rank | ten | four ---------------------+-----+------ - 0 | 0 | 0 - 0 | 0 | 0 - 1 | 4 | 0 - 0 | 1 | 1 - 0 | 1 | 1 - 0.6666666666666666 | 7 | 1 - 1 | 9 | 1 - 0 | 0 | 2 - 0 | 1 | 3 - 1 | 3 | 3 -(10 rows) - -SELECT cume_dist() OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - cume_dist | ten | four ---------------------+-----+------ - 0.6666666666666666 | 0 | 0 - 0.6666666666666666 | 0 | 0 - 1 | 4 | 0 - 0.5 | 1 | 1 - 0.5 | 1 | 1 - 0.75 | 7 | 1 - 1 | 9 | 1 - 1 | 0 | 2 - 0.5 | 1 | 3 - 1 | 3 | 3 -(10 rows) - -SELECT ntile(3) OVER (ORDER BY ten, four), ten, four FROM tenk1 WHERE unique2 < 10; - ntile | ten | four --------+-----+------ - 1 | 0 | 0 - 1 | 0 | 0 - 1 | 0 | 2 - 1 | 1 | 1 - 2 | 1 | 1 - 2 | 1 | 3 - 2 | 3 | 3 - 3 | 4 | 0 - 3 | 7 | 1 - 3 | 9 | 1 -(10 rows) - -SELECT ntile(NULL) OVER (ORDER BY ten, four), ten, four FROM tenk1 LIMIT 2; - ntile | ten | four --------+-----+------ - | 0 | 0 - | 0 | 0 -(2 rows) - -SELECT lag(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - lag | ten | four ------+-----+------ - | 0 | 0 - 0 | 0 | 0 - 0 | 4 | 0 - | 1 | 1 - 1 | 1 | 1 - 1 | 7 | 1 - 7 | 9 | 1 - | 0 | 2 - | 1 | 3 - 1 | 3 | 3 -(10 rows) - -SELECT lag(ten, four) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - lag | ten | four ------+-----+------ - 0 | 0 | 0 - 0 | 0 | 0 - 4 | 4 | 0 - | 1 | 1 - 1 | 1 | 1 - 1 | 7 | 1 - 7 | 9 | 1 - | 0 | 2 - | 1 | 3 - | 3 | 3 -(10 rows) - -SELECT lag(ten, four, 0) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - lag | ten | four ------+-----+------ - 0 | 0 | 0 - 0 | 0 | 0 - 4 | 4 | 0 - 0 | 1 | 1 - 1 | 1 | 1 - 1 | 7 | 1 - 7 | 9 | 1 - 0 | 0 | 2 - 0 | 1 | 3 - 0 | 3 | 3 -(10 rows) - -SELECT lag(ten, four, 0.7) OVER (PARTITION BY four ORDER 
BY ten), ten, four FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten; - lag | ten | four ------+-----+------ - 0 | 0 | 0 - 0 | 0 | 0 - 4 | 4 | 0 - 0.7 | 1 | 1 - 1 | 1 | 1 - 1 | 7 | 1 - 7 | 9 | 1 - 0.7 | 0 | 2 - 0.7 | 1 | 3 - 0.7 | 3 | 3 -(10 rows) - -SELECT lead(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - lead | ten | four -------+-----+------ - 0 | 0 | 0 - 4 | 0 | 0 - | 4 | 0 - 1 | 1 | 1 - 7 | 1 | 1 - 9 | 7 | 1 - | 9 | 1 - | 0 | 2 - 3 | 1 | 3 - | 3 | 3 -(10 rows) - -SELECT lead(ten * 2, 1) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - lead | ten | four -------+-----+------ - 0 | 0 | 0 - 8 | 0 | 0 - | 4 | 0 - 2 | 1 | 1 - 14 | 1 | 1 - 18 | 7 | 1 - | 9 | 1 - | 0 | 2 - 6 | 1 | 3 - | 3 | 3 -(10 rows) - -SELECT lead(ten * 2, 1, -1) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - lead | ten | four -------+-----+------ - 0 | 0 | 0 - 8 | 0 | 0 - -1 | 4 | 0 - 2 | 1 | 1 - 14 | 1 | 1 - 18 | 7 | 1 - -1 | 9 | 1 - -1 | 0 | 2 - 6 | 1 | 3 - -1 | 3 | 3 -(10 rows) - -SELECT lead(ten * 2, 1, -1.4) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten; - lead | ten | four -------+-----+------ - 0 | 0 | 0 - 8 | 0 | 0 - -1.4 | 4 | 0 - 2 | 1 | 1 - 14 | 1 | 1 - 18 | 7 | 1 - -1.4 | 9 | 1 - -1.4 | 0 | 2 - 6 | 1 | 3 - -1.4 | 3 | 3 -(10 rows) - -SELECT first_value(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - first_value | ten | four --------------+-----+------ - 0 | 0 | 0 - 0 | 0 | 0 - 0 | 4 | 0 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 7 | 1 - 1 | 9 | 1 - 0 | 0 | 2 - 1 | 1 | 3 - 1 | 3 | 3 -(10 rows) - --- last_value returns the last row of the frame, which is CURRENT ROW in ORDER BY window. -SELECT last_value(four) OVER (ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - last_value | ten | four -------------+-----+------ - 0 | 0 | 0 - 0 | 0 | 2 - 0 | 0 | 0 - 1 | 1 | 1 - 1 | 1 | 3 - 1 | 1 | 1 - 3 | 3 | 3 - 0 | 4 | 0 - 1 | 7 | 1 - 1 | 9 | 1 -(10 rows) - -SELECT last_value(ten) OVER (PARTITION BY four), ten, four FROM - (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)s - ORDER BY four, ten; - last_value | ten | four -------------+-----+------ - 4 | 0 | 0 - 4 | 0 | 0 - 4 | 4 | 0 - 9 | 1 | 1 - 9 | 1 | 1 - 9 | 7 | 1 - 9 | 9 | 1 - 0 | 0 | 2 - 3 | 1 | 3 - 3 | 3 | 3 -(10 rows) - -SELECT nth_value(ten, four + 1) OVER (PARTITION BY four), ten, four - FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)s; - nth_value | ten | four ------------+-----+------ - 0 | 0 | 0 - 0 | 0 | 0 - 0 | 4 | 0 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 7 | 1 - 1 | 9 | 1 - | 0 | 2 - | 1 | 3 - | 3 | 3 -(10 rows) - -SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER (PARTITION BY two ORDER BY ten) AS wsum -FROM tenk1 GROUP BY ten, two; - ten | two | gsum | wsum ------+-----+-------+-------- - 0 | 0 | 45000 | 45000 - 2 | 0 | 47000 | 92000 - 4 | 0 | 49000 | 141000 - 6 | 0 | 51000 | 192000 - 8 | 0 | 53000 | 245000 - 1 | 1 | 46000 | 46000 - 3 | 1 | 48000 | 94000 - 5 | 1 | 50000 | 144000 - 7 | 1 | 52000 | 196000 - 9 | 1 | 54000 | 250000 -(10 rows) - -SELECT count(*) OVER (PARTITION BY four), four FROM (SELECT * FROM tenk1 WHERE two = 1)s WHERE unique2 < 10; - count | four --------+------ - 4 | 1 - 4 | 1 - 4 | 1 - 4 | 1 - 2 | 3 - 2 | 3 -(6 rows) - -SELECT (count(*) OVER (PARTITION BY four ORDER BY ten) + - sum(hundred) OVER (PARTITION BY four ORDER BY ten))::varchar AS cntsum - FROM tenk1 WHERE unique2 < 10; - cntsum --------- - 22 - 22 - 87 - 24 
- 24 - 82 - 92 - 51 - 92 - 136 -(10 rows) - --- opexpr with different windows evaluation. -SELECT * FROM( - SELECT count(*) OVER (PARTITION BY four ORDER BY ten) + - sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS total, - count(*) OVER (PARTITION BY four ORDER BY ten) AS fourcount, - sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS twosum - FROM tenk1 -)sub -WHERE total <> fourcount + twosum; - total | fourcount | twosum --------+-----------+-------- -(0 rows) - -SELECT avg(four) OVER (PARTITION BY four ORDER BY thousand / 100) FROM tenk1 WHERE unique2 < 10; - avg ------------------------- - 0.00000000000000000000 - 0.00000000000000000000 - 0.00000000000000000000 - 1.00000000000000000000 - 1.00000000000000000000 - 1.00000000000000000000 - 1.00000000000000000000 - 2.0000000000000000 - 3.0000000000000000 - 3.0000000000000000 -(10 rows) - -SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER win AS wsum -FROM tenk1 GROUP BY ten, two WINDOW win AS (PARTITION BY two ORDER BY ten); - ten | two | gsum | wsum ------+-----+-------+-------- - 0 | 0 | 45000 | 45000 - 2 | 0 | 47000 | 92000 - 4 | 0 | 49000 | 141000 - 6 | 0 | 51000 | 192000 - 8 | 0 | 53000 | 245000 - 1 | 1 | 46000 | 46000 - 3 | 1 | 48000 | 94000 - 5 | 1 | 50000 | 144000 - 7 | 1 | 52000 | 196000 - 9 | 1 | 54000 | 250000 -(10 rows) - --- more than one window with GROUP BY -SELECT sum(salary), - row_number() OVER (ORDER BY depname), - sum(sum(salary)) OVER (ORDER BY depname DESC) -FROM empsalary GROUP BY depname; - sum | row_number | sum --------+------------+------- - 25100 | 1 | 47100 - 7400 | 2 | 22000 - 14600 | 3 | 14600 -(3 rows) - --- identical windows with different names -SELECT sum(salary) OVER w1, count(*) OVER w2 -FROM empsalary WINDOW w1 AS (ORDER BY salary), w2 AS (ORDER BY salary); - sum | count --------+------- - 3500 | 1 - 7400 | 2 - 11600 | 3 - 16100 | 4 - 25700 | 6 - 25700 | 6 - 30700 | 7 - 41100 | 9 - 41100 | 9 - 47100 | 10 -(10 rows) - --- subplan -SELECT lead(ten, (SELECT two FROM tenk1 WHERE s.unique2 = unique2)) OVER (PARTITION BY four ORDER BY ten) -FROM tenk1 s WHERE unique2 < 10; - lead ------- - 0 - 0 - 4 - 1 - 7 - 9 - - 0 - 3 - -(10 rows) - --- empty table -SELECT count(*) OVER (PARTITION BY four) FROM (SELECT * FROM tenk1 WHERE FALSE)s; - count -------- -(0 rows) - --- mixture of agg/wfunc in the same window -SELECT sum(salary) OVER w, rank() OVER w FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY salary DESC); - sum | rank --------+------ - 6000 | 1 - 16400 | 2 - 16400 | 2 - 20900 | 4 - 25100 | 5 - 3900 | 1 - 7400 | 2 - 5000 | 1 - 14600 | 2 - 14600 | 2 -(10 rows) - --- strict aggs -SELECT empno, depname, salary, bonus, depadj, MIN(bonus) OVER (ORDER BY empno), MAX(depadj) OVER () FROM( - SELECT *, - CASE WHEN enroll_date < '2008-01-01' THEN 2008 - extract(YEAR FROM enroll_date) END * 500 AS bonus, - CASE WHEN - AVG(salary) OVER (PARTITION BY depname) < salary - THEN 200 END AS depadj FROM empsalary -)s; - empno | depname | salary | bonus | depadj | min | max --------+-----------+--------+-------+--------+------+----- - 1 | sales | 5000 | 1000 | 200 | 1000 | 200 - 2 | personnel | 3900 | 1000 | 200 | 1000 | 200 - 3 | sales | 4800 | 500 | | 500 | 200 - 4 | sales | 4800 | 500 | | 500 | 200 - 5 | personnel | 3500 | 500 | | 500 | 200 - 7 | develop | 4200 | | | 500 | 200 - 8 | develop | 6000 | 1000 | 200 | 500 | 200 - 9 | develop | 4500 | | | 500 | 200 - 10 | develop | 5200 | 500 | 200 | 500 | 200 - 11 | develop | 5200 | 500 | 200 | 500 | 200 -(10 rows) - --- window function over ungrouped 
agg over empty row set (bug before 9.1) -SELECT SUM(COUNT(f1)) OVER () FROM int4_tbl WHERE f1=42; - sum ------ - 0 -(1 row) - --- window function with ORDER BY an expression involving aggregates (9.1 bug) -select ten, - sum(unique1) + sum(unique2) as res, - rank() over (order by sum(unique1) + sum(unique2)) as rank -from tenk1 -group by ten order by ten; - ten | res | rank ------+----------+------ - 0 | 9976146 | 4 - 1 | 10114187 | 9 - 2 | 10059554 | 8 - 3 | 9878541 | 1 - 4 | 9881005 | 2 - 5 | 9981670 | 5 - 6 | 9947099 | 3 - 7 | 10120309 | 10 - 8 | 9991305 | 6 - 9 | 10040184 | 7 -(10 rows) - --- window and aggregate with GROUP BY expression (9.2 bug) -explain (costs off) -select first_value(max(x)) over (), y - from (select unique1 as x, ten+four as y from tenk1) ss - group by y; - QUERY PLAN ---------------------------------------------- - WindowAgg - -> HashAggregate - Group Key: (tenk1.ten + tenk1.four) - -> Seq Scan on tenk1 -(4 rows) - --- window functions returning pass-by-ref values from different rows -select x, lag(x, 1) over (order by x), lead(x, 3) over (order by x) -from (select x::numeric as x from generate_series(1,10) x); - x | lag | lead -----+-----+------ - 1 | | 4 - 2 | 1 | 5 - 3 | 2 | 6 - 4 | 3 | 7 - 5 | 4 | 8 - 6 | 5 | 9 - 7 | 6 | 10 - 8 | 7 | - 9 | 8 | - 10 | 9 | -(10 rows) - --- test non-default frame specifications -SELECT four, ten, - sum(ten) over (partition by four order by ten), - last_value(ten) over (partition by four order by ten) -FROM (select distinct ten, four from tenk1) ss; - four | ten | sum | last_value -------+-----+-----+------------ - 0 | 0 | 0 | 0 - 0 | 2 | 2 | 2 - 0 | 4 | 6 | 4 - 0 | 6 | 12 | 6 - 0 | 8 | 20 | 8 - 1 | 1 | 1 | 1 - 1 | 3 | 4 | 3 - 1 | 5 | 9 | 5 - 1 | 7 | 16 | 7 - 1 | 9 | 25 | 9 - 2 | 0 | 0 | 0 - 2 | 2 | 2 | 2 - 2 | 4 | 6 | 4 - 2 | 6 | 12 | 6 - 2 | 8 | 20 | 8 - 3 | 1 | 1 | 1 - 3 | 3 | 4 | 3 - 3 | 5 | 9 | 5 - 3 | 7 | 16 | 7 - 3 | 9 | 25 | 9 -(20 rows) - -SELECT four, ten, - sum(ten) over (partition by four order by ten range between unbounded preceding and current row), - last_value(ten) over (partition by four order by ten range between unbounded preceding and current row) -FROM (select distinct ten, four from tenk1) ss; - four | ten | sum | last_value -------+-----+-----+------------ - 0 | 0 | 0 | 0 - 0 | 2 | 2 | 2 - 0 | 4 | 6 | 4 - 0 | 6 | 12 | 6 - 0 | 8 | 20 | 8 - 1 | 1 | 1 | 1 - 1 | 3 | 4 | 3 - 1 | 5 | 9 | 5 - 1 | 7 | 16 | 7 - 1 | 9 | 25 | 9 - 2 | 0 | 0 | 0 - 2 | 2 | 2 | 2 - 2 | 4 | 6 | 4 - 2 | 6 | 12 | 6 - 2 | 8 | 20 | 8 - 3 | 1 | 1 | 1 - 3 | 3 | 4 | 3 - 3 | 5 | 9 | 5 - 3 | 7 | 16 | 7 - 3 | 9 | 25 | 9 -(20 rows) - -SELECT four, ten, - sum(ten) over (partition by four order by ten range between unbounded preceding and unbounded following), - last_value(ten) over (partition by four order by ten range between unbounded preceding and unbounded following) -FROM (select distinct ten, four from tenk1) ss; - four | ten | sum | last_value -------+-----+-----+------------ - 0 | 0 | 20 | 8 - 0 | 2 | 20 | 8 - 0 | 4 | 20 | 8 - 0 | 6 | 20 | 8 - 0 | 8 | 20 | 8 - 1 | 1 | 25 | 9 - 1 | 3 | 25 | 9 - 1 | 5 | 25 | 9 - 1 | 7 | 25 | 9 - 1 | 9 | 25 | 9 - 2 | 0 | 20 | 8 - 2 | 2 | 20 | 8 - 2 | 4 | 20 | 8 - 2 | 6 | 20 | 8 - 2 | 8 | 20 | 8 - 3 | 1 | 25 | 9 - 3 | 3 | 25 | 9 - 3 | 5 | 25 | 9 - 3 | 7 | 25 | 9 - 3 | 9 | 25 | 9 -(20 rows) - -SELECT four, ten/4 as two, - sum(ten/4) over (partition by four order by ten/4 range between unbounded preceding and current row), - last_value(ten/4) over (partition by four order by ten/4 range between unbounded preceding 
and current row) -FROM (select distinct ten, four from tenk1) ss; - four | two | sum | last_value -------+-----+-----+------------ - 0 | 0 | 0 | 0 - 0 | 0 | 0 | 0 - 0 | 1 | 2 | 1 - 0 | 1 | 2 | 1 - 0 | 2 | 4 | 2 - 1 | 0 | 0 | 0 - 1 | 0 | 0 | 0 - 1 | 1 | 2 | 1 - 1 | 1 | 2 | 1 - 1 | 2 | 4 | 2 - 2 | 0 | 0 | 0 - 2 | 0 | 0 | 0 - 2 | 1 | 2 | 1 - 2 | 1 | 2 | 1 - 2 | 2 | 4 | 2 - 3 | 0 | 0 | 0 - 3 | 0 | 0 | 0 - 3 | 1 | 2 | 1 - 3 | 1 | 2 | 1 - 3 | 2 | 4 | 2 -(20 rows) - -SELECT four, ten/4 as two, - sum(ten/4) over (partition by four order by ten/4 rows between unbounded preceding and current row), - last_value(ten/4) over (partition by four order by ten/4 rows between unbounded preceding and current row) -FROM (select distinct ten, four from tenk1) ss; - four | two | sum | last_value -------+-----+-----+------------ - 0 | 0 | 0 | 0 - 0 | 0 | 0 | 0 - 0 | 1 | 1 | 1 - 0 | 1 | 2 | 1 - 0 | 2 | 4 | 2 - 1 | 0 | 0 | 0 - 1 | 0 | 0 | 0 - 1 | 1 | 1 | 1 - 1 | 1 | 2 | 1 - 1 | 2 | 4 | 2 - 2 | 0 | 0 | 0 - 2 | 0 | 0 | 0 - 2 | 1 | 1 | 1 - 2 | 1 | 2 | 1 - 2 | 2 | 4 | 2 - 3 | 0 | 0 | 0 - 3 | 0 | 0 | 0 - 3 | 1 | 1 | 1 - 3 | 1 | 2 | 1 - 3 | 2 | 4 | 2 -(20 rows) - -SELECT sum(unique1) over (order by four range between current row and unbounded following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 45 | 0 | 0 - 45 | 8 | 0 - 45 | 4 | 0 - 33 | 5 | 1 - 33 | 9 | 1 - 33 | 1 | 1 - 18 | 6 | 2 - 18 | 2 | 2 - 10 | 3 | 3 - 10 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (rows between current row and unbounded following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 45 | 4 | 0 - 41 | 2 | 2 - 39 | 1 | 1 - 38 | 6 | 2 - 32 | 9 | 1 - 23 | 8 | 0 - 15 | 5 | 1 - 10 | 3 | 3 - 7 | 7 | 3 - 0 | 0 | 0 -(10 rows) - -SELECT sum(unique1) over (rows between 2 preceding and 2 following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 7 | 4 | 0 - 13 | 2 | 2 - 22 | 1 | 1 - 26 | 6 | 2 - 29 | 9 | 1 - 31 | 8 | 0 - 32 | 5 | 1 - 23 | 3 | 3 - 15 | 7 | 3 - 10 | 0 | 0 -(10 rows) - -SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude no others), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 7 | 4 | 0 - 13 | 2 | 2 - 22 | 1 | 1 - 26 | 6 | 2 - 29 | 9 | 1 - 31 | 8 | 0 - 32 | 5 | 1 - 23 | 3 | 3 - 15 | 7 | 3 - 10 | 0 | 0 -(10 rows) - -SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude current row), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 3 | 4 | 0 - 11 | 2 | 2 - 21 | 1 | 1 - 20 | 6 | 2 - 20 | 9 | 1 - 23 | 8 | 0 - 27 | 5 | 1 - 20 | 3 | 3 - 8 | 7 | 3 - 10 | 0 | 0 -(10 rows) - -SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude group), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - | 4 | 0 - | 2 | 2 - | 1 | 1 - | 6 | 2 - | 9 | 1 - | 8 | 0 - | 5 | 1 - | 3 | 3 - | 7 | 3 - | 0 | 0 -(10 rows) - -SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude ties), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 4 | 4 | 0 - 2 | 2 | 2 - 1 | 1 | 1 - 6 | 6 | 2 - 9 | 9 | 1 - 8 | 8 | 0 - 5 | 5 | 1 - 3 | 3 | 3 - 7 | 7 | 3 - 0 | 0 | 0 -(10 rows) - -SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude current row), - unique1, four -FROM tenk1 WHERE unique1 < 10; - first_value | unique1 | four 
--------------+---------+------ - 8 | 0 | 0 - 4 | 8 | 0 - 5 | 4 | 0 - 9 | 5 | 1 - 1 | 9 | 1 - 6 | 1 | 1 - 2 | 6 | 2 - 3 | 2 | 2 - 7 | 3 | 3 - | 7 | 3 -(10 rows) - -SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude group), - unique1, four -FROM tenk1 WHERE unique1 < 10; - first_value | unique1 | four --------------+---------+------ - | 0 | 0 - 5 | 8 | 0 - 5 | 4 | 0 - | 5 | 1 - 6 | 9 | 1 - 6 | 1 | 1 - 3 | 6 | 2 - 3 | 2 | 2 - | 3 | 3 - | 7 | 3 -(10 rows) - -SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude ties), - unique1, four -FROM tenk1 WHERE unique1 < 10; - first_value | unique1 | four --------------+---------+------ - 0 | 0 | 0 - 8 | 8 | 0 - 4 | 4 | 0 - 5 | 5 | 1 - 9 | 9 | 1 - 1 | 1 | 1 - 6 | 6 | 2 - 2 | 2 | 2 - 3 | 3 | 3 - 7 | 7 | 3 -(10 rows) - -SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude current row), - unique1, four -FROM tenk1 WHERE unique1 < 10; - last_value | unique1 | four -------------+---------+------ - 4 | 0 | 0 - 5 | 8 | 0 - 9 | 4 | 0 - 1 | 5 | 1 - 6 | 9 | 1 - 2 | 1 | 1 - 3 | 6 | 2 - 7 | 2 | 2 - 7 | 3 | 3 - | 7 | 3 -(10 rows) - -SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude group), - unique1, four -FROM tenk1 WHERE unique1 < 10; - last_value | unique1 | four -------------+---------+------ - | 0 | 0 - 5 | 8 | 0 - 9 | 4 | 0 - | 5 | 1 - 6 | 9 | 1 - 2 | 1 | 1 - 3 | 6 | 2 - 7 | 2 | 2 - | 3 | 3 - | 7 | 3 -(10 rows) - -SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude ties), - unique1, four -FROM tenk1 WHERE unique1 < 10; - last_value | unique1 | four -------------+---------+------ - 0 | 0 | 0 - 5 | 8 | 0 - 9 | 4 | 0 - 5 | 5 | 1 - 6 | 9 | 1 - 2 | 1 | 1 - 3 | 6 | 2 - 7 | 2 | 2 - 3 | 3 | 3 - 7 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (rows between 2 preceding and 1 preceding), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - | 4 | 0 - 4 | 2 | 2 - 6 | 1 | 1 - 3 | 6 | 2 - 7 | 9 | 1 - 15 | 8 | 0 - 17 | 5 | 1 - 13 | 3 | 3 - 8 | 7 | 3 - 10 | 0 | 0 -(10 rows) - -SELECT sum(unique1) over (rows between 1 following and 3 following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 9 | 4 | 0 - 16 | 2 | 2 - 23 | 1 | 1 - 22 | 6 | 2 - 16 | 9 | 1 - 15 | 8 | 0 - 10 | 5 | 1 - 7 | 3 | 3 - 0 | 7 | 3 - | 0 | 0 -(10 rows) - -SELECT sum(unique1) over (rows between unbounded preceding and 1 following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 6 | 4 | 0 - 7 | 2 | 2 - 13 | 1 | 1 - 22 | 6 | 2 - 30 | 9 | 1 - 35 | 8 | 0 - 38 | 5 | 1 - 45 | 3 | 3 - 45 | 7 | 3 - 45 | 0 | 0 -(10 rows) - -SELECT sum(unique1) over (w range between current row and unbounded following), - unique1, four -FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four); - sum | unique1 | four ------+---------+------ - 45 | 0 | 0 - 45 | 8 | 0 - 45 | 4 | 0 - 33 | 5 | 1 - 33 | 9 | 1 - 33 | 1 | 1 - 18 | 6 | 2 - 18 | 2 | 2 - 10 | 3 | 3 - 10 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (w range between unbounded preceding and current row exclude current row), - unique1, four -FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four); - sum | unique1 | four ------+---------+------ - 12 | 0 | 0 - 4 | 8 | 0 - 8 | 4 | 0 - 22 | 5 | 1 - 18 | 9 | 1 - 26 | 1 | 1 - 29 | 6 | 2 - 33 | 2 | 2 - 42 | 3 | 3 - 38 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (w range 
between unbounded preceding and current row exclude group), - unique1, four -FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four); - sum | unique1 | four ------+---------+------ - | 0 | 0 - | 8 | 0 - | 4 | 0 - 12 | 5 | 1 - 12 | 9 | 1 - 12 | 1 | 1 - 27 | 6 | 2 - 27 | 2 | 2 - 35 | 3 | 3 - 35 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (w range between unbounded preceding and current row exclude ties), - unique1, four -FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four); - sum | unique1 | four ------+---------+------ - 0 | 0 | 0 - 8 | 8 | 0 - 4 | 4 | 0 - 17 | 5 | 1 - 21 | 9 | 1 - 13 | 1 | 1 - 33 | 6 | 2 - 29 | 2 | 2 - 38 | 3 | 3 - 42 | 7 | 3 -(10 rows) - -SELECT first_value(unique1) over w, - nth_value(unique1, 2) over w AS nth_2, - last_value(unique1) over w, unique1, four -FROM tenk1 WHERE unique1 < 10 -WINDOW w AS (order by four range between current row and unbounded following); - first_value | nth_2 | last_value | unique1 | four --------------+-------+------------+---------+------ - 0 | 8 | 7 | 0 | 0 - 0 | 8 | 7 | 8 | 0 - 0 | 8 | 7 | 4 | 0 - 5 | 9 | 7 | 5 | 1 - 5 | 9 | 7 | 9 | 1 - 5 | 9 | 7 | 1 | 1 - 6 | 2 | 7 | 6 | 2 - 6 | 2 | 7 | 2 | 2 - 3 | 7 | 7 | 3 | 3 - 3 | 7 | 7 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over - (order by unique1 - rows (SELECT unique1 FROM tenk1 ORDER BY unique1 LIMIT 1) + 1 PRECEDING), - unique1 -FROM tenk1 WHERE unique1 < 10; - sum | unique1 ------+--------- - 0 | 0 - 1 | 1 - 3 | 2 - 5 | 3 - 7 | 4 - 9 | 5 - 11 | 6 - 13 | 7 - 15 | 8 - 17 | 9 -(10 rows) - -CREATE TEMP VIEW v_window AS - SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following) as sum_rows - FROM generate_series(1, 10) i; -SELECT * FROM v_window; - i | sum_rows -----+---------- - 1 | 3 - 2 | 6 - 3 | 9 - 4 | 12 - 5 | 15 - 6 | 18 - 7 | 21 - 8 | 24 - 9 | 27 - 10 | 19 -(10 rows) - -SELECT pg_get_viewdef('v_window'); - pg_get_viewdef ------------------------------------------------------------------------------------ - SELECT i, + - sum(i) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS sum_rows+ - FROM generate_series(1, 10) i(i); -(1 row) - -CREATE OR REPLACE TEMP VIEW v_window AS - SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following - exclude current row) as sum_rows FROM generate_series(1, 10) i; -SELECT * FROM v_window; - i | sum_rows -----+---------- - 1 | 2 - 2 | 4 - 3 | 6 - 4 | 8 - 5 | 10 - 6 | 12 - 7 | 14 - 8 | 16 - 9 | 18 - 10 | 9 -(10 rows) - -SELECT pg_get_viewdef('v_window'); - pg_get_viewdef -------------------------------------------------------------------------------------------------------- - SELECT i, + - sum(i) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE CURRENT ROW) AS sum_rows+ - FROM generate_series(1, 10) i(i); -(1 row) - -CREATE OR REPLACE TEMP VIEW v_window AS - SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following - exclude group) as sum_rows FROM generate_series(1, 10) i; -SELECT * FROM v_window; - i | sum_rows -----+---------- - 1 | 2 - 2 | 4 - 3 | 6 - 4 | 8 - 5 | 10 - 6 | 12 - 7 | 14 - 8 | 16 - 9 | 18 - 10 | 9 -(10 rows) - -SELECT pg_get_viewdef('v_window'); - pg_get_viewdef -------------------------------------------------------------------------------------------------- - SELECT i, + - sum(i) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE GROUP) AS sum_rows+ - FROM generate_series(1, 10) i(i); -(1 row) - -CREATE OR REPLACE TEMP VIEW v_window AS - SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following - exclude ties) as 
sum_rows FROM generate_series(1, 10) i; -SELECT * FROM v_window; - i | sum_rows -----+---------- - 1 | 3 - 2 | 6 - 3 | 9 - 4 | 12 - 5 | 15 - 6 | 18 - 7 | 21 - 8 | 24 - 9 | 27 - 10 | 19 -(10 rows) - -SELECT pg_get_viewdef('v_window'); - pg_get_viewdef ------------------------------------------------------------------------------------------------- - SELECT i, + - sum(i) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE TIES) AS sum_rows+ - FROM generate_series(1, 10) i(i); -(1 row) - -CREATE OR REPLACE TEMP VIEW v_window AS - SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following - exclude no others) as sum_rows FROM generate_series(1, 10) i; -SELECT * FROM v_window; - i | sum_rows -----+---------- - 1 | 3 - 2 | 6 - 3 | 9 - 4 | 12 - 5 | 15 - 6 | 18 - 7 | 21 - 8 | 24 - 9 | 27 - 10 | 19 -(10 rows) - -SELECT pg_get_viewdef('v_window'); - pg_get_viewdef ------------------------------------------------------------------------------------ - SELECT i, + - sum(i) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS sum_rows+ - FROM generate_series(1, 10) i(i); -(1 row) - -CREATE OR REPLACE TEMP VIEW v_window AS - SELECT i, sum(i) over (order by i groups between 1 preceding and 1 following) as sum_rows FROM generate_series(1, 10) i; -SELECT * FROM v_window; - i | sum_rows -----+---------- - 1 | 3 - 2 | 6 - 3 | 9 - 4 | 12 - 5 | 15 - 6 | 18 - 7 | 21 - 8 | 24 - 9 | 27 - 10 | 19 -(10 rows) - -SELECT pg_get_viewdef('v_window'); - pg_get_viewdef -------------------------------------------------------------------------------------- - SELECT i, + - sum(i) OVER (ORDER BY i GROUPS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS sum_rows+ - FROM generate_series(1, 10) i(i); -(1 row) - -DROP VIEW v_window; -CREATE TEMP VIEW v_window AS - SELECT i, min(i) over (order by i range between '1 day' preceding and '10 days' following) as min_i - FROM generate_series(now(), now()+'100 days'::interval, '1 hour') i; -SELECT pg_get_viewdef('v_window'); - pg_get_viewdef ------------------------------------------------------------------------------------------------------------------------ - SELECT i, + - min(i) OVER (ORDER BY i RANGE BETWEEN '@ 1 day'::interval PRECEDING AND '@ 10 days'::interval FOLLOWING) AS min_i+ - FROM generate_series(now(), (now() + '@ 100 days'::interval), '@ 1 hour'::interval) i(i); -(1 row) - --- RANGE offset PRECEDING/FOLLOWING tests -SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - | 0 | 0 - | 8 | 0 - | 4 | 0 - 12 | 5 | 1 - 12 | 9 | 1 - 12 | 1 | 1 - 27 | 6 | 2 - 27 | 2 | 2 - 23 | 3 | 3 - 23 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four desc range between 2::int8 preceding and 1::int2 preceding), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - | 3 | 3 - | 7 | 3 - 10 | 6 | 2 - 10 | 2 | 2 - 18 | 9 | 1 - 18 | 5 | 1 - 18 | 1 | 1 - 23 | 0 | 0 - 23 | 8 | 0 - 23 | 4 | 0 -(10 rows) - -SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude no others), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - | 0 | 0 - | 8 | 0 - | 4 | 0 - 12 | 5 | 1 - 12 | 9 | 1 - 12 | 1 | 1 - 27 | 6 | 2 - 27 | 2 | 2 - 23 | 3 | 3 - 23 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude current row), - unique1, four -FROM tenk1 
WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - | 0 | 0 - | 8 | 0 - | 4 | 0 - 12 | 5 | 1 - 12 | 9 | 1 - 12 | 1 | 1 - 27 | 6 | 2 - 27 | 2 | 2 - 23 | 3 | 3 - 23 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude group), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - | 0 | 0 - | 8 | 0 - | 4 | 0 - 12 | 5 | 1 - 12 | 9 | 1 - 12 | 1 | 1 - 27 | 6 | 2 - 27 | 2 | 2 - 23 | 3 | 3 - 23 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude ties), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - | 0 | 0 - | 8 | 0 - | 4 | 0 - 12 | 5 | 1 - 12 | 9 | 1 - 12 | 1 | 1 - 27 | 6 | 2 - 27 | 2 | 2 - 23 | 3 | 3 - 23 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four range between 2::int8 preceding and 6::int2 following exclude ties), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 33 | 0 | 0 - 41 | 8 | 0 - 37 | 4 | 0 - 35 | 5 | 1 - 39 | 9 | 1 - 31 | 1 | 1 - 43 | 6 | 2 - 39 | 2 | 2 - 26 | 3 | 3 - 30 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four range between 2::int8 preceding and 6::int2 following exclude group), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 33 | 0 | 0 - 33 | 8 | 0 - 33 | 4 | 0 - 30 | 5 | 1 - 30 | 9 | 1 - 30 | 1 | 1 - 37 | 6 | 2 - 37 | 2 | 2 - 23 | 3 | 3 - 23 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (partition by four order by unique1 range between 5::int8 preceding and 6::int2 following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 4 | 0 | 0 - 12 | 4 | 0 - 12 | 8 | 0 - 6 | 1 | 1 - 15 | 5 | 1 - 14 | 9 | 1 - 8 | 2 | 2 - 8 | 6 | 2 - 10 | 3 | 3 - 10 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (partition by four order by unique1 range between 5::int8 preceding and 6::int2 following - exclude current row),unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 4 | 0 | 0 - 8 | 4 | 0 - 4 | 8 | 0 - 5 | 1 | 1 - 10 | 5 | 1 - 5 | 9 | 1 - 6 | 2 | 2 - 2 | 6 | 2 - 7 | 3 | 3 - 3 | 7 | 3 -(10 rows) - -select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following), - salary, enroll_date from empsalary; - sum | salary | enroll_date --------+--------+------------- - 34900 | 5000 | 10-01-2006 - 34900 | 6000 | 10-01-2006 - 38400 | 3900 | 12-23-2006 - 47100 | 4800 | 08-01-2007 - 47100 | 5200 | 08-01-2007 - 47100 | 4800 | 08-08-2007 - 47100 | 5200 | 08-15-2007 - 36100 | 3500 | 12-10-2007 - 32200 | 4500 | 01-01-2008 - 32200 | 4200 | 01-01-2008 -(10 rows) - -select sum(salary) over (order by enroll_date desc range between '1 year'::interval preceding and '1 year'::interval following), - salary, enroll_date from empsalary; - sum | salary | enroll_date --------+--------+------------- - 32200 | 4200 | 01-01-2008 - 32200 | 4500 | 01-01-2008 - 36100 | 3500 | 12-10-2007 - 47100 | 5200 | 08-15-2007 - 47100 | 4800 | 08-08-2007 - 47100 | 4800 | 08-01-2007 - 47100 | 5200 | 08-01-2007 - 38400 | 3900 | 12-23-2006 - 34900 | 5000 | 10-01-2006 - 34900 | 6000 | 10-01-2006 -(10 rows) - -select sum(salary) over (order by enroll_date desc range between '1 year'::interval following and '1 year'::interval following), - salary, enroll_date from empsalary; - sum | salary | enroll_date 
------+--------+------------- - | 4200 | 01-01-2008 - | 4500 | 01-01-2008 - | 3500 | 12-10-2007 - | 5200 | 08-15-2007 - | 4800 | 08-08-2007 - | 4800 | 08-01-2007 - | 5200 | 08-01-2007 - | 3900 | 12-23-2006 - | 5000 | 10-01-2006 - | 6000 | 10-01-2006 -(10 rows) - -select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following - exclude current row), salary, enroll_date from empsalary; - sum | salary | enroll_date --------+--------+------------- - 29900 | 5000 | 10-01-2006 - 28900 | 6000 | 10-01-2006 - 34500 | 3900 | 12-23-2006 - 42300 | 4800 | 08-01-2007 - 41900 | 5200 | 08-01-2007 - 42300 | 4800 | 08-08-2007 - 41900 | 5200 | 08-15-2007 - 32600 | 3500 | 12-10-2007 - 27700 | 4500 | 01-01-2008 - 28000 | 4200 | 01-01-2008 -(10 rows) - -select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following - exclude group), salary, enroll_date from empsalary; - sum | salary | enroll_date --------+--------+------------- - 23900 | 5000 | 10-01-2006 - 23900 | 6000 | 10-01-2006 - 34500 | 3900 | 12-23-2006 - 37100 | 4800 | 08-01-2007 - 37100 | 5200 | 08-01-2007 - 42300 | 4800 | 08-08-2007 - 41900 | 5200 | 08-15-2007 - 32600 | 3500 | 12-10-2007 - 23500 | 4500 | 01-01-2008 - 23500 | 4200 | 01-01-2008 -(10 rows) - -select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following - exclude ties), salary, enroll_date from empsalary; - sum | salary | enroll_date --------+--------+------------- - 28900 | 5000 | 10-01-2006 - 29900 | 6000 | 10-01-2006 - 38400 | 3900 | 12-23-2006 - 41900 | 4800 | 08-01-2007 - 42300 | 5200 | 08-01-2007 - 47100 | 4800 | 08-08-2007 - 47100 | 5200 | 08-15-2007 - 36100 | 3500 | 12-10-2007 - 28000 | 4500 | 01-01-2008 - 27700 | 4200 | 01-01-2008 -(10 rows) - -select first_value(salary) over(order by salary range between 1000 preceding and 1000 following), - lead(salary) over(order by salary range between 1000 preceding and 1000 following), - nth_value(salary, 1) over(order by salary range between 1000 preceding and 1000 following), - salary from empsalary; - first_value | lead | nth_value | salary --------------+------+-----------+-------- - 3500 | 3900 | 3500 | 3500 - 3500 | 4200 | 3500 | 3900 - 3500 | 4500 | 3500 | 4200 - 3500 | 4800 | 3500 | 4500 - 3900 | 4800 | 3900 | 4800 - 3900 | 5000 | 3900 | 4800 - 4200 | 5200 | 4200 | 5000 - 4200 | 5200 | 4200 | 5200 - 4200 | 6000 | 4200 | 5200 - 5000 | | 5000 | 6000 -(10 rows) - -select last_value(salary) over(order by salary range between 1000 preceding and 1000 following), - lag(salary) over(order by salary range between 1000 preceding and 1000 following), - salary from empsalary; - last_value | lag | salary -------------+------+-------- - 4500 | | 3500 - 4800 | 3500 | 3900 - 5200 | 3900 | 4200 - 5200 | 4200 | 4500 - 5200 | 4500 | 4800 - 5200 | 4800 | 4800 - 6000 | 4800 | 5000 - 6000 | 5000 | 5200 - 6000 | 5200 | 5200 - 6000 | 5200 | 6000 -(10 rows) - -select first_value(salary) over(order by salary range between 1000 following and 3000 following - exclude current row), - lead(salary) over(order by salary range between 1000 following and 3000 following exclude ties), - nth_value(salary, 1) over(order by salary range between 1000 following and 3000 following - exclude ties), - salary from empsalary; - first_value | lead | nth_value | salary --------------+------+-----------+-------- - 4500 | 3900 | 4500 | 3500 - 5000 | 4200 | 5000 | 3900 - 5200 | 4500 | 5200 | 4200 - 6000 | 
4800 | 6000 | 4500 - 6000 | 4800 | 6000 | 4800 - 6000 | 5000 | 6000 | 4800 - 6000 | 5200 | 6000 | 5000 - | 5200 | | 5200 - | 6000 | | 5200 - | | | 6000 -(10 rows) - -select last_value(salary) over(order by salary range between 1000 following and 3000 following - exclude group), - lag(salary) over(order by salary range between 1000 following and 3000 following exclude group), - salary from empsalary; - last_value | lag | salary -------------+------+-------- - 6000 | | 3500 - 6000 | 3500 | 3900 - 6000 | 3900 | 4200 - 6000 | 4200 | 4500 - 6000 | 4500 | 4800 - 6000 | 4800 | 4800 - 6000 | 4800 | 5000 - | 5000 | 5200 - | 5200 | 5200 - | 5200 | 6000 -(10 rows) - -select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following - exclude ties), - last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following), - salary, enroll_date from empsalary; - first_value | last_value | salary | enroll_date --------------+------------+--------+------------- - 5000 | 5200 | 5000 | 10-01-2006 - 6000 | 5200 | 6000 | 10-01-2006 - 5000 | 3500 | 3900 | 12-23-2006 - 5000 | 4200 | 4800 | 08-01-2007 - 5000 | 4200 | 5200 | 08-01-2007 - 5000 | 4200 | 4800 | 08-08-2007 - 5000 | 4200 | 5200 | 08-15-2007 - 5000 | 4200 | 3500 | 12-10-2007 - 5000 | 4200 | 4500 | 01-01-2008 - 5000 | 4200 | 4200 | 01-01-2008 -(10 rows) - -select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following - exclude ties), - last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following - exclude ties), - salary, enroll_date from empsalary; - first_value | last_value | salary | enroll_date --------------+------------+--------+------------- - 5000 | 5200 | 5000 | 10-01-2006 - 6000 | 5200 | 6000 | 10-01-2006 - 5000 | 3500 | 3900 | 12-23-2006 - 5000 | 4200 | 4800 | 08-01-2007 - 5000 | 4200 | 5200 | 08-01-2007 - 5000 | 4200 | 4800 | 08-08-2007 - 5000 | 4200 | 5200 | 08-15-2007 - 5000 | 4200 | 3500 | 12-10-2007 - 5000 | 4500 | 4500 | 01-01-2008 - 5000 | 4200 | 4200 | 01-01-2008 -(10 rows) - -select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following - exclude group), - last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following - exclude group), - salary, enroll_date from empsalary; - first_value | last_value | salary | enroll_date --------------+------------+--------+------------- - 3900 | 5200 | 5000 | 10-01-2006 - 3900 | 5200 | 6000 | 10-01-2006 - 5000 | 3500 | 3900 | 12-23-2006 - 5000 | 4200 | 4800 | 08-01-2007 - 5000 | 4200 | 5200 | 08-01-2007 - 5000 | 4200 | 4800 | 08-08-2007 - 5000 | 4200 | 5200 | 08-15-2007 - 5000 | 4200 | 3500 | 12-10-2007 - 5000 | 3500 | 4500 | 01-01-2008 - 5000 | 3500 | 4200 | 01-01-2008 -(10 rows) - -select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following - exclude current row), - last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following - exclude current row), - salary, enroll_date from empsalary; - first_value | last_value | salary | enroll_date --------------+------------+--------+------------- - 6000 | 5200 | 5000 | 10-01-2006 - 5000 | 5200 | 6000 | 10-01-2006 - 5000 | 3500 | 3900 | 12-23-2006 - 5000 | 4200 | 4800 | 08-01-2007 - 5000 | 4200 | 5200 | 08-01-2007 - 5000 | 4200 | 4800 | 
08-08-2007 - 5000 | 4200 | 5200 | 08-15-2007 - 5000 | 4200 | 3500 | 12-10-2007 - 5000 | 4200 | 4500 | 01-01-2008 - 5000 | 4500 | 4200 | 01-01-2008 -(10 rows) - --- RANGE offset PRECEDING/FOLLOWING with null values -select x, y, - first_value(y) over w, - last_value(y) over w -from - (select x, x as y from generate_series(1,5) as x - union all select null, 42 - union all select null, 43) ss -window w as - (order by x asc nulls first range between 2 preceding and 2 following); - x | y | first_value | last_value ----+----+-------------+------------ - | 42 | 42 | 43 - | 43 | 42 | 43 - 1 | 1 | 1 | 3 - 2 | 2 | 1 | 4 - 3 | 3 | 1 | 5 - 4 | 4 | 2 | 5 - 5 | 5 | 3 | 5 -(7 rows) - -select x, y, - first_value(y) over w, - last_value(y) over w -from - (select x, x as y from generate_series(1,5) as x - union all select null, 42 - union all select null, 43) ss -window w as - (order by x asc nulls last range between 2 preceding and 2 following); - x | y | first_value | last_value ----+----+-------------+------------ - 1 | 1 | 1 | 3 - 2 | 2 | 1 | 4 - 3 | 3 | 1 | 5 - 4 | 4 | 2 | 5 - 5 | 5 | 3 | 5 - | 42 | 42 | 43 - | 43 | 42 | 43 -(7 rows) - -select x, y, - first_value(y) over w, - last_value(y) over w -from - (select x, x as y from generate_series(1,5) as x - union all select null, 42 - union all select null, 43) ss -window w as - (order by x desc nulls first range between 2 preceding and 2 following); - x | y | first_value | last_value ----+----+-------------+------------ - | 43 | 43 | 42 - | 42 | 43 | 42 - 5 | 5 | 5 | 3 - 4 | 4 | 5 | 2 - 3 | 3 | 5 | 1 - 2 | 2 | 4 | 1 - 1 | 1 | 3 | 1 -(7 rows) - -select x, y, - first_value(y) over w, - last_value(y) over w -from - (select x, x as y from generate_series(1,5) as x - union all select null, 42 - union all select null, 43) ss -window w as - (order by x desc nulls last range between 2 preceding and 2 following); - x | y | first_value | last_value ----+----+-------------+------------ - 5 | 5 | 5 | 3 - 4 | 4 | 5 | 2 - 3 | 3 | 5 | 1 - 2 | 2 | 4 | 1 - 1 | 1 | 3 | 1 - | 42 | 42 | 43 - | 43 | 42 | 43 -(7 rows) - --- There is a syntactic ambiguity in the SQL standard. Since --- UNBOUNDED is a non-reserved word, it could be the name of a --- function parameter and be used as an expression. There is a --- grammar hack to resolve such cases as the keyword. The following --- tests record this behavior. -CREATE FUNCTION unbounded_syntax_test1a(x int) RETURNS TABLE (a int, b int, c int) -LANGUAGE SQL -BEGIN ATOMIC - SELECT sum(unique1) over (rows between x preceding and x following), - unique1, four - FROM tenk1 WHERE unique1 < 10; -END; -CREATE FUNCTION unbounded_syntax_test1b(x int) RETURNS TABLE (a int, b int, c int) -LANGUAGE SQL -AS $$ - SELECT sum(unique1) over (rows between x preceding and x following), - unique1, four - FROM tenk1 WHERE unique1 < 10; -$$; --- These will apply the argument to the window specification inside the function. 
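As an aside, the grammar hack recorded above is easier to see in isolation. A minimal sketch, assuming a scratch function name frame_sum that is not part of the regression suite: an ordinary parameter name is substituted into the frame clause, whereas a parameter spelled "unbounded" would still be parsed as the keyword, per the behavior these tests record.

-- Hypothetical illustration only; frame_sum does not appear in the suite.
-- Mirrors unbounded_syntax_test1b: the parameter x supplies the ROWS offset.
CREATE FUNCTION frame_sum(x int) RETURNS SETOF bigint
LANGUAGE SQL
AS $$
  SELECT sum(v) OVER (ROWS BETWEEN x PRECEDING AND x FOLLOWING)
  FROM generate_series(1, 5) AS t(v);
$$;
SELECT * FROM frame_sum(1);  -- should yield 3, 6, 9, 12, 9 in input order
-- Renaming the parameter to "unbounded" would not make it the offset: in
-- ROWS BETWEEN unbounded PRECEDING ..., the bare word resolves to the
-- keyword, which is what the unbounded_syntax_test2a/2b calls further below
-- record, while the test1a/1b calls immediately below show the parameter case.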
-SELECT * FROM unbounded_syntax_test1a(2); - a | b | c -----+---+--- - 7 | 4 | 0 - 13 | 2 | 2 - 22 | 1 | 1 - 26 | 6 | 2 - 29 | 9 | 1 - 31 | 8 | 0 - 32 | 5 | 1 - 23 | 3 | 3 - 15 | 7 | 3 - 10 | 0 | 0 -(10 rows) - -SELECT * FROM unbounded_syntax_test1b(2); - a | b | c -----+---+--- - 7 | 4 | 0 - 13 | 2 | 2 - 22 | 1 | 1 - 26 | 6 | 2 - 29 | 9 | 1 - 31 | 8 | 0 - 32 | 5 | 1 - 23 | 3 | 3 - 15 | 7 | 3 - 10 | 0 | 0 -(10 rows) - -CREATE FUNCTION unbounded_syntax_test2a(unbounded int) RETURNS TABLE (a int, b int, c int) -LANGUAGE SQL -BEGIN ATOMIC - SELECT sum(unique1) over (rows between unbounded preceding and unbounded following), - unique1, four - FROM tenk1 WHERE unique1 < 10; -END; -CREATE FUNCTION unbounded_syntax_test2b(unbounded int) RETURNS TABLE (a int, b int, c int) -LANGUAGE SQL -AS $$ - SELECT sum(unique1) over (rows between unbounded preceding and unbounded following), - unique1, four - FROM tenk1 WHERE unique1 < 10; -$$; --- These will not apply the argument but instead treat UNBOUNDED as a keyword. -SELECT * FROM unbounded_syntax_test2a(2); - a | b | c -----+---+--- - 45 | 4 | 0 - 45 | 2 | 2 - 45 | 1 | 1 - 45 | 6 | 2 - 45 | 9 | 1 - 45 | 8 | 0 - 45 | 5 | 1 - 45 | 3 | 3 - 45 | 7 | 3 - 45 | 0 | 0 -(10 rows) - -SELECT * FROM unbounded_syntax_test2b(2); - a | b | c -----+---+--- - 45 | 4 | 0 - 45 | 2 | 2 - 45 | 1 | 1 - 45 | 6 | 2 - 45 | 9 | 1 - 45 | 8 | 0 - 45 | 5 | 1 - 45 | 3 | 3 - 45 | 7 | 3 - 45 | 0 | 0 -(10 rows) - -DROP FUNCTION unbounded_syntax_test1a, unbounded_syntax_test1b, - unbounded_syntax_test2a, unbounded_syntax_test2b; --- Other tests with token UNBOUNDED in potentially problematic position -CREATE FUNCTION unbounded(x int) RETURNS int LANGUAGE SQL IMMUTABLE RETURN x; -SELECT sum(unique1) over (rows between 1 preceding and 1 following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 6 | 4 | 0 - 7 | 2 | 2 - 9 | 1 | 1 - 16 | 6 | 2 - 23 | 9 | 1 - 22 | 8 | 0 - 16 | 5 | 1 - 15 | 3 | 3 - 10 | 7 | 3 - 7 | 0 | 0 -(10 rows) - -SELECT sum(unique1) over (rows between unbounded(1) preceding and unbounded(1) following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 6 | 4 | 0 - 7 | 2 | 2 - 9 | 1 | 1 - 16 | 6 | 2 - 23 | 9 | 1 - 22 | 8 | 0 - 16 | 5 | 1 - 15 | 3 | 3 - 10 | 7 | 3 - 7 | 0 | 0 -(10 rows) - -SELECT sum(unique1) over (rows between unbounded.x preceding and unbounded.x following), - unique1, four -FROM tenk1, (values (1)) as unbounded(x) WHERE unique1 < 10; -ERROR: argument of ROWS must not contain variables -LINE 1: SELECT sum(unique1) over (rows between unbounded.x preceding... 
- ^ -DROP FUNCTION unbounded; --- Check overflow behavior for various integer sizes -select x, last_value(x) over (order by x::smallint range between current row and 2147450884 following) -from generate_series(32764, 32766) x; - x | last_value --------+------------ - 32764 | 32766 - 32765 | 32766 - 32766 | 32766 -(3 rows) - -select x, last_value(x) over (order by x::smallint desc range between current row and 2147450885 following) -from generate_series(-32766, -32764) x; - x | last_value ---------+------------ - -32764 | -32766 - -32765 | -32766 - -32766 | -32766 -(3 rows) - -select x, last_value(x) over (order by x range between current row and 4 following) -from generate_series(2147483644, 2147483646) x; - x | last_value -------------+------------ - 2147483644 | 2147483646 - 2147483645 | 2147483646 - 2147483646 | 2147483646 -(3 rows) - -select x, last_value(x) over (order by x desc range between current row and 5 following) -from generate_series(-2147483646, -2147483644) x; - x | last_value --------------+------------- - -2147483644 | -2147483646 - -2147483645 | -2147483646 - -2147483646 | -2147483646 -(3 rows) - -select x, last_value(x) over (order by x range between current row and 4 following) -from generate_series(9223372036854775804, 9223372036854775806) x; - x | last_value ----------------------+--------------------- - 9223372036854775804 | 9223372036854775806 - 9223372036854775805 | 9223372036854775806 - 9223372036854775806 | 9223372036854775806 -(3 rows) - -select x, last_value(x) over (order by x desc range between current row and 5 following) -from generate_series(-9223372036854775806, -9223372036854775804) x; - x | last_value -----------------------+---------------------- - -9223372036854775804 | -9223372036854775806 - -9223372036854775805 | -9223372036854775806 - -9223372036854775806 | -9223372036854775806 -(3 rows) - --- Test in_range for other numeric datatypes -create temp table numerics( - id int, - f_float4 float4, - f_float8 float8, - f_numeric numeric -); -insert into numerics values -(0, '-infinity', '-infinity', '-infinity'), -(1, -3, -3, -3), -(2, -1, -1, -1), -(3, 0, 0, 0), -(4, 1.1, 1.1, 1.1), -(5, 1.12, 1.12, 1.12), -(6, 2, 2, 2), -(7, 100, 100, 100), -(8, 'infinity', 'infinity', 'infinity'), -(9, 'NaN', 'NaN', 'NaN'); -select id, f_float4, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float4 range between - 1 preceding and 1 following); - id | f_float4 | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 0 - 1 | -3 | 1 | 1 - 2 | -1 | 2 | 3 - 3 | 0 | 2 | 3 - 4 | 1.1 | 4 | 6 - 5 | 1.12 | 4 | 6 - 6 | 2 | 4 | 6 - 7 | 100 | 7 | 7 - 8 | Infinity | 8 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_float4, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float4 range between - 1 preceding and 1.1::float4 following); - id | f_float4 | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 0 - 1 | -3 | 1 | 1 - 2 | -1 | 2 | 3 - 3 | 0 | 2 | 4 - 4 | 1.1 | 4 | 6 - 5 | 1.12 | 4 | 6 - 6 | 2 | 4 | 6 - 7 | 100 | 7 | 7 - 8 | Infinity | 8 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_float4, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float4 range between - 'inf' preceding and 'inf' following); - id | f_float4 | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 8 - 1 | -3 | 0 | 8 - 2 | -1 | 0 | 8 - 3 | 0 | 0 | 8 - 4 | 1.1 | 0 | 8 - 5 | 1.12 | 
0 | 8 - 6 | 2 | 0 | 8 - 7 | 100 | 0 | 8 - 8 | Infinity | 0 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_float4, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float4 range between - 'inf' preceding and 'inf' preceding); - id | f_float4 | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 0 - 1 | -3 | 0 | 0 - 2 | -1 | 0 | 0 - 3 | 0 | 0 | 0 - 4 | 1.1 | 0 | 0 - 5 | 1.12 | 0 | 0 - 6 | 2 | 0 | 0 - 7 | 100 | 0 | 0 - 8 | Infinity | 0 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_float4, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float4 range between - 'inf' following and 'inf' following); - id | f_float4 | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 8 - 1 | -3 | 8 | 8 - 2 | -1 | 8 | 8 - 3 | 0 | 8 | 8 - 4 | 1.1 | 8 | 8 - 5 | 1.12 | 8 | 8 - 6 | 2 | 8 | 8 - 7 | 100 | 8 | 8 - 8 | Infinity | 8 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_float4, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float4 range between - 1.1 preceding and 'NaN' following); -- error, NaN disallowed -ERROR: invalid preceding or following size in window function -select id, f_float8, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float8 range between - 1 preceding and 1 following); - id | f_float8 | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 0 - 1 | -3 | 1 | 1 - 2 | -1 | 2 | 3 - 3 | 0 | 2 | 3 - 4 | 1.1 | 4 | 6 - 5 | 1.12 | 4 | 6 - 6 | 2 | 4 | 6 - 7 | 100 | 7 | 7 - 8 | Infinity | 8 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_float8, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float8 range between - 1 preceding and 1.1::float8 following); - id | f_float8 | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 0 - 1 | -3 | 1 | 1 - 2 | -1 | 2 | 3 - 3 | 0 | 2 | 4 - 4 | 1.1 | 4 | 6 - 5 | 1.12 | 4 | 6 - 6 | 2 | 4 | 6 - 7 | 100 | 7 | 7 - 8 | Infinity | 8 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_float8, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float8 range between - 'inf' preceding and 'inf' following); - id | f_float8 | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 8 - 1 | -3 | 0 | 8 - 2 | -1 | 0 | 8 - 3 | 0 | 0 | 8 - 4 | 1.1 | 0 | 8 - 5 | 1.12 | 0 | 8 - 6 | 2 | 0 | 8 - 7 | 100 | 0 | 8 - 8 | Infinity | 0 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_float8, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float8 range between - 'inf' preceding and 'inf' preceding); - id | f_float8 | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 0 - 1 | -3 | 0 | 0 - 2 | -1 | 0 | 0 - 3 | 0 | 0 | 0 - 4 | 1.1 | 0 | 0 - 5 | 1.12 | 0 | 0 - 6 | 2 | 0 | 0 - 7 | 100 | 0 | 0 - 8 | Infinity | 0 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_float8, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float8 range between - 'inf' following and 'inf' following); - id | f_float8 | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 8 - 1 | -3 | 8 | 8 - 2 | -1 | 8 | 8 - 3 | 0 | 8 | 8 - 4 | 1.1 | 8 | 8 - 5 | 1.12 | 8 | 8 - 6 | 2 | 8 | 8 - 7 | 100 | 8 | 8 - 8 | Infinity | 8 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, 
f_float8, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float8 range between - 1.1 preceding and 'NaN' following); -- error, NaN disallowed -ERROR: invalid preceding or following size in window function -select id, f_numeric, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_numeric range between - 1 preceding and 1 following); - id | f_numeric | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 0 - 1 | -3 | 1 | 1 - 2 | -1 | 2 | 3 - 3 | 0 | 2 | 3 - 4 | 1.1 | 4 | 6 - 5 | 1.12 | 4 | 6 - 6 | 2 | 4 | 6 - 7 | 100 | 7 | 7 - 8 | Infinity | 8 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_numeric, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_numeric range between - 1 preceding and 1.1::numeric following); - id | f_numeric | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 0 - 1 | -3 | 1 | 1 - 2 | -1 | 2 | 3 - 3 | 0 | 2 | 4 - 4 | 1.1 | 4 | 6 - 5 | 1.12 | 4 | 6 - 6 | 2 | 4 | 6 - 7 | 100 | 7 | 7 - 8 | Infinity | 8 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_numeric, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_numeric range between - 1 preceding and 1.1::float8 following); -- currently unsupported -ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type numeric and offset type double precision -LINE 4: 1 preceding and 1.1::float8 following); - ^ -HINT: Cast the offset value to an appropriate type. -select id, f_numeric, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_numeric range between - 'inf' preceding and 'inf' following); - id | f_numeric | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 8 - 1 | -3 | 0 | 8 - 2 | -1 | 0 | 8 - 3 | 0 | 0 | 8 - 4 | 1.1 | 0 | 8 - 5 | 1.12 | 0 | 8 - 6 | 2 | 0 | 8 - 7 | 100 | 0 | 8 - 8 | Infinity | 0 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_numeric, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_numeric range between - 'inf' preceding and 'inf' preceding); - id | f_numeric | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 0 - 1 | -3 | 0 | 0 - 2 | -1 | 0 | 0 - 3 | 0 | 0 | 0 - 4 | 1.1 | 0 | 0 - 5 | 1.12 | 0 | 0 - 6 | 2 | 0 | 0 - 7 | 100 | 0 | 0 - 8 | Infinity | 0 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_numeric, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_numeric range between - 'inf' following and 'inf' following); - id | f_numeric | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 8 - 1 | -3 | 8 | 8 - 2 | -1 | 8 | 8 - 3 | 0 | 8 | 8 - 4 | 1.1 | 8 | 8 - 5 | 1.12 | 8 | 8 - 6 | 2 | 8 | 8 - 7 | 100 | 8 | 8 - 8 | Infinity | 8 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_numeric, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_numeric range between - 1.1 preceding and 'NaN' following); -- error, NaN disallowed -ERROR: invalid preceding or following size in window function --- Test in_range for other datetime datatypes -create temp table datetimes( - id int, - f_time time, - f_timetz timetz, - f_interval interval, - f_timestamptz timestamptz, - f_timestamp timestamp -); -insert into datetimes values -(0, '10:00', '10:00 BST', '-infinity', '-infinity', '-infinity'), -(1, '11:00', '11:00 BST', '1 year', 
'2000-10-19 10:23:54+01', '2000-10-19 10:23:54'), -(2, '12:00', '12:00 BST', '2 years', '2001-10-19 10:23:54+01', '2001-10-19 10:23:54'), -(3, '13:00', '13:00 BST', '3 years', '2001-10-19 10:23:54+01', '2001-10-19 10:23:54'), -(4, '14:00', '14:00 BST', '4 years', '2002-10-19 10:23:54+01', '2002-10-19 10:23:54'), -(5, '15:00', '15:00 BST', '5 years', '2003-10-19 10:23:54+01', '2003-10-19 10:23:54'), -(6, '15:00', '15:00 BST', '5 years', '2004-10-19 10:23:54+01', '2004-10-19 10:23:54'), -(7, '17:00', '17:00 BST', '7 years', '2005-10-19 10:23:54+01', '2005-10-19 10:23:54'), -(8, '18:00', '18:00 BST', '8 years', '2006-10-19 10:23:54+01', '2006-10-19 10:23:54'), -(9, '19:00', '19:00 BST', '9 years', '2007-10-19 10:23:54+01', '2007-10-19 10:23:54'), -(10, '20:00', '20:00 BST', '10 years', '2008-10-19 10:23:54+01', '2008-10-19 10:23:54'), -(11, '21:00', '21:00 BST', 'infinity', 'infinity', 'infinity'); -select id, f_time, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_time range between - '70 min'::interval preceding and '2 hours'::interval following); - id | f_time | first_value | last_value -----+----------+-------------+------------ - 0 | 10:00:00 | 0 | 2 - 1 | 11:00:00 | 0 | 3 - 2 | 12:00:00 | 1 | 4 - 3 | 13:00:00 | 2 | 6 - 4 | 14:00:00 | 3 | 6 - 5 | 15:00:00 | 4 | 7 - 6 | 15:00:00 | 4 | 7 - 7 | 17:00:00 | 7 | 9 - 8 | 18:00:00 | 7 | 10 - 9 | 19:00:00 | 8 | 11 - 10 | 20:00:00 | 9 | 11 - 11 | 21:00:00 | 10 | 11 -(12 rows) - -select id, f_time, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_time desc range between - '70 min' preceding and '2 hours' following); - id | f_time | first_value | last_value -----+----------+-------------+------------ - 11 | 21:00:00 | 11 | 9 - 10 | 20:00:00 | 11 | 8 - 9 | 19:00:00 | 10 | 7 - 8 | 18:00:00 | 9 | 7 - 7 | 17:00:00 | 8 | 5 - 6 | 15:00:00 | 6 | 3 - 5 | 15:00:00 | 6 | 3 - 4 | 14:00:00 | 6 | 2 - 3 | 13:00:00 | 4 | 1 - 2 | 12:00:00 | 3 | 0 - 1 | 11:00:00 | 2 | 0 - 0 | 10:00:00 | 1 | 0 -(12 rows) - -select id, f_time, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_time desc range between - '-70 min' preceding and '2 hours' following); -- error, negative offset disallowed -ERROR: invalid preceding or following size in window function -select id, f_time, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_time range between - 'infinity'::interval preceding and 'infinity'::interval following); - id | f_time | first_value | last_value -----+----------+-------------+------------ - 0 | 10:00:00 | 0 | 11 - 1 | 11:00:00 | 0 | 11 - 2 | 12:00:00 | 0 | 11 - 3 | 13:00:00 | 0 | 11 - 4 | 14:00:00 | 0 | 11 - 5 | 15:00:00 | 0 | 11 - 6 | 15:00:00 | 0 | 11 - 7 | 17:00:00 | 0 | 11 - 8 | 18:00:00 | 0 | 11 - 9 | 19:00:00 | 0 | 11 - 10 | 20:00:00 | 0 | 11 - 11 | 21:00:00 | 0 | 11 -(12 rows) - -select id, f_time, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_time range between - 'infinity'::interval preceding and 'infinity'::interval preceding); - id | f_time | first_value | last_value -----+----------+-------------+------------ - 0 | 10:00:00 | | - 1 | 11:00:00 | | - 2 | 12:00:00 | | - 3 | 13:00:00 | | - 4 | 14:00:00 | | - 5 | 15:00:00 | | - 6 | 15:00:00 | | - 7 | 17:00:00 | | - 8 | 18:00:00 | | - 9 | 19:00:00 | | - 10 | 20:00:00 | | - 11 | 21:00:00 | | -(12 rows) - -select id, f_time, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_time range between - 
'infinity'::interval following and 'infinity'::interval following); - id | f_time | first_value | last_value -----+----------+-------------+------------ - 0 | 10:00:00 | | - 1 | 11:00:00 | | - 2 | 12:00:00 | | - 3 | 13:00:00 | | - 4 | 14:00:00 | | - 5 | 15:00:00 | | - 6 | 15:00:00 | | - 7 | 17:00:00 | | - 8 | 18:00:00 | | - 9 | 19:00:00 | | - 10 | 20:00:00 | | - 11 | 21:00:00 | | -(12 rows) - -select id, f_time, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_time range between - '-infinity'::interval following and - 'infinity'::interval following); -- error, negative offset disallowed -ERROR: invalid preceding or following size in window function -select id, f_timetz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timetz range between - '70 min'::interval preceding and '2 hours'::interval following); - id | f_timetz | first_value | last_value -----+-------------+-------------+------------ - 0 | 10:00:00+01 | 0 | 2 - 1 | 11:00:00+01 | 0 | 3 - 2 | 12:00:00+01 | 1 | 4 - 3 | 13:00:00+01 | 2 | 6 - 4 | 14:00:00+01 | 3 | 6 - 5 | 15:00:00+01 | 4 | 7 - 6 | 15:00:00+01 | 4 | 7 - 7 | 17:00:00+01 | 7 | 9 - 8 | 18:00:00+01 | 7 | 10 - 9 | 19:00:00+01 | 8 | 11 - 10 | 20:00:00+01 | 9 | 11 - 11 | 21:00:00+01 | 10 | 11 -(12 rows) - -select id, f_timetz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timetz desc range between - '70 min' preceding and '2 hours' following); - id | f_timetz | first_value | last_value -----+-------------+-------------+------------ - 11 | 21:00:00+01 | 11 | 9 - 10 | 20:00:00+01 | 11 | 8 - 9 | 19:00:00+01 | 10 | 7 - 8 | 18:00:00+01 | 9 | 7 - 7 | 17:00:00+01 | 8 | 5 - 6 | 15:00:00+01 | 6 | 3 - 5 | 15:00:00+01 | 6 | 3 - 4 | 14:00:00+01 | 6 | 2 - 3 | 13:00:00+01 | 4 | 1 - 2 | 12:00:00+01 | 3 | 0 - 1 | 11:00:00+01 | 2 | 0 - 0 | 10:00:00+01 | 1 | 0 -(12 rows) - -select id, f_timetz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timetz desc range between - '70 min' preceding and '-2 hours' following); -- error, negative offset disallowed -ERROR: invalid preceding or following size in window function -select id, f_timetz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timetz range between - 'infinity'::interval preceding and 'infinity'::interval following); - id | f_timetz | first_value | last_value -----+-------------+-------------+------------ - 0 | 10:00:00+01 | 0 | 11 - 1 | 11:00:00+01 | 0 | 11 - 2 | 12:00:00+01 | 0 | 11 - 3 | 13:00:00+01 | 0 | 11 - 4 | 14:00:00+01 | 0 | 11 - 5 | 15:00:00+01 | 0 | 11 - 6 | 15:00:00+01 | 0 | 11 - 7 | 17:00:00+01 | 0 | 11 - 8 | 18:00:00+01 | 0 | 11 - 9 | 19:00:00+01 | 0 | 11 - 10 | 20:00:00+01 | 0 | 11 - 11 | 21:00:00+01 | 0 | 11 -(12 rows) - -select id, f_timetz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timetz range between - 'infinity'::interval preceding and 'infinity'::interval preceding); - id | f_timetz | first_value | last_value -----+-------------+-------------+------------ - 0 | 10:00:00+01 | | - 1 | 11:00:00+01 | | - 2 | 12:00:00+01 | | - 3 | 13:00:00+01 | | - 4 | 14:00:00+01 | | - 5 | 15:00:00+01 | | - 6 | 15:00:00+01 | | - 7 | 17:00:00+01 | | - 8 | 18:00:00+01 | | - 9 | 19:00:00+01 | | - 10 | 20:00:00+01 | | - 11 | 21:00:00+01 | | -(12 rows) - -select id, f_timetz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timetz range between - 'infinity'::interval 
following and 'infinity'::interval following); - id | f_timetz | first_value | last_value -----+-------------+-------------+------------ - 0 | 10:00:00+01 | | - 1 | 11:00:00+01 | | - 2 | 12:00:00+01 | | - 3 | 13:00:00+01 | | - 4 | 14:00:00+01 | | - 5 | 15:00:00+01 | | - 6 | 15:00:00+01 | | - 7 | 17:00:00+01 | | - 8 | 18:00:00+01 | | - 9 | 19:00:00+01 | | - 10 | 20:00:00+01 | | - 11 | 21:00:00+01 | | -(12 rows) - -select id, f_timetz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timetz range between - 'infinity'::interval following and - '-infinity'::interval following); -- error, negative offset disallowed -ERROR: invalid preceding or following size in window function -select id, f_interval, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_interval range between - '1 year'::interval preceding and '1 year'::interval following); - id | f_interval | first_value | last_value -----+------------+-------------+------------ - 0 | -infinity | 0 | 0 - 1 | @ 1 year | 1 | 2 - 2 | @ 2 years | 1 | 3 - 3 | @ 3 years | 2 | 4 - 4 | @ 4 years | 3 | 6 - 5 | @ 5 years | 4 | 6 - 6 | @ 5 years | 4 | 6 - 7 | @ 7 years | 7 | 8 - 8 | @ 8 years | 7 | 9 - 9 | @ 9 years | 8 | 10 - 10 | @ 10 years | 9 | 10 - 11 | infinity | 11 | 11 -(12 rows) - -select id, f_interval, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_interval desc range between - '1 year' preceding and '1 year' following); - id | f_interval | first_value | last_value -----+------------+-------------+------------ - 11 | infinity | 11 | 11 - 10 | @ 10 years | 10 | 9 - 9 | @ 9 years | 10 | 8 - 8 | @ 8 years | 9 | 7 - 7 | @ 7 years | 8 | 7 - 6 | @ 5 years | 6 | 4 - 5 | @ 5 years | 6 | 4 - 4 | @ 4 years | 6 | 3 - 3 | @ 3 years | 4 | 2 - 2 | @ 2 years | 3 | 1 - 1 | @ 1 year | 2 | 1 - 0 | -infinity | 0 | 0 -(12 rows) - -select id, f_interval, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_interval desc range between - '-1 year' preceding and '1 year' following); -- error, negative offset disallowed -ERROR: invalid preceding or following size in window function -select id, f_interval, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_interval range between - 'infinity'::interval preceding and 'infinity'::interval following); - id | f_interval | first_value | last_value -----+------------+-------------+------------ - 0 | -infinity | 0 | 11 - 1 | @ 1 year | 0 | 11 - 2 | @ 2 years | 0 | 11 - 3 | @ 3 years | 0 | 11 - 4 | @ 4 years | 0 | 11 - 5 | @ 5 years | 0 | 11 - 6 | @ 5 years | 0 | 11 - 7 | @ 7 years | 0 | 11 - 8 | @ 8 years | 0 | 11 - 9 | @ 9 years | 0 | 11 - 10 | @ 10 years | 0 | 11 - 11 | infinity | 0 | 11 -(12 rows) - -select id, f_interval, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_interval range between - 'infinity'::interval preceding and 'infinity'::interval preceding); - id | f_interval | first_value | last_value -----+------------+-------------+------------ - 0 | -infinity | 0 | 0 - 1 | @ 1 year | 0 | 0 - 2 | @ 2 years | 0 | 0 - 3 | @ 3 years | 0 | 0 - 4 | @ 4 years | 0 | 0 - 5 | @ 5 years | 0 | 0 - 6 | @ 5 years | 0 | 0 - 7 | @ 7 years | 0 | 0 - 8 | @ 8 years | 0 | 0 - 9 | @ 9 years | 0 | 0 - 10 | @ 10 years | 0 | 0 - 11 | infinity | 0 | 11 -(12 rows) - -select id, f_interval, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_interval range between - 'infinity'::interval 
following and 'infinity'::interval following); - id | f_interval | first_value | last_value -----+------------+-------------+------------ - 0 | -infinity | 0 | 11 - 1 | @ 1 year | 11 | 11 - 2 | @ 2 years | 11 | 11 - 3 | @ 3 years | 11 | 11 - 4 | @ 4 years | 11 | 11 - 5 | @ 5 years | 11 | 11 - 6 | @ 5 years | 11 | 11 - 7 | @ 7 years | 11 | 11 - 8 | @ 8 years | 11 | 11 - 9 | @ 9 years | 11 | 11 - 10 | @ 10 years | 11 | 11 - 11 | infinity | 11 | 11 -(12 rows) - -select id, f_interval, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_interval range between - '-infinity'::interval following and - 'infinity'::interval following); -- error, negative offset disallowed -ERROR: invalid preceding or following size in window function -select id, f_timestamptz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamptz range between - '1 year'::interval preceding and '1 year'::interval following); - id | f_timestamptz | first_value | last_value -----+------------------------------+-------------+------------ - 0 | -infinity | 0 | 0 - 1 | Thu Oct 19 02:23:54 2000 PDT | 1 | 3 - 2 | Fri Oct 19 02:23:54 2001 PDT | 1 | 4 - 3 | Fri Oct 19 02:23:54 2001 PDT | 1 | 4 - 4 | Sat Oct 19 02:23:54 2002 PDT | 2 | 5 - 5 | Sun Oct 19 02:23:54 2003 PDT | 4 | 6 - 6 | Tue Oct 19 02:23:54 2004 PDT | 5 | 7 - 7 | Wed Oct 19 02:23:54 2005 PDT | 6 | 8 - 8 | Thu Oct 19 02:23:54 2006 PDT | 7 | 9 - 9 | Fri Oct 19 02:23:54 2007 PDT | 8 | 10 - 10 | Sun Oct 19 02:23:54 2008 PDT | 9 | 10 - 11 | infinity | 11 | 11 -(12 rows) - -select id, f_timestamptz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamptz desc range between - '1 year' preceding and '1 year' following); - id | f_timestamptz | first_value | last_value -----+------------------------------+-------------+------------ - 11 | infinity | 11 | 11 - 10 | Sun Oct 19 02:23:54 2008 PDT | 10 | 9 - 9 | Fri Oct 19 02:23:54 2007 PDT | 10 | 8 - 8 | Thu Oct 19 02:23:54 2006 PDT | 9 | 7 - 7 | Wed Oct 19 02:23:54 2005 PDT | 8 | 6 - 6 | Tue Oct 19 02:23:54 2004 PDT | 7 | 5 - 5 | Sun Oct 19 02:23:54 2003 PDT | 6 | 4 - 4 | Sat Oct 19 02:23:54 2002 PDT | 5 | 2 - 3 | Fri Oct 19 02:23:54 2001 PDT | 4 | 1 - 2 | Fri Oct 19 02:23:54 2001 PDT | 4 | 1 - 1 | Thu Oct 19 02:23:54 2000 PDT | 3 | 1 - 0 | -infinity | 0 | 0 -(12 rows) - -select id, f_timestamptz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamptz desc range between - '1 year' preceding and '-1 year' following); -- error, negative offset disallowed -ERROR: invalid preceding or following size in window function -select id, f_timestamptz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamptz range between - 'infinity'::interval preceding and 'infinity'::interval following); - id | f_timestamptz | first_value | last_value -----+------------------------------+-------------+------------ - 0 | -infinity | 0 | 11 - 1 | Thu Oct 19 02:23:54 2000 PDT | 0 | 11 - 2 | Fri Oct 19 02:23:54 2001 PDT | 0 | 11 - 3 | Fri Oct 19 02:23:54 2001 PDT | 0 | 11 - 4 | Sat Oct 19 02:23:54 2002 PDT | 0 | 11 - 5 | Sun Oct 19 02:23:54 2003 PDT | 0 | 11 - 6 | Tue Oct 19 02:23:54 2004 PDT | 0 | 11 - 7 | Wed Oct 19 02:23:54 2005 PDT | 0 | 11 - 8 | Thu Oct 19 02:23:54 2006 PDT | 0 | 11 - 9 | Fri Oct 19 02:23:54 2007 PDT | 0 | 11 - 10 | Sun Oct 19 02:23:54 2008 PDT | 0 | 11 - 11 | infinity | 0 | 11 -(12 rows) - -select id, f_timestamptz, first_value(id) over w, 
last_value(id) over w -from datetimes -window w as (order by f_timestamptz range between - 'infinity'::interval preceding and 'infinity'::interval preceding); - id | f_timestamptz | first_value | last_value -----+------------------------------+-------------+------------ - 0 | -infinity | 0 | 0 - 1 | Thu Oct 19 02:23:54 2000 PDT | 0 | 0 - 2 | Fri Oct 19 02:23:54 2001 PDT | 0 | 0 - 3 | Fri Oct 19 02:23:54 2001 PDT | 0 | 0 - 4 | Sat Oct 19 02:23:54 2002 PDT | 0 | 0 - 5 | Sun Oct 19 02:23:54 2003 PDT | 0 | 0 - 6 | Tue Oct 19 02:23:54 2004 PDT | 0 | 0 - 7 | Wed Oct 19 02:23:54 2005 PDT | 0 | 0 - 8 | Thu Oct 19 02:23:54 2006 PDT | 0 | 0 - 9 | Fri Oct 19 02:23:54 2007 PDT | 0 | 0 - 10 | Sun Oct 19 02:23:54 2008 PDT | 0 | 0 - 11 | infinity | 0 | 11 -(12 rows) - -select id, f_timestamptz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamptz range between - 'infinity'::interval following and 'infinity'::interval following); - id | f_timestamptz | first_value | last_value -----+------------------------------+-------------+------------ - 0 | -infinity | 0 | 11 - 1 | Thu Oct 19 02:23:54 2000 PDT | 11 | 11 - 2 | Fri Oct 19 02:23:54 2001 PDT | 11 | 11 - 3 | Fri Oct 19 02:23:54 2001 PDT | 11 | 11 - 4 | Sat Oct 19 02:23:54 2002 PDT | 11 | 11 - 5 | Sun Oct 19 02:23:54 2003 PDT | 11 | 11 - 6 | Tue Oct 19 02:23:54 2004 PDT | 11 | 11 - 7 | Wed Oct 19 02:23:54 2005 PDT | 11 | 11 - 8 | Thu Oct 19 02:23:54 2006 PDT | 11 | 11 - 9 | Fri Oct 19 02:23:54 2007 PDT | 11 | 11 - 10 | Sun Oct 19 02:23:54 2008 PDT | 11 | 11 - 11 | infinity | 11 | 11 -(12 rows) - -select id, f_timestamptz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamptz range between - '-infinity'::interval following and - 'infinity'::interval following); -- error, negative offset disallowed -ERROR: invalid preceding or following size in window function -select id, f_timestamp, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamp range between - '1 year'::interval preceding and '1 year'::interval following); - id | f_timestamp | first_value | last_value -----+--------------------------+-------------+------------ - 0 | -infinity | 0 | 0 - 1 | Thu Oct 19 10:23:54 2000 | 1 | 3 - 2 | Fri Oct 19 10:23:54 2001 | 1 | 4 - 3 | Fri Oct 19 10:23:54 2001 | 1 | 4 - 4 | Sat Oct 19 10:23:54 2002 | 2 | 5 - 5 | Sun Oct 19 10:23:54 2003 | 4 | 6 - 6 | Tue Oct 19 10:23:54 2004 | 5 | 7 - 7 | Wed Oct 19 10:23:54 2005 | 6 | 8 - 8 | Thu Oct 19 10:23:54 2006 | 7 | 9 - 9 | Fri Oct 19 10:23:54 2007 | 8 | 10 - 10 | Sun Oct 19 10:23:54 2008 | 9 | 10 - 11 | infinity | 11 | 11 -(12 rows) - -select id, f_timestamp, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamp desc range between - '1 year' preceding and '1 year' following); - id | f_timestamp | first_value | last_value -----+--------------------------+-------------+------------ - 11 | infinity | 11 | 11 - 10 | Sun Oct 19 10:23:54 2008 | 10 | 9 - 9 | Fri Oct 19 10:23:54 2007 | 10 | 8 - 8 | Thu Oct 19 10:23:54 2006 | 9 | 7 - 7 | Wed Oct 19 10:23:54 2005 | 8 | 6 - 6 | Tue Oct 19 10:23:54 2004 | 7 | 5 - 5 | Sun Oct 19 10:23:54 2003 | 6 | 4 - 4 | Sat Oct 19 10:23:54 2002 | 5 | 2 - 3 | Fri Oct 19 10:23:54 2001 | 4 | 1 - 2 | Fri Oct 19 10:23:54 2001 | 4 | 1 - 1 | Thu Oct 19 10:23:54 2000 | 3 | 1 - 0 | -infinity | 0 | 0 -(12 rows) - -select id, f_timestamp, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamp desc 
range between - '-1 year' preceding and '1 year' following); -- error, negative offset disallowed -ERROR: invalid preceding or following size in window function -select id, f_timestamp, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamp range between - 'infinity'::interval preceding and 'infinity'::interval following); - id | f_timestamp | first_value | last_value -----+--------------------------+-------------+------------ - 0 | -infinity | 0 | 11 - 1 | Thu Oct 19 10:23:54 2000 | 0 | 11 - 2 | Fri Oct 19 10:23:54 2001 | 0 | 11 - 3 | Fri Oct 19 10:23:54 2001 | 0 | 11 - 4 | Sat Oct 19 10:23:54 2002 | 0 | 11 - 5 | Sun Oct 19 10:23:54 2003 | 0 | 11 - 6 | Tue Oct 19 10:23:54 2004 | 0 | 11 - 7 | Wed Oct 19 10:23:54 2005 | 0 | 11 - 8 | Thu Oct 19 10:23:54 2006 | 0 | 11 - 9 | Fri Oct 19 10:23:54 2007 | 0 | 11 - 10 | Sun Oct 19 10:23:54 2008 | 0 | 11 - 11 | infinity | 0 | 11 -(12 rows) - -select id, f_timestamp, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamp range between - 'infinity'::interval preceding and 'infinity'::interval preceding); - id | f_timestamp | first_value | last_value -----+--------------------------+-------------+------------ - 0 | -infinity | 0 | 0 - 1 | Thu Oct 19 10:23:54 2000 | 0 | 0 - 2 | Fri Oct 19 10:23:54 2001 | 0 | 0 - 3 | Fri Oct 19 10:23:54 2001 | 0 | 0 - 4 | Sat Oct 19 10:23:54 2002 | 0 | 0 - 5 | Sun Oct 19 10:23:54 2003 | 0 | 0 - 6 | Tue Oct 19 10:23:54 2004 | 0 | 0 - 7 | Wed Oct 19 10:23:54 2005 | 0 | 0 - 8 | Thu Oct 19 10:23:54 2006 | 0 | 0 - 9 | Fri Oct 19 10:23:54 2007 | 0 | 0 - 10 | Sun Oct 19 10:23:54 2008 | 0 | 0 - 11 | infinity | 0 | 11 -(12 rows) - -select id, f_timestamp, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamp range between - 'infinity'::interval following and 'infinity'::interval following); - id | f_timestamp | first_value | last_value -----+--------------------------+-------------+------------ - 0 | -infinity | 0 | 11 - 1 | Thu Oct 19 10:23:54 2000 | 11 | 11 - 2 | Fri Oct 19 10:23:54 2001 | 11 | 11 - 3 | Fri Oct 19 10:23:54 2001 | 11 | 11 - 4 | Sat Oct 19 10:23:54 2002 | 11 | 11 - 5 | Sun Oct 19 10:23:54 2003 | 11 | 11 - 6 | Tue Oct 19 10:23:54 2004 | 11 | 11 - 7 | Wed Oct 19 10:23:54 2005 | 11 | 11 - 8 | Thu Oct 19 10:23:54 2006 | 11 | 11 - 9 | Fri Oct 19 10:23:54 2007 | 11 | 11 - 10 | Sun Oct 19 10:23:54 2008 | 11 | 11 - 11 | infinity | 11 | 11 -(12 rows) - -select id, f_timestamp, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamp range between - '-infinity'::interval following and - 'infinity'::interval following); -- error, negative offset disallowed -ERROR: invalid preceding or following size in window function --- RANGE offset PRECEDING/FOLLOWING error cases -select sum(salary) over (order by enroll_date, salary range between '1 year'::interval preceding and '2 years'::interval following - exclude ties), salary, enroll_date from empsalary; -ERROR: RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column -LINE 1: select sum(salary) over (order by enroll_date, salary range ... - ^ -select sum(salary) over (range between '1 year'::interval preceding and '2 years'::interval following - exclude ties), salary, enroll_date from empsalary; -ERROR: RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column -LINE 1: select sum(salary) over (range between '1 year'::interval pr... 
- ^ -select sum(salary) over (order by depname range between '1 year'::interval preceding and '2 years'::interval following - exclude ties), salary, enroll_date from empsalary; -ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type text -LINE 1: ... sum(salary) over (order by depname range between '1 year'::... - ^ -select max(enroll_date) over (order by enroll_date range between 1 preceding and 2 following - exclude ties), salary, enroll_date from empsalary; -ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type date and offset type integer -LINE 1: ...ll_date) over (order by enroll_date range between 1 precedin... - ^ -HINT: Cast the offset value to an appropriate type. -select max(enroll_date) over (order by salary range between -1 preceding and 2 following - exclude ties), salary, enroll_date from empsalary; -ERROR: invalid preceding or following size in window function -select max(enroll_date) over (order by salary range between 1 preceding and -2 following - exclude ties), salary, enroll_date from empsalary; -ERROR: invalid preceding or following size in window function -select max(enroll_date) over (order by salary range between '1 year'::interval preceding and '2 years'::interval following - exclude ties), salary, enroll_date from empsalary; -ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type integer and offset type interval -LINE 1: ...(enroll_date) over (order by salary range between '1 year'::... - ^ -HINT: Cast the offset value to an appropriate type. -select max(enroll_date) over (order by enroll_date range between '1 year'::interval preceding and '-2 years'::interval following - exclude ties), salary, enroll_date from empsalary; -ERROR: invalid preceding or following size in window function --- GROUPS tests -SELECT sum(unique1) over (order by four groups between unbounded preceding and current row), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 12 | 0 | 0 - 12 | 8 | 0 - 12 | 4 | 0 - 27 | 5 | 1 - 27 | 9 | 1 - 27 | 1 | 1 - 35 | 6 | 2 - 35 | 2 | 2 - 45 | 3 | 3 - 45 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between unbounded preceding and unbounded following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 45 | 0 | 0 - 45 | 8 | 0 - 45 | 4 | 0 - 45 | 5 | 1 - 45 | 9 | 1 - 45 | 1 | 1 - 45 | 6 | 2 - 45 | 2 | 2 - 45 | 3 | 3 - 45 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between current row and unbounded following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 45 | 0 | 0 - 45 | 8 | 0 - 45 | 4 | 0 - 33 | 5 | 1 - 33 | 9 | 1 - 33 | 1 | 1 - 18 | 6 | 2 - 18 | 2 | 2 - 10 | 3 | 3 - 10 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between 1 preceding and unbounded following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 45 | 0 | 0 - 45 | 8 | 0 - 45 | 4 | 0 - 45 | 5 | 1 - 45 | 9 | 1 - 45 | 1 | 1 - 33 | 6 | 2 - 33 | 2 | 2 - 18 | 3 | 3 - 18 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between 1 following and unbounded following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 33 | 0 | 0 - 33 | 8 | 0 - 33 | 4 | 0 - 18 | 5 | 1 - 18 | 9 | 1 - 18 | 1 | 1 - 10 | 6 | 2 - 10 | 2 | 2 - | 3 | 3 - | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between unbounded 
preceding and 2 following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 35 | 0 | 0 - 35 | 8 | 0 - 35 | 4 | 0 - 45 | 5 | 1 - 45 | 9 | 1 - 45 | 1 | 1 - 45 | 6 | 2 - 45 | 2 | 2 - 45 | 3 | 3 - 45 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between 2 preceding and 1 preceding), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - | 0 | 0 - | 8 | 0 - | 4 | 0 - 12 | 5 | 1 - 12 | 9 | 1 - 12 | 1 | 1 - 27 | 6 | 2 - 27 | 2 | 2 - 23 | 3 | 3 - 23 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 27 | 0 | 0 - 27 | 8 | 0 - 27 | 4 | 0 - 35 | 5 | 1 - 35 | 9 | 1 - 35 | 1 | 1 - 45 | 6 | 2 - 45 | 2 | 2 - 33 | 3 | 3 - 33 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between 0 preceding and 0 following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 12 | 0 | 0 - 12 | 8 | 0 - 12 | 4 | 0 - 15 | 5 | 1 - 15 | 9 | 1 - 15 | 1 | 1 - 8 | 6 | 2 - 8 | 2 | 2 - 10 | 3 | 3 - 10 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following - exclude current row), unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 27 | 0 | 0 - 19 | 8 | 0 - 23 | 4 | 0 - 30 | 5 | 1 - 26 | 9 | 1 - 34 | 1 | 1 - 39 | 6 | 2 - 43 | 2 | 2 - 30 | 3 | 3 - 26 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following - exclude group), unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 15 | 0 | 0 - 15 | 8 | 0 - 15 | 4 | 0 - 20 | 5 | 1 - 20 | 9 | 1 - 20 | 1 | 1 - 37 | 6 | 2 - 37 | 2 | 2 - 23 | 3 | 3 - 23 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following - exclude ties), unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 15 | 0 | 0 - 23 | 8 | 0 - 19 | 4 | 0 - 25 | 5 | 1 - 29 | 9 | 1 - 21 | 1 | 1 - 43 | 6 | 2 - 39 | 2 | 2 - 26 | 3 | 3 - 30 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (partition by ten - order by four groups between 0 preceding and 0 following),unique1, four, ten -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four | ten ------+---------+------+----- - 0 | 0 | 0 | 0 - 1 | 1 | 1 | 1 - 2 | 2 | 2 | 2 - 3 | 3 | 3 | 3 - 4 | 4 | 0 | 4 - 5 | 5 | 1 | 5 - 6 | 6 | 2 | 6 - 7 | 7 | 3 | 7 - 8 | 8 | 0 | 8 - 9 | 9 | 1 | 9 -(10 rows) - -SELECT sum(unique1) over (partition by ten - order by four groups between 0 preceding and 0 following exclude current row), unique1, four, ten -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four | ten ------+---------+------+----- - | 0 | 0 | 0 - | 1 | 1 | 1 - | 2 | 2 | 2 - | 3 | 3 | 3 - | 4 | 0 | 4 - | 5 | 1 | 5 - | 6 | 2 | 6 - | 7 | 3 | 7 - | 8 | 0 | 8 - | 9 | 1 | 9 -(10 rows) - -SELECT sum(unique1) over (partition by ten - order by four groups between 0 preceding and 0 following exclude group), unique1, four, ten -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four | ten ------+---------+------+----- - | 0 | 0 | 0 - | 1 | 1 | 1 - | 2 | 2 | 2 - | 3 | 3 | 3 - | 4 | 0 | 4 - | 5 | 1 | 5 - | 6 | 2 | 6 - | 7 | 3 | 7 - | 8 | 0 | 8 - | 9 | 1 | 9 -(10 rows) - -SELECT sum(unique1) over (partition by ten - order by four groups between 0 preceding and 0 following exclude ties), unique1, four, ten -FROM tenk1 
WHERE unique1 < 10; - sum | unique1 | four | ten ------+---------+------+----- - 0 | 0 | 0 | 0 - 1 | 1 | 1 | 1 - 2 | 2 | 2 | 2 - 3 | 3 | 3 | 3 - 4 | 4 | 0 | 4 - 5 | 5 | 1 | 5 - 6 | 6 | 2 | 6 - 7 | 7 | 3 | 7 - 8 | 8 | 0 | 8 - 9 | 9 | 1 | 9 -(10 rows) - -select first_value(salary) over(order by enroll_date groups between 1 preceding and 1 following), - lead(salary) over(order by enroll_date groups between 1 preceding and 1 following), - nth_value(salary, 1) over(order by enroll_date groups between 1 preceding and 1 following), - salary, enroll_date from empsalary; - first_value | lead | nth_value | salary | enroll_date --------------+------+-----------+--------+------------- - 5000 | 6000 | 5000 | 5000 | 10-01-2006 - 5000 | 3900 | 5000 | 6000 | 10-01-2006 - 5000 | 4800 | 5000 | 3900 | 12-23-2006 - 3900 | 5200 | 3900 | 4800 | 08-01-2007 - 3900 | 4800 | 3900 | 5200 | 08-01-2007 - 4800 | 5200 | 4800 | 4800 | 08-08-2007 - 4800 | 3500 | 4800 | 5200 | 08-15-2007 - 5200 | 4500 | 5200 | 3500 | 12-10-2007 - 3500 | 4200 | 3500 | 4500 | 01-01-2008 - 3500 | | 3500 | 4200 | 01-01-2008 -(10 rows) - -select last_value(salary) over(order by enroll_date groups between 1 preceding and 1 following), - lag(salary) over(order by enroll_date groups between 1 preceding and 1 following), - salary, enroll_date from empsalary; - last_value | lag | salary | enroll_date -------------+------+--------+------------- - 3900 | | 5000 | 10-01-2006 - 3900 | 5000 | 6000 | 10-01-2006 - 5200 | 6000 | 3900 | 12-23-2006 - 4800 | 3900 | 4800 | 08-01-2007 - 4800 | 4800 | 5200 | 08-01-2007 - 5200 | 5200 | 4800 | 08-08-2007 - 3500 | 4800 | 5200 | 08-15-2007 - 4200 | 5200 | 3500 | 12-10-2007 - 4200 | 3500 | 4500 | 01-01-2008 - 4200 | 4500 | 4200 | 01-01-2008 -(10 rows) - -select first_value(salary) over(order by enroll_date groups between 1 following and 3 following - exclude current row), - lead(salary) over(order by enroll_date groups between 1 following and 3 following exclude ties), - nth_value(salary, 1) over(order by enroll_date groups between 1 following and 3 following - exclude ties), - salary, enroll_date from empsalary; - first_value | lead | nth_value | salary | enroll_date --------------+------+-----------+--------+------------- - 3900 | 6000 | 3900 | 5000 | 10-01-2006 - 3900 | 3900 | 3900 | 6000 | 10-01-2006 - 4800 | 4800 | 4800 | 3900 | 12-23-2006 - 4800 | 5200 | 4800 | 4800 | 08-01-2007 - 4800 | 4800 | 4800 | 5200 | 08-01-2007 - 5200 | 5200 | 5200 | 4800 | 08-08-2007 - 3500 | 3500 | 3500 | 5200 | 08-15-2007 - 4500 | 4500 | 4500 | 3500 | 12-10-2007 - | 4200 | | 4500 | 01-01-2008 - | | | 4200 | 01-01-2008 -(10 rows) - -select last_value(salary) over(order by enroll_date groups between 1 following and 3 following - exclude group), - lag(salary) over(order by enroll_date groups between 1 following and 3 following exclude group), - salary, enroll_date from empsalary; - last_value | lag | salary | enroll_date -------------+------+--------+------------- - 4800 | | 5000 | 10-01-2006 - 4800 | 5000 | 6000 | 10-01-2006 - 5200 | 6000 | 3900 | 12-23-2006 - 3500 | 3900 | 4800 | 08-01-2007 - 3500 | 4800 | 5200 | 08-01-2007 - 4200 | 5200 | 4800 | 08-08-2007 - 4200 | 4800 | 5200 | 08-15-2007 - 4200 | 5200 | 3500 | 12-10-2007 - | 3500 | 4500 | 01-01-2008 - | 4500 | 4200 | 01-01-2008 -(10 rows) - --- Show differences in offset interpretation between ROWS, RANGE, and GROUPS -WITH cte (x) AS ( - SELECT * FROM generate_series(1, 35, 2) -) -SELECT x, (sum(x) over w) -FROM cte -WINDOW w AS (ORDER BY x rows between 1 preceding and 1 following); 
- x | sum -----+----- - 1 | 4 - 3 | 9 - 5 | 15 - 7 | 21 - 9 | 27 - 11 | 33 - 13 | 39 - 15 | 45 - 17 | 51 - 19 | 57 - 21 | 63 - 23 | 69 - 25 | 75 - 27 | 81 - 29 | 87 - 31 | 93 - 33 | 99 - 35 | 68 -(18 rows) - -WITH cte (x) AS ( - SELECT * FROM generate_series(1, 35, 2) -) -SELECT x, (sum(x) over w) -FROM cte -WINDOW w AS (ORDER BY x range between 1 preceding and 1 following); - x | sum -----+----- - 1 | 1 - 3 | 3 - 5 | 5 - 7 | 7 - 9 | 9 - 11 | 11 - 13 | 13 - 15 | 15 - 17 | 17 - 19 | 19 - 21 | 21 - 23 | 23 - 25 | 25 - 27 | 27 - 29 | 29 - 31 | 31 - 33 | 33 - 35 | 35 -(18 rows) - -WITH cte (x) AS ( - SELECT * FROM generate_series(1, 35, 2) -) -SELECT x, (sum(x) over w) -FROM cte -WINDOW w AS (ORDER BY x groups between 1 preceding and 1 following); - x | sum -----+----- - 1 | 4 - 3 | 9 - 5 | 15 - 7 | 21 - 9 | 27 - 11 | 33 - 13 | 39 - 15 | 45 - 17 | 51 - 19 | 57 - 21 | 63 - 23 | 69 - 25 | 75 - 27 | 81 - 29 | 87 - 31 | 93 - 33 | 99 - 35 | 68 -(18 rows) - -WITH cte (x) AS ( - select 1 union all select 1 union all select 1 union all - SELECT * FROM generate_series(5, 49, 2) -) -SELECT x, (sum(x) over w) -FROM cte -WINDOW w AS (ORDER BY x rows between 1 preceding and 1 following); - x | sum -----+----- - 1 | 2 - 1 | 3 - 1 | 7 - 5 | 13 - 7 | 21 - 9 | 27 - 11 | 33 - 13 | 39 - 15 | 45 - 17 | 51 - 19 | 57 - 21 | 63 - 23 | 69 - 25 | 75 - 27 | 81 - 29 | 87 - 31 | 93 - 33 | 99 - 35 | 105 - 37 | 111 - 39 | 117 - 41 | 123 - 43 | 129 - 45 | 135 - 47 | 141 - 49 | 96 -(26 rows) - -WITH cte (x) AS ( - select 1 union all select 1 union all select 1 union all - SELECT * FROM generate_series(5, 49, 2) -) -SELECT x, (sum(x) over w) -FROM cte -WINDOW w AS (ORDER BY x range between 1 preceding and 1 following); - x | sum -----+----- - 1 | 3 - 1 | 3 - 1 | 3 - 5 | 5 - 7 | 7 - 9 | 9 - 11 | 11 - 13 | 13 - 15 | 15 - 17 | 17 - 19 | 19 - 21 | 21 - 23 | 23 - 25 | 25 - 27 | 27 - 29 | 29 - 31 | 31 - 33 | 33 - 35 | 35 - 37 | 37 - 39 | 39 - 41 | 41 - 43 | 43 - 45 | 45 - 47 | 47 - 49 | 49 -(26 rows) - -WITH cte (x) AS ( - select 1 union all select 1 union all select 1 union all - SELECT * FROM generate_series(5, 49, 2) -) -SELECT x, (sum(x) over w) -FROM cte -WINDOW w AS (ORDER BY x groups between 1 preceding and 1 following); - x | sum -----+----- - 1 | 8 - 1 | 8 - 1 | 8 - 5 | 15 - 7 | 21 - 9 | 27 - 11 | 33 - 13 | 39 - 15 | 45 - 17 | 51 - 19 | 57 - 21 | 63 - 23 | 69 - 25 | 75 - 27 | 81 - 29 | 87 - 31 | 93 - 33 | 99 - 35 | 105 - 37 | 111 - 39 | 117 - 41 | 123 - 43 | 129 - 45 | 135 - 47 | 141 - 49 | 96 -(26 rows) - --- with UNION -SELECT count(*) OVER (PARTITION BY four) FROM (SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk2)s LIMIT 0; - count -------- -(0 rows) - --- check some degenerate cases -create temp table t1 (f1 int, f2 int8); -insert into t1 values (1,1),(1,2),(2,2); -select f1, sum(f1) over (partition by f1 - range between 1 preceding and 1 following) -from t1 where f1 = f2; -- error, must have order by -ERROR: RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column -LINE 1: select f1, sum(f1) over (partition by f1 - ^ -explain (costs off) -select f1, sum(f1) over (partition by f1 order by f2 - range between 1 preceding and 1 following) -from t1 where f1 = f2; - QUERY PLAN ---------------------------------- - WindowAgg - -> Sort - Sort Key: f1 - -> Seq Scan on t1 - Filter: (f1 = f2) -(5 rows) - -select f1, sum(f1) over (partition by f1 order by f2 - range between 1 preceding and 1 following) -from t1 where f1 = f2; - f1 | sum -----+----- - 1 | 1 - 2 | 2 -(2 rows) - -select f1, sum(f1) over (partition 
by f1, f1 order by f2 - range between 2 preceding and 1 preceding) -from t1 where f1 = f2; - f1 | sum -----+----- - 1 | - 2 | -(2 rows) - -select f1, sum(f1) over (partition by f1, f2 order by f2 - range between 1 following and 2 following) -from t1 where f1 = f2; - f1 | sum -----+----- - 1 | - 2 | -(2 rows) - -select f1, sum(f1) over (partition by f1 - groups between 1 preceding and 1 following) -from t1 where f1 = f2; -- error, must have order by -ERROR: GROUPS mode requires an ORDER BY clause -LINE 1: select f1, sum(f1) over (partition by f1 - ^ -explain (costs off) -select f1, sum(f1) over (partition by f1 order by f2 - groups between 1 preceding and 1 following) -from t1 where f1 = f2; - QUERY PLAN ---------------------------------- - WindowAgg - -> Sort - Sort Key: f1 - -> Seq Scan on t1 - Filter: (f1 = f2) -(5 rows) - -select f1, sum(f1) over (partition by f1 order by f2 - groups between 1 preceding and 1 following) -from t1 where f1 = f2; - f1 | sum -----+----- - 1 | 1 - 2 | 2 -(2 rows) - -select f1, sum(f1) over (partition by f1, f1 order by f2 - groups between 2 preceding and 1 preceding) -from t1 where f1 = f2; - f1 | sum -----+----- - 1 | - 2 | -(2 rows) - -select f1, sum(f1) over (partition by f1, f2 order by f2 - groups between 1 following and 2 following) -from t1 where f1 = f2; - f1 | sum -----+----- - 1 | - 2 | -(2 rows) - --- ordering by a non-integer constant is allowed -SELECT rank() OVER (ORDER BY length('abc')); - rank ------- - 1 -(1 row) - --- can't order by another window function -SELECT rank() OVER (ORDER BY rank() OVER (ORDER BY random())); -ERROR: window functions are not allowed in window definitions -LINE 1: SELECT rank() OVER (ORDER BY rank() OVER (ORDER BY random())... - ^ --- some other errors -SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY salary) < 10; -ERROR: window functions are not allowed in WHERE -LINE 1: SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY sa... - ^ -SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVER (ORDER BY salary) < 10; -ERROR: window functions are not allowed in JOIN conditions -LINE 1: SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVE... - ^ -SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GROUP BY 1; -ERROR: window functions are not allowed in GROUP BY -LINE 1: SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GRO... - ^ -SELECT * FROM rank() OVER (ORDER BY random()); -ERROR: syntax error at or near "ORDER" -LINE 1: SELECT * FROM rank() OVER (ORDER BY random()); - ^ -DELETE FROM empsalary WHERE (rank() OVER (ORDER BY random())) > 10; -ERROR: window functions are not allowed in WHERE -LINE 1: DELETE FROM empsalary WHERE (rank() OVER (ORDER BY random())... - ^ -DELETE FROM empsalary RETURNING rank() OVER (ORDER BY random()); -ERROR: window functions are not allowed in RETURNING -LINE 1: DELETE FROM empsalary RETURNING rank() OVER (ORDER BY random... - ^ -SELECT count(*) OVER w FROM tenk1 WINDOW w AS (ORDER BY unique1), w AS (ORDER BY unique1); -ERROR: window "w" is already defined -LINE 1: ...w FROM tenk1 WINDOW w AS (ORDER BY unique1), w AS (ORDER BY ... - ^ -SELECT rank() OVER (PARTITION BY four, ORDER BY ten) FROM tenk1; -ERROR: syntax error at or near "ORDER" -LINE 1: SELECT rank() OVER (PARTITION BY four, ORDER BY ten) FROM te... 
- ^ -SELECT count() OVER () FROM tenk1; -ERROR: count(*) must be used to call a parameterless aggregate function -LINE 1: SELECT count() OVER () FROM tenk1; - ^ -SELECT generate_series(1, 100) OVER () FROM empsalary; -ERROR: OVER specified, but generate_series is not a window function nor an aggregate function -LINE 1: SELECT generate_series(1, 100) OVER () FROM empsalary; - ^ -SELECT ntile(0) OVER (ORDER BY ten), ten, four FROM tenk1; -ERROR: argument of ntile must be greater than zero -SELECT nth_value(four, 0) OVER (ORDER BY ten), ten, four FROM tenk1; -ERROR: argument of nth_value must be greater than zero --- filter -SELECT sum(salary), row_number() OVER (ORDER BY depname), sum( - sum(salary) FILTER (WHERE enroll_date > '2007-01-01') -) FILTER (WHERE depname <> 'sales') OVER (ORDER BY depname DESC) AS "filtered_sum", - depname -FROM empsalary GROUP BY depname; - sum | row_number | filtered_sum | depname --------+------------+--------------+----------- - 25100 | 1 | 22600 | develop - 7400 | 2 | 3500 | personnel - 14600 | 3 | | sales -(3 rows) - --- --- Test SupportRequestOptimizeWindowClause's ability to de-duplicate --- WindowClauses --- --- Ensure WindowClause frameOptions are changed so that only a single --- WindowAgg exists in the plan. -EXPLAIN (COSTS OFF) -SELECT - empno, - depname, - row_number() OVER (PARTITION BY depname ORDER BY enroll_date) rn, - rank() OVER (PARTITION BY depname ORDER BY enroll_date ROWS BETWEEN - UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) rnk, - dense_rank() OVER (PARTITION BY depname ORDER BY enroll_date RANGE BETWEEN - CURRENT ROW AND CURRENT ROW) drnk, - ntile(10) OVER (PARTITION BY depname ORDER BY enroll_date RANGE BETWEEN - CURRENT ROW AND UNBOUNDED FOLLOWING) nt, - percent_rank() OVER (PARTITION BY depname ORDER BY enroll_date ROWS BETWEEN - CURRENT ROW AND UNBOUNDED FOLLOWING) pr, - cume_dist() OVER (PARTITION BY depname ORDER BY enroll_date RANGE BETWEEN - CURRENT ROW AND UNBOUNDED FOLLOWING) cd -FROM empsalary; - QUERY PLAN ----------------------------------------- - WindowAgg - -> Sort - Sort Key: depname, enroll_date - -> Seq Scan on empsalary -(4 rows) - --- Ensure WindowFuncs which cannot support their WindowClause's frameOptions --- being changed are untouched -EXPLAIN (COSTS OFF, VERBOSE) -SELECT - empno, - depname, - row_number() OVER (PARTITION BY depname ORDER BY enroll_date) rn, - rank() OVER (PARTITION BY depname ORDER BY enroll_date ROWS BETWEEN - UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) rnk, - count(*) OVER (PARTITION BY depname ORDER BY enroll_date RANGE BETWEEN - CURRENT ROW AND CURRENT ROW) cnt -FROM empsalary; - QUERY PLAN ------------------------------------------------------------------------------------------------------- - WindowAgg - Output: empno, depname, (row_number() OVER (?)), (rank() OVER (?)), count(*) OVER (?), enroll_date - -> WindowAgg - Output: depname, enroll_date, empno, row_number() OVER (?), rank() OVER (?) 
-         ->  Sort
-               Output: depname, enroll_date, empno
-               Sort Key: empsalary.depname, empsalary.enroll_date
-               ->  Seq Scan on pg_temp.empsalary
-                     Output: depname, enroll_date, empno
-(9 rows)
-
--- Ensure the above query gives us the expected results
-SELECT
-  empno,
-  depname,
-  row_number() OVER (PARTITION BY depname ORDER BY enroll_date) rn,
-  rank() OVER (PARTITION BY depname ORDER BY enroll_date ROWS BETWEEN
-               UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) rnk,
-  count(*) OVER (PARTITION BY depname ORDER BY enroll_date RANGE BETWEEN
-                 CURRENT ROW AND CURRENT ROW) cnt
-FROM empsalary;
- empno |  depname  | rn | rnk | cnt
--------+-----------+----+-----+-----
-     8 | develop   |  1 |   1 |   1
-    10 | develop   |  2 |   2 |   1
-    11 | develop   |  3 |   3 |   1
-     9 | develop   |  4 |   4 |   2
-     7 | develop   |  5 |   4 |   2
-     2 | personnel |  1 |   1 |   1
-     5 | personnel |  2 |   2 |   1
-     1 | sales     |  1 |   1 |   1
-     3 | sales     |  2 |   2 |   1
-     4 | sales     |  3 |   3 |   1
-(10 rows)
-
--- Test pushdown of quals into a subquery containing window functions
--- pushdown is safe because all PARTITION BY clauses include depname:
-EXPLAIN (COSTS OFF)
-SELECT * FROM
-  (SELECT depname,
-          sum(salary) OVER (PARTITION BY depname) depsalary,
-          min(salary) OVER (PARTITION BY depname || 'A', depname) depminsalary
-   FROM empsalary) emp
-WHERE depname = 'sales';
-                                QUERY PLAN
----------------------------------------------------------------------------
- Subquery Scan on emp
-   ->  WindowAgg
-         ->  WindowAgg
-               ->  Sort
-                     Sort Key: (((empsalary.depname)::text || 'A'::text))
-                     ->  Seq Scan on empsalary
-                           Filter: ((depname)::text = 'sales'::text)
-(7 rows)
-
--- pushdown is unsafe because there's a PARTITION BY clause without depname:
-EXPLAIN (COSTS OFF)
-SELECT * FROM
-  (SELECT depname,
-          sum(salary) OVER (PARTITION BY enroll_date) enroll_salary,
-          min(salary) OVER (PARTITION BY depname) depminsalary
-   FROM empsalary) emp
-WHERE depname = 'sales';
-                       QUERY PLAN
---------------------------------------------------------
- Subquery Scan on emp
-   Filter: ((emp.depname)::text = 'sales'::text)
-   ->  WindowAgg
-         ->  Sort
-               Sort Key: empsalary.enroll_date
-               ->  WindowAgg
-                     ->  Sort
-                           Sort Key: empsalary.depname
-                           ->  Seq Scan on empsalary
-(9 rows)
-
--- Test window function run conditions are properly pushed down into the
--- WindowAgg
-EXPLAIN (COSTS OFF)
-SELECT * FROM
-  (SELECT empno,
-          row_number() OVER (ORDER BY empno) rn
-   FROM empsalary) emp
-WHERE rn < 3;
-                  QUERY PLAN
------------------------------------------------
- WindowAgg
-   Run Condition: (row_number() OVER (?) < 3)
-   ->  Sort
-         Sort Key: empsalary.empno
-         ->  Seq Scan on empsalary
-(5 rows)
-
--- The following 3 statements should return the same result.
-SELECT * FROM
-  (SELECT empno,
-          row_number() OVER (ORDER BY empno) rn
-   FROM empsalary) emp
-WHERE rn < 3;
- empno | rn
--------+----
-     1 |  1
-     2 |  2
-(2 rows)
-
-SELECT * FROM
-  (SELECT empno,
-          row_number() OVER (ORDER BY empno) rn
-   FROM empsalary) emp
-WHERE 3 > rn;
- empno | rn
--------+----
-     1 |  1
-     2 |  2
-(2 rows)
-
-SELECT * FROM
-  (SELECT empno,
-          row_number() OVER (ORDER BY empno) rn
-   FROM empsalary) emp
-WHERE 2 >= rn;
- empno | rn
--------+----
-     1 |  1
-     2 |  2
-(2 rows)
-
--- Ensure r <= 3 is pushed down into the run condition of the window agg
-EXPLAIN (COSTS OFF)
-SELECT * FROM
-  (SELECT empno,
-          salary,
-          rank() OVER (ORDER BY salary DESC) r
-   FROM empsalary) emp
-WHERE r <= 3;
-                QUERY PLAN
-------------------------------------------
- WindowAgg
-   Run Condition: (rank() OVER (?)
<= 3) - -> Sort - Sort Key: empsalary.salary DESC - -> Seq Scan on empsalary -(5 rows) - -SELECT * FROM - (SELECT empno, - salary, - rank() OVER (ORDER BY salary DESC) r - FROM empsalary) emp -WHERE r <= 3; - empno | salary | r --------+--------+--- - 8 | 6000 | 1 - 10 | 5200 | 2 - 11 | 5200 | 2 -(3 rows) - --- Ensure dr = 1 is converted to dr <= 1 to get all rows leading up to dr = 1 -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - salary, - dense_rank() OVER (ORDER BY salary DESC) dr - FROM empsalary) emp -WHERE dr = 1; - QUERY PLAN ------------------------------------------------------ - Subquery Scan on emp - Filter: (emp.dr = 1) - -> WindowAgg - Run Condition: (dense_rank() OVER (?) <= 1) - -> Sort - Sort Key: empsalary.salary DESC - -> Seq Scan on empsalary -(7 rows) - -SELECT * FROM - (SELECT empno, - salary, - dense_rank() OVER (ORDER BY salary DESC) dr - FROM empsalary) emp -WHERE dr = 1; - empno | salary | dr --------+--------+---- - 8 | 6000 | 1 -(1 row) - --- Check COUNT() and COUNT(*) -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - salary, - count(*) OVER (ORDER BY salary DESC) c - FROM empsalary) emp -WHERE c <= 3; - QUERY PLAN -------------------------------------------- - WindowAgg - Run Condition: (count(*) OVER (?) <= 3) - -> Sort - Sort Key: empsalary.salary DESC - -> Seq Scan on empsalary -(5 rows) - -SELECT * FROM - (SELECT empno, - salary, - count(*) OVER (ORDER BY salary DESC) c - FROM empsalary) emp -WHERE c <= 3; - empno | salary | c --------+--------+--- - 8 | 6000 | 1 - 10 | 5200 | 3 - 11 | 5200 | 3 -(3 rows) - -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - salary, - count(empno) OVER (ORDER BY salary DESC) c - FROM empsalary) emp -WHERE c <= 3; - QUERY PLAN ---------------------------------------------------------- - WindowAgg - Run Condition: (count(empsalary.empno) OVER (?) <= 3) - -> Sort - Sort Key: empsalary.salary DESC - -> Seq Scan on empsalary -(5 rows) - -SELECT * FROM - (SELECT empno, - salary, - count(empno) OVER (ORDER BY salary DESC) c - FROM empsalary) emp -WHERE c <= 3; - empno | salary | c --------+--------+--- - 8 | 6000 | 1 - 10 | 5200 | 3 - 11 | 5200 | 3 -(3 rows) - -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - salary, - count(*) OVER (ORDER BY salary DESC ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) c - FROM empsalary) emp -WHERE c >= 3; - QUERY PLAN -------------------------------------------- - WindowAgg - Run Condition: (count(*) OVER (?) >= 3) - -> Sort - Sort Key: empsalary.salary DESC - -> Seq Scan on empsalary -(5 rows) - -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - salary, - count(*) OVER () c - FROM empsalary) emp -WHERE 11 <= c; - QUERY PLAN --------------------------------------------- - WindowAgg - Run Condition: (11 <= count(*) OVER (?)) - -> Seq Scan on empsalary -(3 rows) - -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - salary, - count(*) OVER (ORDER BY salary DESC) c, - dense_rank() OVER (ORDER BY salary DESC) dr - FROM empsalary) emp -WHERE dr = 1; - QUERY PLAN ------------------------------------------------------ - Subquery Scan on emp - Filter: (emp.dr = 1) - -> WindowAgg - Run Condition: (dense_rank() OVER (?) 
<= 1) - -> Sort - Sort Key: empsalary.salary DESC - -> Seq Scan on empsalary -(7 rows) - --- Ensure we get a run condition when there's a PARTITION BY clause -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - depname, - row_number() OVER (PARTITION BY depname ORDER BY empno) rn - FROM empsalary) emp -WHERE rn < 3; - QUERY PLAN ------------------------------------------------------- - WindowAgg - Run Condition: (row_number() OVER (?) < 3) - -> Sort - Sort Key: empsalary.depname, empsalary.empno - -> Seq Scan on empsalary -(5 rows) - --- and ensure we get the correct results from the above plan -SELECT * FROM - (SELECT empno, - depname, - row_number() OVER (PARTITION BY depname ORDER BY empno) rn - FROM empsalary) emp -WHERE rn < 3; - empno | depname | rn --------+-----------+---- - 7 | develop | 1 - 8 | develop | 2 - 2 | personnel | 1 - 5 | personnel | 2 - 1 | sales | 1 - 3 | sales | 2 -(6 rows) - --- ensure that "unused" subquery columns are not removed when the column only --- exists in the run condition -EXPLAIN (COSTS OFF) -SELECT empno, depname FROM - (SELECT empno, - depname, - row_number() OVER (PARTITION BY depname ORDER BY empno) rn - FROM empsalary) emp -WHERE rn < 3; - QUERY PLAN ------------------------------------------------------------- - Subquery Scan on emp - -> WindowAgg - Run Condition: (row_number() OVER (?) < 3) - -> Sort - Sort Key: empsalary.depname, empsalary.empno - -> Seq Scan on empsalary -(6 rows) - --- likewise with count(empno) instead of row_number() -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - depname, - salary, - count(empno) OVER (PARTITION BY depname ORDER BY salary DESC) c - FROM empsalary) emp -WHERE c <= 3; - QUERY PLAN ------------------------------------------------------------- - WindowAgg - Run Condition: (count(empsalary.empno) OVER (?) <= 3) - -> Sort - Sort Key: empsalary.depname, empsalary.salary DESC - -> Seq Scan on empsalary -(5 rows) - --- and again, check the results are what we expect. -SELECT * FROM - (SELECT empno, - depname, - salary, - count(empno) OVER (PARTITION BY depname ORDER BY salary DESC) c - FROM empsalary) emp -WHERE c <= 3; - empno | depname | salary | c --------+-----------+--------+--- - 8 | develop | 6000 | 1 - 10 | develop | 5200 | 3 - 11 | develop | 5200 | 3 - 2 | personnel | 3900 | 1 - 5 | personnel | 3500 | 2 - 1 | sales | 5000 | 1 - 4 | sales | 4800 | 3 - 3 | sales | 4800 | 3 -(8 rows) - --- Ensure we get the correct run condition when the window function is both --- monotonically increasing and decreasing. -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - depname, - salary, - count(empno) OVER () c - FROM empsalary) emp -WHERE c = 1; - QUERY PLAN --------------------------------------------------------- - WindowAgg - Run Condition: (count(empsalary.empno) OVER (?) = 1) - -> Seq Scan on empsalary -(3 rows) - --- Some more complex cases with multiple window clauses -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT *, - count(salary) OVER (PARTITION BY depname || '') c1, -- w1 - row_number() OVER (PARTITION BY depname) rn, -- w2 - count(*) OVER (PARTITION BY depname) c2, -- w2 - count(*) OVER (PARTITION BY '' || depname) c3, -- w3 - ntile(2) OVER (PARTITION BY depname) nt -- w2 - FROM empsalary -) e WHERE rn <= 1 AND c1 <= 3 AND nt < 2; - QUERY PLAN ------------------------------------------------------------------------------------------------ - Subquery Scan on e - -> WindowAgg - Filter: (((row_number() OVER (?)) <= 1) AND ((ntile(2) OVER (?)) < 2)) - Run Condition: (count(empsalary.salary) OVER (?) 
<= 3)
-         ->  Sort
-               Sort Key: (((empsalary.depname)::text || ''::text))
-               ->  WindowAgg
-                     Run Condition: ((row_number() OVER (?) <= 1) AND (ntile(2) OVER (?) < 2))
-                     ->  Sort
-                           Sort Key: empsalary.depname
-                           ->  WindowAgg
-                                 ->  Sort
-                                       Sort Key: ((''::text || (empsalary.depname)::text))
-                                       ->  Seq Scan on empsalary
-(14 rows)
-
--- Ensure we correctly filter out all of the run conditions from each window
-SELECT * FROM
-  (SELECT *,
-          count(salary) OVER (PARTITION BY depname || '') c1, -- w1
-          row_number() OVER (PARTITION BY depname) rn, -- w2
-          count(*) OVER (PARTITION BY depname) c2, -- w2
-          count(*) OVER (PARTITION BY '' || depname) c3, -- w3
-          ntile(2) OVER (PARTITION BY depname) nt -- w2
-   FROM empsalary
-) e WHERE rn <= 1 AND c1 <= 3 AND nt < 2;
-  depname  | empno | salary | enroll_date | c1 | rn | c2 | c3 | nt
------------+-------+--------+-------------+----+----+----+----+----
- personnel |     5 |   3500 | 12-10-2007  |  2 |  1 |  2 |  2 |  1
- sales     |     3 |   4800 | 08-01-2007  |  3 |  1 |  3 |  3 |  1
-(2 rows)
-
--- Ensure we remove references to reduced outer joins as nulling rels in run
--- conditions
-EXPLAIN (COSTS OFF)
-SELECT 1 FROM
-  (SELECT ntile(e2.salary) OVER (PARTITION BY e1.depname) AS c
-   FROM empsalary e1 LEFT JOIN empsalary e2 ON TRUE
-   WHERE e1.empno = e2.empno) s
-WHERE s.c = 1;
-                        QUERY PLAN
------------------------------------------------------------
- Subquery Scan on s
-   Filter: (s.c = 1)
-   ->  WindowAgg
-         Run Condition: (ntile(e2.salary) OVER (?) <= 1)
-         ->  Sort
-               Sort Key: e1.depname
-               ->  Merge Join
-                     Merge Cond: (e1.empno = e2.empno)
-                     ->  Sort
-                           Sort Key: e1.empno
-                           ->  Seq Scan on empsalary e1
-                     ->  Sort
-                           Sort Key: e2.empno
-                           ->  Seq Scan on empsalary e2
-(14 rows)
-
--- Tests to ensure we don't push down the run condition when it's not valid to
--- do so.
--- Ensure we don't push down when the frame options show that the window
--- function is not monotonically increasing
-EXPLAIN (COSTS OFF)
-SELECT * FROM
-  (SELECT empno,
-          salary,
-          count(*) OVER (ORDER BY salary DESC ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) c
-   FROM empsalary) emp
-WHERE c <= 3;
-                   QUERY PLAN
-------------------------------------------------
- Subquery Scan on emp
-   Filter: (emp.c <= 3)
-   ->  WindowAgg
-         ->  Sort
-               Sort Key: empsalary.salary DESC
-               ->  Seq Scan on empsalary
-(6 rows)
-
--- Ensure we don't push down when the window function's monotonic properties
--- don't match those of the clauses.
-EXPLAIN (COSTS OFF)
-SELECT * FROM
-  (SELECT empno,
-          salary,
-          count(*) OVER (ORDER BY salary) c
-   FROM empsalary) emp
-WHERE 3 <= c;
-                QUERY PLAN
--------------------------------------------
- Subquery Scan on emp
-   Filter: (3 <= emp.c)
-   ->  WindowAgg
-         ->  Sort
-               Sort Key: empsalary.salary
-               ->  Seq Scan on empsalary
-(6 rows)
-
--- Ensure we don't use a run condition when there's a volatile function in the
--- WindowFunc
-EXPLAIN (COSTS OFF)
-SELECT * FROM
-  (SELECT empno,
-          salary,
-          count(random()) OVER (ORDER BY empno DESC) c
-   FROM empsalary) emp
-WHERE c = 1;
-                  QUERY PLAN
------------------------------------------------
- Subquery Scan on emp
-   Filter: (emp.c = 1)
-   ->  WindowAgg
-         ->  Sort
-               Sort Key: empsalary.empno DESC
-               ->  Seq Scan on empsalary
-(6 rows)
-
--- Ensure we don't use a run condition when the WindowFunc contains subplans
-EXPLAIN (COSTS OFF)
-SELECT * FROM
-  (SELECT empno,
-          salary,
-          count((SELECT 1)) OVER (ORDER BY empno DESC) c
-   FROM empsalary) emp
-WHERE c = 1;
-                  QUERY PLAN
------------------------------------------------
- Subquery Scan on emp
-   Filter: (emp.c = 1)
-   ->  WindowAgg
-         InitPlan 1 (returns $0)
-           ->  Result
-         ->  Sort
-               Sort Key: empsalary.empno DESC
-               ->  Seq Scan on empsalary
-(8 rows)
-
--- Test Sort node collapsing
-EXPLAIN (COSTS OFF)
-SELECT * FROM
-  (SELECT depname,
-          sum(salary) OVER (PARTITION BY depname order by empno) depsalary,
-          min(salary) OVER (PARTITION BY depname, empno order by enroll_date) depminsalary
-   FROM empsalary) emp
-WHERE depname = 'sales';
-                              QUERY PLAN
------------------------------------------------------------------------
- Subquery Scan on emp
-   ->  WindowAgg
-         ->  WindowAgg
-               ->  Sort
-                     Sort Key: empsalary.empno, empsalary.enroll_date
-                     ->  Seq Scan on empsalary
-                           Filter: ((depname)::text = 'sales'::text)
-(7 rows)
-
--- Ensure that the evaluation order of the WindowAggs results in the WindowAgg
--- with the same sort order that's required by the ORDER BY being evaluated last.
-EXPLAIN (COSTS OFF)
-SELECT empno,
-       enroll_date,
-       depname,
-       sum(salary) OVER (PARTITION BY depname order by empno) depsalary,
-       min(salary) OVER (PARTITION BY depname order by enroll_date) depminsalary
-FROM empsalary
-ORDER BY depname, empno;
-                     QUERY PLAN
------------------------------------------------------
- WindowAgg
-   ->  Incremental Sort
-         Sort Key: depname, empno
-         Presorted Key: depname
-         ->  WindowAgg
-               ->  Sort
-                     Sort Key: depname, enroll_date
-                     ->  Seq Scan on empsalary
-(8 rows)
-
--- As above, but with an adjusted ORDER BY to ensure the above plan didn't
--- perform only 2 sorts by accident.
-EXPLAIN (COSTS OFF)
-SELECT empno,
-       enroll_date,
-       depname,
-       sum(salary) OVER (PARTITION BY depname order by empno) depsalary,
-       min(salary) OVER (PARTITION BY depname order by enroll_date) depminsalary
-FROM empsalary
-ORDER BY depname, enroll_date;
-                   QUERY PLAN
-------------------------------------------------
- WindowAgg
-   ->  Incremental Sort
-         Sort Key: depname, enroll_date
-         Presorted Key: depname
-         ->  WindowAgg
-               ->  Sort
-                     Sort Key: depname, empno
-                     ->  Seq Scan on empsalary
-(8 rows)
-
-SET enable_hashagg TO off;
--- Ensure we don't get a sort for both DISTINCT and ORDER BY. We expect the
--- sort for the DISTINCT to provide presorted input for the ORDER BY.
-EXPLAIN (COSTS OFF) -SELECT DISTINCT - empno, - enroll_date, - depname, - sum(salary) OVER (PARTITION BY depname order by empno) depsalary, - min(salary) OVER (PARTITION BY depname order by enroll_date) depminsalary -FROM empsalary -ORDER BY depname, enroll_date; - QUERY PLAN ------------------------------------------------------------------------------------------------ - Unique - -> Incremental Sort - Sort Key: depname, enroll_date, empno, (sum(salary) OVER (?)), (min(salary) OVER (?)) - Presorted Key: depname, enroll_date - -> WindowAgg - -> Incremental Sort - Sort Key: depname, enroll_date - Presorted Key: depname - -> WindowAgg - -> Sort - Sort Key: depname, empno - -> Seq Scan on empsalary -(12 rows) - --- As above but adjust the ORDER BY clause to help ensure the plan with the --- minimum amount of sorting wasn't a fluke. -EXPLAIN (COSTS OFF) -SELECT DISTINCT - empno, - enroll_date, - depname, - sum(salary) OVER (PARTITION BY depname order by empno) depsalary, - min(salary) OVER (PARTITION BY depname order by enroll_date) depminsalary -FROM empsalary -ORDER BY depname, empno; - QUERY PLAN ------------------------------------------------------------------------------------------------ - Unique - -> Incremental Sort - Sort Key: depname, empno, enroll_date, (sum(salary) OVER (?)), (min(salary) OVER (?)) - Presorted Key: depname, empno - -> WindowAgg - -> Incremental Sort - Sort Key: depname, empno - Presorted Key: depname - -> WindowAgg - -> Sort - Sort Key: depname, enroll_date - -> Seq Scan on empsalary -(12 rows) - -RESET enable_hashagg; --- Test Sort node reordering -EXPLAIN (COSTS OFF) -SELECT - lead(1) OVER (PARTITION BY depname ORDER BY salary, enroll_date), - lag(1) OVER (PARTITION BY depname ORDER BY salary,enroll_date,empno) -FROM empsalary; - QUERY PLAN -------------------------------------------------------------- - WindowAgg - -> WindowAgg - -> Sort - Sort Key: depname, salary, enroll_date, empno - -> Seq Scan on empsalary -(5 rows) - --- Test incremental sorting -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT depname, - empno, - salary, - enroll_date, - row_number() OVER (PARTITION BY depname ORDER BY enroll_date) AS first_emp, - row_number() OVER (PARTITION BY depname ORDER BY enroll_date DESC) AS last_emp - FROM empsalary) emp -WHERE first_emp = 1 OR last_emp = 1; - QUERY PLAN ------------------------------------------------------------------------------------ - Subquery Scan on emp - Filter: ((emp.first_emp = 1) OR (emp.last_emp = 1)) - -> WindowAgg - -> Incremental Sort - Sort Key: empsalary.depname, empsalary.enroll_date - Presorted Key: empsalary.depname - -> WindowAgg - -> Sort - Sort Key: empsalary.depname, empsalary.enroll_date DESC - -> Seq Scan on empsalary -(10 rows) - -SELECT * FROM - (SELECT depname, - empno, - salary, - enroll_date, - row_number() OVER (PARTITION BY depname ORDER BY enroll_date) AS first_emp, - row_number() OVER (PARTITION BY depname ORDER BY enroll_date DESC) AS last_emp - FROM empsalary) emp -WHERE first_emp = 1 OR last_emp = 1; - depname | empno | salary | enroll_date | first_emp | last_emp ------------+-------+--------+-------------+-----------+---------- - develop | 8 | 6000 | 10-01-2006 | 1 | 5 - develop | 7 | 4200 | 01-01-2008 | 5 | 1 - personnel | 2 | 3900 | 12-23-2006 | 1 | 2 - personnel | 5 | 3500 | 12-10-2007 | 2 | 1 - sales | 1 | 5000 | 10-01-2006 | 1 | 3 - sales | 4 | 4800 | 08-08-2007 | 3 | 1 -(6 rows) - --- cleanup -DROP TABLE empsalary; --- test user-defined window function with named args and default args -CREATE FUNCTION 
nth_value_def(val anyelement, n integer = 1) RETURNS anyelement - LANGUAGE internal WINDOW IMMUTABLE STRICT AS 'window_nth_value'; -SELECT nth_value_def(n := 2, val := ten) OVER (PARTITION BY four), ten, four - FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten) s; - nth_value_def | ten | four ----------------+-----+------ - 0 | 0 | 0 - 0 | 0 | 0 - 0 | 4 | 0 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 7 | 1 - 1 | 9 | 1 - | 0 | 2 - 3 | 1 | 3 - 3 | 3 | 3 -(10 rows) - -SELECT nth_value_def(ten) OVER (PARTITION BY four), ten, four - FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten) s; - nth_value_def | ten | four ----------------+-----+------ - 0 | 0 | 0 - 0 | 0 | 0 - 0 | 4 | 0 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 7 | 1 - 1 | 9 | 1 - 0 | 0 | 2 - 1 | 1 | 3 - 1 | 3 | 3 -(10 rows) - --- --- Test the basic moving-aggregate machinery --- --- create aggregates that record the series of transform calls (these are --- intentionally not true inverses) -CREATE FUNCTION logging_sfunc_nonstrict(text, anyelement) RETURNS text AS -$$ SELECT COALESCE($1, '') || '*' || quote_nullable($2) $$ -LANGUAGE SQL IMMUTABLE; -CREATE FUNCTION logging_msfunc_nonstrict(text, anyelement) RETURNS text AS -$$ SELECT COALESCE($1, '') || '+' || quote_nullable($2) $$ -LANGUAGE SQL IMMUTABLE; -CREATE FUNCTION logging_minvfunc_nonstrict(text, anyelement) RETURNS text AS -$$ SELECT $1 || '-' || quote_nullable($2) $$ -LANGUAGE SQL IMMUTABLE; -CREATE AGGREGATE logging_agg_nonstrict (anyelement) -( - stype = text, - sfunc = logging_sfunc_nonstrict, - mstype = text, - msfunc = logging_msfunc_nonstrict, - minvfunc = logging_minvfunc_nonstrict -); -CREATE AGGREGATE logging_agg_nonstrict_initcond (anyelement) -( - stype = text, - sfunc = logging_sfunc_nonstrict, - mstype = text, - msfunc = logging_msfunc_nonstrict, - minvfunc = logging_minvfunc_nonstrict, - initcond = 'I', - minitcond = 'MI' -); -CREATE FUNCTION logging_sfunc_strict(text, anyelement) RETURNS text AS -$$ SELECT $1 || '*' || quote_nullable($2) $$ -LANGUAGE SQL STRICT IMMUTABLE; -CREATE FUNCTION logging_msfunc_strict(text, anyelement) RETURNS text AS -$$ SELECT $1 || '+' || quote_nullable($2) $$ -LANGUAGE SQL STRICT IMMUTABLE; -CREATE FUNCTION logging_minvfunc_strict(text, anyelement) RETURNS text AS -$$ SELECT $1 || '-' || quote_nullable($2) $$ -LANGUAGE SQL STRICT IMMUTABLE; -CREATE AGGREGATE logging_agg_strict (text) -( - stype = text, - sfunc = logging_sfunc_strict, - mstype = text, - msfunc = logging_msfunc_strict, - minvfunc = logging_minvfunc_strict -); -CREATE AGGREGATE logging_agg_strict_initcond (anyelement) -( - stype = text, - sfunc = logging_sfunc_strict, - mstype = text, - msfunc = logging_msfunc_strict, - minvfunc = logging_minvfunc_strict, - initcond = 'I', - minitcond = 'MI' -); --- test strict and non-strict cases -SELECT - p::text || ',' || i::text || ':' || COALESCE(v::text, 'NULL') AS row, - logging_agg_nonstrict(v) over wnd as nstrict, - logging_agg_nonstrict_initcond(v) over wnd as nstrict_init, - logging_agg_strict(v::text) over wnd as strict, - logging_agg_strict_initcond(v) over wnd as strict_init -FROM (VALUES - (1, 1, NULL), - (1, 2, 'a'), - (1, 3, 'b'), - (1, 4, NULL), - (1, 5, NULL), - (1, 6, 'c'), - (2, 1, NULL), - (2, 2, 'x'), - (3, 1, 'z') -) AS t(p, i, v) -WINDOW wnd AS (PARTITION BY P ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) -ORDER BY p, i; - row | nstrict | nstrict_init | strict | strict_init 
-----------+-----------------------------------------------+-------------------------------------------------+-----------+---------------- - 1,1:NULL | +NULL | MI+NULL | | MI - 1,2:a | +NULL+'a' | MI+NULL+'a' | a | MI+'a' - 1,3:b | +NULL+'a'-NULL+'b' | MI+NULL+'a'-NULL+'b' | a+'b' | MI+'a'+'b' - 1,4:NULL | +NULL+'a'-NULL+'b'-'a'+NULL | MI+NULL+'a'-NULL+'b'-'a'+NULL | a+'b'-'a' | MI+'a'+'b'-'a' - 1,5:NULL | +NULL+'a'-NULL+'b'-'a'+NULL-'b'+NULL | MI+NULL+'a'-NULL+'b'-'a'+NULL-'b'+NULL | | MI - 1,6:c | +NULL+'a'-NULL+'b'-'a'+NULL-'b'+NULL-NULL+'c' | MI+NULL+'a'-NULL+'b'-'a'+NULL-'b'+NULL-NULL+'c' | c | MI+'c' - 2,1:NULL | +NULL | MI+NULL | | MI - 2,2:x | +NULL+'x' | MI+NULL+'x' | x | MI+'x' - 3,1:z | +'z' | MI+'z' | z | MI+'z' -(9 rows) - --- and again, but with filter -SELECT - p::text || ',' || i::text || ':' || - CASE WHEN f THEN COALESCE(v::text, 'NULL') ELSE '-' END as row, - logging_agg_nonstrict(v) filter(where f) over wnd as nstrict_filt, - logging_agg_nonstrict_initcond(v) filter(where f) over wnd as nstrict_init_filt, - logging_agg_strict(v::text) filter(where f) over wnd as strict_filt, - logging_agg_strict_initcond(v) filter(where f) over wnd as strict_init_filt -FROM (VALUES - (1, 1, true, NULL), - (1, 2, false, 'a'), - (1, 3, true, 'b'), - (1, 4, false, NULL), - (1, 5, false, NULL), - (1, 6, false, 'c'), - (2, 1, false, NULL), - (2, 2, true, 'x'), - (3, 1, true, 'z') -) AS t(p, i, f, v) -WINDOW wnd AS (PARTITION BY p ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) -ORDER BY p, i; - row | nstrict_filt | nstrict_init_filt | strict_filt | strict_init_filt -----------+--------------+-------------------+-------------+------------------ - 1,1:NULL | +NULL | MI+NULL | | MI - 1,2:- | +NULL | MI+NULL | | MI - 1,3:b | +'b' | MI+'b' | b | MI+'b' - 1,4:- | +'b' | MI+'b' | b | MI+'b' - 1,5:- | | MI | | MI - 1,6:- | | MI | | MI - 2,1:- | | MI | | MI - 2,2:x | +'x' | MI+'x' | x | MI+'x' - 3,1:z | +'z' | MI+'z' | z | MI+'z' -(9 rows) - --- test that volatile arguments disable moving-aggregate mode -SELECT - i::text || ':' || COALESCE(v::text, 'NULL') as row, - logging_agg_strict(v::text) - over wnd as inverse, - logging_agg_strict(v::text || CASE WHEN random() < 0 then '?' ELSE '' END) - over wnd as noinverse -FROM (VALUES - (1, 'a'), - (2, 'b'), - (3, 'c') -) AS t(i, v) -WINDOW wnd AS (ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) -ORDER BY i; - row | inverse | noinverse ------+---------------+----------- - 1:a | a | a - 2:b | a+'b' | a*'b' - 3:c | a+'b'-'a'+'c' | b*'c' -(3 rows) - -SELECT - i::text || ':' || COALESCE(v::text, 'NULL') as row, - logging_agg_strict(v::text) filter(where true) - over wnd as inverse, - logging_agg_strict(v::text) filter(where random() >= 0) - over wnd as noinverse -FROM (VALUES - (1, 'a'), - (2, 'b'), - (3, 'c') -) AS t(i, v) -WINDOW wnd AS (ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) -ORDER BY i; - row | inverse | noinverse ------+---------------+----------- - 1:a | a | a - 2:b | a+'b' | a*'b' - 3:c | a+'b'-'a'+'c' | b*'c' -(3 rows) - --- test that non-overlapping windows don't use inverse transitions -SELECT - logging_agg_strict(v::text) OVER wnd -FROM (VALUES - (1, 'a'), - (2, 'b'), - (3, 'c') -) AS t(i, v) -WINDOW wnd AS (ORDER BY i ROWS BETWEEN CURRENT ROW AND CURRENT ROW) -ORDER BY i; - logging_agg_strict --------------------- - a - b - c -(3 rows) - --- test that returning NULL from the inverse transition functions --- restarts the aggregation from scratch. 
The second aggregate is supposed --- to test cases where only some aggregates restart, the third one checks --- that one aggregate restarting doesn't cause others to restart. -CREATE FUNCTION sum_int_randrestart_minvfunc(int4, int4) RETURNS int4 AS -$$ SELECT CASE WHEN random() < 0.2 THEN NULL ELSE $1 - $2 END $$ -LANGUAGE SQL STRICT; -CREATE AGGREGATE sum_int_randomrestart (int4) -( - stype = int4, - sfunc = int4pl, - mstype = int4, - msfunc = int4pl, - minvfunc = sum_int_randrestart_minvfunc -); -WITH -vs AS ( - SELECT i, (random() * 100)::int4 AS v - FROM generate_series(1, 100) AS i -), -sum_following AS ( - SELECT i, SUM(v) OVER - (ORDER BY i DESC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS s - FROM vs -) -SELECT DISTINCT - sum_following.s = sum_int_randomrestart(v) OVER fwd AS eq1, - -sum_following.s = sum_int_randomrestart(-v) OVER fwd AS eq2, - 100*3+(vs.i-1)*3 = length(logging_agg_nonstrict(''::text) OVER fwd) AS eq3 -FROM vs -JOIN sum_following ON sum_following.i = vs.i -WINDOW fwd AS ( - ORDER BY vs.i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING -); - eq1 | eq2 | eq3 ------+-----+----- - t | t | t -(1 row) - --- --- Test various built-in aggregates that have moving-aggregate support --- --- test inverse transition functions handle NULLs properly -SELECT i,AVG(v::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); - i | avg ----+-------------------- - 1 | 1.5000000000000000 - 2 | 2.0000000000000000 - 3 | - 4 | -(4 rows) - -SELECT i,AVG(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); - i | avg ----+-------------------- - 1 | 1.5000000000000000 - 2 | 2.0000000000000000 - 3 | - 4 | -(4 rows) - -SELECT i,AVG(v::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); - i | avg ----+-------------------- - 1 | 1.5000000000000000 - 2 | 2.0000000000000000 - 3 | - 4 | -(4 rows) - -SELECT i,AVG(v::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,1.5),(2,2.5),(3,NULL),(4,NULL)) t(i,v); - i | avg ----+-------------------- - 1 | 2.0000000000000000 - 2 | 2.5000000000000000 - 3 | - 4 | -(4 rows) - -SELECT i,AVG(v::interval) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,'1 sec'),(2,'2 sec'),(3,NULL),(4,NULL)) t(i,v); - i | avg ----+------------ - 1 | @ 1.5 secs - 2 | @ 2 secs - 3 | - 4 | -(4 rows) - --- moving aggregates over infinite intervals -SELECT x - ,avg(x) OVER(ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING ) as curr_next_avg - ,avg(x) OVER(ROWS BETWEEN 1 PRECEDING AND CURRENT ROW ) as prev_curr_avg - ,sum(x) OVER(ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING ) as curr_next_sum - ,sum(x) OVER(ROWS BETWEEN 1 PRECEDING AND CURRENT ROW ) as prev_curr_sum -FROM (VALUES (NULL::interval), - ('infinity'::interval), - ('-2147483648 days -2147483648 months -9223372036854775807 usecs'), -- extreme interval value - ('-infinity'::interval), - ('2147483647 days 2147483647 months 9223372036854775806 usecs'), -- extreme interval value - ('infinity'::interval), - ('6 days'::interval), - ('7 days'::interval), - (NULL::interval), - ('-infinity'::interval)) v(x); - x | curr_next_avg | prev_curr_avg | curr_next_sum | prev_curr_sum -------------------------------------------------------------------------------+-------------------+-------------------+---------------+--------------- - | infinity | | 
infinity |
- infinity | infinity | infinity | infinity | infinity
- @ 178956970 years 8 mons 2147483648 days 2562047788 hours 54.775807 secs ago | -infinity | infinity | -infinity | infinity
- -infinity | -infinity | -infinity | -infinity | -infinity
- @ 178956970 years 7 mons 2147483647 days 2562047788 hours 54.775806 secs | infinity | -infinity | infinity | -infinity
- infinity | infinity | infinity | infinity | infinity
- @ 6 days | @ 6 days 12 hours | infinity | @ 13 days | infinity
- @ 7 days | @ 7 days | @ 6 days 12 hours | @ 7 days | @ 13 days
- | -infinity | @ 7 days | -infinity | @ 7 days
- -infinity | -infinity | -infinity | -infinity | -infinity
-(10 rows)
-
---should fail.
-SELECT x, avg(x) OVER(ROWS BETWEEN CURRENT ROW AND 2 FOLLOWING)
-FROM (VALUES (NULL::interval),
-             ('3 days'::interval),
-             ('infinity'::timestamptz - now()),
-             ('6 days'::interval),
-             ('-infinity'::interval)) v(x);
-ERROR:  interval out of range
---should fail.
-SELECT x, sum(x) OVER(ROWS BETWEEN CURRENT ROW AND 2 FOLLOWING)
-FROM (VALUES (NULL::interval),
-             ('3 days'::interval),
-             ('infinity'::timestamptz - now()),
-             ('6 days'::interval),
-             ('-infinity'::interval)) v(x);
-ERROR:  interval out of range
-SELECT i,SUM(v::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
-  FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
- i | sum
----+-----
- 1 |   3
- 2 |   2
- 3 |
- 4 |
-(4 rows)
-
-SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
-  FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
- i | sum
----+-----
- 1 |   3
- 2 |   2
- 3 |
- 4 |
-(4 rows)
-
-SELECT i,SUM(v::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
-  FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
- i | sum
----+-----
- 1 |   3
- 2 |   2
- 3 |
- 4 |
-(4 rows)
-
-SELECT i,SUM(v::money) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
-  FROM (VALUES(1,'1.10'),(2,'2.20'),(3,NULL),(4,NULL)) t(i,v);
- i |  sum
----+-------
- 1 | $3.30
- 2 | $2.20
- 3 |
- 4 |
-(4 rows)
-
-SELECT i,SUM(v::interval) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
-  FROM (VALUES(1,'1 sec'),(2,'2 sec'),(3,NULL),(4,NULL)) t(i,v);
- i |   sum
----+----------
- 1 | @ 3 secs
- 2 | @ 2 secs
- 3 |
- 4 |
-(4 rows)
-
-SELECT i,SUM(v::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
-  FROM (VALUES(1,1.1),(2,2.2),(3,NULL),(4,NULL)) t(i,v);
- i | sum
----+-----
- 1 | 3.3
- 2 | 2.2
- 3 |
- 4 |
-(4 rows)
-
-SELECT SUM(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
-  FROM (VALUES(1,1.01),(2,2),(3,3)) v(i,n);
- sum
-------
- 6.01
-    5
-    3
-(3 rows)
-
-SELECT i,COUNT(v) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
-  FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
- i | count
----+-------
- 1 |     2
- 2 |     1
- 3 |     0
- 4 |     0
-(4 rows)
-
-SELECT i,COUNT(*) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
-  FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
- i | count
----+-------
- 1 |     4
- 2 |     3
- 3 |     2
- 4 |     1
-(4 rows)
-
-SELECT VAR_POP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
-  FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
-        var_pop
------------------------
-    21704.000000000000
-    13868.750000000000
-    11266.666666666667
- 4225.0000000000000000
-                     0
-(5 rows)
-
-SELECT VAR_POP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
-  FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
-        var_pop
------------------------ - 21704.000000000000 - 13868.750000000000 - 11266.666666666667 - 4225.0000000000000000 - 0 -(5 rows) - -SELECT VAR_POP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - var_pop ------------------------ - 21704.000000000000 - 13868.750000000000 - 11266.666666666667 - 4225.0000000000000000 - 0 -(5 rows) - -SELECT VAR_POP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - var_pop ------------------------ - 21704.000000000000 - 13868.750000000000 - 11266.666666666667 - 4225.0000000000000000 - 0 -(5 rows) - -SELECT VAR_SAMP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - var_samp ------------------------ - 27130.000000000000 - 18491.666666666667 - 16900.000000000000 - 8450.0000000000000000 - -(5 rows) - -SELECT VAR_SAMP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - var_samp ------------------------ - 27130.000000000000 - 18491.666666666667 - 16900.000000000000 - 8450.0000000000000000 - -(5 rows) - -SELECT VAR_SAMP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - var_samp ------------------------ - 27130.000000000000 - 18491.666666666667 - 16900.000000000000 - 8450.0000000000000000 - -(5 rows) - -SELECT VAR_SAMP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - var_samp ------------------------ - 27130.000000000000 - 18491.666666666667 - 16900.000000000000 - 8450.0000000000000000 - -(5 rows) - -SELECT VARIANCE(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - variance ------------------------ - 27130.000000000000 - 18491.666666666667 - 16900.000000000000 - 8450.0000000000000000 - -(5 rows) - -SELECT VARIANCE(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - variance ------------------------ - 27130.000000000000 - 18491.666666666667 - 16900.000000000000 - 8450.0000000000000000 - -(5 rows) - -SELECT VARIANCE(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - variance ------------------------ - 27130.000000000000 - 18491.666666666667 - 16900.000000000000 - 8450.0000000000000000 - -(5 rows) - -SELECT VARIANCE(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - variance ------------------------ - 27130.000000000000 - 18491.666666666667 - 16900.000000000000 - 8450.0000000000000000 - -(5 rows) - -SELECT STDDEV_POP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); - stddev_pop ---------------------- - 147.322774885623 - 147.322774885623 - 117.765657133139 - 106.144555520604 - 65.0000000000000000 - 0 -(6 rows) - -SELECT STDDEV_POP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM 
(VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); - stddev_pop ---------------------- - 147.322774885623 - 147.322774885623 - 117.765657133139 - 106.144555520604 - 65.0000000000000000 - 0 -(6 rows) - -SELECT STDDEV_POP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); - stddev_pop ---------------------- - 147.322774885623 - 147.322774885623 - 117.765657133139 - 106.144555520604 - 65.0000000000000000 - 0 -(6 rows) - -SELECT STDDEV_POP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); - stddev_pop ---------------------- - 147.322774885623 - 147.322774885623 - 117.765657133139 - 106.144555520604 - 65.0000000000000000 - 0 -(6 rows) - -SELECT STDDEV_SAMP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); - stddev_samp ---------------------- - 164.711869639076 - 164.711869639076 - 135.984067694222 - 130.000000000000 - 91.9238815542511782 - -(6 rows) - -SELECT STDDEV_SAMP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); - stddev_samp ---------------------- - 164.711869639076 - 164.711869639076 - 135.984067694222 - 130.000000000000 - 91.9238815542511782 - -(6 rows) - -SELECT STDDEV_SAMP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); - stddev_samp ---------------------- - 164.711869639076 - 164.711869639076 - 135.984067694222 - 130.000000000000 - 91.9238815542511782 - -(6 rows) - -SELECT STDDEV_SAMP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); - stddev_samp ---------------------- - 164.711869639076 - 164.711869639076 - 135.984067694222 - 130.000000000000 - 91.9238815542511782 - -(6 rows) - -SELECT STDDEV(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - stddev ---------------------- - 164.711869639076 - 164.711869639076 - 135.984067694222 - 130.000000000000 - 91.9238815542511782 - -(6 rows) - -SELECT STDDEV(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - stddev ---------------------- - 164.711869639076 - 164.711869639076 - 135.984067694222 - 130.000000000000 - 91.9238815542511782 - -(6 rows) - -SELECT STDDEV(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - stddev ---------------------- - 164.711869639076 - 164.711869639076 - 135.984067694222 - 130.000000000000 - 91.9238815542511782 - -(6 rows) - -SELECT STDDEV(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - stddev ---------------------- - 164.711869639076 - 164.711869639076 - 135.984067694222 - 130.000000000000 - 91.9238815542511782 - -(6 rows) - --- test that inverse transition functions work with various frame options -SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND CURRENT ROW) - FROM 
(VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); - i | sum ----+----- - 1 | 1 - 2 | 2 - 3 | - 4 | -(4 rows) - -SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING) - FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); - i | sum ----+----- - 1 | 3 - 2 | 2 - 3 | - 4 | -(4 rows) - -SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) - FROM (VALUES(1,1),(2,2),(3,3),(4,4)) t(i,v); - i | sum ----+----- - 1 | 3 - 2 | 6 - 3 | 9 - 4 | 7 -(4 rows) - --- ensure aggregate over numeric properly recovers from NaN values -SELECT a, b, - SUM(b) OVER(ORDER BY A ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) -FROM (VALUES(1,1::numeric),(2,2),(3,'NaN'),(4,3),(5,4)) t(a,b); - a | b | sum ----+-----+----- - 1 | 1 | 1 - 2 | 2 | 3 - 3 | NaN | NaN - 4 | 3 | NaN - 5 | 4 | 7 -(5 rows) - --- It might be tempting for someone to add an inverse trans function for --- float and double precision. This should not be done as it can give incorrect --- results. This test should fail if anyone ever does this without thinking too --- hard about it. -SELECT to_char(SUM(n::float8) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING),'999999999999999999999D9') - FROM (VALUES(1,1e20),(2,1)) n(i,n); - to_char --------------------------- - 100000000000000000000 - 1.0 -(2 rows) - -SELECT i, b, bool_and(b) OVER w, bool_or(b) OVER w - FROM (VALUES (1,true), (2,true), (3,false), (4,false), (5,true)) v(i,b) - WINDOW w AS (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING); - i | b | bool_and | bool_or ----+---+----------+--------- - 1 | t | t | t - 2 | t | f | t - 3 | f | f | f - 4 | f | f | t - 5 | t | t | t -(5 rows) - --- --- Test WindowAgg costing takes into account the number of rows that need to --- be fetched before the first row can be output. --- --- Ensure we get a cheap start up plan as the WindowAgg can output the first --- row after reading 1 row from the join. -EXPLAIN (COSTS OFF) -SELECT COUNT(*) OVER (ORDER BY t1.unique1) -FROM tenk1 t1 INNER JOIN tenk1 t2 ON t1.unique1 = t2.tenthous -LIMIT 1; - QUERY PLAN --------------------------------------------------------------------------- - Limit - -> WindowAgg - -> Nested Loop - -> Index Only Scan using tenk1_unique1 on tenk1 t1 - -> Index Only Scan using tenk1_thous_tenthous on tenk1 t2 - Index Cond: (tenthous = t1.unique1) -(6 rows) - --- Ensure we get a cheap total plan. Lack of ORDER BY in the WindowClause --- means that all rows must be read from the join, so a cheap startup plan --- isn't a good choice. -EXPLAIN (COSTS OFF) -SELECT COUNT(*) OVER () -FROM tenk1 t1 INNER JOIN tenk1 t2 ON t1.unique1 = t2.tenthous -WHERE t2.two = 1 -LIMIT 1; - QUERY PLAN -------------------------------------------------------------------- - Limit - -> WindowAgg - -> Hash Join - Hash Cond: (t1.unique1 = t2.tenthous) - -> Index Only Scan using tenk1_unique1 on tenk1 t1 - -> Hash - -> Seq Scan on tenk1 t2 - Filter: (two = 1) -(8 rows) - --- Ensure we get a cheap total plan. This time use UNBOUNDED FOLLOWING, which --- needs to read all join rows to output the first WindowAgg row. 
-EXPLAIN (COSTS OFF) -SELECT COUNT(*) OVER (ORDER BY t1.unique1 ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) -FROM tenk1 t1 INNER JOIN tenk1 t2 ON t1.unique1 = t2.tenthous -LIMIT 1; - QUERY PLAN --------------------------------------------------------------------------------- - Limit - -> WindowAgg - -> Merge Join - Merge Cond: (t1.unique1 = t2.tenthous) - -> Index Only Scan using tenk1_unique1 on tenk1 t1 - -> Sort - Sort Key: t2.tenthous - -> Index Only Scan using tenk1_thous_tenthous on tenk1 t2 -(8 rows) - --- Ensure we get a cheap total plan. This time use 10000 FOLLOWING so we need --- to read all join rows. -EXPLAIN (COSTS OFF) -SELECT COUNT(*) OVER (ORDER BY t1.unique1 ROWS BETWEEN UNBOUNDED PRECEDING AND 10000 FOLLOWING) -FROM tenk1 t1 INNER JOIN tenk1 t2 ON t1.unique1 = t2.tenthous -LIMIT 1; - QUERY PLAN --------------------------------------------------------------------------------- - Limit - -> WindowAgg - -> Merge Join - Merge Cond: (t1.unique1 = t2.tenthous) - -> Index Only Scan using tenk1_unique1 on tenk1 t1 - -> Sort - Sort Key: t2.tenthous - -> Index Only Scan using tenk1_thous_tenthous on tenk1 t2 -(8 rows) - --- Tests for problems with failure to walk or mutate expressions --- within window frame clauses. --- test walker (fails with collation error if expressions are not walked) -SELECT array_agg(i) OVER w - FROM generate_series(1,5) i -WINDOW w AS (ORDER BY i ROWS BETWEEN (('foo' < 'foobar')::integer) PRECEDING AND CURRENT ROW); - array_agg ------------ - {1} - {1,2} - {2,3} - {3,4} - {4,5} -(5 rows) - --- test mutator (fails when inlined if expressions are not mutated) -CREATE FUNCTION pg_temp.f(group_size BIGINT) RETURNS SETOF integer[] -AS $$ - SELECT array_agg(s) OVER w - FROM generate_series(1,5) s - WINDOW w AS (ORDER BY s ROWS BETWEEN CURRENT ROW AND GROUP_SIZE FOLLOWING) -$$ LANGUAGE SQL STABLE; -EXPLAIN (costs off) SELECT * FROM pg_temp.f(2); - QUERY PLAN ------------------------------------------------------- - Subquery Scan on f - -> WindowAgg - -> Sort - Sort Key: s.s - -> Function Scan on generate_series s -(5 rows) - -SELECT * FROM pg_temp.f(2); - f ---------- - {1,2,3} - {2,3,4} - {3,4,5} - {4,5} - {5} -(5 rows) - +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
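
Note on the window.out hunk above: the removed lines are crash fallout rather than genuine output mismatches, since the backend died before these statements ran. Much of the lost coverage exercises the planner's run-condition optimization, where a qual over a monotonically increasing window function (row_number(), rank(), count()) is checked inside the WindowAgg node so execution can stop early. A minimal standalone sketch of that behaviour; the scratch table t is illustrative and not part of the regression suite:

CREATE TEMP TABLE t AS
SELECT g AS x FROM generate_series(1, 1000) g;  -- toy data

-- row_number() is monotonically increasing, so the planner can fold the
-- outer qual into a "Run Condition" on the WindowAgg node, and the scan
-- can stop once two rows have been emitted.
EXPLAIN (COSTS OFF)
SELECT * FROM (SELECT x, row_number() OVER (ORDER BY x) rn FROM t) s
WHERE rn < 3;
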
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/xmlmap_1.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/xmlmap.out --- /tmp/cirrus-ci-build/src/test/regress/expected/xmlmap_1.out 2024-03-07 14:25:00.335112000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/xmlmap.out 2024-03-07 14:27:17.339155000 +0000 @@ -1,107 +1,2 @@ -CREATE SCHEMA testxmlschema; -CREATE TABLE testxmlschema.test1 (a int, b text); -INSERT INTO testxmlschema.test1 VALUES (1, 'one'), (2, 'two'), (-1, null); -CREATE DOMAIN testxmldomain AS varchar; -CREATE TABLE testxmlschema.test2 (z int, y varchar(500), x char(6), - w numeric(9,2), v smallint, u bigint, t real, - s time, stz timetz, r timestamp, rtz timestamptz, q date, - p xml, o testxmldomain, n bool, m bytea, aaa text); -ALTER TABLE testxmlschema.test2 DROP COLUMN aaa; -INSERT INTO testxmlschema.test2 VALUES (55, 'abc', 'def', - 98.6, 2, 999, 0, - '21:07', '21:11 +05', '2009-06-08 21:07:30', '2009-06-08 21:07:30 -07', '2009-06-08', - NULL, 'ABC', true, 'XYZ'); -SELECT table_to_xml('testxmlschema.test1', false, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xml('testxmlschema.test1', true, false, 'foo'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xml('testxmlschema.test1', false, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xml('testxmlschema.test1', true, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xml('testxmlschema.test2', false, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xmlschema('testxmlschema.test1', false, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xmlschema('testxmlschema.test1', true, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xmlschema('testxmlschema.test1', false, true, 'foo'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xmlschema('testxmlschema.test1', true, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xmlschema('testxmlschema.test2', false, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xml_and_xmlschema('testxmlschema.test1', false, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xml_and_xmlschema('testxmlschema.test1', true, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xml_and_xmlschema('testxmlschema.test1', false, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. 
-SELECT table_to_xml_and_xmlschema('testxmlschema.test1', true, true, 'foo'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT query_to_xml('SELECT * FROM testxmlschema.test1', false, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT query_to_xmlschema('SELECT * FROM testxmlschema.test1', false, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT query_to_xml_and_xmlschema('SELECT * FROM testxmlschema.test1', true, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -DECLARE xc CURSOR WITH HOLD FOR SELECT * FROM testxmlschema.test1 ORDER BY 1, 2; -SELECT cursor_to_xml('xc'::refcursor, 5, false, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT cursor_to_xmlschema('xc'::refcursor, false, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -MOVE BACKWARD ALL IN xc; -SELECT cursor_to_xml('xc'::refcursor, 5, true, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT cursor_to_xmlschema('xc'::refcursor, true, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT schema_to_xml('testxmlschema', false, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT schema_to_xml('testxmlschema', true, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT schema_to_xmlschema('testxmlschema', false, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT schema_to_xmlschema('testxmlschema', true, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT schema_to_xml_and_xmlschema('testxmlschema', true, true, 'foo'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. --- test that domains are transformed like their base types -CREATE DOMAIN testboolxmldomain AS bool; -CREATE DOMAIN testdatexmldomain AS date; -CREATE TABLE testxmlschema.test3 - AS SELECT true c1, - true::testboolxmldomain c2, - '2013-02-21'::date c3, - '2013-02-21'::testdatexmldomain c4; -SELECT xmlforest(c1, c2, c3, c4) FROM testxmlschema.test3; -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xml('testxmlschema.test3', true, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
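
The xmlmap hunk is the same failure mode: xmlmap_1.out is the alternate expected file for builds without libxml, which is why every mapping call is expected to raise "unsupported XML feature", and the results file instead shows the post-crash connection error. On a server built with libxml the same functions succeed. A hedged sketch of what they do; the demo table is illustrative:

-- Requires a server configured --with-libxml.
CREATE TABLE demo (a int, b text);
INSERT INTO demo VALUES (1, 'one');
-- Arguments: table, include nulls, tableforest mode, target namespace.
SELECT table_to_xml('demo', false, false, '');
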
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/functional_deps.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/functional_deps.out --- /tmp/cirrus-ci-build/src/test/regress/expected/functional_deps.out 2024-03-07 14:25:00.330644000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/functional_deps.out 2024-03-07 14:27:17.343083000 +0000 @@ -1,232 +1,2 @@ --- from http://www.depesz.com/index.php/2010/04/19/getting-unique-elements/ -CREATE TEMP TABLE articles ( - id int CONSTRAINT articles_pkey PRIMARY KEY, - keywords text, - title text UNIQUE NOT NULL, - body text UNIQUE, - created date -); -CREATE TEMP TABLE articles_in_category ( - article_id int, - category_id int, - changed date, - PRIMARY KEY (article_id, category_id) -); --- test functional dependencies based on primary keys/unique constraints --- base tables --- group by primary key (OK) -SELECT id, keywords, title, body, created -FROM articles -GROUP BY id; - id | keywords | title | body | created -----+----------+-------+------+--------- -(0 rows) - --- group by unique not null (fail/todo) -SELECT id, keywords, title, body, created -FROM articles -GROUP BY title; -ERROR: column "articles.id" must appear in the GROUP BY clause or be used in an aggregate function -LINE 1: SELECT id, keywords, title, body, created - ^ --- group by unique nullable (fail) -SELECT id, keywords, title, body, created -FROM articles -GROUP BY body; -ERROR: column "articles.id" must appear in the GROUP BY clause or be used in an aggregate function -LINE 1: SELECT id, keywords, title, body, created - ^ --- group by something else (fail) -SELECT id, keywords, title, body, created -FROM articles -GROUP BY keywords; -ERROR: column "articles.id" must appear in the GROUP BY clause or be used in an aggregate function -LINE 1: SELECT id, keywords, title, body, created - ^ --- multiple tables --- group by primary key (OK) -SELECT a.id, a.keywords, a.title, a.body, a.created -FROM articles AS a, articles_in_category AS aic -WHERE a.id = aic.article_id AND aic.category_id in (14,62,70,53,138) -GROUP BY a.id; - id | keywords | title | body | created -----+----------+-------+------+--------- -(0 rows) - --- group by something else (fail) -SELECT a.id, a.keywords, a.title, a.body, a.created -FROM articles AS a, articles_in_category AS aic -WHERE a.id = aic.article_id AND aic.category_id in (14,62,70,53,138) -GROUP BY aic.article_id, aic.category_id; -ERROR: column "a.id" must appear in the GROUP BY clause or be used in an aggregate function -LINE 1: SELECT a.id, a.keywords, a.title, a.body, a.created - ^ --- JOIN syntax --- group by left table's primary key (OK) -SELECT a.id, a.keywords, a.title, a.body, a.created -FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id -WHERE aic.category_id in (14,62,70,53,138) -GROUP BY a.id; - id | keywords | title | body | created -----+----------+-------+------+--------- -(0 rows) - --- group by something else (fail) -SELECT a.id, a.keywords, a.title, a.body, a.created -FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id -WHERE aic.category_id in (14,62,70,53,138) -GROUP BY aic.article_id, aic.category_id; -ERROR: column "a.id" must appear in the GROUP BY clause or be used in an aggregate function -LINE 1: SELECT a.id, a.keywords, a.title, a.body, a.created - ^ --- group by right table's (composite) primary key (OK) -SELECT aic.changed -FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id -WHERE aic.category_id in 
(14,62,70,53,138) -GROUP BY aic.category_id, aic.article_id; - changed ---------- -(0 rows) - --- group by right table's partial primary key (fail) -SELECT aic.changed -FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id -WHERE aic.category_id in (14,62,70,53,138) -GROUP BY aic.article_id; -ERROR: column "aic.changed" must appear in the GROUP BY clause or be used in an aggregate function -LINE 1: SELECT aic.changed - ^ --- example from documentation -CREATE TEMP TABLE products (product_id int, name text, price numeric); -CREATE TEMP TABLE sales (product_id int, units int); --- OK -SELECT product_id, p.name, (sum(s.units) * p.price) AS sales - FROM products p LEFT JOIN sales s USING (product_id) - GROUP BY product_id, p.name, p.price; - product_id | name | sales -------------+------+------- -(0 rows) - --- fail -SELECT product_id, p.name, (sum(s.units) * p.price) AS sales - FROM products p LEFT JOIN sales s USING (product_id) - GROUP BY product_id; -ERROR: column "p.name" must appear in the GROUP BY clause or be used in an aggregate function -LINE 1: SELECT product_id, p.name, (sum(s.units) * p.price) AS sales - ^ -ALTER TABLE products ADD PRIMARY KEY (product_id); --- OK now -SELECT product_id, p.name, (sum(s.units) * p.price) AS sales - FROM products p LEFT JOIN sales s USING (product_id) - GROUP BY product_id; - product_id | name | sales -------------+------+------- -(0 rows) - --- Drupal example, http://drupal.org/node/555530 -CREATE TEMP TABLE node ( - nid SERIAL, - vid integer NOT NULL default '0', - type varchar(32) NOT NULL default '', - title varchar(128) NOT NULL default '', - uid integer NOT NULL default '0', - status integer NOT NULL default '1', - created integer NOT NULL default '0', - -- snip - PRIMARY KEY (nid, vid) -); -CREATE TEMP TABLE users ( - uid integer NOT NULL default '0', - name varchar(60) NOT NULL default '', - pass varchar(32) NOT NULL default '', - -- snip - PRIMARY KEY (uid), - UNIQUE (name) -); --- OK -SELECT u.uid, u.name FROM node n -INNER JOIN users u ON u.uid = n.uid -WHERE n.type = 'blog' AND n.status = 1 -GROUP BY u.uid, u.name; - uid | name ------+------ -(0 rows) - --- OK -SELECT u.uid, u.name FROM node n -INNER JOIN users u ON u.uid = n.uid -WHERE n.type = 'blog' AND n.status = 1 -GROUP BY u.uid; - uid | name ------+------ -(0 rows) - --- Check views and dependencies --- fail -CREATE TEMP VIEW fdv1 AS -SELECT id, keywords, title, body, created -FROM articles -GROUP BY body; -ERROR: column "articles.id" must appear in the GROUP BY clause or be used in an aggregate function -LINE 2: SELECT id, keywords, title, body, created - ^ --- OK -CREATE TEMP VIEW fdv1 AS -SELECT id, keywords, title, body, created -FROM articles -GROUP BY id; --- fail -ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it -DETAIL: view fdv1 depends on constraint articles_pkey on table articles -HINT: Use DROP ... CASCADE to drop the dependent objects too. 
-DROP VIEW fdv1; --- multiple dependencies -CREATE TEMP VIEW fdv2 AS -SELECT a.id, a.keywords, a.title, aic.category_id, aic.changed -FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id -WHERE aic.category_id in (14,62,70,53,138) -GROUP BY a.id, aic.category_id, aic.article_id; -ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -- fail -ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it -DETAIL: view fdv2 depends on constraint articles_pkey on table articles -HINT: Use DROP ... CASCADE to drop the dependent objects too. -ALTER TABLE articles_in_category DROP CONSTRAINT articles_in_category_pkey RESTRICT; --fail -ERROR: cannot drop constraint articles_in_category_pkey on table articles_in_category because other objects depend on it -DETAIL: view fdv2 depends on constraint articles_in_category_pkey on table articles_in_category -HINT: Use DROP ... CASCADE to drop the dependent objects too. -DROP VIEW fdv2; --- nested queries -CREATE TEMP VIEW fdv3 AS -SELECT id, keywords, title, body, created -FROM articles -GROUP BY id -UNION -SELECT id, keywords, title, body, created -FROM articles -GROUP BY id; -ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -- fail -ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it -DETAIL: view fdv3 depends on constraint articles_pkey on table articles -HINT: Use DROP ... CASCADE to drop the dependent objects too. -DROP VIEW fdv3; -CREATE TEMP VIEW fdv4 AS -SELECT * FROM articles WHERE title IN (SELECT title FROM articles GROUP BY id); -ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -- fail -ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it -DETAIL: view fdv4 depends on constraint articles_pkey on table articles -HINT: Use DROP ... CASCADE to drop the dependent objects too. -DROP VIEW fdv4; --- prepared query plans: this results in failure on reuse -PREPARE foo AS - SELECT id, keywords, title, body, created - FROM articles - GROUP BY id; -EXECUTE foo; - id | keywords | title | body | created -----+----------+-------+------+--------- -(0 rows) - -ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -EXECUTE foo; -- fail -ERROR: column "articles.keywords" must appear in the GROUP BY clause or be used in an aggregate function +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
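
The functional_deps diff above is a secondary failure: none of the expected statements ran, because psql could not connect while the server was still in recovery ("the database system is not yet accepting connections" / "Consistent recovery state has not been yet reached"). The deleted expected output exercises the SQL:1999 functional-dependency rule: once a table's primary key is listed in GROUP BY, the remaining columns of that table may appear ungrouped, because the key determines them. A minimal sketch of that rule follows; the table fd_demo is hypothetical, not part of the test suite:

-- Hypothetical example (fd_demo is illustrative only, not from the suite).
CREATE TEMP TABLE fd_demo (id int PRIMARY KEY, payload text);
-- Accepted: payload is functionally dependent on the grouped key column.
SELECT id, payload FROM fd_demo GROUP BY id;
-- Rejected with 'column "fd_demo.id" must appear in the GROUP BY clause or
-- be used in an aggregate function': payload does not determine id.
-- SELECT id, payload FROM fd_demo GROUP BY payload;

Dropping the PRIMARY KEY constraint revokes the dependency, which is why the deleted output also checks that dependent views block DROP CONSTRAINT ... RESTRICT and that a prepared plan relying on the dependency fails on re-execution once the constraint is gone.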
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/advisory_lock.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/advisory_lock.out --- /tmp/cirrus-ci-build/src/test/regress/expected/advisory_lock.out 2024-03-07 14:25:00.329128000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/advisory_lock.out 2024-03-07 14:27:17.341547000 +0000 @@ -1,276 +1,2 @@ --- --- ADVISORY LOCKS --- -SELECT oid AS datoid FROM pg_database WHERE datname = current_database() \gset -BEGIN; -SELECT - pg_advisory_xact_lock(1), pg_advisory_xact_lock_shared(2), - pg_advisory_xact_lock(1, 1), pg_advisory_xact_lock_shared(2, 2); - pg_advisory_xact_lock | pg_advisory_xact_lock_shared | pg_advisory_xact_lock | pg_advisory_xact_lock_shared ------------------------+------------------------------+-----------------------+------------------------------ - | | | -(1 row) - -SELECT locktype, classid, objid, objsubid, mode, granted - FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid - ORDER BY classid, objid, objsubid; - locktype | classid | objid | objsubid | mode | granted -----------+---------+-------+----------+---------------+--------- - advisory | 0 | 1 | 1 | ExclusiveLock | t - advisory | 0 | 2 | 1 | ShareLock | t - advisory | 1 | 1 | 2 | ExclusiveLock | t - advisory | 2 | 2 | 2 | ShareLock | t -(4 rows) - --- pg_advisory_unlock_all() shouldn't release xact locks -SELECT pg_advisory_unlock_all(); - pg_advisory_unlock_all ------------------------- - -(1 row) - -SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid; - count -------- - 4 -(1 row) - --- can't unlock xact locks -SELECT - pg_advisory_unlock(1), pg_advisory_unlock_shared(2), - pg_advisory_unlock(1, 1), pg_advisory_unlock_shared(2, 2); -WARNING: you don't own a lock of type ExclusiveLock -WARNING: you don't own a lock of type ShareLock -WARNING: you don't own a lock of type ExclusiveLock -WARNING: you don't own a lock of type ShareLock - pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock | pg_advisory_unlock_shared ---------------------+---------------------------+--------------------+--------------------------- - f | f | f | f -(1 row) - --- automatically release xact locks at commit -COMMIT; -SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid; - count -------- - 0 -(1 row) - -BEGIN; --- holding both session and xact locks on the same objects, xact first -SELECT - pg_advisory_xact_lock(1), pg_advisory_xact_lock_shared(2), - pg_advisory_xact_lock(1, 1), pg_advisory_xact_lock_shared(2, 2); - pg_advisory_xact_lock | pg_advisory_xact_lock_shared | pg_advisory_xact_lock | pg_advisory_xact_lock_shared ------------------------+------------------------------+-----------------------+------------------------------ - | | | -(1 row) - -SELECT locktype, classid, objid, objsubid, mode, granted - FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid - ORDER BY classid, objid, objsubid; - locktype | classid | objid | objsubid | mode | granted -----------+---------+-------+----------+---------------+--------- - advisory | 0 | 1 | 1 | ExclusiveLock | t - advisory | 0 | 2 | 1 | ShareLock | t - advisory | 1 | 1 | 2 | ExclusiveLock | t - advisory | 2 | 2 | 2 | ShareLock | t -(4 rows) - -SELECT - pg_advisory_lock(1), pg_advisory_lock_shared(2), - pg_advisory_lock(1, 1), pg_advisory_lock_shared(2, 2); - pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock | pg_advisory_lock_shared 
-------------------+-------------------------+------------------+------------------------- - | | | -(1 row) - -ROLLBACK; -SELECT locktype, classid, objid, objsubid, mode, granted - FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid - ORDER BY classid, objid, objsubid; - locktype | classid | objid | objsubid | mode | granted -----------+---------+-------+----------+---------------+--------- - advisory | 0 | 1 | 1 | ExclusiveLock | t - advisory | 0 | 2 | 1 | ShareLock | t - advisory | 1 | 1 | 2 | ExclusiveLock | t - advisory | 2 | 2 | 2 | ShareLock | t -(4 rows) - --- unlocking session locks -SELECT - pg_advisory_unlock(1), pg_advisory_unlock(1), - pg_advisory_unlock_shared(2), pg_advisory_unlock_shared(2), - pg_advisory_unlock(1, 1), pg_advisory_unlock(1, 1), - pg_advisory_unlock_shared(2, 2), pg_advisory_unlock_shared(2, 2); -WARNING: you don't own a lock of type ExclusiveLock -WARNING: you don't own a lock of type ShareLock -WARNING: you don't own a lock of type ExclusiveLock -WARNING: you don't own a lock of type ShareLock - pg_advisory_unlock | pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock_shared | pg_advisory_unlock | pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock_shared ---------------------+--------------------+---------------------------+---------------------------+--------------------+--------------------+---------------------------+--------------------------- - t | f | t | f | t | f | t | f -(1 row) - -SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid; - count -------- - 0 -(1 row) - -BEGIN; --- holding both session and xact locks on the same objects, session first -SELECT - pg_advisory_lock(1), pg_advisory_lock_shared(2), - pg_advisory_lock(1, 1), pg_advisory_lock_shared(2, 2); - pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock | pg_advisory_lock_shared -------------------+-------------------------+------------------+------------------------- - | | | -(1 row) - -SELECT locktype, classid, objid, objsubid, mode, granted - FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid - ORDER BY classid, objid, objsubid; - locktype | classid | objid | objsubid | mode | granted -----------+---------+-------+----------+---------------+--------- - advisory | 0 | 1 | 1 | ExclusiveLock | t - advisory | 0 | 2 | 1 | ShareLock | t - advisory | 1 | 1 | 2 | ExclusiveLock | t - advisory | 2 | 2 | 2 | ShareLock | t -(4 rows) - -SELECT - pg_advisory_xact_lock(1), pg_advisory_xact_lock_shared(2), - pg_advisory_xact_lock(1, 1), pg_advisory_xact_lock_shared(2, 2); - pg_advisory_xact_lock | pg_advisory_xact_lock_shared | pg_advisory_xact_lock | pg_advisory_xact_lock_shared ------------------------+------------------------------+-----------------------+------------------------------ - | | | -(1 row) - -ROLLBACK; -SELECT locktype, classid, objid, objsubid, mode, granted - FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid - ORDER BY classid, objid, objsubid; - locktype | classid | objid | objsubid | mode | granted -----------+---------+-------+----------+---------------+--------- - advisory | 0 | 1 | 1 | ExclusiveLock | t - advisory | 0 | 2 | 1 | ShareLock | t - advisory | 1 | 1 | 2 | ExclusiveLock | t - advisory | 2 | 2 | 2 | ShareLock | t -(4 rows) - --- releasing all session locks -SELECT pg_advisory_unlock_all(); - pg_advisory_unlock_all ------------------------- - -(1 row) - -SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid; - count -------- - 0 -(1 
row) - -BEGIN; --- grabbing txn locks multiple times -SELECT - pg_advisory_xact_lock(1), pg_advisory_xact_lock(1), - pg_advisory_xact_lock_shared(2), pg_advisory_xact_lock_shared(2), - pg_advisory_xact_lock(1, 1), pg_advisory_xact_lock(1, 1), - pg_advisory_xact_lock_shared(2, 2), pg_advisory_xact_lock_shared(2, 2); - pg_advisory_xact_lock | pg_advisory_xact_lock | pg_advisory_xact_lock_shared | pg_advisory_xact_lock_shared | pg_advisory_xact_lock | pg_advisory_xact_lock | pg_advisory_xact_lock_shared | pg_advisory_xact_lock_shared ------------------------+-----------------------+------------------------------+------------------------------+-----------------------+-----------------------+------------------------------+------------------------------ - | | | | | | | -(1 row) - -SELECT locktype, classid, objid, objsubid, mode, granted - FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid - ORDER BY classid, objid, objsubid; - locktype | classid | objid | objsubid | mode | granted -----------+---------+-------+----------+---------------+--------- - advisory | 0 | 1 | 1 | ExclusiveLock | t - advisory | 0 | 2 | 1 | ShareLock | t - advisory | 1 | 1 | 2 | ExclusiveLock | t - advisory | 2 | 2 | 2 | ShareLock | t -(4 rows) - -COMMIT; -SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid; - count -------- - 0 -(1 row) - --- grabbing session locks multiple times -SELECT - pg_advisory_lock(1), pg_advisory_lock(1), - pg_advisory_lock_shared(2), pg_advisory_lock_shared(2), - pg_advisory_lock(1, 1), pg_advisory_lock(1, 1), - pg_advisory_lock_shared(2, 2), pg_advisory_lock_shared(2, 2); - pg_advisory_lock | pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock_shared | pg_advisory_lock | pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock_shared -------------------+------------------+-------------------------+-------------------------+------------------+------------------+-------------------------+------------------------- - | | | | | | | -(1 row) - -SELECT locktype, classid, objid, objsubid, mode, granted - FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid - ORDER BY classid, objid, objsubid; - locktype | classid | objid | objsubid | mode | granted -----------+---------+-------+----------+---------------+--------- - advisory | 0 | 1 | 1 | ExclusiveLock | t - advisory | 0 | 2 | 1 | ShareLock | t - advisory | 1 | 1 | 2 | ExclusiveLock | t - advisory | 2 | 2 | 2 | ShareLock | t -(4 rows) - -SELECT - pg_advisory_unlock(1), pg_advisory_unlock(1), - pg_advisory_unlock_shared(2), pg_advisory_unlock_shared(2), - pg_advisory_unlock(1, 1), pg_advisory_unlock(1, 1), - pg_advisory_unlock_shared(2, 2), pg_advisory_unlock_shared(2, 2); - pg_advisory_unlock | pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock_shared | pg_advisory_unlock | pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock_shared ---------------------+--------------------+---------------------------+---------------------------+--------------------+--------------------+---------------------------+--------------------------- - t | t | t | t | t | t | t | t -(1 row) - -SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid; - count -------- - 0 -(1 row) - --- .. 
and releasing them all at once -SELECT - pg_advisory_lock(1), pg_advisory_lock(1), - pg_advisory_lock_shared(2), pg_advisory_lock_shared(2), - pg_advisory_lock(1, 1), pg_advisory_lock(1, 1), - pg_advisory_lock_shared(2, 2), pg_advisory_lock_shared(2, 2); - pg_advisory_lock | pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock_shared | pg_advisory_lock | pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock_shared -------------------+------------------+-------------------------+-------------------------+------------------+------------------+-------------------------+------------------------- - | | | | | | | -(1 row) - -SELECT locktype, classid, objid, objsubid, mode, granted - FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid - ORDER BY classid, objid, objsubid; - locktype | classid | objid | objsubid | mode | granted -----------+---------+-------+----------+---------------+--------- - advisory | 0 | 1 | 1 | ExclusiveLock | t - advisory | 0 | 2 | 1 | ShareLock | t - advisory | 1 | 1 | 2 | ExclusiveLock | t - advisory | 2 | 2 | 2 | ShareLock | t -(4 rows) - -SELECT pg_advisory_unlock_all(); - pg_advisory_unlock_all ------------------------- - -(1 row) - -SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid; - count -------- - 0 -(1 row) - +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/indirect_toast.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/indirect_toast.out --- /tmp/cirrus-ci-build/src/test/regress/expected/indirect_toast.out 2024-03-07 14:25:00.331051000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/indirect_toast.out 2024-03-07 14:27:17.348866000 +0000 @@ -1,166 +1,2 @@ --- --- Tests for external toast datums --- --- directory paths and dlsuffix are passed to us in environment variables -\getenv libdir PG_LIBDIR -\getenv dlsuffix PG_DLSUFFIX -\set regresslib :libdir '/regress' :dlsuffix -CREATE FUNCTION make_tuple_indirect (record) - RETURNS record - AS :'regresslib' - LANGUAGE C STRICT; --- Other compression algorithms may cause the compressed data to be stored --- inline. pglz guarantees that the data is externalized, so stick to it. 
-SET default_toast_compression = 'pglz'; -CREATE TABLE indtoasttest(descr text, cnt int DEFAULT 0, f1 text, f2 text); -INSERT INTO indtoasttest(descr, f1, f2) VALUES('two-compressed', repeat('1234567890',1000), repeat('1234567890',1000)); -INSERT INTO indtoasttest(descr, f1, f2) VALUES('two-toasted', repeat('1234567890',30000), repeat('1234567890',50000)); -INSERT INTO indtoasttest(descr, f1, f2) VALUES('one-compressed,one-null', NULL, repeat('1234567890',1000)); -INSERT INTO indtoasttest(descr, f1, f2) VALUES('one-toasted,one-null', NULL, repeat('1234567890',50000)); --- check whether indirect tuples works on the most basic level -SELECT descr, substring(make_tuple_indirect(indtoasttest)::text, 1, 200) FROM indtoasttest; - descr | substring --------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - two-compressed | (two-compressed,0,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012 - two-toasted | (two-toasted,0,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345 - one-compressed,one-null | ("one-compressed,one-null",0,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - one-toasted,one-null | ("one-toasted,one-null",0,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - --- modification without changing varlenas -UPDATE indtoasttest SET cnt = cnt +1 RETURNING substring(indtoasttest::text, 1, 200); - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - (two-compressed,1,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012 - (two-toasted,1,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345 - ("one-compressed,one-null",1,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",1,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - --- modification without modifying assigned value -UPDATE indtoasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(indtoasttest::text, 1, 200); - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - 
(two-compressed,2,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012 - (two-toasted,2,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345 - ("one-compressed,one-null",2,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",2,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - --- modification modifying, but effectively not changing -UPDATE indtoasttest SET cnt = cnt +1, f1 = f1||'' RETURNING substring(indtoasttest::text, 1, 200); - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - (two-compressed,3,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012 - (two-toasted,3,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345 - ("one-compressed,one-null",3,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",3,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - -UPDATE indtoasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(indtoasttest::text, 1, 200); - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - (two-compressed,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 - (two-toasted,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234 - ("one-compressed,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - -SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - 
(two-compressed,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 - (two-toasted,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234 - ("one-compressed,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - --- check we didn't screw with main/toast tuple visibility -VACUUM FREEZE indtoasttest; -SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - (two-compressed,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 - (two-toasted,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234 - ("one-compressed,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - --- now create a trigger that forces all Datums to be indirect ones -CREATE FUNCTION update_using_indirect() - RETURNS trigger - LANGUAGE plpgsql AS $$ -BEGIN - NEW := make_tuple_indirect(NEW); - RETURN NEW; -END$$; -CREATE TRIGGER indtoasttest_update_indirect - BEFORE INSERT OR UPDATE - ON indtoasttest - FOR EACH ROW - EXECUTE PROCEDURE update_using_indirect(); --- modification without changing varlenas -UPDATE indtoasttest SET cnt = cnt +1 RETURNING substring(indtoasttest::text, 1, 200); - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - (two-compressed,5,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 - (two-toasted,5,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234 - ("one-compressed,one-null",5,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",5,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - --- 
modification without modifying assigned value -UPDATE indtoasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(indtoasttest::text, 1, 200); - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - (two-compressed,6,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 - (two-toasted,6,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234 - ("one-compressed,one-null",6,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",6,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - --- modification modifying, but effectively not changing -UPDATE indtoasttest SET cnt = cnt +1, f1 = f1||'' RETURNING substring(indtoasttest::text, 1, 200); - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - (two-compressed,7,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 - (two-toasted,7,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234 - ("one-compressed,one-null",7,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",7,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - -UPDATE indtoasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(indtoasttest::text, 1, 200); - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - (two-compressed,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - (two-toasted,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 - ("one-compressed,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - -INSERT INTO 
indtoasttest(descr, f1, f2) VALUES('one-toasted,one-null, via indirect', repeat('1234567890',30000), NULL); -SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - (two-compressed,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - (two-toasted,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 - ("one-compressed,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 - ("one-toasted,one-null, via indirect",0,1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 -(5 rows) - --- check we didn't screw with main/toast tuple visibility -VACUUM FREEZE indtoasttest; -SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - (two-compressed,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - (two-toasted,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 - ("one-compressed,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 - ("one-toasted,one-null, via indirect",0,1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 -(5 rows) - -DROP TABLE indtoasttest; -DROP FUNCTION update_using_indirect(); -RESET default_toast_compression; +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
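
The advisory_lock and indirect_toast diffs above show the same secondary failure mode: the whole expected output vanished because the connection attempt was rejected during recovery, not because any individual query misbehaved. The advisory-lock expected output documents the split between transaction-scoped locks (pg_advisory_xact_lock and friends, released automatically at transaction end and not releasable by hand) and session-scoped locks (pg_advisory_lock and friends, which survive COMMIT and must be released once per acquisition). A short sketch of that contract, independent of the test's bookkeeping queries; the key values 1 and 2 are arbitrary:

-- Transaction-level lock: released automatically, cannot be unlocked manually.
BEGIN;
SELECT pg_advisory_xact_lock(1);
SELECT pg_advisory_unlock(1);    -- f, with WARNING: not a session-level lock
COMMIT;                          -- xact-level lock released here
-- Session-level lock: survives COMMIT, stacks per acquisition.
SELECT pg_advisory_lock(2);
SELECT pg_advisory_lock(2);      -- re-acquired: now held twice
SELECT pg_advisory_unlock(2);    -- t, still held once
SELECT pg_advisory_unlock_all(); -- drops whatever session locks remain

The indirect_toast expected output, by contrast, cannot be spot-checked in plain SQL: it relies on make_tuple_indirect() from the C regress library to force indirect external Datums, so the deleted lines are the only record of that behaviour in this run.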
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/equivclass.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/equivclass.out --- /tmp/cirrus-ci-build/src/test/regress/expected/equivclass.out 2024-03-07 14:25:00.330244000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/equivclass.out 2024-03-07 14:27:17.341729000 +0000 @@ -1,483 +1,2 @@ --- --- Tests for the planner's "equivalence class" mechanism --- --- One thing that's not tested well during normal querying is the logic --- for handling "broken" ECs. This is because an EC can only become broken --- if its underlying btree operator family doesn't include a complete set --- of cross-type equality operators. There are not (and should not be) --- any such families built into Postgres; so we have to hack things up --- to create one. We do this by making two alias types that are really --- int8 (so we need no new C code) and adding only some operators for them --- into the standard integer_ops opfamily. -create type int8alias1; -create function int8alias1in(cstring) returns int8alias1 - strict immutable language internal as 'int8in'; -NOTICE: return type int8alias1 is only a shell -create function int8alias1out(int8alias1) returns cstring - strict immutable language internal as 'int8out'; -NOTICE: argument type int8alias1 is only a shell -create type int8alias1 ( - input = int8alias1in, - output = int8alias1out, - like = int8 -); -create type int8alias2; -create function int8alias2in(cstring) returns int8alias2 - strict immutable language internal as 'int8in'; -NOTICE: return type int8alias2 is only a shell -create function int8alias2out(int8alias2) returns cstring - strict immutable language internal as 'int8out'; -NOTICE: argument type int8alias2 is only a shell -create type int8alias2 ( - input = int8alias2in, - output = int8alias2out, - like = int8 -); -create cast (int8 as int8alias1) without function; -create cast (int8 as int8alias2) without function; -create cast (int8alias1 as int8) without function; -create cast (int8alias2 as int8) without function; -create function int8alias1eq(int8alias1, int8alias1) returns bool - strict immutable language internal as 'int8eq'; -create operator = ( - procedure = int8alias1eq, - leftarg = int8alias1, rightarg = int8alias1, - commutator = =, - restrict = eqsel, join = eqjoinsel, - merges -); -alter operator family integer_ops using btree add - operator 3 = (int8alias1, int8alias1); -create function int8alias2eq(int8alias2, int8alias2) returns bool - strict immutable language internal as 'int8eq'; -create operator = ( - procedure = int8alias2eq, - leftarg = int8alias2, rightarg = int8alias2, - commutator = =, - restrict = eqsel, join = eqjoinsel, - merges -); -alter operator family integer_ops using btree add - operator 3 = (int8alias2, int8alias2); -create function int8alias1eq(int8, int8alias1) returns bool - strict immutable language internal as 'int8eq'; -create operator = ( - procedure = int8alias1eq, - leftarg = int8, rightarg = int8alias1, - restrict = eqsel, join = eqjoinsel, - merges -); -alter operator family integer_ops using btree add - operator 3 = (int8, int8alias1); -create function int8alias1eq(int8alias1, int8alias2) returns bool - strict immutable language internal as 'int8eq'; -create operator = ( - procedure = int8alias1eq, - leftarg = int8alias1, rightarg = int8alias2, - restrict = eqsel, join = eqjoinsel, - merges -); -alter operator family integer_ops using btree add - operator 3 = (int8alias1, int8alias2); -create function 
int8alias1lt(int8alias1, int8alias1) returns bool - strict immutable language internal as 'int8lt'; -create operator < ( - procedure = int8alias1lt, - leftarg = int8alias1, rightarg = int8alias1 -); -alter operator family integer_ops using btree add - operator 1 < (int8alias1, int8alias1); -create function int8alias1cmp(int8, int8alias1) returns int - strict immutable language internal as 'btint8cmp'; -alter operator family integer_ops using btree add - function 1 int8alias1cmp (int8, int8alias1); -create table ec0 (ff int8 primary key, f1 int8, f2 int8); -create table ec1 (ff int8 primary key, f1 int8alias1, f2 int8alias2); -create table ec2 (xf int8 primary key, x1 int8alias1, x2 int8alias2); --- for the moment we only want to look at nestloop plans -set enable_hashjoin = off; -set enable_mergejoin = off; --- --- Note that for cases where there's a missing operator, we don't care so --- much whether the plan is ideal as that we don't fail or generate an --- outright incorrect plan. --- -explain (costs off) - select * from ec0 where ff = f1 and f1 = '42'::int8; - QUERY PLAN ------------------------------------ - Index Scan using ec0_pkey on ec0 - Index Cond: (ff = '42'::bigint) - Filter: (f1 = '42'::bigint) -(3 rows) - -explain (costs off) - select * from ec0 where ff = f1 and f1 = '42'::int8alias1; - QUERY PLAN ---------------------------------------- - Index Scan using ec0_pkey on ec0 - Index Cond: (ff = '42'::int8alias1) - Filter: (f1 = '42'::int8alias1) -(3 rows) - -explain (costs off) - select * from ec1 where ff = f1 and f1 = '42'::int8alias1; - QUERY PLAN ---------------------------------------- - Index Scan using ec1_pkey on ec1 - Index Cond: (ff = '42'::int8alias1) - Filter: (f1 = '42'::int8alias1) -(3 rows) - -explain (costs off) - select * from ec1 where ff = f1 and f1 = '42'::int8alias2; - QUERY PLAN ---------------------------------------------------- - Seq Scan on ec1 - Filter: ((ff = f1) AND (f1 = '42'::int8alias2)) -(2 rows) - -explain (costs off) - select * from ec1, ec2 where ff = x1 and ff = '42'::int8; - QUERY PLAN -------------------------------------------------------------------- - Nested Loop - Join Filter: (ec1.ff = ec2.x1) - -> Index Scan using ec1_pkey on ec1 - Index Cond: ((ff = '42'::bigint) AND (ff = '42'::bigint)) - -> Seq Scan on ec2 -(5 rows) - -explain (costs off) - select * from ec1, ec2 where ff = x1 and ff = '42'::int8alias1; - QUERY PLAN ---------------------------------------------- - Nested Loop - -> Index Scan using ec1_pkey on ec1 - Index Cond: (ff = '42'::int8alias1) - -> Seq Scan on ec2 - Filter: (x1 = '42'::int8alias1) -(5 rows) - -explain (costs off) - select * from ec1, ec2 where ff = x1 and '42'::int8 = x1; - QUERY PLAN ------------------------------------------ - Nested Loop - Join Filter: (ec1.ff = ec2.x1) - -> Index Scan using ec1_pkey on ec1 - Index Cond: (ff = '42'::bigint) - -> Seq Scan on ec2 - Filter: ('42'::bigint = x1) -(6 rows) - -explain (costs off) - select * from ec1, ec2 where ff = x1 and x1 = '42'::int8alias1; - QUERY PLAN ---------------------------------------------- - Nested Loop - -> Index Scan using ec1_pkey on ec1 - Index Cond: (ff = '42'::int8alias1) - -> Seq Scan on ec2 - Filter: (x1 = '42'::int8alias1) -(5 rows) - -explain (costs off) - select * from ec1, ec2 where ff = x1 and x1 = '42'::int8alias2; - QUERY PLAN ------------------------------------------ - Nested Loop - -> Seq Scan on ec2 - Filter: (x1 = '42'::int8alias2) - -> Index Scan using ec1_pkey on ec1 - Index Cond: (ff = ec2.x1) -(5 rows) - -create unique 
index ec1_expr1 on ec1((ff + 1)); -create unique index ec1_expr2 on ec1((ff + 2 + 1)); -create unique index ec1_expr3 on ec1((ff + 3 + 1)); -create unique index ec1_expr4 on ec1((ff + 4)); -explain (costs off) - select * from ec1, - (select ff + 1 as x from - (select ff + 2 as ff from ec1 - union all - select ff + 3 as ff from ec1) ss0 - union all - select ff + 4 as x from ec1) as ss1 - where ss1.x = ec1.f1 and ec1.ff = 42::int8; - QUERY PLAN ------------------------------------------------------ - Nested Loop - -> Index Scan using ec1_pkey on ec1 - Index Cond: (ff = '42'::bigint) - -> Append - -> Index Scan using ec1_expr2 on ec1 ec1_1 - Index Cond: (((ff + 2) + 1) = ec1.f1) - -> Index Scan using ec1_expr3 on ec1 ec1_2 - Index Cond: (((ff + 3) + 1) = ec1.f1) - -> Index Scan using ec1_expr4 on ec1 ec1_3 - Index Cond: ((ff + 4) = ec1.f1) -(10 rows) - -explain (costs off) - select * from ec1, - (select ff + 1 as x from - (select ff + 2 as ff from ec1 - union all - select ff + 3 as ff from ec1) ss0 - union all - select ff + 4 as x from ec1) as ss1 - where ss1.x = ec1.f1 and ec1.ff = 42::int8 and ec1.ff = ec1.f1; - QUERY PLAN -------------------------------------------------------------------- - Nested Loop - Join Filter: ((((ec1_1.ff + 2) + 1)) = ec1.f1) - -> Index Scan using ec1_pkey on ec1 - Index Cond: ((ff = '42'::bigint) AND (ff = '42'::bigint)) - Filter: (ff = f1) - -> Append - -> Index Scan using ec1_expr2 on ec1 ec1_1 - Index Cond: (((ff + 2) + 1) = '42'::bigint) - -> Index Scan using ec1_expr3 on ec1 ec1_2 - Index Cond: (((ff + 3) + 1) = '42'::bigint) - -> Index Scan using ec1_expr4 on ec1 ec1_3 - Index Cond: ((ff + 4) = '42'::bigint) -(12 rows) - -explain (costs off) - select * from ec1, - (select ff + 1 as x from - (select ff + 2 as ff from ec1 - union all - select ff + 3 as ff from ec1) ss0 - union all - select ff + 4 as x from ec1) as ss1, - (select ff + 1 as x from - (select ff + 2 as ff from ec1 - union all - select ff + 3 as ff from ec1) ss0 - union all - select ff + 4 as x from ec1) as ss2 - where ss1.x = ec1.f1 and ss1.x = ss2.x and ec1.ff = 42::int8; - QUERY PLAN ---------------------------------------------------------------------- - Nested Loop - -> Nested Loop - -> Index Scan using ec1_pkey on ec1 - Index Cond: (ff = '42'::bigint) - -> Append - -> Index Scan using ec1_expr2 on ec1 ec1_1 - Index Cond: (((ff + 2) + 1) = ec1.f1) - -> Index Scan using ec1_expr3 on ec1 ec1_2 - Index Cond: (((ff + 3) + 1) = ec1.f1) - -> Index Scan using ec1_expr4 on ec1 ec1_3 - Index Cond: ((ff + 4) = ec1.f1) - -> Append - -> Index Scan using ec1_expr2 on ec1 ec1_4 - Index Cond: (((ff + 2) + 1) = (((ec1_1.ff + 2) + 1))) - -> Index Scan using ec1_expr3 on ec1 ec1_5 - Index Cond: (((ff + 3) + 1) = (((ec1_1.ff + 2) + 1))) - -> Index Scan using ec1_expr4 on ec1 ec1_6 - Index Cond: ((ff + 4) = (((ec1_1.ff + 2) + 1))) -(18 rows) - --- let's try that as a mergejoin -set enable_mergejoin = on; -set enable_nestloop = off; -explain (costs off) - select * from ec1, - (select ff + 1 as x from - (select ff + 2 as ff from ec1 - union all - select ff + 3 as ff from ec1) ss0 - union all - select ff + 4 as x from ec1) as ss1, - (select ff + 1 as x from - (select ff + 2 as ff from ec1 - union all - select ff + 3 as ff from ec1) ss0 - union all - select ff + 4 as x from ec1) as ss2 - where ss1.x = ec1.f1 and ss1.x = ss2.x and ec1.ff = 42::int8; - QUERY PLAN ------------------------------------------------------------------ - Merge Join - Merge Cond: ((((ec1_4.ff + 2) + 1)) = (((ec1_1.ff + 2) + 1))) - -> Merge 
Append - Sort Key: (((ec1_4.ff + 2) + 1)) - -> Index Scan using ec1_expr2 on ec1 ec1_4 - -> Index Scan using ec1_expr3 on ec1 ec1_5 - -> Index Scan using ec1_expr4 on ec1 ec1_6 - -> Materialize - -> Merge Join - Merge Cond: ((((ec1_1.ff + 2) + 1)) = ec1.f1) - -> Merge Append - Sort Key: (((ec1_1.ff + 2) + 1)) - -> Index Scan using ec1_expr2 on ec1 ec1_1 - -> Index Scan using ec1_expr3 on ec1 ec1_2 - -> Index Scan using ec1_expr4 on ec1 ec1_3 - -> Sort - Sort Key: ec1.f1 USING < - -> Index Scan using ec1_pkey on ec1 - Index Cond: (ff = '42'::bigint) -(19 rows) - --- check partially indexed scan -set enable_nestloop = on; -set enable_mergejoin = off; -drop index ec1_expr3; -explain (costs off) - select * from ec1, - (select ff + 1 as x from - (select ff + 2 as ff from ec1 - union all - select ff + 3 as ff from ec1) ss0 - union all - select ff + 4 as x from ec1) as ss1 - where ss1.x = ec1.f1 and ec1.ff = 42::int8; - QUERY PLAN ------------------------------------------------------ - Nested Loop - -> Index Scan using ec1_pkey on ec1 - Index Cond: (ff = '42'::bigint) - -> Append - -> Index Scan using ec1_expr2 on ec1 ec1_1 - Index Cond: (((ff + 2) + 1) = ec1.f1) - -> Seq Scan on ec1 ec1_2 - Filter: (((ff + 3) + 1) = ec1.f1) - -> Index Scan using ec1_expr4 on ec1 ec1_3 - Index Cond: ((ff + 4) = ec1.f1) -(10 rows) - --- let's try that as a mergejoin -set enable_mergejoin = on; -set enable_nestloop = off; -explain (costs off) - select * from ec1, - (select ff + 1 as x from - (select ff + 2 as ff from ec1 - union all - select ff + 3 as ff from ec1) ss0 - union all - select ff + 4 as x from ec1) as ss1 - where ss1.x = ec1.f1 and ec1.ff = 42::int8; - QUERY PLAN ------------------------------------------------------ - Merge Join - Merge Cond: ((((ec1_1.ff + 2) + 1)) = ec1.f1) - -> Merge Append - Sort Key: (((ec1_1.ff + 2) + 1)) - -> Index Scan using ec1_expr2 on ec1 ec1_1 - -> Sort - Sort Key: (((ec1_2.ff + 3) + 1)) - -> Seq Scan on ec1 ec1_2 - -> Index Scan using ec1_expr4 on ec1 ec1_3 - -> Sort - Sort Key: ec1.f1 USING < - -> Index Scan using ec1_pkey on ec1 - Index Cond: (ff = '42'::bigint) -(13 rows) - --- check effects of row-level security -set enable_nestloop = on; -set enable_mergejoin = off; -alter table ec1 enable row level security; -create policy p1 on ec1 using (f1 < '5'::int8alias1); -create user regress_user_ectest; -grant select on ec0 to regress_user_ectest; -grant select on ec1 to regress_user_ectest; --- without any RLS, we'll treat {a.ff, b.ff, 43} as an EquivalenceClass -explain (costs off) - select * from ec0 a, ec1 b - where a.ff = b.ff and a.ff = 43::bigint::int8alias1; - QUERY PLAN ---------------------------------------------- - Nested Loop - -> Index Scan using ec0_pkey on ec0 a - Index Cond: (ff = '43'::int8alias1) - -> Index Scan using ec1_pkey on ec1 b - Index Cond: (ff = '43'::int8alias1) -(5 rows) - -set session authorization regress_user_ectest; --- with RLS active, the non-leakproof a.ff = 43 clause is not treated --- as a suitable source for an EquivalenceClass; currently, this is true --- even though the RLS clause has nothing to do directly with the EC -explain (costs off) - select * from ec0 a, ec1 b - where a.ff = b.ff and a.ff = 43::bigint::int8alias1; - QUERY PLAN ---------------------------------------------- - Nested Loop - -> Index Scan using ec0_pkey on ec0 a - Index Cond: (ff = '43'::int8alias1) - -> Index Scan using ec1_pkey on ec1 b - Index Cond: (ff = a.ff) - Filter: (f1 < '5'::int8alias1) -(6 rows) - -reset session authorization; -revoke select on ec0 
from regress_user_ectest; -revoke select on ec1 from regress_user_ectest; -drop user regress_user_ectest; --- check that X=X is converted to X IS NOT NULL when appropriate -explain (costs off) - select * from tenk1 where unique1 = unique1 and unique2 = unique2; - QUERY PLAN -------------------------------------------------------------- - Seq Scan on tenk1 - Filter: ((unique1 IS NOT NULL) AND (unique2 IS NOT NULL)) -(2 rows) - --- Test that broken ECs are processed correctly during self join removal. --- Disable merge joins so that we don't get an error about missing commutator. --- Test both orientations of the join clause, because only one of them breaks --- the EC. -set enable_mergejoin to off; -explain (costs off) - select * from ec0 m join ec0 n on m.ff = n.ff - join ec1 p on m.ff + n.ff = p.f1; - QUERY PLAN ---------------------------------------- - Nested Loop - Join Filter: ((n.ff + n.ff) = p.f1) - -> Seq Scan on ec0 n - -> Materialize - -> Seq Scan on ec1 p -(5 rows) - -explain (costs off) - select * from ec0 m join ec0 n on m.ff = n.ff - join ec1 p on p.f1::int8 = (m.ff + n.ff)::int8alias1; - QUERY PLAN ---------------------------------------------------------------- - Nested Loop - Join Filter: ((p.f1)::bigint = ((n.ff + n.ff))::int8alias1) - -> Seq Scan on ec0 n - -> Materialize - -> Seq Scan on ec1 p -(5 rows) - -reset enable_mergejoin; --- this could be converted, but isn't at present -explain (costs off) - select * from tenk1 where unique1 = unique1 or unique2 = unique2; - QUERY PLAN --------------------------------------------------------- - Seq Scan on tenk1 - Filter: ((unique1 = unique1) OR (unique2 = unique2)) -(2 rows) - --- check that we recognize equivalence with dummy domains in the way -create temp table undername (f1 name, f2 int); -create temp view overview as - select f1::information_schema.sql_identifier as sqli, f2 from undername; -explain (costs off) -- this should not require a sort - select * from overview where sqli = 'foo' order by sqli; - QUERY PLAN ------------------------------- - Seq Scan on undername - Filter: (f1 = 'foo'::name) -(2 rows) - +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/json.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/json.out --- /tmp/cirrus-ci-build/src/test/regress/expected/json.out 2024-03-07 14:25:00.331586000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/json.out 2024-03-07 14:27:17.528629000 +0000 @@ -1,2667 +1,2 @@ --- Strings. -SELECT '""'::json; -- OK. - json ------- - "" -(1 row) - -SELECT $$''$$::json; -- ERROR, single quotes are not allowed -ERROR: invalid input syntax for type json -LINE 1: SELECT $$''$$::json; - ^ -DETAIL: Token "'" is invalid. -CONTEXT: JSON data, line 1: '... -SELECT '"abc"'::json; -- OK - json -------- - "abc" -(1 row) - -SELECT '"abc'::json; -- ERROR, quotes not closed -ERROR: invalid input syntax for type json -LINE 1: SELECT '"abc'::json; - ^ -DETAIL: Token ""abc" is invalid. -CONTEXT: JSON data, line 1: "abc -SELECT '"abc -def"'::json; -- ERROR, unescaped newline in string constant -ERROR: invalid input syntax for type json -LINE 1: SELECT '"abc - ^ -DETAIL: Character with value 0x0a must be escaped. 
-CONTEXT: JSON data, line 1: "abc -SELECT '"\n\"\\"'::json; -- OK, legal escapes - json ----------- - "\n\"\\" -(1 row) - -SELECT '"\v"'::json; -- ERROR, not a valid JSON escape -ERROR: invalid input syntax for type json -LINE 1: SELECT '"\v"'::json; - ^ -DETAIL: Escape sequence "\v" is invalid. -CONTEXT: JSON data, line 1: "\v... --- Check fast path for longer strings (at least 16 bytes long) -SELECT ('"'||repeat('.', 12)||'abc"')::json; -- OK - json -------------------- - "............abc" -(1 row) - -SELECT ('"'||repeat('.', 12)||'abc\n"')::json; -- OK, legal escapes - json ---------------------- - "............abc\n" -(1 row) - --- see json_encoding test for input with unicode escapes --- Numbers. -SELECT '1'::json; -- OK - json ------- - 1 -(1 row) - -SELECT '0'::json; -- OK - json ------- - 0 -(1 row) - -SELECT '01'::json; -- ERROR, not valid according to JSON spec -ERROR: invalid input syntax for type json -LINE 1: SELECT '01'::json; - ^ -DETAIL: Token "01" is invalid. -CONTEXT: JSON data, line 1: 01 -SELECT '0.1'::json; -- OK - json ------- - 0.1 -(1 row) - -SELECT '9223372036854775808'::json; -- OK, even though it's too large for int8 - json ---------------------- - 9223372036854775808 -(1 row) - -SELECT '1e100'::json; -- OK - json -------- - 1e100 -(1 row) - -SELECT '1.3e100'::json; -- OK - json ---------- - 1.3e100 -(1 row) - -SELECT '1f2'::json; -- ERROR -ERROR: invalid input syntax for type json -LINE 1: SELECT '1f2'::json; - ^ -DETAIL: Token "1f2" is invalid. -CONTEXT: JSON data, line 1: 1f2 -SELECT '0.x1'::json; -- ERROR -ERROR: invalid input syntax for type json -LINE 1: SELECT '0.x1'::json; - ^ -DETAIL: Token "0.x1" is invalid. -CONTEXT: JSON data, line 1: 0.x1 -SELECT '1.3ex100'::json; -- ERROR -ERROR: invalid input syntax for type json -LINE 1: SELECT '1.3ex100'::json; - ^ -DETAIL: Token "1.3ex100" is invalid. -CONTEXT: JSON data, line 1: 1.3ex100 --- Arrays. -SELECT '[]'::json; -- OK - json ------- - [] -(1 row) - -SELECT '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]'::json; -- OK - json ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]] -(1 row) - -SELECT '[1,2]'::json; -- OK - json -------- - [1,2] -(1 row) - -SELECT '[1,2,]'::json; -- ERROR, trailing comma -ERROR: invalid input syntax for type json -LINE 1: SELECT '[1,2,]'::json; - ^ -DETAIL: Expected JSON value, but found "]". -CONTEXT: JSON data, line 1: [1,2,] -SELECT '[1,2'::json; -- ERROR, no closing bracket -ERROR: invalid input syntax for type json -LINE 1: SELECT '[1,2'::json; - ^ -DETAIL: The input string ended unexpectedly. -CONTEXT: JSON data, line 1: [1,2 -SELECT '[1,[2]'::json; -- ERROR, no closing bracket -ERROR: invalid input syntax for type json -LINE 1: SELECT '[1,[2]'::json; - ^ -DETAIL: The input string ended unexpectedly. -CONTEXT: JSON data, line 1: [1,[2] --- Objects. -SELECT '{}'::json; -- OK - json ------- - {} -(1 row) - -SELECT '{"abc"}'::json; -- ERROR, no value -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc"}'::json; - ^ -DETAIL: Expected ":", but found "}". 
-CONTEXT: JSON data, line 1: {"abc"} -SELECT '{"abc":1}'::json; -- OK - json ------------ - {"abc":1} -(1 row) - -SELECT '{1:"abc"}'::json; -- ERROR, keys must be strings -ERROR: invalid input syntax for type json -LINE 1: SELECT '{1:"abc"}'::json; - ^ -DETAIL: Expected string or "}", but found "1". -CONTEXT: JSON data, line 1: {1... -SELECT '{"abc",1}'::json; -- ERROR, wrong separator -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc",1}'::json; - ^ -DETAIL: Expected ":", but found ",". -CONTEXT: JSON data, line 1: {"abc",... -SELECT '{"abc"=1}'::json; -- ERROR, totally wrong separator -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc"=1}'::json; - ^ -DETAIL: Token "=" is invalid. -CONTEXT: JSON data, line 1: {"abc"=... -SELECT '{"abc"::1}'::json; -- ERROR, another wrong separator -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc"::1}'::json; - ^ -DETAIL: Expected JSON value, but found ":". -CONTEXT: JSON data, line 1: {"abc"::... -SELECT '{"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}}'::json; -- OK - json ---------------------------------------------------------- - {"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}} -(1 row) - -SELECT '{"abc":1:2}'::json; -- ERROR, colon in wrong spot -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc":1:2}'::json; - ^ -DETAIL: Expected "," or "}", but found ":". -CONTEXT: JSON data, line 1: {"abc":1:... -SELECT '{"abc":1,3}'::json; -- ERROR, no value -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc":1,3}'::json; - ^ -DETAIL: Expected string, but found "3". -CONTEXT: JSON data, line 1: {"abc":1,3... --- Recursion. -SET max_stack_depth = '100kB'; -SELECT repeat('[', 10000)::json; -ERROR: stack depth limit exceeded -HINT: Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate. -SELECT repeat('{"a":', 10000)::json; -ERROR: stack depth limit exceeded -HINT: Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate. -RESET max_stack_depth; --- Miscellaneous stuff. -SELECT 'true'::json; -- OK - json ------- - true -(1 row) - -SELECT 'false'::json; -- OK - json -------- - false -(1 row) - -SELECT 'null'::json; -- OK - json ------- - null -(1 row) - -SELECT ' true '::json; -- OK, even with extra whitespace - json --------- - true -(1 row) - -SELECT 'true false'::json; -- ERROR, too many values -ERROR: invalid input syntax for type json -LINE 1: SELECT 'true false'::json; - ^ -DETAIL: Expected end of input, but found "false". -CONTEXT: JSON data, line 1: true false -SELECT 'true, false'::json; -- ERROR, too many values -ERROR: invalid input syntax for type json -LINE 1: SELECT 'true, false'::json; - ^ -DETAIL: Expected end of input, but found ",". -CONTEXT: JSON data, line 1: true,... -SELECT 'truf'::json; -- ERROR, not a keyword -ERROR: invalid input syntax for type json -LINE 1: SELECT 'truf'::json; - ^ -DETAIL: Token "truf" is invalid. -CONTEXT: JSON data, line 1: truf -SELECT 'trues'::json; -- ERROR, not a keyword -ERROR: invalid input syntax for type json -LINE 1: SELECT 'trues'::json; - ^ -DETAIL: Token "trues" is invalid. -CONTEXT: JSON data, line 1: trues -SELECT ''::json; -- ERROR, no value -ERROR: invalid input syntax for type json -LINE 1: SELECT ''::json; - ^ -DETAIL: The input string ended unexpectedly. 
-CONTEXT: JSON data, line 1: -SELECT ' '::json; -- ERROR, no value -ERROR: invalid input syntax for type json -LINE 1: SELECT ' '::json; - ^ -DETAIL: The input string ended unexpectedly. -CONTEXT: JSON data, line 1: --- Multi-line JSON input to check ERROR reporting -SELECT '{ - "one": 1, - "two":"two", - "three": - true}'::json; -- OK - json ------------------------------- - { + - "one": 1, + - "two":"two",+ - "three": + - true} -(1 row) - -SELECT '{ - "one": 1, - "two":,"two", -- ERROR extraneous comma before field "two" - "three": - true}'::json; -ERROR: invalid input syntax for type json -LINE 1: SELECT '{ - ^ -DETAIL: Expected JSON value, but found ",". -CONTEXT: JSON data, line 3: "two":,... -SELECT '{ - "one": 1, - "two":"two", - "averyveryveryveryveryveryveryveryveryverylongfieldname":}'::json; -ERROR: invalid input syntax for type json -LINE 1: SELECT '{ - ^ -DETAIL: Expected JSON value, but found "}". -CONTEXT: JSON data, line 4: ...yveryveryveryveryveryveryveryverylongfieldname":} --- ERROR missing value for last field --- test non-error-throwing input -select pg_input_is_valid('{"a":true}', 'json'); - pg_input_is_valid -------------------- - t -(1 row) - -select pg_input_is_valid('{"a":true', 'json'); - pg_input_is_valid -------------------- - f -(1 row) - -select * from pg_input_error_info('{"a":true', 'json'); - message | detail | hint | sql_error_code -------------------------------------+--------------------------------------+------+---------------- - invalid input syntax for type json | The input string ended unexpectedly. | | 22P02 -(1 row) - ---constructors --- array_to_json -SELECT array_to_json(array(select 1 as a)); - array_to_json ---------------- - [1] -(1 row) - -SELECT array_to_json(array_agg(q),false) from (select x as b, x * 2 as c from generate_series(1,3) x) q; - array_to_json ---------------------------------------------- - [{"b":1,"c":2},{"b":2,"c":4},{"b":3,"c":6}] -(1 row) - -SELECT array_to_json(array_agg(q),true) from (select x as b, x * 2 as c from generate_series(1,3) x) q; - array_to_json ------------------ - [{"b":1,"c":2},+ - {"b":2,"c":4},+ - {"b":3,"c":6}] -(1 row) - -SELECT array_to_json(array_agg(q),false) - FROM ( SELECT $$a$$ || x AS b, y AS c, - ARRAY[ROW(x.*,ARRAY[1,2,3]), - ROW(y.*,ARRAY[4,5,6])] AS z - FROM generate_series(1,2) x, - generate_series(4,5) y) q; - array_to_json -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - [{"b":"a1","c":4,"z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]},{"b":"a1","c":5,"z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]},{"b":"a2","c":4,"z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]},{"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}] -(1 row) - -SELECT array_to_json(array_agg(x),false) from generate_series(5,10) x; - array_to_json ----------------- - [5,6,7,8,9,10] -(1 row) - -SELECT array_to_json('{{1,5},{99,100}}'::int[]); - array_to_json ------------------- - [[1,5],[99,100]] -(1 row) - --- row_to_json -SELECT row_to_json(row(1,'foo')); - row_to_json ---------------------- - {"f1":1,"f2":"foo"} -(1 row) - -SELECT row_to_json(q) -FROM (SELECT $$a$$ || x AS b, - y AS c, - ARRAY[ROW(x.*,ARRAY[1,2,3]), - ROW(y.*,ARRAY[4,5,6])] AS z - FROM generate_series(1,2) x, - generate_series(4,5) y) q; - row_to_json 
--------------------------------------------------------------------- - {"b":"a1","c":4,"z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]} - {"b":"a1","c":5,"z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]} - {"b":"a2","c":4,"z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]} - {"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]} -(4 rows) - -SELECT row_to_json(q,true) -FROM (SELECT $$a$$ || x AS b, - y AS c, - ARRAY[ROW(x.*,ARRAY[1,2,3]), - ROW(y.*,ARRAY[4,5,6])] AS z - FROM generate_series(1,2) x, - generate_series(4,5) y) q; - row_to_json ------------------------------------------------------ - {"b":"a1", + - "c":4, + - "z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]} - {"b":"a1", + - "c":5, + - "z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]} - {"b":"a2", + - "c":4, + - "z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]} - {"b":"a2", + - "c":5, + - "z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]} -(4 rows) - -CREATE TEMP TABLE rows AS -SELECT x, 'txt' || x as y -FROM generate_series(1,3) AS x; -SELECT row_to_json(q,true) -FROM rows q; - row_to_json --------------- - {"x":1, + - "y":"txt1"} - {"x":2, + - "y":"txt2"} - {"x":3, + - "y":"txt3"} -(3 rows) - -SELECT row_to_json(row((select array_agg(x) as d from generate_series(5,10) x)),false); - row_to_json ------------------------ - {"f1":[5,6,7,8,9,10]} -(1 row) - --- anyarray column -analyze rows; -select attname, to_json(histogram_bounds) histogram_bounds -from pg_stats -where tablename = 'rows' and - schemaname = pg_my_temp_schema()::regnamespace::text -order by 1; - attname | histogram_bounds ----------+------------------------ - x | [1,2,3] - y | ["txt1","txt2","txt3"] -(2 rows) - --- to_json, timestamps -select to_json(timestamp '2014-05-28 12:22:35.614298'); - to_json ------------------------------- - "2014-05-28T12:22:35.614298" -(1 row) - -BEGIN; -SET LOCAL TIME ZONE 10.5; -select to_json(timestamptz '2014-05-28 12:22:35.614298-04'); - to_json ------------------------------------- - "2014-05-29T02:52:35.614298+10:30" -(1 row) - -SET LOCAL TIME ZONE -8; -select to_json(timestamptz '2014-05-28 12:22:35.614298-04'); - to_json ------------------------------------- - "2014-05-28T08:22:35.614298-08:00" -(1 row) - -COMMIT; -select to_json(date '2014-05-28'); - to_json --------------- - "2014-05-28" -(1 row) - -select to_json(date 'Infinity'); - to_json ------------- - "infinity" -(1 row) - -select to_json(date '-Infinity'); - to_json -------------- - "-infinity" -(1 row) - -select to_json(timestamp 'Infinity'); - to_json ------------- - "infinity" -(1 row) - -select to_json(timestamp '-Infinity'); - to_json -------------- - "-infinity" -(1 row) - -select to_json(timestamptz 'Infinity'); - to_json ------------- - "infinity" -(1 row) - -select to_json(timestamptz '-Infinity'); - to_json -------------- - "-infinity" -(1 row) - ---json_agg -SELECT json_agg(q) - FROM ( SELECT $$a$$ || x AS b, y AS c, - ARRAY[ROW(x.*,ARRAY[1,2,3]), - ROW(y.*,ARRAY[4,5,6])] AS z - FROM generate_series(1,2) x, - generate_series(4,5) y) q; - json_agg ------------------------------------------------------------------------ - [{"b":"a1","c":4,"z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}, + - {"b":"a1","c":5,"z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}, + - {"b":"a2","c":4,"z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}, + - {"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}] -(1 row) - -SELECT json_agg(q ORDER BY x, y) - FROM rows q; - json_agg ------------------------ - [{"x":1,"y":"txt1"}, + - 
{"x":2,"y":"txt2"}, + - {"x":3,"y":"txt3"}] -(1 row) - -UPDATE rows SET x = NULL WHERE x = 1; -SELECT json_agg(q ORDER BY x NULLS FIRST, y) - FROM rows q; - json_agg --------------------------- - [{"x":null,"y":"txt1"}, + - {"x":2,"y":"txt2"}, + - {"x":3,"y":"txt3"}] -(1 row) - --- non-numeric output -SELECT row_to_json(q) -FROM (SELECT 'NaN'::float8 AS "float8field") q; - row_to_json ------------------------ - {"float8field":"NaN"} -(1 row) - -SELECT row_to_json(q) -FROM (SELECT 'Infinity'::float8 AS "float8field") q; - row_to_json ----------------------------- - {"float8field":"Infinity"} -(1 row) - -SELECT row_to_json(q) -FROM (SELECT '-Infinity'::float8 AS "float8field") q; - row_to_json ------------------------------ - {"float8field":"-Infinity"} -(1 row) - --- json input -SELECT row_to_json(q) -FROM (SELECT '{"a":1,"b": [2,3,4,"d","e","f"],"c":{"p":1,"q":2}}'::json AS "jsonfield") q; - row_to_json ------------------------------------------------------------------- - {"jsonfield":{"a":1,"b": [2,3,4,"d","e","f"],"c":{"p":1,"q":2}}} -(1 row) - --- json extraction functions -CREATE TEMP TABLE test_json ( - json_type text, - test_json json -); -INSERT INTO test_json VALUES -('scalar','"a scalar"'), -('array','["zero", "one","two",null,"four","five", [1,2,3],{"f1":9}]'), -('object','{"field1":"val1","field2":"val2","field3":null, "field4": 4, "field5": [1,2,3], "field6": {"f1":9}}'); -SELECT test_json -> 'x' -FROM test_json -WHERE json_type = 'scalar'; - ?column? ----------- - -(1 row) - -SELECT test_json -> 'x' -FROM test_json -WHERE json_type = 'array'; - ?column? ----------- - -(1 row) - -SELECT test_json -> 'x' -FROM test_json -WHERE json_type = 'object'; - ?column? ----------- - -(1 row) - -SELECT test_json->'field2' -FROM test_json -WHERE json_type = 'object'; - ?column? ----------- - "val2" -(1 row) - -SELECT test_json->>'field2' -FROM test_json -WHERE json_type = 'object'; - ?column? ----------- - val2 -(1 row) - -SELECT test_json -> 2 -FROM test_json -WHERE json_type = 'scalar'; - ?column? ----------- - -(1 row) - -SELECT test_json -> 2 -FROM test_json -WHERE json_type = 'array'; - ?column? ----------- - "two" -(1 row) - -SELECT test_json -> -1 -FROM test_json -WHERE json_type = 'array'; - ?column? ----------- - {"f1":9} -(1 row) - -SELECT test_json -> 2 -FROM test_json -WHERE json_type = 'object'; - ?column? ----------- - -(1 row) - -SELECT test_json->>2 -FROM test_json -WHERE json_type = 'array'; - ?column? ----------- - two -(1 row) - -SELECT test_json ->> 6 FROM test_json WHERE json_type = 'array'; - ?column? ----------- - [1,2,3] -(1 row) - -SELECT test_json ->> 7 FROM test_json WHERE json_type = 'array'; - ?column? ----------- - {"f1":9} -(1 row) - -SELECT test_json ->> 'field4' FROM test_json WHERE json_type = 'object'; - ?column? ----------- - 4 -(1 row) - -SELECT test_json ->> 'field5' FROM test_json WHERE json_type = 'object'; - ?column? ----------- - [1,2,3] -(1 row) - -SELECT test_json ->> 'field6' FROM test_json WHERE json_type = 'object'; - ?column? 
----------- - {"f1":9} -(1 row) - -SELECT json_object_keys(test_json) -FROM test_json -WHERE json_type = 'scalar'; -ERROR: cannot call json_object_keys on a scalar -SELECT json_object_keys(test_json) -FROM test_json -WHERE json_type = 'array'; -ERROR: cannot call json_object_keys on an array -SELECT json_object_keys(test_json) -FROM test_json -WHERE json_type = 'object'; - json_object_keys ------------------- - field1 - field2 - field3 - field4 - field5 - field6 -(6 rows) - --- test extending object_keys resultset - initial resultset size is 256 -select count(*) from - (select json_object_keys(json_object(array_agg(g))) - from (select unnest(array['f'||n,n::text])as g - from generate_series(1,300) as n) x ) y; - count -------- - 300 -(1 row) - --- nulls -select (test_json->'field3') is null as expect_false -from test_json -where json_type = 'object'; - expect_false --------------- - f -(1 row) - -select (test_json->>'field3') is null as expect_true -from test_json -where json_type = 'object'; - expect_true -------------- - t -(1 row) - -select (test_json->3) is null as expect_false -from test_json -where json_type = 'array'; - expect_false --------------- - f -(1 row) - -select (test_json->>3) is null as expect_true -from test_json -where json_type = 'array'; - expect_true -------------- - t -(1 row) - --- corner cases -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> null::text; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> null::int; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> 1; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> -1; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> 'z'; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> ''; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::json -> 1; - ?column? -------------- - {"b": "cc"} -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::json -> 3; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::json -> 'z'; - ?column? ----------- - -(1 row) - -select '{"a": "c", "b": null}'::json -> 'b'; - ?column? ----------- - null -(1 row) - -select '"foo"'::json -> 1; - ?column? ----------- - -(1 row) - -select '"foo"'::json -> 'z'; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> null::text; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> null::int; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> 1; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> 'z'; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> ''; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::json ->> 1; - ?column? -------------- - {"b": "cc"} -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::json ->> 3; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::json ->> 'z'; - ?column? ----------- - -(1 row) - -select '{"a": "c", "b": null}'::json ->> 'b'; - ?column? ----------- - -(1 row) - -select '"foo"'::json ->> 1; - ?column? ----------- - -(1 row) - -select '"foo"'::json ->> 'z'; - ?column? 
----------- - -(1 row) - --- array length -SELECT json_array_length('[1,2,3,{"f1":1,"f2":[5,6]},4]'); - json_array_length -------------------- - 5 -(1 row) - -SELECT json_array_length('[]'); - json_array_length -------------------- - 0 -(1 row) - -SELECT json_array_length('{"f1":1,"f2":[5,6]}'); -ERROR: cannot get array length of a non-array -SELECT json_array_length('4'); -ERROR: cannot get array length of a scalar --- each -select json_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null}'); - json_each -------------------- - (f1,"[1,2,3]") - (f2,"{""f3"":1}") - (f4,null) -(3 rows) - -select * from json_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q; - key | value ------+----------- - f1 | [1,2,3] - f2 | {"f3":1} - f4 | null - f5 | 99 - f6 | "stringy" -(5 rows) - -select json_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":"null"}'); - json_each_text -------------------- - (f1,"[1,2,3]") - (f2,"{""f3"":1}") - (f4,) - (f5,null) -(4 rows) - -select * from json_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q; - key | value ------+---------- - f1 | [1,2,3] - f2 | {"f3":1} - f4 | - f5 | 99 - f6 | stringy -(5 rows) - --- extract_path, extract_path_as_text -select json_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6'); - json_extract_path -------------------- - "stringy" -(1 row) - -select json_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2'); - json_extract_path -------------------- - {"f3":1} -(1 row) - -select json_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text); - json_extract_path -------------------- - "f3" -(1 row) - -select json_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text); - json_extract_path -------------------- - 1 -(1 row) - -select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6'); - json_extract_path_text ------------------------- - stringy -(1 row) - -select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2'); - json_extract_path_text ------------------------- - {"f3":1} -(1 row) - -select json_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text); - json_extract_path_text ------------------------- - f3 -(1 row) - -select json_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text); - json_extract_path_text ------------------------- - 1 -(1 row) - --- extract_path nulls -select json_extract_path('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') is null as expect_false; - expect_false --------------- - f -(1 row) - -select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') is null as expect_true; - expect_true -------------- - t -(1 row) - -select json_extract_path('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') is null as expect_false; - expect_false --------------- - f -(1 row) - -select json_extract_path_text('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') is null as expect_true; - expect_true -------------- - t -(1 row) - --- extract_path operators -select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f4','f6']; - ?column? ------------ - "stringy" -(1 row) - -select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f2']; - ?column? ----------- - {"f3":1} -(1 row) - -select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f2','0']; - ?column? 
----------- - "f3" -(1 row) - -select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f2','1']; - ?column? ----------- - 1 -(1 row) - -select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f4','f6']; - ?column? ----------- - stringy -(1 row) - -select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f2']; - ?column? ----------- - {"f3":1} -(1 row) - -select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f2','0']; - ?column? ----------- - f3 -(1 row) - -select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f2','1']; - ?column? ----------- - 1 -(1 row) - --- corner cases for same -select '{"a": {"b":{"c": "foo"}}}'::json #> '{}'; - ?column? ---------------------------- - {"a": {"b":{"c": "foo"}}} -(1 row) - -select '[1,2,3]'::json #> '{}'; - ?column? ----------- - [1,2,3] -(1 row) - -select '"foo"'::json #> '{}'; - ?column? ----------- - "foo" -(1 row) - -select '42'::json #> '{}'; - ?column? ----------- - 42 -(1 row) - -select 'null'::json #> '{}'; - ?column? ----------- - null -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::json #> array['a']; - ?column? --------------------- - {"b":{"c": "foo"}} -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::json #> array['a', null]; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::json #> array['a', '']; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','b']; - ?column? --------------- - {"c": "foo"} -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','b','c']; - ?column? ----------- - "foo" -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','b','c','d']; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','z','c']; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #> array['a','1','b']; - ?column? ----------- - "cc" -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #> array['a','z','b']; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::json #> array['1','b']; - ?column? ----------- - "cc" -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::json #> array['z','b']; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": null}]'::json #> array['1','b']; - ?column? ----------- - null -(1 row) - -select '"foo"'::json #> array['z']; - ?column? ----------- - -(1 row) - -select '42'::json #> array['f2']; - ?column? ----------- - -(1 row) - -select '42'::json #> array['0']; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::json #>> '{}'; - ?column? ---------------------------- - {"a": {"b":{"c": "foo"}}} -(1 row) - -select '[1,2,3]'::json #>> '{}'; - ?column? ----------- - [1,2,3] -(1 row) - -select '"foo"'::json #>> '{}'; - ?column? ----------- - foo -(1 row) - -select '42'::json #>> '{}'; - ?column? ----------- - 42 -(1 row) - -select 'null'::json #>> '{}'; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a']; - ?column? --------------------- - {"b":{"c": "foo"}} -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a', null]; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a', '']; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','b']; - ?column? --------------- - {"c": "foo"} -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','b','c']; - ?column? 
----------- - foo -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','b','c','d']; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','z','c']; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #>> array['a','1','b']; - ?column? ----------- - cc -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #>> array['a','z','b']; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::json #>> array['1','b']; - ?column? ----------- - cc -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::json #>> array['z','b']; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": null}]'::json #>> array['1','b']; - ?column? ----------- - -(1 row) - -select '"foo"'::json #>> array['z']; - ?column? ----------- - -(1 row) - -select '42'::json #>> array['f2']; - ?column? ----------- - -(1 row) - -select '42'::json #>> array['0']; - ?column? ----------- - -(1 row) - --- array_elements -select json_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]'); - json_array_elements ------------------------ - 1 - true - [1,[2,3]] - null - {"f1":1,"f2":[7,8,9]} - false - "stringy" -(7 rows) - -select * from json_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]') q; - value ------------------------ - 1 - true - [1,[2,3]] - null - {"f1":1,"f2":[7,8,9]} - false - "stringy" -(7 rows) - -select json_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]'); - json_array_elements_text --------------------------- - 1 - true - [1,[2,3]] - - {"f1":1,"f2":[7,8,9]} - false - stringy -(7 rows) - -select * from json_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]') q; - value ------------------------ - 1 - true - [1,[2,3]] - - {"f1":1,"f2":[7,8,9]} - false - stringy -(7 rows) - --- populate_record -create type jpop as (a text, b int, c timestamp); -CREATE DOMAIN js_int_not_null AS int NOT NULL; -CREATE DOMAIN js_int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 3); -CREATE DOMAIN js_int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 3); -create type j_unordered_pair as (x int, y int); -create domain j_ordered_pair as j_unordered_pair check((value).x <= (value).y); -CREATE TYPE jsrec AS ( - i int, - ia _int4, - ia1 int[], - ia2 int[][], - ia3 int[][][], - ia1d js_int_array_1d, - ia2d js_int_array_2d, - t text, - ta text[], - c char(10), - ca char(10)[], - ts timestamp, - js json, - jsb jsonb, - jsa json[], - rec jpop, - reca jpop[] -); -CREATE TYPE jsrec_i_not_null AS ( - i js_int_not_null -); -select * from json_populate_record(null::jpop,'{"a":"blurfl","x":43.2}') q; - a | b | c ---------+---+--- - blurfl | | -(1 row) - -select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"a":"blurfl","x":43.2}') q; - a | b | c ---------+---+-------------------------- - blurfl | 3 | Mon Dec 31 15:30:56 2012 -(1 row) - -select * from json_populate_record(null::jpop,'{"a":"blurfl","x":43.2}') q; - a | b | c ---------+---+--- - blurfl | | -(1 row) - -select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"a":"blurfl","x":43.2}') q; - a | b | c ---------+---+-------------------------- - blurfl | 3 | Mon Dec 31 15:30:56 2012 -(1 row) - -select * from json_populate_record(null::jpop,'{"a":[100,200,false],"x":43.2}') q; - a | b | c ------------------+---+--- - [100,200,false] | | -(1 row) - -select * from json_populate_record(row('x',3,'2012-12-31 
15:30:56')::jpop,'{"a":[100,200,false],"x":43.2}') q; - a | b | c ------------------+---+-------------------------- - [100,200,false] | 3 | Mon Dec 31 15:30:56 2012 -(1 row) - -select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"c":[100,200,false],"x":43.2}') q; -ERROR: invalid input syntax for type timestamp: "[100,200,false]" -select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{}') q; - a | b | c ----+---+-------------------------- - x | 3 | Mon Dec 31 15:30:56 2012 -(1 row) - -SELECT i FROM json_populate_record(NULL::jsrec_i_not_null, '{"x": 43.2}') q; -ERROR: domain js_int_not_null does not allow null values -SELECT i FROM json_populate_record(NULL::jsrec_i_not_null, '{"i": null}') q; -ERROR: domain js_int_not_null does not allow null values -SELECT i FROM json_populate_record(NULL::jsrec_i_not_null, '{"i": 12345}') q; - i -------- - 12345 -(1 row) - -SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": null}') q; - ia ----- - -(1 row) - -SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "ia". -SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [1, "2", null, 4]}') q; - ia --------------- - {1,2,NULL,4} -(1 row) - -SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1, 2], [3, 4]]}') q; - ia ---------------- - {{1,2},{3,4}} -(1 row) - -SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1], 2]}') q; -ERROR: expected JSON array -HINT: See the array element [1] of key "ia". -SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1], [2, 3]]}') q; -ERROR: malformed JSON array -DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. -SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": "{1,2,3}"}') q; - ia ---------- - {1,2,3} -(1 row) - -SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": null}') q; - ia1 ------ - -(1 row) - -SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "ia1". -SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": [1, "2", null, 4]}') q; - ia1 --------------- - {1,2,NULL,4} -(1 row) - -SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": [[1, 2, 3]]}') q; - ia1 ------------ - {{1,2,3}} -(1 row) - -SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": null}') q; - ia1d ------- - -(1 row) - -SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "ia1d". -SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": [1, "2", null, 4]}') q; -ERROR: value for domain js_int_array_1d violates check constraint "js_int_array_1d_check" -SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": [1, "2", null]}') q; - ia1d ------------- - {1,2,NULL} -(1 row) - -SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [1, "2", null, 4]}') q; - ia2 --------------- - {1,2,NULL,4} -(1 row) - -SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[1, 2], [null, 4]]}') q; - ia2 ------------------- - {{1,2},{NULL,4}} -(1 row) - -SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[], []]}') q; - ia2 ------ - {} -(1 row) - -SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[1, 2], [3]]}') q; -ERROR: malformed JSON array -DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. 
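-- [Editor's sketch, not part of the captured diff output.] The "malformed
-- JSON array" failures above are instances of PostgreSQL's general rule that
-- multidimensional arrays must be rectangular; the same check fires for a
-- plain array literal, independent of JSON:
SELECT '{{1,2},{3,4}}'::int[];  -- OK, 2x2
SELECT '{{1},{2,3}}'::int[];    -- ERROR: sub-arrays with mismatched dimensions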
-SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[1, 2], 3, 4]}') q; -ERROR: expected JSON array -HINT: See the array element [1] of key "ia2". -SELECT ia2d FROM json_populate_record(NULL::jsrec, '{"ia2d": [[1, "2"], [null, 4]]}') q; -ERROR: value for domain js_int_array_2d violates check constraint "js_int_array_2d_check" -SELECT ia2d FROM json_populate_record(NULL::jsrec, '{"ia2d": [[1, "2", 3], [null, 5, 6]]}') q; - ia2d ----------------------- - {{1,2,3},{NULL,5,6}} -(1 row) - -SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [1, "2", null, 4]}') q; - ia3 --------------- - {1,2,NULL,4} -(1 row) - -SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [[1, 2], [null, 4]]}') q; - ia3 ------------------- - {{1,2},{NULL,4}} -(1 row) - -SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[], []], [[], []], [[], []] ]}') q; - ia3 ------ - {} -(1 row) - -SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[1, 2]], [[3, 4]] ]}') q; - ia3 -------------------- - {{{1,2}},{{3,4}}} -(1 row) - -SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8]] ]}') q; - ia3 -------------------------------- - {{{1,2},{3,4}},{{5,6},{7,8}}} -(1 row) - -SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]] ]}') q; -ERROR: malformed JSON array -DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. -SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": null}') q; - ta ----- - -(1 row) - -SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "ta". -SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": [1, "2", null, 4]}') q; - ta --------------- - {1,2,NULL,4} -(1 row) - -SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": [[1, 2, 3], {"k": "v"}]}') q; -ERROR: expected JSON array -HINT: See the array element [1] of key "ta". -SELECT c FROM json_populate_record(NULL::jsrec, '{"c": null}') q; - c ---- - -(1 row) - -SELECT c FROM json_populate_record(NULL::jsrec, '{"c": "aaa"}') q; - c ------------- - aaa -(1 row) - -SELECT c FROM json_populate_record(NULL::jsrec, '{"c": "aaaaaaaaaa"}') q; - c ------------- - aaaaaaaaaa -(1 row) - -SELECT c FROM json_populate_record(NULL::jsrec, '{"c": "aaaaaaaaaaaaa"}') q; -ERROR: value too long for type character(10) -SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": null}') q; - ca ----- - -(1 row) - -SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "ca". -SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": [1, "2", null, 4]}') q; - ca ------------------------------------------------ - {"1 ","2 ",NULL,"4 "} -(1 row) - -SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": ["aaaaaaaaaaaaaaaa"]}') q; -ERROR: value too long for type character(10) -SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": [[1, 2, 3], {"k": "v"}]}') q; -ERROR: expected JSON array -HINT: See the array element [1] of key "ca". 
-SELECT js FROM json_populate_record(NULL::jsrec, '{"js": null}') q; - js ----- - -(1 row) - -SELECT js FROM json_populate_record(NULL::jsrec, '{"js": true}') q; - js ------- - true -(1 row) - -SELECT js FROM json_populate_record(NULL::jsrec, '{"js": 123.45}') q; - js --------- - 123.45 -(1 row) - -SELECT js FROM json_populate_record(NULL::jsrec, '{"js": "123.45"}') q; - js ----------- - "123.45" -(1 row) - -SELECT js FROM json_populate_record(NULL::jsrec, '{"js": "abc"}') q; - js -------- - "abc" -(1 row) - -SELECT js FROM json_populate_record(NULL::jsrec, '{"js": [123, "123", null, {"key": "value"}]}') q; - js --------------------------------------- - [123, "123", null, {"key": "value"}] -(1 row) - -SELECT js FROM json_populate_record(NULL::jsrec, '{"js": {"a": "bbb", "b": null, "c": 123.45}}') q; - js --------------------------------------- - {"a": "bbb", "b": null, "c": 123.45} -(1 row) - -SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": null}') q; - jsb ------ - -(1 row) - -SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": true}') q; - jsb ------- - true -(1 row) - -SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": 123.45}') q; - jsb --------- - 123.45 -(1 row) - -SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": "123.45"}') q; - jsb ----------- - "123.45" -(1 row) - -SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": "abc"}') q; - jsb -------- - "abc" -(1 row) - -SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": [123, "123", null, {"key": "value"}]}') q; - jsb --------------------------------------- - [123, "123", null, {"key": "value"}] -(1 row) - -SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": {"a": "bbb", "b": null, "c": 123.45}}') q; - jsb --------------------------------------- - {"a": "bbb", "b": null, "c": 123.45} -(1 row) - -SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": null}') q; - jsa ------ - -(1 row) - -SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "jsa". -SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": [1, "2", null, 4]}') q; - jsa --------------------- - {1,"\"2\"",NULL,4} -(1 row) - -SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": ["aaa", null, [1, 2, "3", {}], { "k" : "v" }]}') q; - jsa ----------------------------------------------------------- - {"\"aaa\"",NULL,"[1, 2, \"3\", {}]","{ \"k\" : \"v\" }"} -(1 row) - -SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": 123}') q; -ERROR: cannot call populate_composite on a scalar -SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": [1, 2]}') q; -ERROR: cannot call populate_composite on an array -SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}') q; - rec ------------------------------------ - (abc,,"Thu Jan 02 00:00:00 2003") -(1 row) - -SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": "(abc,42,01.02.2003)"}') q; - rec -------------------------------------- - (abc,42,"Thu Jan 02 00:00:00 2003") -(1 row) - -SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "reca". 
-SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": [1, 2]}') q; -ERROR: cannot call populate_composite on a scalar -SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}]}') q; - reca --------------------------------------------------------- - {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} -(1 row) - -SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": ["(abc,42,01.02.2003)"]}') q; - reca -------------------------------------------- - {"(abc,42,\"Thu Jan 02 00:00:00 2003\")"} -(1 row) - -SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": "{\"(abc,42,01.02.2003)\"}"}') q; - reca -------------------------------------------- - {"(abc,42,\"Thu Jan 02 00:00:00 2003\")"} -(1 row) - -SELECT rec FROM json_populate_record( - row(NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, - row('x',3,'2012-12-31 15:30:56')::jpop,NULL)::jsrec, - '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}' -) q; - rec ------------------------------------- - (abc,3,"Thu Jan 02 00:00:00 2003") -(1 row) - --- anonymous record type -SELECT json_populate_record(null::record, '{"x": 0, "y": 1}'); -ERROR: could not determine row type for result of json_populate_record -HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list. -SELECT json_populate_record(row(1,2), '{"f1": 0, "f2": 1}'); - json_populate_record ----------------------- - (0,1) -(1 row) - -SELECT * FROM - json_populate_record(null::record, '{"x": 776}') AS (x int, y int); - x | y ------+--- - 776 | -(1 row) - --- composite domain -SELECT json_populate_record(null::j_ordered_pair, '{"x": 0, "y": 1}'); - json_populate_record ----------------------- - (0,1) -(1 row) - -SELECT json_populate_record(row(1,2)::j_ordered_pair, '{"x": 0}'); - json_populate_record ----------------------- - (0,2) -(1 row) - -SELECT json_populate_record(row(1,2)::j_ordered_pair, '{"x": 1, "y": 0}'); -ERROR: value for domain j_ordered_pair violates check constraint "j_ordered_pair_check" --- populate_recordset -select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ---------+---+-------------------------- - blurfl | | - | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ---------+----+-------------------------- - blurfl | 99 | - def | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ---------+---+-------------------------- - blurfl | | - | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ---------+----+-------------------------- - blurfl | 99 | - def | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ----------------+----+-------------------------- - [100,200,300] | 99 | - {"z":true} | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -select * from json_populate_recordset(row('def',99,null)::jpop,'[{"c":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q; -ERROR: invalid input syntax 
for type timestamp: "[100,200,300]" -create type jpop2 as (a int, b json, c int, d int); -select * from json_populate_recordset(null::jpop2, '[{"a":2,"c":3,"b":{"z":4},"d":6}]') q; - a | b | c | d ----+---------+---+--- - 2 | {"z":4} | 3 | 6 -(1 row) - -select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ---------+---+-------------------------- - blurfl | | - | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ---------+----+-------------------------- - blurfl | 99 | - def | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ----------------+----+-------------------------- - [100,200,300] | 99 | - {"z":true} | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - --- anonymous record type -SELECT json_populate_recordset(null::record, '[{"x": 0, "y": 1}]'); -ERROR: could not determine row type for result of json_populate_recordset -HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list. -SELECT json_populate_recordset(row(1,2), '[{"f1": 0, "f2": 1}]'); - json_populate_recordset -------------------------- - (0,1) -(1 row) - -SELECT i, json_populate_recordset(row(i,50), '[{"f1":"42"},{"f2":"43"}]') -FROM (VALUES (1),(2)) v(i); - i | json_populate_recordset ----+------------------------- - 1 | (42,50) - 1 | (1,43) - 2 | (42,50) - 2 | (2,43) -(4 rows) - -SELECT * FROM - json_populate_recordset(null::record, '[{"x": 776}]') AS (x int, y int); - x | y ------+--- - 776 | -(1 row) - --- empty array is a corner case -SELECT json_populate_recordset(null::record, '[]'); -ERROR: could not determine row type for result of json_populate_recordset -HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list. -SELECT json_populate_recordset(row(1,2), '[]'); - json_populate_recordset -------------------------- -(0 rows) - -SELECT * FROM json_populate_recordset(NULL::jpop,'[]') q; - a | b | c ----+---+--- -(0 rows) - -SELECT * FROM - json_populate_recordset(null::record, '[]') AS (x int, y int); - x | y ----+--- -(0 rows) - --- composite domain -SELECT json_populate_recordset(null::j_ordered_pair, '[{"x": 0, "y": 1}]'); - json_populate_recordset -------------------------- - (0,1) -(1 row) - -SELECT json_populate_recordset(row(1,2)::j_ordered_pair, '[{"x": 0}, {"y": 3}]'); - json_populate_recordset -------------------------- - (0,2) - (1,3) -(2 rows) - -SELECT json_populate_recordset(row(1,2)::j_ordered_pair, '[{"x": 1, "y": 0}]'); -ERROR: value for domain j_ordered_pair violates check constraint "j_ordered_pair_check" --- negative cases where the wrong record type is supplied -select * from json_populate_recordset(row(0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); -ERROR: function return row and query-specified return row do not match -DETAIL: Returned row contains 1 attribute, but query expects 2. -select * from json_populate_recordset(row(0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); -ERROR: function return row and query-specified return row do not match -DETAIL: Returned type integer at ordinal position 1, but query expects text. 
-select * from json_populate_recordset(row(0::int,0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); -ERROR: function return row and query-specified return row do not match -DETAIL: Returned row contains 3 attributes, but query expects 2. -select * from json_populate_recordset(row(1000000000::int,50::int),'[{"b":"2"},{"a":"3"}]') q (a text, b text); -ERROR: function return row and query-specified return row do not match -DETAIL: Returned type integer at ordinal position 1, but query expects text. --- test type info caching in json_populate_record() -CREATE TEMP TABLE jspoptest (js json); -INSERT INTO jspoptest -SELECT '{ - "jsa": [1, "2", null, 4], - "rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}, - "reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}] -}'::json -FROM generate_series(1, 3); -SELECT (json_populate_record(NULL::jsrec, js)).* FROM jspoptest; - i | ia | ia1 | ia2 | ia3 | ia1d | ia2d | t | ta | c | ca | ts | js | jsb | jsa | rec | reca ----+----+-----+-----+-----+------+------+---+----+---+----+----+----+-----+--------------------+-----------------------------------+-------------------------------------------------------- - | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} - | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} - | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} -(3 rows) - -DROP TYPE jsrec; -DROP TYPE jsrec_i_not_null; -DROP DOMAIN js_int_not_null; -DROP DOMAIN js_int_array_1d; -DROP DOMAIN js_int_array_2d; -DROP DOMAIN j_ordered_pair; -DROP TYPE j_unordered_pair; ---json_typeof() function -select value, json_typeof(value) - from (values (json '123.4'), - (json '-1'), - (json '"foo"'), - (json 'true'), - (json 'false'), - (json 'null'), - (json '[1, 2, 3]'), - (json '[]'), - (json '{"x":"foo", "y":123}'), - (json '{}'), - (NULL::json)) - as data(value); - value | json_typeof -----------------------+------------- - 123.4 | number - -1 | number - "foo" | string - true | boolean - false | boolean - null | null - [1, 2, 3] | array - [] | array - {"x":"foo", "y":123} | object - {} | object - | -(11 rows) - --- json_build_array, json_build_object, json_object_agg -SELECT json_build_array('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); - json_build_array ------------------------------------------------------------------------ - ["a", 1, "b", 1.2, "c", true, "d", null, "e", {"x": 3, "y": [1,2,3]}] -(1 row) - -SELECT json_build_array('a', NULL); -- ok - json_build_array ------------------- - ["a", null] -(1 row) - -SELECT json_build_array(VARIADIC NULL::text[]); -- ok - json_build_array ------------------- - -(1 row) - -SELECT json_build_array(VARIADIC '{}'::text[]); -- ok - json_build_array ------------------- - [] -(1 row) - -SELECT json_build_array(VARIADIC '{a,b,c}'::text[]); -- ok - json_build_array ------------------- - ["a", "b", "c"] -(1 row) - -SELECT json_build_array(VARIADIC ARRAY['a', NULL]::text[]); -- ok - json_build_array ------------------- - ["a", null] -(1 row) - -SELECT json_build_array(VARIADIC '{1,2,3,4}'::text[]); -- ok - json_build_array ----------------------- - ["1", "2", "3", "4"] -(1 row) - -SELECT json_build_array(VARIADIC '{1,2,3,4}'::int[]); -- ok - json_build_array ------------------- - [1, 2, 3, 4] -(1 row) - -SELECT 
json_build_array(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok - json_build_array --------------------- - [1, 4, 2, 5, 3, 6] -(1 row) - -SELECT json_build_object('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); - json_build_object ----------------------------------------------------------------------------- - {"a" : 1, "b" : 1.2, "c" : true, "d" : null, "e" : {"x": 3, "y": [1,2,3]}} -(1 row) - -SELECT json_build_object( - 'a', json_build_object('b',false,'c',99), - 'd', json_build_object('e',array[9,8,7]::int[], - 'f', (select row_to_json(r) from ( select relkind, oid::regclass as name from pg_class where relname = 'pg_class') r))); - json_build_object -------------------------------------------------------------------------------------------------- - {"a" : {"b" : false, "c" : 99}, "d" : {"e" : [9,8,7], "f" : {"relkind":"r","name":"pg_class"}}} -(1 row) - -SELECT json_build_object('{a,b,c}'::text[]); -- error -ERROR: argument list must have even number of elements -HINT: The arguments of json_build_object() must consist of alternating keys and values. -SELECT json_build_object('{a,b,c}'::text[], '{d,e,f}'::text[]); -- error, key cannot be array -ERROR: key value must be scalar, not array, composite, or json -SELECT json_build_object('a', 'b', 'c'); -- error -ERROR: argument list must have even number of elements -HINT: The arguments of json_build_object() must consist of alternating keys and values. -SELECT json_build_object(NULL, 'a'); -- error, key cannot be NULL -ERROR: null value not allowed for object key -SELECT json_build_object('a', NULL); -- ok - json_build_object -------------------- - {"a" : null} -(1 row) - -SELECT json_build_object(VARIADIC NULL::text[]); -- ok - json_build_object -------------------- - -(1 row) - -SELECT json_build_object(VARIADIC '{}'::text[]); -- ok - json_build_object -------------------- - {} -(1 row) - -SELECT json_build_object(VARIADIC '{a,b,c}'::text[]); -- error -ERROR: argument list must have even number of elements -HINT: The arguments of json_build_object() must consist of alternating keys and values. 
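-- [Editor's sketch, not part of the captured diff output.] After a VARIADIC
-- array is flattened, json_build_object() consumes its arguments strictly as
-- alternating key/value pairs, so the array form is equivalent to spelling
-- the pairs out:
SELECT json_build_object(VARIADIC '{k1,v1,k2,v2}'::text[]);  -- {"k1" : "v1", "k2" : "v2"}
SELECT json_build_object('k1', 'v1', 'k2', 'v2');            -- same result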
-SELECT json_build_object(VARIADIC ARRAY['a', NULL]::text[]); -- ok - json_build_object -------------------- - {"a" : null} -(1 row) - -SELECT json_build_object(VARIADIC ARRAY[NULL, 'a']::text[]); -- error, key cannot be NULL -ERROR: null value not allowed for object key -SELECT json_build_object(VARIADIC '{1,2,3,4}'::text[]); -- ok - json_build_object ------------------------- - {"1" : "2", "3" : "4"} -(1 row) - -SELECT json_build_object(VARIADIC '{1,2,3,4}'::int[]); -- ok - json_build_object --------------------- - {"1" : 2, "3" : 4} -(1 row) - -SELECT json_build_object(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok - json_build_object ------------------------------ - {"1" : 4, "2" : 5, "3" : 6} -(1 row) - --- empty objects/arrays -SELECT json_build_array(); - json_build_array ------------------- - [] -(1 row) - -SELECT json_build_object(); - json_build_object -------------------- - {} -(1 row) - --- make sure keys are quoted -SELECT json_build_object(1,2); - json_build_object -------------------- - {"1" : 2} -(1 row) - --- keys must be scalar and not null -SELECT json_build_object(null,2); -ERROR: null value not allowed for object key -SELECT json_build_object(r,2) FROM (SELECT 1 AS a, 2 AS b) r; -ERROR: key value must be scalar, not array, composite, or json -SELECT json_build_object(json '{"a":1,"b":2}', 3); -ERROR: key value must be scalar, not array, composite, or json -SELECT json_build_object('{1,2,3}'::int[], 3); -ERROR: key value must be scalar, not array, composite, or json -CREATE TEMP TABLE foo (serial_num int, name text, type text); -INSERT INTO foo VALUES (847001,'t15','GE1043'); -INSERT INTO foo VALUES (847002,'t16','GE1043'); -INSERT INTO foo VALUES (847003,'sub-alpha','GESS90'); -SELECT json_build_object('turbines',json_object_agg(serial_num,json_build_object('name',name,'type',type))) -FROM foo; - json_build_object -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - {"turbines" : { "847001" : {"name" : "t15", "type" : "GE1043"}, "847002" : {"name" : "t16", "type" : "GE1043"}, "847003" : {"name" : "sub-alpha", "type" : "GESS90"} }} -(1 row) - -SELECT json_object_agg(name, type) FROM foo; - json_object_agg ----------------------------------------------------------------- - { "t15" : "GE1043", "t16" : "GE1043", "sub-alpha" : "GESS90" } -(1 row) - -INSERT INTO foo VALUES (999999, NULL, 'bar'); -SELECT json_object_agg(name, type) FROM foo; -ERROR: null value not allowed for object key --- json_object --- empty object, one dimension -SELECT json_object('{}'); - json_object -------------- - {} -(1 row) - --- empty object, two dimensions -SELECT json_object('{}', '{}'); - json_object -------------- - {} -(1 row) - --- one dimension -SELECT json_object('{a,1,b,2,3,NULL,"d e f","a b c"}'); - json_object -------------------------------------------------------- - {"a" : "1", "b" : "2", "3" : null, "d e f" : "a b c"} -(1 row) - --- same but with two dimensions -SELECT json_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}'); - json_object -------------------------------------------------------- - {"a" : "1", "b" : "2", "3" : null, "d e f" : "a b c"} -(1 row) - --- odd number error -SELECT json_object('{a,b,c}'); -ERROR: array must have even number of elements --- one column error -SELECT json_object('{{a},{b}}'); -ERROR: array must have two columns --- too many columns error -SELECT json_object('{{a,b,c},{b,c,d}}'); -ERROR: array must have two columns --- too many 
dimensions error -SELECT json_object('{{{a,b},{c,d}},{{b,c},{d,e}}}'); -ERROR: wrong number of array subscripts ---two argument form of json_object -select json_object('{a,b,c,"d e f"}','{1,2,3,"a b c"}'); - json_object ------------------------------------------------------- - {"a" : "1", "b" : "2", "c" : "3", "d e f" : "a b c"} -(1 row) - --- too many dimensions -SELECT json_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}', '{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}'); -ERROR: wrong number of array subscripts --- mismatched dimensions -select json_object('{a,b,c,"d e f",g}','{1,2,3,"a b c"}'); -ERROR: mismatched array dimensions -select json_object('{a,b,c,"d e f"}','{1,2,3,"a b c",g}'); -ERROR: mismatched array dimensions --- null key error -select json_object('{a,b,NULL,"d e f"}','{1,2,3,"a b c"}'); -ERROR: null value not allowed for object key --- empty key is allowed -select json_object('{a,b,"","d e f"}','{1,2,3,"a b c"}'); - json_object ------------------------------------------------------ - {"a" : "1", "b" : "2", "" : "3", "d e f" : "a b c"} -(1 row) - --- json_to_record and json_to_recordset -select * from json_to_record('{"a":1,"b":"foo","c":"bar"}') - as x(a int, b text, d text); - a | b | d ----+-----+--- - 1 | foo | -(1 row) - -select * from json_to_recordset('[{"a":1,"b":"foo","d":false},{"a":2,"b":"bar","c":true}]') - as x(a int, b text, c boolean); - a | b | c ----+-----+--- - 1 | foo | - 2 | bar | t -(2 rows) - -select * from json_to_recordset('[{"a":1,"b":{"d":"foo"},"c":true},{"a":2,"c":false,"b":{"d":"bar"}}]') - as x(a int, b json, c boolean); - a | b | c ----+-------------+--- - 1 | {"d":"foo"} | t - 2 | {"d":"bar"} | f -(2 rows) - -select *, c is null as c_is_null -from json_to_record('{"a":1, "b":{"c":16, "d":2}, "x":8, "ca": ["1 2", 3], "ia": [[1,2],[3,4]], "r": {"a": "aaa", "b": 123}}'::json) - as t(a int, b json, c text, x int, ca char(5)[], ia int[][], r jpop); - a | b | c | x | ca | ia | r | c_is_null ----+-----------------+---+---+-------------------+---------------+------------+----------- - 1 | {"c":16, "d":2} | | 8 | {"1 2 ","3 "} | {{1,2},{3,4}} | (aaa,123,) | t -(1 row) - -select *, c is null as c_is_null -from json_to_recordset('[{"a":1, "b":{"c":16, "d":2}, "x":8}]'::json) - as t(a int, b json, c text, x int); - a | b | c | x | c_is_null ----+-----------------+---+---+----------- - 1 | {"c":16, "d":2} | | 8 | t -(1 row) - -select * from json_to_record('{"ia": null}') as x(ia _int4); - ia ----- - -(1 row) - -select * from json_to_record('{"ia": 123}') as x(ia _int4); -ERROR: expected JSON array -HINT: See the value of key "ia". -select * from json_to_record('{"ia": [1, "2", null, 4]}') as x(ia _int4); - ia --------------- - {1,2,NULL,4} -(1 row) - -select * from json_to_record('{"ia": [[1, 2], [3, 4]]}') as x(ia _int4); - ia ---------------- - {{1,2},{3,4}} -(1 row) - -select * from json_to_record('{"ia": [[1], 2]}') as x(ia _int4); -ERROR: expected JSON array -HINT: See the array element [1] of key "ia". -select * from json_to_record('{"ia": [[1], [2, 3]]}') as x(ia _int4); -ERROR: malformed JSON array -DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. 
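-- [Editor's sketch, not part of the captured diff output.] Unlike
-- json_populate_record(), json_to_record() carries no base record, so it
-- always requires an AS clause defining the output columns; keys missing
-- from the JSON simply come back NULL:
select * from json_to_record('{"a":1}') as x(a int, missing text);
--  a | missing
-- ---+---------
--  1 |
-- (1 row)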
-select * from json_to_record('{"ia2": [1, 2, 3]}') as x(ia2 int[][]); - ia2 ---------- - {1,2,3} -(1 row) - -select * from json_to_record('{"ia2": [[1, 2], [3, 4]]}') as x(ia2 int4[][]); - ia2 ---------------- - {{1,2},{3,4}} -(1 row) - -select * from json_to_record('{"ia2": [[[1], [2], [3]]]}') as x(ia2 int4[][]); - ia2 ------------------ - {{{1},{2},{3}}} -(1 row) - -select * from json_to_record('{"out": {"key": 1}}') as x(out json); - out ------------- - {"key": 1} -(1 row) - -select * from json_to_record('{"out": [{"key": 1}]}') as x(out json); - out --------------- - [{"key": 1}] -(1 row) - -select * from json_to_record('{"out": "{\"key\": 1}"}') as x(out json); - out ----------------- - "{\"key\": 1}" -(1 row) - -select * from json_to_record('{"out": {"key": 1}}') as x(out jsonb); - out ------------- - {"key": 1} -(1 row) - -select * from json_to_record('{"out": [{"key": 1}]}') as x(out jsonb); - out --------------- - [{"key": 1}] -(1 row) - -select * from json_to_record('{"out": "{\"key\": 1}"}') as x(out jsonb); - out ----------------- - "{\"key\": 1}" -(1 row) - --- json_strip_nulls -select json_strip_nulls(null); - json_strip_nulls ------------------- - -(1 row) - -select json_strip_nulls('1'); - json_strip_nulls ------------------- - 1 -(1 row) - -select json_strip_nulls('"a string"'); - json_strip_nulls ------------------- - "a string" -(1 row) - -select json_strip_nulls('null'); - json_strip_nulls ------------------- - null -(1 row) - -select json_strip_nulls('[1,2,null,3,4]'); - json_strip_nulls ------------------- - [1,2,null,3,4] -(1 row) - -select json_strip_nulls('{"a":1,"b":null,"c":[2,null,3],"d":{"e":4,"f":null}}'); - json_strip_nulls ------------------------------------- - {"a":1,"c":[2,null,3],"d":{"e":4}} -(1 row) - -select json_strip_nulls('[1,{"a":1,"b":null,"c":2},3]'); - json_strip_nulls ---------------------- - [1,{"a":1,"c":2},3] -(1 row) - --- an empty object is not null and should not be stripped -select json_strip_nulls('{"a": {"b": null, "c": null}, "d": {} }'); - json_strip_nulls ------------------- - {"a":{},"d":{}} -(1 row) - --- json to tsvector -select to_tsvector('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::json); - to_tsvector ---------------------------------------------------------------------------- - 'aaa':1 'bbb':2 'ccc':4 'ddd':3 'eee':6 'fff':7 'ggg':8 'hhh':10 'iii':11 -(1 row) - --- json to tsvector with config -select to_tsvector('simple', '{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::json); - to_tsvector ---------------------------------------------------------------------------- - 'aaa':1 'bbb':2 'ccc':4 'ddd':3 'eee':6 'fff':7 'ggg':8 'hhh':10 'iii':11 -(1 row) - --- json to tsvector with stop words -select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": ["the eee fff ggg"], "c": {"d": "hhh. 
iii"}}'::json); - to_tsvector ----------------------------------------------------------------------------- - 'aaa':1 'bbb':3 'ccc':5 'ddd':4 'eee':8 'fff':9 'ggg':10 'hhh':12 'iii':13 -(1 row) - --- json to tsvector with numeric values -select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": 123, "c": 456}'::json); - to_tsvector ---------------------------------- - 'aaa':1 'bbb':3 'ccc':5 'ddd':4 -(1 row) - --- json_to_tsvector -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"all"'); - json_to_tsvector ----------------------------------------------------------------------------------------- - '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"key"'); - json_to_tsvector --------------------------------- - 'b':2 'c':4 'd':6 'f':8 'g':10 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"string"'); - json_to_tsvector ------------------- - 'aaa':1 'bbb':3 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"numeric"'); - json_to_tsvector ------------------- - '123':1 '456':3 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"boolean"'); - json_to_tsvector -------------------- - 'fals':3 'true':1 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["string", "numeric"]'); - json_to_tsvector ---------------------------------- - '123':5 '456':7 'aaa':1 'bbb':3 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"all"'); - json_to_tsvector ----------------------------------------------------------------------------------------- - '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"key"'); - json_to_tsvector --------------------------------- - 'b':2 'c':4 'd':6 'f':8 'g':10 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"string"'); - json_to_tsvector ------------------- - 'aaa':1 'bbb':3 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"numeric"'); - json_to_tsvector ------------------- - '123':1 '456':3 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"boolean"'); - json_to_tsvector -------------------- - 'fals':3 'true':1 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["string", "numeric"]'); - json_to_tsvector ---------------------------------- - '123':5 '456':7 'aaa':1 'bbb':3 -(1 row) - --- to_tsvector corner cases -select to_tsvector('""'::json); - to_tsvector -------------- - -(1 row) - -select to_tsvector('{}'::json); - to_tsvector -------------- - -(1 row) - -select to_tsvector('[]'::json); - to_tsvector -------------- - -(1 row) - -select 
to_tsvector('null'::json); - to_tsvector -------------- - -(1 row) - --- json_to_tsvector corner cases -select json_to_tsvector('""'::json, '"all"'); - json_to_tsvector ------------------- - -(1 row) - -select json_to_tsvector('{}'::json, '"all"'); - json_to_tsvector ------------------- - -(1 row) - -select json_to_tsvector('[]'::json, '"all"'); - json_to_tsvector ------------------- - -(1 row) - -select json_to_tsvector('null'::json, '"all"'); - json_to_tsvector ------------------- - -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '""'); -ERROR: wrong flag in flag array: "" -HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '{}'); -ERROR: wrong flag type, only arrays and scalars are allowed -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '[]'); - json_to_tsvector ------------------- - -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, 'null'); -ERROR: flag array element is not a string -HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["all", null]'); -ERROR: flag array element is not a string -HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". --- ts_headline for json -select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh')); - ts_headline ---------------------------------------------------------------------------------------------------------- - {"a":"aaa bbb","b":{"c":"ccc ddd fff","c1":"ccc1 ddd1"},"d":["ggg hhh","iii jjj"]} -(1 row) - -select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh')); - ts_headline ----------------------------------------------------------------------------------------- - {"a":"aaa bbb","b":{"c":"ccc ddd fff"},"d":["ggg hhh","iii jjj"]} -(1 row) - -select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >'); - ts_headline ------------------------------------------------------------------------------------------- - {"a":"aaa ","b":{"c":"ccc fff","c1":"ccc1 ddd1"},"d":["ggg ","iii jjj"]} -(1 row) - -select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >'); - ts_headline ------------------------------------------------------------------------------------------- - {"a":"aaa ","b":{"c":"ccc fff","c1":"ccc1 ddd1"},"d":["ggg ","iii jjj"]} -(1 row) - --- corner cases for ts_headline with json -select ts_headline('null'::json, tsquery('aaa & bbb')); - ts_headline -------------- - null -(1 row) - -select ts_headline('{}'::json, tsquery('aaa & bbb')); - ts_headline -------------- - {} -(1 row) - -select ts_headline('[]'::json, tsquery('aaa & bbb')); - ts_headline -------------- - [] -(1 row) - +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is 
not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/jsonb.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/jsonb.out --- /tmp/cirrus-ci-build/src/test/regress/expected/jsonb.out 2024-03-07 14:25:00.331672000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/jsonb.out 2024-03-07 14:27:17.533250000 +0000 @@ -1,5699 +1,2 @@ --- directory paths are passed to us in environment variables -\getenv abs_srcdir PG_ABS_SRCDIR -CREATE TABLE testjsonb ( - j jsonb -); -\set filename :abs_srcdir '/data/jsonb.data' -COPY testjsonb FROM :'filename'; --- Strings. -SELECT '""'::jsonb; -- OK. - jsonb -------- - "" -(1 row) - -SELECT $$''$$::jsonb; -- ERROR, single quotes are not allowed -ERROR: invalid input syntax for type json -LINE 1: SELECT $$''$$::jsonb; - ^ -DETAIL: Token "'" is invalid. -CONTEXT: JSON data, line 1: '... -SELECT '"abc"'::jsonb; -- OK - jsonb -------- - "abc" -(1 row) - -SELECT '"abc'::jsonb; -- ERROR, quotes not closed -ERROR: invalid input syntax for type json -LINE 1: SELECT '"abc'::jsonb; - ^ -DETAIL: Token ""abc" is invalid. -CONTEXT: JSON data, line 1: "abc -SELECT '"abc -def"'::jsonb; -- ERROR, unescaped newline in string constant -ERROR: invalid input syntax for type json -LINE 1: SELECT '"abc - ^ -DETAIL: Character with value 0x0a must be escaped. -CONTEXT: JSON data, line 1: "abc -SELECT '"\n\"\\"'::jsonb; -- OK, legal escapes - jsonb ----------- - "\n\"\\" -(1 row) - -SELECT '"\v"'::jsonb; -- ERROR, not a valid JSON escape -ERROR: invalid input syntax for type json -LINE 1: SELECT '"\v"'::jsonb; - ^ -DETAIL: Escape sequence "\v" is invalid. -CONTEXT: JSON data, line 1: "\v... --- see json_encoding test for input with unicode escapes --- Numbers. -SELECT '1'::jsonb; -- OK - jsonb -------- - 1 -(1 row) - -SELECT '0'::jsonb; -- OK - jsonb -------- - 0 -(1 row) - -SELECT '01'::jsonb; -- ERROR, not valid according to JSON spec -ERROR: invalid input syntax for type json -LINE 1: SELECT '01'::jsonb; - ^ -DETAIL: Token "01" is invalid. -CONTEXT: JSON data, line 1: 01 -SELECT '0.1'::jsonb; -- OK - jsonb -------- - 0.1 -(1 row) - -SELECT '9223372036854775808'::jsonb; -- OK, even though it's too large for int8 - jsonb ---------------------- - 9223372036854775808 -(1 row) - -SELECT '1e100'::jsonb; -- OK - jsonb -------------------------------------------------------------------------------------------------------- - 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 -(1 row) - -SELECT '1.3e100'::jsonb; -- OK - jsonb -------------------------------------------------------------------------------------------------------- - 13000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 -(1 row) - -SELECT '1f2'::jsonb; -- ERROR -ERROR: invalid input syntax for type json -LINE 1: SELECT '1f2'::jsonb; - ^ -DETAIL: Token "1f2" is invalid. -CONTEXT: JSON data, line 1: 1f2 -SELECT '0.x1'::jsonb; -- ERROR -ERROR: invalid input syntax for type json -LINE 1: SELECT '0.x1'::jsonb; - ^ -DETAIL: Token "0.x1" is invalid. -CONTEXT: JSON data, line 1: 0.x1 -SELECT '1.3ex100'::jsonb; -- ERROR -ERROR: invalid input syntax for type json -LINE 1: SELECT '1.3ex100'::jsonb; - ^ -DETAIL: Token "1.3ex100" is invalid. -CONTEXT: JSON data, line 1: 1.3ex100 --- Arrays. 
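For readers without the expected file at hand, the array cases that follow (lost from the results file when the server crashed) are straightforward to reproduce in any psql session. A minimal sketch — statements paraphrase the lost tests, they are not the test file itself:

    SELECT '[1,2]'::jsonb;                      -- [1, 2]: jsonb normalizes spacing
    SELECT '[1,2,]'::jsonb;                     -- ERROR: trailing comma is rejected
    SELECT ('[[1,2],[3,4]]'::jsonb) #> '{1,0}'; -- 3: #> walks nested arrays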
-SELECT '[]'::jsonb; -- OK - jsonb -------- - [] -(1 row) - -SELECT '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]'::jsonb; -- OK - jsonb ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]] -(1 row) - -SELECT '[1,2]'::jsonb; -- OK - jsonb --------- - [1, 2] -(1 row) - -SELECT '[1,2,]'::jsonb; -- ERROR, trailing comma -ERROR: invalid input syntax for type json -LINE 1: SELECT '[1,2,]'::jsonb; - ^ -DETAIL: Expected JSON value, but found "]". -CONTEXT: JSON data, line 1: [1,2,] -SELECT '[1,2'::jsonb; -- ERROR, no closing bracket -ERROR: invalid input syntax for type json -LINE 1: SELECT '[1,2'::jsonb; - ^ -DETAIL: The input string ended unexpectedly. -CONTEXT: JSON data, line 1: [1,2 -SELECT '[1,[2]'::jsonb; -- ERROR, no closing bracket -ERROR: invalid input syntax for type json -LINE 1: SELECT '[1,[2]'::jsonb; - ^ -DETAIL: The input string ended unexpectedly. -CONTEXT: JSON data, line 1: [1,[2] --- Objects. -SELECT '{}'::jsonb; -- OK - jsonb -------- - {} -(1 row) - -SELECT '{"abc"}'::jsonb; -- ERROR, no value -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc"}'::jsonb; - ^ -DETAIL: Expected ":", but found "}". -CONTEXT: JSON data, line 1: {"abc"} -SELECT '{"abc":1}'::jsonb; -- OK - jsonb ------------- - {"abc": 1} -(1 row) - -SELECT '{1:"abc"}'::jsonb; -- ERROR, keys must be strings -ERROR: invalid input syntax for type json -LINE 1: SELECT '{1:"abc"}'::jsonb; - ^ -DETAIL: Expected string or "}", but found "1". -CONTEXT: JSON data, line 1: {1... -SELECT '{"abc",1}'::jsonb; -- ERROR, wrong separator -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc",1}'::jsonb; - ^ -DETAIL: Expected ":", but found ",". -CONTEXT: JSON data, line 1: {"abc",... -SELECT '{"abc"=1}'::jsonb; -- ERROR, totally wrong separator -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc"=1}'::jsonb; - ^ -DETAIL: Token "=" is invalid. -CONTEXT: JSON data, line 1: {"abc"=... -SELECT '{"abc"::1}'::jsonb; -- ERROR, another wrong separator -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc"::1}'::jsonb; - ^ -DETAIL: Expected JSON value, but found ":". -CONTEXT: JSON data, line 1: {"abc"::... -SELECT '{"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}}'::jsonb; -- OK - jsonb --------------------------------------------------------------------- - {"abc": 1, "def": 2, "ghi": [3, 4], "hij": {"klm": 5, "nop": [6]}} -(1 row) - -SELECT '{"abc":1:2}'::jsonb; -- ERROR, colon in wrong spot -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc":1:2}'::jsonb; - ^ -DETAIL: Expected "," or "}", but found ":". -CONTEXT: JSON data, line 1: {"abc":1:... -SELECT '{"abc":1,3}'::jsonb; -- ERROR, no value -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc":1,3}'::jsonb; - ^ -DETAIL: Expected string, but found "3". -CONTEXT: JSON data, line 1: {"abc":1,3... --- Recursion. 
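The recursion block that follows is about failing gracefully: with the stack guard lowered, pathologically nested input must raise a clean error rather than overrun the backend stack. A variant of the lost test, wrapped in a transaction so the (superuser-only) setting cannot leak:

    BEGIN;
    SET LOCAL max_stack_depth = '100kB';   -- shrink the guard limit
    SELECT repeat('[', 10000)::jsonb;      -- ERROR: stack depth limit exceeded
    ROLLBACK;                              -- SET LOCAL dies with the transaction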
-SET max_stack_depth = '100kB'; -SELECT repeat('[', 10000)::jsonb; -ERROR: stack depth limit exceeded -HINT: Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate. -SELECT repeat('{"a":', 10000)::jsonb; -ERROR: stack depth limit exceeded -HINT: Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate. -RESET max_stack_depth; --- Miscellaneous stuff. -SELECT 'true'::jsonb; -- OK - jsonb -------- - true -(1 row) - -SELECT 'false'::jsonb; -- OK - jsonb -------- - false -(1 row) - -SELECT 'null'::jsonb; -- OK - jsonb -------- - null -(1 row) - -SELECT ' true '::jsonb; -- OK, even with extra whitespace - jsonb -------- - true -(1 row) - -SELECT 'true false'::jsonb; -- ERROR, too many values -ERROR: invalid input syntax for type json -LINE 1: SELECT 'true false'::jsonb; - ^ -DETAIL: Expected end of input, but found "false". -CONTEXT: JSON data, line 1: true false -SELECT 'true, false'::jsonb; -- ERROR, too many values -ERROR: invalid input syntax for type json -LINE 1: SELECT 'true, false'::jsonb; - ^ -DETAIL: Expected end of input, but found ",". -CONTEXT: JSON data, line 1: true,... -SELECT 'truf'::jsonb; -- ERROR, not a keyword -ERROR: invalid input syntax for type json -LINE 1: SELECT 'truf'::jsonb; - ^ -DETAIL: Token "truf" is invalid. -CONTEXT: JSON data, line 1: truf -SELECT 'trues'::jsonb; -- ERROR, not a keyword -ERROR: invalid input syntax for type json -LINE 1: SELECT 'trues'::jsonb; - ^ -DETAIL: Token "trues" is invalid. -CONTEXT: JSON data, line 1: trues -SELECT ''::jsonb; -- ERROR, no value -ERROR: invalid input syntax for type json -LINE 1: SELECT ''::jsonb; - ^ -DETAIL: The input string ended unexpectedly. -CONTEXT: JSON data, line 1: -SELECT ' '::jsonb; -- ERROR, no value -ERROR: invalid input syntax for type json -LINE 1: SELECT ' '::jsonb; - ^ -DETAIL: The input string ended unexpectedly. -CONTEXT: JSON data, line 1: --- Multi-line JSON input to check ERROR reporting -SELECT '{ - "one": 1, - "two":"two", - "three": - true}'::jsonb; -- OK - jsonb ------------------------------------------ - {"one": 1, "two": "two", "three": true} -(1 row) - -SELECT '{ - "one": 1, - "two":,"two", -- ERROR extraneous comma before field "two" - "three": - true}'::jsonb; -ERROR: invalid input syntax for type json -LINE 1: SELECT '{ - ^ -DETAIL: Expected JSON value, but found ",". -CONTEXT: JSON data, line 3: "two":,... -SELECT '{ - "one": 1, - "two":"two", - "averyveryveryveryveryveryveryveryveryverylongfieldname":}'::jsonb; -ERROR: invalid input syntax for type json -LINE 1: SELECT '{ - ^ -DETAIL: Expected JSON value, but found "}". -CONTEXT: JSON data, line 4: ...yveryveryveryveryveryveryveryverylongfieldname":} --- ERROR missing value for last field --- test non-error-throwing input -select pg_input_is_valid('{"a":true}', 'jsonb'); - pg_input_is_valid -------------------- - t -(1 row) - -select pg_input_is_valid('{"a":true', 'jsonb'); - pg_input_is_valid -------------------- - f -(1 row) - -select * from pg_input_error_info('{"a":true', 'jsonb'); - message | detail | hint | sql_error_code -------------------------------------+--------------------------------------+------+---------------- - invalid input syntax for type json | The input string ended unexpectedly. 
| | 22P02 -(1 row) - -select * from pg_input_error_info('{"a":1e1000000}', 'jsonb'); - message | detail | hint | sql_error_code ---------------------------------+--------+------+---------------- - value overflows numeric format | | | 22003 -(1 row) - --- make sure jsonb is passed through json generators without being escaped -SELECT array_to_json(ARRAY [jsonb '{"a":1}', jsonb '{"b":[2,3]}']); - array_to_json --------------------------- - [{"a": 1},{"b": [2, 3]}] -(1 row) - --- anyarray column -CREATE TEMP TABLE rows AS -SELECT x, 'txt' || x as y -FROM generate_series(1,3) AS x; -analyze rows; -select attname, to_jsonb(histogram_bounds) histogram_bounds -from pg_stats -where tablename = 'rows' and - schemaname = pg_my_temp_schema()::regnamespace::text -order by 1; - attname | histogram_bounds ----------+-------------------------- - x | [1, 2, 3] - y | ["txt1", "txt2", "txt3"] -(2 rows) - --- to_jsonb, timestamps -select to_jsonb(timestamp '2014-05-28 12:22:35.614298'); - to_jsonb ------------------------------- - "2014-05-28T12:22:35.614298" -(1 row) - -BEGIN; -SET LOCAL TIME ZONE 10.5; -select to_jsonb(timestamptz '2014-05-28 12:22:35.614298-04'); - to_jsonb ------------------------------------- - "2014-05-29T02:52:35.614298+10:30" -(1 row) - -SET LOCAL TIME ZONE -8; -select to_jsonb(timestamptz '2014-05-28 12:22:35.614298-04'); - to_jsonb ------------------------------------- - "2014-05-28T08:22:35.614298-08:00" -(1 row) - -COMMIT; -select to_jsonb(date '2014-05-28'); - to_jsonb --------------- - "2014-05-28" -(1 row) - -select to_jsonb(date 'Infinity'); - to_jsonb ------------- - "infinity" -(1 row) - -select to_jsonb(date '-Infinity'); - to_jsonb -------------- - "-infinity" -(1 row) - -select to_jsonb(timestamp 'Infinity'); - to_jsonb ------------- - "infinity" -(1 row) - -select to_jsonb(timestamp '-Infinity'); - to_jsonb -------------- - "-infinity" -(1 row) - -select to_jsonb(timestamptz 'Infinity'); - to_jsonb ------------- - "infinity" -(1 row) - -select to_jsonb(timestamptz '-Infinity'); - to_jsonb -------------- - "-infinity" -(1 row) - ---jsonb_agg -SELECT jsonb_agg(q) - FROM ( SELECT $$a$$ || x AS b, y AS c, - ARRAY[ROW(x.*,ARRAY[1,2,3]), - ROW(y.*,ARRAY[4,5,6])] AS z - FROM generate_series(1,2) x, - generate_series(4,5) y) q; - jsonb_agg --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - [{"b": "a1", "c": 4, "z": [{"f1": 1, "f2": [1, 2, 3]}, {"f1": 4, "f2": [4, 5, 6]}]}, {"b": "a1", "c": 5, "z": [{"f1": 1, "f2": [1, 2, 3]}, {"f1": 5, "f2": [4, 5, 6]}]}, {"b": "a2", "c": 4, "z": [{"f1": 2, "f2": [1, 2, 3]}, {"f1": 4, "f2": [4, 5, 6]}]}, {"b": "a2", "c": 5, "z": [{"f1": 2, "f2": [1, 2, 3]}, {"f1": 5, "f2": [4, 5, 6]}]}] -(1 row) - -SELECT jsonb_agg(q ORDER BY x, y) - FROM rows q; - jsonb_agg ------------------------------------------------------------------------ - [{"x": 1, "y": "txt1"}, {"x": 2, "y": "txt2"}, {"x": 3, "y": "txt3"}] -(1 row) - -UPDATE rows SET x = NULL WHERE x = 1; -SELECT jsonb_agg(q ORDER BY x NULLS FIRST, y) - FROM rows q; - jsonb_agg --------------------------------------------------------------------------- - [{"x": null, "y": "txt1"}, {"x": 2, "y": "txt2"}, {"x": 3, "y": "txt3"}] -(1 row) - --- jsonb extraction functions -CREATE TEMP TABLE test_jsonb ( - json_type 
text, - test_json jsonb -); -INSERT INTO test_jsonb VALUES -('scalar','"a scalar"'), -('array','["zero", "one","two",null,"four","five", [1,2,3],{"f1":9}]'), -('object','{"field1":"val1","field2":"val2","field3":null, "field4": 4, "field5": [1,2,3], "field6": {"f1":9}}'); -SELECT test_json -> 'x' FROM test_jsonb WHERE json_type = 'scalar'; - ?column? ----------- - -(1 row) - -SELECT test_json -> 'x' FROM test_jsonb WHERE json_type = 'array'; - ?column? ----------- - -(1 row) - -SELECT test_json -> 'x' FROM test_jsonb WHERE json_type = 'object'; - ?column? ----------- - -(1 row) - -SELECT test_json -> 'field2' FROM test_jsonb WHERE json_type = 'object'; - ?column? ----------- - "val2" -(1 row) - -SELECT test_json ->> 'field2' FROM test_jsonb WHERE json_type = 'scalar'; - ?column? ----------- - -(1 row) - -SELECT test_json ->> 'field2' FROM test_jsonb WHERE json_type = 'array'; - ?column? ----------- - -(1 row) - -SELECT test_json ->> 'field2' FROM test_jsonb WHERE json_type = 'object'; - ?column? ----------- - val2 -(1 row) - -SELECT test_json -> 2 FROM test_jsonb WHERE json_type = 'scalar'; - ?column? ----------- - -(1 row) - -SELECT test_json -> 2 FROM test_jsonb WHERE json_type = 'array'; - ?column? ----------- - "two" -(1 row) - -SELECT test_json -> 9 FROM test_jsonb WHERE json_type = 'array'; - ?column? ----------- - -(1 row) - -SELECT test_json -> 2 FROM test_jsonb WHERE json_type = 'object'; - ?column? ----------- - -(1 row) - -SELECT test_json ->> 6 FROM test_jsonb WHERE json_type = 'array'; - ?column? ------------ - [1, 2, 3] -(1 row) - -SELECT test_json ->> 7 FROM test_jsonb WHERE json_type = 'array'; - ?column? ------------ - {"f1": 9} -(1 row) - -SELECT test_json ->> 'field4' FROM test_jsonb WHERE json_type = 'object'; - ?column? ----------- - 4 -(1 row) - -SELECT test_json ->> 'field5' FROM test_jsonb WHERE json_type = 'object'; - ?column? ------------ - [1, 2, 3] -(1 row) - -SELECT test_json ->> 'field6' FROM test_jsonb WHERE json_type = 'object'; - ?column? ------------ - {"f1": 9} -(1 row) - -SELECT test_json ->> 2 FROM test_jsonb WHERE json_type = 'scalar'; - ?column? ----------- - -(1 row) - -SELECT test_json ->> 2 FROM test_jsonb WHERE json_type = 'array'; - ?column? ----------- - two -(1 row) - -SELECT test_json ->> 2 FROM test_jsonb WHERE json_type = 'object'; - ?column? ----------- - -(1 row) - -SELECT jsonb_object_keys(test_json) FROM test_jsonb WHERE json_type = 'scalar'; -ERROR: cannot call jsonb_object_keys on a scalar -SELECT jsonb_object_keys(test_json) FROM test_jsonb WHERE json_type = 'array'; -ERROR: cannot call jsonb_object_keys on an array -SELECT jsonb_object_keys(test_json) FROM test_jsonb WHERE json_type = 'object'; - jsonb_object_keys -------------------- - field1 - field2 - field3 - field4 - field5 - field6 -(6 rows) - --- nulls -SELECT (test_json->'field3') IS NULL AS expect_false FROM test_jsonb WHERE json_type = 'object'; - expect_false --------------- - f -(1 row) - -SELECT (test_json->>'field3') IS NULL AS expect_true FROM test_jsonb WHERE json_type = 'object'; - expect_true -------------- - t -(1 row) - -SELECT (test_json->3) IS NULL AS expect_false FROM test_jsonb WHERE json_type = 'array'; - expect_false --------------- - f -(1 row) - -SELECT (test_json->>3) IS NULL AS expect_true FROM test_jsonb WHERE json_type = 'array'; - expect_true -------------- - t -(1 row) - --- corner cases -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> null::text; - ?column? 
----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> null::int; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> 1; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> 'z'; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> ''; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::jsonb -> 1; - ?column? -------------- - {"b": "cc"} -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::jsonb -> 3; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::jsonb -> 'z'; - ?column? ----------- - -(1 row) - -select '{"a": "c", "b": null}'::jsonb -> 'b'; - ?column? ----------- - null -(1 row) - -select '"foo"'::jsonb -> 1; - ?column? ----------- - -(1 row) - -select '"foo"'::jsonb -> 'z'; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> null::text; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> null::int; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> 1; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> 'z'; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> ''; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::jsonb ->> 1; - ?column? -------------- - {"b": "cc"} -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::jsonb ->> 3; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::jsonb ->> 'z'; - ?column? ----------- - -(1 row) - -select '{"a": "c", "b": null}'::jsonb ->> 'b'; - ?column? ----------- - -(1 row) - -select '"foo"'::jsonb ->> 1; - ?column? ----------- - -(1 row) - -select '"foo"'::jsonb ->> 'z'; - ?column? ----------- - -(1 row) - --- equality and inequality -SELECT '{"x":"y"}'::jsonb = '{"x":"y"}'::jsonb; - ?column? ----------- - t -(1 row) - -SELECT '{"x":"y"}'::jsonb = '{"x":"z"}'::jsonb; - ?column? ----------- - f -(1 row) - -SELECT '{"x":"y"}'::jsonb <> '{"x":"y"}'::jsonb; - ?column? ----------- - f -(1 row) - -SELECT '{"x":"y"}'::jsonb <> '{"x":"z"}'::jsonb; - ?column? ----------- - t -(1 row) - --- containment -SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b"}'); - jsonb_contains ----------------- - t -(1 row) - -SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b", "c":null}'); - jsonb_contains ----------------- - t -(1 row) - -SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b", "g":null}'); - jsonb_contains ----------------- - f -(1 row) - -SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"g":null}'); - jsonb_contains ----------------- - f -(1 row) - -SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"c"}'); - jsonb_contains ----------------- - f -(1 row) - -SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b"}'); - jsonb_contains ----------------- - t -(1 row) - -SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b", "c":"q"}'); - jsonb_contains ----------------- - f -(1 row) - -SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b"}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b", "c":null}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b", "g":null}'; - ?column? ----------- - f -(1 row) - -SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"g":null}'; - ?column? 
----------- - f -(1 row) - -SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"c"}'; - ?column? ----------- - f -(1 row) - -SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b"}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b", "c":"q"}'; - ?column? ----------- - f -(1 row) - -SELECT '[1,2]'::jsonb @> '[1,2,2]'::jsonb; - ?column? ----------- - t -(1 row) - -SELECT '[1,1,2]'::jsonb @> '[1,2,2]'::jsonb; - ?column? ----------- - t -(1 row) - -SELECT '[[1,2]]'::jsonb @> '[[1,2,2]]'::jsonb; - ?column? ----------- - t -(1 row) - -SELECT '[1,2,2]'::jsonb <@ '[1,2]'::jsonb; - ?column? ----------- - t -(1 row) - -SELECT '[1,2,2]'::jsonb <@ '[1,1,2]'::jsonb; - ?column? ----------- - t -(1 row) - -SELECT '[[1,2,2]]'::jsonb <@ '[[1,2]]'::jsonb; - ?column? ----------- - t -(1 row) - -SELECT jsonb_contained('{"a":"b"}', '{"a":"b", "b":1, "c":null}'); - jsonb_contained ------------------ - t -(1 row) - -SELECT jsonb_contained('{"a":"b", "c":null}', '{"a":"b", "b":1, "c":null}'); - jsonb_contained ------------------ - t -(1 row) - -SELECT jsonb_contained('{"a":"b", "g":null}', '{"a":"b", "b":1, "c":null}'); - jsonb_contained ------------------ - f -(1 row) - -SELECT jsonb_contained('{"g":null}', '{"a":"b", "b":1, "c":null}'); - jsonb_contained ------------------ - f -(1 row) - -SELECT jsonb_contained('{"a":"c"}', '{"a":"b", "b":1, "c":null}'); - jsonb_contained ------------------ - f -(1 row) - -SELECT jsonb_contained('{"a":"b"}', '{"a":"b", "b":1, "c":null}'); - jsonb_contained ------------------ - t -(1 row) - -SELECT jsonb_contained('{"a":"b", "c":"q"}', '{"a":"b", "b":1, "c":null}'); - jsonb_contained ------------------ - f -(1 row) - -SELECT '{"a":"b"}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":"b", "c":null}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":"b", "g":null}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; - ?column? ----------- - f -(1 row) - -SELECT '{"g":null}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; - ?column? ----------- - f -(1 row) - -SELECT '{"a":"c"}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; - ?column? ----------- - f -(1 row) - -SELECT '{"a":"b"}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":"b", "c":"q"}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; - ?column? ----------- - f -(1 row) - --- Raw scalar may contain another raw scalar, array may contain a raw scalar -SELECT '[5]'::jsonb @> '[5]'; - ?column? ----------- - t -(1 row) - -SELECT '5'::jsonb @> '5'; - ?column? ----------- - t -(1 row) - -SELECT '[5]'::jsonb @> '5'; - ?column? ----------- - t -(1 row) - --- But a raw scalar cannot contain an array -SELECT '5'::jsonb @> '[5]'; - ?column? ----------- - f -(1 row) - --- In general, one thing should always contain itself. Test array containment: -SELECT '["9", ["7", "3"], 1]'::jsonb @> '["9", ["7", "3"], 1]'::jsonb; - ?column? ----------- - t -(1 row) - -SELECT '["9", ["7", "3"], ["1"]]'::jsonb @> '["9", ["7", "3"], ["1"]]'::jsonb; - ?column? ----------- - t -(1 row) - --- array containment string matching confusion bug -SELECT '{ "name": "Bob", "tags": [ "enim", "qui"]}'::jsonb @> '{"tags":["qu"]}'; - ?column? 
----------- - f -(1 row) - --- array length -SELECT jsonb_array_length('[1,2,3,{"f1":1,"f2":[5,6]},4]'); - jsonb_array_length --------------------- - 5 -(1 row) - -SELECT jsonb_array_length('[]'); - jsonb_array_length --------------------- - 0 -(1 row) - -SELECT jsonb_array_length('{"f1":1,"f2":[5,6]}'); -ERROR: cannot get array length of a non-array -SELECT jsonb_array_length('4'); -ERROR: cannot get array length of a scalar --- each -SELECT jsonb_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null}'); - jsonb_each --------------------- - (f1,"[1, 2, 3]") - (f2,"{""f3"": 1}") - (f4,null) -(3 rows) - -SELECT jsonb_each('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q; - q ------------------------------------------------------- - (1,"""first""") - (a,"{""1"": ""first"", ""b"": ""c"", ""c"": ""b""}") - (b,"[1, 2]") - (c,"""cc""") - (n,null) -(5 rows) - -SELECT * FROM jsonb_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q; - key | value ------+----------- - f1 | [1, 2, 3] - f2 | {"f3": 1} - f4 | null - f5 | 99 - f6 | "stringy" -(5 rows) - -SELECT * FROM jsonb_each('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q; - key | value ------+------------------------------------ - 1 | "first" - a | {"1": "first", "b": "c", "c": "b"} - b | [1, 2] - c | "cc" - n | null -(5 rows) - -SELECT jsonb_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":"null"}'); - jsonb_each_text --------------------- - (f1,"[1, 2, 3]") - (f2,"{""f3"": 1}") - (f4,) - (f5,null) -(4 rows) - -SELECT jsonb_each_text('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q; - q ------------------------------------------------------- - (1,first) - (a,"{""1"": ""first"", ""b"": ""c"", ""c"": ""b""}") - (b,"[1, 2]") - (c,cc) - (n,) -(5 rows) - -SELECT * FROM jsonb_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q; - key | value ------+----------- - f1 | [1, 2, 3] - f2 | {"f3": 1} - f4 | - f5 | 99 - f6 | stringy -(5 rows) - -SELECT * FROM jsonb_each_text('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q; - key | value ------+------------------------------------ - 1 | first - a | {"1": "first", "b": "c", "c": "b"} - b | [1, 2] - c | cc - n | -(5 rows) - --- exists -SELECT jsonb_exists('{"a":null, "b":"qq"}', 'a'); - jsonb_exists --------------- - t -(1 row) - -SELECT jsonb_exists('{"a":null, "b":"qq"}', 'b'); - jsonb_exists --------------- - t -(1 row) - -SELECT jsonb_exists('{"a":null, "b":"qq"}', 'c'); - jsonb_exists --------------- - f -(1 row) - -SELECT jsonb_exists('{"a":"null", "b":"qq"}', 'a'); - jsonb_exists --------------- - t -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ? 'a'; - ?column? ----------- - t -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ? 'b'; - ?column? ----------- - t -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ? 'c'; - ?column? ----------- - f -(1 row) - -SELECT jsonb '{"a":"null", "b":"qq"}' ? 'a'; - ?column? ----------- - t -(1 row) - --- array exists - array elements should behave as keys -SELECT count(*) from testjsonb WHERE j->'array' ? 'bar'; - count -------- - 3 -(1 row) - --- type sensitive array exists - should return no rows (since "exists" only --- matches strings that are either object keys or array elements) -SELECT count(*) from testjsonb WHERE j->'array' ? 
'5'::text; - count -------- - 0 -(1 row) - --- However, a raw scalar is *contained* within the array -SELECT count(*) from testjsonb WHERE j->'array' @> '5'::jsonb; - count -------- - 1 -(1 row) - -SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['a','b']); - jsonb_exists_any ------------------- - t -(1 row) - -SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['b','a']); - jsonb_exists_any ------------------- - t -(1 row) - -SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['c','a']); - jsonb_exists_any ------------------- - t -(1 row) - -SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['c','d']); - jsonb_exists_any ------------------- - f -(1 row) - -SELECT jsonb_exists_any('{"a":null, "b":"qq"}', '{}'::text[]); - jsonb_exists_any ------------------- - f -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['a','b']; - ?column? ----------- - t -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['b','a']; - ?column? ----------- - t -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['c','a']; - ?column? ----------- - t -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['c','d']; - ?column? ----------- - f -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?| '{}'::text[]; - ?column? ----------- - f -(1 row) - -SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['a','b']); - jsonb_exists_all ------------------- - t -(1 row) - -SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['b','a']); - jsonb_exists_all ------------------- - t -(1 row) - -SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['c','a']); - jsonb_exists_all ------------------- - f -(1 row) - -SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['c','d']); - jsonb_exists_all ------------------- - f -(1 row) - -SELECT jsonb_exists_all('{"a":null, "b":"qq"}', '{}'::text[]); - jsonb_exists_all ------------------- - t -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['a','b']; - ?column? ----------- - t -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['b','a']; - ?column? ----------- - t -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['c','a']; - ?column? ----------- - f -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['c','d']; - ?column? ----------- - f -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['a','a', 'b', 'b', 'b']; - ?column? ----------- - t -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?& '{}'::text[]; - ?column? 
----------- - t -(1 row) - --- typeof -SELECT jsonb_typeof('{}') AS object; - object --------- - object -(1 row) - -SELECT jsonb_typeof('{"c":3,"p":"o"}') AS object; - object --------- - object -(1 row) - -SELECT jsonb_typeof('[]') AS array; - array -------- - array -(1 row) - -SELECT jsonb_typeof('["a", 1]') AS array; - array -------- - array -(1 row) - -SELECT jsonb_typeof('null') AS "null"; - null ------- - null -(1 row) - -SELECT jsonb_typeof('1') AS number; - number --------- - number -(1 row) - -SELECT jsonb_typeof('-1') AS number; - number --------- - number -(1 row) - -SELECT jsonb_typeof('1.0') AS number; - number --------- - number -(1 row) - -SELECT jsonb_typeof('1e2') AS number; - number --------- - number -(1 row) - -SELECT jsonb_typeof('-1.0') AS number; - number --------- - number -(1 row) - -SELECT jsonb_typeof('true') AS boolean; - boolean ---------- - boolean -(1 row) - -SELECT jsonb_typeof('false') AS boolean; - boolean ---------- - boolean -(1 row) - -SELECT jsonb_typeof('"hello"') AS string; - string --------- - string -(1 row) - -SELECT jsonb_typeof('"true"') AS string; - string --------- - string -(1 row) - -SELECT jsonb_typeof('"1.0"') AS string; - string --------- - string -(1 row) - --- jsonb_build_array, jsonb_build_object, jsonb_object_agg -SELECT jsonb_build_array('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); - jsonb_build_array -------------------------------------------------------------------------- - ["a", 1, "b", 1.2, "c", true, "d", null, "e", {"x": 3, "y": [1, 2, 3]}] -(1 row) - -SELECT jsonb_build_array('a', NULL); -- ok - jsonb_build_array -------------------- - ["a", null] -(1 row) - -SELECT jsonb_build_array(VARIADIC NULL::text[]); -- ok - jsonb_build_array -------------------- - -(1 row) - -SELECT jsonb_build_array(VARIADIC '{}'::text[]); -- ok - jsonb_build_array -------------------- - [] -(1 row) - -SELECT jsonb_build_array(VARIADIC '{a,b,c}'::text[]); -- ok - jsonb_build_array -------------------- - ["a", "b", "c"] -(1 row) - -SELECT jsonb_build_array(VARIADIC ARRAY['a', NULL]::text[]); -- ok - jsonb_build_array -------------------- - ["a", null] -(1 row) - -SELECT jsonb_build_array(VARIADIC '{1,2,3,4}'::text[]); -- ok - jsonb_build_array ----------------------- - ["1", "2", "3", "4"] -(1 row) - -SELECT jsonb_build_array(VARIADIC '{1,2,3,4}'::int[]); -- ok - jsonb_build_array -------------------- - [1, 2, 3, 4] -(1 row) - -SELECT jsonb_build_array(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok - jsonb_build_array --------------------- - [1, 4, 2, 5, 3, 6] -(1 row) - -SELECT jsonb_build_object('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); - jsonb_build_object -------------------------------------------------------------------------- - {"a": 1, "b": 1.2, "c": true, "d": null, "e": {"x": 3, "y": [1, 2, 3]}} -(1 row) - -SELECT jsonb_build_object( - 'a', jsonb_build_object('b',false,'c',99), - 'd', jsonb_build_object('e',array[9,8,7]::int[], - 'f', (select row_to_json(r) from ( select relkind, oid::regclass as name from pg_class where relname = 'pg_class') r))); - jsonb_build_object ------------------------------------------------------------------------------------------------- - {"a": {"b": false, "c": 99}, "d": {"e": [9, 8, 7], "f": {"name": "pg_class", "relkind": "r"}}} -(1 row) - -SELECT jsonb_build_object('{a,b,c}'::text[]); -- error -ERROR: argument list must have even number of elements -HINT: The arguments of jsonb_build_object() must consist of alternating keys and values. 
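The "even number of elements" errors above follow from the calling convention: jsonb_build_object() consumes alternating keys and values, and an array passed normally counts as a single (key) argument. VARIADIC spreads the array into the argument list instead; a quick illustration:

    SELECT jsonb_build_object('a', 1, 'b', 2);               -- {"a": 1, "b": 2}
    SELECT jsonb_build_object(VARIADIC '{a,1,b,2}'::text[]); -- {"a": "1", "b": "2"}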
-SELECT jsonb_build_object('{a,b,c}'::text[], '{d,e,f}'::text[]); -- error, key cannot be array -ERROR: key value must be scalar, not array, composite, or json -SELECT jsonb_build_object('a', 'b', 'c'); -- error -ERROR: argument list must have even number of elements -HINT: The arguments of jsonb_build_object() must consist of alternating keys and values. -SELECT jsonb_build_object(NULL, 'a'); -- error, key cannot be NULL -ERROR: argument 1: key must not be null -SELECT jsonb_build_object('a', NULL); -- ok - jsonb_build_object --------------------- - {"a": null} -(1 row) - -SELECT jsonb_build_object(VARIADIC NULL::text[]); -- ok - jsonb_build_object --------------------- - -(1 row) - -SELECT jsonb_build_object(VARIADIC '{}'::text[]); -- ok - jsonb_build_object --------------------- - {} -(1 row) - -SELECT jsonb_build_object(VARIADIC '{a,b,c}'::text[]); -- error -ERROR: argument list must have even number of elements -HINT: The arguments of jsonb_build_object() must consist of alternating keys and values. -SELECT jsonb_build_object(VARIADIC ARRAY['a', NULL]::text[]); -- ok - jsonb_build_object --------------------- - {"a": null} -(1 row) - -SELECT jsonb_build_object(VARIADIC ARRAY[NULL, 'a']::text[]); -- error, key cannot be NULL -ERROR: argument 1: key must not be null -SELECT jsonb_build_object(VARIADIC '{1,2,3,4}'::text[]); -- ok - jsonb_build_object ----------------------- - {"1": "2", "3": "4"} -(1 row) - -SELECT jsonb_build_object(VARIADIC '{1,2,3,4}'::int[]); -- ok - jsonb_build_object --------------------- - {"1": 2, "3": 4} -(1 row) - -SELECT jsonb_build_object(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok - jsonb_build_object --------------------------- - {"1": 4, "2": 5, "3": 6} -(1 row) - --- empty objects/arrays -SELECT jsonb_build_array(); - jsonb_build_array -------------------- - [] -(1 row) - -SELECT jsonb_build_object(); - jsonb_build_object --------------------- - {} -(1 row) - --- make sure keys are quoted -SELECT jsonb_build_object(1,2); - jsonb_build_object --------------------- - {"1": 2} -(1 row) - --- keys must be scalar and not null -SELECT jsonb_build_object(null,2); -ERROR: argument 1: key must not be null -SELECT jsonb_build_object(r,2) FROM (SELECT 1 AS a, 2 AS b) r; -ERROR: key value must be scalar, not array, composite, or json -SELECT jsonb_build_object(json '{"a":1,"b":2}', 3); -ERROR: key value must be scalar, not array, composite, or json -SELECT jsonb_build_object('{1,2,3}'::int[], 3); -ERROR: key value must be scalar, not array, composite, or json --- handling of NULL values -SELECT jsonb_object_agg(1, NULL::jsonb); - jsonb_object_agg ------------------- - {"1": null} -(1 row) - -SELECT jsonb_object_agg(NULL, '{"a":1}'); -ERROR: field name must not be null -CREATE TEMP TABLE foo (serial_num int, name text, type text); -INSERT INTO foo VALUES (847001,'t15','GE1043'); -INSERT INTO foo VALUES (847002,'t16','GE1043'); -INSERT INTO foo VALUES (847003,'sub-alpha','GESS90'); -SELECT jsonb_build_object('turbines',jsonb_object_agg(serial_num,jsonb_build_object('name',name,'type',type))) -FROM foo; - jsonb_build_object -------------------------------------------------------------------------------------------------------------------------------------------------------------- - {"turbines": {"847001": {"name": "t15", "type": "GE1043"}, "847002": {"name": "t16", "type": "GE1043"}, "847003": {"name": "sub-alpha", "type": "GESS90"}}} -(1 row) - -SELECT jsonb_object_agg(name, type) FROM foo; - jsonb_object_agg 
------------------------------------------------------------ - {"t15": "GE1043", "t16": "GE1043", "sub-alpha": "GESS90"} -(1 row) - -INSERT INTO foo VALUES (999999, NULL, 'bar'); -SELECT jsonb_object_agg(name, type) FROM foo; -ERROR: field name must not be null --- edge case for parser -SELECT jsonb_object_agg(DISTINCT 'a', 'abc'); - jsonb_object_agg ------------------- - {"a": "abc"} -(1 row) - --- jsonb_object --- empty object, one dimension -SELECT jsonb_object('{}'); - jsonb_object --------------- - {} -(1 row) - --- empty object, two dimensions -SELECT jsonb_object('{}', '{}'); - jsonb_object --------------- - {} -(1 row) - --- one dimension -SELECT jsonb_object('{a,1,b,2,3,NULL,"d e f","a b c"}'); - jsonb_object ---------------------------------------------------- - {"3": null, "a": "1", "b": "2", "d e f": "a b c"} -(1 row) - --- same but with two dimensions -SELECT jsonb_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}'); - jsonb_object ---------------------------------------------------- - {"3": null, "a": "1", "b": "2", "d e f": "a b c"} -(1 row) - --- odd number error -SELECT jsonb_object('{a,b,c}'); -ERROR: array must have even number of elements --- one column error -SELECT jsonb_object('{{a},{b}}'); -ERROR: array must have two columns --- too many columns error -SELECT jsonb_object('{{a,b,c},{b,c,d}}'); -ERROR: array must have two columns --- too many dimensions error -SELECT jsonb_object('{{{a,b},{c,d}},{{b,c},{d,e}}}'); -ERROR: wrong number of array subscripts ---two argument form of jsonb_object -select jsonb_object('{a,b,c,"d e f"}','{1,2,3,"a b c"}'); - jsonb_object --------------------------------------------------- - {"a": "1", "b": "2", "c": "3", "d e f": "a b c"} -(1 row) - --- too many dimensions -SELECT jsonb_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}', '{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}'); -ERROR: wrong number of array subscripts --- mismatched dimensions -select jsonb_object('{a,b,c,"d e f",g}','{1,2,3,"a b c"}'); -ERROR: mismatched array dimensions -select jsonb_object('{a,b,c,"d e f"}','{1,2,3,"a b c",g}'); -ERROR: mismatched array dimensions --- null key error -select jsonb_object('{a,b,NULL,"d e f"}','{1,2,3,"a b c"}'); -ERROR: null value not allowed for object key --- empty key is allowed -select jsonb_object('{a,b,"","d e f"}','{1,2,3,"a b c"}'); - jsonb_object -------------------------------------------------- - {"": "3", "a": "1", "b": "2", "d e f": "a b c"} -(1 row) - --- extract_path, extract_path_as_text -SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6'); - jsonb_extract_path --------------------- - "stringy" -(1 row) - -SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2'); - jsonb_extract_path --------------------- - {"f3": 1} -(1 row) - -SELECT jsonb_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text); - jsonb_extract_path --------------------- - "f3" -(1 row) - -SELECT jsonb_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text); - jsonb_extract_path --------------------- - 1 -(1 row) - -SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6'); - jsonb_extract_path_text -------------------------- - stringy -(1 row) - -SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2'); - jsonb_extract_path_text -------------------------- - {"f3": 1} -(1 row) - -SELECT jsonb_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text); - jsonb_extract_path_text 
-------------------------- - f3 -(1 row) - -SELECT jsonb_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text); - jsonb_extract_path_text -------------------------- - 1 -(1 row) - --- extract_path nulls -SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') IS NULL AS expect_false; - expect_false --------------- - f -(1 row) - -SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') IS NULL AS expect_true; - expect_true -------------- - t -(1 row) - -SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') IS NULL AS expect_false; - expect_false --------------- - f -(1 row) - -SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') IS NULL AS expect_true; - expect_true -------------- - t -(1 row) - --- extract_path operators -SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f4','f6']; - ?column? ------------ - "stringy" -(1 row) - -SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f2']; - ?column? ------------ - {"f3": 1} -(1 row) - -SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f2','0']; - ?column? ----------- - "f3" -(1 row) - -SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f2','1']; - ?column? ----------- - 1 -(1 row) - -SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f4','f6']; - ?column? ----------- - stringy -(1 row) - -SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f2']; - ?column? ------------ - {"f3": 1} -(1 row) - -SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f2','0']; - ?column? ----------- - f3 -(1 row) - -SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f2','1']; - ?column? ----------- - 1 -(1 row) - --- corner cases for same -select '{"a": {"b":{"c": "foo"}}}'::jsonb #> '{}'; - ?column? ----------------------------- - {"a": {"b": {"c": "foo"}}} -(1 row) - -select '[1,2,3]'::jsonb #> '{}'; - ?column? ------------ - [1, 2, 3] -(1 row) - -select '"foo"'::jsonb #> '{}'; - ?column? ----------- - "foo" -(1 row) - -select '42'::jsonb #> '{}'; - ?column? ----------- - 42 -(1 row) - -select 'null'::jsonb #> '{}'; - ?column? ----------- - null -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a']; - ?column? ---------------------- - {"b": {"c": "foo"}} -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a', null]; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a', '']; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','b']; - ?column? --------------- - {"c": "foo"} -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','b','c']; - ?column? ----------- - "foo" -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','b','c','d']; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','z','c']; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #> array['a','1','b']; - ?column? ----------- - "cc" -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #> array['a','z','b']; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::jsonb #> array['1','b']; - ?column? ----------- - "cc" -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::jsonb #> array['z','b']; - ?column? 
----------- - -(1 row) - -select '[{"b": "c"}, {"b": null}]'::jsonb #> array['1','b']; - ?column? ----------- - null -(1 row) - -select '"foo"'::jsonb #> array['z']; - ?column? ----------- - -(1 row) - -select '42'::jsonb #> array['f2']; - ?column? ----------- - -(1 row) - -select '42'::jsonb #> array['0']; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> '{}'; - ?column? ----------------------------- - {"a": {"b": {"c": "foo"}}} -(1 row) - -select '[1,2,3]'::jsonb #>> '{}'; - ?column? ------------ - [1, 2, 3] -(1 row) - -select '"foo"'::jsonb #>> '{}'; - ?column? ----------- - foo -(1 row) - -select '42'::jsonb #>> '{}'; - ?column? ----------- - 42 -(1 row) - -select 'null'::jsonb #>> '{}'; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a']; - ?column? ---------------------- - {"b": {"c": "foo"}} -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a', null]; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a', '']; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','b']; - ?column? --------------- - {"c": "foo"} -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','b','c']; - ?column? ----------- - foo -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','b','c','d']; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','z','c']; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #>> array['a','1','b']; - ?column? ----------- - cc -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #>> array['a','z','b']; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::jsonb #>> array['1','b']; - ?column? ----------- - cc -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::jsonb #>> array['z','b']; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": null}]'::jsonb #>> array['1','b']; - ?column? ----------- - -(1 row) - -select '"foo"'::jsonb #>> array['z']; - ?column? ----------- - -(1 row) - -select '42'::jsonb #>> array['f2']; - ?column? ----------- - -(1 row) - -select '42'::jsonb #>> array['0']; - ?column? 
----------- - -(1 row) - --- array_elements -SELECT jsonb_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false]'); - jsonb_array_elements ----------------------------- - 1 - true - [1, [2, 3]] - null - {"f1": 1, "f2": [7, 8, 9]} - false -(6 rows) - -SELECT * FROM jsonb_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false]') q; - value ----------------------------- - 1 - true - [1, [2, 3]] - null - {"f1": 1, "f2": [7, 8, 9]} - false -(6 rows) - -SELECT jsonb_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]'); - jsonb_array_elements_text ----------------------------- - 1 - true - [1, [2, 3]] - - {"f1": 1, "f2": [7, 8, 9]} - false - stringy -(7 rows) - -SELECT * FROM jsonb_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]') q; - value ----------------------------- - 1 - true - [1, [2, 3]] - - {"f1": 1, "f2": [7, 8, 9]} - false - stringy -(7 rows) - --- populate_record -CREATE TYPE jbpop AS (a text, b int, c timestamp); -CREATE DOMAIN jsb_int_not_null AS int NOT NULL; -CREATE DOMAIN jsb_int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 3); -CREATE DOMAIN jsb_int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 3); -create type jb_unordered_pair as (x int, y int); -create domain jb_ordered_pair as jb_unordered_pair check((value).x <= (value).y); -CREATE TYPE jsbrec AS ( - i int, - ia _int4, - ia1 int[], - ia2 int[][], - ia3 int[][][], - ia1d jsb_int_array_1d, - ia2d jsb_int_array_2d, - t text, - ta text[], - c char(10), - ca char(10)[], - ts timestamp, - js json, - jsb jsonb, - jsa json[], - rec jbpop, - reca jbpop[] -); -CREATE TYPE jsbrec_i_not_null AS ( - i jsb_int_not_null -); -SELECT * FROM jsonb_populate_record(NULL::jbpop,'{"a":"blurfl","x":43.2}') q; - a | b | c ---------+---+--- - blurfl | | -(1 row) - -SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"a":"blurfl","x":43.2}') q; - a | b | c ---------+---+-------------------------- - blurfl | 3 | Mon Dec 31 15:30:56 2012 -(1 row) - -SELECT * FROM jsonb_populate_record(NULL::jbpop,'{"a":"blurfl","x":43.2}') q; - a | b | c ---------+---+--- - blurfl | | -(1 row) - -SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"a":"blurfl","x":43.2}') q; - a | b | c ---------+---+-------------------------- - blurfl | 3 | Mon Dec 31 15:30:56 2012 -(1 row) - -SELECT * FROM jsonb_populate_record(NULL::jbpop,'{"a":[100,200,false],"x":43.2}') q; - a | b | c --------------------+---+--- - [100, 200, false] | | -(1 row) - -SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"a":[100,200,false],"x":43.2}') q; - a | b | c --------------------+---+-------------------------- - [100, 200, false] | 3 | Mon Dec 31 15:30:56 2012 -(1 row) - -SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"c":[100,200,false],"x":43.2}') q; -ERROR: invalid input syntax for type timestamp: "[100, 200, false]" -SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop, '{}') q; - a | b | c ----+---+-------------------------- - x | 3 | Mon Dec 31 15:30:56 2012 -(1 row) - -SELECT i FROM jsonb_populate_record(NULL::jsbrec_i_not_null, '{"x": 43.2}') q; -ERROR: domain jsb_int_not_null does not allow null values -SELECT i FROM jsonb_populate_record(NULL::jsbrec_i_not_null, '{"i": null}') q; -ERROR: domain jsb_int_not_null does not allow null values -SELECT i FROM jsonb_populate_record(NULL::jsbrec_i_not_null, '{"i": 12345}') q; - i -------- - 12345 -(1 row) - 
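As the cases above show, jsonb_populate_record() matches object keys to the record type's column names; keys with no matching column are ignored, and columns with no matching key stay NULL unless a base record supplies a value. A self-contained sketch with a hypothetical type:

    CREATE TYPE demo_rec AS (a text, b int);   -- illustrative, not from the test file
    SELECT * FROM jsonb_populate_record(NULL::demo_rec, '{"a": "x", "z": 1}');
    --  a | b
    -- ---+---
    --  x |        ("z" has no matching column and is ignored; b is NULL)
    DROP TYPE demo_rec;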
-SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": null}') q; - ia ----- - -(1 row) - -SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "ia". -SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [1, "2", null, 4]}') q; - ia --------------- - {1,2,NULL,4} -(1 row) - -SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1, 2], [3, 4]]}') q; - ia ---------------- - {{1,2},{3,4}} -(1 row) - -SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1], 2]}') q; -ERROR: expected JSON array -HINT: See the array element [1] of key "ia". -SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1], [2, 3]]}') q; -ERROR: malformed JSON array -DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. -SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": "{1,2,3}"}') q; - ia ---------- - {1,2,3} -(1 row) - -SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": null}') q; - ia1 ------ - -(1 row) - -SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "ia1". -SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": [1, "2", null, 4]}') q; - ia1 --------------- - {1,2,NULL,4} -(1 row) - -SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": [[1, 2, 3]]}') q; - ia1 ------------ - {{1,2,3}} -(1 row) - -SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": null}') q; - ia1d ------- - -(1 row) - -SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "ia1d". -SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": [1, "2", null, 4]}') q; -ERROR: value for domain jsb_int_array_1d violates check constraint "jsb_int_array_1d_check" -SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": [1, "2", null]}') q; - ia1d ------------- - {1,2,NULL} -(1 row) - -SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [1, "2", null, 4]}') q; - ia2 --------------- - {1,2,NULL,4} -(1 row) - -SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[1, 2], [null, 4]]}') q; - ia2 ------------------- - {{1,2},{NULL,4}} -(1 row) - -SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[], []]}') q; - ia2 ------ - {} -(1 row) - -SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[1, 2], [3]]}') q; -ERROR: malformed JSON array -DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. -SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[1, 2], 3, 4]}') q; -ERROR: expected JSON array -HINT: See the array element [1] of key "ia2". 
-SELECT ia2d FROM jsonb_populate_record(NULL::jsbrec, '{"ia2d": [[1, "2"], [null, 4]]}') q; -ERROR: value for domain jsb_int_array_2d violates check constraint "jsb_int_array_2d_check" -SELECT ia2d FROM jsonb_populate_record(NULL::jsbrec, '{"ia2d": [[1, "2", 3], [null, 5, 6]]}') q; - ia2d ----------------------- - {{1,2,3},{NULL,5,6}} -(1 row) - -SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [1, "2", null, 4]}') q; - ia3 --------------- - {1,2,NULL,4} -(1 row) - -SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [[1, 2], [null, 4]]}') q; - ia3 ------------------- - {{1,2},{NULL,4}} -(1 row) - -SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[], []], [[], []], [[], []] ]}') q; - ia3 ------ - {} -(1 row) - -SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[1, 2]], [[3, 4]] ]}') q; - ia3 -------------------- - {{{1,2}},{{3,4}}} -(1 row) - -SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8]] ]}') q; - ia3 -------------------------------- - {{{1,2},{3,4}},{{5,6},{7,8}}} -(1 row) - -SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]] ]}') q; -ERROR: malformed JSON array -DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. -SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": null}') q; - ta ----- - -(1 row) - -SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "ta". -SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": [1, "2", null, 4]}') q; - ta --------------- - {1,2,NULL,4} -(1 row) - -SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": [[1, 2, 3], {"k": "v"}]}') q; -ERROR: expected JSON array -HINT: See the array element [1] of key "ta". -SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": null}') q; - c ---- - -(1 row) - -SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": "aaa"}') q; - c ------------- - aaa -(1 row) - -SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": "aaaaaaaaaa"}') q; - c ------------- - aaaaaaaaaa -(1 row) - -SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": "aaaaaaaaaaaaa"}') q; -ERROR: value too long for type character(10) -SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": null}') q; - ca ----- - -(1 row) - -SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "ca". -SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": [1, "2", null, 4]}') q; - ca ------------------------------------------------ - {"1 ","2 ",NULL,"4 "} -(1 row) - -SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": ["aaaaaaaaaaaaaaaa"]}') q; -ERROR: value too long for type character(10) -SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": [[1, 2, 3], {"k": "v"}]}') q; -ERROR: expected JSON array -HINT: See the array element [1] of key "ca". 
-SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": null}') q; - js ----- - -(1 row) - -SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": true}') q; - js ------- - true -(1 row) - -SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": 123.45}') q; - js --------- - 123.45 -(1 row) - -SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": "123.45"}') q; - js ----------- - "123.45" -(1 row) - -SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": "abc"}') q; - js -------- - "abc" -(1 row) - -SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": [123, "123", null, {"key": "value"}]}') q; - js --------------------------------------- - [123, "123", null, {"key": "value"}] -(1 row) - -SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": {"a": "bbb", "b": null, "c": 123.45}}') q; - js --------------------------------------- - {"a": "bbb", "b": null, "c": 123.45} -(1 row) - -SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": null}') q; - jsb ------ - -(1 row) - -SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": true}') q; - jsb ------- - true -(1 row) - -SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": 123.45}') q; - jsb --------- - 123.45 -(1 row) - -SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": "123.45"}') q; - jsb ----------- - "123.45" -(1 row) - -SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": "abc"}') q; - jsb -------- - "abc" -(1 row) - -SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": [123, "123", null, {"key": "value"}]}') q; - jsb --------------------------------------- - [123, "123", null, {"key": "value"}] -(1 row) - -SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": {"a": "bbb", "b": null, "c": 123.45}}') q; - jsb --------------------------------------- - {"a": "bbb", "b": null, "c": 123.45} -(1 row) - -SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": null}') q; - jsa ------ - -(1 row) - -SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "jsa". -SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": [1, "2", null, 4]}') q; - jsa --------------------- - {1,"\"2\"",NULL,4} -(1 row) - -SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": ["aaa", null, [1, 2, "3", {}], { "k" : "v" }]}') q; - jsa -------------------------------------------------------- - {"\"aaa\"",NULL,"[1, 2, \"3\", {}]","{\"k\": \"v\"}"} -(1 row) - -SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": 123}') q; -ERROR: cannot call populate_composite on a scalar -SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": [1, 2]}') q; -ERROR: cannot call populate_composite on an array -SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}') q; - rec ------------------------------------ - (abc,,"Thu Jan 02 00:00:00 2003") -(1 row) - -SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": "(abc,42,01.02.2003)"}') q; - rec -------------------------------------- - (abc,42,"Thu Jan 02 00:00:00 2003") -(1 row) - -SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "reca". 
-SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": [1, 2]}') q; -ERROR: cannot call populate_composite on a scalar -SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}]}') q; - reca --------------------------------------------------------- - {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} -(1 row) - -SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": ["(abc,42,01.02.2003)"]}') q; - reca -------------------------------------------- - {"(abc,42,\"Thu Jan 02 00:00:00 2003\")"} -(1 row) - -SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": "{\"(abc,42,01.02.2003)\"}"}') q; - reca -------------------------------------------- - {"(abc,42,\"Thu Jan 02 00:00:00 2003\")"} -(1 row) - -SELECT rec FROM jsonb_populate_record( - row(NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, - row('x',3,'2012-12-31 15:30:56')::jbpop,NULL)::jsbrec, - '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}' -) q; - rec ------------------------------------- - (abc,3,"Thu Jan 02 00:00:00 2003") -(1 row) - --- Tests to check soft-error support for populate_record_field() --- populate_scalar() -create type jsb_char2 as (a char(2)); -select jsonb_populate_record_valid(NULL::jsb_char2, '{"a": "aaa"}'); - jsonb_populate_record_valid ------------------------------ - f -(1 row) - -select * from jsonb_populate_record(NULL::jsb_char2, '{"a": "aaa"}') q; -ERROR: value too long for type character(2) -select jsonb_populate_record_valid(NULL::jsb_char2, '{"a": "aa"}'); - jsonb_populate_record_valid ------------------------------ - t -(1 row) - -select * from jsonb_populate_record(NULL::jsb_char2, '{"a": "aa"}') q; - a ----- - aa -(1 row) - --- populate_array() -create type jsb_ia as (a int[]); -create type jsb_ia2 as (a int[][]); -select jsonb_populate_record_valid(NULL::jsb_ia, '{"a": 43.2}'); - jsonb_populate_record_valid ------------------------------ - f -(1 row) - -select * from jsonb_populate_record(NULL::jsb_ia, '{"a": 43.2}') q; -ERROR: expected JSON array -HINT: See the value of key "a". -select jsonb_populate_record_valid(NULL::jsb_ia, '{"a": [1, 2]}'); - jsonb_populate_record_valid ------------------------------ - t -(1 row) - -select * from jsonb_populate_record(NULL::jsb_ia, '{"a": [1, 2]}') q; - a -------- - {1,2} -(1 row) - -select jsonb_populate_record_valid(NULL::jsb_ia2, '{"a": [[1], [2, 3]]}'); - jsonb_populate_record_valid ------------------------------ - f -(1 row) - -select * from jsonb_populate_record(NULL::jsb_ia2, '{"a": [[1], [2, 3]]}') q; -ERROR: malformed JSON array -DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. 
-select jsonb_populate_record_valid(NULL::jsb_ia2, '{"a": [[1, 0], [2, 3]]}'); - jsonb_populate_record_valid ------------------------------ - t -(1 row) - -select * from jsonb_populate_record(NULL::jsb_ia2, '{"a": [[1, 0], [2, 3]]}') q; - a ---------------- - {{1,0},{2,3}} -(1 row) - --- populate_domain() -create domain jsb_i_not_null as int not null; -create domain jsb_i_gt_1 as int check (value > 1); -create type jsb_i_not_null_rec as (a jsb_i_not_null); -create type jsb_i_gt_1_rec as (a jsb_i_gt_1); -select jsonb_populate_record_valid(NULL::jsb_i_not_null_rec, '{"a": null}'); - jsonb_populate_record_valid ------------------------------ - f -(1 row) - -select * from jsonb_populate_record(NULL::jsb_i_not_null_rec, '{"a": null}') q; -ERROR: domain jsb_i_not_null does not allow null values -select jsonb_populate_record_valid(NULL::jsb_i_not_null_rec, '{"a": 1}'); - jsonb_populate_record_valid ------------------------------ - t -(1 row) - -select * from jsonb_populate_record(NULL::jsb_i_not_null_rec, '{"a": 1}') q; - a ---- - 1 -(1 row) - -select jsonb_populate_record_valid(NULL::jsb_i_gt_1_rec, '{"a": 1}'); - jsonb_populate_record_valid ------------------------------ - f -(1 row) - -select * from jsonb_populate_record(NULL::jsb_i_gt_1_rec, '{"a": 1}') q; -ERROR: value for domain jsb_i_gt_1 violates check constraint "jsb_i_gt_1_check" -select jsonb_populate_record_valid(NULL::jsb_i_gt_1_rec, '{"a": 2}'); - jsonb_populate_record_valid ------------------------------ - t -(1 row) - -select * from jsonb_populate_record(NULL::jsb_i_gt_1_rec, '{"a": 2}') q; - a ---- - 2 -(1 row) - -drop type jsb_ia, jsb_ia2, jsb_char2, jsb_i_not_null_rec, jsb_i_gt_1_rec; -drop domain jsb_i_not_null, jsb_i_gt_1; --- anonymous record type -SELECT jsonb_populate_record(null::record, '{"x": 0, "y": 1}'); -ERROR: could not determine row type for result of jsonb_populate_record -HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list. 
-SELECT jsonb_populate_record(row(1,2), '{"f1": 0, "f2": 1}'); - jsonb_populate_record ------------------------ - (0,1) -(1 row) - -SELECT * FROM - jsonb_populate_record(null::record, '{"x": 776}') AS (x int, y int); - x | y ------+--- - 776 | -(1 row) - --- composite domain -SELECT jsonb_populate_record(null::jb_ordered_pair, '{"x": 0, "y": 1}'); - jsonb_populate_record ------------------------ - (0,1) -(1 row) - -SELECT jsonb_populate_record(row(1,2)::jb_ordered_pair, '{"x": 0}'); - jsonb_populate_record ------------------------ - (0,2) -(1 row) - -SELECT jsonb_populate_record(row(1,2)::jb_ordered_pair, '{"x": 1, "y": 0}'); -ERROR: value for domain jb_ordered_pair violates check constraint "jb_ordered_pair_check" --- populate_recordset -SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ---------+---+-------------------------- - blurfl | | - | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ---------+----+-------------------------- - blurfl | 99 | - def | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ---------+---+-------------------------- - blurfl | | - | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ---------+----+-------------------------- - blurfl | 99 | - def | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ------------------+----+-------------------------- - [100, 200, 300] | 99 | - {"z": true} | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"c":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q; -ERROR: invalid input syntax for type timestamp: "[100, 200, 300]" -SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ---------+---+-------------------------- - blurfl | | - | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ---------+----+-------------------------- - blurfl | 99 | - def | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ------------------+----+-------------------------- - [100, 200, 300] | 99 | - {"z": true} | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - --- anonymous record type -SELECT jsonb_populate_recordset(null::record, '[{"x": 0, "y": 1}]'); -ERROR: could not determine row type for result of jsonb_populate_recordset -HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list. 
-SELECT jsonb_populate_recordset(row(1,2), '[{"f1": 0, "f2": 1}]'); - jsonb_populate_recordset --------------------------- - (0,1) -(1 row) - -SELECT i, jsonb_populate_recordset(row(i,50), '[{"f1":"42"},{"f2":"43"}]') -FROM (VALUES (1),(2)) v(i); - i | jsonb_populate_recordset ----+-------------------------- - 1 | (42,50) - 1 | (1,43) - 2 | (42,50) - 2 | (2,43) -(4 rows) - -SELECT * FROM - jsonb_populate_recordset(null::record, '[{"x": 776}]') AS (x int, y int); - x | y ------+--- - 776 | -(1 row) - --- empty array is a corner case -SELECT jsonb_populate_recordset(null::record, '[]'); -ERROR: could not determine row type for result of jsonb_populate_recordset -HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list. -SELECT jsonb_populate_recordset(row(1,2), '[]'); - jsonb_populate_recordset --------------------------- -(0 rows) - -SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[]') q; - a | b | c ----+---+--- -(0 rows) - -SELECT * FROM - jsonb_populate_recordset(null::record, '[]') AS (x int, y int); - x | y ----+--- -(0 rows) - --- composite domain -SELECT jsonb_populate_recordset(null::jb_ordered_pair, '[{"x": 0, "y": 1}]'); - jsonb_populate_recordset --------------------------- - (0,1) -(1 row) - -SELECT jsonb_populate_recordset(row(1,2)::jb_ordered_pair, '[{"x": 0}, {"y": 3}]'); - jsonb_populate_recordset --------------------------- - (0,2) - (1,3) -(2 rows) - -SELECT jsonb_populate_recordset(row(1,2)::jb_ordered_pair, '[{"x": 1, "y": 0}]'); -ERROR: value for domain jb_ordered_pair violates check constraint "jb_ordered_pair_check" --- negative cases where the wrong record type is supplied -select * from jsonb_populate_recordset(row(0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); -ERROR: function return row and query-specified return row do not match -DETAIL: Returned row contains 1 attribute, but query expects 2. -select * from jsonb_populate_recordset(row(0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); -ERROR: function return row and query-specified return row do not match -DETAIL: Returned type integer at ordinal position 1, but query expects text. -select * from jsonb_populate_recordset(row(0::int,0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); -ERROR: function return row and query-specified return row do not match -DETAIL: Returned row contains 3 attributes, but query expects 2. -select * from jsonb_populate_recordset(row(1000000000::int,50::int),'[{"b":"2"},{"a":"3"}]') q (a text, b text); -ERROR: function return row and query-specified return row do not match -DETAIL: Returned type integer at ordinal position 1, but query expects text. 
--- jsonb_to_record and jsonb_to_recordset -select * from jsonb_to_record('{"a":1,"b":"foo","c":"bar"}') - as x(a int, b text, d text); - a | b | d ----+-----+--- - 1 | foo | -(1 row) - -select * from jsonb_to_recordset('[{"a":1,"b":"foo","d":false},{"a":2,"b":"bar","c":true}]') - as x(a int, b text, c boolean); - a | b | c ----+-----+--- - 1 | foo | - 2 | bar | t -(2 rows) - -select *, c is null as c_is_null -from jsonb_to_record('{"a":1, "b":{"c":16, "d":2}, "x":8, "ca": ["1 2", 3], "ia": [[1,2],[3,4]], "r": {"a": "aaa", "b": 123}}'::jsonb) - as t(a int, b jsonb, c text, x int, ca char(5)[], ia int[][], r jbpop); - a | b | c | x | ca | ia | r | c_is_null ----+-------------------+---+---+-------------------+---------------+------------+----------- - 1 | {"c": 16, "d": 2} | | 8 | {"1 2 ","3 "} | {{1,2},{3,4}} | (aaa,123,) | t -(1 row) - -select *, c is null as c_is_null -from jsonb_to_recordset('[{"a":1, "b":{"c":16, "d":2}, "x":8}]'::jsonb) - as t(a int, b jsonb, c text, x int); - a | b | c | x | c_is_null ----+-------------------+---+---+----------- - 1 | {"c": 16, "d": 2} | | 8 | t -(1 row) - -select * from jsonb_to_record('{"ia": null}') as x(ia _int4); - ia ----- - -(1 row) - -select * from jsonb_to_record('{"ia": 123}') as x(ia _int4); -ERROR: expected JSON array -HINT: See the value of key "ia". -select * from jsonb_to_record('{"ia": [1, "2", null, 4]}') as x(ia _int4); - ia --------------- - {1,2,NULL,4} -(1 row) - -select * from jsonb_to_record('{"ia": [[1, 2], [3, 4]]}') as x(ia _int4); - ia ---------------- - {{1,2},{3,4}} -(1 row) - -select * from jsonb_to_record('{"ia": [[1], 2]}') as x(ia _int4); -ERROR: expected JSON array -HINT: See the array element [1] of key "ia". -select * from jsonb_to_record('{"ia": [[1], [2, 3]]}') as x(ia _int4); -ERROR: malformed JSON array -DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. 
-select * from jsonb_to_record('{"ia2": [1, 2, 3]}') as x(ia2 int[][]); - ia2 ---------- - {1,2,3} -(1 row) - -select * from jsonb_to_record('{"ia2": [[1, 2], [3, 4]]}') as x(ia2 int4[][]); - ia2 ---------------- - {{1,2},{3,4}} -(1 row) - -select * from jsonb_to_record('{"ia2": [[[1], [2], [3]]]}') as x(ia2 int4[][]); - ia2 ------------------ - {{{1},{2},{3}}} -(1 row) - -select * from jsonb_to_record('{"out": {"key": 1}}') as x(out json); - out ------------- - {"key": 1} -(1 row) - -select * from jsonb_to_record('{"out": [{"key": 1}]}') as x(out json); - out --------------- - [{"key": 1}] -(1 row) - -select * from jsonb_to_record('{"out": "{\"key\": 1}"}') as x(out json); - out ----------------- - "{\"key\": 1}" -(1 row) - -select * from jsonb_to_record('{"out": {"key": 1}}') as x(out jsonb); - out ------------- - {"key": 1} -(1 row) - -select * from jsonb_to_record('{"out": [{"key": 1}]}') as x(out jsonb); - out --------------- - [{"key": 1}] -(1 row) - -select * from jsonb_to_record('{"out": "{\"key\": 1}"}') as x(out jsonb); - out ----------------- - "{\"key\": 1}" -(1 row) - --- test type info caching in jsonb_populate_record() -CREATE TEMP TABLE jsbpoptest (js jsonb); -INSERT INTO jsbpoptest -SELECT '{ - "jsa": [1, "2", null, 4], - "rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}, - "reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}] -}'::jsonb -FROM generate_series(1, 3); -SELECT (jsonb_populate_record(NULL::jsbrec, js)).* FROM jsbpoptest; - i | ia | ia1 | ia2 | ia3 | ia1d | ia2d | t | ta | c | ca | ts | js | jsb | jsa | rec | reca ----+----+-----+-----+-----+------+------+---+----+---+----+----+----+-----+--------------------+-----------------------------------+-------------------------------------------------------- - | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} - | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} - | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} -(3 rows) - -DROP TYPE jsbrec; -DROP TYPE jsbrec_i_not_null; -DROP DOMAIN jsb_int_not_null; -DROP DOMAIN jsb_int_array_1d; -DROP DOMAIN jsb_int_array_2d; -DROP DOMAIN jb_ordered_pair; -DROP TYPE jb_unordered_pair; --- indexing -SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}'; - count -------- - 15 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j ? 'public'; - count -------- - 194 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j ? 
'bar'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j ?| ARRAY['public','disabled']; - count -------- - 337 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j ?& ARRAY['public','disabled']; - count -------- - 42 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '"CC" == $.wait'; - count -------- - 15 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == "CC" && true == $.public'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25.0'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($)'; - count -------- - 1012 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public)'; - count -------- - 194 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.bar)'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) || exists($.disabled)'; - count -------- - 337 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) && exists($.disabled)'; - count -------- - 42 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? ("CC" == @)'; - count -------- - 15 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.wait == "CC" && true == @.public)'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.age ? (@ == 25)'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.age == 25.0)'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$'; - count -------- - 1012 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.public'; - count -------- - 194 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.bar'; - count -------- - 0 -(1 row) - -CREATE INDEX jidx ON testjsonb USING gin (j); -SET enable_seqscan = off; -SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}'; - count -------- - 15 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"array":["foo"]}'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"array":["bar"]}'; - count -------- - 3 -(1 row) - --- exercise GIN_SEARCH_MODE_ALL -SELECT count(*) FROM testjsonb WHERE j @> '{}'; - count -------- - 1012 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j ? 'public'; - count -------- - 194 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j ? 
'bar'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j ?| ARRAY['public','disabled']; - count -------- - 337 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j ?& ARRAY['public','disabled']; - count -------- - 42 -(1 row) - -EXPLAIN (COSTS OFF) -SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null'; - QUERY PLAN ------------------------------------------------------------------ - Aggregate - -> Bitmap Heap Scan on testjsonb - Recheck Cond: (j @@ '($."wait" == null)'::jsonpath) - -> Bitmap Index Scan on jidx - Index Cond: (j @@ '($."wait" == null)'::jsonpath) -(5 rows) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.wait == null))'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.wait ? (@ == null))'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '"CC" == $.wait'; - count -------- - 15 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == "CC" && true == $.public'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25.0'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "foo"'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "bar"'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.array[*] == "bar"))'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array ? (@[*] == "bar"))'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array[*] ? (@ == "bar"))'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($)'; - count -------- - 1012 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public)'; - count -------- - 194 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.bar)'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) || exists($.disabled)'; - count -------- - 337 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) && exists($.disabled)'; - count -------- - 42 -(1 row) - -EXPLAIN (COSTS OFF) -SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)'; - QUERY PLAN -------------------------------------------------------------------- - Aggregate - -> Bitmap Heap Scan on testjsonb - Recheck Cond: (j @? '$."wait"?(@ == null)'::jsonpath) - -> Bitmap Index Scan on jidx - Index Cond: (j @? '$."wait"?(@ == null)'::jsonpath) -(5 rows) - -SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? ("CC" == @)'; - count -------- - 15 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.wait == "CC" && true == @.public)'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.age ? (@ == 25)'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.age == 25.0)'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.array[*] == "bar")'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.array ? (@[*] == "bar")'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.array[*] ? 
(@ == "bar")'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$'; - count -------- - 1012 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.public'; - count -------- - 194 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.bar'; - count -------- - 0 -(1 row) - --- array exists - array elements should behave as keys (for GIN index scans too) -CREATE INDEX jidx_array ON testjsonb USING gin((j->'array')); -SELECT count(*) from testjsonb WHERE j->'array' ? 'bar'; - count -------- - 3 -(1 row) - --- type sensitive array exists - should return no rows (since "exists" only --- matches strings that are either object keys or array elements) -SELECT count(*) from testjsonb WHERE j->'array' ? '5'::text; - count -------- - 0 -(1 row) - --- However, a raw scalar is *contained* within the array -SELECT count(*) from testjsonb WHERE j->'array' @> '5'::jsonb; - count -------- - 1 -(1 row) - -RESET enable_seqscan; -SELECT count(*) FROM (SELECT (jsonb_each(j)).key FROM testjsonb) AS wow; - count -------- - 4791 -(1 row) - -SELECT key, count(*) FROM (SELECT (jsonb_each(j)).key FROM testjsonb) AS wow GROUP BY key ORDER BY count DESC, key; - key | count ------------+------- - line | 884 - query | 207 - pos | 203 - node | 202 - space | 197 - status | 195 - public | 194 - title | 190 - wait | 190 - org | 189 - user | 189 - coauthors | 188 - disabled | 185 - indexed | 184 - cleaned | 180 - bad | 179 - date | 179 - world | 176 - state | 172 - subtitle | 169 - auth | 168 - abstract | 161 - array | 5 - age | 2 - foo | 2 - fool | 1 -(26 rows) - --- sort/hash -SELECT count(distinct j) FROM testjsonb; - count -------- - 894 -(1 row) - -SET enable_hashagg = off; -SELECT count(*) FROM (SELECT j FROM (SELECT * FROM testjsonb UNION ALL SELECT * FROM testjsonb) js GROUP BY j) js2; - count -------- - 894 -(1 row) - -SET enable_hashagg = on; -SET enable_sort = off; -SELECT count(*) FROM (SELECT j FROM (SELECT * FROM testjsonb UNION ALL SELECT * FROM testjsonb) js GROUP BY j) js2; - count -------- - 894 -(1 row) - -SELECT distinct * FROM (values (jsonb '{}' || ''::text),('{}')) v(j); - j ----- - {} -(1 row) - -SET enable_sort = on; -RESET enable_hashagg; -RESET enable_sort; -DROP INDEX jidx; -DROP INDEX jidx_array; --- btree -CREATE INDEX jidx ON testjsonb USING btree (j); -SET enable_seqscan = off; -SELECT count(*) FROM testjsonb WHERE j > '{"p":1}'; - count -------- - 884 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j = '{"pos":98, "line":371, "node":"CBA", "indexed":true}'; - count -------- - 1 -(1 row) - ---gin path opclass -DROP INDEX jidx; -CREATE INDEX jidx ON testjsonb USING gin (j jsonb_path_ops); -SET enable_seqscan = off; -SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}'; - count -------- - 15 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}'; - count -------- - 2 -(1 row) - --- exercise GIN_SEARCH_MODE_ALL -SELECT count(*) FROM testjsonb WHERE j @> '{}'; - count -------- - 1012 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.wait == null))'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.wait ? 
(@ == null))'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '"CC" == $.wait'; - count -------- - 15 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == "CC" && true == $.public'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25.0'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "foo"'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "bar"'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.array[*] == "bar"))'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array ? (@[*] == "bar"))'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array[*] ? (@ == "bar"))'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($)'; - count -------- - 1012 -(1 row) - -EXPLAIN (COSTS OFF) -SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)'; - QUERY PLAN -------------------------------------------------------------------- - Aggregate - -> Bitmap Heap Scan on testjsonb - Recheck Cond: (j @? '$."wait"?(@ == null)'::jsonpath) - -> Bitmap Index Scan on jidx - Index Cond: (j @? '$."wait"?(@ == null)'::jsonpath) -(5 rows) - -SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? ("CC" == @)'; - count -------- - 15 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.wait == "CC" && true == @.public)'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.age ? (@ == 25)'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.age == 25.0)'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.array[*] == "bar")'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.array ? (@[*] == "bar")'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.array[*] ? (@ == "bar")'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$'; - count -------- - 1012 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.public'; - count -------- - 194 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? 
'$.bar'; - count -------- - 0 -(1 row) - -RESET enable_seqscan; -DROP INDEX jidx; --- nested tests -SELECT '{"ff":{"a":12,"b":16}}'::jsonb; - jsonb ----------------------------- - {"ff": {"a": 12, "b": 16}} -(1 row) - -SELECT '{"ff":{"a":12,"b":16},"qq":123}'::jsonb; - jsonb ---------------------------------------- - {"ff": {"a": 12, "b": 16}, "qq": 123} -(1 row) - -SELECT '{"aa":["a","aaa"],"qq":{"a":12,"b":16,"c":["c1","c2"],"d":{"d1":"d1","d2":"d2","d1":"d3"}}}'::jsonb; - jsonb --------------------------------------------------------------------------------------------------- - {"aa": ["a", "aaa"], "qq": {"a": 12, "b": 16, "c": ["c1", "c2"], "d": {"d1": "d3", "d2": "d2"}}} -(1 row) - -SELECT '{"aa":["a","aaa"],"qq":{"a":"12","b":"16","c":["c1","c2"],"d":{"d1":"d1","d2":"d2"}}}'::jsonb; - jsonb ------------------------------------------------------------------------------------------------------- - {"aa": ["a", "aaa"], "qq": {"a": "12", "b": "16", "c": ["c1", "c2"], "d": {"d1": "d1", "d2": "d2"}}} -(1 row) - -SELECT '{"aa":["a","aaa"],"qq":{"a":"12","b":"16","c":["c1","c2",["c3"],{"c4":4}],"d":{"d1":"d1","d2":"d2"}}}'::jsonb; - jsonb -------------------------------------------------------------------------------------------------------------------------- - {"aa": ["a", "aaa"], "qq": {"a": "12", "b": "16", "c": ["c1", "c2", ["c3"], {"c4": 4}], "d": {"d1": "d1", "d2": "d2"}}} -(1 row) - -SELECT '{"ff":["a","aaa"]}'::jsonb; - jsonb ----------------------- - {"ff": ["a", "aaa"]} -(1 row) - -SELECT - '{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'ff', - '{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'qq', - ('{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'Y') IS NULL AS f, - ('{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb ->> 'Y') IS NULL AS t, - '{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'x'; - ?column? | ?column? | f | t | ?column? ---------------------+----------+---+---+---------- - {"a": 12, "b": 16} | 123 | f | t | [1, 2] -(1 row) - --- nested containment -SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[1,2]}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":[2,1],"c":"b"}'::jsonb @> '{"a":[1,2]}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":{"1":2},"c":"b"}'::jsonb @> '{"a":[1,2]}'; - ?column? ----------- - f -(1 row) - -SELECT '{"a":{"2":1},"c":"b"}'::jsonb @> '{"a":[1,2]}'; - ?column? ----------- - f -(1 row) - -SELECT '{"a":{"1":2},"c":"b"}'::jsonb @> '{"a":{"1":2}}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":{"2":1},"c":"b"}'::jsonb @> '{"a":{"1":2}}'; - ?column? ----------- - f -(1 row) - -SELECT '["a","b"]'::jsonb @> '["a","b","c","b"]'; - ?column? ----------- - f -(1 row) - -SELECT '["a","b","c","b"]'::jsonb @> '["a","b"]'; - ?column? ----------- - t -(1 row) - -SELECT '["a","b","c",[1,2]]'::jsonb @> '["a",[1,2]]'; - ?column? ----------- - t -(1 row) - -SELECT '["a","b","c",[1,2]]'::jsonb @> '["b",[1,2]]'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[1]}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[2]}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[3]}'; - ?column? ----------- - f -(1 row) - -SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"c":3}]}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"x":4}]}'; - ?column? 
----------- - t -(1 row) - -SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"x":4},3]}'; - ?column? ----------- - f -(1 row) - -SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"x":4},1]}'; - ?column? ----------- - t -(1 row) - --- check some corner cases for indexed nested containment (bug #13756) -create temp table nestjsonb (j jsonb); -insert into nestjsonb (j) values ('{"a":[["b",{"x":1}],["b",{"x":2}]],"c":3}'); -insert into nestjsonb (j) values ('[[14,2,3]]'); -insert into nestjsonb (j) values ('[1,[14,2,3]]'); -create index on nestjsonb using gin(j jsonb_path_ops); -set enable_seqscan = on; -set enable_bitmapscan = off; -select * from nestjsonb where j @> '{"a":[[{"x":2}]]}'::jsonb; - j ---------------------------------------------------- - {"a": [["b", {"x": 1}], ["b", {"x": 2}]], "c": 3} -(1 row) - -select * from nestjsonb where j @> '{"c":3}'; - j ---------------------------------------------------- - {"a": [["b", {"x": 1}], ["b", {"x": 2}]], "c": 3} -(1 row) - -select * from nestjsonb where j @> '[[14]]'; - j ------------------ - [[14, 2, 3]] - [1, [14, 2, 3]] -(2 rows) - -set enable_seqscan = off; -set enable_bitmapscan = on; -select * from nestjsonb where j @> '{"a":[[{"x":2}]]}'::jsonb; - j ---------------------------------------------------- - {"a": [["b", {"x": 1}], ["b", {"x": 2}]], "c": 3} -(1 row) - -select * from nestjsonb where j @> '{"c":3}'; - j ---------------------------------------------------- - {"a": [["b", {"x": 1}], ["b", {"x": 2}]], "c": 3} -(1 row) - -select * from nestjsonb where j @> '[[14]]'; - j ------------------ - [[14, 2, 3]] - [1, [14, 2, 3]] -(2 rows) - -reset enable_seqscan; -reset enable_bitmapscan; --- nested object field / array index lookup -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'n'; - ?column? ----------- - null -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'a'; - ?column? ----------- - 1 -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'b'; - ?column? ----------- - [1, 2] -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'c'; - ?column? ----------- - {"1": 2} -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'd'; - ?column? ---------------- - {"1": [2, 3]} -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'd' -> '1'; - ?column? ----------- - [2, 3] -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'e'; - ?column? ----------- - -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 0; --expecting error - ?column? ----------- - -(1 row) - -SELECT '["a","b","c",[1,2],null]'::jsonb -> 0; - ?column? ----------- - "a" -(1 row) - -SELECT '["a","b","c",[1,2],null]'::jsonb -> 1; - ?column? ----------- - "b" -(1 row) - -SELECT '["a","b","c",[1,2],null]'::jsonb -> 2; - ?column? ----------- - "c" -(1 row) - -SELECT '["a","b","c",[1,2],null]'::jsonb -> 3; - ?column? ----------- - [1, 2] -(1 row) - -SELECT '["a","b","c",[1,2],null]'::jsonb -> 3 -> 1; - ?column? ----------- - 2 -(1 row) - -SELECT '["a","b","c",[1,2],null]'::jsonb -> 4; - ?column? ----------- - null -(1 row) - -SELECT '["a","b","c",[1,2],null]'::jsonb -> 5; - ?column? ----------- - -(1 row) - -SELECT '["a","b","c",[1,2],null]'::jsonb -> -1; - ?column? ----------- - null -(1 row) - -SELECT '["a","b","c",[1,2],null]'::jsonb -> -5; - ?column? 
----------- - "a" -(1 row) - -SELECT '["a","b","c",[1,2],null]'::jsonb -> -6; - ?column? ----------- - -(1 row) - ---nested path extraction -SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{0}'; - ?column? ----------- - -(1 row) - -SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{a}'; - ?column? ----------- - "b" -(1 row) - -SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c}'; - ?column? ------------ - [1, 2, 3] -(1 row) - -SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,0}'; - ?column? ----------- - 1 -(1 row) - -SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,1}'; - ?column? ----------- - 2 -(1 row) - -SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,2}'; - ?column? ----------- - 3 -(1 row) - -SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,3}'; - ?column? ----------- - -(1 row) - -SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-1}'; - ?column? ----------- - 3 -(1 row) - -SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-3}'; - ?column? ----------- - 1 -(1 row) - -SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-4}'; - ?column? ----------- - -(1 row) - -SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{0}'; - ?column? ----------- - 0 -(1 row) - -SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{3}'; - ?column? ----------- - [3, 4] -(1 row) - -SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{4}'; - ?column? ---------------- - {"5": "five"} -(1 row) - -SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{4,5}'; - ?column? ----------- - "five" -(1 row) - ---nested exists -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'n'; - ?column? ----------- - t -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'a'; - ?column? ----------- - t -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'b'; - ?column? ----------- - t -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'c'; - ?column? ----------- - t -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'd'; - ?column? ----------- - t -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'e'; - ?column? 
----------- - f -(1 row) - --- jsonb_strip_nulls -select jsonb_strip_nulls(null); - jsonb_strip_nulls -------------------- - -(1 row) - -select jsonb_strip_nulls('1'); - jsonb_strip_nulls -------------------- - 1 -(1 row) - -select jsonb_strip_nulls('"a string"'); - jsonb_strip_nulls -------------------- - "a string" -(1 row) - -select jsonb_strip_nulls('null'); - jsonb_strip_nulls -------------------- - null -(1 row) - -select jsonb_strip_nulls('[1,2,null,3,4]'); - jsonb_strip_nulls --------------------- - [1, 2, null, 3, 4] -(1 row) - -select jsonb_strip_nulls('{"a":1,"b":null,"c":[2,null,3],"d":{"e":4,"f":null}}'); - jsonb_strip_nulls --------------------------------------------- - {"a": 1, "c": [2, null, 3], "d": {"e": 4}} -(1 row) - -select jsonb_strip_nulls('[1,{"a":1,"b":null,"c":2},3]'); - jsonb_strip_nulls --------------------------- - [1, {"a": 1, "c": 2}, 3] -(1 row) - --- an empty object is not null and should not be stripped -select jsonb_strip_nulls('{"a": {"b": null, "c": null}, "d": {} }'); - jsonb_strip_nulls --------------------- - {"a": {}, "d": {}} -(1 row) - -select jsonb_pretty('{"a": "test", "b": [1, 2, 3], "c": "test3", "d":{"dd": "test4", "dd2":{"ddd": "test5"}}}'); - jsonb_pretty ----------------------------- - { + - "a": "test", + - "b": [ + - 1, + - 2, + - 3 + - ], + - "c": "test3", + - "d": { + - "dd": "test4", + - "dd2": { + - "ddd": "test5"+ - } + - } + - } -(1 row) - -select jsonb_pretty('[{"f1":1,"f2":null},2,null,[[{"x":true},6,7],8],3]'); - jsonb_pretty ---------------------------- - [ + - { + - "f1": 1, + - "f2": null + - }, + - 2, + - null, + - [ + - [ + - { + - "x": true+ - }, + - 6, + - 7 + - ], + - 8 + - ], + - 3 + - ] -(1 row) - -select jsonb_pretty('{"a":["b", "c"], "d": {"e":"f"}}'); - jsonb_pretty ------------------- - { + - "a": [ + - "b", + - "c" + - ], + - "d": { + - "e": "f"+ - } + - } -(1 row) - -select jsonb_concat('{"d": "test", "a": [1, 2]}', '{"g": "test2", "c": {"c1":1, "c2":2}}'); - jsonb_concat -------------------------------------------------------------------- - {"a": [1, 2], "c": {"c1": 1, "c2": 2}, "d": "test", "g": "test2"} -(1 row) - -select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{"cq":"l", "b":"g", "fg":false}'; - ?column? ---------------------------------------------- - {"b": "g", "aa": 1, "cq": "l", "fg": false} -(1 row) - -select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{"aq":"l"}'; - ?column? ---------------------------------------- - {"b": 2, "aa": 1, "aq": "l", "cq": 3} -(1 row) - -select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{"aa":"l"}'; - ?column? ------------------------------- - {"b": 2, "aa": "l", "cq": 3} -(1 row) - -select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{}'; - ?column? ----------------------------- - {"b": 2, "aa": 1, "cq": 3} -(1 row) - -select '["a", "b"]'::jsonb || '["c"]'; - ?column? ------------------ - ["a", "b", "c"] -(1 row) - -select '["a", "b"]'::jsonb || '["c", "d"]'; - ?column? ----------------------- - ["a", "b", "c", "d"] -(1 row) - -select '["c"]' || '["a", "b"]'::jsonb; - ?column? ------------------ - ["c", "a", "b"] -(1 row) - -select '["a", "b"]'::jsonb || '"c"'; - ?column? ------------------ - ["a", "b", "c"] -(1 row) - -select '"c"' || '["a", "b"]'::jsonb; - ?column? ------------------ - ["c", "a", "b"] -(1 row) - -select '[]'::jsonb || '["a"]'::jsonb; - ?column? ----------- - ["a"] -(1 row) - -select '[]'::jsonb || '"a"'::jsonb; - ?column? ----------- - ["a"] -(1 row) - -select '"b"'::jsonb || '"a"'::jsonb; - ?column? 
------------- - ["b", "a"] -(1 row) - -select '{}'::jsonb || '{"a":"b"}'::jsonb; - ?column? ------------- - {"a": "b"} -(1 row) - -select '[]'::jsonb || '{"a":"b"}'::jsonb; - ?column? --------------- - [{"a": "b"}] -(1 row) - -select '{"a":"b"}'::jsonb || '[]'::jsonb; - ?column? --------------- - [{"a": "b"}] -(1 row) - -select '"a"'::jsonb || '{"a":1}'; - ?column? ------------------ - ["a", {"a": 1}] -(1 row) - -select '{"a":1}' || '"a"'::jsonb; - ?column? ------------------ - [{"a": 1}, "a"] -(1 row) - -select '[3]'::jsonb || '{}'::jsonb; - ?column? ----------- - [3, {}] -(1 row) - -select '3'::jsonb || '[]'::jsonb; - ?column? ----------- - [3] -(1 row) - -select '3'::jsonb || '4'::jsonb; - ?column? ----------- - [3, 4] -(1 row) - -select '3'::jsonb || '{}'::jsonb; - ?column? ----------- - [3, {}] -(1 row) - -select '["a", "b"]'::jsonb || '{"c":1}'; - ?column? ----------------------- - ["a", "b", {"c": 1}] -(1 row) - -select '{"c": 1}'::jsonb || '["a", "b"]'; - ?column? ----------------------- - [{"c": 1}, "a", "b"] -(1 row) - -select '{}'::jsonb || '{"cq":"l", "b":"g", "fg":false}'; - ?column? ------------------------------------- - {"b": "g", "cq": "l", "fg": false} -(1 row) - -select pg_column_size('{}'::jsonb || '{}'::jsonb) = pg_column_size('{}'::jsonb); - ?column? ----------- - t -(1 row) - -select pg_column_size('{"aa":1}'::jsonb || '{"b":2}'::jsonb) = pg_column_size('{"aa":1, "b":2}'::jsonb); - ?column? ----------- - t -(1 row) - -select pg_column_size('{"aa":1, "b":2}'::jsonb || '{}'::jsonb) = pg_column_size('{"aa":1, "b":2}'::jsonb); - ?column? ----------- - t -(1 row) - -select pg_column_size('{}'::jsonb || '{"aa":1, "b":2}'::jsonb) = pg_column_size('{"aa":1, "b":2}'::jsonb); - ?column? ----------- - t -(1 row) - -select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'a'); - jsonb_delete ------------------- - {"b": 2, "c": 3} -(1 row) - -select jsonb_delete('{"a":null , "b":2, "c":3}'::jsonb, 'a'); - jsonb_delete ------------------- - {"b": 2, "c": 3} -(1 row) - -select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'b'); - jsonb_delete ------------------- - {"a": 1, "c": 3} -(1 row) - -select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'c'); - jsonb_delete ------------------- - {"a": 1, "b": 2} -(1 row) - -select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'd'); - jsonb_delete --------------------------- - {"a": 1, "b": 2, "c": 3} -(1 row) - -select '{"a":1 , "b":2, "c":3}'::jsonb - 'a'; - ?column? ------------------- - {"b": 2, "c": 3} -(1 row) - -select '{"a":null , "b":2, "c":3}'::jsonb - 'a'; - ?column? ------------------- - {"b": 2, "c": 3} -(1 row) - -select '{"a":1 , "b":2, "c":3}'::jsonb - 'b'; - ?column? ------------------- - {"a": 1, "c": 3} -(1 row) - -select '{"a":1 , "b":2, "c":3}'::jsonb - 'c'; - ?column? ------------------- - {"a": 1, "b": 2} -(1 row) - -select '{"a":1 , "b":2, "c":3}'::jsonb - 'd'; - ?column? --------------------------- - {"a": 1, "b": 2, "c": 3} -(1 row) - -select pg_column_size('{"a":1 , "b":2, "c":3}'::jsonb - 'b') = pg_column_size('{"a":1, "b":2}'::jsonb); - ?column? ----------- - t -(1 row) - -select '["a","b","c"]'::jsonb - 3; - ?column? ------------------ - ["a", "b", "c"] -(1 row) - -select '["a","b","c"]'::jsonb - 2; - ?column? ------------- - ["a", "b"] -(1 row) - -select '["a","b","c"]'::jsonb - 1; - ?column? ------------- - ["a", "c"] -(1 row) - -select '["a","b","c"]'::jsonb - 0; - ?column? ------------- - ["b", "c"] -(1 row) - -select '["a","b","c"]'::jsonb - -1; - ?column? 
------------- - ["a", "b"] -(1 row) - -select '["a","b","c"]'::jsonb - -2; - ?column? ------------- - ["a", "c"] -(1 row) - -select '["a","b","c"]'::jsonb - -3; - ?column? ------------- - ["b", "c"] -(1 row) - -select '["a","b","c"]'::jsonb - -4; - ?column? ------------------ - ["a", "b", "c"] -(1 row) - -select '{"a":1 , "b":2, "c":3}'::jsonb - '{b}'::text[]; - ?column? ------------------- - {"a": 1, "c": 3} -(1 row) - -select '{"a":1 , "b":2, "c":3}'::jsonb - '{c,b}'::text[]; - ?column? ----------- - {"a": 1} -(1 row) - -select '{"a":1 , "b":2, "c":3}'::jsonb - '{}'::text[]; - ?column? --------------------------- - {"a": 1, "b": 2, "c": 3} -(1 row) - -select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{n}', '[1,2,3]'); - jsonb_set --------------------------------------------------------------------------- - {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": [1, 2, 3]} -(1 row) - -select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '[1,2,3]'); - jsonb_set ------------------------------------------------------------------------------ - {"a": 1, "b": [1, [1, 2, 3]], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} -(1 row) - -select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,1,0}', '[1,2,3]'); - jsonb_set ------------------------------------------------------------------------------ - {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [[1, 2, 3], 3]}, "n": null} -(1 row) - -select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,NULL,0}', '[1,2,3]'); -ERROR: path element at position 2 is null -select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{n}', '{"1": 2}'); - jsonb_set -------------------------------------------------------------------------- - {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": {"1": 2}} -(1 row) - -select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '{"1": 2}'); - jsonb_set ----------------------------------------------------------------------------- - {"a": 1, "b": [1, {"1": 2}], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} -(1 row) - -select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,1,0}', '{"1": 2}'); - jsonb_set ----------------------------------------------------------------------------- - {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [{"1": 2}, 3]}, "n": null} -(1 row) - -select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,NULL,0}', '{"1": 2}'); -ERROR: path element at position 2 is null -select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '"test"'); - jsonb_set --------------------------------------------------------------------------- - {"a": 1, "b": [1, "test"], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} -(1 row) - -select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '{"f": "test"}'); - jsonb_set ---------------------------------------------------------------------------------- - {"a": 1, "b": [1, {"f": "test"}], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} -(1 row) - -select jsonb_delete_path('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}', '{n}'); - jsonb_delete_path ----------------------------------------------------------- - {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}} -(1 row) - -select 
jsonb_delete_path('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}', '{b,-1}'); - jsonb_delete_path ------------------------------------------------------------------- - {"a": 1, "b": [1], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} -(1 row) - -select jsonb_delete_path('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}', '{d,1,0}'); - jsonb_delete_path ------------------------------------------------------------------- - {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [3]}, "n": null} -(1 row) - -select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{n}'; - ?column? ----------------------------------------------------------- - {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}} -(1 row) - -select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{b,-1}'; - ?column? ------------------------------------------------------------------- - {"a": 1, "b": [1], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} -(1 row) - -select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{b,-1e}'; -- invalid array subscript -ERROR: path element at position 2 is not an integer: "-1e" -select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{d,1,0}'; - ?column? ------------------------------------------------------------------- - {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [3]}, "n": null} -(1 row) - --- empty structure and error conditions for delete and replace -select '"a"'::jsonb - 'a'; -- error -ERROR: cannot delete from scalar -select '{}'::jsonb - 'a'; - ?column? ----------- - {} -(1 row) - -select '[]'::jsonb - 'a'; - ?column? ----------- - [] -(1 row) - -select '"a"'::jsonb - 1; -- error -ERROR: cannot delete from scalar -select '{}'::jsonb - 1; -- error -ERROR: cannot delete from object using integer index -select '[]'::jsonb - 1; - ?column? ----------- - [] -(1 row) - -select '"a"'::jsonb #- '{a}'; -- error -ERROR: cannot delete path in scalar -select '{}'::jsonb #- '{a}'; - ?column? ----------- - {} -(1 row) - -select '[]'::jsonb #- '{a}'; - ?column? 
----------- - [] -(1 row) - -select jsonb_set('"a"','{a}','"b"'); --error -ERROR: cannot set path in scalar -select jsonb_set('{}','{a}','"b"', false); - jsonb_set ------------ - {} -(1 row) - -select jsonb_set('[]','{1}','"b"', false); - jsonb_set ------------ - [] -(1 row) - -select jsonb_set('[{"f1":1,"f2":null},2,null,3]', '{0}','[2,3,4]', false); - jsonb_set -------------------------- - [[2, 3, 4], 2, null, 3] -(1 row) - --- jsonb_set adding instead of replacing --- prepend to array -select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{b,-33}','{"foo":123}'); - jsonb_set -------------------------------------------------------- - {"a": 1, "b": [{"foo": 123}, 0, 1, 2], "c": {"d": 4}} -(1 row) - --- append to array -select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{b,33}','{"foo":123}'); - jsonb_set -------------------------------------------------------- - {"a": 1, "b": [0, 1, 2, {"foo": 123}], "c": {"d": 4}} -(1 row) - --- check nesting levels addition -select jsonb_set('{"a":1,"b":[4,5,[0,1,2],6,7],"c":{"d":4}}','{b,2,33}','{"foo":123}'); - jsonb_set ---------------------------------------------------------------------- - {"a": 1, "b": [4, 5, [0, 1, 2, {"foo": 123}], 6, 7], "c": {"d": 4}} -(1 row) - --- add new key -select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{c,e}','{"foo":123}'); - jsonb_set ------------------------------------------------------------- - {"a": 1, "b": [0, 1, 2], "c": {"d": 4, "e": {"foo": 123}}} -(1 row) - --- adding doesn't do anything if elements before last aren't present -select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{x,-33}','{"foo":123}'); - jsonb_set ------------------------------------------ - {"a": 1, "b": [0, 1, 2], "c": {"d": 4}} -(1 row) - -select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{x,y}','{"foo":123}'); - jsonb_set ------------------------------------------ - {"a": 1, "b": [0, 1, 2], "c": {"d": 4}} -(1 row) - --- add to empty object -select jsonb_set('{}','{x}','{"foo":123}'); - jsonb_set ---------------------- - {"x": {"foo": 123}} -(1 row) - ---add to empty array -select jsonb_set('[]','{0}','{"foo":123}'); - jsonb_set ----------------- - [{"foo": 123}] -(1 row) - -select jsonb_set('[]','{99}','{"foo":123}'); - jsonb_set ----------------- - [{"foo": 123}] -(1 row) - -select jsonb_set('[]','{-99}','{"foo":123}'); - jsonb_set ----------------- - [{"foo": 123}] -(1 row) - -select jsonb_set('{"a": [1, 2, 3]}', '{a, non_integer}', '"new_value"'); -ERROR: path element at position 2 is not an integer: "non_integer" -select jsonb_set('{"a": {"b": [1, 2, 3]}}', '{a, b, non_integer}', '"new_value"'); -ERROR: path element at position 3 is not an integer: "non_integer" -select jsonb_set('{"a": {"b": [1, 2, 3]}}', '{a, b, NULL}', '"new_value"'); -ERROR: path element at position 3 is null --- jsonb_set_lax -\pset null NULL --- pass though non nulls to jsonb_set -select jsonb_set_lax('{"a":1,"b":2}','{b}','5') ; - jsonb_set_lax ------------------- - {"a": 1, "b": 5} -(1 row) - -select jsonb_set_lax('{"a":1,"b":2}','{d}','6', true) ; - jsonb_set_lax --------------------------- - {"a": 1, "b": 2, "d": 6} -(1 row) - --- using the default treatment -select jsonb_set_lax('{"a":1,"b":2}','{b}',null); - jsonb_set_lax ---------------------- - {"a": 1, "b": null} -(1 row) - -select jsonb_set_lax('{"a":1,"b":2}','{d}',null,true); - jsonb_set_lax ------------------------------ - {"a": 1, "b": 2, "d": null} -(1 row) - --- errors -select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, true, null); -ERROR: null_value_treatment must be "delete_key", 
"return_target", "use_json_null", or "raise_exception" -select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, true, 'no_such_treatment'); -ERROR: null_value_treatment must be "delete_key", "return_target", "use_json_null", or "raise_exception" --- explicit treatments -select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'raise_exception') as raise_exception; -ERROR: JSON value must not be null -DETAIL: Exception was raised because null_value_treatment is "raise_exception". -HINT: To avoid, either change the null_value_treatment argument or ensure that an SQL NULL is not passed. -select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'return_target') as return_target; - return_target ------------------- - {"a": 1, "b": 2} -(1 row) - -select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'delete_key') as delete_key; - delete_key ------------- - {"a": 1} -(1 row) - -select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'use_json_null') as use_json_null; - use_json_null ---------------------- - {"a": 1, "b": null} -(1 row) - -\pset null '' --- jsonb_insert -select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '"new_value"'); - jsonb_insert -------------------------------- - {"a": [0, "new_value", 1, 2]} -(1 row) - -select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '"new_value"', true); - jsonb_insert -------------------------------- - {"a": [0, 1, "new_value", 2]} -(1 row) - -select jsonb_insert('{"a": {"b": {"c": [0, 1, "test1", "test2"]}}}', '{a, b, c, 2}', '"new_value"'); - jsonb_insert ------------------------------------------------------------- - {"a": {"b": {"c": [0, 1, "new_value", "test1", "test2"]}}} -(1 row) - -select jsonb_insert('{"a": {"b": {"c": [0, 1, "test1", "test2"]}}}', '{a, b, c, 2}', '"new_value"', true); - jsonb_insert ------------------------------------------------------------- - {"a": {"b": {"c": [0, 1, "test1", "new_value", "test2"]}}} -(1 row) - -select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '{"b": "value"}'); - jsonb_insert ----------------------------------- - {"a": [0, {"b": "value"}, 1, 2]} -(1 row) - -select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '["value1", "value2"]'); - jsonb_insert ----------------------------------------- - {"a": [0, ["value1", "value2"], 1, 2]} -(1 row) - --- edge cases -select jsonb_insert('{"a": [0,1,2]}', '{a, 0}', '"new_value"'); - jsonb_insert -------------------------------- - {"a": ["new_value", 0, 1, 2]} -(1 row) - -select jsonb_insert('{"a": [0,1,2]}', '{a, 0}', '"new_value"', true); - jsonb_insert -------------------------------- - {"a": [0, "new_value", 1, 2]} -(1 row) - -select jsonb_insert('{"a": [0,1,2]}', '{a, 2}', '"new_value"'); - jsonb_insert -------------------------------- - {"a": [0, 1, "new_value", 2]} -(1 row) - -select jsonb_insert('{"a": [0,1,2]}', '{a, 2}', '"new_value"', true); - jsonb_insert -------------------------------- - {"a": [0, 1, 2, "new_value"]} -(1 row) - -select jsonb_insert('{"a": [0,1,2]}', '{a, -1}', '"new_value"'); - jsonb_insert -------------------------------- - {"a": [0, 1, "new_value", 2]} -(1 row) - -select jsonb_insert('{"a": [0,1,2]}', '{a, -1}', '"new_value"', true); - jsonb_insert -------------------------------- - {"a": [0, 1, 2, "new_value"]} -(1 row) - -select jsonb_insert('[]', '{1}', '"new_value"'); - jsonb_insert ---------------- - ["new_value"] -(1 row) - -select jsonb_insert('[]', '{1}', '"new_value"', true); - jsonb_insert ---------------- - ["new_value"] -(1 row) - -select jsonb_insert('{"a": []}', 
'{a, 1}', '"new_value"'); - jsonb_insert ----------------------- - {"a": ["new_value"]} -(1 row) - -select jsonb_insert('{"a": []}', '{a, 1}', '"new_value"', true); - jsonb_insert ----------------------- - {"a": ["new_value"]} -(1 row) - -select jsonb_insert('{"a": [0,1,2]}', '{a, 10}', '"new_value"'); - jsonb_insert -------------------------------- - {"a": [0, 1, 2, "new_value"]} -(1 row) - -select jsonb_insert('{"a": [0,1,2]}', '{a, -10}', '"new_value"'); - jsonb_insert -------------------------------- - {"a": ["new_value", 0, 1, 2]} -(1 row) - --- jsonb_insert should be able to insert new value for objects, but not to replace -select jsonb_insert('{"a": {"b": "value"}}', '{a, c}', '"new_value"'); - jsonb_insert ------------------------------------------ - {"a": {"b": "value", "c": "new_value"}} -(1 row) - -select jsonb_insert('{"a": {"b": "value"}}', '{a, c}', '"new_value"', true); - jsonb_insert ------------------------------------------ - {"a": {"b": "value", "c": "new_value"}} -(1 row) - -select jsonb_insert('{"a": {"b": "value"}}', '{a, b}', '"new_value"'); -ERROR: cannot replace existing key -HINT: Try using the function jsonb_set to replace key value. -select jsonb_insert('{"a": {"b": "value"}}', '{a, b}', '"new_value"', true); -ERROR: cannot replace existing key -HINT: Try using the function jsonb_set to replace key value. --- jsonb subscript -select ('123'::jsonb)['a']; - jsonb -------- - -(1 row) - -select ('123'::jsonb)[0]; - jsonb -------- - -(1 row) - -select ('123'::jsonb)[NULL]; - jsonb -------- - -(1 row) - -select ('{"a": 1}'::jsonb)['a']; - jsonb -------- - 1 -(1 row) - -select ('{"a": 1}'::jsonb)[0]; - jsonb -------- - -(1 row) - -select ('{"a": 1}'::jsonb)['not_exist']; - jsonb -------- - -(1 row) - -select ('{"a": 1}'::jsonb)[NULL]; - jsonb -------- - -(1 row) - -select ('[1, "2", null]'::jsonb)['a']; - jsonb -------- - -(1 row) - -select ('[1, "2", null]'::jsonb)[0]; - jsonb -------- - 1 -(1 row) - -select ('[1, "2", null]'::jsonb)['1']; - jsonb -------- - "2" -(1 row) - -select ('[1, "2", null]'::jsonb)[1.0]; -ERROR: subscript type numeric is not supported -LINE 1: select ('[1, "2", null]'::jsonb)[1.0]; - ^ -HINT: jsonb subscript must be coercible to either integer or text. 
-select ('[1, "2", null]'::jsonb)[2]; - jsonb -------- - null -(1 row) - -select ('[1, "2", null]'::jsonb)[3]; - jsonb -------- - -(1 row) - -select ('[1, "2", null]'::jsonb)[-2]; - jsonb -------- - "2" -(1 row) - -select ('[1, "2", null]'::jsonb)[1]['a']; - jsonb -------- - -(1 row) - -select ('[1, "2", null]'::jsonb)[1][0]; - jsonb -------- - -(1 row) - -select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['b']; - jsonb -------- - "c" -(1 row) - -select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d']; - jsonb ------------ - [1, 2, 3] -(1 row) - -select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d'][1]; - jsonb -------- - 2 -(1 row) - -select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d']['a']; - jsonb -------- - -(1 row) - -select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1']; - jsonb ---------------- - {"a2": "aaa"} -(1 row) - -select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1']['a2']; - jsonb -------- - "aaa" -(1 row) - -select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1']['a2']['a3']; - jsonb -------- - -(1 row) - -select ('{"a": ["a1", {"b1": ["aaa", "bbb", "ccc"]}], "b": "bb"}'::jsonb)['a'][1]['b1']; - jsonb ------------------------ - ["aaa", "bbb", "ccc"] -(1 row) - -select ('{"a": ["a1", {"b1": ["aaa", "bbb", "ccc"]}], "b": "bb"}'::jsonb)['a'][1]['b1'][2]; - jsonb -------- - "ccc" -(1 row) - --- slices are not supported -select ('{"a": 1}'::jsonb)['a':'b']; -ERROR: jsonb subscript does not support slices -LINE 1: select ('{"a": 1}'::jsonb)['a':'b']; - ^ -select ('[1, "2", null]'::jsonb)[1:2]; -ERROR: jsonb subscript does not support slices -LINE 1: select ('[1, "2", null]'::jsonb)[1:2]; - ^ -select ('[1, "2", null]'::jsonb)[:2]; -ERROR: jsonb subscript does not support slices -LINE 1: select ('[1, "2", null]'::jsonb)[:2]; - ^ -select ('[1, "2", null]'::jsonb)[1:]; -ERROR: jsonb subscript does not support slices -LINE 1: select ('[1, "2", null]'::jsonb)[1:]; - ^ -select ('[1, "2", null]'::jsonb)[:]; -ERROR: jsonb subscript does not support slices -create TEMP TABLE test_jsonb_subscript ( - id int, - test_json jsonb -); -insert into test_jsonb_subscript values -(1, '{}'), -- empty jsonb -(2, '{"key": "value"}'); -- jsonb with data --- update empty jsonb -update test_jsonb_subscript set test_json['a'] = '1' where id = 1; -select * from test_jsonb_subscript; - id | test_json -----+------------------ - 2 | {"key": "value"} - 1 | {"a": 1} -(2 rows) - --- update jsonb with some data -update test_jsonb_subscript set test_json['a'] = '1' where id = 2; -select * from test_jsonb_subscript; - id | test_json -----+-------------------------- - 1 | {"a": 1} - 2 | {"a": 1, "key": "value"} -(2 rows) - --- replace jsonb -update test_jsonb_subscript set test_json['a'] = '"test"'; -select * from test_jsonb_subscript; - id | test_json -----+------------------------------- - 1 | {"a": "test"} - 2 | {"a": "test", "key": "value"} -(2 rows) - --- replace by object -update test_jsonb_subscript set test_json['a'] = '{"b": 1}'::jsonb; -select * from test_jsonb_subscript; - id | test_json -----+--------------------------------- - 1 | {"a": {"b": 1}} - 2 | {"a": {"b": 1}, "key": "value"} -(2 rows) - --- replace by array -update test_jsonb_subscript set test_json['a'] = '[1, 2, 3]'::jsonb; -select * from test_jsonb_subscript; - id | test_json -----+---------------------------------- - 1 | {"a": [1, 2, 3]} - 2 | {"a": [1, 2, 3], "key": "value"} -(2 rows) - --- use jsonb subscription in where clause -select * from 
test_jsonb_subscript where test_json['key'] = '"value"'; - id | test_json -----+---------------------------------- - 2 | {"a": [1, 2, 3], "key": "value"} -(1 row) - -select * from test_jsonb_subscript where test_json['key_doesnt_exists'] = '"value"'; - id | test_json -----+----------- -(0 rows) - -select * from test_jsonb_subscript where test_json['key'] = '"wrong_value"'; - id | test_json -----+----------- -(0 rows) - --- NULL -update test_jsonb_subscript set test_json[NULL] = '1'; -ERROR: jsonb subscript in assignment must not be null -update test_jsonb_subscript set test_json['another_key'] = NULL; -select * from test_jsonb_subscript; - id | test_json -----+------------------------------------------------------- - 1 | {"a": [1, 2, 3], "another_key": null} - 2 | {"a": [1, 2, 3], "key": "value", "another_key": null} -(2 rows) - --- NULL as jsonb source -insert into test_jsonb_subscript values (3, NULL); -update test_jsonb_subscript set test_json['a'] = '1' where id = 3; -select * from test_jsonb_subscript; - id | test_json -----+------------------------------------------------------- - 1 | {"a": [1, 2, 3], "another_key": null} - 2 | {"a": [1, 2, 3], "key": "value", "another_key": null} - 3 | {"a": 1} -(3 rows) - -update test_jsonb_subscript set test_json = NULL where id = 3; -update test_jsonb_subscript set test_json[0] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+--------------------------------------------------------------- - 1 | {"0": 1, "a": [1, 2, 3], "another_key": null} - 2 | {"0": 1, "a": [1, 2, 3], "key": "value", "another_key": null} - 3 | [1] -(3 rows) - --- Fill the gaps logic -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '[0]'); -update test_jsonb_subscript set test_json[5] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+-------------------------------- - 1 | [0, null, null, null, null, 1] -(1 row) - -update test_jsonb_subscript set test_json[-4] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+----------------------------- - 1 | [0, null, 1, null, null, 1] -(1 row) - -update test_jsonb_subscript set test_json[-8] = '1'; -ERROR: path element at position 1 is out of range: -8 -select * from test_jsonb_subscript; - id | test_json -----+----------------------------- - 1 | [0, null, 1, null, null, 1] -(1 row) - --- keep consistent values position -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '[]'); -update test_jsonb_subscript set test_json[5] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+----------------------------------- - 1 | [null, null, null, null, null, 1] -(1 row) - --- create the whole path -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '{}'); -update test_jsonb_subscript set test_json['a'][0]['b'][0]['c'] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+---------------------------- - 1 | {"a": [{"b": [{"c": 1}]}]} -(1 row) - -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '{}'); -update test_jsonb_subscript set test_json['a'][2]['b'][2]['c'][2] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+------------------------------------------------------------------ - 1 | {"a": [null, null, {"b": [null, null, {"c": [null, null, 1]}]}]} -(1 row) - --- create the whole path with already existing keys -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '{"b": 1}'); -update test_jsonb_subscript set 
test_json['a'][0] = '2'; -select * from test_jsonb_subscript; - id | test_json -----+-------------------- - 1 | {"a": [2], "b": 1} -(1 row) - --- the start jsonb is an object, first subscript is treated as a key -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '{}'); -update test_jsonb_subscript set test_json[0]['a'] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+----------------- - 1 | {"0": {"a": 1}} -(1 row) - --- the start jsonb is an array -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '[]'); -update test_jsonb_subscript set test_json[0]['a'] = '1'; -update test_jsonb_subscript set test_json[2]['b'] = '2'; -select * from test_jsonb_subscript; - id | test_json -----+---------------------------- - 1 | [{"a": 1}, null, {"b": 2}] -(1 row) - --- overwriting an existing path -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '{}'); -update test_jsonb_subscript set test_json['a']['b'][1] = '1'; -update test_jsonb_subscript set test_json['a']['b'][10] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+---------------------------------------------------------------------------- - 1 | {"a": {"b": [null, 1, null, null, null, null, null, null, null, null, 1]}} -(1 row) - -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '[]'); -update test_jsonb_subscript set test_json[0][0][0] = '1'; -update test_jsonb_subscript set test_json[0][0][1] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+------------ - 1 | [[[1, 1]]] -(1 row) - -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '{}'); -update test_jsonb_subscript set test_json['a']['b'][10] = '1'; -update test_jsonb_subscript set test_json['a'][10][10] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+------------------------------------------------------------------------------------------------------------------------------------------------------ - 1 | {"a": {"b": [null, null, null, null, null, null, null, null, null, null, 1], "10": [null, null, null, null, null, null, null, null, null, null, 1]}} -(1 row) - --- an empty sub element -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '{"a": {}}'); -update test_jsonb_subscript set test_json['a']['b']['c'][2] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+-------------------------------------- - 1 | {"a": {"b": {"c": [null, null, 1]}}} -(1 row) - -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '{"a": []}'); -update test_jsonb_subscript set test_json['a'][1]['c'][2] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+--------------------------------------- - 1 | {"a": [null, {"c": [null, null, 1]}]} -(1 row) - --- trying replace assuming a composite object, but it's an element or a value -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '{"a": 1}'); -update test_jsonb_subscript set test_json['a']['b'] = '1'; -ERROR: cannot replace existing key -DETAIL: The path assumes key is a composite object, but it is a scalar value. -update test_jsonb_subscript set test_json['a']['b']['c'] = '1'; -ERROR: cannot replace existing key -DETAIL: The path assumes key is a composite object, but it is a scalar value. 
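-- Illustrative aside (not from the suite): the subscripted-UPDATE cases above
-- combine two rules -- missing path elements are created on assignment, with
-- array gaps padded by nulls, while an existing scalar on the path raises
-- "cannot replace existing key". A throwaway sketch; the table name is
-- hypothetical:
create temp table subscript_demo (j jsonb);
insert into subscript_demo values ('{"a": 1}');
update subscript_demo set j['b'][2] = '"x"';   -- j becomes {"a": 1, "b": [null, null, "x"]}
update subscript_demo set j['a']['c'] = '2';   -- ERROR: cannot replace existing key
drop table subscript_demo;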
-update test_jsonb_subscript set test_json['a'][0] = '1'; -ERROR: cannot replace existing key -DETAIL: The path assumes key is a composite object, but it is a scalar value. -update test_jsonb_subscript set test_json['a'][0]['c'] = '1'; -ERROR: cannot replace existing key -DETAIL: The path assumes key is a composite object, but it is a scalar value. -update test_jsonb_subscript set test_json['a'][0][0] = '1'; -ERROR: cannot replace existing key -DETAIL: The path assumes key is a composite object, but it is a scalar value. --- trying replace assuming a composite object, but it's a raw scalar -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, 'null'); -update test_jsonb_subscript set test_json[0] = '1'; -ERROR: cannot replace existing key -DETAIL: The path assumes key is a composite object, but it is a scalar value. -update test_jsonb_subscript set test_json[0][0] = '1'; -ERROR: cannot replace existing key -DETAIL: The path assumes key is a composite object, but it is a scalar value. --- try some things with short-header and toasted subscript values -drop table test_jsonb_subscript; -create temp table test_jsonb_subscript ( - id text, - test_json jsonb -); -insert into test_jsonb_subscript values('foo', '{"foo": "bar"}'); -insert into test_jsonb_subscript - select s, ('{"' || s || '": "bar"}')::jsonb from repeat('xyzzy', 500) s; -select length(id), test_json[id] from test_jsonb_subscript; - length | test_json ---------+----------- - 3 | "bar" - 2500 | "bar" -(2 rows) - -update test_jsonb_subscript set test_json[id] = '"baz"'; -select length(id), test_json[id] from test_jsonb_subscript; - length | test_json ---------+----------- - 3 | "baz" - 2500 | "baz" -(2 rows) - -\x -table test_jsonb_subscript; --[ RECORD 1 ]-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -id | foo -test_json | {"foo": "baz"} --[ RECORD 2 ]-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -id | 
xyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzy -test_json | 
{"xyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzy": "baz"} - -\x --- jsonb to tsvector -select to_tsvector('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::jsonb); - to_tsvector ---------------------------------------------------------------------------- - 'aaa':1 'bbb':2 'ccc':4 'ddd':3 'eee':6 'fff':7 'ggg':8 'hhh':10 'iii':11 -(1 row) - --- jsonb to tsvector with config -select to_tsvector('simple', '{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::jsonb); - to_tsvector ---------------------------------------------------------------------------- - 'aaa':1 'bbb':2 'ccc':4 'ddd':3 'eee':6 'fff':7 'ggg':8 'hhh':10 'iii':11 -(1 row) - --- jsonb to tsvector with stop words -select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": ["the eee fff ggg"], "c": {"d": "hhh. 
iii"}}'::jsonb); - to_tsvector ----------------------------------------------------------------------------- - 'aaa':1 'bbb':3 'ccc':5 'ddd':4 'eee':8 'fff':9 'ggg':10 'hhh':12 'iii':13 -(1 row) - --- jsonb to tsvector with numeric values -select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": 123, "c": 456}'::jsonb); - to_tsvector ---------------------------------- - 'aaa':1 'bbb':3 'ccc':5 'ddd':4 -(1 row) - --- jsonb_to_tsvector -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"all"'); - jsonb_to_tsvector ----------------------------------------------------------------------------------------- - '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"key"'); - jsonb_to_tsvector --------------------------------- - 'b':2 'c':4 'd':6 'f':8 'g':10 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"string"'); - jsonb_to_tsvector -------------------- - 'aaa':1 'bbb':3 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"numeric"'); - jsonb_to_tsvector -------------------- - '123':1 '456':3 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"boolean"'); - jsonb_to_tsvector -------------------- - 'fals':3 'true':1 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["string", "numeric"]'); - jsonb_to_tsvector ---------------------------------- - '123':5 '456':7 'aaa':1 'bbb':3 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"all"'); - jsonb_to_tsvector ----------------------------------------------------------------------------------------- - '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"key"'); - jsonb_to_tsvector --------------------------------- - 'b':2 'c':4 'd':6 'f':8 'g':10 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"string"'); - jsonb_to_tsvector -------------------- - 'aaa':1 'bbb':3 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"numeric"'); - jsonb_to_tsvector -------------------- - '123':1 '456':3 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"boolean"'); - jsonb_to_tsvector -------------------- - 'fals':3 'true':1 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["string", "numeric"]'); - jsonb_to_tsvector ---------------------------------- - '123':5 '456':7 'aaa':1 'bbb':3 -(1 row) - --- to_tsvector corner cases -select to_tsvector('""'::jsonb); - to_tsvector -------------- - -(1 row) - -select to_tsvector('{}'::jsonb); - to_tsvector -------------- - -(1 row) - -select to_tsvector('[]'::jsonb); - to_tsvector -------------- 
- -(1 row) - -select to_tsvector('null'::jsonb); - to_tsvector -------------- - -(1 row) - --- jsonb_to_tsvector corner cases -select jsonb_to_tsvector('""'::jsonb, '"all"'); - jsonb_to_tsvector -------------------- - -(1 row) - -select jsonb_to_tsvector('{}'::jsonb, '"all"'); - jsonb_to_tsvector -------------------- - -(1 row) - -select jsonb_to_tsvector('[]'::jsonb, '"all"'); - jsonb_to_tsvector -------------------- - -(1 row) - -select jsonb_to_tsvector('null'::jsonb, '"all"'); - jsonb_to_tsvector -------------------- - -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '""'); -ERROR: wrong flag in flag array: "" -HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '{}'); -ERROR: wrong flag type, only arrays and scalars are allowed -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '[]'); - jsonb_to_tsvector -------------------- - -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, 'null'); -ERROR: flag array element is not a string -HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["all", null]'); -ERROR: flag array element is not a string -HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". --- ts_headline for jsonb -select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh')); - ts_headline ------------------------------------------------------------------------------------------------------------------- - {"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]} -(1 row) - -select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh')); - ts_headline ------------------------------------------------------------------------------------------------ - {"a": "aaa bbb", "b": {"c": "ccc ddd fff"}, "d": ["ggg hhh", "iii jjj"]} -(1 row) - -select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >'); - ts_headline ---------------------------------------------------------------------------------------------------- - {"a": "aaa ", "b": {"c": "ccc fff", "c1": "ccc1 ddd1"}, "d": ["ggg ", "iii jjj"]} -(1 row) - -select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >'); - ts_headline ---------------------------------------------------------------------------------------------------- - {"a": "aaa ", "b": {"c": "ccc fff", "c1": "ccc1 ddd1"}, "d": ["ggg ", "iii jjj"]} -(1 row) - --- corner cases for ts_headline with jsonb -select ts_headline('null'::jsonb, tsquery('aaa & bbb')); - ts_headline -------------- - null -(1 row) - -select ts_headline('{}'::jsonb, tsquery('aaa & bbb')); - ts_headline -------------- - {} -(1 row) - -select ts_headline('[]'::jsonb, tsquery('aaa & bbb')); - ts_headline -------------- - [] -(1 
row) - --- casts -select 'true'::jsonb::bool; - bool ------- - t -(1 row) - -select '[]'::jsonb::bool; -ERROR: cannot cast jsonb array to type boolean -select '1.0'::jsonb::float; - float8 --------- - 1 -(1 row) - -select '[1.0]'::jsonb::float; -ERROR: cannot cast jsonb array to type double precision -select '12345'::jsonb::int4; - int4 -------- - 12345 -(1 row) - -select '"hello"'::jsonb::int4; -ERROR: cannot cast jsonb string to type integer -select '12345'::jsonb::numeric; - numeric ---------- - 12345 -(1 row) - -select '{}'::jsonb::numeric; -ERROR: cannot cast jsonb object to type numeric -select '12345.05'::jsonb::numeric; - numeric ----------- - 12345.05 -(1 row) - -select '12345.05'::jsonb::float4; - float4 ----------- - 12345.05 -(1 row) - -select '12345.05'::jsonb::float8; - float8 ----------- - 12345.05 -(1 row) - -select '12345.05'::jsonb::int2; - int2 -------- - 12345 -(1 row) - -select '12345.05'::jsonb::int4; - int4 -------- - 12345 -(1 row) - -select '12345.05'::jsonb::int8; - int8 -------- - 12345 -(1 row) - -select '12345.0000000000000000000000000000000000000000000005'::jsonb::numeric; - numeric ------------------------------------------------------- - 12345.0000000000000000000000000000000000000000000005 -(1 row) - -select '12345.0000000000000000000000000000000000000000000005'::jsonb::float4; - float4 --------- - 12345 -(1 row) - -select '12345.0000000000000000000000000000000000000000000005'::jsonb::float8; - float8 --------- - 12345 -(1 row) - -select '12345.0000000000000000000000000000000000000000000005'::jsonb::int2; - int2 -------- - 12345 -(1 row) - -select '12345.0000000000000000000000000000000000000000000005'::jsonb::int4; - int4 -------- - 12345 -(1 row) - -select '12345.0000000000000000000000000000000000000000000005'::jsonb::int8; - int8 -------- - 12345 -(1 row) - +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/json_encoding_2.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/json_encoding.out --- /tmp/cirrus-ci-build/src/test/regress/expected/json_encoding_2.out 2024-03-07 14:25:00.331621000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/json_encoding.out 2024-03-07 14:27:17.532840000 +0000 @@ -1,9 +1,2 @@ --- --- encoding-sensitive tests for json and jsonb --- --- We provide expected-results files for UTF8 (json_encoding.out) --- and for SQL_ASCII (json_encoding_1.out). Skip otherwise. -SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII') - AS skip_test \gset -\if :skip_test -\quit +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
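-- Illustrative aside (not from the suite): the casts block in the jsonb diff
-- above shows that scalar casts from jsonb succeed only when the stored jsonb
-- type matches the target's family (boolean to bool, numeric to the numeric
-- types), and cross-type casts raise an error. A minimal sketch:
select 'true'::jsonb::bool;       -- t
select '1.25e2'::jsonb::float8;   -- 125
select '"12"'::jsonb::int4;       -- ERROR: cannot cast jsonb string to type integer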
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/jsonpath.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/jsonpath.out --- /tmp/cirrus-ci-build/src/test/regress/expected/jsonpath.out 2024-03-07 14:25:00.331745000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/jsonpath.out 2024-03-07 14:27:17.534187000 +0000 @@ -1,1296 +1,2 @@ ---jsonpath io -select ''::jsonpath; -ERROR: invalid input syntax for type jsonpath: "" -LINE 1: select ''::jsonpath; - ^ -select '$'::jsonpath; - jsonpath ----------- - $ -(1 row) - -select 'strict $'::jsonpath; - jsonpath ----------- - strict $ -(1 row) - -select 'lax $'::jsonpath; - jsonpath ----------- - $ -(1 row) - -select '$.a'::jsonpath; - jsonpath ----------- - $."a" -(1 row) - -select '$.a.v'::jsonpath; - jsonpath ------------ - $."a"."v" -(1 row) - -select '$.a.*'::jsonpath; - jsonpath ----------- - $."a".* -(1 row) - -select '$.*[*]'::jsonpath; - jsonpath ----------- - $.*[*] -(1 row) - -select '$.a[*]'::jsonpath; - jsonpath ----------- - $."a"[*] -(1 row) - -select '$.a[*][*]'::jsonpath; - jsonpath -------------- - $."a"[*][*] -(1 row) - -select '$[*]'::jsonpath; - jsonpath ----------- - $[*] -(1 row) - -select '$[0]'::jsonpath; - jsonpath ----------- - $[0] -(1 row) - -select '$[*][0]'::jsonpath; - jsonpath ----------- - $[*][0] -(1 row) - -select '$[*].a'::jsonpath; - jsonpath ----------- - $[*]."a" -(1 row) - -select '$[*][0].a.b'::jsonpath; - jsonpath ------------------ - $[*][0]."a"."b" -(1 row) - -select '$.a.**.b'::jsonpath; - jsonpath --------------- - $."a".**."b" -(1 row) - -select '$.a.**{2}.b'::jsonpath; - jsonpath ------------------ - $."a".**{2}."b" -(1 row) - -select '$.a.**{2 to 2}.b'::jsonpath; - jsonpath ------------------ - $."a".**{2}."b" -(1 row) - -select '$.a.**{2 to 5}.b'::jsonpath; - jsonpath ----------------------- - $."a".**{2 to 5}."b" -(1 row) - -select '$.a.**{0 to 5}.b'::jsonpath; - jsonpath ----------------------- - $."a".**{0 to 5}."b" -(1 row) - -select '$.a.**{5 to last}.b'::jsonpath; - jsonpath -------------------------- - $."a".**{5 to last}."b" -(1 row) - -select '$.a.**{last}.b'::jsonpath; - jsonpath --------------------- - $."a".**{last}."b" -(1 row) - -select '$.a.**{last to 5}.b'::jsonpath; - jsonpath -------------------------- - $."a".**{last to 5}."b" -(1 row) - -select '$+1'::jsonpath; - jsonpath ----------- - ($ + 1) -(1 row) - -select '$-1'::jsonpath; - jsonpath ----------- - ($ - 1) -(1 row) - -select '$--+1'::jsonpath; - jsonpath ----------- - ($ - -1) -(1 row) - -select '$.a/+-1'::jsonpath; - jsonpath --------------- - ($."a" / -1) -(1 row) - -select '1 * 2 + 4 % -3 != false'::jsonpath; - jsonpath ---------------------------- - (1 * 2 + 4 % -3 != false) -(1 row) - -select '"\b\f\r\n\t\v\"\''\\"'::jsonpath; - jsonpath -------------------------- - "\b\f\r\n\t\u000b\"'\\" -(1 row) - -select '"\x50\u0067\u{53}\u{051}\u{00004C}"'::jsonpath; - jsonpath ----------- - "PgSQL" -(1 row) - -select '$.foo\x50\u0067\u{53}\u{051}\u{00004C}\t\"bar'::jsonpath; - jsonpath ---------------------- - $."fooPgSQL\t\"bar" -(1 row) - -select '"\z"'::jsonpath; -- unrecognized escape is just the literal char - jsonpath ----------- - "z" -(1 row) - -select '$.g ? ($.a == 1)'::jsonpath; - jsonpath --------------------- - $."g"?($."a" == 1) -(1 row) - -select '$.g ? (@ == 1)'::jsonpath; - jsonpath ----------------- - $."g"?(@ == 1) -(1 row) - -select '$.g ? (@.a == 1)'::jsonpath; - jsonpath --------------------- - $."g"?(@."a" == 1) -(1 row) - -select '$.g ? 
(@.a == 1 || @.a == 4)'::jsonpath; - jsonpath ----------------------------------- - $."g"?(@."a" == 1 || @."a" == 4) -(1 row) - -select '$.g ? (@.a == 1 && @.a == 4)'::jsonpath; - jsonpath ----------------------------------- - $."g"?(@."a" == 1 && @."a" == 4) -(1 row) - -select '$.g ? (@.a == 1 || @.a == 4 && @.b == 7)'::jsonpath; - jsonpath ------------------------------------------------- - $."g"?(@."a" == 1 || @."a" == 4 && @."b" == 7) -(1 row) - -select '$.g ? (@.a == 1 || !(@.a == 4) && @.b == 7)'::jsonpath; - jsonpath ---------------------------------------------------- - $."g"?(@."a" == 1 || !(@."a" == 4) && @."b" == 7) -(1 row) - -select '$.g ? (@.a == 1 || !(@.x >= 123 || @.a == 4) && @.b == 7)'::jsonpath; - jsonpath -------------------------------------------------------------------- - $."g"?(@."a" == 1 || !(@."x" >= 123 || @."a" == 4) && @."b" == 7) -(1 row) - -select '$.g ? (@.x >= @[*]?(@.a > "abc"))'::jsonpath; - jsonpath ---------------------------------------- - $."g"?(@."x" >= @[*]?(@."a" > "abc")) -(1 row) - -select '$.g ? ((@.x >= 123 || @.a == 4) is unknown)'::jsonpath; - jsonpath -------------------------------------------------- - $."g"?((@."x" >= 123 || @."a" == 4) is unknown) -(1 row) - -select '$.g ? (exists (@.x))'::jsonpath; - jsonpath ------------------------- - $."g"?(exists (@."x")) -(1 row) - -select '$.g ? (exists (@.x ? (@ == 14)))'::jsonpath; - jsonpath ----------------------------------- - $."g"?(exists (@."x"?(@ == 14))) -(1 row) - -select '$.g ? ((@.x >= 123 || @.a == 4) && exists (@.x ? (@ == 14)))'::jsonpath; - jsonpath ------------------------------------------------------------------- - $."g"?((@."x" >= 123 || @."a" == 4) && exists (@."x"?(@ == 14))) -(1 row) - -select '$.g ? (+@.x >= +-(+@.a + 2))'::jsonpath; - jsonpath ------------------------------------- - $."g"?(+@."x" >= +(-(+@."a" + 2))) -(1 row) - -select '$a'::jsonpath; - jsonpath ----------- - $"a" -(1 row) - -select '$a.b'::jsonpath; - jsonpath ----------- - $"a"."b" -(1 row) - -select '$a[*]'::jsonpath; - jsonpath ----------- - $"a"[*] -(1 row) - -select '$.g ? (@.zip == $zip)'::jsonpath; - jsonpath ---------------------------- - $."g"?(@."zip" == $"zip") -(1 row) - -select '$.a[1,2, 3 to 16]'::jsonpath; - jsonpath --------------------- - $."a"[1,2,3 to 16] -(1 row) - -select '$.a[$a + 1, ($b[*]) to -($[0] * 2)]'::jsonpath; - jsonpath ----------------------------------------- - $."a"[$"a" + 1,$"b"[*] to -($[0] * 2)] -(1 row) - -select '$.a[$.a.size() - 3]'::jsonpath; - jsonpath -------------------------- - $."a"[$."a".size() - 3] -(1 row) - -select 'last'::jsonpath; -ERROR: LAST is allowed only in array subscripts -LINE 1: select 'last'::jsonpath; - ^ -select '"last"'::jsonpath; - jsonpath ----------- - "last" -(1 row) - -select '$.last'::jsonpath; - jsonpath ----------- - $."last" -(1 row) - -select '$ ? (last > 0)'::jsonpath; -ERROR: LAST is allowed only in array subscripts -LINE 1: select '$ ? (last > 0)'::jsonpath; - ^ -select '$[last]'::jsonpath; - jsonpath ----------- - $[last] -(1 row) - -select '$[$[0] ? 
(last > 0)]'::jsonpath; - jsonpath --------------------- - $[$[0]?(last > 0)] -(1 row) - -select 'null.type()'::jsonpath; - jsonpath -------------- - null.type() -(1 row) - -select '1.type()'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1.t" of jsonpath input -LINE 1: select '1.type()'::jsonpath; - ^ -select '(1).type()'::jsonpath; - jsonpath ------------- - (1).type() -(1 row) - -select '1.2.type()'::jsonpath; - jsonpath --------------- - (1.2).type() -(1 row) - -select '"aaa".type()'::jsonpath; - jsonpath --------------- - "aaa".type() -(1 row) - -select 'true.type()'::jsonpath; - jsonpath -------------- - true.type() -(1 row) - -select '$.double().floor().ceiling().abs()'::jsonpath; - jsonpath ------------------------------------- - $.double().floor().ceiling().abs() -(1 row) - -select '$.keyvalue().key'::jsonpath; - jsonpath --------------------- - $.keyvalue()."key" -(1 row) - -select '$.datetime()'::jsonpath; - jsonpath --------------- - $.datetime() -(1 row) - -select '$.datetime("datetime template")'::jsonpath; - jsonpath ---------------------------------- - $.datetime("datetime template") -(1 row) - -select '$.bigint().integer().number().decimal()'::jsonpath; - jsonpath ------------------------------------------ - $.bigint().integer().number().decimal() -(1 row) - -select '$.boolean()'::jsonpath; - jsonpath -------------- - $.boolean() -(1 row) - -select '$.date()'::jsonpath; - jsonpath ----------- - $.date() -(1 row) - -select '$.decimal(4,2)'::jsonpath; - jsonpath ----------------- - $.decimal(4,2) -(1 row) - -select '$.string()'::jsonpath; - jsonpath ------------- - $.string() -(1 row) - -select '$.time()'::jsonpath; - jsonpath ----------- - $.time() -(1 row) - -select '$.time(6)'::jsonpath; - jsonpath ------------ - $.time(6) -(1 row) - -select '$.time_tz()'::jsonpath; - jsonpath -------------- - $.time_tz() -(1 row) - -select '$.time_tz(4)'::jsonpath; - jsonpath --------------- - $.time_tz(4) -(1 row) - -select '$.timestamp()'::jsonpath; - jsonpath ---------------- - $.timestamp() -(1 row) - -select '$.timestamp(2)'::jsonpath; - jsonpath ----------------- - $.timestamp(2) -(1 row) - -select '$.timestamp_tz()'::jsonpath; - jsonpath ------------------- - $.timestamp_tz() -(1 row) - -select '$.timestamp_tz(0)'::jsonpath; - jsonpath -------------------- - $.timestamp_tz(0) -(1 row) - -select '$ ? (@ starts with "abc")'::jsonpath; - jsonpath -------------------------- - $?(@ starts with "abc") -(1 row) - -select '$ ? (@ starts with $var)'::jsonpath; - jsonpath --------------------------- - $?(@ starts with $"var") -(1 row) - -select '$ ? (@ like_regex "(invalid pattern")'::jsonpath; -ERROR: invalid regular expression: parentheses () not balanced -LINE 1: select '$ ? (@ like_regex "(invalid pattern")'::jsonpath; - ^ -select '$ ? (@ like_regex "pattern")'::jsonpath; - jsonpath ----------------------------- - $?(@ like_regex "pattern") -(1 row) - -select '$ ? (@ like_regex "pattern" flag "")'::jsonpath; - jsonpath ----------------------------- - $?(@ like_regex "pattern") -(1 row) - -select '$ ? (@ like_regex "pattern" flag "i")'::jsonpath; - jsonpath -------------------------------------- - $?(@ like_regex "pattern" flag "i") -(1 row) - -select '$ ? (@ like_regex "pattern" flag "is")'::jsonpath; - jsonpath --------------------------------------- - $?(@ like_regex "pattern" flag "is") -(1 row) - -select '$ ? 
(@ like_regex "pattern" flag "isim")'::jsonpath; - jsonpath ---------------------------------------- - $?(@ like_regex "pattern" flag "ism") -(1 row) - -select '$ ? (@ like_regex "pattern" flag "xsms")'::jsonpath; -ERROR: XQuery "x" flag (expanded regular expressions) is not implemented -LINE 1: select '$ ? (@ like_regex "pattern" flag "xsms")'::jsonpath; - ^ -select '$ ? (@ like_regex "pattern" flag "q")'::jsonpath; - jsonpath -------------------------------------- - $?(@ like_regex "pattern" flag "q") -(1 row) - -select '$ ? (@ like_regex "pattern" flag "iq")'::jsonpath; - jsonpath --------------------------------------- - $?(@ like_regex "pattern" flag "iq") -(1 row) - -select '$ ? (@ like_regex "pattern" flag "smixq")'::jsonpath; - jsonpath ------------------------------------------ - $?(@ like_regex "pattern" flag "ismxq") -(1 row) - -select '$ ? (@ like_regex "pattern" flag "a")'::jsonpath; -ERROR: invalid input syntax for type jsonpath -LINE 1: select '$ ? (@ like_regex "pattern" flag "a")'::jsonpath; - ^ -DETAIL: Unrecognized flag character "a" in LIKE_REGEX predicate. -select '$ < 1'::jsonpath; - jsonpath ----------- - ($ < 1) -(1 row) - -select '($ < 1) || $.a.b <= $x'::jsonpath; - jsonpath ------------------------------- - ($ < 1 || $."a"."b" <= $"x") -(1 row) - -select '@ + 1'::jsonpath; -ERROR: @ is not allowed in root expressions -LINE 1: select '@ + 1'::jsonpath; - ^ -select '($).a.b'::jsonpath; - jsonpath ------------ - $."a"."b" -(1 row) - -select '($.a.b).c.d'::jsonpath; - jsonpath -------------------- - $."a"."b"."c"."d" -(1 row) - -select '($.a.b + -$.x.y).c.d'::jsonpath; - jsonpath ----------------------------------- - ($."a"."b" + -$."x"."y")."c"."d" -(1 row) - -select '(-+$.a.b).c.d'::jsonpath; - jsonpath -------------------------- - (-(+$."a"."b"))."c"."d" -(1 row) - -select '1 + ($.a.b + 2).c.d'::jsonpath; - jsonpath -------------------------------- - (1 + ($."a"."b" + 2)."c"."d") -(1 row) - -select '1 + ($.a.b > 2).c.d'::jsonpath; - jsonpath -------------------------------- - (1 + ($."a"."b" > 2)."c"."d") -(1 row) - -select '($)'::jsonpath; - jsonpath ----------- - $ -(1 row) - -select '(($))'::jsonpath; - jsonpath ----------- - $ -(1 row) - -select '((($ + 1)).a + ((2)).b ? ((((@ > 1)) || (exists(@.c)))))'::jsonpath; - jsonpath ---------------------------------------------------- - (($ + 1)."a" + (2)."b"?(@ > 1 || exists (@."c"))) -(1 row) - -select '$ ? (@.a < 1)'::jsonpath; - jsonpath ---------------- - $?(@."a" < 1) -(1 row) - -select '$ ? (@.a < -1)'::jsonpath; - jsonpath ----------------- - $?(@."a" < -1) -(1 row) - -select '$ ? (@.a < +1)'::jsonpath; - jsonpath ---------------- - $?(@."a" < 1) -(1 row) - -select '$ ? (@.a < .1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < 0.1) -(1 row) - -select '$ ? (@.a < -.1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < -0.1) -(1 row) - -select '$ ? (@.a < +.1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < 0.1) -(1 row) - -select '$ ? (@.a < 0.1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < 0.1) -(1 row) - -select '$ ? (@.a < -0.1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < -0.1) -(1 row) - -select '$ ? (@.a < +0.1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < 0.1) -(1 row) - -select '$ ? (@.a < 10.1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < 10.1) -(1 row) - -select '$ ? (@.a < -10.1)'::jsonpath; - jsonpath -------------------- - $?(@."a" < -10.1) -(1 row) - -select '$ ? 
(@.a < +10.1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < 10.1) -(1 row) - -select '$ ? (@.a < 1e1)'::jsonpath; - jsonpath ----------------- - $?(@."a" < 10) -(1 row) - -select '$ ? (@.a < -1e1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < -10) -(1 row) - -select '$ ? (@.a < +1e1)'::jsonpath; - jsonpath ----------------- - $?(@."a" < 10) -(1 row) - -select '$ ? (@.a < .1e1)'::jsonpath; - jsonpath ---------------- - $?(@."a" < 1) -(1 row) - -select '$ ? (@.a < -.1e1)'::jsonpath; - jsonpath ----------------- - $?(@."a" < -1) -(1 row) - -select '$ ? (@.a < +.1e1)'::jsonpath; - jsonpath ---------------- - $?(@."a" < 1) -(1 row) - -select '$ ? (@.a < 0.1e1)'::jsonpath; - jsonpath ---------------- - $?(@."a" < 1) -(1 row) - -select '$ ? (@.a < -0.1e1)'::jsonpath; - jsonpath ----------------- - $?(@."a" < -1) -(1 row) - -select '$ ? (@.a < +0.1e1)'::jsonpath; - jsonpath ---------------- - $?(@."a" < 1) -(1 row) - -select '$ ? (@.a < 10.1e1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < 101) -(1 row) - -select '$ ? (@.a < -10.1e1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < -101) -(1 row) - -select '$ ? (@.a < +10.1e1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < 101) -(1 row) - -select '$ ? (@.a < 1e-1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < 0.1) -(1 row) - -select '$ ? (@.a < -1e-1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < -0.1) -(1 row) - -select '$ ? (@.a < +1e-1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < 0.1) -(1 row) - -select '$ ? (@.a < .1e-1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < 0.01) -(1 row) - -select '$ ? (@.a < -.1e-1)'::jsonpath; - jsonpath -------------------- - $?(@."a" < -0.01) -(1 row) - -select '$ ? (@.a < +.1e-1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < 0.01) -(1 row) - -select '$ ? (@.a < 0.1e-1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < 0.01) -(1 row) - -select '$ ? (@.a < -0.1e-1)'::jsonpath; - jsonpath -------------------- - $?(@."a" < -0.01) -(1 row) - -select '$ ? (@.a < +0.1e-1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < 0.01) -(1 row) - -select '$ ? (@.a < 10.1e-1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < 1.01) -(1 row) - -select '$ ? (@.a < -10.1e-1)'::jsonpath; - jsonpath -------------------- - $?(@."a" < -1.01) -(1 row) - -select '$ ? (@.a < +10.1e-1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < 1.01) -(1 row) - -select '$ ? (@.a < 1e+1)'::jsonpath; - jsonpath ----------------- - $?(@."a" < 10) -(1 row) - -select '$ ? (@.a < -1e+1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < -10) -(1 row) - -select '$ ? (@.a < +1e+1)'::jsonpath; - jsonpath ----------------- - $?(@."a" < 10) -(1 row) - -select '$ ? (@.a < .1e+1)'::jsonpath; - jsonpath ---------------- - $?(@."a" < 1) -(1 row) - -select '$ ? (@.a < -.1e+1)'::jsonpath; - jsonpath ----------------- - $?(@."a" < -1) -(1 row) - -select '$ ? (@.a < +.1e+1)'::jsonpath; - jsonpath ---------------- - $?(@."a" < 1) -(1 row) - -select '$ ? (@.a < 0.1e+1)'::jsonpath; - jsonpath ---------------- - $?(@."a" < 1) -(1 row) - -select '$ ? (@.a < -0.1e+1)'::jsonpath; - jsonpath ----------------- - $?(@."a" < -1) -(1 row) - -select '$ ? (@.a < +0.1e+1)'::jsonpath; - jsonpath ---------------- - $?(@."a" < 1) -(1 row) - -select '$ ? (@.a < 10.1e+1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < 101) -(1 row) - -select '$ ? (@.a < -10.1e+1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < -101) -(1 row) - -select '$ ? 
(@.a < +10.1e+1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < 101) -(1 row) - --- numeric literals -select '0'::jsonpath; - jsonpath ----------- - 0 -(1 row) - -select '00'::jsonpath; -ERROR: trailing junk after numeric literal at or near "00" of jsonpath input -LINE 1: select '00'::jsonpath; - ^ -select '0755'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '0755'::jsonpath; - ^ -select '0.0'::jsonpath; - jsonpath ----------- - 0.0 -(1 row) - -select '0.000'::jsonpath; - jsonpath ----------- - 0.000 -(1 row) - -select '0.000e1'::jsonpath; - jsonpath ----------- - 0.00 -(1 row) - -select '0.000e2'::jsonpath; - jsonpath ----------- - 0.0 -(1 row) - -select '0.000e3'::jsonpath; - jsonpath ----------- - 0 -(1 row) - -select '0.0010'::jsonpath; - jsonpath ----------- - 0.0010 -(1 row) - -select '0.0010e-1'::jsonpath; - jsonpath ----------- - 0.00010 -(1 row) - -select '0.0010e+1'::jsonpath; - jsonpath ----------- - 0.010 -(1 row) - -select '0.0010e+2'::jsonpath; - jsonpath ----------- - 0.10 -(1 row) - -select '.001'::jsonpath; - jsonpath ----------- - 0.001 -(1 row) - -select '.001e1'::jsonpath; - jsonpath ----------- - 0.01 -(1 row) - -select '1.'::jsonpath; - jsonpath ----------- - 1 -(1 row) - -select '1.e1'::jsonpath; - jsonpath ----------- - 10 -(1 row) - -select '1a'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1a" of jsonpath input -LINE 1: select '1a'::jsonpath; - ^ -select '1e'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1e" of jsonpath input -LINE 1: select '1e'::jsonpath; - ^ -select '1.e'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1.e" of jsonpath input -LINE 1: select '1.e'::jsonpath; - ^ -select '1.2a'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1.2a" of jsonpath input -LINE 1: select '1.2a'::jsonpath; - ^ -select '1.2e'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1.2e" of jsonpath input -LINE 1: select '1.2e'::jsonpath; - ^ -select '1.2.e'::jsonpath; - jsonpath ------------ - (1.2)."e" -(1 row) - -select '(1.2).e'::jsonpath; - jsonpath ------------ - (1.2)."e" -(1 row) - -select '1e3'::jsonpath; - jsonpath ----------- - 1000 -(1 row) - -select '1.e3'::jsonpath; - jsonpath ----------- - 1000 -(1 row) - -select '1.e3.e'::jsonpath; - jsonpath ------------- - (1000)."e" -(1 row) - -select '1.e3.e4'::jsonpath; - jsonpath -------------- - (1000)."e4" -(1 row) - -select '1.2e3'::jsonpath; - jsonpath ----------- - 1200 -(1 row) - -select '1.2e3a'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1.2e3a" of jsonpath input -LINE 1: select '1.2e3a'::jsonpath; - ^ -select '1.2.e3'::jsonpath; - jsonpath ------------- - (1.2)."e3" -(1 row) - -select '(1.2).e3'::jsonpath; - jsonpath ------------- - (1.2)."e3" -(1 row) - -select '1..e'::jsonpath; - jsonpath ----------- - (1)."e" -(1 row) - -select '1..e3'::jsonpath; - jsonpath ----------- - (1)."e3" -(1 row) - -select '(1.).e'::jsonpath; - jsonpath ----------- - (1)."e" -(1 row) - -select '(1.).e3'::jsonpath; - jsonpath ----------- - (1)."e3" -(1 row) - -select '1?(2>3)'::jsonpath; - jsonpath -------------- - (1)?(2 > 3) -(1 row) - --- nondecimal -select '0b100101'::jsonpath; - jsonpath ----------- - 37 -(1 row) - -select '0o273'::jsonpath; - jsonpath ----------- - 187 -(1 row) - -select '0x42F'::jsonpath; - jsonpath ----------- - 1071 -(1 row) - --- error cases -select '0b'::jsonpath; -ERROR: trailing junk after numeric literal at or near "0b" of jsonpath input -LINE 1: select 
'0b'::jsonpath; - ^ -select '1b'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1b" of jsonpath input -LINE 1: select '1b'::jsonpath; - ^ -select '0b0x'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '0b0x'::jsonpath; - ^ -select '0o'::jsonpath; -ERROR: trailing junk after numeric literal at or near "0o" of jsonpath input -LINE 1: select '0o'::jsonpath; - ^ -select '1o'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1o" of jsonpath input -LINE 1: select '1o'::jsonpath; - ^ -select '0o0x'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '0o0x'::jsonpath; - ^ -select '0x'::jsonpath; -ERROR: trailing junk after numeric literal at or near "0x" of jsonpath input -LINE 1: select '0x'::jsonpath; - ^ -select '1x'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1x" of jsonpath input -LINE 1: select '1x'::jsonpath; - ^ -select '0x0y'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '0x0y'::jsonpath; - ^ --- underscores -select '1_000_000'::jsonpath; - jsonpath ----------- - 1000000 -(1 row) - -select '1_2_3'::jsonpath; - jsonpath ----------- - 123 -(1 row) - -select '0x1EEE_FFFF'::jsonpath; - jsonpath ------------ - 518979583 -(1 row) - -select '0o2_73'::jsonpath; - jsonpath ----------- - 187 -(1 row) - -select '0b10_0101'::jsonpath; - jsonpath ----------- - 37 -(1 row) - -select '1_000.000_005'::jsonpath; - jsonpath -------------- - 1000.000005 -(1 row) - -select '1_000.'::jsonpath; - jsonpath ----------- - 1000 -(1 row) - -select '.000_005'::jsonpath; - jsonpath ----------- - 0.000005 -(1 row) - -select '1_000.5e0_1'::jsonpath; - jsonpath ----------- - 10005 -(1 row) - --- error cases -select '_100'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '_100'::jsonpath; - ^ -select '100_'::jsonpath; -ERROR: trailing junk after numeric literal at or near "100_" of jsonpath input -LINE 1: select '100_'::jsonpath; - ^ -select '100__000'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '100__000'::jsonpath; - ^ -select '_1_000.5'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '_1_000.5'::jsonpath; - ^ -select '1_000_.5'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1_000_" of jsonpath input -LINE 1: select '1_000_.5'::jsonpath; - ^ -select '1_000._5'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1_000._" of jsonpath input -LINE 1: select '1_000._5'::jsonpath; - ^ -select '1_000.5_'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1_000.5_" of jsonpath input -LINE 1: select '1_000.5_'::jsonpath; - ^ -select '1_000.5e_1'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1_000.5e" of jsonpath input -LINE 1: select '1_000.5e_1'::jsonpath; - ^ --- underscore after prefix not allowed in JavaScript (but allowed in SQL) -select '0b_10_0101'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '0b_10_0101'::jsonpath; - ^ -select '0o_273'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '0o_273'::jsonpath; - ^ -select '0x_42F'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '0x_42F'::jsonpath; - ^ --- test non-error-throwing API -SELECT str as jsonpath, - pg_input_is_valid(str,'jsonpath') as ok, - errinfo.sql_error_code, - errinfo.message, - errinfo.detail, - errinfo.hint -FROM unnest(ARRAY['$ ? (@ like_regex "pattern" flag "smixq")'::text, - '$ ? 
(@ like_regex "pattern" flag "a")', - '@ + 1', - '00', - '1a']) str, - LATERAL pg_input_error_info(str, 'jsonpath') as errinfo; - jsonpath | ok | sql_error_code | message | detail | hint --------------------------------------------+----+----------------+-----------------------------------------------------------------------+----------------------------------------------------------+------ - $ ? (@ like_regex "pattern" flag "smixq") | t | | | | - $ ? (@ like_regex "pattern" flag "a") | f | 42601 | invalid input syntax for type jsonpath | Unrecognized flag character "a" in LIKE_REGEX predicate. | - @ + 1 | f | 42601 | @ is not allowed in root expressions | | - 00 | f | 42601 | trailing junk after numeric literal at or near "00" of jsonpath input | | - 1a | f | 42601 | trailing junk after numeric literal at or near "1a" of jsonpath input | | -(5 rows) - +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/jsonpath_encoding_2.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/jsonpath_encoding.out --- /tmp/cirrus-ci-build/src/test/regress/expected/jsonpath_encoding_2.out 2024-03-07 14:25:00.331777000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/jsonpath_encoding.out 2024-03-07 14:27:17.529230000 +0000 @@ -1,9 +1,2 @@ --- --- encoding-sensitive tests for jsonpath --- --- We provide expected-results files for UTF8 (jsonpath_encoding.out) --- and for SQL_ASCII (jsonpath_encoding_1.out). Skip otherwise. -SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII') - AS skip_test \gset -\if :skip_test -\quit +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/jsonb_jsonpath.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/jsonb_jsonpath.out --- /tmp/cirrus-ci-build/src/test/regress/expected/jsonb_jsonpath.out 2024-03-07 14:25:00.331726000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/jsonb_jsonpath.out 2024-03-07 14:27:17.536948000 +0000 @@ -1,4386 +1,2 @@ -select jsonb '{"a": 12}' @? '$'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": 12}' @? '1'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": 12}' @? '$.a.b'; - ?column? ----------- - f -(1 row) - -select jsonb '{"a": 12}' @? '$.b'; - ?column? ----------- - f -(1 row) - -select jsonb '{"a": 12}' @? '$.a + 2'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": 12}' @? '$.b + 2'; - ?column? ----------- - -(1 row) - -select jsonb '{"a": {"a": 12}}' @? '$.a.a'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"a": 12}}' @? '$.*.a'; - ?column? ----------- - t -(1 row) - -select jsonb '{"b": {"a": 12}}' @? '$.*.a'; - ?column? ----------- - t -(1 row) - -select jsonb '{"b": {"a": 12}}' @? '$.*.b'; - ?column? ----------- - f -(1 row) - -select jsonb '{"b": {"a": 12}}' @? 'strict $.*.b'; - ?column? ----------- - -(1 row) - -select jsonb '{}' @? '$.*'; - ?column? ----------- - f -(1 row) - -select jsonb '{"a": 1}' @? '$.*'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"b": 1}}' @? 'lax $.**{1}'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"b": 1}}' @? 'lax $.**{2}'; - ?column? 
----------- - t -(1 row) - -select jsonb '{"a": {"b": 1}}' @? 'lax $.**{3}'; - ?column? ----------- - f -(1 row) - -select jsonb '[]' @? '$[*]'; - ?column? ----------- - f -(1 row) - -select jsonb '[1]' @? '$[*]'; - ?column? ----------- - t -(1 row) - -select jsonb '[1]' @? '$[1]'; - ?column? ----------- - f -(1 row) - -select jsonb '[1]' @? 'strict $[1]'; - ?column? ----------- - -(1 row) - -select jsonb_path_query('[1]', 'strict $[1]'); -ERROR: jsonpath array subscript is out of bounds -select jsonb_path_query('[1]', 'strict $[1]', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb '[1]' @? 'lax $[10000000000000000]'; - ?column? ----------- - -(1 row) - -select jsonb '[1]' @? 'strict $[10000000000000000]'; - ?column? ----------- - -(1 row) - -select jsonb_path_query('[1]', 'lax $[10000000000000000]'); -ERROR: jsonpath array subscript is out of integer range -select jsonb_path_query('[1]', 'strict $[10000000000000000]'); -ERROR: jsonpath array subscript is out of integer range -select jsonb '[1]' @? '$[0]'; - ?column? ----------- - t -(1 row) - -select jsonb '[1]' @? '$[0.3]'; - ?column? ----------- - t -(1 row) - -select jsonb '[1]' @? '$[0.5]'; - ?column? ----------- - t -(1 row) - -select jsonb '[1]' @? '$[0.9]'; - ?column? ----------- - t -(1 row) - -select jsonb '[1]' @? '$[1.2]'; - ?column? ----------- - f -(1 row) - -select jsonb '[1]' @? 'strict $[1.2]'; - ?column? ----------- - -(1 row) - -select jsonb '{"a": [1,2,3], "b": [3,4,5]}' @? '$ ? (@.a[*] > @.b[*])'; - ?column? ----------- - f -(1 row) - -select jsonb '{"a": [1,2,3], "b": [3,4,5]}' @? '$ ? (@.a[*] >= @.b[*])'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": [1,2,3], "b": [3,4,"5"]}' @? '$ ? (@.a[*] >= @.b[*])'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": [1,2,3], "b": [3,4,"5"]}' @? 'strict $ ? (@.a[*] >= @.b[*])'; - ?column? ----------- - f -(1 row) - -select jsonb '{"a": [1,2,3], "b": [3,4,null]}' @? '$ ? (@.a[*] >= @.b[*])'; - ?column? ----------- - t -(1 row) - -select jsonb '1' @? '$ ? ((@ == "1") is unknown)'; - ?column? ----------- - t -(1 row) - -select jsonb '1' @? '$ ? ((@ == 1) is unknown)'; - ?column? ----------- - f -(1 row) - -select jsonb '[{"a": 1}, {"a": 2}]' @? '$[0 to 1] ? (@.a > 1)'; - ?column? 
----------- - t -(1 row) - -select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'lax $[*].a', silent => false); - jsonb_path_exists -------------------- - t -(1 row) - -select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'lax $[*].a', silent => true); - jsonb_path_exists -------------------- - t -(1 row) - -select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'strict $[*].a', silent => false); -ERROR: jsonpath member accessor can only be applied to an object -select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'strict $[*].a', silent => true); - jsonb_path_exists -------------------- - -(1 row) - -select jsonb_path_query('1', 'lax $.a'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('1', 'strict $.a'); -ERROR: jsonpath member accessor can only be applied to an object -select jsonb_path_query('1', 'strict $.*'); -ERROR: jsonpath wildcard member accessor can only be applied to an object -select jsonb_path_query('1', 'strict $.a', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('1', 'strict $.*', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'lax $.a'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $.a'); -ERROR: jsonpath member accessor can only be applied to an object -select jsonb_path_query('[]', 'strict $.a', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{}', 'lax $.a'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{}', 'strict $.a'); -ERROR: JSON object does not contain key "a" -select jsonb_path_query('{}', 'strict $.a', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('1', 'strict $[1]'); -ERROR: jsonpath array accessor can only be applied to an array -select jsonb_path_query('1', 'strict $[*]'); -ERROR: jsonpath wildcard array accessor can only be applied to an array -select jsonb_path_query('[]', 'strict $[1]'); -ERROR: jsonpath array subscript is out of bounds -select jsonb_path_query('[]', 'strict $["a"]'); -ERROR: jsonpath array subscript is not a single numeric value -select jsonb_path_query('1', 'strict $[1]', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('1', 'strict $[*]', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $[1]', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $["a"]', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{"a": 12, "b": {"a": 13}}', '$.a'); - jsonb_path_query ------------------- - 12 -(1 row) - -select jsonb_path_query('{"a": 12, "b": {"a": 13}}', '$.b'); - jsonb_path_query ------------------- - {"a": 13} -(1 row) - -select jsonb_path_query('{"a": 12, "b": {"a": 13}}', '$.*'); - jsonb_path_query ------------------- - 12 - {"a": 13} -(2 rows) - -select jsonb_path_query('{"a": 12, "b": {"a": 13}}', 'lax $.*.a'); - jsonb_path_query ------------------- - 13 -(1 row) - -select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[*].a'); - jsonb_path_query ------------------- - 13 -(1 row) - -select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[*].*'); - jsonb_path_query ------------------- - 13 - 14 -(2 rows) - -select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0].a'); - jsonb_path_query ------------------- -(0 rows) - -select 
jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[1].a'); - jsonb_path_query ------------------- - 13 -(1 row) - -select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[2].a'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0,1].a'); - jsonb_path_query ------------------- - 13 -(1 row) - -select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0 to 10].a'); - jsonb_path_query ------------------- - 13 -(1 row) - -select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0 to 10 / 0].a'); -ERROR: division by zero -select jsonb_path_query('[12, {"a": 13}, {"b": 14}, "ccc", true]', '$[2.5 - 1 to $.size() - 2]'); - jsonb_path_query ------------------- - {"a": 13} - {"b": 14} - "ccc" -(3 rows) - -select jsonb_path_query('1', 'lax $[0]'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('1', 'lax $[*]'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('[1]', 'lax $[0]'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('[1]', 'lax $[*]'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('[1,2,3]', 'lax $[*]'); - jsonb_path_query ------------------- - 1 - 2 - 3 -(3 rows) - -select jsonb_path_query('[1,2,3]', 'strict $[*].a'); -ERROR: jsonpath member accessor can only be applied to an object -select jsonb_path_query('[1,2,3]', 'strict $[*].a', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', '$[last]'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', '$[last ? (exists(last))]'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $[last]'); -ERROR: jsonpath array subscript is out of bounds -select jsonb_path_query('[]', 'strict $[last]', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[1]', '$[last]'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('[1,2,3]', '$[last]'); - jsonb_path_query ------------------- - 3 -(1 row) - -select jsonb_path_query('[1,2,3]', '$[last - 1]'); - jsonb_path_query ------------------- - 2 -(1 row) - -select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "number")]'); - jsonb_path_query ------------------- - 3 -(1 row) - -select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "string")]'); -ERROR: jsonpath array subscript is not a single numeric value -select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "string")]', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select * from jsonb_path_query('{"a": 10}', '$'); - jsonb_path_query ------------------- - {"a": 10} -(1 row) - -select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)'); -ERROR: could not find jsonpath variable "value" -select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '1'); -ERROR: "vars" argument is not an object -DETAIL: Jsonpath parameters should be encoded as key-value pairs of "vars" object. -select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '[{"value" : 13}]'); -ERROR: "vars" argument is not an object -DETAIL: Jsonpath parameters should be encoded as key-value pairs of "vars" object. -select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '{"value" : 13}'); - jsonb_path_query ------------------- - {"a": 10} -(1 row) - -select * from jsonb_path_query('{"a": 10}', '$ ? 
(@.a < $value)', '{"value" : 8}'); - jsonb_path_query ------------------- -(0 rows) - -select * from jsonb_path_query('{"a": 10}', '$.a ? (@ < $value)', '{"value" : 13}'); - jsonb_path_query ------------------- - 10 -(1 row) - -select * from jsonb_path_query('[10,11,12,13,14,15]', '$[*] ? (@ < $value)', '{"value" : 13}'); - jsonb_path_query ------------------- - 10 - 11 - 12 -(3 rows) - -select * from jsonb_path_query('[10,11,12,13,14,15]', '$[0,1] ? (@ < $x.value)', '{"x": {"value" : 13}}'); - jsonb_path_query ------------------- - 10 - 11 -(2 rows) - -select * from jsonb_path_query('[10,11,12,13,14,15]', '$[0 to 2] ? (@ < $value)', '{"value" : 15}'); - jsonb_path_query ------------------- - 10 - 11 - 12 -(3 rows) - -select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? (@ == "1")'); - jsonb_path_query ------------------- - "1" -(1 row) - -select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? (@ == $value)', '{"value" : "1"}'); - jsonb_path_query ------------------- - "1" -(1 row) - -select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? (@ == $value)', '{"value" : null}'); - jsonb_path_query ------------------- - null -(1 row) - -select * from jsonb_path_query('[1, "2", null]', '$[*] ? (@ != null)'); - jsonb_path_query ------------------- - 1 - "2" -(2 rows) - -select * from jsonb_path_query('[1, "2", null]', '$[*] ? (@ == null)'); - jsonb_path_query ------------------- - null -(1 row) - -select * from jsonb_path_query('{}', '$ ? (@ == @)'); - jsonb_path_query ------------------- -(0 rows) - -select * from jsonb_path_query('[]', 'strict $ ? (@ == @)'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**'); - jsonb_path_query ------------------- - {"a": {"b": 1}} - {"b": 1} - 1 -(3 rows) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0}'); - jsonb_path_query ------------------- - {"a": {"b": 1}} -(1 row) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0 to last}'); - jsonb_path_query ------------------- - {"a": {"b": 1}} - {"b": 1} - 1 -(3 rows) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1}'); - jsonb_path_query ------------------- - {"b": 1} -(1 row) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to last}'); - jsonb_path_query ------------------- - {"b": 1} - 1 -(2 rows) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{2}'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{2 to last}'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{3 to last}'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{last}'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**.b ? (@ > 0)'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0}.b ? (@ > 0)'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1}.b ? (@ > 0)'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0 to last}.b ? (@ > 0)'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to last}.b ? (@ > 0)'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to 2}.b ? 
(@ > 0)'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**.b ? (@ > 0)'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{0}.b ? (@ > 0)'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1}.b ? (@ > 0)'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{0 to last}.b ? (@ > 0)'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1 to last}.b ? (@ > 0)'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1 to 2}.b ? (@ > 0)'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{2 to 3}.b ? (@ > 0)'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb '{"a": {"b": 1}}' @? '$.**.b ? ( @ > 0)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"b": 1}}' @? '$.**{0}.b ? ( @ > 0)'; - ?column? ----------- - f -(1 row) - -select jsonb '{"a": {"b": 1}}' @? '$.**{1}.b ? ( @ > 0)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"b": 1}}' @? '$.**{0 to last}.b ? ( @ > 0)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"b": 1}}' @? '$.**{1 to last}.b ? ( @ > 0)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"b": 1}}' @? '$.**{1 to 2}.b ? ( @ > 0)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**.b ? ( @ > 0)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{0}.b ? ( @ > 0)'; - ?column? ----------- - f -(1 row) - -select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{1}.b ? ( @ > 0)'; - ?column? ----------- - f -(1 row) - -select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{0 to last}.b ? ( @ > 0)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{1 to last}.b ? ( @ > 0)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{1 to 2}.b ? ( @ > 0)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{2 to 3}.b ? ( @ > 0)'; - ?column? ----------- - t -(1 row) - -select jsonb_path_query('{"g": {"x": 2}}', '$.g ? (exists (@.x))'); - jsonb_path_query ------------------- - {"x": 2} -(1 row) - -select jsonb_path_query('{"g": {"x": 2}}', '$.g ? (exists (@.y))'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{"g": {"x": 2}}', '$.g ? (exists (@.x ? (@ >= 2) ))'); - jsonb_path_query ------------------- - {"x": 2} -(1 row) - -select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? (exists (@.x))'); - jsonb_path_query ------------------- - {"x": 2} -(1 row) - -select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? (exists (@.x + "3"))'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? ((exists (@.x + "3")) is unknown)'); - jsonb_path_query ------------------- - {"x": 2} - {"y": 3} -(2 rows) - -select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g[*] ? (exists (@.x))'); - jsonb_path_query ------------------- - {"x": 2} -(1 row) - -select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g[*] ? 
((exists (@.x)) is unknown)'); - jsonb_path_query ------------------- - {"y": 3} -(1 row) - -select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g ? (exists (@[*].x))'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g ? ((exists (@[*].x)) is unknown)'); - jsonb_path_query ----------------------- - [{"x": 2}, {"y": 3}] -(1 row) - ---test ternary logic -select - x, y, - jsonb_path_query( - '[true, false, null]', - '$[*] ? (@ == true && ($x == true && $y == true) || - @ == false && !($x == true && $y == true) || - @ == null && ($x == true && $y == true) is unknown)', - jsonb_build_object('x', x, 'y', y) - ) as "x && y" -from - (values (jsonb 'true'), ('false'), ('"null"')) x(x), - (values (jsonb 'true'), ('false'), ('"null"')) y(y); - x | y | x && y ---------+--------+-------- - true | true | true - true | false | false - true | "null" | null - false | true | false - false | false | false - false | "null" | false - "null" | true | null - "null" | false | false - "null" | "null" | null -(9 rows) - -select - x, y, - jsonb_path_query( - '[true, false, null]', - '$[*] ? (@ == true && ($x == true || $y == true) || - @ == false && !($x == true || $y == true) || - @ == null && ($x == true || $y == true) is unknown)', - jsonb_build_object('x', x, 'y', y) - ) as "x || y" -from - (values (jsonb 'true'), ('false'), ('"null"')) x(x), - (values (jsonb 'true'), ('false'), ('"null"')) y(y); - x | y | x || y ---------+--------+-------- - true | true | true - true | false | true - true | "null" | true - false | true | true - false | false | false - false | "null" | null - "null" | true | true - "null" | false | null - "null" | "null" | null -(9 rows) - -select jsonb '{"a": 1, "b":1}' @? '$ ? (@.a == @.b)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"c": {"a": 1, "b":1}}' @? '$ ? (@.a == @.b)'; - ?column? ----------- - f -(1 row) - -select jsonb '{"c": {"a": 1, "b":1}}' @? '$.c ? (@.a == @.b)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"c": {"a": 1, "b":1}}' @? '$.c ? ($.c.a == @.b)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"c": {"a": 1, "b":1}}' @? '$.* ? (@.a == @.b)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": 1, "b":1}' @? '$.** ? (@.a == @.b)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"c": {"a": 1, "b":1}}' @? '$.** ? (@.a == @.b)'; - ?column? ----------- - t -(1 row) - -select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == 1 + 1)'); - jsonb_path_query ------------------- - {"a": 2, "b": 1} -(1 row) - -select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == (1 + 1))'); - jsonb_path_query ------------------- - {"a": 2, "b": 1} -(1 row) - -select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == @.b + 1)'); - jsonb_path_query ------------------- - {"a": 2, "b": 1} -(1 row) - -select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == (@.b + 1))'); - jsonb_path_query ------------------- - {"a": 2, "b": 1} -(1 row) - -select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == - 1)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == -1)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == -@.b)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == - @.b)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"c": {"a": 0, "b":1}}' @? '$.** ? (@.a == 1 - @.b)'; - ?column? 
----------- - t -(1 row) - -select jsonb '{"c": {"a": 2, "b":1}}' @? '$.** ? (@.a == 1 - - @.b)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"c": {"a": 0, "b":1}}' @? '$.** ? (@.a == 1 - +@.b)'; - ?column? ----------- - t -(1 row) - -select jsonb '[1,2,3]' @? '$ ? (+@[*] > +2)'; - ?column? ----------- - t -(1 row) - -select jsonb '[1,2,3]' @? '$ ? (+@[*] > +3)'; - ?column? ----------- - f -(1 row) - -select jsonb '[1,2,3]' @? '$ ? (-@[*] < -2)'; - ?column? ----------- - t -(1 row) - -select jsonb '[1,2,3]' @? '$ ? (-@[*] < -3)'; - ?column? ----------- - f -(1 row) - -select jsonb '1' @? '$ ? ($ > 0)'; - ?column? ----------- - t -(1 row) - --- arithmetic errors -select jsonb_path_query('[1,2,0,3]', '$[*] ? (2 / @ > 0)'); - jsonb_path_query ------------------- - 1 - 2 - 3 -(3 rows) - -select jsonb_path_query('[1,2,0,3]', '$[*] ? ((2 / @ > 0) is unknown)'); - jsonb_path_query ------------------- - 0 -(1 row) - -select jsonb_path_query('0', '1 / $'); -ERROR: division by zero -select jsonb_path_query('0', '1 / $ + 2'); -ERROR: division by zero -select jsonb_path_query('0', '-(3 + 1 % $)'); -ERROR: division by zero -select jsonb_path_query('1', '$ + "2"'); -ERROR: right operand of jsonpath operator + is not a single numeric value -select jsonb_path_query('[1, 2]', '3 * $'); -ERROR: right operand of jsonpath operator * is not a single numeric value -select jsonb_path_query('"a"', '-$'); -ERROR: operand of unary jsonpath operator - is not a numeric value -select jsonb_path_query('[1,"2",3]', '+$'); -ERROR: operand of unary jsonpath operator + is not a numeric value -select jsonb_path_query('1', '$ + "2"', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[1, 2]', '3 * $', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('"a"', '-$', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[1,"2",3]', '+$', silent => true); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb '["1",2,0,3]' @? '-$[*]'; - ?column? ----------- - t -(1 row) - -select jsonb '[1,"2",0,3]' @? '-$[*]'; - ?column? ----------- - t -(1 row) - -select jsonb '["1",2,0,3]' @? 'strict -$[*]'; - ?column? ----------- - -(1 row) - -select jsonb '[1,"2",0,3]' @? 'strict -$[*]'; - ?column? ----------- - -(1 row) - --- unwrapping of operator arguments in lax mode -select jsonb_path_query('{"a": [2]}', 'lax $.a * 3'); - jsonb_path_query ------------------- - 6 -(1 row) - -select jsonb_path_query('{"a": [2]}', 'lax $.a + 3'); - jsonb_path_query ------------------- - 5 -(1 row) - -select jsonb_path_query('{"a": [2, 3, 4]}', 'lax -$.a'); - jsonb_path_query ------------------- - -2 - -3 - -4 -(3 rows) - --- should fail -select jsonb_path_query('{"a": [1, 2]}', 'lax $.a * 3'); -ERROR: left operand of jsonpath operator * is not a single numeric value -select jsonb_path_query('{"a": [1, 2]}', 'lax $.a * 3', silent => true); - jsonb_path_query ------------------- -(0 rows) - --- extension: boolean expressions -select jsonb_path_query('2', '$ > 1'); - jsonb_path_query ------------------- - true -(1 row) - -select jsonb_path_query('2', '$ <= 1'); - jsonb_path_query ------------------- - false -(1 row) - -select jsonb_path_query('2', '$ == "2"'); - jsonb_path_query ------------------- - null -(1 row) - -select jsonb '2' @? '$ == "2"'; - ?column? ----------- - t -(1 row) - -select jsonb '2' @@ '$ > 1'; - ?column? ----------- - t -(1 row) - -select jsonb '2' @@ '$ <= 1'; - ?column? 
----------- - f -(1 row) - -select jsonb '2' @@ '$ == "2"'; - ?column? ----------- - -(1 row) - -select jsonb '2' @@ '1'; - ?column? ----------- - -(1 row) - -select jsonb '{}' @@ '$'; - ?column? ----------- - -(1 row) - -select jsonb '[]' @@ '$'; - ?column? ----------- - -(1 row) - -select jsonb '[1,2,3]' @@ '$[*]'; - ?column? ----------- - -(1 row) - -select jsonb '[]' @@ '$[*]'; - ?column? ----------- - -(1 row) - -select jsonb_path_match('[[1, true], [2, false]]', 'strict $[*] ? (@[0] > $x) [1]', '{"x": 1}'); - jsonb_path_match ------------------- - f -(1 row) - -select jsonb_path_match('[[1, true], [2, false]]', 'strict $[*] ? (@[0] < $x) [1]', '{"x": 2}'); - jsonb_path_match ------------------- - t -(1 row) - -select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'lax exists($[*].a)', silent => false); - jsonb_path_match ------------------- - t -(1 row) - -select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'lax exists($[*].a)', silent => true); - jsonb_path_match ------------------- - t -(1 row) - -select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'strict exists($[*].a)', silent => false); - jsonb_path_match ------------------- - -(1 row) - -select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'strict exists($[*].a)', silent => true); - jsonb_path_match ------------------- - -(1 row) - -select jsonb_path_query('[null,1,true,"a",[],{}]', '$.type()'); - jsonb_path_query ------------------- - "array" -(1 row) - -select jsonb_path_query('[null,1,true,"a",[],{}]', 'lax $.type()'); - jsonb_path_query ------------------- - "array" -(1 row) - -select jsonb_path_query('[null,1,true,"a",[],{}]', '$[*].type()'); - jsonb_path_query ------------------- - "null" - "number" - "boolean" - "string" - "array" - "object" -(6 rows) - -select jsonb_path_query('null', 'null.type()'); - jsonb_path_query ------------------- - "null" -(1 row) - -select jsonb_path_query('null', 'true.type()'); - jsonb_path_query ------------------- - "boolean" -(1 row) - -select jsonb_path_query('null', '(123).type()'); - jsonb_path_query ------------------- - "number" -(1 row) - -select jsonb_path_query('null', '"123".type()'); - jsonb_path_query ------------------- - "string" -(1 row) - -select jsonb_path_query('{"a": 2}', '($.a - 5).abs() + 10'); - jsonb_path_query ------------------- - 13 -(1 row) - -select jsonb_path_query('{"a": 2.5}', '-($.a * $.a).floor() % 4.3'); - jsonb_path_query ------------------- - -1.7 -(1 row) - -select jsonb_path_query('[1, 2, 3]', '($[*] > 2) ? 
(@ == true)'); - jsonb_path_query ------------------- - true -(1 row) - -select jsonb_path_query('[1, 2, 3]', '($[*] > 3).type()'); - jsonb_path_query ------------------- - "boolean" -(1 row) - -select jsonb_path_query('[1, 2, 3]', '($[*].a > 3).type()'); - jsonb_path_query ------------------- - "boolean" -(1 row) - -select jsonb_path_query('[1, 2, 3]', 'strict ($[*].a > 3).type()'); - jsonb_path_query ------------------- - "null" -(1 row) - -select jsonb_path_query('[1,null,true,"11",[],[1],[1,2,3],{},{"a":1,"b":2}]', 'strict $[*].size()'); -ERROR: jsonpath item method .size() can only be applied to an array -select jsonb_path_query('[1,null,true,"11",[],[1],[1,2,3],{},{"a":1,"b":2}]', 'strict $[*].size()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[1,null,true,"11",[],[1],[1,2,3],{},{"a":1,"b":2}]', 'lax $[*].size()'); - jsonb_path_query ------------------- - 1 - 1 - 1 - 1 - 0 - 1 - 3 - 1 - 1 -(9 rows) - -select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].abs()'); - jsonb_path_query ------------------- - 0 - 1 - 2 - 3.4 - 5.6 -(5 rows) - -select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].floor()'); - jsonb_path_query ------------------- - 0 - 1 - -2 - -4 - 5 -(5 rows) - -select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling()'); - jsonb_path_query ------------------- - 0 - 1 - -2 - -3 - 6 -(5 rows) - -select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling().abs()'); - jsonb_path_query ------------------- - 0 - 1 - 2 - 3 - 6 -(5 rows) - -select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling().abs().type()'); - jsonb_path_query ------------------- - "number" - "number" - "number" - "number" - "number" -(5 rows) - -select jsonb_path_query('[{},1]', '$[*].keyvalue()'); -ERROR: jsonpath item method .keyvalue() can only be applied to an object -select jsonb_path_query('[{},1]', '$[*].keyvalue()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{}', '$.keyvalue()'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{"a": 1, "b": [1, 2], "c": {"a": "bbb"}}', '$.keyvalue()'); - jsonb_path_query ----------------------------------------------- - {"id": 0, "key": "a", "value": 1} - {"id": 0, "key": "b", "value": [1, 2]} - {"id": 0, "key": "c", "value": {"a": "bbb"}} -(3 rows) - -select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', '$[*].keyvalue()'); - jsonb_path_query ------------------------------------------------ - {"id": 12, "key": "a", "value": 1} - {"id": 12, "key": "b", "value": [1, 2]} - {"id": 72, "key": "c", "value": {"a": "bbb"}} -(3 rows) - -select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'strict $.keyvalue()'); -ERROR: jsonpath item method .keyvalue() can only be applied to an object -select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'lax $.keyvalue()'); - jsonb_path_query ------------------------------------------------ - {"id": 12, "key": "a", "value": 1} - {"id": 12, "key": "b", "value": [1, 2]} - {"id": 72, "key": "c", "value": {"a": "bbb"}} -(3 rows) - -select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'strict $.keyvalue().a'); -ERROR: jsonpath item method .keyvalue() can only be applied to an object -select jsonb '{"a": 1, "b": [1, 2]}' @? 'lax $.keyvalue()'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": 1, "b": [1, 2]}' @? 'lax $.keyvalue().key'; - ?column? 
----------- - t -(1 row) - -select jsonb_path_query('null', '$.double()'); -ERROR: jsonpath item method .double() can only be applied to a string or numeric value -select jsonb_path_query('true', '$.double()'); -ERROR: jsonpath item method .double() can only be applied to a string or numeric value -select jsonb_path_query('null', '$.double()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('true', '$.double()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', '$.double()'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $.double()'); -ERROR: jsonpath item method .double() can only be applied to a string or numeric value -select jsonb_path_query('{}', '$.double()'); -ERROR: jsonpath item method .double() can only be applied to a string or numeric value -select jsonb_path_query('[]', 'strict $.double()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{}', '$.double()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('1.23', '$.double()'); - jsonb_path_query ------------------- - 1.23 -(1 row) - -select jsonb_path_query('"1.23"', '$.double()'); - jsonb_path_query ------------------- - 1.23 -(1 row) - -select jsonb_path_query('"1.23aaa"', '$.double()'); -ERROR: argument "1.23aaa" of jsonpath item method .double() is invalid for type double precision -select jsonb_path_query('1e1000', '$.double()'); -ERROR: argument "10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" of jsonpath item method .double() is invalid for type double precision -select jsonb_path_query('"nan"', '$.double()'); -ERROR: NaN or Infinity is not allowed for jsonpath item method .double() -select jsonb_path_query('"NaN"', '$.double()'); -ERROR: NaN or Infinity is not allowed for jsonpath item method .double() -select jsonb_path_query('"inf"', '$.double()'); -ERROR: NaN or Infinity is not allowed for jsonpath item method .double() -select jsonb_path_query('"-inf"', '$.double()'); -ERROR: NaN or Infinity is not allowed for jsonpath item method .double() -select jsonb_path_query('"inf"', '$.double()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('"-inf"', '$.double()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{}', '$.abs()'); -ERROR: jsonpath item method .abs() can only be applied to a numeric value -select jsonb_path_query('true', '$.floor()'); -ERROR: jsonpath item 
method .floor() can only be applied to a numeric value -select jsonb_path_query('"1.2"', '$.ceiling()'); -ERROR: jsonpath item method .ceiling() can only be applied to a numeric value -select jsonb_path_query('{}', '$.abs()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('true', '$.floor()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('"1.2"', '$.ceiling()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('["", "a", "abc", "abcabc"]', '$[*] ? (@ starts with "abc")'); - jsonb_path_query ------------------- - "abc" - "abcabc" -(2 rows) - -select jsonb_path_query('["", "a", "abc", "abcabc"]', 'strict $ ? (@[*] starts with "abc")'); - jsonb_path_query ----------------------------- - ["", "a", "abc", "abcabc"] -(1 row) - -select jsonb_path_query('["", "a", "abd", "abdabc"]', 'strict $ ? (@[*] starts with "abc")'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('["abc", "abcabc", null, 1]', 'strict $ ? (@[*] starts with "abc")'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('["abc", "abcabc", null, 1]', 'strict $ ? ((@[*] starts with "abc") is unknown)'); - jsonb_path_query ----------------------------- - ["abc", "abcabc", null, 1] -(1 row) - -select jsonb_path_query('[[null, 1, "abc", "abcabc"]]', 'lax $ ? (@[*] starts with "abc")'); - jsonb_path_query ----------------------------- - [null, 1, "abc", "abcabc"] -(1 row) - -select jsonb_path_query('[[null, 1, "abd", "abdabc"]]', 'lax $ ? ((@[*] starts with "abc") is unknown)'); - jsonb_path_query ----------------------------- - [null, 1, "abd", "abdabc"] -(1 row) - -select jsonb_path_query('[null, 1, "abd", "abdabc"]', 'lax $[*] ? ((@ starts with "abc") is unknown)'); - jsonb_path_query ------------------- - null - 1 -(2 rows) - -select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c")'); - jsonb_path_query ------------------- - "abc" - "abdacb" -(2 rows) - -select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "i")'); - jsonb_path_query ------------------- - "abc" - "aBdC" - "abdacb" -(3 rows) - -select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "m")'); - jsonb_path_query ------------------- - "abc" - "abdacb" - "adc\nabc" -(3 rows) - -select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "s")'); - jsonb_path_query ------------------- - "abc" - "abdacb" - "ab\nadc" -(3 rows) - -select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "a\\b" flag "q")'); - jsonb_path_query ------------------- - "a\\b" - "^a\\b$" -(2 rows) - -select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "a\\b" flag "")'); - jsonb_path_query ------------------- - "a\b" -(1 row) - -select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\b$" flag "q")'); - jsonb_path_query ------------------- - "^a\\b$" -(1 row) - -select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? 
(@ like_regex "^a\\B$" flag "q")'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\B$" flag "iq")'); - jsonb_path_query ------------------- - "^a\\b$" -(1 row) - -select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\b$" flag "")'); - jsonb_path_query ------------------- - "a\b" -(1 row) - -select jsonb_path_query('null', '$.datetime()'); -ERROR: jsonpath item method .datetime() can only be applied to a string -select jsonb_path_query('true', '$.datetime()'); -ERROR: jsonpath item method .datetime() can only be applied to a string -select jsonb_path_query('1', '$.datetime()'); -ERROR: jsonpath item method .datetime() can only be applied to a string -select jsonb_path_query('[]', '$.datetime()'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $.datetime()'); -ERROR: jsonpath item method .datetime() can only be applied to a string -select jsonb_path_query('{}', '$.datetime()'); -ERROR: jsonpath item method .datetime() can only be applied to a string -select jsonb_path_query('"bogus"', '$.datetime()'); -ERROR: datetime format is not recognized: "bogus" -HINT: Use a datetime template argument to specify the input data format. -select jsonb_path_query('"12:34"', '$.datetime("aaa")'); -ERROR: invalid datetime format separator: "a" -select jsonb_path_query('"aaaa"', '$.datetime("HH24")'); -ERROR: invalid value "aa" for "HH24" -DETAIL: Value must be an integer. -select jsonb '"10-03-2017"' @? '$.datetime("dd-mm-yyyy")'; - ?column? ----------- - t -(1 row) - -select jsonb_path_query('"10-03-2017"', '$.datetime("dd-mm-yyyy")'); - jsonb_path_query ------------------- - "2017-03-10" -(1 row) - -select jsonb_path_query('"10-03-2017"', '$.datetime("dd-mm-yyyy").type()'); - jsonb_path_query ------------------- - "date" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy")'); -ERROR: trailing characters remain in input string after datetime format -select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy").type()'); -ERROR: trailing characters remain in input string after datetime format -select jsonb_path_query('"10-03-2017 12:34"', ' $.datetime("dd-mm-yyyy HH24:MI").type()'); - jsonb_path_query -------------------------------- - "timestamp without time zone" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM").type()'); - jsonb_path_query ----------------------------- - "timestamp with time zone" -(1 row) - -select jsonb_path_query('"12:34:56"', '$.datetime("HH24:MI:SS").type()'); - jsonb_path_query --------------------------- - "time without time zone" -(1 row) - -select jsonb_path_query('"12:34:56 +05:20"', '$.datetime("HH24:MI:SS TZH:TZM").type()'); - jsonb_path_query ------------------------ - "time with time zone" -(1 row) - -select jsonb_path_query('"10-03-2017T12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")'); - jsonb_path_query ------------------------ - "2017-03-10T12:34:56" -(1 row) - -select jsonb_path_query('"10-03-2017t12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")'); -ERROR: unmatched format character "T" -select jsonb_path_query('"10-03-2017 12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")'); -ERROR: unmatched format character "T" --- Test .bigint() -select jsonb_path_query('null', '$.bigint()'); -ERROR: jsonpath item method .bigint() can only be applied to a string or numeric value -select 
jsonb_path_query('true', '$.bigint()'); -ERROR: jsonpath item method .bigint() can only be applied to a string or numeric value -select jsonb_path_query('null', '$.bigint()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('true', '$.bigint()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', '$.bigint()'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $.bigint()'); -ERROR: jsonpath item method .bigint() can only be applied to a string or numeric value -select jsonb_path_query('{}', '$.bigint()'); -ERROR: jsonpath item method .bigint() can only be applied to a string or numeric value -select jsonb_path_query('[]', 'strict $.bigint()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{}', '$.bigint()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('"1.23"', '$.bigint()'); -ERROR: argument "1.23" of jsonpath item method .bigint() is invalid for type bigint -select jsonb_path_query('"1.23aaa"', '$.bigint()'); -ERROR: argument "1.23aaa" of jsonpath item method .bigint() is invalid for type bigint -select jsonb_path_query('1e1000', '$.bigint()'); -ERROR: argument "10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" of jsonpath item method .bigint() is invalid for type bigint -select jsonb_path_query('"nan"', '$.bigint()'); -ERROR: argument "nan" of jsonpath item method .bigint() is invalid for type bigint -select jsonb_path_query('"NaN"', '$.bigint()'); -ERROR: argument "NaN" of jsonpath item method .bigint() is invalid for type bigint -select jsonb_path_query('"inf"', '$.bigint()'); -ERROR: argument "inf" of jsonpath item method .bigint() is invalid for type bigint -select jsonb_path_query('"-inf"', '$.bigint()'); -ERROR: argument "-inf" of jsonpath item method .bigint() is invalid for type bigint -select jsonb_path_query('"inf"', '$.bigint()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('"-inf"', '$.bigint()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('123', '$.bigint()'); - jsonb_path_query ------------------- - 123 -(1 row) - -select jsonb_path_query('"123"', '$.bigint()'); - jsonb_path_query ------------------- - 123 -(1 row) - -select jsonb_path_query('1.23', '$.bigint()'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('1.83', '$.bigint()'); - jsonb_path_query ------------------- - 2 -(1 row) - 
-select jsonb_path_query('1234567890123', '$.bigint()'); - jsonb_path_query ------------------- - 1234567890123 -(1 row) - -select jsonb_path_query('"1234567890123"', '$.bigint()'); - jsonb_path_query ------------------- - 1234567890123 -(1 row) - -select jsonb_path_query('12345678901234567890', '$.bigint()'); -ERROR: argument "12345678901234567890" of jsonpath item method .bigint() is invalid for type bigint -select jsonb_path_query('"12345678901234567890"', '$.bigint()'); -ERROR: argument "12345678901234567890" of jsonpath item method .bigint() is invalid for type bigint -select jsonb_path_query('"+123"', '$.bigint()'); - jsonb_path_query ------------------- - 123 -(1 row) - -select jsonb_path_query('-123', '$.bigint()'); - jsonb_path_query ------------------- - -123 -(1 row) - -select jsonb_path_query('"-123"', '$.bigint()'); - jsonb_path_query ------------------- - -123 -(1 row) - -select jsonb_path_query('123', '$.bigint() * 2'); - jsonb_path_query ------------------- - 246 -(1 row) - --- Test .boolean() -select jsonb_path_query('null', '$.boolean()'); -ERROR: jsonpath item method .boolean() can only be applied to a bool, string, or numeric value -select jsonb_path_query('null', '$.boolean()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', '$.boolean()'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $.boolean()'); -ERROR: jsonpath item method .boolean() can only be applied to a bool, string, or numeric value -select jsonb_path_query('{}', '$.boolean()'); -ERROR: jsonpath item method .boolean() can only be applied to a bool, string, or numeric value -select jsonb_path_query('[]', 'strict $.boolean()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{}', '$.boolean()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('1.23', '$.boolean()'); -ERROR: argument "1.23" of jsonpath item method .boolean() is invalid for type boolean -select jsonb_path_query('"1.23"', '$.boolean()'); -ERROR: argument "1.23" of jsonpath item method .boolean() is invalid for type boolean -select jsonb_path_query('"1.23aaa"', '$.boolean()'); -ERROR: argument "1.23aaa" of jsonpath item method .boolean() is invalid for type boolean -select jsonb_path_query('1e1000', '$.boolean()'); -ERROR: argument "10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" of jsonpath item method .boolean() is invalid for type boolean -select jsonb_path_query('"nan"', '$.boolean()'); -ERROR: argument "nan" of jsonpath item 
method .boolean() is invalid for type boolean -select jsonb_path_query('"NaN"', '$.boolean()'); -ERROR: argument "NaN" of jsonpath item method .boolean() is invalid for type boolean -select jsonb_path_query('"inf"', '$.boolean()'); -ERROR: argument "inf" of jsonpath item method .boolean() is invalid for type boolean -select jsonb_path_query('"-inf"', '$.boolean()'); -ERROR: argument "-inf" of jsonpath item method .boolean() is invalid for type boolean -select jsonb_path_query('"inf"', '$.boolean()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('"-inf"', '$.boolean()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('"100"', '$.boolean()'); -ERROR: argument "100" of jsonpath item method .boolean() is invalid for type boolean -select jsonb_path_query('true', '$.boolean()'); - jsonb_path_query ------------------- - true -(1 row) - -select jsonb_path_query('false', '$.boolean()'); - jsonb_path_query ------------------- - false -(1 row) - -select jsonb_path_query('1', '$.boolean()'); - jsonb_path_query ------------------- - true -(1 row) - -select jsonb_path_query('0', '$.boolean()'); - jsonb_path_query ------------------- - false -(1 row) - -select jsonb_path_query('-1', '$.boolean()'); - jsonb_path_query ------------------- - true -(1 row) - -select jsonb_path_query('100', '$.boolean()'); - jsonb_path_query ------------------- - true -(1 row) - -select jsonb_path_query('"1"', '$.boolean()'); - jsonb_path_query ------------------- - true -(1 row) - -select jsonb_path_query('"0"', '$.boolean()'); - jsonb_path_query ------------------- - false -(1 row) - -select jsonb_path_query('"true"', '$.boolean()'); - jsonb_path_query ------------------- - true -(1 row) - -select jsonb_path_query('"false"', '$.boolean()'); - jsonb_path_query ------------------- - false -(1 row) - -select jsonb_path_query('"TRUE"', '$.boolean()'); - jsonb_path_query ------------------- - true -(1 row) - -select jsonb_path_query('"FALSE"', '$.boolean()'); - jsonb_path_query ------------------- - false -(1 row) - -select jsonb_path_query('"yes"', '$.boolean()'); - jsonb_path_query ------------------- - true -(1 row) - -select jsonb_path_query('"NO"', '$.boolean()'); - jsonb_path_query ------------------- - false -(1 row) - -select jsonb_path_query('"T"', '$.boolean()'); - jsonb_path_query ------------------- - true -(1 row) - -select jsonb_path_query('"f"', '$.boolean()'); - jsonb_path_query ------------------- - false -(1 row) - -select jsonb_path_query('"y"', '$.boolean()'); - jsonb_path_query ------------------- - true -(1 row) - -select jsonb_path_query('"N"', '$.boolean()'); - jsonb_path_query ------------------- - false -(1 row) - -select jsonb_path_query('true', '$.boolean().type()'); - jsonb_path_query ------------------- - "boolean" -(1 row) - -select jsonb_path_query('123', '$.boolean().type()'); - jsonb_path_query ------------------- - "boolean" -(1 row) - -select jsonb_path_query('"Yes"', '$.boolean().type()'); - jsonb_path_query ------------------- - "boolean" -(1 row) - -select jsonb_path_query_array('[1, "yes", false]', '$[*].boolean()'); - jsonb_path_query_array ------------------------- - [true, true, false] -(1 row) - --- Test .date() -select jsonb_path_query('null', '$.date()'); -ERROR: jsonpath item method .date() can only be applied to a string -select jsonb_path_query('true', '$.date()'); -ERROR: jsonpath item method .date() can only be applied to a string -select jsonb_path_query('1', '$.date()'); -ERROR: 
jsonpath item method .date() can only be applied to a string -select jsonb_path_query('[]', '$.date()'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $.date()'); -ERROR: jsonpath item method .date() can only be applied to a string -select jsonb_path_query('{}', '$.date()'); -ERROR: jsonpath item method .date() can only be applied to a string -select jsonb_path_query('"bogus"', '$.date()'); -ERROR: date format is not recognized: "bogus" -select jsonb '"2023-08-15"' @? '$.date()'; - ?column? ----------- - t -(1 row) - -select jsonb_path_query('"2023-08-15"', '$.date()'); - jsonb_path_query ------------------- - "2023-08-15" -(1 row) - -select jsonb_path_query('"2023-08-15"', '$.date().type()'); - jsonb_path_query ------------------- - "date" -(1 row) - -select jsonb_path_query('"12:34:56"', '$.date()'); -ERROR: date format is not recognized: "12:34:56" -select jsonb_path_query('"12:34:56 +05:30"', '$.date()'); -ERROR: date format is not recognized: "12:34:56 +05:30" -select jsonb_path_query('"2023-08-15 12:34:56"', '$.date()'); - jsonb_path_query ------------------- - "2023-08-15" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.date()'); -ERROR: cannot convert value from timestamptz to date without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.date()'); -- should work - jsonb_path_query_tz ---------------------- - "2023-08-15" -(1 row) - -select jsonb_path_query('"2023-08-15"', '$.date(2)'); -ERROR: syntax error at or near "2" of jsonpath input -LINE 1: select jsonb_path_query('"2023-08-15"', '$.date(2)'); - ^ --- Test .decimal() -select jsonb_path_query('null', '$.decimal()'); -ERROR: jsonpath item method .decimal() can only be applied to a string or numeric value -select jsonb_path_query('true', '$.decimal()'); -ERROR: jsonpath item method .decimal() can only be applied to a string or numeric value -select jsonb_path_query('null', '$.decimal()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('true', '$.decimal()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', '$.decimal()'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $.decimal()'); -ERROR: jsonpath item method .decimal() can only be applied to a string or numeric value -select jsonb_path_query('{}', '$.decimal()'); -ERROR: jsonpath item method .decimal() can only be applied to a string or numeric value -select jsonb_path_query('[]', 'strict $.decimal()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{}', '$.decimal()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('1.23', '$.decimal()'); - jsonb_path_query ------------------- - 1.23 -(1 row) - -select jsonb_path_query('"1.23"', '$.decimal()'); - jsonb_path_query ------------------- - 1.23 -(1 row) - -select jsonb_path_query('"1.23aaa"', '$.decimal()'); -ERROR: argument "1.23aaa" of jsonpath item method .decimal() is invalid for type numeric -select jsonb_path_query('1e1000', '$.decimal()'); - jsonb_path_query 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 -(1 row) - -select jsonb_path_query('"nan"', '$.decimal()'); -ERROR: NaN or Infinity is not allowed for jsonpath item method .decimal() -select jsonb_path_query('"NaN"', '$.decimal()'); -ERROR: NaN or Infinity is not allowed for jsonpath item method .decimal() -select jsonb_path_query('"inf"', '$.decimal()'); -ERROR: NaN or Infinity is not allowed for jsonpath item method .decimal() -select jsonb_path_query('"-inf"', '$.decimal()'); -ERROR: NaN or Infinity is not allowed for jsonpath item method .decimal() -select jsonb_path_query('"inf"', '$.decimal()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('"-inf"', '$.decimal()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('123', '$.decimal()'); - jsonb_path_query ------------------- - 123 -(1 row) - -select jsonb_path_query('"123"', '$.decimal()'); - jsonb_path_query ------------------- - 123 -(1 row) - -select jsonb_path_query('12345678901234567890', '$.decimal()'); - jsonb_path_query ----------------------- - 12345678901234567890 -(1 row) - -select jsonb_path_query('"12345678901234567890"', '$.decimal()'); - jsonb_path_query ----------------------- - 12345678901234567890 -(1 row) - -select jsonb_path_query('"+12.3"', '$.decimal()'); - jsonb_path_query ------------------- - 12.3 -(1 row) - -select jsonb_path_query('-12.3', '$.decimal()'); - jsonb_path_query ------------------- - -12.3 -(1 row) - -select jsonb_path_query('"-12.3"', '$.decimal()'); - jsonb_path_query 
------------------- - -12.3 -(1 row) - -select jsonb_path_query('12.3', '$.decimal() * 2'); - jsonb_path_query ------------------- - 24.6 -(1 row) - -select jsonb_path_query('12345.678', '$.decimal(6, 1)'); - jsonb_path_query ------------------- - 12345.7 -(1 row) - -select jsonb_path_query('12345.678', '$.decimal(6, 2)'); -ERROR: argument "12345.678" of jsonpath item method .decimal() is invalid for type numeric -select jsonb_path_query('1234.5678', '$.decimal(6, 2)'); - jsonb_path_query ------------------- - 1234.57 -(1 row) - -select jsonb_path_query('12345.678', '$.decimal(4, 6)'); -ERROR: argument "12345.678" of jsonpath item method .decimal() is invalid for type numeric -select jsonb_path_query('12345.678', '$.decimal(0, 6)'); -ERROR: NUMERIC precision 0 must be between 1 and 1000 -select jsonb_path_query('12345.678', '$.decimal(1001, 6)'); -ERROR: NUMERIC precision 1001 must be between 1 and 1000 -select jsonb_path_query('1234.5678', '$.decimal(+6, +2)'); - jsonb_path_query ------------------- - 1234.57 -(1 row) - -select jsonb_path_query('1234.5678', '$.decimal(+6, -2)'); - jsonb_path_query ------------------- - 1200 -(1 row) - -select jsonb_path_query('1234.5678', '$.decimal(-6, +2)'); -ERROR: NUMERIC precision -6 must be between 1 and 1000 -select jsonb_path_query('1234.5678', '$.decimal(6, -1001)'); -ERROR: NUMERIC scale -1001 must be between -1000 and 1000 -select jsonb_path_query('1234.5678', '$.decimal(6, 1001)'); -ERROR: NUMERIC scale 1001 must be between -1000 and 1000 -select jsonb_path_query('-1234.5678', '$.decimal(+6, -2)'); - jsonb_path_query ------------------- - -1200 -(1 row) - -select jsonb_path_query('0.0123456', '$.decimal(1,2)'); - jsonb_path_query ------------------- - 0.01 -(1 row) - -select jsonb_path_query('0.0012345', '$.decimal(2,4)'); - jsonb_path_query ------------------- - 0.0012 -(1 row) - -select jsonb_path_query('-0.00123456', '$.decimal(2,-4)'); - jsonb_path_query ------------------- - 0 -(1 row) - -select jsonb_path_query('12.3', '$.decimal(12345678901,1)'); -ERROR: precision of jsonpath item method .decimal() is out of range for type integer -select jsonb_path_query('12.3', '$.decimal(1,12345678901)'); -ERROR: scale of jsonpath item method .decimal() is out of range for type integer --- Test .integer() -select jsonb_path_query('null', '$.integer()'); -ERROR: jsonpath item method .integer() can only be applied to a string or numeric value -select jsonb_path_query('true', '$.integer()'); -ERROR: jsonpath item method .integer() can only be applied to a string or numeric value -select jsonb_path_query('null', '$.integer()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('true', '$.integer()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', '$.integer()'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $.integer()'); -ERROR: jsonpath item method .integer() can only be applied to a string or numeric value -select jsonb_path_query('{}', '$.integer()'); -ERROR: jsonpath item method .integer() can only be applied to a string or numeric value -select jsonb_path_query('[]', 'strict $.integer()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{}', '$.integer()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('"1.23"', '$.integer()'); -ERROR: argument "1.23" of jsonpath item method .integer() is invalid for type integer -select 
jsonb_path_query('"1.23aaa"', '$.integer()'); -ERROR: argument "1.23aaa" of jsonpath item method .integer() is invalid for type integer -select jsonb_path_query('1e1000', '$.integer()'); -ERROR: argument "10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" of jsonpath item method .integer() is invalid for type integer -select jsonb_path_query('"nan"', '$.integer()'); -ERROR: argument "nan" of jsonpath item method .integer() is invalid for type integer -select jsonb_path_query('"NaN"', '$.integer()'); -ERROR: argument "NaN" of jsonpath item method .integer() is invalid for type integer -select jsonb_path_query('"inf"', '$.integer()'); -ERROR: argument "inf" of jsonpath item method .integer() is invalid for type integer -select jsonb_path_query('"-inf"', '$.integer()'); -ERROR: argument "-inf" of jsonpath item method .integer() is invalid for type integer -select jsonb_path_query('"inf"', '$.integer()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('"-inf"', '$.integer()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('123', '$.integer()'); - jsonb_path_query ------------------- - 123 -(1 row) - -select jsonb_path_query('"123"', '$.integer()'); - jsonb_path_query ------------------- - 123 -(1 row) - -select jsonb_path_query('1.23', '$.integer()'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('1.83', '$.integer()'); - jsonb_path_query ------------------- - 2 -(1 row) - -select jsonb_path_query('12345678901', '$.integer()'); -ERROR: argument "12345678901" of jsonpath item method .integer() is invalid for type integer -select jsonb_path_query('"12345678901"', '$.integer()'); -ERROR: argument "12345678901" of jsonpath item method .integer() is invalid for type integer -select jsonb_path_query('"+123"', '$.integer()'); - jsonb_path_query ------------------- - 123 -(1 row) - -select jsonb_path_query('-123', '$.integer()'); - jsonb_path_query ------------------- - -123 -(1 row) - -select jsonb_path_query('"-123"', '$.integer()'); - jsonb_path_query ------------------- - -123 -(1 row) - -select jsonb_path_query('123', '$.integer() * 2'); - jsonb_path_query ------------------- - 246 -(1 row) - --- Test .number() -select jsonb_path_query('null', '$.number()'); -ERROR: jsonpath item method .number() can only be applied to a string or numeric value -select jsonb_path_query('true', '$.number()'); -ERROR: jsonpath item method .number() can only be applied to a string or numeric value -select jsonb_path_query('null', '$.number()', silent => true); 
- jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('true', '$.number()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', '$.number()'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $.number()'); -ERROR: jsonpath item method .number() can only be applied to a string or numeric value -select jsonb_path_query('{}', '$.number()'); -ERROR: jsonpath item method .number() can only be applied to a string or numeric value -select jsonb_path_query('[]', 'strict $.number()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{}', '$.number()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('1.23', '$.number()'); - jsonb_path_query ------------------- - 1.23 -(1 row) - -select jsonb_path_query('"1.23"', '$.number()'); - jsonb_path_query ------------------- - 1.23 -(1 row) - -select jsonb_path_query('"1.23aaa"', '$.number()'); -ERROR: argument "1.23aaa" of jsonpath item method .number() is invalid for type numeric -select jsonb_path_query('1e1000', '$.number()'); - jsonb_path_query -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 -(1 row) - -select jsonb_path_query('"nan"', '$.number()'); -ERROR: NaN or Infinity is not allowed for jsonpath item method .number() -select jsonb_path_query('"NaN"', '$.number()'); -ERROR: NaN or Infinity is not allowed for jsonpath item method .number() -select jsonb_path_query('"inf"', '$.number()'); -ERROR: NaN or Infinity is not allowed for 
jsonpath item method .number() -select jsonb_path_query('"-inf"', '$.number()'); -ERROR: NaN or Infinity is not allowed for jsonpath item method .number() -select jsonb_path_query('"inf"', '$.number()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('"-inf"', '$.number()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('123', '$.number()'); - jsonb_path_query ------------------- - 123 -(1 row) - -select jsonb_path_query('"123"', '$.number()'); - jsonb_path_query ------------------- - 123 -(1 row) - -select jsonb_path_query('12345678901234567890', '$.number()'); - jsonb_path_query ----------------------- - 12345678901234567890 -(1 row) - -select jsonb_path_query('"12345678901234567890"', '$.number()'); - jsonb_path_query ----------------------- - 12345678901234567890 -(1 row) - -select jsonb_path_query('"+12.3"', '$.number()'); - jsonb_path_query ------------------- - 12.3 -(1 row) - -select jsonb_path_query('-12.3', '$.number()'); - jsonb_path_query ------------------- - -12.3 -(1 row) - -select jsonb_path_query('"-12.3"', '$.number()'); - jsonb_path_query ------------------- - -12.3 -(1 row) - -select jsonb_path_query('12.3', '$.number() * 2'); - jsonb_path_query ------------------- - 24.6 -(1 row) - --- Test .string() -select jsonb_path_query('null', '$.string()'); -ERROR: jsonpath item method .string() can only be applied to a bool, string, numeric, or datetime value -select jsonb_path_query('null', '$.string()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', '$.string()'); -ERROR: jsonpath item method .string() can only be applied to a bool, string, numeric, or datetime value -select jsonb_path_query('[]', 'strict $.string()'); -ERROR: jsonpath item method .string() can only be applied to a bool, string, numeric, or datetime value -select jsonb_path_query('{}', '$.string()'); -ERROR: jsonpath item method .string() can only be applied to a bool, string, numeric, or datetime value -select jsonb_path_query('[]', 'strict $.string()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{}', '$.string()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('1.23', '$.string()'); - jsonb_path_query ------------------- - "1.23" -(1 row) - -select jsonb_path_query('"1.23"', '$.string()'); - jsonb_path_query ------------------- - "1.23" -(1 row) - -select jsonb_path_query('"1.23aaa"', '$.string()'); - jsonb_path_query ------------------- - "1.23aaa" -(1 row) - -select jsonb_path_query('1234', '$.string()'); - jsonb_path_query ------------------- - "1234" -(1 row) - -select jsonb_path_query('true', '$.string()'); - jsonb_path_query ------------------- - "true" -(1 row) - -select jsonb_path_query('1234', '$.string().type()'); - jsonb_path_query ------------------- - "string" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56 +5:30"', '$.timestamp().string()'); -ERROR: cannot convert value from timestamptz to timestamp without time zone usage -HINT: Use *_tz() function for time zone support. 
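A minimal sketch of the .string() conversions the expected output above encodes, assuming a PostgreSQL build that includes these jsonpath item methods (they are exercised by this test suite and not present in older releases); the inputs here are illustrative, not part of the test:

select jsonb_path_query('42', '$.string()');           -- "42"
select jsonb_path_query('true', '$.string().type()');  -- "string"
-- A datetime source is resolved to a concrete type first; one that resolves
-- to timestamptz cannot be stringified without time zone usage, which is why
-- the jsonb_path_query_tz retry just below succeeds where the plain call
-- raised an error.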
-select jsonb_path_query_tz('"2023-08-15 12:34:56 +5:30"', '$.timestamp().string()'); -- should work - jsonb_path_query_tz ----------------------------- - "Tue Aug 15 00:04:56 2023" -(1 row) - -select jsonb_path_query_array('[1.23, "yes", false]', '$[*].string()'); - jsonb_path_query_array --------------------------- - ["1.23", "yes", "false"] -(1 row) - -select jsonb_path_query_array('[1.23, "yes", false]', '$[*].string().type()'); - jsonb_path_query_array --------------------------------- - ["string", "string", "string"] -(1 row) - --- Test .time() -select jsonb_path_query('null', '$.time()'); -ERROR: jsonpath item method .time() can only be applied to a string -select jsonb_path_query('true', '$.time()'); -ERROR: jsonpath item method .time() can only be applied to a string -select jsonb_path_query('1', '$.time()'); -ERROR: jsonpath item method .time() can only be applied to a string -select jsonb_path_query('[]', '$.time()'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $.time()'); -ERROR: jsonpath item method .time() can only be applied to a string -select jsonb_path_query('{}', '$.time()'); -ERROR: jsonpath item method .time() can only be applied to a string -select jsonb_path_query('"bogus"', '$.time()'); -ERROR: time format is not recognized: "bogus" -select jsonb '"12:34:56"' @? '$.time()'; - ?column? ----------- - t -(1 row) - -select jsonb_path_query('"12:34:56"', '$.time()'); - jsonb_path_query ------------------- - "12:34:56" -(1 row) - -select jsonb_path_query('"12:34:56"', '$.time().type()'); - jsonb_path_query --------------------------- - "time without time zone" -(1 row) - -select jsonb_path_query('"2023-08-15"', '$.time()'); -ERROR: time format is not recognized: "2023-08-15" -select jsonb_path_query('"12:34:56 +05:30"', '$.time()'); -ERROR: cannot convert value from timetz to time without time zone usage -HINT: Use *_tz() function for time zone support. 
-select jsonb_path_query_tz('"12:34:56 +05:30"', '$.time()'); -- should work - jsonb_path_query_tz ---------------------- - "12:34:56" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56"', '$.time()'); - jsonb_path_query ------------------- - "12:34:56" -(1 row) - -select jsonb_path_query('"12:34:56.789"', '$.time(-1)'); -ERROR: syntax error at or near "-" of jsonpath input -LINE 1: select jsonb_path_query('"12:34:56.789"', '$.time(-1)'); - ^ -select jsonb_path_query('"12:34:56.789"', '$.time(2.0)'); -ERROR: syntax error at or near "2.0" of jsonpath input -LINE 1: select jsonb_path_query('"12:34:56.789"', '$.time(2.0)'); - ^ -select jsonb_path_query('"12:34:56.789"', '$.time(12345678901)'); -ERROR: time precision of jsonpath item method .time() is out of range for type integer -select jsonb_path_query('"12:34:56.789"', '$.time(0)'); - jsonb_path_query ------------------- - "12:34:57" -(1 row) - -select jsonb_path_query('"12:34:56.789"', '$.time(2)'); - jsonb_path_query ------------------- - "12:34:56.79" -(1 row) - -select jsonb_path_query('"12:34:56.789"', '$.time(5)'); - jsonb_path_query ------------------- - "12:34:56.789" -(1 row) - -select jsonb_path_query('"12:34:56.789"', '$.time(10)'); -WARNING: TIME(10) precision reduced to maximum allowed, 6 - jsonb_path_query ------------------- - "12:34:56.789" -(1 row) - -select jsonb_path_query('"12:34:56.789012"', '$.time(8)'); -WARNING: TIME(8) precision reduced to maximum allowed, 6 - jsonb_path_query -------------------- - "12:34:56.789012" -(1 row) - --- Test .time_tz() -select jsonb_path_query('null', '$.time_tz()'); -ERROR: jsonpath item method .time_tz() can only be applied to a string -select jsonb_path_query('true', '$.time_tz()'); -ERROR: jsonpath item method .time_tz() can only be applied to a string -select jsonb_path_query('1', '$.time_tz()'); -ERROR: jsonpath item method .time_tz() can only be applied to a string -select jsonb_path_query('[]', '$.time_tz()'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $.time_tz()'); -ERROR: jsonpath item method .time_tz() can only be applied to a string -select jsonb_path_query('{}', '$.time_tz()'); -ERROR: jsonpath item method .time_tz() can only be applied to a string -select jsonb_path_query('"bogus"', '$.time_tz()'); -ERROR: time_tz format is not recognized: "bogus" -select jsonb '"12:34:56 +05:30"' @? '$.time_tz()'; - ?column? ----------- - t -(1 row) - -select jsonb_path_query('"12:34:56 +05:30"', '$.time_tz()'); - jsonb_path_query ------------------- - "12:34:56+05:30" -(1 row) - -select jsonb_path_query('"12:34:56 +05:30"', '$.time_tz().type()'); - jsonb_path_query ------------------------ - "time with time zone" -(1 row) - -select jsonb_path_query('"2023-08-15"', '$.time_tz()'); -ERROR: time_tz format is not recognized: "2023-08-15" -select jsonb_path_query('"2023-08-15 12:34:56"', '$.time_tz()'); -ERROR: time_tz format is not recognized: "2023-08-15 12:34:56" -select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(-1)'); -ERROR: syntax error at or near "-" of jsonpath input -LINE 1: select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(... - ^ -select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(2.0)'); -ERROR: syntax error at or near "2.0" of jsonpath input -LINE 1: select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(... 
- ^ -select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(12345678901)'); -ERROR: time precision of jsonpath item method .time_tz() is out of range for type integer -select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(0)'); - jsonb_path_query ------------------- - "12:34:57+05:30" -(1 row) - -select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(2)'); - jsonb_path_query ---------------------- - "12:34:56.79+05:30" -(1 row) - -select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(5)'); - jsonb_path_query ----------------------- - "12:34:56.789+05:30" -(1 row) - -select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(10)'); -WARNING: TIME(10) WITH TIME ZONE precision reduced to maximum allowed, 6 - jsonb_path_query ----------------------- - "12:34:56.789+05:30" -(1 row) - -select jsonb_path_query('"12:34:56.789012 +05:30"', '$.time_tz(8)'); -WARNING: TIME(8) WITH TIME ZONE precision reduced to maximum allowed, 6 - jsonb_path_query -------------------------- - "12:34:56.789012+05:30" -(1 row) - --- Test .timestamp() -select jsonb_path_query('null', '$.timestamp()'); -ERROR: jsonpath item method .timestamp() can only be applied to a string -select jsonb_path_query('true', '$.timestamp()'); -ERROR: jsonpath item method .timestamp() can only be applied to a string -select jsonb_path_query('1', '$.timestamp()'); -ERROR: jsonpath item method .timestamp() can only be applied to a string -select jsonb_path_query('[]', '$.timestamp()'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $.timestamp()'); -ERROR: jsonpath item method .timestamp() can only be applied to a string -select jsonb_path_query('{}', '$.timestamp()'); -ERROR: jsonpath item method .timestamp() can only be applied to a string -select jsonb_path_query('"bogus"', '$.timestamp()'); -ERROR: timestamp format is not recognized: "bogus" -select jsonb '"2023-08-15 12:34:56"' @? '$.timestamp()'; - ?column? ----------- - t -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp()'); - jsonb_path_query ------------------------ - "2023-08-15T12:34:56" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp().type()'); - jsonb_path_query -------------------------------- - "timestamp without time zone" -(1 row) - -select jsonb_path_query('"2023-08-15"', '$.timestamp()'); - jsonb_path_query ------------------------ - "2023-08-15T00:00:00" -(1 row) - -select jsonb_path_query('"12:34:56"', '$.timestamp()'); -ERROR: timestamp format is not recognized: "12:34:56" -select jsonb_path_query('"12:34:56 +05:30"', '$.timestamp()'); -ERROR: timestamp format is not recognized: "12:34:56 +05:30" -select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(-1)'); -ERROR: syntax error at or near "-" of jsonpath input -LINE 1: ...ect jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timesta... - ^ -select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(2.0)'); -ERROR: syntax error at or near "2.0" of jsonpath input -LINE 1: ...ect jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timesta... 
- ^ -select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(12345678901)'); -ERROR: time precision of jsonpath item method .timestamp() is out of range for type integer -select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(0)'); - jsonb_path_query ------------------------ - "2023-08-15T12:34:57" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(2)'); - jsonb_path_query --------------------------- - "2023-08-15T12:34:56.79" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(5)'); - jsonb_path_query ---------------------------- - "2023-08-15T12:34:56.789" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(10)'); -WARNING: TIMESTAMP(10) precision reduced to maximum allowed, 6 - jsonb_path_query ---------------------------- - "2023-08-15T12:34:56.789" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56.789012"', '$.timestamp(8)'); -WARNING: TIMESTAMP(8) precision reduced to maximum allowed, 6 - jsonb_path_query ------------------------------- - "2023-08-15T12:34:56.789012" -(1 row) - --- Test .timestamp_tz() -select jsonb_path_query('null', '$.timestamp_tz()'); -ERROR: jsonpath item method .timestamp_tz() can only be applied to a string -select jsonb_path_query('true', '$.timestamp_tz()'); -ERROR: jsonpath item method .timestamp_tz() can only be applied to a string -select jsonb_path_query('1', '$.timestamp_tz()'); -ERROR: jsonpath item method .timestamp_tz() can only be applied to a string -select jsonb_path_query('[]', '$.timestamp_tz()'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $.timestamp_tz()'); -ERROR: jsonpath item method .timestamp_tz() can only be applied to a string -select jsonb_path_query('{}', '$.timestamp_tz()'); -ERROR: jsonpath item method .timestamp_tz() can only be applied to a string -select jsonb_path_query('"bogus"', '$.timestamp_tz()'); -ERROR: timestamp_tz format is not recognized: "bogus" -select jsonb '"2023-08-15 12:34:56 +05:30"' @? '$.timestamp_tz()'; - ?column? ----------- - t -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp_tz()'); - jsonb_path_query ------------------------------ - "2023-08-15T12:34:56+05:30" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp_tz().type()'); - jsonb_path_query ----------------------------- - "timestamp with time zone" -(1 row) - -select jsonb_path_query('"2023-08-15"', '$.timestamp_tz()'); -ERROR: cannot convert value from date to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz('"2023-08-15"', '$.timestamp_tz()'); -- should work - jsonb_path_query_tz ------------------------------ - "2023-08-15T07:00:00+00:00" -(1 row) - -select jsonb_path_query('"12:34:56"', '$.timestamp_tz()'); -ERROR: timestamp_tz format is not recognized: "12:34:56" -select jsonb_path_query('"12:34:56 +05:30"', '$.timestamp_tz()'); -ERROR: timestamp_tz format is not recognized: "12:34:56 +05:30" -select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(-1)'); -ERROR: syntax error at or near "-" of jsonpath input -LINE 1: ...nb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timesta... - ^ -select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(2.0)'); -ERROR: syntax error at or near "2.0" of jsonpath input -LINE 1: ...nb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timesta... 
- ^ -select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(12345678901)'); -ERROR: time precision of jsonpath item method .timestamp_tz() is out of range for type integer -select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(0)'); - jsonb_path_query ------------------------------ - "2023-08-15T12:34:57+05:30" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(2)'); - jsonb_path_query --------------------------------- - "2023-08-15T12:34:56.79+05:30" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(5)'); - jsonb_path_query ---------------------------------- - "2023-08-15T12:34:56.789+05:30" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(10)'); -WARNING: TIMESTAMP(10) WITH TIME ZONE precision reduced to maximum allowed, 6 - jsonb_path_query ---------------------------------- - "2023-08-15T12:34:56.789+05:30" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56.789012 +05:30"', '$.timestamp_tz(8)'); -WARNING: TIMESTAMP(8) WITH TIME ZONE precision reduced to maximum allowed, 6 - jsonb_path_query ------------------------------------- - "2023-08-15T12:34:56.789012+05:30" -(1 row) - -set time zone '+00'; -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.time()'); -ERROR: cannot convert value from timestamptz to time without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.time()'); -- should work - jsonb_path_query_tz ---------------------- - "07:04:56" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.time_tz()'); - jsonb_path_query ------------------- - "07:04:56+00:00" -(1 row) - -select jsonb_path_query('"12:34:56"', '$.time_tz()'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz('"12:34:56"', '$.time_tz()'); -- should work - jsonb_path_query_tz ---------------------- - "12:34:56+00:00" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp()'); -ERROR: cannot convert value from timestamptz to timestamp without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.timestamp()'); -- should work - jsonb_path_query_tz ------------------------ - "2023-08-15T07:04:56" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp_tz()'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. 
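With the session zone set to +00 just above, the *_tz() variants interpret zone-less input as UTC, so the retry below reports an offset of +00:00. A sketch of the dependency on the session zone, using results that also appear verbatim later in this output:

set time zone '+00';
select jsonb_path_query_tz('"2023-08-15 12:34:56"', '$.timestamp_tz()');  -- "2023-08-15T12:34:56+00:00"
set time zone '+10';
select jsonb_path_query_tz('"2023-08-15 12:34:56"', '$.timestamp_tz()');  -- "2023-08-15T02:34:56+00:00"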
-select jsonb_path_query_tz('"2023-08-15 12:34:56"', '$.timestamp_tz()'); -- should work - jsonb_path_query_tz ------------------------------ - "2023-08-15T12:34:56+00:00" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI")'); - jsonb_path_query ------------------------ - "2017-03-10T12:34:00" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); -ERROR: input string is too short for datetime format -select jsonb_path_query('"10-03-2017 12:34 +05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:00+05:00" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34 -05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:00-05:00" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:00+05:20" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34 -05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:00-05:20" -(1 row) - -select jsonb_path_query('"12:34"', '$.datetime("HH24:MI")'); - jsonb_path_query ------------------- - "12:34:00" -(1 row) - -select jsonb_path_query('"12:34"', '$.datetime("HH24:MI TZH")'); -ERROR: input string is too short for datetime format -select jsonb_path_query('"12:34 +05"', '$.datetime("HH24:MI TZH")'); - jsonb_path_query ------------------- - "12:34:00+05:00" -(1 row) - -select jsonb_path_query('"12:34 -05"', '$.datetime("HH24:MI TZH")'); - jsonb_path_query ------------------- - "12:34:00-05:00" -(1 row) - -select jsonb_path_query('"12:34 +05:20"', '$.datetime("HH24:MI TZH:TZM")'); - jsonb_path_query ------------------- - "12:34:00+05:20" -(1 row) - -select jsonb_path_query('"12:34 -05:20"', '$.datetime("HH24:MI TZH:TZM")'); - jsonb_path_query ------------------- - "12:34:00-05:20" -(1 row) - -set time zone '+10'; -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.time()'); -ERROR: cannot convert value from timestamptz to time without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.time()'); -- should work - jsonb_path_query_tz ---------------------- - "17:04:56" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.time_tz()'); - jsonb_path_query ------------------- - "17:04:56+10:00" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp()'); -ERROR: cannot convert value from timestamptz to timestamp without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.timestamp()'); -- should work - jsonb_path_query_tz ------------------------ - "2023-08-15T17:04:56" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp_tz()'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. 
-select jsonb_path_query_tz('"2023-08-15 12:34:56"', '$.timestamp_tz()'); -- should work - jsonb_path_query_tz ------------------------------ - "2023-08-15T02:34:56+00:00" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp_tz()'); - jsonb_path_query ------------------------------ - "2023-08-15T12:34:56+05:30" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI")'); - jsonb_path_query ------------------------ - "2017-03-10T12:34:00" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); -ERROR: input string is too short for datetime format -select jsonb_path_query('"10-03-2017 12:34 +05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:00+05:00" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34 -05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:00-05:00" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:00+05:20" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34 -05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:00-05:20" -(1 row) - -select jsonb_path_query('"12:34"', '$.datetime("HH24:MI")'); - jsonb_path_query ------------------- - "12:34:00" -(1 row) - -select jsonb_path_query('"12:34"', '$.datetime("HH24:MI TZH")'); -ERROR: input string is too short for datetime format -select jsonb_path_query('"12:34 +05"', '$.datetime("HH24:MI TZH")'); - jsonb_path_query ------------------- - "12:34:00+05:00" -(1 row) - -select jsonb_path_query('"12:34 -05"', '$.datetime("HH24:MI TZH")'); - jsonb_path_query ------------------- - "12:34:00-05:00" -(1 row) - -select jsonb_path_query('"12:34 +05:20"', '$.datetime("HH24:MI TZH:TZM")'); - jsonb_path_query ------------------- - "12:34:00+05:20" -(1 row) - -select jsonb_path_query('"12:34 -05:20"', '$.datetime("HH24:MI TZH:TZM")'); - jsonb_path_query ------------------- - "12:34:00-05:20" -(1 row) - -set time zone default; -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.time()'); -ERROR: cannot convert value from timestamptz to time without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.time()'); -- should work - jsonb_path_query_tz ---------------------- - "00:04:56" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.time_tz()'); - jsonb_path_query ------------------- - "00:04:56-07:00" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp()'); -ERROR: cannot convert value from timestamptz to timestamp without time zone usage -HINT: Use *_tz() function for time zone support. 
-select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.timestamp()'); -- should work - jsonb_path_query_tz ------------------------ - "2023-08-15T00:04:56" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp_tz()'); - jsonb_path_query ------------------------------ - "2023-08-15T12:34:56+05:30" -(1 row) - -select jsonb_path_query('"2017-03-10"', '$.datetime().type()'); - jsonb_path_query ------------------- - "date" -(1 row) - -select jsonb_path_query('"2017-03-10"', '$.datetime()'); - jsonb_path_query ------------------- - "2017-03-10" -(1 row) - -select jsonb_path_query('"2017-03-10 12:34:56"', '$.datetime().type()'); - jsonb_path_query -------------------------------- - "timestamp without time zone" -(1 row) - -select jsonb_path_query('"2017-03-10 12:34:56"', '$.datetime()'); - jsonb_path_query ------------------------ - "2017-03-10T12:34:56" -(1 row) - -select jsonb_path_query('"2017-03-10 12:34:56+3"', '$.datetime().type()'); - jsonb_path_query ----------------------------- - "timestamp with time zone" -(1 row) - -select jsonb_path_query('"2017-03-10 12:34:56+3"', '$.datetime()'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:56+03:00" -(1 row) - -select jsonb_path_query('"2017-03-10 12:34:56+3:10"', '$.datetime().type()'); - jsonb_path_query ----------------------------- - "timestamp with time zone" -(1 row) - -select jsonb_path_query('"2017-03-10 12:34:56+3:10"', '$.datetime()'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:56+03:10" -(1 row) - -select jsonb_path_query('"2017-03-10T12:34:56+3:10"', '$.datetime()'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:56+03:10" -(1 row) - -select jsonb_path_query('"2017-03-10t12:34:56+3:10"', '$.datetime()'); -ERROR: datetime format is not recognized: "2017-03-10t12:34:56+3:10" -HINT: Use a datetime template argument to specify the input data format. -select jsonb_path_query('"2017-03-10 12:34:56.789+3:10"', '$.datetime()'); - jsonb_path_query ---------------------------------- - "2017-03-10T12:34:56.789+03:10" -(1 row) - -select jsonb_path_query('"2017-03-10T12:34:56.789+3:10"', '$.datetime()'); - jsonb_path_query ---------------------------------- - "2017-03-10T12:34:56.789+03:10" -(1 row) - -select jsonb_path_query('"2017-03-10t12:34:56.789+3:10"', '$.datetime()'); -ERROR: datetime format is not recognized: "2017-03-10t12:34:56.789+3:10" -HINT: Use a datetime template argument to specify the input data format. 
-select jsonb_path_query('"2017-03-10T12:34:56.789EST"', '$.datetime()'); - jsonb_path_query ---------------------------------- - "2017-03-10T12:34:56.789-05:00" -(1 row) - -select jsonb_path_query('"2017-03-10T12:34:56.789Z"', '$.datetime()'); - jsonb_path_query ---------------------------------- - "2017-03-10T12:34:56.789+00:00" -(1 row) - -select jsonb_path_query('"12:34:56"', '$.datetime().type()'); - jsonb_path_query --------------------------- - "time without time zone" -(1 row) - -select jsonb_path_query('"12:34:56"', '$.datetime()'); - jsonb_path_query ------------------- - "12:34:56" -(1 row) - -select jsonb_path_query('"12:34:56+3"', '$.datetime().type()'); - jsonb_path_query ------------------------ - "time with time zone" -(1 row) - -select jsonb_path_query('"12:34:56+3"', '$.datetime()'); - jsonb_path_query ------------------- - "12:34:56+03:00" -(1 row) - -select jsonb_path_query('"12:34:56+3:10"', '$.datetime().type()'); - jsonb_path_query ------------------------ - "time with time zone" -(1 row) - -select jsonb_path_query('"12:34:56+3:10"', '$.datetime()'); - jsonb_path_query ------------------- - "12:34:56+03:10" -(1 row) - -set time zone '+00'; --- date comparison -select jsonb_path_query( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].datetime() ? (@ == "10.03.2017".datetime("dd.mm.yyyy"))'); -ERROR: cannot convert value from date to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].datetime() ? (@ >= "10.03.2017".datetime("dd.mm.yyyy"))'); -ERROR: cannot convert value from date to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].datetime() ? (@ < "10.03.2017".datetime("dd.mm.yyyy"))'); -ERROR: cannot convert value from date to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].datetime() ? (@ == "10.03.2017".datetime("dd.mm.yyyy"))'); - jsonb_path_query_tz ------------------------------ - "2017-03-10" - "2017-03-10T00:00:00" - "2017-03-10T03:00:00+03:00" -(3 rows) - -select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].datetime() ? (@ >= "10.03.2017".datetime("dd.mm.yyyy"))'); - jsonb_path_query_tz ------------------------------ - "2017-03-10" - "2017-03-11" - "2017-03-10T00:00:00" - "2017-03-10T12:34:56" - "2017-03-10T03:00:00+03:00" -(5 rows) - -select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].datetime() ? 
(@ < "10.03.2017".datetime("dd.mm.yyyy"))'); - jsonb_path_query_tz ------------------------------ - "2017-03-09" - "2017-03-10T01:02:03+04:00" -(2 rows) - -select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].datetime() ? (@ == "2017-03-10".date())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10" - "2017-03-10T00:00:00" - "2017-03-10T03:00:00+03:00" -(3 rows) - -select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].datetime() ? (@ >= "2017-03-10".date())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10" - "2017-03-11" - "2017-03-10T00:00:00" - "2017-03-10T12:34:56" - "2017-03-10T03:00:00+03:00" -(5 rows) - -select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].datetime() ? (@ < "2017-03-10".date())'); - jsonb_path_query_tz ------------------------------ - "2017-03-09" - "2017-03-10T01:02:03+04:00" -(2 rows) - -select jsonb_path_query( - '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].date() ? (@ == "2017-03-10".date())'); -ERROR: cannot convert value from timestamptz to date without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].date() ? (@ >= "2017-03-10".date())'); -ERROR: cannot convert value from timestamptz to date without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].date() ? (@ < "2017-03-10".date())'); -ERROR: cannot convert value from timestamptz to date without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].date() ? (@ == "2017-03-10".date())'); - jsonb_path_query_tz ---------------------- - "2017-03-10" - "2017-03-10" - "2017-03-10" - "2017-03-10" -(4 rows) - -select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].date() ? (@ >= "2017-03-10".date())'); - jsonb_path_query_tz ---------------------- - "2017-03-10" - "2017-03-11" - "2017-03-10" - "2017-03-10" - "2017-03-10" -(5 rows) - -select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].date() ? (@ < "2017-03-10".date())'); - jsonb_path_query_tz ---------------------- - "2017-03-09" - "2017-03-09" -(2 rows) - --- time comparison -select jsonb_path_query( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].datetime() ? 
(@ == "12:35".datetime("HH24:MI"))'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].datetime() ? (@ >= "12:35".datetime("HH24:MI"))'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].datetime() ? (@ < "12:35".datetime("HH24:MI"))'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].datetime() ? (@ == "12:35".datetime("HH24:MI"))'); - jsonb_path_query_tz ---------------------- - "12:35:00" - "12:35:00+00:00" -(2 rows) - -select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].datetime() ? (@ >= "12:35".datetime("HH24:MI"))'); - jsonb_path_query_tz ---------------------- - "12:35:00" - "12:36:00" - "12:35:00+00:00" -(3 rows) - -select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].datetime() ? (@ < "12:35".datetime("HH24:MI"))'); - jsonb_path_query_tz ---------------------- - "12:34:00" - "12:35:00+01:00" - "13:35:00+01:00" -(3 rows) - -select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].datetime() ? (@ == "12:35:00".time())'); - jsonb_path_query_tz ---------------------- - "12:35:00" - "12:35:00+00:00" -(2 rows) - -select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].datetime() ? (@ >= "12:35:00".time())'); - jsonb_path_query_tz ---------------------- - "12:35:00" - "12:36:00" - "12:35:00+00:00" -(3 rows) - -select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].datetime() ? (@ < "12:35:00".time())'); - jsonb_path_query_tz ---------------------- - "12:34:00" - "12:35:00+01:00" - "13:35:00+01:00" -(3 rows) - -select jsonb_path_query( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].time() ? (@ == "12:35:00".time())'); -ERROR: cannot convert value from timetz to time without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].time() ? (@ >= "12:35:00".time())'); -ERROR: cannot convert value from timetz to time without time zone usage -HINT: Use *_tz() function for time zone support. 
-select jsonb_path_query( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].time() ? (@ < "12:35:00".time())'); -ERROR: cannot convert value from timetz to time without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["12:34:00.123", "12:35:00.123", "12:36:00.1123", "12:35:00.1123+00", "12:35:00.123+01", "13:35:00.123+01", "2017-03-10 12:35:00.1", "2017-03-10 12:35:00.123+01"]', - '$[*].time(2) ? (@ >= "12:35:00.123".time(2))'); -ERROR: cannot convert value from timetz to time without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].time() ? (@ == "12:35:00".time())'); - jsonb_path_query_tz ---------------------- - "12:35:00" - "12:35:00" - "12:35:00" - "12:35:00" -(4 rows) - -select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].time() ? (@ >= "12:35:00".time())'); - jsonb_path_query_tz ---------------------- - "12:35:00" - "12:36:00" - "12:35:00" - "12:35:00" - "13:35:00" - "12:35:00" -(6 rows) - -select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].time() ? (@ < "12:35:00".time())'); - jsonb_path_query_tz ---------------------- - "12:34:00" - "11:35:00" -(2 rows) - -select jsonb_path_query_tz( - '["12:34:00.123", "12:35:00.123", "12:36:00.1123", "12:35:00.1123+00", "12:35:00.123+01", "13:35:00.123+01", "2017-03-10 12:35:00.1", "2017-03-10 12:35:00.123+01"]', - '$[*].time(2) ? (@ >= "12:35:00.123".time(2))'); - jsonb_path_query_tz ---------------------- - "12:35:00.12" - "12:36:00.11" - "12:35:00.12" - "13:35:00.12" -(4 rows) - --- timetz comparison -select jsonb_path_query( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].datetime() ? (@ == "12:35 +1".datetime("HH24:MI TZH"))'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].datetime() ? (@ >= "12:35 +1".datetime("HH24:MI TZH"))'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].datetime() ? (@ < "12:35 +1".datetime("HH24:MI TZH"))'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].datetime() ? 
(@ == "12:35 +1".datetime("HH24:MI TZH"))'); - jsonb_path_query_tz ---------------------- - "12:35:00+01:00" -(1 row) - -select jsonb_path_query_tz( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].datetime() ? (@ >= "12:35 +1".datetime("HH24:MI TZH"))'); - jsonb_path_query_tz ---------------------- - "12:35:00+01:00" - "12:36:00+01:00" - "12:35:00-02:00" - "11:35:00" - "12:35:00" -(5 rows) - -select jsonb_path_query_tz( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].datetime() ? (@ < "12:35 +1".datetime("HH24:MI TZH"))'); - jsonb_path_query_tz ---------------------- - "12:34:00+01:00" - "12:35:00+02:00" - "10:35:00" -(3 rows) - -select jsonb_path_query_tz( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].datetime() ? (@ == "12:35:00 +1".time_tz())'); - jsonb_path_query_tz ---------------------- - "12:35:00+01:00" -(1 row) - -select jsonb_path_query_tz( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].datetime() ? (@ >= "12:35:00 +1".time_tz())'); - jsonb_path_query_tz ---------------------- - "12:35:00+01:00" - "12:36:00+01:00" - "12:35:00-02:00" - "11:35:00" - "12:35:00" -(5 rows) - -select jsonb_path_query_tz( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].datetime() ? (@ < "12:35:00 +1".time_tz())'); - jsonb_path_query_tz ---------------------- - "12:34:00+01:00" - "12:35:00+02:00" - "10:35:00" -(3 rows) - -select jsonb_path_query( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].time_tz() ? (@ == "12:35:00 +1".time_tz())'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].time_tz() ? (@ >= "12:35:00 +1".time_tz())'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].time_tz() ? (@ < "12:35:00 +1".time_tz())'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["12:34:00.123+01", "12:35:00.123+01", "12:36:00.1123+01", "12:35:00.1123+02", "12:35:00.123-02", "10:35:00.123", "11:35:00.1", "12:35:00.123", "2017-03-10 12:35:00.123 +1"]', - '$[*].time_tz(2) ? (@ >= "12:35:00.123 +1".time_tz(2))'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].time_tz() ? 
(@ == "12:35:00 +1".time_tz())'); - jsonb_path_query_tz ---------------------- - "12:35:00+01:00" -(1 row) - -select jsonb_path_query_tz( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].time_tz() ? (@ >= "12:35:00 +1".time_tz())'); - jsonb_path_query_tz ---------------------- - "12:35:00+01:00" - "12:36:00+01:00" - "12:35:00-02:00" - "11:35:00+00:00" - "12:35:00+00:00" - "11:35:00+00:00" -(6 rows) - -select jsonb_path_query_tz( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].time_tz() ? (@ < "12:35:00 +1".time_tz())'); - jsonb_path_query_tz ---------------------- - "12:34:00+01:00" - "12:35:00+02:00" - "10:35:00+00:00" -(3 rows) - -select jsonb_path_query_tz( - '["12:34:00.123+01", "12:35:00.123+01", "12:36:00.1123+01", "12:35:00.1123+02", "12:35:00.123-02", "10:35:00.123", "11:35:00.1", "12:35:00.123", "2017-03-10 12:35:00.123 +1"]', - '$[*].time_tz(2) ? (@ >= "12:35:00.123 +1".time_tz(2))'); - jsonb_path_query_tz ---------------------- - "12:35:00.12+01:00" - "12:36:00.11+01:00" - "12:35:00.12-02:00" - "12:35:00.12+00:00" - "11:35:00.12+00:00" -(5 rows) - --- timestamp comparison -select jsonb_path_query( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ == "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ >= "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ < "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ == "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:35:00" - "2017-03-10T13:35:00+01:00" -(2 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? 
(@ >= "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:35:00" - "2017-03-10T12:36:00" - "2017-03-10T13:35:00+01:00" - "2017-03-10T12:35:00-01:00" - "2017-03-11" -(5 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ < "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:34:00" - "2017-03-10T12:35:00+01:00" - "2017-03-10" -(3 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]', - '$[*].datetime() ? (@ == "2017-03-10 12:35:00".timestamp())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:35:00" - "2017-03-10T13:35:00+01:00" -(2 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]', - '$[*].datetime() ? (@ >= "2017-03-10 12:35:00".timestamp())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:35:00" - "2017-03-10T12:36:00" - "2017-03-10T13:35:00+01:00" - "2017-03-10T12:35:00-01:00" - "2017-03-11" -(5 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]', - '$[*].datetime() ? (@ < "2017-03-10 12:35:00".timestamp())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:34:00" - "2017-03-10T12:35:00+01:00" - "2017-03-10" -(3 rows) - -select jsonb_path_query( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]', - '$[*].timestamp() ? (@ == "2017-03-10 12:35:00".timestamp())'); -ERROR: cannot convert value from timestamptz to timestamp without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]', - '$[*].timestamp() ? (@ >= "2017-03-10 12:35:00".timestamp())'); -ERROR: cannot convert value from timestamptz to timestamp without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]', - '$[*].timestamp() ? (@ < "2017-03-10 12:35:00".timestamp())'); -ERROR: cannot convert value from timestamptz to timestamp without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10 12:34:00.123", "2017-03-10 12:35:00.123", "2017-03-10 12:36:00.1123", "2017-03-10 12:35:00.1123+01", "2017-03-10 13:35:00.123+01", "2017-03-10 12:35:00.1-01", "2017-03-10", "2017-03-11"]', - '$[*].timestamp(2) ? 
(@ >= "2017-03-10 12:35:00.123".timestamp(2))'); -ERROR: cannot convert value from timestamptz to timestamp without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]', - '$[*].timestamp() ? (@ == "2017-03-10 12:35:00".timestamp())'); - jsonb_path_query_tz ------------------------ - "2017-03-10T12:35:00" - "2017-03-10T12:35:00" -(2 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]', - '$[*].timestamp() ? (@ >= "2017-03-10 12:35:00".timestamp())'); - jsonb_path_query_tz ------------------------ - "2017-03-10T12:35:00" - "2017-03-10T12:36:00" - "2017-03-10T12:35:00" - "2017-03-10T13:35:00" - "2017-03-11T00:00:00" -(5 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]', - '$[*].timestamp() ? (@ < "2017-03-10 12:35:00".timestamp())'); - jsonb_path_query_tz ------------------------ - "2017-03-10T12:34:00" - "2017-03-10T11:35:00" - "2017-03-10T00:00:00" -(3 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00.123", "2017-03-10 12:35:00.123", "2017-03-10 12:36:00.1123", "2017-03-10 12:35:00.1123+01", "2017-03-10 13:35:00.123+01", "2017-03-10 12:35:00.1-01", "2017-03-10", "2017-03-11"]', - '$[*].timestamp(2) ? (@ >= "2017-03-10 12:35:00.123".timestamp(2))'); - jsonb_path_query_tz --------------------------- - "2017-03-10T12:35:00.12" - "2017-03-10T12:36:00.11" - "2017-03-10T12:35:00.12" - "2017-03-10T13:35:00.1" - "2017-03-11T00:00:00" -(5 rows) - --- timestamptz comparison -select jsonb_path_query( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ == "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ >= "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ < "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. 
-select jsonb_path_query_tz( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ == "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:35:00+01:00" - "2017-03-10T11:35:00" -(2 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ >= "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:35:00+01:00" - "2017-03-10T12:36:00+01:00" - "2017-03-10T12:35:00-02:00" - "2017-03-10T11:35:00" - "2017-03-10T12:35:00" - "2017-03-11" -(6 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ < "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:34:00+01:00" - "2017-03-10T12:35:00+02:00" - "2017-03-10T10:35:00" - "2017-03-10" -(4 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]', - '$[*].datetime() ? (@ == "2017-03-10 12:35:00 +1".timestamp_tz())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:35:00+01:00" - "2017-03-10T11:35:00" -(2 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]', - '$[*].datetime() ? (@ >= "2017-03-10 12:35:00 +1".timestamp_tz())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:35:00+01:00" - "2017-03-10T12:36:00+01:00" - "2017-03-10T12:35:00-02:00" - "2017-03-10T11:35:00" - "2017-03-10T12:35:00" - "2017-03-11" -(6 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]', - '$[*].datetime() ? (@ < "2017-03-10 12:35:00 +1".timestamp_tz())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:34:00+01:00" - "2017-03-10T12:35:00+02:00" - "2017-03-10T10:35:00" - "2017-03-10" -(4 rows) - -select jsonb_path_query( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]', - '$[*].timestamp_tz() ? 
(@ == "2017-03-10 12:35:00 +1".timestamp_tz())'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]', - '$[*].timestamp_tz() ? (@ >= "2017-03-10 12:35:00 +1".timestamp_tz())'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]', - '$[*].timestamp_tz() ? (@ < "2017-03-10 12:35:00 +1".timestamp_tz())'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10 12:34:00.123+01", "2017-03-10 12:35:00.123+01", "2017-03-10 12:36:00.1123+01", "2017-03-10 12:35:00.1123+02", "2017-03-10 12:35:00.123-02", "2017-03-10 10:35:00.123", "2017-03-10 11:35:00.1", "2017-03-10 12:35:00.123", "2017-03-10", "2017-03-11"]', - '$[*].timestamp_tz(2) ? (@ >= "2017-03-10 12:35:00.123 +1".timestamp_tz(2))'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]', - '$[*].timestamp_tz() ? (@ == "2017-03-10 12:35:00 +1".timestamp_tz())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:35:00+01:00" - "2017-03-10T11:35:00+00:00" -(2 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]', - '$[*].timestamp_tz() ? (@ >= "2017-03-10 12:35:00 +1".timestamp_tz())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:35:00+01:00" - "2017-03-10T12:36:00+01:00" - "2017-03-10T12:35:00-02:00" - "2017-03-10T11:35:00+00:00" - "2017-03-10T12:35:00+00:00" - "2017-03-11T00:00:00+00:00" -(6 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]', - '$[*].timestamp_tz() ? (@ < "2017-03-10 12:35:00 +1".timestamp_tz())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:34:00+01:00" - "2017-03-10T12:35:00+02:00" - "2017-03-10T10:35:00+00:00" - "2017-03-10T00:00:00+00:00" -(4 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00.123+01", "2017-03-10 12:35:00.123+01", "2017-03-10 12:36:00.1123+01", "2017-03-10 12:35:00.1123+02", "2017-03-10 12:35:00.123-02", "2017-03-10 10:35:00.123", "2017-03-10 11:35:00.1", "2017-03-10 12:35:00.123", "2017-03-10", "2017-03-11"]', - '$[*].timestamp_tz(2) ? 
(@ >= "2017-03-10 12:35:00.123 +1".timestamp_tz(2))'); - jsonb_path_query_tz --------------------------------- - "2017-03-10T12:35:00.12+01:00" - "2017-03-10T12:36:00.11+01:00" - "2017-03-10T12:35:00.12-02:00" - "2017-03-10T12:35:00.12+00:00" - "2017-03-11T00:00:00+00:00" -(5 rows) - --- overflow during comparison -select jsonb_path_query('"1000000-01-01"', '$.datetime() > "2020-01-01 12:00:00".datetime()'::jsonpath); - jsonb_path_query ------------------- - true -(1 row) - -set time zone default; --- jsonpath operators -SELECT jsonb_path_query('[{"a": 1}, {"a": 2}]', '$[*]'); - jsonb_path_query ------------------- - {"a": 1} - {"a": 2} -(2 rows) - -SELECT jsonb_path_query('[{"a": 1}, {"a": 2}]', '$[*] ? (@.a > 10)'); - jsonb_path_query ------------------- -(0 rows) - -SELECT jsonb_path_query('[{"a": 1}]', '$undefined_var'); -ERROR: could not find jsonpath variable "undefined_var" -SELECT jsonb_path_query('[{"a": 1}]', 'false'); - jsonb_path_query ------------------- - false -(1 row) - -SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a'); -ERROR: JSON object does not contain key "a" -SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a'); - jsonb_path_query_array ------------------------- - [1, 2] -(1 row) - -SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ == 1)'); - jsonb_path_query_array ------------------------- - [1] -(1 row) - -SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 10)'); - jsonb_path_query_array ------------------------- - [] -(1 row) - -SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 1, "max": 4}'); - jsonb_path_query_array ------------------------- - [2, 3] -(1 row) - -SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 3, "max": 4}'); - jsonb_path_query_array ------------------------- - [] -(1 row) - -SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a'); -ERROR: JSON object does not contain key "a" -SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a', silent => true); - jsonb_path_query_first ------------------------- - 1 -(1 row) - -SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a'); - jsonb_path_query_first ------------------------- - 1 -(1 row) - -SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ == 1)'); - jsonb_path_query_first ------------------------- - 1 -(1 row) - -SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 10)'); - jsonb_path_query_first ------------------------- - -(1 row) - -SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 1, "max": 4}'); - jsonb_path_query_first ------------------------- - 2 -(1 row) - -SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 3, "max": 4}'); - jsonb_path_query_first ------------------------- - -(1 row) - -SELECT jsonb_path_query_first('[{"a": 1}]', '$undefined_var'); -ERROR: could not find jsonpath variable "undefined_var" -SELECT jsonb_path_query_first('[{"a": 1}]', 'false'); - jsonb_path_query_first ------------------------- - false -(1 row) - -SELECT jsonb '[{"a": 1}, {"a": 2}]' @? '$[*].a ? (@ > 1)'; - ?column? ----------- - t -(1 row) - -SELECT jsonb '[{"a": 1}, {"a": 2}]' @? '$[*] ? (@.a > 2)'; - ?column? 
----------- - f -(1 row) - -SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 1)'); - jsonb_path_exists -------------------- - t -(1 row) - -SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*] ? (@.a > $min && @.a < $max)', vars => '{"min": 1, "max": 4}'); - jsonb_path_exists -------------------- - t -(1 row) - -SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*] ? (@.a > $min && @.a < $max)', vars => '{"min": 3, "max": 4}'); - jsonb_path_exists -------------------- - f -(1 row) - -SELECT jsonb_path_exists('[{"a": 1}]', '$undefined_var'); -ERROR: could not find jsonpath variable "undefined_var" -SELECT jsonb_path_exists('[{"a": 1}]', 'false'); - jsonb_path_exists -------------------- - t -(1 row) - -SELECT jsonb_path_match('true', '$', silent => false); - jsonb_path_match ------------------- - t -(1 row) - -SELECT jsonb_path_match('false', '$', silent => false); - jsonb_path_match ------------------- - f -(1 row) - -SELECT jsonb_path_match('null', '$', silent => false); - jsonb_path_match ------------------- - -(1 row) - -SELECT jsonb_path_match('1', '$', silent => true); - jsonb_path_match ------------------- - -(1 row) - -SELECT jsonb_path_match('1', '$', silent => false); -ERROR: single boolean result is expected -SELECT jsonb_path_match('"a"', '$', silent => false); -ERROR: single boolean result is expected -SELECT jsonb_path_match('{}', '$', silent => false); -ERROR: single boolean result is expected -SELECT jsonb_path_match('[true]', '$', silent => false); -ERROR: single boolean result is expected -SELECT jsonb_path_match('{}', 'lax $.a', silent => false); -ERROR: single boolean result is expected -SELECT jsonb_path_match('{}', 'strict $.a', silent => false); -ERROR: JSON object does not contain key "a" -SELECT jsonb_path_match('{}', 'strict $.a', silent => true); - jsonb_path_match ------------------- - -(1 row) - -SELECT jsonb_path_match('[true, true]', '$[*]', silent => false); -ERROR: single boolean result is expected -SELECT jsonb '[{"a": 1}, {"a": 2}]' @@ '$[*].a > 1'; - ?column? ----------- - t -(1 row) - -SELECT jsonb '[{"a": 1}, {"a": 2}]' @@ '$[*].a > 2'; - ?column? 
----------- - f -(1 row) - -SELECT jsonb_path_match('[{"a": 1}, {"a": 2}]', '$[*].a > 1'); - jsonb_path_match ------------------- - t -(1 row) - -SELECT jsonb_path_match('[{"a": 1}]', '$undefined_var'); -ERROR: could not find jsonpath variable "undefined_var" -SELECT jsonb_path_match('[{"a": 1}]', 'false'); - jsonb_path_match ------------------- - f -(1 row) - --- test string comparison (Unicode codepoint collation) -WITH str(j, num) AS -( - SELECT jsonb_build_object('s', s), num - FROM unnest('{"", "a", "ab", "abc", "abcd", "b", "A", "AB", "ABC", "ABc", "ABcD", "B"}'::text[]) WITH ORDINALITY AS a(s, num) -) -SELECT - s1.j, s2.j, - jsonb_path_query_first(s1.j, '$.s < $s', vars => s2.j) lt, - jsonb_path_query_first(s1.j, '$.s <= $s', vars => s2.j) le, - jsonb_path_query_first(s1.j, '$.s == $s', vars => s2.j) eq, - jsonb_path_query_first(s1.j, '$.s >= $s', vars => s2.j) ge, - jsonb_path_query_first(s1.j, '$.s > $s', vars => s2.j) gt -FROM str s1, str s2 -ORDER BY s1.num, s2.num; - j | j | lt | le | eq | ge | gt ----------------+---------------+-------+-------+-------+-------+------- - {"s": ""} | {"s": ""} | false | true | true | true | false - {"s": ""} | {"s": "a"} | true | true | false | false | false - {"s": ""} | {"s": "ab"} | true | true | false | false | false - {"s": ""} | {"s": "abc"} | true | true | false | false | false - {"s": ""} | {"s": "abcd"} | true | true | false | false | false - {"s": ""} | {"s": "b"} | true | true | false | false | false - {"s": ""} | {"s": "A"} | true | true | false | false | false - {"s": ""} | {"s": "AB"} | true | true | false | false | false - {"s": ""} | {"s": "ABC"} | true | true | false | false | false - {"s": ""} | {"s": "ABc"} | true | true | false | false | false - {"s": ""} | {"s": "ABcD"} | true | true | false | false | false - {"s": ""} | {"s": "B"} | true | true | false | false | false - {"s": "a"} | {"s": ""} | false | false | false | true | true - {"s": "a"} | {"s": "a"} | false | true | true | true | false - {"s": "a"} | {"s": "ab"} | true | true | false | false | false - {"s": "a"} | {"s": "abc"} | true | true | false | false | false - {"s": "a"} | {"s": "abcd"} | true | true | false | false | false - {"s": "a"} | {"s": "b"} | true | true | false | false | false - {"s": "a"} | {"s": "A"} | false | false | false | true | true - {"s": "a"} | {"s": "AB"} | false | false | false | true | true - {"s": "a"} | {"s": "ABC"} | false | false | false | true | true - {"s": "a"} | {"s": "ABc"} | false | false | false | true | true - {"s": "a"} | {"s": "ABcD"} | false | false | false | true | true - {"s": "a"} | {"s": "B"} | false | false | false | true | true - {"s": "ab"} | {"s": ""} | false | false | false | true | true - {"s": "ab"} | {"s": "a"} | false | false | false | true | true - {"s": "ab"} | {"s": "ab"} | false | true | true | true | false - {"s": "ab"} | {"s": "abc"} | true | true | false | false | false - {"s": "ab"} | {"s": "abcd"} | true | true | false | false | false - {"s": "ab"} | {"s": "b"} | true | true | false | false | false - {"s": "ab"} | {"s": "A"} | false | false | false | true | true - {"s": "ab"} | {"s": "AB"} | false | false | false | true | true - {"s": "ab"} | {"s": "ABC"} | false | false | false | true | true - {"s": "ab"} | {"s": "ABc"} | false | false | false | true | true - {"s": "ab"} | {"s": "ABcD"} | false | false | false | true | true - {"s": "ab"} | {"s": "B"} | false | false | false | true | true - {"s": "abc"} | {"s": ""} | false | false | false | true | true - {"s": "abc"} | {"s": "a"} | false | false | false 
| true | true - {"s": "abc"} | {"s": "ab"} | false | false | false | true | true - {"s": "abc"} | {"s": "abc"} | false | true | true | true | false - {"s": "abc"} | {"s": "abcd"} | true | true | false | false | false - {"s": "abc"} | {"s": "b"} | true | true | false | false | false - {"s": "abc"} | {"s": "A"} | false | false | false | true | true - {"s": "abc"} | {"s": "AB"} | false | false | false | true | true - {"s": "abc"} | {"s": "ABC"} | false | false | false | true | true - {"s": "abc"} | {"s": "ABc"} | false | false | false | true | true - {"s": "abc"} | {"s": "ABcD"} | false | false | false | true | true - {"s": "abc"} | {"s": "B"} | false | false | false | true | true - {"s": "abcd"} | {"s": ""} | false | false | false | true | true - {"s": "abcd"} | {"s": "a"} | false | false | false | true | true - {"s": "abcd"} | {"s": "ab"} | false | false | false | true | true - {"s": "abcd"} | {"s": "abc"} | false | false | false | true | true - {"s": "abcd"} | {"s": "abcd"} | false | true | true | true | false - {"s": "abcd"} | {"s": "b"} | true | true | false | false | false - {"s": "abcd"} | {"s": "A"} | false | false | false | true | true - {"s": "abcd"} | {"s": "AB"} | false | false | false | true | true - {"s": "abcd"} | {"s": "ABC"} | false | false | false | true | true - {"s": "abcd"} | {"s": "ABc"} | false | false | false | true | true - {"s": "abcd"} | {"s": "ABcD"} | false | false | false | true | true - {"s": "abcd"} | {"s": "B"} | false | false | false | true | true - {"s": "b"} | {"s": ""} | false | false | false | true | true - {"s": "b"} | {"s": "a"} | false | false | false | true | true - {"s": "b"} | {"s": "ab"} | false | false | false | true | true - {"s": "b"} | {"s": "abc"} | false | false | false | true | true - {"s": "b"} | {"s": "abcd"} | false | false | false | true | true - {"s": "b"} | {"s": "b"} | false | true | true | true | false - {"s": "b"} | {"s": "A"} | false | false | false | true | true - {"s": "b"} | {"s": "AB"} | false | false | false | true | true - {"s": "b"} | {"s": "ABC"} | false | false | false | true | true - {"s": "b"} | {"s": "ABc"} | false | false | false | true | true - {"s": "b"} | {"s": "ABcD"} | false | false | false | true | true - {"s": "b"} | {"s": "B"} | false | false | false | true | true - {"s": "A"} | {"s": ""} | false | false | false | true | true - {"s": "A"} | {"s": "a"} | true | true | false | false | false - {"s": "A"} | {"s": "ab"} | true | true | false | false | false - {"s": "A"} | {"s": "abc"} | true | true | false | false | false - {"s": "A"} | {"s": "abcd"} | true | true | false | false | false - {"s": "A"} | {"s": "b"} | true | true | false | false | false - {"s": "A"} | {"s": "A"} | false | true | true | true | false - {"s": "A"} | {"s": "AB"} | true | true | false | false | false - {"s": "A"} | {"s": "ABC"} | true | true | false | false | false - {"s": "A"} | {"s": "ABc"} | true | true | false | false | false - {"s": "A"} | {"s": "ABcD"} | true | true | false | false | false - {"s": "A"} | {"s": "B"} | true | true | false | false | false - {"s": "AB"} | {"s": ""} | false | false | false | true | true - {"s": "AB"} | {"s": "a"} | true | true | false | false | false - {"s": "AB"} | {"s": "ab"} | true | true | false | false | false - {"s": "AB"} | {"s": "abc"} | true | true | false | false | false - {"s": "AB"} | {"s": "abcd"} | true | true | false | false | false - {"s": "AB"} | {"s": "b"} | true | true | false | false | false - {"s": "AB"} | {"s": "A"} | false | false | false | true | true - {"s": "AB"} | {"s": "AB"} | 
false | true | true | true | false - {"s": "AB"} | {"s": "ABC"} | true | true | false | false | false - {"s": "AB"} | {"s": "ABc"} | true | true | false | false | false - {"s": "AB"} | {"s": "ABcD"} | true | true | false | false | false - {"s": "AB"} | {"s": "B"} | true | true | false | false | false - {"s": "ABC"} | {"s": ""} | false | false | false | true | true - {"s": "ABC"} | {"s": "a"} | true | true | false | false | false - {"s": "ABC"} | {"s": "ab"} | true | true | false | false | false - {"s": "ABC"} | {"s": "abc"} | true | true | false | false | false - {"s": "ABC"} | {"s": "abcd"} | true | true | false | false | false - {"s": "ABC"} | {"s": "b"} | true | true | false | false | false - {"s": "ABC"} | {"s": "A"} | false | false | false | true | true - {"s": "ABC"} | {"s": "AB"} | false | false | false | true | true - {"s": "ABC"} | {"s": "ABC"} | false | true | true | true | false - {"s": "ABC"} | {"s": "ABc"} | true | true | false | false | false - {"s": "ABC"} | {"s": "ABcD"} | true | true | false | false | false - {"s": "ABC"} | {"s": "B"} | true | true | false | false | false - {"s": "ABc"} | {"s": ""} | false | false | false | true | true - {"s": "ABc"} | {"s": "a"} | true | true | false | false | false - {"s": "ABc"} | {"s": "ab"} | true | true | false | false | false - {"s": "ABc"} | {"s": "abc"} | true | true | false | false | false - {"s": "ABc"} | {"s": "abcd"} | true | true | false | false | false - {"s": "ABc"} | {"s": "b"} | true | true | false | false | false - {"s": "ABc"} | {"s": "A"} | false | false | false | true | true - {"s": "ABc"} | {"s": "AB"} | false | false | false | true | true - {"s": "ABc"} | {"s": "ABC"} | false | false | false | true | true - {"s": "ABc"} | {"s": "ABc"} | false | true | true | true | false - {"s": "ABc"} | {"s": "ABcD"} | true | true | false | false | false - {"s": "ABc"} | {"s": "B"} | true | true | false | false | false - {"s": "ABcD"} | {"s": ""} | false | false | false | true | true - {"s": "ABcD"} | {"s": "a"} | true | true | false | false | false - {"s": "ABcD"} | {"s": "ab"} | true | true | false | false | false - {"s": "ABcD"} | {"s": "abc"} | true | true | false | false | false - {"s": "ABcD"} | {"s": "abcd"} | true | true | false | false | false - {"s": "ABcD"} | {"s": "b"} | true | true | false | false | false - {"s": "ABcD"} | {"s": "A"} | false | false | false | true | true - {"s": "ABcD"} | {"s": "AB"} | false | false | false | true | true - {"s": "ABcD"} | {"s": "ABC"} | false | false | false | true | true - {"s": "ABcD"} | {"s": "ABc"} | false | false | false | true | true - {"s": "ABcD"} | {"s": "ABcD"} | false | true | true | true | false - {"s": "ABcD"} | {"s": "B"} | true | true | false | false | false - {"s": "B"} | {"s": ""} | false | false | false | true | true - {"s": "B"} | {"s": "a"} | true | true | false | false | false - {"s": "B"} | {"s": "ab"} | true | true | false | false | false - {"s": "B"} | {"s": "abc"} | true | true | false | false | false - {"s": "B"} | {"s": "abcd"} | true | true | false | false | false - {"s": "B"} | {"s": "b"} | true | true | false | false | false - {"s": "B"} | {"s": "A"} | false | false | false | true | true - {"s": "B"} | {"s": "AB"} | false | false | false | true | true - {"s": "B"} | {"s": "ABC"} | false | false | false | true | true - {"s": "B"} | {"s": "ABc"} | false | false | false | true | true - {"s": "B"} | {"s": "ABcD"} | false | false | false | true | true - {"s": "B"} | {"s": "B"} | false | true | true | true | false -(144 rows) - +psql: error: connection to server 
on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/sqljson.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/sqljson.out --- /tmp/cirrus-ci-build/src/test/regress/expected/sqljson.out 2024-03-07 14:25:00.333911000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/sqljson.out 2024-03-07 14:27:17.533672000 +0000 @@ -1,1280 +1,2 @@ --- JSON() -SELECT JSON(); -ERROR: syntax error at or near ")" -LINE 1: SELECT JSON(); - ^ -SELECT JSON(NULL); - json ------- - -(1 row) - -SELECT JSON('{ "a" : 1 } '); - json --------------- - { "a" : 1 } -(1 row) - -SELECT JSON('{ "a" : 1 } ' FORMAT JSON); - json --------------- - { "a" : 1 } -(1 row) - -SELECT JSON('{ "a" : 1 } ' FORMAT JSON ENCODING UTF8); -ERROR: JSON ENCODING clause is only allowed for bytea input type -LINE 1: SELECT JSON('{ "a" : 1 } ' FORMAT JSON ENCODING UTF8); - ^ -SELECT JSON('{ "a" : 1 } '::bytea FORMAT JSON ENCODING UTF8); - json --------------- - { "a" : 1 } -(1 row) - -SELECT pg_typeof(JSON('{ "a" : 1 } ')); - pg_typeof ------------ - json -(1 row) - -SELECT JSON(' 1 '::json); - json ---------- - 1 -(1 row) - -SELECT JSON(' 1 '::jsonb); - json ------- - 1 -(1 row) - -SELECT JSON(' 1 '::json WITH UNIQUE KEYS); -ERROR: cannot use non-string types with WITH UNIQUE KEYS clause -LINE 1: SELECT JSON(' 1 '::json WITH UNIQUE KEYS); - ^ -SELECT JSON(123); -ERROR: cannot cast type integer to json -LINE 1: SELECT JSON(123); - ^ -SELECT JSON('{"a": 1, "a": 2}'); - json ------------------- - {"a": 1, "a": 2} -(1 row) - -SELECT JSON('{"a": 1, "a": 2}' WITH UNIQUE KEYS); -ERROR: duplicate JSON object key value -SELECT JSON('{"a": 1, "a": 2}' WITHOUT UNIQUE KEYS); - json ------------------- - {"a": 1, "a": 2} -(1 row) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123'); - QUERY PLAN ------------------------------ - Result - Output: JSON('123'::json) -(2 rows) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123' FORMAT JSON); - QUERY PLAN ------------------------------ - Result - Output: JSON('123'::json) -(2 rows) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123'::bytea FORMAT JSON); - QUERY PLAN ------------------------------------------------ - Result - Output: JSON('\x313233'::bytea FORMAT JSON) -(2 rows) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123'::bytea FORMAT JSON ENCODING UTF8); - QUERY PLAN -------------------------------------------------------------- - Result - Output: JSON('\x313233'::bytea FORMAT JSON ENCODING UTF8) -(2 rows) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123' WITH UNIQUE KEYS); - QUERY PLAN ----------------------------------------------- - Result - Output: JSON('123'::text WITH UNIQUE KEYS) -(2 rows) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123' WITHOUT UNIQUE KEYS); - QUERY PLAN ------------------------------ - Result - Output: JSON('123'::json) -(2 rows) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123'); - QUERY PLAN ------------------------------ - Result - Output: JSON('123'::json) -(2 rows) - -SELECT pg_typeof(JSON('123')); - pg_typeof ------------ - json -(1 row) - --- JSON_SCALAR() -SELECT JSON_SCALAR(); -ERROR: syntax error at or near ")" -LINE 1: SELECT JSON_SCALAR(); - ^ -SELECT JSON_SCALAR(NULL); - json_scalar -------------- - -(1 row) - -SELECT JSON_SCALAR(NULL::int); - json_scalar -------------- - -(1 row) - -SELECT JSON_SCALAR(123); - json_scalar -------------- - 123 -(1 row) - -SELECT 
JSON_SCALAR(123.45); - json_scalar -------------- - 123.45 -(1 row) - -SELECT JSON_SCALAR(123.45::numeric); - json_scalar -------------- - 123.45 -(1 row) - -SELECT JSON_SCALAR(true); - json_scalar -------------- - true -(1 row) - -SELECT JSON_SCALAR(false); - json_scalar -------------- - false -(1 row) - -SELECT JSON_SCALAR(' 123.45'); - json_scalar -------------- - " 123.45" -(1 row) - -SELECT JSON_SCALAR('2020-06-07'::date); - json_scalar --------------- - "2020-06-07" -(1 row) - -SELECT JSON_SCALAR('2020-06-07 01:02:03'::timestamp); - json_scalar ------------------------ - "2020-06-07T01:02:03" -(1 row) - -SELECT JSON_SCALAR('{}'::json); - json_scalar -------------- - {} -(1 row) - -SELECT JSON_SCALAR('{}'::jsonb); - json_scalar -------------- - {} -(1 row) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON_SCALAR(123); - QUERY PLAN ----------------------------- - Result - Output: JSON_SCALAR(123) -(2 rows) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON_SCALAR('123'); - QUERY PLAN ------------------------------------- - Result - Output: JSON_SCALAR('123'::text) -(2 rows) - --- JSON_SERIALIZE() -SELECT JSON_SERIALIZE(); -ERROR: syntax error at or near ")" -LINE 1: SELECT JSON_SERIALIZE(); - ^ -SELECT JSON_SERIALIZE(NULL); - json_serialize ----------------- - -(1 row) - -SELECT JSON_SERIALIZE(JSON('{ "a" : 1 } ')); - json_serialize ----------------- - { "a" : 1 } -(1 row) - -SELECT JSON_SERIALIZE('{ "a" : 1 } '); - json_serialize ----------------- - { "a" : 1 } -(1 row) - -SELECT JSON_SERIALIZE('1'); - json_serialize ----------------- - 1 -(1 row) - -SELECT JSON_SERIALIZE('1' FORMAT JSON); - json_serialize ----------------- - 1 -(1 row) - -SELECT JSON_SERIALIZE('{ "a" : 1 } ' RETURNING bytea); - json_serialize ----------------------------- - \x7b20226122203a2031207d20 -(1 row) - -SELECT JSON_SERIALIZE('{ "a" : 1 } ' RETURNING varchar); - json_serialize ----------------- - { "a" : 1 } -(1 row) - -SELECT pg_typeof(JSON_SERIALIZE(NULL)); - pg_typeof ------------ - text -(1 row) - --- only string types or bytea allowed -SELECT JSON_SERIALIZE('{ "a" : 1 } ' RETURNING jsonb); -ERROR: cannot use RETURNING type jsonb in JSON_SERIALIZE() -HINT: Try returning a string type or bytea. -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON_SERIALIZE('{}'); - QUERY PLAN ------------------------------------------------------ - Result - Output: JSON_SERIALIZE('{}'::json RETURNING text) -(2 rows) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON_SERIALIZE('{}' RETURNING bytea); - QUERY PLAN ------------------------------------------------------- - Result - Output: JSON_SERIALIZE('{}'::json RETURNING bytea) -(2 rows) - --- JSON_OBJECT() -SELECT JSON_OBJECT(); - json_object -------------- - {} -(1 row) - -SELECT JSON_OBJECT(RETURNING json); - json_object -------------- - {} -(1 row) - -SELECT JSON_OBJECT(RETURNING json FORMAT JSON); - json_object -------------- - {} -(1 row) - -SELECT JSON_OBJECT(RETURNING jsonb); - json_object -------------- - {} -(1 row) - -SELECT JSON_OBJECT(RETURNING jsonb FORMAT JSON); - json_object -------------- - {} -(1 row) - -SELECT JSON_OBJECT(RETURNING text); - json_object -------------- - {} -(1 row) - -SELECT JSON_OBJECT(RETURNING text FORMAT JSON); - json_object -------------- - {} -(1 row) - -SELECT JSON_OBJECT(RETURNING text FORMAT JSON ENCODING UTF8); -ERROR: cannot set JSON encoding for non-bytea output types -LINE 1: SELECT JSON_OBJECT(RETURNING text FORMAT JSON ENCODING UTF8)... 
- ^ -SELECT JSON_OBJECT(RETURNING text FORMAT JSON ENCODING INVALID_ENCODING); -ERROR: unrecognized JSON encoding: invalid_encoding -SELECT JSON_OBJECT(RETURNING bytea); - json_object -------------- - \x7b7d -(1 row) - -SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON); - json_object -------------- - \x7b7d -(1 row) - -SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON ENCODING UTF8); - json_object -------------- - \x7b7d -(1 row) - -SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON ENCODING UTF16); -ERROR: unsupported JSON encoding -LINE 1: SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON ENCODING UTF1... - ^ -HINT: Only UTF8 JSON encoding is supported. -SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON ENCODING UTF32); -ERROR: unsupported JSON encoding -LINE 1: SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON ENCODING UTF3... - ^ -HINT: Only UTF8 JSON encoding is supported. -SELECT JSON_OBJECT('foo': NULL::int FORMAT JSON); -ERROR: cannot use non-string types with explicit FORMAT JSON clause -LINE 1: SELECT JSON_OBJECT('foo': NULL::int FORMAT JSON); - ^ -SELECT JSON_OBJECT('foo': NULL::int FORMAT JSON ENCODING UTF8); -ERROR: JSON ENCODING clause is only allowed for bytea input type -LINE 1: SELECT JSON_OBJECT('foo': NULL::int FORMAT JSON ENCODING UTF... - ^ -SELECT JSON_OBJECT('foo': NULL::json FORMAT JSON); - json_object ----------------- - {"foo" : null} -(1 row) - -SELECT JSON_OBJECT('foo': NULL::json FORMAT JSON ENCODING UTF8); -ERROR: JSON ENCODING clause is only allowed for bytea input type -LINE 1: SELECT JSON_OBJECT('foo': NULL::json FORMAT JSON ENCODING UT... - ^ -SELECT JSON_OBJECT('foo': NULL::jsonb FORMAT JSON); - json_object ---------------- - {"foo": null} -(1 row) - -SELECT JSON_OBJECT('foo': NULL::jsonb FORMAT JSON ENCODING UTF8); -ERROR: JSON ENCODING clause is only allowed for bytea input type -LINE 1: SELECT JSON_OBJECT('foo': NULL::jsonb FORMAT JSON ENCODING U... 
- ^ -SELECT JSON_OBJECT(NULL: 1); -ERROR: null value not allowed for object key -SELECT JSON_OBJECT('a': 2 + 3); - json_object -------------- - {"a" : 5} -(1 row) - -SELECT JSON_OBJECT('a' VALUE 2 + 3); - json_object -------------- - {"a" : 5} -(1 row) - ---SELECT JSON_OBJECT(KEY 'a' VALUE 2 + 3); -SELECT JSON_OBJECT('a' || 2: 1); - json_object -------------- - {"a2" : 1} -(1 row) - -SELECT JSON_OBJECT(('a' || 2) VALUE 1); - json_object -------------- - {"a2" : 1} -(1 row) - ---SELECT JSON_OBJECT('a' || 2 VALUE 1); ---SELECT JSON_OBJECT(KEY 'a' || 2 VALUE 1); -SELECT JSON_OBJECT('a': 2::text); - json_object -------------- - {"a" : "2"} -(1 row) - -SELECT JSON_OBJECT('a' VALUE 2::text); - json_object -------------- - {"a" : "2"} -(1 row) - ---SELECT JSON_OBJECT(KEY 'a' VALUE 2::text); -SELECT JSON_OBJECT(1::text: 2); - json_object -------------- - {"1" : 2} -(1 row) - -SELECT JSON_OBJECT((1::text) VALUE 2); - json_object -------------- - {"1" : 2} -(1 row) - ---SELECT JSON_OBJECT(1::text VALUE 2); ---SELECT JSON_OBJECT(KEY 1::text VALUE 2); -SELECT JSON_OBJECT(json '[1]': 123); -ERROR: key value must be scalar, not array, composite, or json -SELECT JSON_OBJECT(ARRAY[1,2,3]: 'aaa'); -ERROR: key value must be scalar, not array, composite, or json -SELECT JSON_OBJECT( - 'a': '123', - 1.23: 123, - 'c': json '[ 1,true,{ } ]', - 'd': jsonb '{ "x" : 123.45 }' -); - json_object -------------------------------------------------------------------- - {"a": "123", "c": [1, true, {}], "d": {"x": 123.45}, "1.23": 123} -(1 row) - -SELECT JSON_OBJECT( - 'a': '123', - 1.23: 123, - 'c': json '[ 1,true,{ } ]', - 'd': jsonb '{ "x" : 123.45 }' - RETURNING jsonb -); - json_object -------------------------------------------------------------------- - {"a": "123", "c": [1, true, {}], "d": {"x": 123.45}, "1.23": 123} -(1 row) - -/* -SELECT JSON_OBJECT( - 'a': '123', - KEY 1.23 VALUE 123, - 'c' VALUE json '[1, true, {}]' -); -*/ -SELECT JSON_OBJECT('a': '123', 'b': JSON_OBJECT('a': 111, 'b': 'aaa')); - json_object ------------------------------------------------ - {"a" : "123", "b" : {"a" : 111, "b" : "aaa"}} -(1 row) - -SELECT JSON_OBJECT('a': '123', 'b': JSON_OBJECT('a': 111, 'b': 'aaa' RETURNING jsonb)); - json_object -------------------------------------------- - {"a": "123", "b": {"a": 111, "b": "aaa"}} -(1 row) - -SELECT JSON_OBJECT('a': JSON_OBJECT('b': 1 RETURNING text)); - json_object ------------------------ - {"a" : "{\"b\" : 1}"} -(1 row) - -SELECT JSON_OBJECT('a': JSON_OBJECT('b': 1 RETURNING text) FORMAT JSON); - json_object -------------------- - {"a" : {"b" : 1}} -(1 row) - -SELECT JSON_OBJECT('a': JSON_OBJECT('b': 1 RETURNING bytea)); - json_object ---------------------------------- - {"a" : "\\x7b226222203a20317d"} -(1 row) - -SELECT JSON_OBJECT('a': JSON_OBJECT('b': 1 RETURNING bytea) FORMAT JSON); - json_object -------------------- - {"a" : {"b" : 1}} -(1 row) - -SELECT JSON_OBJECT('a': '1', 'b': NULL, 'c': 2); - json_object ----------------------------------- - {"a" : "1", "b" : null, "c" : 2} -(1 row) - -SELECT JSON_OBJECT('a': '1', 'b': NULL, 'c': 2 NULL ON NULL); - json_object ----------------------------------- - {"a" : "1", "b" : null, "c" : 2} -(1 row) - -SELECT JSON_OBJECT('a': '1', 'b': NULL, 'c': 2 ABSENT ON NULL); - json_object ----------------------- - {"a" : "1", "c" : 2} -(1 row) - -SELECT JSON_OBJECT(1: 1, '1': NULL WITH UNIQUE); -ERROR: duplicate JSON object key value: "1" -SELECT JSON_OBJECT(1: 1, '1': NULL ABSENT ON NULL WITH UNIQUE); -ERROR: duplicate JSON object key value: "1" 
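The WITH UNIQUE errors above fire on the key text before the ON NULL clause removes anything, which is why ABSENT ON NULL does not rescue the duplicate-key cases. Without the uniqueness check, the two return types diverge, as the next hunk shows for the multi-key case: json is kept as text and preserves duplicate keys as written, while jsonb collapses them, keeping the last value per key. A minimal sketch of that difference (a hypothetical two-key example, not taken from the diff):

  SELECT JSON_OBJECT('k': 1, 'k': 2 RETURNING json);   -- {"k" : 1, "k" : 2}
  SELECT JSON_OBJECT('k': 1, 'k': 2 RETURNING jsonb);  -- {"k": 2}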
-SELECT JSON_OBJECT(1: 1, '1': NULL NULL ON NULL WITH UNIQUE RETURNING jsonb); -ERROR: duplicate JSON object key value -SELECT JSON_OBJECT(1: 1, '1': NULL ABSENT ON NULL WITH UNIQUE RETURNING jsonb); -ERROR: duplicate JSON object key value -SELECT JSON_OBJECT(1: 1, '2': NULL, '1': 1 NULL ON NULL WITH UNIQUE); -ERROR: duplicate JSON object key value: "1" -SELECT JSON_OBJECT(1: 1, '2': NULL, '1': 1 ABSENT ON NULL WITH UNIQUE); -ERROR: duplicate JSON object key value: "1" -SELECT JSON_OBJECT(1: 1, '2': NULL, '1': 1 ABSENT ON NULL WITHOUT UNIQUE); - json_object --------------------- - {"1" : 1, "1" : 1} -(1 row) - -SELECT JSON_OBJECT(1: 1, '2': NULL, '1': 1 ABSENT ON NULL WITH UNIQUE RETURNING jsonb); -ERROR: duplicate JSON object key value -SELECT JSON_OBJECT(1: 1, '2': NULL, '1': 1 ABSENT ON NULL WITHOUT UNIQUE RETURNING jsonb); - json_object -------------- - {"1": 1} -(1 row) - -SELECT JSON_OBJECT(1: 1, '2': NULL, '3': 1, 4: NULL, '5': 'a' ABSENT ON NULL WITH UNIQUE RETURNING jsonb); - json_object ----------------------------- - {"1": 1, "3": 1, "5": "a"} -(1 row) - --- JSON_ARRAY() -SELECT JSON_ARRAY(); - json_array ------------- - [] -(1 row) - -SELECT JSON_ARRAY(RETURNING json); - json_array ------------- - [] -(1 row) - -SELECT JSON_ARRAY(RETURNING json FORMAT JSON); - json_array ------------- - [] -(1 row) - -SELECT JSON_ARRAY(RETURNING jsonb); - json_array ------------- - [] -(1 row) - -SELECT JSON_ARRAY(RETURNING jsonb FORMAT JSON); - json_array ------------- - [] -(1 row) - -SELECT JSON_ARRAY(RETURNING text); - json_array ------------- - [] -(1 row) - -SELECT JSON_ARRAY(RETURNING text FORMAT JSON); - json_array ------------- - [] -(1 row) - -SELECT JSON_ARRAY(RETURNING text FORMAT JSON ENCODING UTF8); -ERROR: cannot set JSON encoding for non-bytea output types -LINE 1: SELECT JSON_ARRAY(RETURNING text FORMAT JSON ENCODING UTF8); - ^ -SELECT JSON_ARRAY(RETURNING text FORMAT JSON ENCODING INVALID_ENCODING); -ERROR: unrecognized JSON encoding: invalid_encoding -SELECT JSON_ARRAY(RETURNING bytea); - json_array ------------- - \x5b5d -(1 row) - -SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON); - json_array ------------- - \x5b5d -(1 row) - -SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON ENCODING UTF8); - json_array ------------- - \x5b5d -(1 row) - -SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON ENCODING UTF16); -ERROR: unsupported JSON encoding -LINE 1: SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON ENCODING UTF16... - ^ -HINT: Only UTF8 JSON encoding is supported. -SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON ENCODING UTF32); -ERROR: unsupported JSON encoding -LINE 1: SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON ENCODING UTF32... - ^ -HINT: Only UTF8 JSON encoding is supported. 
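As with JSON_OBJECT earlier in this file, the ENCODING clause on JSON_ARRAY is accepted only for bytea output, and only UTF8 passes. A sketch of the one accepted spelling; the literal and the hand-computed bytes below are assumptions, not copied from the diff:

  SELECT JSON_ARRAY('a', 1 RETURNING bytea FORMAT JSON ENCODING UTF8);
  -- the UTF8 bytes of ["a", 1], i.e. \x5b2261222c20315d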
-SELECT JSON_ARRAY('aaa', 111, true, array[1,2,3], NULL, json '{"a": [1]}', jsonb '["a",3]'); - json_array ------------------------------------------------------ - ["aaa", 111, true, [1, 2, 3], {"a": [1]}, ["a", 3]] -(1 row) - -SELECT JSON_ARRAY('a', NULL, 'b' NULL ON NULL); - json_array ------------------- - ["a", null, "b"] -(1 row) - -SELECT JSON_ARRAY('a', NULL, 'b' ABSENT ON NULL); - json_array ------------- - ["a", "b"] -(1 row) - -SELECT JSON_ARRAY(NULL, NULL, 'b' ABSENT ON NULL); - json_array ------------- - ["b"] -(1 row) - -SELECT JSON_ARRAY('a', NULL, 'b' NULL ON NULL RETURNING jsonb); - json_array ------------------- - ["a", null, "b"] -(1 row) - -SELECT JSON_ARRAY('a', NULL, 'b' ABSENT ON NULL RETURNING jsonb); - json_array ------------- - ["a", "b"] -(1 row) - -SELECT JSON_ARRAY(NULL, NULL, 'b' ABSENT ON NULL RETURNING jsonb); - json_array ------------- - ["b"] -(1 row) - -SELECT JSON_ARRAY(JSON_ARRAY('{ "a" : 123 }' RETURNING text)); - json_array -------------------------------- - ["[\"{ \\\"a\\\" : 123 }\"]"] -(1 row) - -SELECT JSON_ARRAY(JSON_ARRAY('{ "a" : 123 }' FORMAT JSON RETURNING text)); - json_array ------------------------ - ["[{ \"a\" : 123 }]"] -(1 row) - -SELECT JSON_ARRAY(JSON_ARRAY('{ "a" : 123 }' FORMAT JSON RETURNING text) FORMAT JSON); - json_array -------------------- - [[{ "a" : 123 }]] -(1 row) - -SELECT JSON_ARRAY(SELECT i FROM (VALUES (1), (2), (NULL), (4)) foo(i)); - json_array ------------- - [1, 2, 4] -(1 row) - -SELECT JSON_ARRAY(SELECT i FROM (VALUES (NULL::int[]), ('{1,2}'), (NULL), (NULL), ('{3,4}'), (NULL)) foo(i)); - json_array ------------- - [[1,2], + - [3,4]] -(1 row) - -SELECT JSON_ARRAY(SELECT i FROM (VALUES (NULL::int[]), ('{1,2}'), (NULL), (NULL), ('{3,4}'), (NULL)) foo(i) RETURNING jsonb); - json_array ------------------- - [[1, 2], [3, 4]] -(1 row) - ---SELECT JSON_ARRAY(SELECT i FROM (VALUES (NULL::int[]), ('{1,2}'), (NULL), (NULL), ('{3,4}'), (NULL)) foo(i) NULL ON NULL); ---SELECT JSON_ARRAY(SELECT i FROM (VALUES (NULL::int[]), ('{1,2}'), (NULL), (NULL), ('{3,4}'), (NULL)) foo(i) NULL ON NULL RETURNING jsonb); -SELECT JSON_ARRAY(SELECT i FROM (VALUES (3), (1), (NULL), (2)) foo(i) ORDER BY i); - json_array ------------- - [1, 2, 3] -(1 row) - --- Should fail -SELECT JSON_ARRAY(SELECT FROM (VALUES (1)) foo(i)); -ERROR: subquery must return only one column -LINE 1: SELECT JSON_ARRAY(SELECT FROM (VALUES (1)) foo(i)); - ^ -SELECT JSON_ARRAY(SELECT i, i FROM (VALUES (1)) foo(i)); -ERROR: subquery must return only one column -LINE 1: SELECT JSON_ARRAY(SELECT i, i FROM (VALUES (1)) foo(i)); - ^ -SELECT JSON_ARRAY(SELECT * FROM (VALUES (1, 2)) foo(i, j)); -ERROR: subquery must return only one column -LINE 1: SELECT JSON_ARRAY(SELECT * FROM (VALUES (1, 2)) foo(i, j)); - ^ --- JSON_ARRAYAGG() -SELECT JSON_ARRAYAGG(i) IS NULL, - JSON_ARRAYAGG(i RETURNING jsonb) IS NULL -FROM generate_series(1, 0) i; - ?column? | ?column? 
-----------+---------- - t | t -(1 row) - -SELECT JSON_ARRAYAGG(i), - JSON_ARRAYAGG(i RETURNING jsonb) -FROM generate_series(1, 5) i; - json_arrayagg | json_arrayagg ------------------+----------------- - [1, 2, 3, 4, 5] | [1, 2, 3, 4, 5] -(1 row) - -SELECT JSON_ARRAYAGG(i ORDER BY i DESC) -FROM generate_series(1, 5) i; - json_arrayagg ------------------ - [5, 4, 3, 2, 1] -(1 row) - -SELECT JSON_ARRAYAGG(i::text::json) -FROM generate_series(1, 5) i; - json_arrayagg ------------------ - [1, 2, 3, 4, 5] -(1 row) - -SELECT JSON_ARRAYAGG(JSON_ARRAY(i, i + 1 RETURNING text) FORMAT JSON) -FROM generate_series(1, 5) i; - json_arrayagg ------------------------------------------- - [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6]] -(1 row) - -SELECT JSON_ARRAYAGG(NULL), - JSON_ARRAYAGG(NULL RETURNING jsonb) -FROM generate_series(1, 5); - json_arrayagg | json_arrayagg ----------------+--------------- - [] | [] -(1 row) - -SELECT JSON_ARRAYAGG(NULL NULL ON NULL), - JSON_ARRAYAGG(NULL NULL ON NULL RETURNING jsonb) -FROM generate_series(1, 5); - json_arrayagg | json_arrayagg ---------------------------------+-------------------------------- - [null, null, null, null, null] | [null, null, null, null, null] -(1 row) - -\x -SELECT - JSON_ARRAYAGG(bar) as no_options, - JSON_ARRAYAGG(bar RETURNING jsonb) as returning_jsonb, - JSON_ARRAYAGG(bar ABSENT ON NULL) as absent_on_null, - JSON_ARRAYAGG(bar ABSENT ON NULL RETURNING jsonb) as absentonnull_returning_jsonb, - JSON_ARRAYAGG(bar NULL ON NULL) as null_on_null, - JSON_ARRAYAGG(bar NULL ON NULL RETURNING jsonb) as nullonnull_returning_jsonb, - JSON_ARRAYAGG(foo) as row_no_options, - JSON_ARRAYAGG(foo RETURNING jsonb) as row_returning_jsonb, - JSON_ARRAYAGG(foo ORDER BY bar) FILTER (WHERE bar > 2) as row_filtered_agg, - JSON_ARRAYAGG(foo ORDER BY bar RETURNING jsonb) FILTER (WHERE bar > 2) as row_filtered_agg_returning_jsonb -FROM - (VALUES (NULL), (3), (1), (NULL), (NULL), (5), (2), (4), (NULL)) foo(bar); --[ RECORD 1 ]--------------------+------------------------------------------------------------------------------------------------------------------------- -no_options | [1, 2, 3, 4, 5] -returning_jsonb | [1, 2, 3, 4, 5] -absent_on_null | [1, 2, 3, 4, 5] -absentonnull_returning_jsonb | [1, 2, 3, 4, 5] -null_on_null | [1, 2, 3, 4, 5, null, null, null, null] -nullonnull_returning_jsonb | [1, 2, 3, 4, 5, null, null, null, null] -row_no_options | [{"bar":1}, + - | {"bar":2}, + - | {"bar":3}, + - | {"bar":4}, + - | {"bar":5}, + - | {"bar":null}, + - | {"bar":null}, + - | {"bar":null}, + - | {"bar":null}] -row_returning_jsonb | [{"bar": 1}, {"bar": 2}, {"bar": 3}, {"bar": 4}, {"bar": 5}, {"bar": null}, {"bar": null}, {"bar": null}, {"bar": null}] -row_filtered_agg | [{"bar":3}, + - | {"bar":4}, + - | {"bar":5}] -row_filtered_agg_returning_jsonb | [{"bar": 3}, {"bar": 4}, {"bar": 5}] - -\x -SELECT - bar, JSON_ARRAYAGG(bar) FILTER (WHERE bar > 2) OVER (PARTITION BY foo.bar % 2) -FROM - (VALUES (NULL), (3), (1), (NULL), (NULL), (5), (2), (4), (NULL), (5), (4)) foo(bar); - bar | json_arrayagg ------+--------------- - 4 | [4, 4] - 4 | [4, 4] - 2 | [4, 4] - 5 | [5, 3, 5] - 3 | [5, 3, 5] - 1 | [5, 3, 5] - 5 | [5, 3, 5] - | - | - | - | -(11 rows) - --- JSON_OBJECTAGG() -SELECT JSON_OBJECTAGG('key': 1) IS NULL, - JSON_OBJECTAGG('key': 1 RETURNING jsonb) IS NULL -WHERE FALSE; - ?column? | ?column? 
-----------+---------- - t | t -(1 row) - -SELECT JSON_OBJECTAGG(NULL: 1); -ERROR: null value not allowed for object key -SELECT JSON_OBJECTAGG(NULL: 1 RETURNING jsonb); -ERROR: field name must not be null -SELECT - JSON_OBJECTAGG(i: i), --- JSON_OBJECTAGG(i VALUE i), --- JSON_OBJECTAGG(KEY i VALUE i), - JSON_OBJECTAGG(i: i RETURNING jsonb) -FROM - generate_series(1, 5) i; - json_objectagg | json_objectagg --------------------------------------------------+------------------------------------------ - { "1" : 1, "2" : 2, "3" : 3, "4" : 4, "5" : 5 } | {"1": 1, "2": 2, "3": 3, "4": 4, "5": 5} -(1 row) - -SELECT - JSON_OBJECTAGG(k: v), - JSON_OBJECTAGG(k: v NULL ON NULL), - JSON_OBJECTAGG(k: v ABSENT ON NULL), - JSON_OBJECTAGG(k: v RETURNING jsonb), - JSON_OBJECTAGG(k: v NULL ON NULL RETURNING jsonb), - JSON_OBJECTAGG(k: v ABSENT ON NULL RETURNING jsonb) -FROM - (VALUES (1, 1), (1, NULL), (2, NULL), (3, 3)) foo(k, v); - json_objectagg | json_objectagg | json_objectagg | json_objectagg | json_objectagg | json_objectagg -----------------------------------------------+----------------------------------------------+----------------------+--------------------------------+--------------------------------+------------------ - { "1" : 1, "1" : null, "2" : null, "3" : 3 } | { "1" : 1, "1" : null, "2" : null, "3" : 3 } | { "1" : 1, "3" : 3 } | {"1": null, "2": null, "3": 3} | {"1": null, "2": null, "3": 3} | {"1": 1, "3": 3} -(1 row) - -SELECT JSON_OBJECTAGG(k: v WITH UNIQUE KEYS) -FROM (VALUES (1, 1), (1, NULL), (2, 2)) foo(k, v); -ERROR: duplicate JSON object key value: "1" -SELECT JSON_OBJECTAGG(k: v ABSENT ON NULL WITH UNIQUE KEYS) -FROM (VALUES (1, 1), (1, NULL), (2, 2)) foo(k, v); -ERROR: duplicate JSON object key value: "1" -SELECT JSON_OBJECTAGG(k: v ABSENT ON NULL WITH UNIQUE KEYS) -FROM (VALUES (1, 1), (0, NULL), (3, NULL), (2, 2), (4, NULL)) foo(k, v); - json_objectagg ----------------------- - { "1" : 1, "2" : 2 } -(1 row) - -SELECT JSON_OBJECTAGG(k: v WITH UNIQUE KEYS RETURNING jsonb) -FROM (VALUES (1, 1), (1, NULL), (2, 2)) foo(k, v); -ERROR: duplicate JSON object key value -SELECT JSON_OBJECTAGG(k: v ABSENT ON NULL WITH UNIQUE KEYS RETURNING jsonb) -FROM (VALUES (1, 1), (1, NULL), (2, 2)) foo(k, v); -ERROR: duplicate JSON object key value -SELECT JSON_OBJECTAGG(k: v ABSENT ON NULL WITH UNIQUE KEYS RETURNING jsonb) -FROM (VALUES (1, 1), (0, NULL),(4, null), (5, null),(6, null),(2, 2)) foo(k, v); - json_objectagg ------------------- - {"1": 1, "2": 2} -(1 row) - --- Test JSON_OBJECT deparsing -EXPLAIN (VERBOSE, COSTS OFF) -SELECT JSON_OBJECT('foo' : '1' FORMAT JSON, 'bar' : 'baz' RETURNING json); - QUERY PLAN ------------------------------------------------------------------------------- - Result - Output: JSON_OBJECT('foo' : '1'::json, 'bar' : 'baz'::text RETURNING json) -(2 rows) - -CREATE VIEW json_object_view AS -SELECT JSON_OBJECT('foo' : '1' FORMAT JSON, 'bar' : 'baz' RETURNING json); -\sv json_object_view -CREATE OR REPLACE VIEW public.json_object_view AS - SELECT JSON_OBJECT('foo' : '1'::text FORMAT JSON, 'bar' : 'baz'::text RETURNING json) AS "json_object" -DROP VIEW json_object_view; -SELECT to_json(a) AS a, JSON_OBJECTAGG(k : v WITH UNIQUE KEYS) OVER (ORDER BY k) -FROM (VALUES (1,1), (2,2)) a(k,v); - a | json_objectagg ----------------+---------------------- - {"k":1,"v":1} | { "1" : 1 } - {"k":2,"v":2} | { "1" : 1, "2" : 2 } -(2 rows) - -SELECT to_json(a) AS a, JSON_OBJECTAGG(k : v WITH UNIQUE KEYS) OVER (ORDER BY k) -FROM (VALUES (1,1), (1,2), (2,2)) a(k,v); -ERROR: duplicate 
JSON object key value: "1" -SELECT to_json(a) AS a, JSON_OBJECTAGG(k : v ABSENT ON NULL WITH UNIQUE KEYS) - OVER (ORDER BY k) -FROM (VALUES (1,1), (1,null), (2,2)) a(k,v); -ERROR: duplicate JSON object key value: "1" -SELECT to_json(a) AS a, JSON_OBJECTAGG(k : v ABSENT ON NULL) -OVER (ORDER BY k) -FROM (VALUES (1,1), (1,null), (2,2)) a(k,v); - a | json_objectagg -------------------+---------------------- - {"k":1,"v":1} | { "1" : 1 } - {"k":1,"v":null} | { "1" : 1 } - {"k":2,"v":2} | { "1" : 1, "2" : 2 } -(3 rows) - -SELECT to_json(a) AS a, JSON_OBJECTAGG(k : v ABSENT ON NULL) -OVER (ORDER BY k RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) -FROM (VALUES (1,1), (1,null), (2,2)) a(k,v); - a | json_objectagg -------------------+---------------------- - {"k":1,"v":1} | { "1" : 1, "2" : 2 } - {"k":1,"v":null} | { "1" : 1, "2" : 2 } - {"k":2,"v":2} | { "1" : 1, "2" : 2 } -(3 rows) - --- Test JSON_ARRAY deparsing -EXPLAIN (VERBOSE, COSTS OFF) -SELECT JSON_ARRAY('1' FORMAT JSON, 2 RETURNING json); - QUERY PLAN ---------------------------------------------------- - Result - Output: JSON_ARRAY('1'::json, 2 RETURNING json) -(2 rows) - -CREATE VIEW json_array_view AS -SELECT JSON_ARRAY('1' FORMAT JSON, 2 RETURNING json); -\sv json_array_view -CREATE OR REPLACE VIEW public.json_array_view AS - SELECT JSON_ARRAY('1'::text FORMAT JSON, 2 RETURNING json) AS "json_array" -DROP VIEW json_array_view; --- Test JSON_OBJECTAGG deparsing -EXPLAIN (VERBOSE, COSTS OFF) -SELECT JSON_OBJECTAGG(i: ('111' || i)::bytea FORMAT JSON WITH UNIQUE RETURNING text) FILTER (WHERE i > 3) -FROM generate_series(1,5) i; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------- - Aggregate - Output: JSON_OBJECTAGG(i : (('111'::text || (i)::text))::bytea FORMAT JSON WITH UNIQUE KEYS RETURNING text) FILTER (WHERE (i > 3)) - -> Function Scan on pg_catalog.generate_series i - Output: i - Function Call: generate_series(1, 5) -(5 rows) - -EXPLAIN (VERBOSE, COSTS OFF) -SELECT JSON_OBJECTAGG(i: ('111' || i)::bytea FORMAT JSON WITH UNIQUE RETURNING text) OVER (PARTITION BY i % 2) -FROM generate_series(1,5) i; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------ - WindowAgg - Output: JSON_OBJECTAGG(i : (('111'::text || (i)::text))::bytea FORMAT JSON WITH UNIQUE KEYS RETURNING text) OVER (?), ((i % 2)) - -> Sort - Output: ((i % 2)), i - Sort Key: ((i.i % 2)) - -> Function Scan on pg_catalog.generate_series i - Output: (i % 2), i - Function Call: generate_series(1, 5) -(8 rows) - -CREATE VIEW json_objectagg_view AS -SELECT JSON_OBJECTAGG(i: ('111' || i)::bytea FORMAT JSON WITH UNIQUE RETURNING text) FILTER (WHERE i > 3) -FROM generate_series(1,5) i; -\sv json_objectagg_view -CREATE OR REPLACE VIEW public.json_objectagg_view AS - SELECT JSON_OBJECTAGG(i : ('111'::text || i)::bytea FORMAT JSON WITH UNIQUE KEYS RETURNING text) FILTER (WHERE i > 3) AS "json_objectagg" - FROM generate_series(1, 5) i(i) -DROP VIEW json_objectagg_view; --- Test JSON_ARRAYAGG deparsing -EXPLAIN (VERBOSE, COSTS OFF) -SELECT JSON_ARRAYAGG(('111' || i)::bytea FORMAT JSON NULL ON NULL RETURNING text) FILTER (WHERE i > 3) -FROM generate_series(1,5) i; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------ - Aggregate - Output: JSON_ARRAYAGG((('111'::text || (i)::text))::bytea FORMAT JSON NULL ON 
NULL RETURNING text) FILTER (WHERE (i > 3)) - -> Function Scan on pg_catalog.generate_series i - Output: i - Function Call: generate_series(1, 5) -(5 rows) - -EXPLAIN (VERBOSE, COSTS OFF) -SELECT JSON_ARRAYAGG(('111' || i)::bytea FORMAT JSON NULL ON NULL RETURNING text) OVER (PARTITION BY i % 2) -FROM generate_series(1,5) i; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------- - WindowAgg - Output: JSON_ARRAYAGG((('111'::text || (i)::text))::bytea FORMAT JSON NULL ON NULL RETURNING text) OVER (?), ((i % 2)) - -> Sort - Output: ((i % 2)), i - Sort Key: ((i.i % 2)) - -> Function Scan on pg_catalog.generate_series i - Output: (i % 2), i - Function Call: generate_series(1, 5) -(8 rows) - -CREATE VIEW json_arrayagg_view AS -SELECT JSON_ARRAYAGG(('111' || i)::bytea FORMAT JSON NULL ON NULL RETURNING text) FILTER (WHERE i > 3) -FROM generate_series(1,5) i; -\sv json_arrayagg_view -CREATE OR REPLACE VIEW public.json_arrayagg_view AS - SELECT JSON_ARRAYAGG(('111'::text || i)::bytea FORMAT JSON NULL ON NULL RETURNING text) FILTER (WHERE i > 3) AS "json_arrayagg" - FROM generate_series(1, 5) i(i) -DROP VIEW json_arrayagg_view; --- Test JSON_ARRAY(subquery) deparsing -EXPLAIN (VERBOSE, COSTS OFF) -SELECT JSON_ARRAY(SELECT i FROM (VALUES (1), (2), (NULL), (4)) foo(i) RETURNING jsonb); - QUERY PLAN ---------------------------------------------------------------------- - Result - Output: $0 - InitPlan 1 (returns $0) - -> Aggregate - Output: JSON_ARRAYAGG("*VALUES*".column1 RETURNING jsonb) - -> Values Scan on "*VALUES*" - Output: "*VALUES*".column1 -(7 rows) - -CREATE VIEW json_array_subquery_view AS -SELECT JSON_ARRAY(SELECT i FROM (VALUES (1), (2), (NULL), (4)) foo(i) RETURNING jsonb); -\sv json_array_subquery_view -CREATE OR REPLACE VIEW public.json_array_subquery_view AS - SELECT ( SELECT JSON_ARRAYAGG(q.a RETURNING jsonb) AS "json_arrayagg" - FROM ( SELECT foo.i - FROM ( VALUES (1), (2), (NULL::integer), (4)) foo(i)) q(a)) AS "json_array" -DROP VIEW json_array_subquery_view; --- IS JSON predicate -SELECT NULL IS JSON; - ?column? ----------- - -(1 row) - -SELECT NULL IS NOT JSON; - ?column? ----------- - -(1 row) - -SELECT NULL::json IS JSON; - ?column? ----------- - -(1 row) - -SELECT NULL::jsonb IS JSON; - ?column? ----------- - -(1 row) - -SELECT NULL::text IS JSON; - ?column? ----------- - -(1 row) - -SELECT NULL::bytea IS JSON; - ?column? ----------- - -(1 row) - -SELECT NULL::int IS JSON; -ERROR: cannot use type integer in IS JSON predicate -SELECT '' IS JSON; - ?column? 
----------- - f -(1 row) - -SELECT bytea '\x00' IS JSON; -ERROR: invalid byte sequence for encoding "UTF8": 0x00 -CREATE TABLE test_is_json (js text); -INSERT INTO test_is_json VALUES - (NULL), - (''), - ('123'), - ('"aaa "'), - ('true'), - ('null'), - ('[]'), - ('[1, "2", {}]'), - ('{}'), - ('{ "a": 1, "b": null }'), - ('{ "a": 1, "a": null }'), - ('{ "a": 1, "b": [{ "a": 1 }, { "a": 2 }] }'), - ('{ "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] }'), - ('aaa'), - ('{a:1}'), - ('["a",]'); -SELECT - js, - js IS JSON "IS JSON", - js IS NOT JSON "IS NOT JSON", - js IS JSON VALUE "IS VALUE", - js IS JSON OBJECT "IS OBJECT", - js IS JSON ARRAY "IS ARRAY", - js IS JSON SCALAR "IS SCALAR", - js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE", - js IS JSON WITH UNIQUE KEYS "WITH UNIQUE" -FROM - test_is_json; - js | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE ------------------------------------------------+---------+-------------+----------+-----------+----------+-----------+----------------+------------- - | | | | | | | | - | f | t | f | f | f | f | f | f - 123 | t | f | t | f | f | t | t | t - "aaa " | t | f | t | f | f | t | t | t - true | t | f | t | f | f | t | t | t - null | t | f | t | f | f | t | t | t - [] | t | f | t | f | t | f | t | t - [1, "2", {}] | t | f | t | f | t | f | t | t - {} | t | f | t | t | f | f | t | t - { "a": 1, "b": null } | t | f | t | t | f | f | t | t - { "a": 1, "a": null } | t | f | t | t | f | f | t | f - { "a": 1, "b": [{ "a": 1 }, { "a": 2 }] } | t | f | t | t | f | f | t | t - { "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] } | t | f | t | t | f | f | t | f - aaa | f | t | f | f | f | f | f | f - {a:1} | f | t | f | f | f | f | f | f - ["a",] | f | t | f | f | f | f | f | f -(16 rows) - -SELECT - js, - js IS JSON "IS JSON", - js IS NOT JSON "IS NOT JSON", - js IS JSON VALUE "IS VALUE", - js IS JSON OBJECT "IS OBJECT", - js IS JSON ARRAY "IS ARRAY", - js IS JSON SCALAR "IS SCALAR", - js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE", - js IS JSON WITH UNIQUE KEYS "WITH UNIQUE" -FROM - (SELECT js::json FROM test_is_json WHERE js IS JSON) foo(js); - js | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE ------------------------------------------------+---------+-------------+----------+-----------+----------+-----------+----------------+------------- - 123 | t | f | t | f | f | t | t | t - "aaa " | t | f | t | f | f | t | t | t - true | t | f | t | f | f | t | t | t - null | t | f | t | f | f | t | t | t - [] | t | f | t | f | t | f | t | t - [1, "2", {}] | t | f | t | f | t | f | t | t - {} | t | f | t | t | f | f | t | t - { "a": 1, "b": null } | t | f | t | t | f | f | t | t - { "a": 1, "a": null } | t | f | t | t | f | f | t | f - { "a": 1, "b": [{ "a": 1 }, { "a": 2 }] } | t | f | t | t | f | f | t | t - { "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] } | t | f | t | t | f | f | t | f -(11 rows) - -SELECT - js0, - js IS JSON "IS JSON", - js IS NOT JSON "IS NOT JSON", - js IS JSON VALUE "IS VALUE", - js IS JSON OBJECT "IS OBJECT", - js IS JSON ARRAY "IS ARRAY", - js IS JSON SCALAR "IS SCALAR", - js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE", - js IS JSON WITH UNIQUE KEYS "WITH UNIQUE" -FROM - (SELECT js, js::bytea FROM test_is_json WHERE js IS JSON) foo(js0, js); - js0 | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE 
------------------------------------------------+---------+-------------+----------+-----------+----------+-----------+----------------+------------- - 123 | t | f | t | f | f | t | t | t - "aaa " | t | f | t | f | f | t | t | t - true | t | f | t | f | f | t | t | t - null | t | f | t | f | f | t | t | t - [] | t | f | t | f | t | f | t | t - [1, "2", {}] | t | f | t | f | t | f | t | t - {} | t | f | t | t | f | f | t | t - { "a": 1, "b": null } | t | f | t | t | f | f | t | t - { "a": 1, "a": null } | t | f | t | t | f | f | t | f - { "a": 1, "b": [{ "a": 1 }, { "a": 2 }] } | t | f | t | t | f | f | t | t - { "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] } | t | f | t | t | f | f | t | f -(11 rows) - -SELECT - js, - js IS JSON "IS JSON", - js IS NOT JSON "IS NOT JSON", - js IS JSON VALUE "IS VALUE", - js IS JSON OBJECT "IS OBJECT", - js IS JSON ARRAY "IS ARRAY", - js IS JSON SCALAR "IS SCALAR", - js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE", - js IS JSON WITH UNIQUE KEYS "WITH UNIQUE" -FROM - (SELECT js::jsonb FROM test_is_json WHERE js IS JSON) foo(js); - js | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE --------------------------------------+---------+-------------+----------+-----------+----------+-----------+----------------+------------- - 123 | t | f | t | f | f | t | t | t - "aaa " | t | f | t | f | f | t | t | t - true | t | f | t | f | f | t | t | t - null | t | f | t | f | f | t | t | t - [] | t | f | t | f | t | f | t | t - [1, "2", {}] | t | f | t | f | t | f | t | t - {} | t | f | t | t | f | f | t | t - {"a": 1, "b": null} | t | f | t | t | f | f | t | t - {"a": null} | t | f | t | t | f | f | t | t - {"a": 1, "b": [{"a": 1}, {"a": 2}]} | t | f | t | t | f | f | t | t - {"a": 1, "b": [{"a": 2, "b": 0}]} | t | f | t | t | f | f | t | t -(11 rows) - --- Test IS JSON deparsing -EXPLAIN (VERBOSE, COSTS OFF) -SELECT '1' IS JSON AS "any", ('1' || i) IS JSON SCALAR AS "scalar", '[]' IS NOT JSON ARRAY AS "array", '{}' IS JSON OBJECT WITH UNIQUE AS "object" FROM generate_series(1, 3) i; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------- - Function Scan on pg_catalog.generate_series i - Output: ('1'::text IS JSON), (('1'::text || (i)::text) IS JSON SCALAR), (NOT ('[]'::text IS JSON ARRAY)), ('{}'::text IS JSON OBJECT WITH UNIQUE KEYS) - Function Call: generate_series(1, 3) -(3 rows) - -CREATE VIEW is_json_view AS -SELECT '1' IS JSON AS "any", ('1' || i) IS JSON SCALAR AS "scalar", '[]' IS NOT JSON ARRAY AS "array", '{}' IS JSON OBJECT WITH UNIQUE AS "object" FROM generate_series(1, 3) i; -\sv is_json_view -CREATE OR REPLACE VIEW public.is_json_view AS - SELECT '1'::text IS JSON AS "any", - ('1'::text || i) IS JSON SCALAR AS scalar, - NOT '[]'::text IS JSON ARRAY AS "array", - '{}'::text IS JSON OBJECT WITH UNIQUE KEYS AS object - FROM generate_series(1, 3) i(i) -DROP VIEW is_json_view; +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. 
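
Note that the sqljson differences above are not failures of the JSON tests themselves: the script was cut off mid-run, and the trailing psql error shows a new connection being refused while the postmaster was still performing crash recovery after an earlier backend crash in the same run. Every statement after that point simply never executed, which is why the remainder of the expected output appears as removed lines; the diffs below repeat the same secondary pattern. As a hedged illustration only (these queries are not part of the regress scripts above), the aftermath of such a crash can be confirmed from any session once the server accepts connections again:

    SELECT pg_is_in_recovery();               -- false on a primary once crash recovery has finished
    SELECT stats_reset FROM pg_stat_bgwriter; -- a recent timestamp here reflects that cumulative
                                              -- statistics are discarded after an unclean shutdown
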
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/indexing.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/indexing.out --- /tmp/cirrus-ci-build/src/test/regress/expected/indexing.out 2024-03-07 14:25:00.331032000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/indexing.out 2024-03-07 14:27:23.679998000 +0000 @@ -1500,153 +1500,10 @@ Number of partitions: 1 (Use \d+ to list them.) \d parted_index_col_drop2 - Partitioned table "public.parted_index_col_drop2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Partition of: parted_index_col_drop FOR VALUES IN (2) -Partition key: LIST (a) -Indexes: - "parted_index_col_drop2_b_idx" btree (b) -Number of partitions: 0 - -\d parted_index_col_drop11 - Table "public.parted_index_col_drop11" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Partition of: parted_index_col_drop1 FOR VALUES IN (1) -Indexes: - "parted_index_col_drop11_b_idx" btree (b) - -drop table parted_index_col_drop; --- Check that invalid indexes are not selected when attaching a partition. -create table parted_inval_tab (a int) partition by range (a); -create index parted_inval_idx on parted_inval_tab (a); -create table parted_inval_tab_1 (a int) partition by range (a); -create table parted_inval_tab_1_1 partition of parted_inval_tab_1 - for values from (0) to (10); -create table parted_inval_tab_1_2 partition of parted_inval_tab_1 - for values from (10) to (20); --- this creates an invalid index. -create index parted_inval_ixd_1 on only parted_inval_tab_1 (a); --- this creates new indexes for all the partitions of parted_inval_tab_1, --- discarding the invalid index created previously as what is chosen. -alter table parted_inval_tab attach partition parted_inval_tab_1 - for values from (1) to (100); -select indexrelid::regclass, indisvalid, - indrelid::regclass, inhparent::regclass - from pg_index idx left join - pg_inherits inh on (idx.indexrelid = inh.inhrelid) - where indexrelid::regclass::text like 'parted_inval%' - order by indexrelid::regclass::text collate "C"; - indexrelid | indisvalid | indrelid | inhparent -----------------------------+------------+----------------------+-------------------------- - parted_inval_idx | t | parted_inval_tab | - parted_inval_ixd_1 | f | parted_inval_tab_1 | - parted_inval_tab_1_1_a_idx | t | parted_inval_tab_1_1 | parted_inval_tab_1_a_idx - parted_inval_tab_1_2_a_idx | t | parted_inval_tab_1_2 | parted_inval_tab_1_a_idx - parted_inval_tab_1_a_idx | t | parted_inval_tab_1 | parted_inval_idx -(5 rows) - -drop table parted_inval_tab; --- Check setup of indisvalid across a complex partition tree on index --- creation. If one index in a partition index is invalid, so should its --- partitioned index. -create table parted_isvalid_tab (a int, b int) partition by range (a); -create table parted_isvalid_tab_1 partition of parted_isvalid_tab - for values from (1) to (10) partition by range (a); -create table parted_isvalid_tab_2 partition of parted_isvalid_tab - for values from (10) to (20) partition by range (a); -create table parted_isvalid_tab_11 partition of parted_isvalid_tab_1 - for values from (1) to (5); -create table parted_isvalid_tab_12 partition of parted_isvalid_tab_1 - for values from (5) to (10); --- create an invalid index on one of the partitions. 
-insert into parted_isvalid_tab_11 values (1, 0); -create index concurrently parted_isvalid_idx_11 on parted_isvalid_tab_11 ((a/b)); -ERROR: division by zero --- The previous invalid index is selected, invalidating all the indexes up to --- the top-most parent. -create index parted_isvalid_idx on parted_isvalid_tab ((a/b)); -select indexrelid::regclass, indisvalid, - indrelid::regclass, inhparent::regclass - from pg_index idx left join - pg_inherits inh on (idx.indexrelid = inh.inhrelid) - where indexrelid::regclass::text like 'parted_isvalid%' - order by indexrelid::regclass::text collate "C"; - indexrelid | indisvalid | indrelid | inhparent ---------------------------------+------------+-----------------------+------------------------------- - parted_isvalid_idx | f | parted_isvalid_tab | - parted_isvalid_idx_11 | f | parted_isvalid_tab_11 | parted_isvalid_tab_1_expr_idx - parted_isvalid_tab_12_expr_idx | t | parted_isvalid_tab_12 | parted_isvalid_tab_1_expr_idx - parted_isvalid_tab_1_expr_idx | f | parted_isvalid_tab_1 | parted_isvalid_idx - parted_isvalid_tab_2_expr_idx | t | parted_isvalid_tab_2 | parted_isvalid_idx -(5 rows) - -drop table parted_isvalid_tab; --- Check state of replica indexes when attaching a partition. -begin; -create table parted_replica_tab (id int not null) partition by range (id); -create table parted_replica_tab_1 partition of parted_replica_tab - for values from (1) to (10) partition by range (id); -create table parted_replica_tab_11 partition of parted_replica_tab_1 - for values from (1) to (5); -create unique index parted_replica_idx - on only parted_replica_tab using btree (id); -create unique index parted_replica_idx_1 - on only parted_replica_tab_1 using btree (id); --- This triggers an update of pg_index.indisreplident for parted_replica_idx. -alter table only parted_replica_tab_1 replica identity - using index parted_replica_idx_1; -create unique index parted_replica_idx_11 on parted_replica_tab_11 USING btree (id); -select indexrelid::regclass, indisvalid, indisreplident, - indrelid::regclass, inhparent::regclass - from pg_index idx left join - pg_inherits inh on (idx.indexrelid = inh.inhrelid) - where indexrelid::regclass::text like 'parted_replica%' - order by indexrelid::regclass::text collate "C"; - indexrelid | indisvalid | indisreplident | indrelid | inhparent ------------------------+------------+----------------+-----------------------+----------- - parted_replica_idx | f | f | parted_replica_tab | - parted_replica_idx_1 | f | t | parted_replica_tab_1 | - parted_replica_idx_11 | t | f | parted_replica_tab_11 | -(3 rows) - --- parted_replica_idx is not valid yet here, because parted_replica_idx_1 --- is not valid. -alter index parted_replica_idx ATTACH PARTITION parted_replica_idx_1; -select indexrelid::regclass, indisvalid, indisreplident, - indrelid::regclass, inhparent::regclass - from pg_index idx left join - pg_inherits inh on (idx.indexrelid = inh.inhrelid) - where indexrelid::regclass::text like 'parted_replica%' - order by indexrelid::regclass::text collate "C"; - indexrelid | indisvalid | indisreplident | indrelid | inhparent ------------------------+------------+----------------+-----------------------+-------------------- - parted_replica_idx | f | f | parted_replica_tab | - parted_replica_idx_1 | f | t | parted_replica_tab_1 | parted_replica_idx - parted_replica_idx_11 | t | f | parted_replica_tab_11 | -(3 rows) - --- parted_replica_idx becomes valid here. 
-alter index parted_replica_idx_1 ATTACH PARTITION parted_replica_idx_11; -alter table only parted_replica_tab_1 replica identity - using index parted_replica_idx_1; -commit; -select indexrelid::regclass, indisvalid, indisreplident, - indrelid::regclass, inhparent::regclass - from pg_index idx left join - pg_inherits inh on (idx.indexrelid = inh.inhrelid) - where indexrelid::regclass::text like 'parted_replica%' - order by indexrelid::regclass::text collate "C"; - indexrelid | indisvalid | indisreplident | indrelid | inhparent ------------------------+------------+----------------+-----------------------+---------------------- - parted_replica_idx | t | f | parted_replica_tab | - parted_replica_idx_1 | t | t | parted_replica_tab_1 | parted_replica_idx - parted_replica_idx_11 | t | f | parted_replica_tab_11 | parted_replica_idx_1 -(3 rows) - -drop table parted_replica_tab; +WARNING: terminating connection because of crash of another server process +DETAIL: The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory. +HINT: In a moment you should be able to reconnect to the database and repeat your command. +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/partition_aggregate.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/partition_aggregate.out --- /tmp/cirrus-ci-build/src/test/regress/expected/partition_aggregate.out 2024-03-07 14:25:00.332416000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/partition_aggregate.out 2024-03-07 14:27:23.677429000 +0000 @@ -339,1182 +339,7 @@ -- ROLLUP, partitionwise aggregation does not apply EXPLAIN (COSTS OFF) SELECT c, sum(a) FROM pagg_tab GROUP BY rollup(c) ORDER BY 1, 2; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: pagg_tab.c, (sum(pagg_tab.a)) - -> MixedAggregate - Hash Key: pagg_tab.c - Group Key: () - -> Append - -> Seq Scan on pagg_tab_p1 pagg_tab_1 - -> Seq Scan on pagg_tab_p2 pagg_tab_2 - -> Seq Scan on pagg_tab_p3 pagg_tab_3 -(9 rows) - --- ORDERED SET within the aggregate. --- Full aggregation; since all the rows that belong to the same group come --- from the same partition, having an ORDER BY within the aggregate doesn't --- make any difference. -EXPLAIN (COSTS OFF) -SELECT c, sum(b order by a) FROM pagg_tab GROUP BY c ORDER BY 1, 2; - QUERY PLAN ---------------------------------------------------------------- - Sort - Sort Key: pagg_tab.c, (sum(pagg_tab.b ORDER BY pagg_tab.a)) - -> Append - -> GroupAggregate - Group Key: pagg_tab.c - -> Sort - Sort Key: pagg_tab.c, pagg_tab.a - -> Seq Scan on pagg_tab_p1 pagg_tab - -> GroupAggregate - Group Key: pagg_tab_1.c - -> Sort - Sort Key: pagg_tab_1.c, pagg_tab_1.a - -> Seq Scan on pagg_tab_p2 pagg_tab_1 - -> GroupAggregate - Group Key: pagg_tab_2.c - -> Sort - Sort Key: pagg_tab_2.c, pagg_tab_2.a - -> Seq Scan on pagg_tab_p3 pagg_tab_2 -(18 rows) - --- Since GROUP BY clause does not match with PARTITION KEY; we need to do --- partial aggregation. However, ORDERED SET are not partial safe and thus --- partitionwise aggregation plan is not generated. 
-EXPLAIN (COSTS OFF) -SELECT a, sum(b order by a) FROM pagg_tab GROUP BY a ORDER BY 1, 2; - QUERY PLAN ---------------------------------------------------------------- - Sort - Sort Key: pagg_tab.a, (sum(pagg_tab.b ORDER BY pagg_tab.a)) - -> GroupAggregate - Group Key: pagg_tab.a - -> Sort - Sort Key: pagg_tab.a - -> Append - -> Seq Scan on pagg_tab_p1 pagg_tab_1 - -> Seq Scan on pagg_tab_p2 pagg_tab_2 - -> Seq Scan on pagg_tab_p3 pagg_tab_3 -(10 rows) - --- JOIN query -CREATE TABLE pagg_tab1(x int, y int) PARTITION BY RANGE(x); -CREATE TABLE pagg_tab1_p1 PARTITION OF pagg_tab1 FOR VALUES FROM (0) TO (10); -CREATE TABLE pagg_tab1_p2 PARTITION OF pagg_tab1 FOR VALUES FROM (10) TO (20); -CREATE TABLE pagg_tab1_p3 PARTITION OF pagg_tab1 FOR VALUES FROM (20) TO (30); -CREATE TABLE pagg_tab2(x int, y int) PARTITION BY RANGE(y); -CREATE TABLE pagg_tab2_p1 PARTITION OF pagg_tab2 FOR VALUES FROM (0) TO (10); -CREATE TABLE pagg_tab2_p2 PARTITION OF pagg_tab2 FOR VALUES FROM (10) TO (20); -CREATE TABLE pagg_tab2_p3 PARTITION OF pagg_tab2 FOR VALUES FROM (20) TO (30); -INSERT INTO pagg_tab1 SELECT i % 30, i % 20 FROM generate_series(0, 299, 2) i; -INSERT INTO pagg_tab2 SELECT i % 20, i % 30 FROM generate_series(0, 299, 3) i; -ANALYZE pagg_tab1; -ANALYZE pagg_tab2; --- When GROUP BY clause matches; full aggregation is performed for each partition. -EXPLAIN (COSTS OFF) -SELECT t1.x, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------- - Sort - Sort Key: t1.x, (sum(t1.y)), (count(*)) - -> Append - -> HashAggregate - Group Key: t1.x - -> Hash Join - Hash Cond: (t1.x = t2.y) - -> Seq Scan on pagg_tab1_p1 t1 - -> Hash - -> Seq Scan on pagg_tab2_p1 t2 - -> HashAggregate - Group Key: t1_1.x - -> Hash Join - Hash Cond: (t1_1.x = t2_1.y) - -> Seq Scan on pagg_tab1_p2 t1_1 - -> Hash - -> Seq Scan on pagg_tab2_p2 t2_1 - -> HashAggregate - Group Key: t1_2.x - -> Hash Join - Hash Cond: (t2_2.y = t1_2.x) - -> Seq Scan on pagg_tab2_p3 t2_2 - -> Hash - -> Seq Scan on pagg_tab1_p3 t1_2 -(24 rows) - -SELECT t1.x, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; - x | sum | count -----+------+------- - 0 | 500 | 100 - 6 | 1100 | 100 - 12 | 700 | 100 - 18 | 1300 | 100 - 24 | 900 | 100 -(5 rows) - --- Check with whole-row reference; partitionwise aggregation does not apply -EXPLAIN (COSTS OFF) -SELECT t1.x, sum(t1.y), count(t1) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------- - Sort - Sort Key: t1.x, (sum(t1.y)), (count(((t1.*)::pagg_tab1))) - -> HashAggregate - Group Key: t1.x - -> Hash Join - Hash Cond: (t1.x = t2.y) - -> Append - -> Seq Scan on pagg_tab1_p1 t1_1 - -> Seq Scan on pagg_tab1_p2 t1_2 - -> Seq Scan on pagg_tab1_p3 t1_3 - -> Hash - -> Append - -> Seq Scan on pagg_tab2_p1 t2_1 - -> Seq Scan on pagg_tab2_p2 t2_2 - -> Seq Scan on pagg_tab2_p3 t2_3 -(15 rows) - -SELECT t1.x, sum(t1.y), count(t1) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; - x | sum | count -----+------+------- - 0 | 500 | 100 - 6 | 1100 | 100 - 12 | 700 | 100 - 18 | 1300 | 100 - 24 | 900 | 100 -(5 rows) - --- GROUP BY having other matching key -EXPLAIN (COSTS OFF) -SELECT t2.y, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t2.y ORDER BY 1, 2, 3; - QUERY PLAN 
-------------------------------------------------------------- - Sort - Sort Key: t2.y, (sum(t1.y)), (count(*)) - -> Append - -> HashAggregate - Group Key: t2.y - -> Hash Join - Hash Cond: (t1.x = t2.y) - -> Seq Scan on pagg_tab1_p1 t1 - -> Hash - -> Seq Scan on pagg_tab2_p1 t2 - -> HashAggregate - Group Key: t2_1.y - -> Hash Join - Hash Cond: (t1_1.x = t2_1.y) - -> Seq Scan on pagg_tab1_p2 t1_1 - -> Hash - -> Seq Scan on pagg_tab2_p2 t2_1 - -> HashAggregate - Group Key: t2_2.y - -> Hash Join - Hash Cond: (t2_2.y = t1_2.x) - -> Seq Scan on pagg_tab2_p3 t2_2 - -> Hash - -> Seq Scan on pagg_tab1_p3 t1_2 -(24 rows) - --- When GROUP BY clause does not match; partial aggregation is performed for each partition. --- Also test GroupAggregate paths by disabling hash aggregates. -SET enable_hashagg TO false; -EXPLAIN (COSTS OFF) -SELECT t1.y, sum(t1.x), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.y HAVING avg(t1.x) > 10 ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------------------- - Sort - Sort Key: t1.y, (sum(t1.x)), (count(*)) - -> Finalize GroupAggregate - Group Key: t1.y - Filter: (avg(t1.x) > '10'::numeric) - -> Merge Append - Sort Key: t1.y - -> Partial GroupAggregate - Group Key: t1.y - -> Sort - Sort Key: t1.y - -> Hash Join - Hash Cond: (t1.x = t2.y) - -> Seq Scan on pagg_tab1_p1 t1 - -> Hash - -> Seq Scan on pagg_tab2_p1 t2 - -> Partial GroupAggregate - Group Key: t1_1.y - -> Sort - Sort Key: t1_1.y - -> Hash Join - Hash Cond: (t1_1.x = t2_1.y) - -> Seq Scan on pagg_tab1_p2 t1_1 - -> Hash - -> Seq Scan on pagg_tab2_p2 t2_1 - -> Partial GroupAggregate - Group Key: t1_2.y - -> Sort - Sort Key: t1_2.y - -> Hash Join - Hash Cond: (t2_2.y = t1_2.x) - -> Seq Scan on pagg_tab2_p3 t2_2 - -> Hash - -> Seq Scan on pagg_tab1_p3 t1_2 -(34 rows) - -SELECT t1.y, sum(t1.x), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.y HAVING avg(t1.x) > 10 ORDER BY 1, 2, 3; - y | sum | count -----+------+------- - 2 | 600 | 50 - 4 | 1200 | 50 - 8 | 900 | 50 - 12 | 600 | 50 - 14 | 1200 | 50 - 18 | 900 | 50 -(6 rows) - -RESET enable_hashagg; --- Check with LEFT/RIGHT/FULL OUTER JOINs which produces NULL values for --- aggregation --- LEFT JOIN, should produce partial partitionwise aggregation plan as --- GROUP BY is on nullable column -EXPLAIN (COSTS OFF) -SELECT b.y, sum(a.y) FROM pagg_tab1 a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; - QUERY PLAN ------------------------------------------------------------------- - Finalize GroupAggregate - Group Key: b.y - -> Sort - Sort Key: b.y - -> Append - -> Partial HashAggregate - Group Key: b.y - -> Hash Left Join - Hash Cond: (a.x = b.y) - -> Seq Scan on pagg_tab1_p1 a - -> Hash - -> Seq Scan on pagg_tab2_p1 b - -> Partial HashAggregate - Group Key: b_1.y - -> Hash Left Join - Hash Cond: (a_1.x = b_1.y) - -> Seq Scan on pagg_tab1_p2 a_1 - -> Hash - -> Seq Scan on pagg_tab2_p2 b_1 - -> Partial HashAggregate - Group Key: b_2.y - -> Hash Right Join - Hash Cond: (b_2.y = a_2.x) - -> Seq Scan on pagg_tab2_p3 b_2 - -> Hash - -> Seq Scan on pagg_tab1_p3 a_2 -(26 rows) - -SELECT b.y, sum(a.y) FROM pagg_tab1 a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; - y | sum -----+------ - 0 | 500 - 6 | 1100 - 12 | 700 - 18 | 1300 - 24 | 900 - | 900 -(6 rows) - --- RIGHT JOIN, should produce full partitionwise aggregation plan as --- GROUP BY is on non-nullable column -EXPLAIN (COSTS OFF) -SELECT b.y, sum(a.y) FROM pagg_tab1 a RIGHT JOIN pagg_tab2 
b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; - QUERY PLAN ------------------------------------------------------------- - Sort - Sort Key: b.y - -> Append - -> HashAggregate - Group Key: b.y - -> Hash Right Join - Hash Cond: (a.x = b.y) - -> Seq Scan on pagg_tab1_p1 a - -> Hash - -> Seq Scan on pagg_tab2_p1 b - -> HashAggregate - Group Key: b_1.y - -> Hash Right Join - Hash Cond: (a_1.x = b_1.y) - -> Seq Scan on pagg_tab1_p2 a_1 - -> Hash - -> Seq Scan on pagg_tab2_p2 b_1 - -> HashAggregate - Group Key: b_2.y - -> Hash Left Join - Hash Cond: (b_2.y = a_2.x) - -> Seq Scan on pagg_tab2_p3 b_2 - -> Hash - -> Seq Scan on pagg_tab1_p3 a_2 -(24 rows) - -SELECT b.y, sum(a.y) FROM pagg_tab1 a RIGHT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; - y | sum -----+------ - 0 | 500 - 3 | - 6 | 1100 - 9 | - 12 | 700 - 15 | - 18 | 1300 - 21 | - 24 | 900 - 27 | -(10 rows) - --- FULL JOIN, should produce partial partitionwise aggregation plan as --- GROUP BY is on nullable column -EXPLAIN (COSTS OFF) -SELECT a.x, sum(b.x) FROM pagg_tab1 a FULL OUTER JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x ORDER BY 1 NULLS LAST; - QUERY PLAN ------------------------------------------------------------------- - Finalize GroupAggregate - Group Key: a.x - -> Sort - Sort Key: a.x - -> Append - -> Partial HashAggregate - Group Key: a.x - -> Hash Full Join - Hash Cond: (a.x = b.y) - -> Seq Scan on pagg_tab1_p1 a - -> Hash - -> Seq Scan on pagg_tab2_p1 b - -> Partial HashAggregate - Group Key: a_1.x - -> Hash Full Join - Hash Cond: (a_1.x = b_1.y) - -> Seq Scan on pagg_tab1_p2 a_1 - -> Hash - -> Seq Scan on pagg_tab2_p2 b_1 - -> Partial HashAggregate - Group Key: a_2.x - -> Hash Full Join - Hash Cond: (b_2.y = a_2.x) - -> Seq Scan on pagg_tab2_p3 b_2 - -> Hash - -> Seq Scan on pagg_tab1_p3 a_2 -(26 rows) - -SELECT a.x, sum(b.x) FROM pagg_tab1 a FULL OUTER JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x ORDER BY 1 NULLS LAST; - x | sum -----+------ - 0 | 500 - 2 | - 4 | - 6 | 1100 - 8 | - 10 | - 12 | 700 - 14 | - 16 | - 18 | 1300 - 20 | - 22 | - 24 | 900 - 26 | - 28 | - | 500 -(16 rows) - --- LEFT JOIN, with dummy relation on right side, ideally --- should produce full partitionwise aggregation plan as GROUP BY is on --- non-nullable columns. --- But right now we are unable to do partitionwise join in this case. 
-EXPLAIN (COSTS OFF) -SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a LEFT JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: pagg_tab1.x, pagg_tab2.y - -> HashAggregate - Group Key: pagg_tab1.x, pagg_tab2.y - -> Hash Left Join - Hash Cond: (pagg_tab1.x = pagg_tab2.y) - Filter: ((pagg_tab1.x > 5) OR (pagg_tab2.y < 20)) - -> Append - -> Seq Scan on pagg_tab1_p1 pagg_tab1_1 - Filter: (x < 20) - -> Seq Scan on pagg_tab1_p2 pagg_tab1_2 - Filter: (x < 20) - -> Hash - -> Append - -> Seq Scan on pagg_tab2_p2 pagg_tab2_1 - Filter: (y > 10) - -> Seq Scan on pagg_tab2_p3 pagg_tab2_2 - Filter: (y > 10) -(18 rows) - -SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a LEFT JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; - x | y | count -----+----+------- - 6 | | 10 - 8 | | 10 - 10 | | 10 - 12 | 12 | 100 - 14 | | 10 - 16 | | 10 - 18 | 18 | 100 -(7 rows) - --- FULL JOIN, with dummy relations on both sides, ideally --- should produce partial partitionwise aggregation plan as GROUP BY is on --- nullable columns. --- But right now we are unable to do partitionwise join in this case. -EXPLAIN (COSTS OFF) -SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a FULL JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: pagg_tab1.x, pagg_tab2.y - -> HashAggregate - Group Key: pagg_tab1.x, pagg_tab2.y - -> Hash Full Join - Hash Cond: (pagg_tab1.x = pagg_tab2.y) - Filter: ((pagg_tab1.x > 5) OR (pagg_tab2.y < 20)) - -> Append - -> Seq Scan on pagg_tab1_p1 pagg_tab1_1 - Filter: (x < 20) - -> Seq Scan on pagg_tab1_p2 pagg_tab1_2 - Filter: (x < 20) - -> Hash - -> Append - -> Seq Scan on pagg_tab2_p2 pagg_tab2_1 - Filter: (y > 10) - -> Seq Scan on pagg_tab2_p3 pagg_tab2_2 - Filter: (y > 10) -(18 rows) - -SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a FULL JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; - x | y | count -----+----+------- - 6 | | 10 - 8 | | 10 - 10 | | 10 - 12 | 12 | 100 - 14 | | 10 - 16 | | 10 - 18 | 18 | 100 - | 15 | 10 -(8 rows) - --- Empty join relation because of empty outer side, no partitionwise agg plan -EXPLAIN (COSTS OFF) -SELECT a.x, a.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x = 1 AND x = 2) a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x, a.y ORDER BY 1, 2; - QUERY PLAN --------------------------------------- - GroupAggregate - Group Key: pagg_tab1.y - -> Sort - Sort Key: pagg_tab1.y - -> Result - One-Time Filter: false -(6 rows) - -SELECT a.x, a.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x = 1 AND x = 2) a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x, a.y ORDER BY 1, 2; - x | y | count ----+---+------- -(0 rows) - --- Partition by multiple columns -CREATE TABLE pagg_tab_m (a int, b int, c int) PARTITION BY RANGE(a, ((a+b)/2)); -CREATE TABLE pagg_tab_m_p1 PARTITION OF pagg_tab_m FOR VALUES FROM (0, 0) TO (12, 12); -CREATE TABLE pagg_tab_m_p2 PARTITION OF pagg_tab_m FOR VALUES FROM (12, 12) TO (22, 22); -CREATE TABLE pagg_tab_m_p3 PARTITION OF pagg_tab_m FOR VALUES FROM (22, 22) TO (30, 30); -INSERT INTO pagg_tab_m SELECT i 
% 30, i % 40, i % 50 FROM generate_series(0, 2999) i; -ANALYZE pagg_tab_m; --- Partial aggregation as GROUP BY clause does not match with PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a HAVING avg(c) < 22 ORDER BY 1, 2, 3; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_m.a, (sum(pagg_tab_m.b)), (avg(pagg_tab_m.c)) - -> Finalize HashAggregate - Group Key: pagg_tab_m.a - Filter: (avg(pagg_tab_m.c) < '22'::numeric) - -> Append - -> Partial HashAggregate - Group Key: pagg_tab_m.a - -> Seq Scan on pagg_tab_m_p1 pagg_tab_m - -> Partial HashAggregate - Group Key: pagg_tab_m_1.a - -> Seq Scan on pagg_tab_m_p2 pagg_tab_m_1 - -> Partial HashAggregate - Group Key: pagg_tab_m_2.a - -> Seq Scan on pagg_tab_m_p3 pagg_tab_m_2 -(15 rows) - -SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a HAVING avg(c) < 22 ORDER BY 1, 2, 3; - a | sum | avg | count -----+------+---------------------+------- - 0 | 1500 | 20.0000000000000000 | 100 - 1 | 1600 | 21.0000000000000000 | 100 - 10 | 1500 | 20.0000000000000000 | 100 - 11 | 1600 | 21.0000000000000000 | 100 - 20 | 1500 | 20.0000000000000000 | 100 - 21 | 1600 | 21.0000000000000000 | 100 -(6 rows) - --- Full aggregation as GROUP BY clause matches with PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a, (a+b)/2 HAVING sum(b) < 50 ORDER BY 1, 2, 3; - QUERY PLAN ----------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_m.a, (sum(pagg_tab_m.b)), (avg(pagg_tab_m.c)) - -> Append - -> HashAggregate - Group Key: pagg_tab_m.a, ((pagg_tab_m.a + pagg_tab_m.b) / 2) - Filter: (sum(pagg_tab_m.b) < 50) - -> Seq Scan on pagg_tab_m_p1 pagg_tab_m - -> HashAggregate - Group Key: pagg_tab_m_1.a, ((pagg_tab_m_1.a + pagg_tab_m_1.b) / 2) - Filter: (sum(pagg_tab_m_1.b) < 50) - -> Seq Scan on pagg_tab_m_p2 pagg_tab_m_1 - -> HashAggregate - Group Key: pagg_tab_m_2.a, ((pagg_tab_m_2.a + pagg_tab_m_2.b) / 2) - Filter: (sum(pagg_tab_m_2.b) < 50) - -> Seq Scan on pagg_tab_m_p3 pagg_tab_m_2 -(15 rows) - -SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a, (a+b)/2 HAVING sum(b) < 50 ORDER BY 1, 2, 3; - a | sum | avg | count -----+-----+---------------------+------- - 0 | 0 | 20.0000000000000000 | 25 - 1 | 25 | 21.0000000000000000 | 25 - 10 | 0 | 20.0000000000000000 | 25 - 11 | 25 | 21.0000000000000000 | 25 - 20 | 0 | 20.0000000000000000 | 25 - 21 | 25 | 21.0000000000000000 | 25 -(6 rows) - --- Full aggregation as PARTITION KEY is part of GROUP BY clause -EXPLAIN (COSTS OFF) -SELECT a, c, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY (a+b)/2, 2, 1 HAVING sum(b) = 50 AND avg(c) > 25 ORDER BY 1, 2, 3; - QUERY PLAN --------------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_m.a, pagg_tab_m.c, (sum(pagg_tab_m.b)) - -> Append - -> HashAggregate - Group Key: ((pagg_tab_m.a + pagg_tab_m.b) / 2), pagg_tab_m.c, pagg_tab_m.a - Filter: ((sum(pagg_tab_m.b) = 50) AND (avg(pagg_tab_m.c) > '25'::numeric)) - -> Seq Scan on pagg_tab_m_p1 pagg_tab_m - -> HashAggregate - Group Key: ((pagg_tab_m_1.a + pagg_tab_m_1.b) / 2), pagg_tab_m_1.c, pagg_tab_m_1.a - Filter: ((sum(pagg_tab_m_1.b) = 50) AND (avg(pagg_tab_m_1.c) > '25'::numeric)) - -> Seq Scan on pagg_tab_m_p2 pagg_tab_m_1 - -> HashAggregate - Group Key: ((pagg_tab_m_2.a + pagg_tab_m_2.b) / 2), pagg_tab_m_2.c, pagg_tab_m_2.a - Filter: ((sum(pagg_tab_m_2.b) = 50) 
AND (avg(pagg_tab_m_2.c) > '25'::numeric)) - -> Seq Scan on pagg_tab_m_p3 pagg_tab_m_2 -(15 rows) - -SELECT a, c, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY (a+b)/2, 2, 1 HAVING sum(b) = 50 AND avg(c) > 25 ORDER BY 1, 2, 3; - a | c | sum | avg | count -----+----+-----+---------------------+------- - 0 | 30 | 50 | 30.0000000000000000 | 5 - 0 | 40 | 50 | 40.0000000000000000 | 5 - 10 | 30 | 50 | 30.0000000000000000 | 5 - 10 | 40 | 50 | 40.0000000000000000 | 5 - 20 | 30 | 50 | 30.0000000000000000 | 5 - 20 | 40 | 50 | 40.0000000000000000 | 5 -(6 rows) - --- Test with multi-level partitioning scheme -CREATE TABLE pagg_tab_ml (a int, b int, c text) PARTITION BY RANGE(a); -CREATE TABLE pagg_tab_ml_p1 PARTITION OF pagg_tab_ml FOR VALUES FROM (0) TO (12); -CREATE TABLE pagg_tab_ml_p2 PARTITION OF pagg_tab_ml FOR VALUES FROM (12) TO (20) PARTITION BY LIST (c); -CREATE TABLE pagg_tab_ml_p2_s1 PARTITION OF pagg_tab_ml_p2 FOR VALUES IN ('0000', '0001', '0002'); -CREATE TABLE pagg_tab_ml_p2_s2 PARTITION OF pagg_tab_ml_p2 FOR VALUES IN ('0003'); --- This level of partitioning has different column positions than the parent -CREATE TABLE pagg_tab_ml_p3(b int, c text, a int) PARTITION BY RANGE (b); -CREATE TABLE pagg_tab_ml_p3_s1(c text, a int, b int); -CREATE TABLE pagg_tab_ml_p3_s2 PARTITION OF pagg_tab_ml_p3 FOR VALUES FROM (7) TO (10); -ALTER TABLE pagg_tab_ml_p3 ATTACH PARTITION pagg_tab_ml_p3_s1 FOR VALUES FROM (0) TO (7); -ALTER TABLE pagg_tab_ml ATTACH PARTITION pagg_tab_ml_p3 FOR VALUES FROM (20) TO (30); -INSERT INTO pagg_tab_ml SELECT i % 30, i % 10, to_char(i % 4, 'FM0000') FROM generate_series(0, 29999) i; -ANALYZE pagg_tab_ml; --- For Parallel Append -SET max_parallel_workers_per_gather TO 2; -SET parallel_setup_cost = 0; --- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY --- for level 1 only. For subpartitions, GROUP BY clause does not match with --- PARTITION KEY, but still we do not see a partial aggregation as array_agg() --- is not partial agg safe. 
-EXPLAIN (COSTS OFF) -SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - QUERY PLAN --------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (array_agg(DISTINCT pagg_tab_ml.c)) - -> Gather - Workers Planned: 2 - -> Parallel Append - -> GroupAggregate - Group Key: pagg_tab_ml.a - Filter: (avg(pagg_tab_ml.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml.a, pagg_tab_ml.c - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> GroupAggregate - Group Key: pagg_tab_ml_5.a - Filter: (avg(pagg_tab_ml_5.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_5.a, pagg_tab_ml_5.c - -> Append - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5 - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6 - -> GroupAggregate - Group Key: pagg_tab_ml_2.a - Filter: (avg(pagg_tab_ml_2.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_2.a, pagg_tab_ml_2.c - -> Append - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2 - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3 -(27 rows) - -SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - a | sum | array_agg | count -----+------+-------------+------- - 0 | 0 | {0000,0002} | 1000 - 1 | 1000 | {0001,0003} | 1000 - 2 | 2000 | {0000,0002} | 1000 - 10 | 0 | {0000,0002} | 1000 - 11 | 1000 | {0001,0003} | 1000 - 12 | 2000 | {0000,0002} | 1000 - 20 | 0 | {0000,0002} | 1000 - 21 | 1000 | {0001,0003} | 1000 - 22 | 2000 | {0000,0002} | 1000 -(9 rows) - --- Without ORDER BY clause, to test Gather at top-most path -EXPLAIN (COSTS OFF) -SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3; - QUERY PLAN ---------------------------------------------------------------------------- - Gather - Workers Planned: 2 - -> Parallel Append - -> GroupAggregate - Group Key: pagg_tab_ml.a - Filter: (avg(pagg_tab_ml.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml.a, pagg_tab_ml.c - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> GroupAggregate - Group Key: pagg_tab_ml_5.a - Filter: (avg(pagg_tab_ml_5.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_5.a, pagg_tab_ml_5.c - -> Append - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5 - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6 - -> GroupAggregate - Group Key: pagg_tab_ml_2.a - Filter: (avg(pagg_tab_ml_2.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_2.a, pagg_tab_ml_2.c - -> Append - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2 - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3 -(25 rows) - -RESET parallel_setup_cost; --- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY --- for level 1 only. For subpartitions, GROUP BY clause does not match with --- PARTITION KEY, thus we will have a partial aggregation for them. 
-EXPLAIN (COSTS OFF) -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - QUERY PLAN ---------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (count(*)) - -> Append - -> HashAggregate - Group Key: pagg_tab_ml.a - Filter: (avg(pagg_tab_ml.b) < '3'::numeric) - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> Finalize GroupAggregate - Group Key: pagg_tab_ml_2.a - Filter: (avg(pagg_tab_ml_2.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_2.a - -> Append - -> Partial HashAggregate - Group Key: pagg_tab_ml_2.a - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2 - -> Partial HashAggregate - Group Key: pagg_tab_ml_3.a - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3 - -> Finalize GroupAggregate - Group Key: pagg_tab_ml_5.a - Filter: (avg(pagg_tab_ml_5.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_5.a - -> Append - -> Partial HashAggregate - Group Key: pagg_tab_ml_5.a - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5 - -> Partial HashAggregate - Group Key: pagg_tab_ml_6.a - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6 -(31 rows) - -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - a | sum | count -----+------+------- - 0 | 0 | 1000 - 1 | 1000 | 1000 - 2 | 2000 | 1000 - 10 | 0 | 1000 - 11 | 1000 | 1000 - 12 | 2000 | 1000 - 20 | 0 | 1000 - 21 | 1000 | 1000 - 22 | 2000 | 1000 -(9 rows) - --- Partial aggregation at all levels as GROUP BY clause does not match with --- PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b ORDER BY 1, 2, 3; - QUERY PLAN ---------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.b, (sum(pagg_tab_ml.a)), (count(*)) - -> Finalize GroupAggregate - Group Key: pagg_tab_ml.b - -> Sort - Sort Key: pagg_tab_ml.b - -> Append - -> Partial HashAggregate - Group Key: pagg_tab_ml.b - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> Partial HashAggregate - Group Key: pagg_tab_ml_1.b - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1 - -> Partial HashAggregate - Group Key: pagg_tab_ml_2.b - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2 - -> Partial HashAggregate - Group Key: pagg_tab_ml_3.b - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3 - -> Partial HashAggregate - Group Key: pagg_tab_ml_4.b - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4 -(22 rows) - -SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b HAVING avg(a) < 15 ORDER BY 1, 2, 3; - b | sum | count ----+-------+------- - 0 | 30000 | 3000 - 1 | 33000 | 3000 - 2 | 36000 | 3000 - 3 | 39000 | 3000 - 4 | 42000 | 3000 -(5 rows) - --- Full aggregation at all levels as GROUP BY clause matches with PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; - QUERY PLAN ----------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (count(*)) - -> Append - -> HashAggregate - Group Key: pagg_tab_ml.a, pagg_tab_ml.b, pagg_tab_ml.c - Filter: (avg(pagg_tab_ml.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> HashAggregate - Group Key: pagg_tab_ml_1.a, pagg_tab_ml_1.b, pagg_tab_ml_1.c - Filter: (avg(pagg_tab_ml_1.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1 - -> HashAggregate - Group Key: pagg_tab_ml_2.a, pagg_tab_ml_2.b, pagg_tab_ml_2.c - Filter: (avg(pagg_tab_ml_2.b) > '7'::numeric) - -> Seq Scan on 
pagg_tab_ml_p2_s2 pagg_tab_ml_2 - -> HashAggregate - Group Key: pagg_tab_ml_3.a, pagg_tab_ml_3.b, pagg_tab_ml_3.c - Filter: (avg(pagg_tab_ml_3.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3 - -> HashAggregate - Group Key: pagg_tab_ml_4.a, pagg_tab_ml_4.b, pagg_tab_ml_4.c - Filter: (avg(pagg_tab_ml_4.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4 -(23 rows) - -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; - a | sum | count -----+------+------- - 8 | 4000 | 500 - 8 | 4000 | 500 - 9 | 4500 | 500 - 9 | 4500 | 500 - 18 | 4000 | 500 - 18 | 4000 | 500 - 19 | 4500 | 500 - 19 | 4500 | 500 - 28 | 4000 | 500 - 28 | 4000 | 500 - 29 | 4500 | 500 - 29 | 4500 | 500 -(12 rows) - --- Parallelism within partitionwise aggregates -SET min_parallel_table_scan_size TO '8kB'; -SET parallel_setup_cost TO 0; --- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY --- for level 1 only. For subpartitions, GROUP BY clause does not match with --- PARTITION KEY, thus we will have a partial aggregation for them. -EXPLAIN (COSTS OFF) -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - QUERY PLAN ------------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (count(*)) - -> Append - -> Finalize GroupAggregate - Group Key: pagg_tab_ml.a - Filter: (avg(pagg_tab_ml.b) < '3'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_ml.a - -> Partial HashAggregate - Group Key: pagg_tab_ml.a - -> Parallel Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> Finalize GroupAggregate - Group Key: pagg_tab_ml_2.a - Filter: (avg(pagg_tab_ml_2.b) < '3'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_ml_2.a - -> Parallel Append - -> Partial HashAggregate - Group Key: pagg_tab_ml_2.a - -> Parallel Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2 - -> Partial HashAggregate - Group Key: pagg_tab_ml_3.a - -> Parallel Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3 - -> Finalize GroupAggregate - Group Key: pagg_tab_ml_5.a - Filter: (avg(pagg_tab_ml_5.b) < '3'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_ml_5.a - -> Parallel Append - -> Partial HashAggregate - Group Key: pagg_tab_ml_5.a - -> Parallel Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5 - -> Partial HashAggregate - Group Key: pagg_tab_ml_6.a - -> Parallel Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6 -(41 rows) - -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - a | sum | count -----+------+------- - 0 | 0 | 1000 - 1 | 1000 | 1000 - 2 | 2000 | 1000 - 10 | 0 | 1000 - 11 | 1000 | 1000 - 12 | 2000 | 1000 - 20 | 0 | 1000 - 21 | 1000 | 1000 - 22 | 2000 | 1000 -(9 rows) - --- Partial aggregation at all levels as GROUP BY clause does not match with --- PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b ORDER BY 1, 2, 3; - QUERY PLAN ------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.b, (sum(pagg_tab_ml.a)), (count(*)) - -> Finalize GroupAggregate - Group Key: pagg_tab_ml.b - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_ml.b - -> Parallel Append - -> Partial HashAggregate - Group Key: pagg_tab_ml.b - -> Parallel Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> Partial HashAggregate - Group Key: pagg_tab_ml_3.b 
- -> Parallel Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3 - -> Partial HashAggregate - Group Key: pagg_tab_ml_1.b - -> Parallel Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1 - -> Partial HashAggregate - Group Key: pagg_tab_ml_4.b - -> Parallel Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4 - -> Partial HashAggregate - Group Key: pagg_tab_ml_2.b - -> Parallel Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2 -(24 rows) - -SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b HAVING avg(a) < 15 ORDER BY 1, 2, 3; - b | sum | count ----+-------+------- - 0 | 30000 | 3000 - 1 | 33000 | 3000 - 2 | 36000 | 3000 - 3 | 39000 | 3000 - 4 | 42000 | 3000 -(5 rows) - --- Full aggregation at all levels as GROUP BY clause matches with PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; - QUERY PLAN ----------------------------------------------------------------------------------- - Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (count(*)) - -> Parallel Append - -> HashAggregate - Group Key: pagg_tab_ml.a, pagg_tab_ml.b, pagg_tab_ml.c - Filter: (avg(pagg_tab_ml.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> HashAggregate - Group Key: pagg_tab_ml_3.a, pagg_tab_ml_3.b, pagg_tab_ml_3.c - Filter: (avg(pagg_tab_ml_3.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3 - -> HashAggregate - Group Key: pagg_tab_ml_1.a, pagg_tab_ml_1.b, pagg_tab_ml_1.c - Filter: (avg(pagg_tab_ml_1.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1 - -> HashAggregate - Group Key: pagg_tab_ml_4.a, pagg_tab_ml_4.b, pagg_tab_ml_4.c - Filter: (avg(pagg_tab_ml_4.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4 - -> HashAggregate - Group Key: pagg_tab_ml_2.a, pagg_tab_ml_2.b, pagg_tab_ml_2.c - Filter: (avg(pagg_tab_ml_2.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2 -(25 rows) - -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; - a | sum | count -----+------+------- - 8 | 4000 | 500 - 8 | 4000 | 500 - 9 | 4500 | 500 - 9 | 4500 | 500 - 18 | 4000 | 500 - 18 | 4000 | 500 - 19 | 4500 | 500 - 19 | 4500 | 500 - 28 | 4000 | 500 - 28 | 4000 | 500 - 29 | 4500 | 500 - 29 | 4500 | 500 -(12 rows) - --- Parallelism within partitionwise aggregates (single level) --- Add few parallel setup cost, so that we will see a plan which gathers --- partially created paths even for full aggregation and sticks a single Gather --- followed by finalization step. --- Without this, the cost of doing partial aggregation + Gather + finalization --- for each partition and then Append over it turns out to be same and this --- wins as we add it first. This parallel_setup_cost plays a vital role in --- costing such plans. -SET parallel_setup_cost TO 10; -CREATE TABLE pagg_tab_para(x int, y int) PARTITION BY RANGE(x); -CREATE TABLE pagg_tab_para_p1 PARTITION OF pagg_tab_para FOR VALUES FROM (0) TO (12); -CREATE TABLE pagg_tab_para_p2 PARTITION OF pagg_tab_para FOR VALUES FROM (12) TO (22); -CREATE TABLE pagg_tab_para_p3 PARTITION OF pagg_tab_para FOR VALUES FROM (22) TO (30); -INSERT INTO pagg_tab_para SELECT i % 30, i % 20 FROM generate_series(0, 29999) i; -ANALYZE pagg_tab_para; --- When GROUP BY clause matches; full aggregation is performed for each partition. 
-EXPLAIN (COSTS OFF) -SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_para.x, (sum(pagg_tab_para.y)), (avg(pagg_tab_para.y)) - -> Finalize GroupAggregate - Group Key: pagg_tab_para.x - Filter: (avg(pagg_tab_para.y) < '7'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_para.x - -> Parallel Append - -> Partial HashAggregate - Group Key: pagg_tab_para.x - -> Parallel Seq Scan on pagg_tab_para_p1 pagg_tab_para - -> Partial HashAggregate - Group Key: pagg_tab_para_1.x - -> Parallel Seq Scan on pagg_tab_para_p2 pagg_tab_para_1 - -> Partial HashAggregate - Group Key: pagg_tab_para_2.x - -> Parallel Seq Scan on pagg_tab_para_p3 pagg_tab_para_2 -(19 rows) - -SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - x | sum | avg | count -----+------+--------------------+------- - 0 | 5000 | 5.0000000000000000 | 1000 - 1 | 6000 | 6.0000000000000000 | 1000 - 10 | 5000 | 5.0000000000000000 | 1000 - 11 | 6000 | 6.0000000000000000 | 1000 - 20 | 5000 | 5.0000000000000000 | 1000 - 21 | 6000 | 6.0000000000000000 | 1000 -(6 rows) - --- When GROUP BY clause does not match; partial aggregation is performed for each partition. -EXPLAIN (COSTS OFF) -SELECT y, sum(x), avg(x), count(*) FROM pagg_tab_para GROUP BY y HAVING avg(x) < 12 ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_para.y, (sum(pagg_tab_para.x)), (avg(pagg_tab_para.x)) - -> Finalize GroupAggregate - Group Key: pagg_tab_para.y - Filter: (avg(pagg_tab_para.x) < '12'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_para.y - -> Parallel Append - -> Partial HashAggregate - Group Key: pagg_tab_para.y - -> Parallel Seq Scan on pagg_tab_para_p1 pagg_tab_para - -> Partial HashAggregate - Group Key: pagg_tab_para_1.y - -> Parallel Seq Scan on pagg_tab_para_p2 pagg_tab_para_1 - -> Partial HashAggregate - Group Key: pagg_tab_para_2.y - -> Parallel Seq Scan on pagg_tab_para_p3 pagg_tab_para_2 -(19 rows) - -SELECT y, sum(x), avg(x), count(*) FROM pagg_tab_para GROUP BY y HAVING avg(x) < 12 ORDER BY 1, 2, 3; - y | sum | avg | count -----+-------+---------------------+------- - 0 | 15000 | 10.0000000000000000 | 1500 - 1 | 16500 | 11.0000000000000000 | 1500 - 10 | 15000 | 10.0000000000000000 | 1500 - 11 | 16500 | 11.0000000000000000 | 1500 -(4 rows) - --- Test when parent can produce parallel paths but not any (or some) of its children --- (Use one more aggregate to tilt the cost estimates for the plan we want) -ALTER TABLE pagg_tab_para_p1 SET (parallel_workers = 0); -ALTER TABLE pagg_tab_para_p3 SET (parallel_workers = 0); -ANALYZE pagg_tab_para; -EXPLAIN (COSTS OFF) -SELECT x, sum(y), avg(y), sum(x+y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_para.x, (sum(pagg_tab_para.y)), (avg(pagg_tab_para.y)) - -> Finalize GroupAggregate - Group Key: pagg_tab_para.x - Filter: (avg(pagg_tab_para.y) < '7'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_para.x - -> Partial HashAggregate - Group Key: pagg_tab_para.x - -> Parallel Append - -> Seq Scan on pagg_tab_para_p1 pagg_tab_para_1 - -> 
Seq Scan on pagg_tab_para_p3 pagg_tab_para_3 - -> Parallel Seq Scan on pagg_tab_para_p2 pagg_tab_para_2 -(15 rows) - -SELECT x, sum(y), avg(y), sum(x+y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - x | sum | avg | sum | count -----+------+--------------------+-------+------- - 0 | 5000 | 5.0000000000000000 | 5000 | 1000 - 1 | 6000 | 6.0000000000000000 | 7000 | 1000 - 10 | 5000 | 5.0000000000000000 | 15000 | 1000 - 11 | 6000 | 6.0000000000000000 | 17000 | 1000 - 20 | 5000 | 5.0000000000000000 | 25000 | 1000 - 21 | 6000 | 6.0000000000000000 | 27000 | 1000 -(6 rows) - -ALTER TABLE pagg_tab_para_p2 SET (parallel_workers = 0); -ANALYZE pagg_tab_para; -EXPLAIN (COSTS OFF) -SELECT x, sum(y), avg(y), sum(x+y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - QUERY PLAN ----------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_para.x, (sum(pagg_tab_para.y)), (avg(pagg_tab_para.y)) - -> Finalize GroupAggregate - Group Key: pagg_tab_para.x - Filter: (avg(pagg_tab_para.y) < '7'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_para.x - -> Partial HashAggregate - Group Key: pagg_tab_para.x - -> Parallel Append - -> Seq Scan on pagg_tab_para_p1 pagg_tab_para_1 - -> Seq Scan on pagg_tab_para_p2 pagg_tab_para_2 - -> Seq Scan on pagg_tab_para_p3 pagg_tab_para_3 -(15 rows) - -SELECT x, sum(y), avg(y), sum(x+y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - x | sum | avg | sum | count -----+------+--------------------+-------+------- - 0 | 5000 | 5.0000000000000000 | 5000 | 1000 - 1 | 6000 | 6.0000000000000000 | 7000 | 1000 - 10 | 5000 | 5.0000000000000000 | 15000 | 1000 - 11 | 6000 | 6.0000000000000000 | 17000 | 1000 - 20 | 5000 | 5.0000000000000000 | 25000 | 1000 - 21 | 6000 | 6.0000000000000000 | 27000 | 1000 -(6 rows) - --- Reset parallelism parameters to get partitionwise aggregation plan. -RESET min_parallel_table_scan_size; -RESET parallel_setup_cost; -EXPLAIN (COSTS OFF) -SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - QUERY PLAN ------------------------------------------------------------------------------ - Sort - Sort Key: pagg_tab_para.x, (sum(pagg_tab_para.y)), (avg(pagg_tab_para.y)) - -> Append - -> HashAggregate - Group Key: pagg_tab_para.x - Filter: (avg(pagg_tab_para.y) < '7'::numeric) - -> Seq Scan on pagg_tab_para_p1 pagg_tab_para - -> HashAggregate - Group Key: pagg_tab_para_1.x - Filter: (avg(pagg_tab_para_1.y) < '7'::numeric) - -> Seq Scan on pagg_tab_para_p2 pagg_tab_para_1 - -> HashAggregate - Group Key: pagg_tab_para_2.x - Filter: (avg(pagg_tab_para_2.y) < '7'::numeric) - -> Seq Scan on pagg_tab_para_p3 pagg_tab_para_2 -(15 rows) - -SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - x | sum | avg | count -----+------+--------------------+------- - 0 | 5000 | 5.0000000000000000 | 1000 - 1 | 6000 | 6.0000000000000000 | 1000 - 10 | 5000 | 5.0000000000000000 | 1000 - 11 | 6000 | 6.0000000000000000 | 1000 - 20 | 5000 | 5.0000000000000000 | 1000 - 21 | 6000 | 6.0000000000000000 | 1000 -(6 rows) - +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. 
+connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/stats.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/stats.out --- /tmp/cirrus-ci-build/src/test/regress/expected/stats.out 2024-03-07 14:25:00.333932000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/stats.out 2024-03-07 14:27:23.679024000 +0000 @@ -1203,447 +1203,10 @@ (1 row) REINDEX index CONCURRENTLY stats_test_idx1; --- false for previous oid -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - f -(1 row) - --- true for new oid -SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid \gset -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - t -(1 row) - --- pg_stat_have_stats returns true for a rolled back drop index with stats -BEGIN; -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - t -(1 row) - -DROP index stats_test_idx1; -ROLLBACK; -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - t -(1 row) - --- put enable_seqscan back to on -SET enable_seqscan TO on; --- ensure that stats accessors handle NULL input correctly -SELECT pg_stat_get_replication_slot(NULL); - pg_stat_get_replication_slot ------------------------------- - -(1 row) - -SELECT pg_stat_get_subscription_stats(NULL); - pg_stat_get_subscription_stats --------------------------------- - -(1 row) - --- Test that the following operations are tracked in pg_stat_io: --- - reads of target blocks into shared buffers --- - writes of shared buffers to permanent storage --- - extends of relations using shared buffers --- - fsyncs done to ensure the durability of data dirtying shared buffers --- - shared buffer hits --- There is no test for blocks evicted from shared buffers, because we cannot --- be sure of the state of shared buffers at the point the test is run. --- Create a regular table and insert some data to generate IOCONTEXT_NORMAL --- extends. -SELECT sum(extends) AS io_sum_shared_before_extends - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset -SELECT sum(writes) AS writes, sum(fsyncs) AS fsyncs - FROM pg_stat_io - WHERE object = 'relation' \gset io_sum_shared_before_ -CREATE TABLE test_io_shared(a int); -INSERT INTO test_io_shared SELECT i FROM generate_series(1,100)i; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(extends) AS io_sum_shared_after_extends - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset -SELECT :io_sum_shared_after_extends > :io_sum_shared_before_extends; - ?column? ----------- - t -(1 row) - --- After a checkpoint, there should be some additional IOCONTEXT_NORMAL writes --- and fsyncs. --- See comment above for rationale for two explicit CHECKPOINTs. -CHECKPOINT; -CHECKPOINT; -SELECT sum(writes) AS writes, sum(fsyncs) AS fsyncs - FROM pg_stat_io - WHERE object = 'relation' \gset io_sum_shared_after_ -SELECT :io_sum_shared_after_writes > :io_sum_shared_before_writes; - ?column? ----------- - t -(1 row) - -SELECT current_setting('fsync') = 'off' - OR :io_sum_shared_after_fsyncs > :io_sum_shared_before_fsyncs; - ?column? ----------- - t -(1 row) - --- Change the tablespace so that the table is rewritten directly, then SELECT --- from it to cause it to be read back into shared buffers. 
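
A note on technique: every pg_stat_io check in this stats test follows the same before/after delta pattern. It snapshots an aggregate into a psql variable with \gset, performs some I/O, forces the backend to report its pending statistics, and compares the two snapshots. A minimal standalone sketch of that pattern, assuming a hypothetical scratch table t (pg_stat_io and pg_stat_force_next_flush() are the stock objects this test already uses):

    SELECT sum(reads) AS io_reads_before
      FROM pg_stat_io
     WHERE context = 'normal' AND object = 'relation' \gset
    SELECT count(*) FROM t;             -- any statement expected to generate reads
    SELECT pg_stat_force_next_flush();  -- report this backend's pending stats now
    SELECT sum(reads) AS io_reads_after
      FROM pg_stat_io
     WHERE context = 'normal' AND object = 'relation' \gset
    SELECT :io_reads_after > :io_reads_before;  -- expected: t
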
-SELECT sum(reads) AS io_sum_shared_before_reads - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset --- Do this in a transaction to prevent spurious failures due to concurrent accesses to our newly --- rewritten table, e.g. by autovacuum. -BEGIN; -ALTER TABLE test_io_shared SET TABLESPACE regress_tblspace; --- SELECT from the table so that the data is read into shared buffers and --- context 'normal', object 'relation' reads are counted. -SELECT COUNT(*) FROM test_io_shared; - count ------- - 100 -(1 row) - -COMMIT; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(reads) AS io_sum_shared_after_reads - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset -SELECT :io_sum_shared_after_reads > :io_sum_shared_before_reads; - ?column? ----------- - t -(1 row) - -SELECT sum(hits) AS io_sum_shared_before_hits - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset --- Select from the table again to count hits. --- Ensure we generate hits by forcing a nested loop self-join with no --- materialize node. The outer side's buffer will stay pinned, preventing its --- eviction, while we loop through the inner side and generate hits. -BEGIN; -SET LOCAL enable_nestloop TO on; SET LOCAL enable_mergejoin TO off; -SET LOCAL enable_hashjoin TO off; SET LOCAL enable_material TO off; --- ensure the plan stays as we expect it to -EXPLAIN (COSTS OFF) SELECT COUNT(*) FROM test_io_shared t1 INNER JOIN test_io_shared t2 USING (a); - QUERY PLAN -------------------------------------------- - Aggregate - -> Nested Loop - Join Filter: (t1.a = t2.a) - -> Seq Scan on test_io_shared t1 - -> Seq Scan on test_io_shared t2 -(5 rows) - -SELECT COUNT(*) FROM test_io_shared t1 INNER JOIN test_io_shared t2 USING (a); - count ------- - 100 -(1 row) - -COMMIT; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(hits) AS io_sum_shared_after_hits - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset -SELECT :io_sum_shared_after_hits > :io_sum_shared_before_hits; - ?column? ----------- - t -(1 row) - -DROP TABLE test_io_shared; --- Test that the following IOCONTEXT_LOCAL IOOps are tracked in pg_stat_io: --- - eviction of local buffers in order to reuse them --- - reads of temporary table blocks into local buffers --- - writes of local buffers to permanent storage --- - extends of temporary tables --- Set temp_buffers to its minimum so that we can trigger writes with fewer --- inserted tuples. Do so in a new session in case temporary tables have been --- accessed by previous tests in this session. -\c -SET temp_buffers TO 100; -CREATE TEMPORARY TABLE test_io_local(a int, b TEXT); -SELECT sum(extends) AS extends, sum(evictions) AS evictions, sum(writes) AS writes - FROM pg_stat_io - WHERE context = 'normal' AND object = 'temp relation' \gset io_sum_local_before_ --- Insert tuples into the temporary table, generating extends in the stats. --- Insert enough values that we need to reuse and write out dirty local --- buffers, generating evictions and writes. -INSERT INTO test_io_local SELECT generate_series(1, 5000) as id, repeat('a', 200); --- Ensure the table is large enough to exceed our temp_buffers setting. -SELECT pg_relation_size('test_io_local') / current_setting('block_size')::int8 > 100; - ?column? 
----------- - t -(1 row) - -SELECT sum(reads) AS io_sum_local_before_reads - FROM pg_stat_io WHERE context = 'normal' AND object = 'temp relation' \gset --- Read in evicted buffers, generating reads. -SELECT COUNT(*) FROM test_io_local; - count -------- - 5000 -(1 row) - -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(evictions) AS evictions, - sum(reads) AS reads, - sum(writes) AS writes, - sum(extends) AS extends - FROM pg_stat_io - WHERE context = 'normal' AND object = 'temp relation' \gset io_sum_local_after_ -SELECT :io_sum_local_after_evictions > :io_sum_local_before_evictions, - :io_sum_local_after_reads > :io_sum_local_before_reads, - :io_sum_local_after_writes > :io_sum_local_before_writes, - :io_sum_local_after_extends > :io_sum_local_before_extends; - ?column? | ?column? | ?column? | ?column? -----------+----------+----------+---------- - t | t | t | t -(1 row) - --- Change the tablespaces so that the temporary table is rewritten to other --- local buffers, exercising a different codepath than standard local buffer --- writes. -ALTER TABLE test_io_local SET TABLESPACE regress_tblspace; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(writes) AS io_sum_local_new_tblspc_writes - FROM pg_stat_io WHERE context = 'normal' AND object = 'temp relation' \gset -SELECT :io_sum_local_new_tblspc_writes > :io_sum_local_after_writes; - ?column? ----------- - t -(1 row) - -RESET temp_buffers; --- Test that reuse of strategy buffers and reads of blocks into these reused --- buffers while VACUUMing are tracked in pg_stat_io. If there is sufficient --- demand for shared buffers from concurrent queries, some buffers may be --- pinned by other backends before they can be reused. In such cases, the --- backend will evict a buffer from outside the ring and add it to the --- ring. This is considered an eviction and not a reuse. --- Set wal_skip_threshold smaller than the expected size of --- test_io_vac_strategy so that, even if wal_level is minimal, VACUUM FULL will --- fsync the newly rewritten test_io_vac_strategy instead of writing it to WAL. --- Writing it to WAL will result in the newly written relation pages being in --- shared buffers -- preventing us from testing BAS_VACUUM BufferAccessStrategy --- reads. -SET wal_skip_threshold = '1 kB'; -SELECT sum(reuses) AS reuses, sum(reads) AS reads, sum(evictions) AS evictions - FROM pg_stat_io WHERE context = 'vacuum' \gset io_sum_vac_strategy_before_ -CREATE TABLE test_io_vac_strategy(a int, b int) WITH (autovacuum_enabled = 'false'); -INSERT INTO test_io_vac_strategy SELECT i, i from generate_series(1, 4500)i; --- Ensure that the next VACUUM will need to perform IO by rewriting the table --- first with VACUUM (FULL). -VACUUM (FULL) test_io_vac_strategy; --- Use the minimum BUFFER_USAGE_LIMIT to cause reuses or evictions with the --- smallest table possible. -VACUUM (PARALLEL 0, BUFFER_USAGE_LIMIT 128) test_io_vac_strategy; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(reuses) AS reuses, sum(reads) AS reads, sum(evictions) AS evictions - FROM pg_stat_io WHERE context = 'vacuum' \gset io_sum_vac_strategy_after_ -SELECT :io_sum_vac_strategy_after_reads > :io_sum_vac_strategy_before_reads; - ?column? 
---------- - t -(1 row) - -SELECT (:io_sum_vac_strategy_after_reuses + :io_sum_vac_strategy_after_evictions) > - (:io_sum_vac_strategy_before_reuses + :io_sum_vac_strategy_before_evictions); - ?column? ----------- - t -(1 row) - -RESET wal_skip_threshold; --- Test that extends done by a CTAS, which uses a BAS_BULKWRITE --- BufferAccessStrategy, are tracked in pg_stat_io. -SELECT sum(extends) AS io_sum_bulkwrite_strategy_extends_before - FROM pg_stat_io WHERE context = 'bulkwrite' \gset -CREATE TABLE test_io_bulkwrite_strategy AS SELECT i FROM generate_series(1,100)i; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(extends) AS io_sum_bulkwrite_strategy_extends_after - FROM pg_stat_io WHERE context = 'bulkwrite' \gset -SELECT :io_sum_bulkwrite_strategy_extends_after > :io_sum_bulkwrite_strategy_extends_before; - ?column? ----------- - t -(1 row) - --- Test IO stats reset -SELECT pg_stat_have_stats('io', 0, 0); - pg_stat_have_stats --------------------- - t -(1 row) - -SELECT sum(evictions) + sum(reuses) + sum(extends) + sum(fsyncs) + sum(reads) + sum(writes) + sum(writebacks) + sum(hits) AS io_stats_pre_reset - FROM pg_stat_io \gset -SELECT pg_stat_reset_shared('io'); - pg_stat_reset_shared ----------------------- - -(1 row) - -SELECT sum(evictions) + sum(reuses) + sum(extends) + sum(fsyncs) + sum(reads) + sum(writes) + sum(writebacks) + sum(hits) AS io_stats_post_reset - FROM pg_stat_io \gset -SELECT :io_stats_post_reset < :io_stats_pre_reset; - ?column? ----------- - t -(1 row) - --- test BRIN index doesn't block HOT update -CREATE TABLE brin_hot ( - id integer PRIMARY KEY, - val integer NOT NULL -) WITH (autovacuum_enabled = off, fillfactor = 70); -INSERT INTO brin_hot SELECT *, 0 FROM generate_series(1, 235); -CREATE INDEX val_brin ON brin_hot using brin(val); -CREATE FUNCTION wait_for_hot_stats() RETURNS void AS $$ -DECLARE - start_time timestamptz := clock_timestamp(); - updated bool; -BEGIN - -- we don't want to wait forever; loop will exit after 30 seconds - FOR i IN 1 .. 300 LOOP - SELECT (pg_stat_get_tuples_hot_updated('brin_hot'::regclass::oid) > 0) INTO updated; - EXIT WHEN updated; - - -- wait a little - PERFORM pg_sleep_for('100 milliseconds'); - -- reset stats snapshot so we can test again - PERFORM pg_stat_clear_snapshot(); - END LOOP; - -- report time waited in postmaster log (where it won't change test output) - RAISE log 'wait_for_hot_stats delayed % seconds', - EXTRACT(epoch FROM clock_timestamp() - start_time); -END -$$ LANGUAGE plpgsql; -UPDATE brin_hot SET val = -3 WHERE id = 42; --- We can't just call wait_for_hot_stats() at this point, because we only --- transmit stats when the session goes idle, and we probably didn't --- transmit the last couple of counts yet thanks to the rate-limiting logic --- in pgstat_report_stat(). But instead of waiting for the rate limiter's --- timeout to elapse, let's just start a new session. The old one will --- then send its stats before dying. -\c - -SELECT wait_for_hot_stats(); - wait_for_hot_stats --------------------- - -(1 row) - -SELECT pg_stat_get_tuples_hot_updated('brin_hot'::regclass::oid); - pg_stat_get_tuples_hot_updated --------------------------------- - 1 -(1 row) - -DROP TABLE brin_hot; -DROP FUNCTION wait_for_hot_stats(); --- Test handling of index predicates - updating attributes in predicates --- should not block HOT when summarizing indexes are involved. 
We update --- a row that was not indexed due to the index predicate, and becomes --- indexable - the HOT-updated tuple is forwarded to the BRIN index. -CREATE TABLE brin_hot_2 (a int, b int); -INSERT INTO brin_hot_2 VALUES (1, 100); -CREATE INDEX ON brin_hot_2 USING brin (b) WHERE a = 2; -UPDATE brin_hot_2 SET a = 2; -EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_2 WHERE a = 2 AND b = 100; - QUERY PLAN ------------------------------------ - Seq Scan on brin_hot_2 - Filter: ((a = 2) AND (b = 100)) -(2 rows) - -SELECT COUNT(*) FROM brin_hot_2 WHERE a = 2 AND b = 100; - count -------- - 1 -(1 row) - -SET enable_seqscan = off; -EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_2 WHERE a = 2 AND b = 100; - QUERY PLAN ---------------------------------------------- - Bitmap Heap Scan on brin_hot_2 - Recheck Cond: ((b = 100) AND (a = 2)) - -> Bitmap Index Scan on brin_hot_2_b_idx - Index Cond: (b = 100) -(4 rows) - -SELECT COUNT(*) FROM brin_hot_2 WHERE a = 2 AND b = 100; - count -------- - 1 -(1 row) - -DROP TABLE brin_hot_2; --- Test that updates to indexed columns are still propagated to the --- BRIN column. --- https://postgr.es/m/05ebcb44-f383-86e3-4f31-0a97a55634cf@enterprisedb.com -CREATE TABLE brin_hot_3 (a int, filler text) WITH (fillfactor = 10); -INSERT INTO brin_hot_3 SELECT 1, repeat(' ', 500) FROM generate_series(1, 20); -CREATE INDEX ON brin_hot_3 USING brin (a) WITH (pages_per_range = 1); -UPDATE brin_hot_3 SET a = 2; -EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_3 WHERE a = 2; - QUERY PLAN ---------------------------------------------- - Bitmap Heap Scan on brin_hot_3 - Recheck Cond: (a = 2) - -> Bitmap Index Scan on brin_hot_3_a_idx - Index Cond: (a = 2) -(4 rows) - -SELECT COUNT(*) FROM brin_hot_3 WHERE a = 2; - count -------- - 20 -(1 row) - -DROP TABLE brin_hot_3; -SET enable_seqscan = on; --- End of Stats Test +WARNING: terminating connection because of crash of another server process +DETAIL: The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory. +HINT: In a moment you should be able to reconnect to the database and repeat your command. +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/oidjoins.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/oidjoins.out --- /tmp/cirrus-ci-build/src/test/regress/expected/oidjoins.out 2024-03-07 14:25:00.332343000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/oidjoins.out 2024-03-07 14:27:23.727658000 +0000 @@ -1,268 +1,2 @@ --- --- Verify system catalog foreign key relationships --- -DO $doblock$ -declare - fk record; - nkeys integer; - cmd text; - err record; -begin - for fk in select * from pg_get_catalog_foreign_keys() - loop - raise notice 'checking % % => % %', - fk.fktable, fk.fkcols, fk.pktable, fk.pkcols; - nkeys := array_length(fk.fkcols, 1); - cmd := 'SELECT ctid'; - for i in 1 .. nkeys loop - cmd := cmd || ', ' || quote_ident(fk.fkcols[i]); - end loop; - if fk.is_array then - cmd := cmd || ' FROM (SELECT ctid'; - for i in 1 .. 
nkeys-1 loop - cmd := cmd || ', ' || quote_ident(fk.fkcols[i]); - end loop; - cmd := cmd || ', unnest(' || quote_ident(fk.fkcols[nkeys]); - cmd := cmd || ') as ' || quote_ident(fk.fkcols[nkeys]); - cmd := cmd || ' FROM ' || fk.fktable::text || ') fk WHERE '; - else - cmd := cmd || ' FROM ' || fk.fktable::text || ' fk WHERE '; - end if; - if fk.is_opt then - for i in 1 .. nkeys loop - cmd := cmd || quote_ident(fk.fkcols[i]) || ' != 0 AND '; - end loop; - end if; - cmd := cmd || 'NOT EXISTS(SELECT 1 FROM ' || fk.pktable::text || ' pk WHERE '; - for i in 1 .. nkeys loop - if i > 1 then cmd := cmd || ' AND '; end if; - cmd := cmd || 'pk.' || quote_ident(fk.pkcols[i]); - cmd := cmd || ' = fk.' || quote_ident(fk.fkcols[i]); - end loop; - cmd := cmd || ')'; - -- raise notice 'cmd = %', cmd; - for err in execute cmd loop - raise warning 'FK VIOLATION IN %(%): %', fk.fktable, fk.fkcols, err; - end loop; - end loop; -end -$doblock$; -NOTICE: checking pg_proc {pronamespace} => pg_namespace {oid} -NOTICE: checking pg_proc {proowner} => pg_authid {oid} -NOTICE: checking pg_proc {prolang} => pg_language {oid} -NOTICE: checking pg_proc {provariadic} => pg_type {oid} -NOTICE: checking pg_proc {prosupport} => pg_proc {oid} -NOTICE: checking pg_proc {prorettype} => pg_type {oid} -NOTICE: checking pg_proc {proargtypes} => pg_type {oid} -NOTICE: checking pg_proc {proallargtypes} => pg_type {oid} -NOTICE: checking pg_proc {protrftypes} => pg_type {oid} -NOTICE: checking pg_type {typnamespace} => pg_namespace {oid} -NOTICE: checking pg_type {typowner} => pg_authid {oid} -NOTICE: checking pg_type {typrelid} => pg_class {oid} -NOTICE: checking pg_type {typsubscript} => pg_proc {oid} -NOTICE: checking pg_type {typelem} => pg_type {oid} -NOTICE: checking pg_type {typarray} => pg_type {oid} -NOTICE: checking pg_type {typinput} => pg_proc {oid} -NOTICE: checking pg_type {typoutput} => pg_proc {oid} -NOTICE: checking pg_type {typreceive} => pg_proc {oid} -NOTICE: checking pg_type {typsend} => pg_proc {oid} -NOTICE: checking pg_type {typmodin} => pg_proc {oid} -NOTICE: checking pg_type {typmodout} => pg_proc {oid} -NOTICE: checking pg_type {typanalyze} => pg_proc {oid} -NOTICE: checking pg_type {typbasetype} => pg_type {oid} -NOTICE: checking pg_type {typcollation} => pg_collation {oid} -NOTICE: checking pg_attribute {attrelid} => pg_class {oid} -NOTICE: checking pg_attribute {atttypid} => pg_type {oid} -NOTICE: checking pg_attribute {attcollation} => pg_collation {oid} -NOTICE: checking pg_class {relnamespace} => pg_namespace {oid} -NOTICE: checking pg_class {reltype} => pg_type {oid} -NOTICE: checking pg_class {reloftype} => pg_type {oid} -NOTICE: checking pg_class {relowner} => pg_authid {oid} -NOTICE: checking pg_class {relam} => pg_am {oid} -NOTICE: checking pg_class {reltablespace} => pg_tablespace {oid} -NOTICE: checking pg_class {reltoastrelid} => pg_class {oid} -NOTICE: checking pg_class {relrewrite} => pg_class {oid} -NOTICE: checking pg_attrdef {adrelid} => pg_class {oid} -NOTICE: checking pg_attrdef {adrelid,adnum} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_constraint {connamespace} => pg_namespace {oid} -NOTICE: checking pg_constraint {conrelid} => pg_class {oid} -NOTICE: checking pg_constraint {contypid} => pg_type {oid} -NOTICE: checking pg_constraint {conindid} => pg_class {oid} -NOTICE: checking pg_constraint {conparentid} => pg_constraint {oid} -NOTICE: checking pg_constraint {confrelid} => pg_class {oid} -NOTICE: checking pg_constraint {conpfeqop} => pg_operator {oid} -NOTICE: checking 
pg_constraint {conppeqop} => pg_operator {oid} -NOTICE: checking pg_constraint {conffeqop} => pg_operator {oid} -NOTICE: checking pg_constraint {conexclop} => pg_operator {oid} -NOTICE: checking pg_constraint {conrelid,conkey} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_constraint {confrelid,confkey} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_inherits {inhrelid} => pg_class {oid} -NOTICE: checking pg_inherits {inhparent} => pg_class {oid} -NOTICE: checking pg_index {indexrelid} => pg_class {oid} -NOTICE: checking pg_index {indrelid} => pg_class {oid} -NOTICE: checking pg_index {indcollation} => pg_collation {oid} -NOTICE: checking pg_index {indclass} => pg_opclass {oid} -NOTICE: checking pg_index {indrelid,indkey} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_operator {oprnamespace} => pg_namespace {oid} -NOTICE: checking pg_operator {oprowner} => pg_authid {oid} -NOTICE: checking pg_operator {oprleft} => pg_type {oid} -NOTICE: checking pg_operator {oprright} => pg_type {oid} -NOTICE: checking pg_operator {oprresult} => pg_type {oid} -NOTICE: checking pg_operator {oprcom} => pg_operator {oid} -NOTICE: checking pg_operator {oprnegate} => pg_operator {oid} -NOTICE: checking pg_operator {oprcode} => pg_proc {oid} -NOTICE: checking pg_operator {oprrest} => pg_proc {oid} -NOTICE: checking pg_operator {oprjoin} => pg_proc {oid} -NOTICE: checking pg_opfamily {opfmethod} => pg_am {oid} -NOTICE: checking pg_opfamily {opfnamespace} => pg_namespace {oid} -NOTICE: checking pg_opfamily {opfowner} => pg_authid {oid} -NOTICE: checking pg_opclass {opcmethod} => pg_am {oid} -NOTICE: checking pg_opclass {opcnamespace} => pg_namespace {oid} -NOTICE: checking pg_opclass {opcowner} => pg_authid {oid} -NOTICE: checking pg_opclass {opcfamily} => pg_opfamily {oid} -NOTICE: checking pg_opclass {opcintype} => pg_type {oid} -NOTICE: checking pg_opclass {opckeytype} => pg_type {oid} -NOTICE: checking pg_am {amhandler} => pg_proc {oid} -NOTICE: checking pg_amop {amopfamily} => pg_opfamily {oid} -NOTICE: checking pg_amop {amoplefttype} => pg_type {oid} -NOTICE: checking pg_amop {amoprighttype} => pg_type {oid} -NOTICE: checking pg_amop {amopopr} => pg_operator {oid} -NOTICE: checking pg_amop {amopmethod} => pg_am {oid} -NOTICE: checking pg_amop {amopsortfamily} => pg_opfamily {oid} -NOTICE: checking pg_amproc {amprocfamily} => pg_opfamily {oid} -NOTICE: checking pg_amproc {amproclefttype} => pg_type {oid} -NOTICE: checking pg_amproc {amprocrighttype} => pg_type {oid} -NOTICE: checking pg_amproc {amproc} => pg_proc {oid} -NOTICE: checking pg_language {lanowner} => pg_authid {oid} -NOTICE: checking pg_language {lanplcallfoid} => pg_proc {oid} -NOTICE: checking pg_language {laninline} => pg_proc {oid} -NOTICE: checking pg_language {lanvalidator} => pg_proc {oid} -NOTICE: checking pg_largeobject_metadata {lomowner} => pg_authid {oid} -NOTICE: checking pg_largeobject {loid} => pg_largeobject_metadata {oid} -NOTICE: checking pg_aggregate {aggfnoid} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggtransfn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggfinalfn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggcombinefn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggserialfn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggdeserialfn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggmtransfn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggminvtransfn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggmfinalfn} => pg_proc {oid} -NOTICE: checking pg_aggregate 
{aggsortop} => pg_operator {oid} -NOTICE: checking pg_aggregate {aggtranstype} => pg_type {oid} -NOTICE: checking pg_aggregate {aggmtranstype} => pg_type {oid} -NOTICE: checking pg_statistic {starelid} => pg_class {oid} -NOTICE: checking pg_statistic {staop1} => pg_operator {oid} -NOTICE: checking pg_statistic {staop2} => pg_operator {oid} -NOTICE: checking pg_statistic {staop3} => pg_operator {oid} -NOTICE: checking pg_statistic {staop4} => pg_operator {oid} -NOTICE: checking pg_statistic {staop5} => pg_operator {oid} -NOTICE: checking pg_statistic {stacoll1} => pg_collation {oid} -NOTICE: checking pg_statistic {stacoll2} => pg_collation {oid} -NOTICE: checking pg_statistic {stacoll3} => pg_collation {oid} -NOTICE: checking pg_statistic {stacoll4} => pg_collation {oid} -NOTICE: checking pg_statistic {stacoll5} => pg_collation {oid} -NOTICE: checking pg_statistic {starelid,staattnum} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_statistic_ext {stxrelid} => pg_class {oid} -NOTICE: checking pg_statistic_ext {stxnamespace} => pg_namespace {oid} -NOTICE: checking pg_statistic_ext {stxowner} => pg_authid {oid} -NOTICE: checking pg_statistic_ext {stxrelid,stxkeys} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_statistic_ext_data {stxoid} => pg_statistic_ext {oid} -NOTICE: checking pg_rewrite {ev_class} => pg_class {oid} -NOTICE: checking pg_trigger {tgrelid} => pg_class {oid} -NOTICE: checking pg_trigger {tgparentid} => pg_trigger {oid} -NOTICE: checking pg_trigger {tgfoid} => pg_proc {oid} -NOTICE: checking pg_trigger {tgconstrrelid} => pg_class {oid} -NOTICE: checking pg_trigger {tgconstrindid} => pg_class {oid} -NOTICE: checking pg_trigger {tgconstraint} => pg_constraint {oid} -NOTICE: checking pg_trigger {tgrelid,tgattr} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_event_trigger {evtowner} => pg_authid {oid} -NOTICE: checking pg_event_trigger {evtfoid} => pg_proc {oid} -NOTICE: checking pg_description {classoid} => pg_class {oid} -NOTICE: checking pg_cast {castsource} => pg_type {oid} -NOTICE: checking pg_cast {casttarget} => pg_type {oid} -NOTICE: checking pg_cast {castfunc} => pg_proc {oid} -NOTICE: checking pg_enum {enumtypid} => pg_type {oid} -NOTICE: checking pg_namespace {nspowner} => pg_authid {oid} -NOTICE: checking pg_conversion {connamespace} => pg_namespace {oid} -NOTICE: checking pg_conversion {conowner} => pg_authid {oid} -NOTICE: checking pg_conversion {conproc} => pg_proc {oid} -NOTICE: checking pg_depend {classid} => pg_class {oid} -NOTICE: checking pg_depend {refclassid} => pg_class {oid} -NOTICE: checking pg_database {datdba} => pg_authid {oid} -NOTICE: checking pg_database {dattablespace} => pg_tablespace {oid} -NOTICE: checking pg_db_role_setting {setdatabase} => pg_database {oid} -NOTICE: checking pg_db_role_setting {setrole} => pg_authid {oid} -NOTICE: checking pg_tablespace {spcowner} => pg_authid {oid} -NOTICE: checking pg_auth_members {roleid} => pg_authid {oid} -NOTICE: checking pg_auth_members {member} => pg_authid {oid} -NOTICE: checking pg_auth_members {grantor} => pg_authid {oid} -NOTICE: checking pg_shdepend {dbid} => pg_database {oid} -NOTICE: checking pg_shdepend {classid} => pg_class {oid} -NOTICE: checking pg_shdepend {refclassid} => pg_class {oid} -NOTICE: checking pg_shdescription {classoid} => pg_class {oid} -NOTICE: checking pg_ts_config {cfgnamespace} => pg_namespace {oid} -NOTICE: checking pg_ts_config {cfgowner} => pg_authid {oid} -NOTICE: checking pg_ts_config {cfgparser} => pg_ts_parser {oid} -NOTICE: checking 
pg_ts_config_map {mapcfg} => pg_ts_config {oid} -NOTICE: checking pg_ts_config_map {mapdict} => pg_ts_dict {oid} -NOTICE: checking pg_ts_dict {dictnamespace} => pg_namespace {oid} -NOTICE: checking pg_ts_dict {dictowner} => pg_authid {oid} -NOTICE: checking pg_ts_dict {dicttemplate} => pg_ts_template {oid} -NOTICE: checking pg_ts_parser {prsnamespace} => pg_namespace {oid} -NOTICE: checking pg_ts_parser {prsstart} => pg_proc {oid} -NOTICE: checking pg_ts_parser {prstoken} => pg_proc {oid} -NOTICE: checking pg_ts_parser {prsend} => pg_proc {oid} -NOTICE: checking pg_ts_parser {prsheadline} => pg_proc {oid} -NOTICE: checking pg_ts_parser {prslextype} => pg_proc {oid} -NOTICE: checking pg_ts_template {tmplnamespace} => pg_namespace {oid} -NOTICE: checking pg_ts_template {tmplinit} => pg_proc {oid} -NOTICE: checking pg_ts_template {tmpllexize} => pg_proc {oid} -NOTICE: checking pg_extension {extowner} => pg_authid {oid} -NOTICE: checking pg_extension {extnamespace} => pg_namespace {oid} -NOTICE: checking pg_extension {extconfig} => pg_class {oid} -NOTICE: checking pg_foreign_data_wrapper {fdwowner} => pg_authid {oid} -NOTICE: checking pg_foreign_data_wrapper {fdwhandler} => pg_proc {oid} -NOTICE: checking pg_foreign_data_wrapper {fdwvalidator} => pg_proc {oid} -NOTICE: checking pg_foreign_server {srvowner} => pg_authid {oid} -NOTICE: checking pg_foreign_server {srvfdw} => pg_foreign_data_wrapper {oid} -NOTICE: checking pg_user_mapping {umuser} => pg_authid {oid} -NOTICE: checking pg_user_mapping {umserver} => pg_foreign_server {oid} -NOTICE: checking pg_foreign_table {ftrelid} => pg_class {oid} -NOTICE: checking pg_foreign_table {ftserver} => pg_foreign_server {oid} -NOTICE: checking pg_policy {polrelid} => pg_class {oid} -NOTICE: checking pg_policy {polroles} => pg_authid {oid} -NOTICE: checking pg_default_acl {defaclrole} => pg_authid {oid} -NOTICE: checking pg_default_acl {defaclnamespace} => pg_namespace {oid} -NOTICE: checking pg_init_privs {classoid} => pg_class {oid} -NOTICE: checking pg_seclabel {classoid} => pg_class {oid} -NOTICE: checking pg_shseclabel {classoid} => pg_class {oid} -NOTICE: checking pg_collation {collnamespace} => pg_namespace {oid} -NOTICE: checking pg_collation {collowner} => pg_authid {oid} -NOTICE: checking pg_partitioned_table {partrelid} => pg_class {oid} -NOTICE: checking pg_partitioned_table {partdefid} => pg_class {oid} -NOTICE: checking pg_partitioned_table {partclass} => pg_opclass {oid} -NOTICE: checking pg_partitioned_table {partcollation} => pg_collation {oid} -NOTICE: checking pg_partitioned_table {partrelid,partattrs} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_range {rngtypid} => pg_type {oid} -NOTICE: checking pg_range {rngsubtype} => pg_type {oid} -NOTICE: checking pg_range {rngmultitypid} => pg_type {oid} -NOTICE: checking pg_range {rngcollation} => pg_collation {oid} -NOTICE: checking pg_range {rngsubopc} => pg_opclass {oid} -NOTICE: checking pg_range {rngcanonical} => pg_proc {oid} -NOTICE: checking pg_range {rngsubdiff} => pg_proc {oid} -NOTICE: checking pg_transform {trftype} => pg_type {oid} -NOTICE: checking pg_transform {trflang} => pg_language {oid} -NOTICE: checking pg_transform {trffromsql} => pg_proc {oid} -NOTICE: checking pg_transform {trftosql} => pg_proc {oid} -NOTICE: checking pg_sequence {seqrelid} => pg_class {oid} -NOTICE: checking pg_sequence {seqtypid} => pg_type {oid} -NOTICE: checking pg_publication {pubowner} => pg_authid {oid} -NOTICE: checking pg_publication_namespace {pnpubid} => pg_publication {oid} -NOTICE: 
checking pg_publication_namespace {pnnspid} => pg_namespace {oid} -NOTICE: checking pg_publication_rel {prpubid} => pg_publication {oid} -NOTICE: checking pg_publication_rel {prrelid} => pg_class {oid} -NOTICE: checking pg_subscription {subdbid} => pg_database {oid} -NOTICE: checking pg_subscription {subowner} => pg_authid {oid} -NOTICE: checking pg_subscription_rel {srsubid} => pg_subscription {oid} -NOTICE: checking pg_subscription_rel {srrelid} => pg_class {oid} +psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections +DETAIL: Consistent recovery state has not been yet reached. diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/event_trigger.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/event_trigger.out --- /tmp/cirrus-ci-build/src/test/regress/expected/event_trigger.out 2024-03-07 14:25:00.330276000 +0000 +++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/event_trigger.out 2024-03-07 14:27:23.729296000 +0000 @@ -1,744 +1,2 @@ --- should fail, return type mismatch -create event trigger regress_event_trigger - on ddl_command_start - execute procedure pg_backend_pid(); -ERROR: function pg_backend_pid must return type event_trigger --- OK -create function test_event_trigger() returns event_trigger as $$ -BEGIN - RAISE NOTICE 'test_event_trigger: % %', tg_event, tg_tag; -END -$$ language plpgsql; --- should fail, can't call it as a plain function -SELECT test_event_trigger(); -ERROR: trigger functions can only be called as triggers -CONTEXT: compilation of PL/pgSQL function "test_event_trigger" near line 1 --- should fail, event triggers cannot have declared arguments -create function test_event_trigger_arg(name text) -returns event_trigger as $$ BEGIN RETURN 1; END $$ language plpgsql; -ERROR: event trigger functions cannot have declared arguments -CONTEXT: compilation of PL/pgSQL function "test_event_trigger_arg" near line 1 --- should fail, SQL functions cannot be event triggers -create function test_event_trigger_sql() returns event_trigger as $$ -SELECT 1 $$ language sql; -ERROR: SQL functions cannot return type event_trigger --- should fail, no elephant_bootstrap entry point -create event trigger regress_event_trigger on elephant_bootstrap - execute procedure test_event_trigger(); -ERROR: unrecognized event name "elephant_bootstrap" --- OK -create event trigger regress_event_trigger on ddl_command_start - execute procedure test_event_trigger(); --- OK -create event trigger regress_event_trigger_end on ddl_command_end - execute function test_event_trigger(); --- should fail, food is not a valid filter variable -create event trigger regress_event_trigger2 on ddl_command_start - when food in ('sandwich') - execute procedure test_event_trigger(); -ERROR: unrecognized filter variable "food" --- should fail, sandwich is not a valid command tag -create event trigger regress_event_trigger2 on ddl_command_start - when tag in ('sandwich') - execute procedure test_event_trigger(); -ERROR: filter value "sandwich" not recognized for filter variable "tag" --- should fail, create skunkcabbage is not a valid command tag -create event trigger regress_event_trigger2 on ddl_command_start - when tag in ('create table', 'create skunkcabbage') - execute procedure test_event_trigger(); -ERROR: filter value "create skunkcabbage" not recognized for filter variable "tag" --- should fail, can't have event triggers on event triggers -create event trigger regress_event_trigger2 on 
ddl_command_start - when tag in ('DROP EVENT TRIGGER') - execute procedure test_event_trigger(); -ERROR: event triggers are not supported for DROP EVENT TRIGGER --- should fail, can't have event triggers on global objects -create event trigger regress_event_trigger2 on ddl_command_start - when tag in ('CREATE ROLE') - execute procedure test_event_trigger(); -ERROR: event triggers are not supported for CREATE ROLE --- should fail, can't have event triggers on global objects -create event trigger regress_event_trigger2 on ddl_command_start - when tag in ('CREATE DATABASE') - execute procedure test_event_trigger(); -ERROR: event triggers are not supported for CREATE DATABASE --- should fail, can't have event triggers on global objects -create event trigger regress_event_trigger2 on ddl_command_start - when tag in ('CREATE TABLESPACE') - execute procedure test_event_trigger(); -ERROR: event triggers are not supported for CREATE TABLESPACE --- should fail, can't have same filter variable twice -create event trigger regress_event_trigger2 on ddl_command_start - when tag in ('create table') and tag in ('CREATE FUNCTION') - execute procedure test_event_trigger(); -ERROR: filter variable "tag" specified more than once --- should fail, can't have arguments -create event trigger regress_event_trigger2 on ddl_command_start - execute procedure test_event_trigger('argument not allowed'); -ERROR: syntax error at or near "'argument not allowed'" -LINE 2: execute procedure test_event_trigger('argument not allowe... - ^ --- OK -create event trigger regress_event_trigger2 on ddl_command_start - when tag in ('create table', 'CREATE FUNCTION') - execute procedure test_event_trigger(); --- OK -comment on event trigger regress_event_trigger is 'test comment'; --- drop as non-superuser should fail -create role regress_evt_user; -set role regress_evt_user; -create event trigger regress_event_trigger_noperms on ddl_command_start - execute procedure test_event_trigger(); -ERROR: permission denied to create event trigger "regress_event_trigger_noperms" -HINT: Must be superuser to create an event trigger. 
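
The enable/disable stanza below exercises how ALTER EVENT TRIGGER firing modes interact with session_replication_role. A condensed sketch of the rules, using hypothetical names (et_demo_fn, et_demo); the behavior it annotates is the same one the expected output below demonstrates with the event_trigger_fire tables:

    create function et_demo_fn() returns event_trigger language plpgsql
      as $$ begin raise notice 'fired: %', tg_tag; end $$;
    create event trigger et_demo on ddl_command_start execute function et_demo_fn();
    -- default (ENABLE): fires when session_replication_role is origin or local
    set session_replication_role = replica;
    create table et_demo_tbl (a int);            -- et_demo stays silent here
    alter event trigger et_demo enable replica;  -- now fires only in replica mode
    alter event trigger et_demo enable always;   -- fires regardless of the role
    alter event trigger et_demo disable;         -- never fires
    reset session_replication_role;
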
-reset role; --- test enabling and disabling -alter event trigger regress_event_trigger disable; --- fires _trigger2 and _trigger_end, but not _trigger -create table event_trigger_fire1 (a int); -NOTICE: test_event_trigger: ddl_command_start CREATE TABLE -NOTICE: test_event_trigger: ddl_command_end CREATE TABLE -alter event trigger regress_event_trigger enable; -set session_replication_role = replica; --- fires nothing -create table event_trigger_fire2 (a int); -alter event trigger regress_event_trigger enable replica; --- fires only _trigger -create table event_trigger_fire3 (a int); -NOTICE: test_event_trigger: ddl_command_start CREATE TABLE -alter event trigger regress_event_trigger enable always; --- fires only _trigger -create table event_trigger_fire4 (a int); -NOTICE: test_event_trigger: ddl_command_start CREATE TABLE -reset session_replication_role; --- fires all three -create table event_trigger_fire5 (a int); -NOTICE: test_event_trigger: ddl_command_start CREATE TABLE -NOTICE: test_event_trigger: ddl_command_start CREATE TABLE -NOTICE: test_event_trigger: ddl_command_end CREATE TABLE --- non-top-level command -create function f1() returns int -language plpgsql -as $$ -begin - create table event_trigger_fire6 (a int); - return 0; -end $$; -NOTICE: test_event_trigger: ddl_command_start CREATE FUNCTION -NOTICE: test_event_trigger: ddl_command_start CREATE FUNCTION -NOTICE: test_event_trigger: ddl_command_end CREATE FUNCTION -select f1(); -NOTICE: test_event_trigger: ddl_command_start CREATE TABLE -NOTICE: test_event_trigger: ddl_command_start CREATE TABLE -NOTICE: test_event_trigger: ddl_command_end CREATE TABLE - f1 ---- - 0 -(1 row) - --- non-top-level command -create procedure p1() -language plpgsql -as $$ -begin - create table event_trigger_fire7 (a int); -end $$; -NOTICE: test_event_trigger: ddl_command_start CREATE PROCEDURE -NOTICE: test_event_trigger: ddl_command_end CREATE PROCEDURE -call p1(); -NOTICE: test_event_trigger: ddl_command_start CREATE TABLE -NOTICE: test_event_trigger: ddl_command_start CREATE TABLE -NOTICE: test_event_trigger: ddl_command_end CREATE TABLE --- clean up -alter event trigger regress_event_trigger disable; -drop table event_trigger_fire2, event_trigger_fire3, event_trigger_fire4, event_trigger_fire5, event_trigger_fire6, event_trigger_fire7; -NOTICE: test_event_trigger: ddl_command_end DROP TABLE -drop routine f1(), p1(); -NOTICE: test_event_trigger: ddl_command_end DROP ROUTINE --- regress_event_trigger_end should fire on these commands -grant all on table event_trigger_fire1 to public; -NOTICE: test_event_trigger: ddl_command_end GRANT -comment on table event_trigger_fire1 is 'here is a comment'; -NOTICE: test_event_trigger: ddl_command_end COMMENT -revoke all on table event_trigger_fire1 from public; -NOTICE: test_event_trigger: ddl_command_end REVOKE -drop table event_trigger_fire1; -NOTICE: test_event_trigger: ddl_command_end DROP TABLE -create foreign data wrapper useless; -NOTICE: test_event_trigger: ddl_command_end CREATE FOREIGN DATA WRAPPER -create server useless_server foreign data wrapper useless; -NOTICE: test_event_trigger: ddl_command_end CREATE SERVER -create user mapping for regress_evt_user server useless_server; -NOTICE: test_event_trigger: ddl_command_end CREATE USER MAPPING -alter default privileges for role regress_evt_user - revoke delete on tables from regress_evt_user; -NOTICE: test_event_trigger: ddl_command_end ALTER DEFAULT PRIVILEGES --- alter owner to non-superuser should fail -alter event trigger 
regress_event_trigger owner to regress_evt_user; -ERROR: permission denied to change owner of event trigger "regress_event_trigger" -HINT: The owner of an event trigger must be a superuser. --- alter owner to superuser should work -alter role regress_evt_user superuser; -alter event trigger regress_event_trigger owner to regress_evt_user; --- should fail, name collision -alter event trigger regress_event_trigger rename to regress_event_trigger2; -ERROR: event trigger "regress_event_trigger2" already exists --- OK -alter event trigger regress_event_trigger rename to regress_event_trigger3; --- should fail, doesn't exist any more -drop event trigger regress_event_trigger; -ERROR: event trigger "regress_event_trigger" does not exist --- should fail, regress_evt_user owns some objects -drop role regress_evt_user; -ERROR: role "regress_evt_user" cannot be dropped because some objects depend on it -DETAIL: owner of event trigger regress_event_trigger3 -owner of user mapping for regress_evt_user on server useless_server -owner of default privileges on new relations belonging to role regress_evt_user --- cleanup before next test --- these are all OK; the second one should emit a NOTICE -drop event trigger if exists regress_event_trigger2; -drop event trigger if exists regress_event_trigger2; -NOTICE: event trigger "regress_event_trigger2" does not exist, skipping -drop event trigger regress_event_trigger3; -drop event trigger regress_event_trigger_end; --- test support for dropped objects -CREATE SCHEMA schema_one authorization regress_evt_user; -CREATE SCHEMA schema_two authorization regress_evt_user; -CREATE SCHEMA audit_tbls authorization regress_evt_user; -CREATE TEMP TABLE a_temp_tbl (); -SET SESSION AUTHORIZATION regress_evt_user; -CREATE TABLE schema_one.table_one(a int); -CREATE TABLE schema_one."table two"(a int); -CREATE TABLE schema_one.table_three(a int); -CREATE TABLE audit_tbls.schema_one_table_two(the_value text); -CREATE TABLE schema_two.table_two(a int); -CREATE TABLE schema_two.table_three(a int, b text); -CREATE TABLE audit_tbls.schema_two_table_three(the_value text); -CREATE OR REPLACE FUNCTION schema_two.add(int, int) RETURNS int LANGUAGE plpgsql - CALLED ON NULL INPUT - AS $$ BEGIN RETURN coalesce($1,0) + coalesce($2,0); END; $$; -CREATE AGGREGATE schema_two.newton - (BASETYPE = int, SFUNC = schema_two.add, STYPE = int); -RESET SESSION AUTHORIZATION; -CREATE TABLE undroppable_objs ( - object_type text, - object_identity text -); -INSERT INTO undroppable_objs VALUES -('table', 'schema_one.table_three'), -('table', 'audit_tbls.schema_two_table_three'); -CREATE TABLE dropped_objects ( - type text, - schema text, - object text -); --- This tests errors raised within event triggers; the one in audit_tbls --- uses 2nd-level recursive invocation via test_evtrig_dropped_objects(). 
-CREATE OR REPLACE FUNCTION undroppable() RETURNS event_trigger -LANGUAGE plpgsql AS $$ -DECLARE - obj record; -BEGIN - PERFORM 1 FROM pg_tables WHERE tablename = 'undroppable_objs'; - IF NOT FOUND THEN - RAISE NOTICE 'table undroppable_objs not found, skipping'; - RETURN; - END IF; - FOR obj IN - SELECT * FROM pg_event_trigger_dropped_objects() JOIN - undroppable_objs USING (object_type, object_identity) - LOOP - RAISE EXCEPTION 'object % of type % cannot be dropped', - obj.object_identity, obj.object_type; - END LOOP; -END; -$$; -CREATE EVENT TRIGGER undroppable ON sql_drop - EXECUTE PROCEDURE undroppable(); -CREATE OR REPLACE FUNCTION test_evtrig_dropped_objects() RETURNS event_trigger -LANGUAGE plpgsql AS $$ -DECLARE - obj record; -BEGIN - FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() - LOOP - IF obj.object_type = 'table' THEN - EXECUTE format('DROP TABLE IF EXISTS audit_tbls.%I', - format('%s_%s', obj.schema_name, obj.object_name)); - END IF; - - INSERT INTO dropped_objects - (type, schema, object) VALUES - (obj.object_type, obj.schema_name, obj.object_identity); - END LOOP; -END -$$; -CREATE EVENT TRIGGER regress_event_trigger_drop_objects ON sql_drop - WHEN TAG IN ('drop table', 'drop function', 'drop view', - 'drop owned', 'drop schema', 'alter table') - EXECUTE PROCEDURE test_evtrig_dropped_objects(); -ALTER TABLE schema_one.table_one DROP COLUMN a; -DROP SCHEMA schema_one, schema_two CASCADE; -NOTICE: drop cascades to 7 other objects -DETAIL: drop cascades to table schema_two.table_two -drop cascades to table schema_two.table_three -drop cascades to function schema_two.add(integer,integer) -drop cascades to function schema_two.newton(integer) -drop cascades to table schema_one.table_one -drop cascades to table schema_one."table two" -drop cascades to table schema_one.table_three -NOTICE: table "schema_two_table_two" does not exist, skipping -NOTICE: table "audit_tbls_schema_two_table_three" does not exist, skipping -ERROR: object audit_tbls.schema_two_table_three of type table cannot be dropped -CONTEXT: PL/pgSQL function undroppable() line 14 at RAISE -SQL statement "DROP TABLE IF EXISTS audit_tbls.schema_two_table_three" -PL/pgSQL function test_evtrig_dropped_objects() line 8 at EXECUTE -DELETE FROM undroppable_objs WHERE object_identity = 'audit_tbls.schema_two_table_three'; -DROP SCHEMA schema_one, schema_two CASCADE; -NOTICE: drop cascades to 7 other objects -DETAIL: drop cascades to table schema_two.table_two -drop cascades to table schema_two.table_three -drop cascades to function schema_two.add(integer,integer) -drop cascades to function schema_two.newton(integer) -drop cascades to table schema_one.table_one -drop cascades to table schema_one."table two" -drop cascades to table schema_one.table_three -NOTICE: table "schema_two_table_two" does not exist, skipping -NOTICE: table "audit_tbls_schema_two_table_three" does not exist, skipping -NOTICE: table "schema_one_table_one" does not exist, skipping -NOTICE: table "schema_one_table two" does not exist, skipping -NOTICE: table "schema_one_table_three" does not exist, skipping -ERROR: object schema_one.table_three of type table cannot be dropped -CONTEXT: PL/pgSQL function undroppable() line 14 at RAISE -DELETE FROM undroppable_objs WHERE object_identity = 'schema_one.table_three'; -DROP SCHEMA schema_one, schema_two CASCADE; -NOTICE: drop cascades to 7 other objects -DETAIL: drop cascades to table schema_two.table_two -drop cascades to table schema_two.table_three -drop cascades to function 
schema_two.add(integer,integer) -drop cascades to function schema_two.newton(integer) -drop cascades to table schema_one.table_one -drop cascades to table schema_one."table two" -drop cascades to table schema_one.table_three -NOTICE: table "schema_two_table_two" does not exist, skipping -NOTICE: table "audit_tbls_schema_two_table_three" does not exist, skipping -NOTICE: table "schema_one_table_one" does not exist, skipping -NOTICE: table "schema_one_table two" does not exist, skipping -NOTICE: table "schema_one_table_three" does not exist, skipping -SELECT * FROM dropped_objects WHERE schema IS NULL OR schema <> 'pg_toast'; - type | schema | object ---------------+------------+------------------------------------- - table column | schema_one | schema_one.table_one.a - schema | | schema_two - table | schema_two | schema_two.table_two - type | schema_two | schema_two.table_two - type | schema_two | schema_two.table_two[] - table | audit_tbls | audit_tbls.schema_two_table_three - type | audit_tbls | audit_tbls.schema_two_table_three - type | audit_tbls | audit_tbls.schema_two_table_three[] - table | schema_two | schema_two.table_three - type | schema_two | schema_two.table_three - type | schema_two | schema_two.table_three[] - function | schema_two | schema_two.add(integer,integer) - aggregate | schema_two | schema_two.newton(integer) - schema | | schema_one - table | schema_one | schema_one.table_one - type | schema_one | schema_one.table_one - type | schema_one | schema_one.table_one[] - table | schema_one | schema_one."table two" - type | schema_one | schema_one."table two" - type | schema_one | schema_one."table two"[] - table | schema_one | schema_one.table_three - type | schema_one | schema_one.table_three - type | schema_one | schema_one.table_three[] -(23 rows) - -DROP OWNED BY regress_evt_user; -NOTICE: schema "audit_tbls" does not exist, skipping -SELECT * FROM dropped_objects WHERE type = 'schema'; - type | schema | object ---------+--------+------------ - schema | | schema_two - schema | | schema_one - schema | | audit_tbls -(3 rows) - -DROP ROLE regress_evt_user; -DROP EVENT TRIGGER regress_event_trigger_drop_objects; -DROP EVENT TRIGGER undroppable; --- Event triggers on relations. 
-CREATE OR REPLACE FUNCTION event_trigger_report_dropped()
- RETURNS event_trigger
- LANGUAGE plpgsql
-AS $$
-DECLARE r record;
-BEGIN
-    FOR r IN SELECT * from pg_event_trigger_dropped_objects()
-    LOOP
-    IF NOT r.normal AND NOT r.original THEN
-        CONTINUE;
-    END IF;
-    RAISE NOTICE 'NORMAL: orig=% normal=% istemp=% type=% identity=% name=% args=%',
-        r.original, r.normal, r.is_temporary, r.object_type,
-        r.object_identity, r.address_names, r.address_args;
-    END LOOP;
-END; $$;
-CREATE EVENT TRIGGER regress_event_trigger_report_dropped ON sql_drop
-    EXECUTE PROCEDURE event_trigger_report_dropped();
-CREATE OR REPLACE FUNCTION event_trigger_report_end()
- RETURNS event_trigger
- LANGUAGE plpgsql
-AS $$
-DECLARE r RECORD;
-BEGIN
-    FOR r IN SELECT * FROM pg_event_trigger_ddl_commands()
-    LOOP
-        RAISE NOTICE 'END: command_tag=% type=% identity=%',
-            r.command_tag, r.object_type, r.object_identity;
-    END LOOP;
-END; $$;
-CREATE EVENT TRIGGER regress_event_trigger_report_end ON ddl_command_end
-    EXECUTE PROCEDURE event_trigger_report_end();
-CREATE SCHEMA evttrig
-    CREATE TABLE one (col_a SERIAL PRIMARY KEY, col_b text DEFAULT 'forty two', col_c SERIAL)
-    CREATE INDEX one_idx ON one (col_b)
-    CREATE TABLE two (col_c INTEGER CHECK (col_c > 0) REFERENCES one DEFAULT 42)
-    CREATE TABLE id (col_d int NOT NULL GENERATED ALWAYS AS IDENTITY);
-NOTICE: END: command_tag=CREATE SCHEMA type=schema identity=evttrig
-NOTICE: END: command_tag=CREATE SEQUENCE type=sequence identity=evttrig.one_col_a_seq
-NOTICE: END: command_tag=CREATE SEQUENCE type=sequence identity=evttrig.one_col_c_seq
-NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.one
-NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.one
-NOTICE: END: command_tag=CREATE INDEX type=index identity=evttrig.one_pkey
-NOTICE: END: command_tag=ALTER SEQUENCE type=sequence identity=evttrig.one_col_a_seq
-NOTICE: END: command_tag=ALTER SEQUENCE type=sequence identity=evttrig.one_col_c_seq
-NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.two
-NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.two
-NOTICE: END: command_tag=CREATE SEQUENCE type=sequence identity=evttrig.id_col_d_seq
-NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.id
-NOTICE: END: command_tag=ALTER SEQUENCE type=sequence identity=evttrig.id_col_d_seq
-NOTICE: END: command_tag=CREATE INDEX type=index identity=evttrig.one_idx
--- Partitioned tables with a partitioned index
-CREATE TABLE evttrig.parted (
-    id int PRIMARY KEY)
-    PARTITION BY RANGE (id);
-NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.parted
-NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.parted
-NOTICE: END: command_tag=CREATE INDEX type=index identity=evttrig.parted_pkey
-CREATE TABLE evttrig.part_1_10 PARTITION OF evttrig.parted (id)
-  FOR VALUES FROM (1) TO (10);
-NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.part_1_10
-CREATE TABLE evttrig.part_10_20 PARTITION OF evttrig.parted (id)
-  FOR VALUES FROM (10) TO (20) PARTITION BY RANGE (id);
-NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.part_10_20
-CREATE TABLE evttrig.part_10_15 PARTITION OF evttrig.part_10_20 (id)
-  FOR VALUES FROM (10) TO (15);
-NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.part_10_15
-CREATE TABLE evttrig.part_15_20 PARTITION OF evttrig.part_10_20 (id)
-  FOR VALUES FROM (15) TO (20);
-NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.part_15_20
-ALTER TABLE evttrig.two DROP COLUMN col_c;
-NOTICE: NORMAL: orig=t normal=f istemp=f type=table column identity=evttrig.two.col_c name={evttrig,two,col_c} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table constraint identity=two_col_c_check on evttrig.two name={evttrig,two,two_col_c_check} args={}
-NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.two
-ALTER TABLE evttrig.one ALTER COLUMN col_b DROP DEFAULT;
-NOTICE: NORMAL: orig=t normal=f istemp=f type=default value identity=for evttrig.one.col_b name={evttrig,one,col_b} args={}
-NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.one
-ALTER TABLE evttrig.one DROP CONSTRAINT one_pkey;
-NOTICE: NORMAL: orig=t normal=f istemp=f type=table constraint identity=one_pkey on evttrig.one name={evttrig,one,one_pkey} args={}
-NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.one
-ALTER TABLE evttrig.one DROP COLUMN col_c;
-NOTICE: NORMAL: orig=t normal=f istemp=f type=table column identity=evttrig.one.col_c name={evttrig,one,col_c} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=default value identity=for evttrig.one.col_c name={evttrig,one,col_c} args={}
-NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.one
-ALTER TABLE evttrig.id ALTER COLUMN col_d SET DATA TYPE bigint;
-NOTICE: END: command_tag=ALTER SEQUENCE type=sequence identity=evttrig.id_col_d_seq
-NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.id
-ALTER TABLE evttrig.id ALTER COLUMN col_d DROP IDENTITY,
-  ALTER COLUMN col_d SET DATA TYPE int;
-NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.id
-DROP INDEX evttrig.one_idx;
-NOTICE: NORMAL: orig=t normal=f istemp=f type=index identity=evttrig.one_idx name={evttrig,one_idx} args={}
-DROP SCHEMA evttrig CASCADE;
-NOTICE: drop cascades to 4 other objects
-DETAIL: drop cascades to table evttrig.one
-drop cascades to table evttrig.two
-drop cascades to table evttrig.id
-drop cascades to table evttrig.parted
-NOTICE: NORMAL: orig=t normal=f istemp=f type=schema identity=evttrig name={evttrig} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.one name={evttrig,one} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=sequence identity=evttrig.one_col_a_seq name={evttrig,one_col_a_seq} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=default value identity=for evttrig.one.col_a name={evttrig,one,col_a} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.two name={evttrig,two} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.id name={evttrig,id} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.parted name={evttrig,parted} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_1_10 name={evttrig,part_1_10} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_10_20 name={evttrig,part_10_20} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_10_15 name={evttrig,part_10_15} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_15_20 name={evttrig,part_15_20} args={}
-DROP TABLE a_temp_tbl;
-NOTICE: NORMAL: orig=t normal=f istemp=t type=table identity=pg_temp.a_temp_tbl name={pg_temp,a_temp_tbl} args={}
--- CREATE OPERATOR CLASS without FAMILY clause should report
--- both CREATE OPERATOR FAMILY and CREATE OPERATOR CLASS
-CREATE OPERATOR CLASS evttrigopclass FOR TYPE int USING btree AS STORAGE int;
-NOTICE: END: command_tag=CREATE OPERATOR FAMILY type=operator family identity=public.evttrigopclass USING btree
-NOTICE: END: command_tag=CREATE OPERATOR CLASS type=operator class identity=public.evttrigopclass USING btree
-DROP EVENT TRIGGER regress_event_trigger_report_dropped;
-DROP EVENT TRIGGER regress_event_trigger_report_end;
--- only allowed from within an event trigger function, should fail
-select pg_event_trigger_table_rewrite_oid();
-ERROR: pg_event_trigger_table_rewrite_oid() can only be called in a table_rewrite event trigger function
--- test Table Rewrite Event Trigger
-CREATE OR REPLACE FUNCTION test_evtrig_no_rewrite() RETURNS event_trigger
-LANGUAGE plpgsql AS $$
-BEGIN
-  RAISE EXCEPTION 'rewrites not allowed';
-END;
-$$;
-create event trigger no_rewrite_allowed on table_rewrite
-  execute procedure test_evtrig_no_rewrite();
-create table rewriteme (id serial primary key, foo float, bar timestamptz);
-insert into rewriteme
-  select x * 1.001 from generate_series(1, 500) as t(x);
-alter table rewriteme alter column foo type numeric;
-ERROR: rewrites not allowed
-CONTEXT: PL/pgSQL function test_evtrig_no_rewrite() line 3 at RAISE
-alter table rewriteme add column baz int default 0;
--- test with more than one reason to rewrite a single table
-CREATE OR REPLACE FUNCTION test_evtrig_no_rewrite() RETURNS event_trigger
-LANGUAGE plpgsql AS $$
-BEGIN
-  RAISE NOTICE 'Table ''%'' is being rewritten (reason = %)',
-    pg_event_trigger_table_rewrite_oid()::regclass,
-    pg_event_trigger_table_rewrite_reason();
-END;
-$$;
-alter table rewriteme
-  add column onemore int default 0,
-  add column another int default -1,
-  alter column foo type numeric(10,4);
-NOTICE: Table 'rewriteme' is being rewritten (reason = 4)
--- matview rewrite when changing access method
-CREATE MATERIALIZED VIEW heapmv USING heap AS SELECT 1 AS a;
-ALTER MATERIALIZED VIEW heapmv SET ACCESS METHOD heap2;
-NOTICE: Table 'heapmv' is being rewritten (reason = 8)
-DROP MATERIALIZED VIEW heapmv;
--- shouldn't trigger a table_rewrite event
-alter table rewriteme alter column foo type numeric(12,4);
-begin;
-set timezone to 'UTC';
-alter table rewriteme alter column bar type timestamp;
-set timezone to '0';
-alter table rewriteme alter column bar type timestamptz;
-set timezone to 'Europe/London';
-alter table rewriteme alter column bar type timestamp; -- does rewrite
-NOTICE: Table 'rewriteme' is being rewritten (reason = 4)
-rollback;
--- typed tables are rewritten when their type changes. Don't emit table
--- name, because firing order is not stable.
-CREATE OR REPLACE FUNCTION test_evtrig_no_rewrite() RETURNS event_trigger
-LANGUAGE plpgsql AS $$
-BEGIN
-  RAISE NOTICE 'Table is being rewritten (reason = %)',
-    pg_event_trigger_table_rewrite_reason();
-END;
-$$;
-create type rewritetype as (a int);
-create table rewritemetoo1 of rewritetype;
-create table rewritemetoo2 of rewritetype;
-alter type rewritetype alter attribute a type text cascade;
-NOTICE: Table is being rewritten (reason = 4)
-NOTICE: Table is being rewritten (reason = 4)
--- but this doesn't work
-create table rewritemetoo3 (a rewritetype);
-alter type rewritetype alter attribute a type varchar cascade;
-ERROR: cannot alter type "rewritetype" because column "rewritemetoo3.a" uses it
-drop table rewriteme;
-drop event trigger no_rewrite_allowed;
-drop function test_evtrig_no_rewrite();
--- Tests for REINDEX
-CREATE OR REPLACE FUNCTION reindex_start_command()
-RETURNS event_trigger AS $$
-BEGIN
-    RAISE NOTICE 'REINDEX START: % %', tg_event, tg_tag;
-END;
-$$ LANGUAGE plpgsql;
-CREATE EVENT TRIGGER regress_reindex_start ON ddl_command_start
-  WHEN TAG IN ('REINDEX')
-  EXECUTE PROCEDURE reindex_start_command();
-CREATE FUNCTION reindex_end_command()
-RETURNS event_trigger AS $$
-DECLARE
-    obj record;
-BEGIN
-    FOR obj IN SELECT * FROM pg_event_trigger_ddl_commands()
-    LOOP
-        RAISE NOTICE 'REINDEX END: command_tag=% type=% identity=%',
-            obj.command_tag, obj.object_type, obj.object_identity;
-    END LOOP;
-END;
-$$ LANGUAGE plpgsql;
-CREATE EVENT TRIGGER regress_reindex_end ON ddl_command_end
-  WHEN TAG IN ('REINDEX')
-  EXECUTE PROCEDURE reindex_end_command();
--- Extra event to force the use of a snapshot.
-CREATE FUNCTION reindex_end_command_snap() RETURNS EVENT_TRIGGER
-  AS $$ BEGIN PERFORM 1; END $$ LANGUAGE plpgsql;
-CREATE EVENT TRIGGER regress_reindex_end_snap ON ddl_command_end
-  EXECUTE FUNCTION reindex_end_command_snap();
--- With simple relation
-CREATE TABLE concur_reindex_tab (c1 int);
-CREATE INDEX concur_reindex_ind ON concur_reindex_tab (c1);
--- Both start and end triggers enabled.
-REINDEX INDEX concur_reindex_ind;
-NOTICE: REINDEX START: ddl_command_start REINDEX
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_ind
-REINDEX TABLE concur_reindex_tab;
-NOTICE: REINDEX START: ddl_command_start REINDEX
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_ind
-REINDEX INDEX CONCURRENTLY concur_reindex_ind;
-NOTICE: REINDEX START: ddl_command_start REINDEX
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_ind
-REINDEX TABLE CONCURRENTLY concur_reindex_tab;
-NOTICE: REINDEX START: ddl_command_start REINDEX
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_ind
--- with start trigger disabled.
-ALTER EVENT TRIGGER regress_reindex_start DISABLE;
-REINDEX INDEX concur_reindex_ind;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_ind
-REINDEX INDEX CONCURRENTLY concur_reindex_ind;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_ind
--- without an index
-DROP INDEX concur_reindex_ind;
-REINDEX TABLE concur_reindex_tab;
-NOTICE: table "concur_reindex_tab" has no indexes to reindex
-REINDEX TABLE CONCURRENTLY concur_reindex_tab;
-NOTICE: table "concur_reindex_tab" has no indexes that can be reindexed concurrently
--- With a Schema
-CREATE SCHEMA concur_reindex_schema;
--- No indexes
-REINDEX SCHEMA concur_reindex_schema;
-REINDEX SCHEMA CONCURRENTLY concur_reindex_schema;
-CREATE TABLE concur_reindex_schema.tab (a int);
-CREATE INDEX ind ON concur_reindex_schema.tab (a);
--- One index reported
-REINDEX SCHEMA concur_reindex_schema;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=concur_reindex_schema.ind
-REINDEX SCHEMA CONCURRENTLY concur_reindex_schema;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=concur_reindex_schema.ind
--- One table on schema but no indexes
-DROP INDEX concur_reindex_schema.ind;
-REINDEX SCHEMA concur_reindex_schema;
-REINDEX SCHEMA CONCURRENTLY concur_reindex_schema;
-DROP SCHEMA concur_reindex_schema CASCADE;
-NOTICE: drop cascades to table concur_reindex_schema.tab
--- With a partitioned table, and nothing else.
-CREATE TABLE concur_reindex_part (id int) PARTITION BY RANGE (id);
-REINDEX TABLE concur_reindex_part;
-REINDEX TABLE CONCURRENTLY concur_reindex_part;
--- Partition that would be reindexed, still nothing.
-CREATE TABLE concur_reindex_child PARTITION OF concur_reindex_part
-  FOR VALUES FROM (0) TO (10);
-REINDEX TABLE concur_reindex_part;
-REINDEX TABLE CONCURRENTLY concur_reindex_part;
--- Now add some indexes.
-CREATE INDEX concur_reindex_partidx ON concur_reindex_part (id);
-REINDEX INDEX concur_reindex_partidx;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_child_id_idx
-REINDEX INDEX CONCURRENTLY concur_reindex_partidx;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_child_id_idx
-REINDEX TABLE concur_reindex_part;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_child_id_idx
-REINDEX TABLE CONCURRENTLY concur_reindex_part;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_child_id_idx
-DROP TABLE concur_reindex_part;
--- Clean up
-DROP EVENT TRIGGER regress_reindex_start;
-DROP EVENT TRIGGER regress_reindex_end;
-DROP EVENT TRIGGER regress_reindex_end_snap;
-DROP FUNCTION reindex_end_command();
-DROP FUNCTION reindex_end_command_snap();
-DROP FUNCTION reindex_start_command();
-DROP TABLE concur_reindex_tab;
--- test Row Security Event Trigger
-RESET SESSION AUTHORIZATION;
-CREATE TABLE event_trigger_test (a integer, b text);
-CREATE OR REPLACE FUNCTION start_command()
-RETURNS event_trigger AS $$
-BEGIN
-RAISE NOTICE '% - ddl_command_start', tg_tag;
-END;
-$$ LANGUAGE plpgsql;
-CREATE OR REPLACE FUNCTION end_command()
-RETURNS event_trigger AS $$
-BEGIN
-RAISE NOTICE '% - ddl_command_end', tg_tag;
-END;
-$$ LANGUAGE plpgsql;
-CREATE OR REPLACE FUNCTION drop_sql_command()
-RETURNS event_trigger AS $$
-BEGIN
-RAISE NOTICE '% - sql_drop', tg_tag;
-END;
-$$ LANGUAGE plpgsql;
-CREATE EVENT TRIGGER start_rls_command ON ddl_command_start
-  WHEN TAG IN ('CREATE POLICY', 'ALTER POLICY', 'DROP POLICY') EXECUTE PROCEDURE start_command();
-CREATE EVENT TRIGGER end_rls_command ON ddl_command_end
-  WHEN TAG IN ('CREATE POLICY', 'ALTER POLICY', 'DROP POLICY') EXECUTE PROCEDURE end_command();
-CREATE EVENT TRIGGER sql_drop_command ON sql_drop
-  WHEN TAG IN ('DROP POLICY') EXECUTE PROCEDURE drop_sql_command();
-CREATE POLICY p1 ON event_trigger_test USING (FALSE);
-NOTICE: CREATE POLICY - ddl_command_start
-NOTICE: CREATE POLICY - ddl_command_end
-ALTER POLICY p1 ON event_trigger_test USING (TRUE);
-NOTICE: ALTER POLICY - ddl_command_start
-NOTICE: ALTER POLICY - ddl_command_end
-ALTER POLICY p1 ON event_trigger_test RENAME TO p2;
-NOTICE: ALTER POLICY - ddl_command_start
-NOTICE: ALTER POLICY - ddl_command_end
-DROP POLICY p2 ON event_trigger_test;
-NOTICE: DROP POLICY - ddl_command_start
-NOTICE: DROP POLICY - sql_drop
-NOTICE: DROP POLICY - ddl_command_end
--- Check the object addresses of all the event triggers.
-SELECT
-    e.evtname,
-    pg_describe_object('pg_event_trigger'::regclass, e.oid, 0) as descr,
-    b.type, b.object_names, b.object_args,
-    pg_identify_object(a.classid, a.objid, a.objsubid) as ident
-  FROM pg_event_trigger as e,
-       LATERAL pg_identify_object_as_address('pg_event_trigger'::regclass, e.oid, 0) as b,
-       LATERAL pg_get_object_address(b.type, b.object_names, b.object_args) as a
-  ORDER BY e.evtname;
-      evtname      |              descr              |     type      |    object_names     | object_args |                          ident                          
--------------------+---------------------------------+---------------+---------------------+-------------+---------------------------------------------------------
- end_rls_command   | event trigger end_rls_command   | event trigger | {end_rls_command}   | {}          | ("event trigger",,end_rls_command,end_rls_command)
- sql_drop_command  | event trigger sql_drop_command  | event trigger | {sql_drop_command}  | {}          | ("event trigger",,sql_drop_command,sql_drop_command)
- start_rls_command | event trigger start_rls_command | event trigger | {start_rls_command} | {}          | ("event trigger",,start_rls_command,start_rls_command)
-(3 rows)
-
-DROP EVENT TRIGGER start_rls_command;
-DROP EVENT TRIGGER end_rls_command;
-DROP EVENT TRIGGER sql_drop_command;
--- Check the GUC for disabling event triggers
-CREATE FUNCTION test_event_trigger_guc() RETURNS event_trigger
-LANGUAGE plpgsql AS $$
-DECLARE
-    obj record;
-BEGIN
-    FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects()
-    LOOP
-        RAISE NOTICE '% dropped %', tg_tag, obj.object_type;
-    END LOOP;
-END;
-$$;
-CREATE EVENT TRIGGER test_event_trigger_guc
-  ON sql_drop
-  WHEN TAG IN ('DROP POLICY') EXECUTE FUNCTION test_event_trigger_guc();
-SET event_triggers = 'on';
-CREATE POLICY pguc ON event_trigger_test USING (FALSE);
-DROP POLICY pguc ON event_trigger_test;
-NOTICE: DROP POLICY dropped policy
-CREATE POLICY pguc ON event_trigger_test USING (FALSE);
-SET event_triggers = 'off';
-DROP POLICY pguc ON event_trigger_test;
+psql: error: connection to server on socket "/tmp/pg_regress-zV0LjT/.s.PGSQL.40051" failed: FATAL: the database system is not yet accepting connections
+DETAIL: Consistent recovery state has not been yet reached.