diff -U3 /Users/admin/pgsql/src/test/regress/expected/brin.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/brin.out
--- /Users/admin/pgsql/src/test/regress/expected/brin.out	2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/brin.out	2024-12-13 13:20:09
@@ -539,53 +539,10 @@
 -- vacuum actually removes the TOAST rows. Creating an index concurrently
 -- is a one way to achieve that, because it does exactly such wait.
 CREATE INDEX CONCURRENTLY brin_test_temp_idx ON brintest_3(a);
-DROP INDEX brin_test_temp_idx;
--- vacuum the table, to discard TOAST data
-VACUUM brintest_3;
--- retry insert with a different random-looking (but deterministic) value
--- the value is different, and so should replace either min or max in the
--- brin summary
-WITH rand_value AS (SELECT string_agg(fipshash((-i)::text),'') AS val FROM generate_series(1,60) s(i))
-INSERT INTO brintest_3
-SELECT val, val, val, val FROM rand_value;
--- now try some queries, accessing the brin index
-SET enable_seqscan = off;
-EXPLAIN (COSTS OFF)
-SELECT * FROM brintest_3 WHERE b < '0';
-                   QUERY PLAN
-------------------------------------------------
- Bitmap Heap Scan on brintest_3
-   Recheck Cond: (b < '0'::text)
-   ->  Bitmap Index Scan on brin_test_toast_idx
-         Index Cond: (b < '0'::text)
-(4 rows)
-
-SELECT * FROM brintest_3 WHERE b < '0';
- a | b | c | d
----+---+---+---
-(0 rows)
-
-DROP TABLE brintest_3;
-RESET enable_seqscan;
--- test parallel build with immutable function.
-CREATE TABLE brintest_expr (n int);
-CREATE FUNCTION brintest_func() RETURNS int LANGUAGE sql IMMUTABLE RETURN 0;
-BEGIN;
-SET LOCAL min_parallel_table_scan_size = 0;
-SET LOCAL max_parallel_maintenance_workers = 4;
-CREATE INDEX brintest_expr_idx ON brintest_expr USING brin (brintest_func());
-COMMIT;
-DROP TABLE brintest_expr;
-DROP FUNCTION brintest_func();
--- test an unlogged table, mostly to get coverage of brinbuildempty
-CREATE UNLOGGED TABLE brintest_unlogged (n numrange);
-CREATE INDEX brinidx_unlogged ON brintest_unlogged USING brin (n);
-INSERT INTO brintest_unlogged VALUES (numrange(0, 2^1000::numeric));
-DROP TABLE brintest_unlogged;
--- test that the insert optimization works if no rows end up inserted
-CREATE TABLE brin_insert_optimization (a int);
-INSERT INTO brin_insert_optimization VALUES (1);
-CREATE INDEX brin_insert_optimization_idx ON brin_insert_optimization USING brin (a);
-UPDATE brin_insert_optimization SET a = a;
-REINDEX INDEX CONCURRENTLY brin_insert_optimization_idx;
-DROP TABLE brin_insert_optimization;
+WARNING: terminating connection because of crash of another server process
+DETAIL: The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory.
+HINT: In a moment you should be able to reconnect to the database and repeat your command.
+server closed the connection unexpectedly
+	This probably means the server terminated abnormally
+	before or while processing the request.
+connection to server was lost
diff -U3 /Users/admin/pgsql/src/test/regress/expected/privileges.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/privileges.out
--- /Users/admin/pgsql/src/test/regress/expected/privileges.out	2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/privileges.out	2024-12-13 13:20:09
@@ -1846,1413 +1846,10 @@
 REINDEX TABLE sro_tab;
 REINDEX INDEX sro_idx;
 REINDEX TABLE CONCURRENTLY sro_tab;
-DROP INDEX sro_idx;
--- CLUSTER
-CREATE INDEX sro_cluster_idx ON sro_tab ((sro_ifun(a) + sro_ifun(0)));
-CLUSTER sro_tab USING sro_cluster_idx;
-DROP INDEX sro_cluster_idx;
--- BRIN index
-CREATE INDEX sro_brin ON sro_tab USING brin ((sro_ifun(a) + sro_ifun(0)));
-SELECT brin_desummarize_range('sro_brin', 0);
- brin_desummarize_range
------------------------
- 
-(1 row)
-
-SELECT brin_summarize_range('sro_brin', 0);
- brin_summarize_range
----------------------
- 1
-(1 row)
-
-DROP TABLE sro_tab;
--- Check with a partitioned table
-CREATE TABLE sro_ptab (a int) PARTITION BY RANGE (a);
-ALTER TABLE sro_ptab OWNER TO regress_sro_user;
-CREATE TABLE sro_part PARTITION OF sro_ptab FOR VALUES FROM (1) TO (10);
-ALTER TABLE sro_part OWNER TO regress_sro_user;
-INSERT INTO sro_ptab VALUES (1), (2), (3);
-CREATE INDEX sro_pidx ON sro_ptab ((sro_ifun(a) + sro_ifun(0)))
- WHERE sro_ifun(a + 10) > sro_ifun(10);
-REINDEX TABLE sro_ptab;
-REINDEX INDEX CONCURRENTLY sro_pidx;
-SET SESSION AUTHORIZATION regress_sro_user;
-CREATE FUNCTION unwanted_grant() RETURNS void LANGUAGE sql AS
- 'GRANT regress_priv_group2 TO regress_sro_user';
-CREATE FUNCTION mv_action() RETURNS bool LANGUAGE sql AS
- 'DECLARE c CURSOR WITH HOLD FOR SELECT public.unwanted_grant(); SELECT true';
--- REFRESH of this MV will queue a GRANT at end of transaction
-CREATE MATERIALIZED VIEW sro_mv AS SELECT mv_action() WITH NO DATA;
-REFRESH MATERIALIZED VIEW sro_mv;
-ERROR: cannot create a cursor WITH HOLD within security-restricted operation
-CONTEXT: SQL function "mv_action" statement 1
-\c -
-REFRESH MATERIALIZED VIEW sro_mv;
-ERROR: cannot create a cursor WITH HOLD within security-restricted operation
-CONTEXT: SQL function "mv_action" statement 1
-SET SESSION AUTHORIZATION regress_sro_user;
--- INSERT to this table will queue a GRANT at end of transaction
-CREATE TABLE sro_trojan_table ();
-CREATE FUNCTION sro_trojan() RETURNS trigger LANGUAGE plpgsql AS
- 'BEGIN PERFORM public.unwanted_grant(); RETURN NULL; END';
-CREATE CONSTRAINT TRIGGER t AFTER INSERT ON sro_trojan_table
- INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE sro_trojan();
--- Now, REFRESH will issue such an INSERT, queueing the GRANT
-CREATE OR REPLACE FUNCTION mv_action() RETURNS bool LANGUAGE sql AS
- 'INSERT INTO public.sro_trojan_table DEFAULT VALUES; SELECT true';
-REFRESH MATERIALIZED VIEW sro_mv;
-ERROR: cannot fire deferred trigger within security-restricted operation
-CONTEXT: SQL function "mv_action" statement 1
-\c -
-REFRESH MATERIALIZED VIEW sro_mv;
-ERROR: cannot fire deferred trigger within security-restricted operation
-CONTEXT: SQL function "mv_action" statement 1
-BEGIN; SET CONSTRAINTS ALL IMMEDIATE; REFRESH MATERIALIZED VIEW sro_mv; COMMIT;
-ERROR: permission denied to grant role "regress_priv_group2"
-DETAIL: Only roles with the ADMIN option on role "regress_priv_group2" may grant this role.
-CONTEXT: SQL function "unwanted_grant" statement 1
-SQL statement "SELECT public.unwanted_grant()"
-PL/pgSQL function public.sro_trojan() line 1 at PERFORM
-SQL function "mv_action" statement 1
--- REFRESH MATERIALIZED VIEW CONCURRENTLY use of eval_const_expressions()
-SET SESSION AUTHORIZATION regress_sro_user;
-CREATE FUNCTION unwanted_grant_nofail(int) RETURNS int
- IMMUTABLE LANGUAGE plpgsql AS $$
-BEGIN
- PERFORM public.unwanted_grant();
- RAISE WARNING 'owned';
- RETURN 1;
-EXCEPTION WHEN OTHERS THEN
- RETURN 2;
-END$$;
-CREATE MATERIALIZED VIEW sro_index_mv AS SELECT 1 AS c;
-CREATE UNIQUE INDEX ON sro_index_mv (c) WHERE unwanted_grant_nofail(1) > 0;
-\c -
-REFRESH MATERIALIZED VIEW CONCURRENTLY sro_index_mv;
-REFRESH MATERIALIZED VIEW sro_index_mv;
-DROP OWNED BY regress_sro_user;
-DROP ROLE regress_sro_user;
--- Admin options
-SET SESSION AUTHORIZATION regress_priv_user4;
-CREATE FUNCTION dogrant_ok() RETURNS void LANGUAGE sql SECURITY DEFINER AS
- 'GRANT regress_priv_group2 TO regress_priv_user5';
-GRANT regress_priv_group2 TO regress_priv_user5; -- ok: had ADMIN OPTION
-SET ROLE regress_priv_group2;
-GRANT regress_priv_group2 TO regress_priv_user5; -- fails: SET ROLE suspended privilege
-ERROR: permission denied to grant role "regress_priv_group2"
-DETAIL: Only roles with the ADMIN option on role "regress_priv_group2" may grant this role.
-SET SESSION AUTHORIZATION regress_priv_user1;
-GRANT regress_priv_group2 TO regress_priv_user5; -- fails: no ADMIN OPTION
-ERROR: permission denied to grant role "regress_priv_group2"
-DETAIL: Only roles with the ADMIN option on role "regress_priv_group2" may grant this role.
-SELECT dogrant_ok(); -- ok: SECURITY DEFINER conveys ADMIN
-NOTICE: role "regress_priv_user5" has already been granted membership in role "regress_priv_group2" by role "regress_priv_user4"
- dogrant_ok
------------
- 
-(1 row)
-
-SET ROLE regress_priv_group2;
-GRANT regress_priv_group2 TO regress_priv_user5; -- fails: SET ROLE did not help
-ERROR: permission denied to grant role "regress_priv_group2"
-DETAIL: Only roles with the ADMIN option on role "regress_priv_group2" may grant this role.
-SET SESSION AUTHORIZATION regress_priv_group2;
-GRANT regress_priv_group2 TO regress_priv_user5; -- fails: no self-admin
-ERROR: permission denied to grant role "regress_priv_group2"
-DETAIL: Only roles with the ADMIN option on role "regress_priv_group2" may grant this role.
-SET SESSION AUTHORIZATION regress_priv_user4;
-DROP FUNCTION dogrant_ok();
-REVOKE regress_priv_group2 FROM regress_priv_user5;
--- has_sequence_privilege tests
-\c -
-CREATE SEQUENCE x_seq;
-GRANT USAGE on x_seq to regress_priv_user2;
-SELECT has_sequence_privilege('regress_priv_user1', 'atest1', 'SELECT');
-ERROR: "atest1" is not a sequence
-SELECT has_sequence_privilege('regress_priv_user1', 'x_seq', 'INSERT');
-ERROR: unrecognized privilege type: "INSERT"
-SELECT has_sequence_privilege('regress_priv_user1', 'x_seq', 'SELECT');
- has_sequence_privilege
------------------------
- f
-(1 row)
-
-SET SESSION AUTHORIZATION regress_priv_user2;
-SELECT has_sequence_privilege('x_seq', 'USAGE');
- has_sequence_privilege
------------------------
- t
-(1 row)
-
--- largeobject privilege tests
-\c -
-SET SESSION AUTHORIZATION regress_priv_user1;
-SELECT lo_create(1001);
- lo_create
-----------
- 1001
-(1 row)
-
-SELECT lo_create(1002);
- lo_create
-----------
- 1002
-(1 row)
-
-SELECT lo_create(1003);
- lo_create
-----------
- 1003
-(1 row)
-
-SELECT lo_create(1004);
- lo_create
-----------
- 1004
-(1 row)
-
-SELECT lo_create(1005);
- lo_create
-----------
- 1005
-(1 row)
-
-GRANT ALL ON LARGE OBJECT 1001 TO PUBLIC;
-GRANT SELECT ON LARGE OBJECT 1003 TO regress_priv_user2;
-GRANT SELECT,UPDATE ON LARGE OBJECT 1004 TO regress_priv_user2;
-GRANT ALL ON LARGE OBJECT 1005 TO regress_priv_user2;
-GRANT SELECT ON LARGE OBJECT 1005 TO regress_priv_user2 WITH GRANT OPTION;
-GRANT SELECT, INSERT ON LARGE OBJECT 1001 TO PUBLIC; -- to be failed
-ERROR: invalid privilege type INSERT for large object
-GRANT SELECT, UPDATE ON LARGE OBJECT 1001 TO nosuchuser; -- to be failed
-ERROR: role "nosuchuser" does not exist
-GRANT SELECT, UPDATE ON LARGE OBJECT 999 TO PUBLIC; -- to be failed
-ERROR: large object 999 does not exist
-\c -
-SET SESSION AUTHORIZATION regress_priv_user2;
-SELECT lo_create(2001);
- lo_create
-----------
- 2001
-(1 row)
-
-SELECT lo_create(2002);
- lo_create
-----------
- 2002
-(1 row)
-
-SELECT loread(lo_open(1001, x'20000'::int), 32); -- allowed, for now
- loread
--------
- \x
-(1 row)
-
-SELECT lowrite(lo_open(1001, x'40000'::int), 'abcd'); -- fail, wrong mode
-ERROR: large object descriptor 0 was not opened for writing
-SELECT loread(lo_open(1001, x'40000'::int), 32);
- loread
--------
- \x
-(1 row)
-
-SELECT loread(lo_open(1002, x'40000'::int), 32); -- to be denied
-ERROR: permission denied for large object 1002
-SELECT loread(lo_open(1003, x'40000'::int), 32);
- loread
--------
- \x
-(1 row)
-
-SELECT loread(lo_open(1004, x'40000'::int), 32);
- loread
--------
- \x
-(1 row)
-
-SELECT lowrite(lo_open(1001, x'20000'::int), 'abcd');
- lowrite
---------
- 4
-(1 row)
-
-SELECT lowrite(lo_open(1002, x'20000'::int), 'abcd'); -- to be denied
-ERROR: permission denied for large object 1002
-SELECT lowrite(lo_open(1003, x'20000'::int), 'abcd'); -- to be denied
-ERROR: permission denied for large object 1003
-SELECT lowrite(lo_open(1004, x'20000'::int), 'abcd');
- lowrite
---------
- 4
-(1 row)
-
-GRANT SELECT ON LARGE OBJECT 1005 TO regress_priv_user3;
-GRANT UPDATE ON LARGE OBJECT 1006 TO regress_priv_user3; -- to be denied
-ERROR: large object 1006 does not exist
-REVOKE ALL ON LARGE OBJECT 2001, 2002 FROM PUBLIC;
-GRANT ALL ON LARGE OBJECT 2001 TO regress_priv_user3;
-SELECT lo_unlink(1001); -- to be denied
-ERROR: must be owner of large object 1001
-SELECT lo_unlink(2002);
- lo_unlink
-----------
- 1
-(1 row)
-
-\c -
--- confirm ACL setting
-SELECT oid, pg_get_userbyid(lomowner) ownername, lomacl FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
- oid | ownername | lomacl
-------+--------------------+------------------------------------------------------------------------------------------------------------------------------
- 1001 | regress_priv_user1 | {regress_priv_user1=rw/regress_priv_user1,=rw/regress_priv_user1}
- 1002 | regress_priv_user1 |
- 1003 | regress_priv_user1 | {regress_priv_user1=rw/regress_priv_user1,regress_priv_user2=r/regress_priv_user1}
- 1004 | regress_priv_user1 | {regress_priv_user1=rw/regress_priv_user1,regress_priv_user2=rw/regress_priv_user1}
- 1005 | regress_priv_user1 | {regress_priv_user1=rw/regress_priv_user1,regress_priv_user2=r*w/regress_priv_user1,regress_priv_user3=r/regress_priv_user2}
- 2001 | regress_priv_user2 | {regress_priv_user2=rw/regress_priv_user2,regress_priv_user3=rw/regress_priv_user2}
-(6 rows)
-
-SET SESSION AUTHORIZATION regress_priv_user3;
-SELECT loread(lo_open(1001, x'40000'::int), 32);
- loread
------------
- \x61626364
-(1 row)
-
-SELECT loread(lo_open(1003, x'40000'::int), 32); -- to be denied
-ERROR: permission denied for large object 1003
-SELECT loread(lo_open(1005, x'40000'::int), 32);
- loread
--------
- \x
-(1 row)
-
-SELECT lo_truncate(lo_open(1005, x'20000'::int), 10); -- to be denied
-ERROR: permission denied for large object 1005
-SELECT lo_truncate(lo_open(2001, x'20000'::int), 10);
- lo_truncate
-------------
- 0
-(1 row)
-
--- has_largeobject_privilege function
--- superuser
-\c -
-SELECT has_largeobject_privilege(1001, 'SELECT');
- has_largeobject_privilege
---------------------------
- t
-(1 row)
-
-SELECT has_largeobject_privilege(1002, 'SELECT');
- has_largeobject_privilege
---------------------------
- t
-(1 row)
-
-SELECT has_largeobject_privilege(1003, 'SELECT');
- has_largeobject_privilege
---------------------------
- t
-(1 row)
-
-SELECT has_largeobject_privilege(1004, 'SELECT');
- has_largeobject_privilege
---------------------------
- t
-(1 row)
-
-SELECT has_largeobject_privilege(1001, 'UPDATE');
- has_largeobject_privilege
---------------------------
- t
-(1 row)
-
-SELECT has_largeobject_privilege(1002, 'UPDATE');
- has_largeobject_privilege
---------------------------
- t
-(1 row)
-
-SELECT has_largeobject_privilege(1003, 'UPDATE');
- has_largeobject_privilege
---------------------------
- t
-(1 row)
-
-SELECT has_largeobject_privilege(1004, 'UPDATE');
- has_largeobject_privilege
---------------------------
- t
-(1 row)
-
--- not-existing large object
-SELECT has_largeobject_privilege(9999, 'SELECT'); -- NULL
- has_largeobject_privilege
---------------------------
- 
-(1 row)
-
--- non-superuser
-SET SESSION AUTHORIZATION regress_priv_user2;
-SELECT has_largeobject_privilege(1001, 'SELECT');
- has_largeobject_privilege
---------------------------
- t
-(1 row)
-
-SELECT has_largeobject_privilege(1002, 'SELECT'); -- false
- has_largeobject_privilege
---------------------------
- f
-(1 row)
-
-SELECT has_largeobject_privilege(1003, 'SELECT');
- has_largeobject_privilege
---------------------------
- t
-(1 row)
-
-SELECT has_largeobject_privilege(1004, 'SELECT');
- has_largeobject_privilege
---------------------------
- t
-(1 row)
-
-SELECT has_largeobject_privilege(1001, 'UPDATE');
- has_largeobject_privilege
---------------------------
- t
-(1 row)
-
-SELECT has_largeobject_privilege(1002, 'UPDATE'); -- false
- has_largeobject_privilege
---------------------------
- f
-(1 row)
-
-SELECT has_largeobject_privilege(1003, 'UPDATE'); -- false
- has_largeobject_privilege
---------------------------
- f
-(1 row)
-
-SELECT has_largeobject_privilege(1004, 'UPDATE');
- has_largeobject_privilege
---------------------------
- t
-(1 row)
-
-SELECT has_largeobject_privilege('regress_priv_user3', 1001, 'SELECT');
- has_largeobject_privilege
---------------------------
- t
-(1 row)
-
-SELECT has_largeobject_privilege('regress_priv_user3', 1003, 'SELECT'); -- false
- has_largeobject_privilege
---------------------------
- f
-(1 row)
-
-SELECT has_largeobject_privilege('regress_priv_user3', 1005, 'SELECT');
- has_largeobject_privilege
---------------------------
- t
-(1 row)
-
-SELECT has_largeobject_privilege('regress_priv_user3', 1005, 'UPDATE'); -- false
- has_largeobject_privilege
---------------------------
- f
-(1 row)
-
-SELECT has_largeobject_privilege('regress_priv_user3', 2001, 'UPDATE');
- has_largeobject_privilege
---------------------------
- t
-(1 row)
-
--- compatibility mode in largeobject permission
-\c -
-SET lo_compat_privileges = false; -- default setting
-SET SESSION AUTHORIZATION regress_priv_user4;
-SELECT has_largeobject_privilege(1002, 'SELECT'); -- false
- has_largeobject_privilege
---------------------------
- f
-(1 row)
-
-SELECT has_largeobject_privilege(1002, 'UPDATE'); -- false
- has_largeobject_privilege
---------------------------
- f
-(1 row)
-
-SELECT loread(lo_open(1002, x'40000'::int), 32); -- to be denied
-ERROR: permission denied for large object 1002
-SELECT lowrite(lo_open(1002, x'20000'::int), 'abcd'); -- to be denied
-ERROR: permission denied for large object 1002
-SELECT lo_truncate(lo_open(1002, x'20000'::int), 10); -- to be denied
-ERROR: permission denied for large object 1002
-SELECT lo_put(1002, 1, 'abcd'); -- to be denied
-ERROR: permission denied for large object 1002
-SELECT lo_unlink(1002); -- to be denied
-ERROR: must be owner of large object 1002
-SELECT lo_export(1001, '/dev/null'); -- to be denied
-ERROR: permission denied for function lo_export
-SELECT lo_import('/dev/null'); -- to be denied
-ERROR: permission denied for function lo_import
-SELECT lo_import('/dev/null', 2003); -- to be denied
-ERROR: permission denied for function lo_import
-\c -
-SET lo_compat_privileges = true; -- compatibility mode
-SET SESSION AUTHORIZATION regress_priv_user4;
-SELECT has_largeobject_privilege(1002, 'SELECT'); -- true
- has_largeobject_privilege
---------------------------
- t
-(1 row)
-
-SELECT has_largeobject_privilege(1002, 'UPDATE'); -- true
- has_largeobject_privilege
---------------------------
- t
-(1 row)
-
-SELECT loread(lo_open(1002, x'40000'::int), 32);
- loread
--------
- \x
-(1 row)
-
-SELECT lowrite(lo_open(1002, x'20000'::int), 'abcd');
- lowrite
---------
- 4
-(1 row)
-
-SELECT lo_truncate(lo_open(1002, x'20000'::int), 10);
- lo_truncate
-------------
- 0
-(1 row)
-
-SELECT lo_unlink(1002);
- lo_unlink
-----------
- 1
-(1 row)
-
-SELECT lo_export(1001, '/dev/null'); -- to be denied
-ERROR: permission denied for function lo_export
--- don't allow unpriv users to access pg_largeobject contents
-\c -
-SELECT * FROM pg_largeobject LIMIT 0;
- loid | pageno | data
-------+--------+------
-(0 rows)
-
-SET SESSION AUTHORIZATION regress_priv_user1;
-SELECT * FROM pg_largeobject LIMIT 0; -- to be denied
-ERROR: permission denied for table pg_largeobject
--- pg_signal_backend can't signal superusers
-RESET SESSION AUTHORIZATION;
-BEGIN;
-CREATE OR REPLACE FUNCTION terminate_nothrow(pid int) RETURNS bool
- LANGUAGE plpgsql SECURITY DEFINER SET client_min_messages = error AS $$
-BEGIN
- RETURN pg_terminate_backend($1);
-EXCEPTION WHEN OTHERS THEN
- RETURN false;
-END$$;
-ALTER FUNCTION terminate_nothrow OWNER TO pg_signal_backend;
-SELECT backend_type FROM pg_stat_activity
-WHERE CASE WHEN COALESCE(usesysid, 10) = 10 THEN terminate_nothrow(pid) END;
- backend_type
--------------
-(0 rows)
-
-ROLLBACK;
--- test pg_database_owner
-RESET SESSION AUTHORIZATION;
-GRANT pg_database_owner TO regress_priv_user1;
-ERROR: role "pg_database_owner" cannot have explicit members
-GRANT regress_priv_user1 TO pg_database_owner;
-ERROR: role "pg_database_owner" cannot be a member of any role
-CREATE TABLE datdba_only ();
-ALTER TABLE datdba_only OWNER TO pg_database_owner;
-REVOKE DELETE ON datdba_only FROM pg_database_owner;
-SELECT
- pg_has_role('regress_priv_user1', 'pg_database_owner', 'USAGE') as priv,
- pg_has_role('regress_priv_user1', 'pg_database_owner', 'MEMBER') as mem,
- pg_has_role('regress_priv_user1', 'pg_database_owner',
- 'MEMBER WITH ADMIN OPTION') as admin;
- priv | mem | admin
-------+-----+-------
- f | f | f
-(1 row)
-
-BEGIN;
-DO $$BEGIN EXECUTE format(
- 'ALTER DATABASE %I OWNER TO regress_priv_group2', current_catalog); END$$;
-SELECT
- pg_has_role('regress_priv_user1', 'pg_database_owner', 'USAGE') as priv,
- pg_has_role('regress_priv_user1', 'pg_database_owner', 'MEMBER') as mem,
- pg_has_role('regress_priv_user1', 'pg_database_owner',
- 'MEMBER WITH ADMIN OPTION') as admin;
- priv | mem | admin
-------+-----+-------
- t | t | f
-(1 row)
-
-SET SESSION AUTHORIZATION regress_priv_user1;
-TABLE information_schema.enabled_roles ORDER BY role_name COLLATE "C";
- role_name
----------------------
- pg_database_owner
- regress_priv_group2
- regress_priv_user1
-(3 rows)
-
-TABLE information_schema.applicable_roles ORDER BY role_name COLLATE "C";
- grantee | role_name | is_grantable
----------------------+---------------------+--------------
- regress_priv_group2 | pg_database_owner | NO
- regress_priv_user1 | regress_priv_group2 | NO
-(2 rows)
-
-INSERT INTO datdba_only DEFAULT VALUES;
-SAVEPOINT q; DELETE FROM datdba_only; ROLLBACK TO q;
-ERROR: permission denied for table datdba_only
-SET SESSION AUTHORIZATION regress_priv_user2;
-TABLE information_schema.enabled_roles;
- role_name
---------------------
- regress_priv_user2
-(1 row)
-
-INSERT INTO datdba_only DEFAULT VALUES;
-ERROR: permission denied for table datdba_only
-ROLLBACK;
--- test default ACLs
-\c -
-CREATE SCHEMA testns;
-GRANT ALL ON SCHEMA testns TO regress_priv_user1;
-CREATE TABLE testns.acltest1 (x int);
-SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- no
- has_table_privilege
---------------------
- f
-(1 row)
-
-SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no
- has_table_privilege
---------------------
- f
-(1 row)
-
--- placeholder for test with duplicated schema and role names
-ALTER DEFAULT PRIVILEGES IN SCHEMA testns,testns GRANT SELECT ON TABLES TO public,public;
-SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- no
- has_table_privilege
---------------------
- f
-(1 row)
-
-SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no
- has_table_privilege
---------------------
- f
-(1 row)
-
-DROP TABLE testns.acltest1;
-CREATE TABLE testns.acltest1 (x int);
-SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- yes
- has_table_privilege
---------------------
- t
-(1 row)
-
-SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no
- has_table_privilege
---------------------
- f
-(1 row)
-
-ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT INSERT ON TABLES TO regress_priv_user1;
-DROP TABLE testns.acltest1;
-CREATE TABLE testns.acltest1 (x int);
-SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- yes
- has_table_privilege
---------------------
- t
-(1 row)
-
-SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- yes
- has_table_privilege
---------------------
- t
-(1 row)
-
-ALTER DEFAULT PRIVILEGES IN SCHEMA testns REVOKE INSERT ON TABLES FROM regress_priv_user1;
-DROP TABLE testns.acltest1;
-CREATE TABLE testns.acltest1 (x int);
-SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- yes
- has_table_privilege
---------------------
- t
-(1 row)
-
-SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no
- has_table_privilege
---------------------
- f
-(1 row)
-
-ALTER DEFAULT PRIVILEGES FOR ROLE regress_priv_user1 REVOKE EXECUTE ON FUNCTIONS FROM public;
-ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT USAGE ON SCHEMAS TO regress_priv_user2; -- error
-ERROR: cannot use IN SCHEMA clause when using GRANT/REVOKE ON SCHEMAS
--- Test makeaclitem()
-SELECT makeaclitem('regress_priv_user1'::regrole, 'regress_priv_user2'::regrole,
- 'SELECT', TRUE); -- single privilege
- makeaclitem
------------------------------------------
- regress_priv_user1=r*/regress_priv_user2
-(1 row)
-
-SELECT makeaclitem('regress_priv_user1'::regrole, 'regress_priv_user2'::regrole,
- 'SELECT, INSERT, UPDATE , DELETE ', FALSE); -- multiple privileges
- makeaclitem
--------------------------------------------
- regress_priv_user1=arwd/regress_priv_user2
-(1 row)
-
-SELECT makeaclitem('regress_priv_user1'::regrole, 'regress_priv_user2'::regrole,
- 'SELECT, fake_privilege', FALSE); -- error
-ERROR: unrecognized privilege type: "fake_privilege"
--- Test non-throwing aclitem I/O
-SELECT pg_input_is_valid('regress_priv_user1=r/regress_priv_user2', 'aclitem');
- pg_input_is_valid
-------------------
- t
-(1 row)
-
-SELECT pg_input_is_valid('regress_priv_user1=r/', 'aclitem');
- pg_input_is_valid
-------------------
- f
-(1 row)
-
-SELECT * FROM pg_input_error_info('regress_priv_user1=r/', 'aclitem');
- message | detail | hint | sql_error_code
----------------------------------+--------+------+----------------
- a name must follow the "/" sign | | | 22P02
-(1 row)
-
-SELECT pg_input_is_valid('regress_priv_user1=r/regress_no_such_user', 'aclitem');
- pg_input_is_valid
-------------------
- f
-(1 row)
-
-SELECT * FROM pg_input_error_info('regress_priv_user1=r/regress_no_such_user', 'aclitem');
- message | detail | hint | sql_error_code
---------------------------------------------+--------+------+----------------
- role "regress_no_such_user" does not exist | | | 42704
-(1 row)
-
-SELECT pg_input_is_valid('regress_priv_user1=rY', 'aclitem');
- pg_input_is_valid
-------------------
- f
-(1 row)
-
-SELECT * FROM pg_input_error_info('regress_priv_user1=rY', 'aclitem');
- message | detail | hint | sql_error_code
-----------------------------------------------------------+--------+------+----------------
- invalid mode character: must be one of "arwdDxtXUCTcsAm" | | | 22P02
-(1 row)
-
---
--- Testing blanket default grants is very hazardous since it might change
--- the privileges attached to objects created by concurrent regression tests.
--- To avoid that, be sure to revoke the privileges again before committing.
---
-BEGIN;
-ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO regress_priv_user2;
-CREATE SCHEMA testns2;
-SELECT has_schema_privilege('regress_priv_user2', 'testns2', 'USAGE'); -- yes
- has_schema_privilege
----------------------
- t
-(1 row)
-
-SELECT has_schema_privilege('regress_priv_user6', 'testns2', 'USAGE'); -- yes
- has_schema_privilege
----------------------
- t
-(1 row)
-
-SELECT has_schema_privilege('regress_priv_user2', 'testns2', 'CREATE'); -- no
- has_schema_privilege
----------------------
- f
-(1 row)
-
-ALTER DEFAULT PRIVILEGES REVOKE USAGE ON SCHEMAS FROM regress_priv_user2;
-CREATE SCHEMA testns3;
-SELECT has_schema_privilege('regress_priv_user2', 'testns3', 'USAGE'); -- no
- has_schema_privilege
----------------------
- f
-(1 row)
-
-SELECT has_schema_privilege('regress_priv_user2', 'testns3', 'CREATE'); -- no
- has_schema_privilege
----------------------
- f
-(1 row)
-
-ALTER DEFAULT PRIVILEGES GRANT ALL ON SCHEMAS TO regress_priv_user2;
-CREATE SCHEMA testns4;
-SELECT has_schema_privilege('regress_priv_user2', 'testns4', 'USAGE'); -- yes
- has_schema_privilege
----------------------
- t
-(1 row)
-
-SELECT has_schema_privilege('regress_priv_user2', 'testns4', 'CREATE'); -- yes
- has_schema_privilege
----------------------
- t
-(1 row)
-
-ALTER DEFAULT PRIVILEGES REVOKE ALL ON SCHEMAS FROM regress_priv_user2;
-COMMIT;
--- Test for DROP OWNED BY with shared dependencies. This is done in a
--- separate, rollbacked, transaction to avoid any trouble with other
--- regression sessions.
-BEGIN;
-ALTER DEFAULT PRIVILEGES GRANT ALL ON FUNCTIONS TO regress_priv_user2;
-ALTER DEFAULT PRIVILEGES GRANT ALL ON SCHEMAS TO regress_priv_user2;
-ALTER DEFAULT PRIVILEGES GRANT ALL ON SEQUENCES TO regress_priv_user2;
-ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO regress_priv_user2;
-ALTER DEFAULT PRIVILEGES GRANT ALL ON TYPES TO regress_priv_user2;
-SELECT count(*) FROM pg_shdepend
- WHERE deptype = 'a' AND
- refobjid = 'regress_priv_user2'::regrole AND
- classid = 'pg_default_acl'::regclass;
- count
--------
- 5
-(1 row)
-
-DROP OWNED BY regress_priv_user2, regress_priv_user2;
-SELECT count(*) FROM pg_shdepend
- WHERE deptype = 'a' AND
- refobjid = 'regress_priv_user2'::regrole AND
- classid = 'pg_default_acl'::regclass;
- count
--------
- 0
-(1 row)
-
-ROLLBACK;
-CREATE SCHEMA testns5;
-SELECT has_schema_privilege('regress_priv_user2', 'testns5', 'USAGE'); -- no
- has_schema_privilege
----------------------
- f
-(1 row)
-
-SELECT has_schema_privilege('regress_priv_user2', 'testns5', 'CREATE'); -- no
- has_schema_privilege
----------------------
- f
-(1 row)
-
-SET ROLE regress_priv_user1;
-CREATE FUNCTION testns.foo() RETURNS int AS 'select 1' LANGUAGE sql;
-CREATE AGGREGATE testns.agg1(int) (sfunc = int4pl, stype = int4);
-CREATE PROCEDURE testns.bar() AS 'select 1' LANGUAGE sql;
-SELECT has_function_privilege('regress_priv_user2', 'testns.foo()', 'EXECUTE'); -- no
- has_function_privilege
------------------------
- f
-(1 row)
-
-SELECT has_function_privilege('regress_priv_user2', 'testns.agg1(int)', 'EXECUTE'); -- no
- has_function_privilege
------------------------
- f
-(1 row)
-
-SELECT has_function_privilege('regress_priv_user2', 'testns.bar()', 'EXECUTE'); -- no
- has_function_privilege
------------------------
- f
-(1 row)
-
-ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT EXECUTE ON ROUTINES to public;
-DROP FUNCTION testns.foo();
-CREATE FUNCTION testns.foo() RETURNS int AS 'select 1' LANGUAGE sql;
-DROP AGGREGATE testns.agg1(int);
-CREATE AGGREGATE testns.agg1(int) (sfunc = int4pl, stype = int4);
-DROP PROCEDURE testns.bar();
-CREATE PROCEDURE testns.bar() AS 'select 1' LANGUAGE sql;
-SELECT has_function_privilege('regress_priv_user2', 'testns.foo()', 'EXECUTE'); -- yes
- has_function_privilege
------------------------
- t
-(1 row)
-
-SELECT has_function_privilege('regress_priv_user2', 'testns.agg1(int)', 'EXECUTE'); -- yes
- has_function_privilege
------------------------
- t
-(1 row)
-
-SELECT has_function_privilege('regress_priv_user2', 'testns.bar()', 'EXECUTE'); -- yes (counts as function here)
- has_function_privilege
------------------------
- t
-(1 row)
-
-DROP FUNCTION testns.foo();
-DROP AGGREGATE testns.agg1(int);
-DROP PROCEDURE testns.bar();
-ALTER DEFAULT PRIVILEGES FOR ROLE regress_priv_user1 REVOKE USAGE ON TYPES FROM public;
-CREATE DOMAIN testns.priv_testdomain1 AS int;
-SELECT has_type_privilege('regress_priv_user2', 'testns.priv_testdomain1', 'USAGE'); -- no
- has_type_privilege
--------------------
- f
-(1 row)
-
-ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT USAGE ON TYPES to public;
-DROP DOMAIN testns.priv_testdomain1;
-CREATE DOMAIN testns.priv_testdomain1 AS int;
-SELECT has_type_privilege('regress_priv_user2', 'testns.priv_testdomain1', 'USAGE'); -- yes
- has_type_privilege
--------------------
- t
-(1 row)
-
-DROP DOMAIN testns.priv_testdomain1;
-RESET ROLE;
-SELECT count(*)
- FROM pg_default_acl d LEFT JOIN pg_namespace n ON defaclnamespace = n.oid
- WHERE nspname = 'testns';
- count
--------
- 3
-(1 row)
-
-DROP SCHEMA testns CASCADE;
-NOTICE: drop cascades to table testns.acltest1
-DROP SCHEMA testns2 CASCADE;
-DROP SCHEMA testns3 CASCADE;
-DROP SCHEMA testns4 CASCADE;
-DROP SCHEMA testns5 CASCADE;
-SELECT d.* -- check that entries went away
- FROM pg_default_acl d LEFT JOIN pg_namespace n ON defaclnamespace = n.oid
- WHERE nspname IS NULL AND defaclnamespace != 0;
- oid | defaclrole | defaclnamespace | defaclobjtype | defaclacl
------+------------+-----------------+---------------+-----------
-(0 rows)
-
--- Grant on all objects of given type in a schema
-\c -
-CREATE SCHEMA testns;
-CREATE TABLE testns.t1 (f1 int);
-CREATE TABLE testns.t2 (f1 int);
-SELECT has_table_privilege('regress_priv_user1', 'testns.t1', 'SELECT'); -- false
- has_table_privilege
---------------------
- f
-(1 row)
-
-GRANT ALL ON ALL TABLES IN SCHEMA testns TO regress_priv_user1;
-SELECT has_table_privilege('regress_priv_user1', 'testns.t1', 'SELECT'); -- true
- has_table_privilege
---------------------
- t
-(1 row)
-
-SELECT has_table_privilege('regress_priv_user1', 'testns.t2', 'SELECT'); -- true
- has_table_privilege
---------------------
- t
-(1 row)
-
-REVOKE ALL ON ALL TABLES IN SCHEMA testns FROM regress_priv_user1;
-SELECT has_table_privilege('regress_priv_user1', 'testns.t1', 'SELECT'); -- false
- has_table_privilege
---------------------
- f
-(1 row)
-
-SELECT has_table_privilege('regress_priv_user1', 'testns.t2', 'SELECT'); -- false
- has_table_privilege
---------------------
- f
-(1 row)
-
-CREATE FUNCTION testns.priv_testfunc(int) RETURNS int AS 'select 3 * $1;' LANGUAGE sql;
-CREATE AGGREGATE testns.priv_testagg(int) (sfunc = int4pl, stype = int4);
-CREATE PROCEDURE testns.priv_testproc(int) AS 'select 3' LANGUAGE sql;
-SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- true by default
- has_function_privilege
------------------------
- t
-(1 row)
-
-SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testagg(int)', 'EXECUTE'); -- true by default
- has_function_privilege
------------------------
- t
-(1 row)
-
-SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- true by default
- has_function_privilege
------------------------
- t
-(1 row)
-
-REVOKE ALL ON ALL FUNCTIONS IN SCHEMA testns FROM PUBLIC;
-SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- false
- has_function_privilege
------------------------
- f
-(1 row)
-
-SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testagg(int)', 'EXECUTE'); -- false
- has_function_privilege
------------------------
- f
-(1 row)
-
-SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- still true, not a function
- has_function_privilege
------------------------
- t
-(1 row)
-
-REVOKE ALL ON ALL PROCEDURES IN SCHEMA testns FROM PUBLIC;
-SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- now false
- has_function_privilege
------------------------
- f
-(1 row)
-
-GRANT ALL ON ALL ROUTINES IN SCHEMA testns TO PUBLIC;
-SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- true
- has_function_privilege
------------------------
- t
-(1 row)
-
-SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testagg(int)', 'EXECUTE'); -- true
- has_function_privilege
------------------------
- t
-(1 row)
-
-SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- true
- has_function_privilege
------------------------
- t
-(1 row)
-
-DROP SCHEMA testns CASCADE;
-NOTICE: drop cascades to 5 other objects
-DETAIL: drop cascades to table testns.t1
-drop cascades to table testns.t2
-drop cascades to function testns.priv_testfunc(integer)
-drop cascades to function testns.priv_testagg(integer)
-drop cascades to function testns.priv_testproc(integer)
--- Change owner of the schema & and rename of new schema owner
-\c -
-CREATE ROLE regress_schemauser1 superuser login;
-CREATE ROLE regress_schemauser2 superuser login;
-SET SESSION ROLE regress_schemauser1;
-CREATE SCHEMA testns;
-SELECT nspname, rolname FROM pg_namespace, pg_roles WHERE pg_namespace.nspname = 'testns' AND pg_namespace.nspowner = pg_roles.oid;
- nspname | rolname
----------+---------------------
- testns | regress_schemauser1
-(1 row)
-
-ALTER SCHEMA testns OWNER TO regress_schemauser2;
-ALTER ROLE regress_schemauser2 RENAME TO regress_schemauser_renamed;
-SELECT nspname, rolname FROM pg_namespace, pg_roles WHERE pg_namespace.nspname = 'testns' AND pg_namespace.nspowner = pg_roles.oid;
- nspname | rolname
----------+----------------------------
- testns | regress_schemauser_renamed
-(1 row)
-
-set session role regress_schemauser_renamed;
-DROP SCHEMA testns CASCADE;
--- clean up
-\c -
-DROP ROLE regress_schemauser1;
-DROP ROLE regress_schemauser_renamed;
--- test that dependent privileges are revoked (or not) properly
-\c -
-set session role regress_priv_user1;
-create table dep_priv_test (a int);
-grant select on dep_priv_test to regress_priv_user2 with grant option;
-grant select on dep_priv_test to regress_priv_user3 with grant option;
-set session role regress_priv_user2;
-grant select on dep_priv_test to regress_priv_user4 with grant option;
-set session role regress_priv_user3;
-grant select on dep_priv_test to regress_priv_user4 with grant option;
-set session role regress_priv_user4;
-grant select on dep_priv_test to regress_priv_user5;
-\dp dep_priv_test
- Access privileges
- Schema | Name | Type | Access privileges | Column privileges | Policies
---------+---------------+-------+------------------------------------------------+-------------------+----------
- public | dep_priv_test | table | regress_priv_user1=arwdDxtm/regress_priv_user1+| |
- | | | regress_priv_user2=r*/regress_priv_user1 +| |
- | | | regress_priv_user3=r*/regress_priv_user1 +| |
- | | | regress_priv_user4=r*/regress_priv_user2 +| |
- | | | regress_priv_user4=r*/regress_priv_user3 +| |
- | | | regress_priv_user5=r/regress_priv_user4 | |
-(1 row)
-
-set session role regress_priv_user2;
-revoke select on dep_priv_test from regress_priv_user4 cascade;
-\dp dep_priv_test
- Access privileges
- Schema | Name | Type | Access privileges | Column privileges | Policies
---------+---------------+-------+------------------------------------------------+-------------------+----------
- public | dep_priv_test | table | regress_priv_user1=arwdDxtm/regress_priv_user1+| |
- | | | regress_priv_user2=r*/regress_priv_user1 +| |
- | | | regress_priv_user3=r*/regress_priv_user1 +| |
- | | | regress_priv_user4=r*/regress_priv_user3 +| |
- | | | regress_priv_user5=r/regress_priv_user4 | |
-(1 row)
-
-set session role regress_priv_user3;
-revoke select on dep_priv_test from regress_priv_user4 cascade;
-\dp dep_priv_test
- Access privileges
- Schema | Name | Type | Access privileges | Column privileges | Policies
---------+---------------+-------+------------------------------------------------+-------------------+----------
- public | dep_priv_test | table | regress_priv_user1=arwdDxtm/regress_priv_user1+| |
- | | | regress_priv_user2=r*/regress_priv_user1 +| |
- | | | regress_priv_user3=r*/regress_priv_user1 | |
-(1 row)
-
-set session role regress_priv_user1;
-drop table dep_priv_test;
--- clean up
-\c
-drop sequence x_seq;
-DROP AGGREGATE priv_testagg1(int);
-DROP FUNCTION priv_testfunc2(int);
-DROP FUNCTION priv_testfunc4(boolean);
-DROP PROCEDURE priv_testproc1(int);
-DROP VIEW atestv0;
-DROP VIEW atestv1;
-DROP VIEW atestv2;
--- this should cascade to drop atestv4
-DROP VIEW atestv3 CASCADE;
-NOTICE: drop cascades to view atestv4
--- this should complain "does not exist"
-DROP VIEW atestv4;
-ERROR: view "atestv4" does not exist
-DROP TABLE atest1;
-DROP TABLE atest2;
-DROP TABLE atest3;
-DROP TABLE atest4;
-DROP TABLE atest5;
-DROP TABLE atest6;
-DROP TABLE atestc;
-DROP TABLE atestp1;
-DROP TABLE atestp2;
-SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
- lo_unlink
-----------
- 1
- 1
- 1
- 1
- 1
-(5 rows)
-
-DROP GROUP regress_priv_group1;
-DROP GROUP regress_priv_group2;
--- these are needed to clean up permissions
-REVOKE USAGE ON LANGUAGE sql FROM regress_priv_user1;
-DROP OWNED BY regress_priv_user1;
-DROP USER regress_priv_user1;
-DROP USER regress_priv_user2;
-DROP USER regress_priv_user3;
-DROP USER regress_priv_user4;
-DROP USER regress_priv_user5;
-DROP USER regress_priv_user6;
-DROP USER regress_priv_user7;
-DROP USER regress_priv_user8; -- does not exist
-ERROR: role "regress_priv_user8" does not exist
--- permissions with LOCK TABLE
-CREATE USER regress_locktable_user;
-CREATE TABLE lock_table (a int);
--- LOCK TABLE and SELECT permission
-GRANT SELECT ON lock_table TO regress_locktable_user;
-SET SESSION AUTHORIZATION regress_locktable_user;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass
-COMMIT;
-BEGIN;
-LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should fail
-ERROR: permission denied for table lock_table
-ROLLBACK;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should fail
-ERROR: permission denied for table lock_table
-ROLLBACK;
-\c
-REVOKE SELECT ON lock_table FROM regress_locktable_user;
--- LOCK TABLE and INSERT permission
-GRANT INSERT ON lock_table TO regress_locktable_user;
-SET SESSION AUTHORIZATION regress_locktable_user;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass
-ROLLBACK;
-BEGIN;
-LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass
-COMMIT;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should fail
-ERROR: permission denied for table lock_table
-ROLLBACK;
-\c
-REVOKE INSERT ON lock_table FROM regress_locktable_user;
--- LOCK TABLE and UPDATE permission
-GRANT UPDATE ON lock_table TO regress_locktable_user;
-SET SESSION AUTHORIZATION regress_locktable_user;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass
-ROLLBACK;
-BEGIN;
-LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass
-COMMIT;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass
-COMMIT;
-\c
-REVOKE UPDATE ON lock_table FROM regress_locktable_user;
--- LOCK TABLE and DELETE permission
-GRANT DELETE ON lock_table TO regress_locktable_user;
-SET SESSION AUTHORIZATION regress_locktable_user;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass
-ROLLBACK;
-BEGIN;
-LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass
-COMMIT;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass
-COMMIT;
-\c
-REVOKE DELETE ON lock_table FROM regress_locktable_user;
--- LOCK TABLE and TRUNCATE permission
-GRANT TRUNCATE ON lock_table TO regress_locktable_user;
-SET SESSION AUTHORIZATION regress_locktable_user;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass
-ROLLBACK;
-BEGIN;
-LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass
-COMMIT;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass
-COMMIT;
-\c
-REVOKE TRUNCATE ON lock_table FROM regress_locktable_user;
--- LOCK TABLE and MAINTAIN permission
-GRANT MAINTAIN ON lock_table TO regress_locktable_user;
-SET SESSION AUTHORIZATION regress_locktable_user;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass
-ROLLBACK;
-BEGIN;
-LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass
-COMMIT;
-BEGIN;
-LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass
-COMMIT;
-\c
-REVOKE MAINTAIN ON lock_table FROM regress_locktable_user;
--- clean up
-DROP TABLE lock_table;
-DROP USER regress_locktable_user;
--- test to check privileges of system views pg_shmem_allocations and
--- pg_backend_memory_contexts.
--- switch to superuser
-\c -
-CREATE ROLE regress_readallstats;
-SELECT has_table_privilege('regress_readallstats','pg_backend_memory_contexts','SELECT'); -- no
- has_table_privilege
---------------------
- f
-(1 row)
-
-SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations','SELECT'); -- no
- has_table_privilege
---------------------
- f
-(1 row)
-
-GRANT pg_read_all_stats TO regress_readallstats;
-SELECT has_table_privilege('regress_readallstats','pg_backend_memory_contexts','SELECT'); -- yes
- has_table_privilege
---------------------
- t
-(1 row)
-
-SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations','SELECT'); -- yes
- has_table_privilege
---------------------
- t
-(1 row)
-
--- run query to ensure that functions within views can be executed
-SET ROLE regress_readallstats;
-SELECT COUNT(*) >= 0 AS ok FROM pg_backend_memory_contexts;
- ok
-----
- t
-(1 row)
-
-SELECT COUNT(*) >= 0 AS ok FROM pg_shmem_allocations;
- ok
-----
- t
-(1 row)
-
-RESET ROLE;
--- clean up
-DROP ROLE regress_readallstats;
--- test role grantor machinery
-CREATE ROLE regress_group;
-CREATE ROLE regress_group_direct_manager;
-CREATE ROLE regress_group_indirect_manager;
-CREATE ROLE regress_group_member;
-GRANT regress_group TO regress_group_direct_manager WITH INHERIT FALSE, ADMIN TRUE;
-GRANT regress_group_direct_manager TO regress_group_indirect_manager;
-SET SESSION AUTHORIZATION regress_group_direct_manager;
-GRANT regress_group TO regress_group_member;
-SELECT member::regrole::text, CASE WHEN grantor = 10 THEN 'BOOTSTRAP SUPERUSER' ELSE grantor::regrole::text END FROM pg_auth_members WHERE roleid = 'regress_group'::regrole ORDER BY 1, 2;
- member | grantor
-------------------------------+------------------------------
- regress_group_direct_manager | BOOTSTRAP SUPERUSER
- regress_group_member | regress_group_direct_manager
-(2 rows)
-
-REVOKE regress_group FROM regress_group_member;
-SET SESSION AUTHORIZATION regress_group_indirect_manager;
-GRANT regress_group TO regress_group_member;
-SELECT member::regrole::text, CASE WHEN grantor = 10 THEN 'BOOTSTRAP SUPERUSER' ELSE grantor::regrole::text END FROM pg_auth_members WHERE roleid = 'regress_group'::regrole ORDER BY 1, 2;
- member | grantor
-------------------------------+------------------------------
- regress_group_direct_manager | BOOTSTRAP SUPERUSER
- regress_group_member | regress_group_direct_manager
-(2 rows)
-
-REVOKE regress_group FROM regress_group_member;
-RESET SESSION AUTHORIZATION;
-DROP ROLE regress_group;
-DROP ROLE regress_group_direct_manager;
-DROP ROLE regress_group_indirect_manager;
-DROP ROLE regress_group_member;
--- test SET and INHERIT options with object ownership changes
-CREATE ROLE regress_roleoption_protagonist;
-CREATE ROLE regress_roleoption_donor;
-CREATE ROLE regress_roleoption_recipient;
-CREATE SCHEMA regress_roleoption;
-GRANT CREATE, USAGE ON SCHEMA regress_roleoption TO PUBLIC;
-GRANT regress_roleoption_donor TO regress_roleoption_protagonist WITH INHERIT TRUE, SET FALSE;
-GRANT regress_roleoption_recipient TO regress_roleoption_protagonist WITH INHERIT FALSE, SET TRUE;
-SET SESSION AUTHORIZATION regress_roleoption_protagonist;
-CREATE TABLE regress_roleoption.t1 (a int);
-CREATE TABLE regress_roleoption.t2 (a int);
-SET SESSION AUTHORIZATION regress_roleoption_donor;
-CREATE TABLE regress_roleoption.t3 (a int);
-SET SESSION AUTHORIZATION regress_roleoption_recipient;
-CREATE TABLE regress_roleoption.t4 (a int);
-SET SESSION AUTHORIZATION regress_roleoption_protagonist;
-ALTER TABLE regress_roleoption.t1 OWNER TO regress_roleoption_donor; -- fails, can't be come donor
-ERROR: must be able to SET ROLE "regress_roleoption_donor"
-ALTER TABLE regress_roleoption.t2 OWNER TO regress_roleoption_recipient; -- works
-ALTER TABLE regress_roleoption.t3 OWNER TO regress_roleoption_protagonist; -- works
-ALTER TABLE regress_roleoption.t4 OWNER TO regress_roleoption_protagonist; -- fails, we don't inherit from recipient
-ERROR: must be owner of table t4
-RESET SESSION AUTHORIZATION;
-DROP TABLE regress_roleoption.t1;
-DROP TABLE regress_roleoption.t2;
-DROP TABLE regress_roleoption.t3;
-DROP TABLE regress_roleoption.t4;
-DROP SCHEMA regress_roleoption;
-DROP ROLE regress_roleoption_protagonist;
-DROP ROLE regress_roleoption_donor;
-DROP ROLE regress_roleoption_recipient;
--- MAINTAIN
-CREATE ROLE regress_no_maintain;
-CREATE ROLE regress_maintain;
-CREATE ROLE regress_maintain_all IN ROLE pg_maintain;
-CREATE TABLE maintain_test (a INT);
-CREATE INDEX ON maintain_test (a);
-GRANT MAINTAIN ON maintain_test TO regress_maintain;
-CREATE MATERIALIZED VIEW refresh_test AS SELECT 1;
-GRANT MAINTAIN ON refresh_test TO regress_maintain;
-CREATE SCHEMA reindex_test;
--- negative tests; should fail
-SET ROLE regress_no_maintain;
-VACUUM maintain_test;
-WARNING: permission denied to vacuum "maintain_test", skipping it
-ANALYZE maintain_test;
-WARNING: permission denied to analyze "maintain_test", skipping it
-VACUUM (ANALYZE) maintain_test;
-WARNING: permission denied to vacuum "maintain_test", skipping it
-CLUSTER maintain_test USING maintain_test_a_idx;
-ERROR: permission denied for table maintain_test
-REFRESH MATERIALIZED VIEW refresh_test;
-ERROR: permission denied for materialized view refresh_test
-REINDEX TABLE maintain_test;
-ERROR: permission denied for table maintain_test
-REINDEX INDEX maintain_test_a_idx;
-ERROR: permission denied for index maintain_test_a_idx
-REINDEX SCHEMA reindex_test;
-ERROR: must be owner of schema reindex_test
-RESET ROLE;
-SET ROLE regress_maintain;
-VACUUM maintain_test;
-ANALYZE maintain_test;
-VACUUM (ANALYZE) maintain_test;
-CLUSTER maintain_test USING maintain_test_a_idx;
-REFRESH MATERIALIZED VIEW refresh_test;
-REINDEX TABLE maintain_test;
-REINDEX INDEX maintain_test_a_idx;
-REINDEX SCHEMA reindex_test;
-ERROR: must be owner of schema reindex_test
-RESET ROLE;
-SET ROLE regress_maintain_all;
-VACUUM maintain_test;
-ANALYZE maintain_test;
-VACUUM (ANALYZE) maintain_test;
-CLUSTER maintain_test USING maintain_test_a_idx;
-REFRESH MATERIALIZED VIEW refresh_test;
-REINDEX TABLE maintain_test;
-REINDEX INDEX maintain_test_a_idx;
-REINDEX SCHEMA reindex_test;
-RESET ROLE;
-DROP TABLE maintain_test;
-DROP MATERIALIZED VIEW refresh_test;
-DROP SCHEMA reindex_test;
-DROP ROLE regress_no_maintain;
-DROP ROLE regress_maintain;
-DROP ROLE regress_maintain_all;
+WARNING: terminating connection because of crash of another server process
+DETAIL: The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory.
+HINT: In a moment you should be able to reconnect to the database and repeat your command.
+server closed the connection unexpectedly
+	This probably means the server terminated abnormally
+	before or while processing the request.
+connection to server was lost
diff -U3 /Users/admin/pgsql/src/test/regress/expected/matview.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/matview.out
--- /Users/admin/pgsql/src/test/regress/expected/matview.out	2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/matview.out	2024-12-13 13:20:09
@@ -731,205 +731,7 @@
 -- replace query with data
 CREATE OR REPLACE MATERIALIZED VIEW mvtest_replace AS
     SELECT 2 AS a;
-SELECT * FROM mvtest_replace;
- a
----
- 2
-(1 row)
-
--- replace query without data
-CREATE OR REPLACE MATERIALIZED VIEW mvtest_replace AS
- SELECT 3 AS a
- WITH NO DATA;
-SELECT * FROM mvtest_replace; -- error: not populated
-ERROR: materialized view "mvtest_replace" has not been populated
-HINT: Use the REFRESH MATERIALIZED VIEW command.
-REFRESH MATERIALIZED VIEW mvtest_replace;
-SELECT * FROM mvtest_replace;
- a
----
- 3
-(1 row)
-
--- replace query but keep old data
-CREATE OR REPLACE MATERIALIZED VIEW mvtest_replace AS
- SELECT 5 AS a
- WITH OLD DATA;
-SELECT * FROM mvtest_replace;
- a
----
- 3
-(1 row)
-
-REFRESH MATERIALIZED VIEW mvtest_replace;
-SELECT * FROM mvtest_replace;
- a
----
- 5
-(1 row)
-
--- add column
-CREATE OR REPLACE MATERIALIZED VIEW mvtest_replace AS
- SELECT 4 AS a, 1 b;
-SELECT * FROM mvtest_replace;
- a | b
----+---
- 4 | 1
-(1 row)
-
--- replace table options
-SELECT m.*, c.relname, c.reloptions, s.spcname, a.amname
- FROM mvtest_replace m
- CROSS JOIN pg_class c
- LEFT JOIN pg_tablespace s ON s.oid = c.reltablespace
- LEFT JOIN pg_am a ON a.oid = c.relam
- WHERE c.relname = 'mvtest_replace';
- a | b | relname | reloptions | spcname | amname
----+---+----------------+------------+---------+--------
- 4 | 1 | mvtest_replace | | | heap
-(1 row)
-
-CREATE OR REPLACE MATERIALIZED VIEW mvtest_replace
- USING heap2
- WITH (fillfactor = 50)
- TABLESPACE regress_tblspace
- AS SELECT 5 AS a, 1 AS b;
-SELECT m.*, c.relname, c.reloptions, s.spcname, a.amname
- FROM mvtest_replace m
- CROSS JOIN pg_class c
- LEFT JOIN pg_tablespace s ON s.oid = c.reltablespace
- LEFT JOIN pg_am a ON a.oid = c.relam
- WHERE c.relname = 'mvtest_replace';
- a | b | relname | reloptions | spcname | amname
----+---+----------------+-----------------+------------------+--------
- 5 | 1 | mvtest_replace | {fillfactor=50} | regress_tblspace | heap2
-(1 row)
-
--- can replace matview that has a dependent view
-CREATE VIEW mvtest_replace_v AS
- SELECT * FROM mvtest_replace;
-CREATE OR REPLACE MATERIALIZED VIEW mvtest_replace AS
- SELECT 6 AS a, 1 AS b;
-SELECT * FROM mvtest_replace, mvtest_replace_v;
- a | b | a | b
----+---+---+---
- 6 | 1 | 6 | 1
-(1 row)
-
-DROP VIEW mvtest_replace_v;
--- index gets rebuilt when replacing with data
-CREATE OR REPLACE MATERIALIZED VIEW mvtest_replace AS
- SELECT 7 AS a, 1 AS b;
-CREATE UNIQUE INDEX ON mvtest_replace (b);
-SELECT * FROM mvtest_replace;
- a | b
----+---
- 7 | 1
-(1 row)
-
-SET enable_seqscan = off; -- force index scan
-EXPLAIN (COSTS OFF) SELECT * FROM mvtest_replace WHERE b = 1;
- QUERY PLAN
---------------------------------------------------------
- Index Scan using mvtest_replace_b_idx on mvtest_replace
-   Index Cond: (b = 1)
-(2 rows)
-
-SELECT * FROM mvtest_replace WHERE b = 1;
- a | b
----+---
- 7 | 1
-(1 row)
-
-RESET enable_seqscan;
-CREATE OR REPLACE MATERIALIZED VIEW mvtest_replace AS
- SELECT 8 AS a, 1 AS b;
-SET enable_seqscan = off; -- force index scan
-EXPLAIN (COSTS OFF) SELECT * FROM mvtest_replace WHERE b = 1;
- QUERY PLAN
---------------------------------------------------------
- Index Scan using mvtest_replace_b_idx on mvtest_replace
-   Index Cond: (b = 1)
-(2 rows)
-
-SELECT * FROM mvtest_replace WHERE b = 1;
- a | b
----+---
- 8 | 1
-(1 row)
-
-RESET enable_seqscan;
--- cannot change column data type
-CREATE OR REPLACE MATERIALIZED VIEW mvtest_replace AS
- SELECT 9 AS a, 'x' AS b; -- error
-ERROR: cannot change data type of materialized view column "b" from integer to text
-SELECT * FROM mvtest_replace;
- a | b
----+---
- 8 | 1
-(1 row)
-
--- cannot rename column
-CREATE OR REPLACE MATERIALIZED VIEW mvtest_replace AS
- SELECT 10 AS a, 1 AS b2; -- error
-ERROR: cannot change name of materialized view column "b" to "b2"
-HINT: Use ALTER MATERIALIZED VIEW ... RENAME COLUMN ... to change name of materialized view column instead.
-SELECT * FROM mvtest_replace;
- a | b
----+---
- 8 | 1
-(1 row)
-
-CREATE OR REPLACE MATERIALIZED VIEW mvtest_replace AS
- SELECT 11 AS a, 1 AS b, 'y' COLLATE "C" AS c;
-SELECT * FROM mvtest_replace;
- a | b | c
-----+---+---
- 11 | 1 | y
-(1 row)
-
--- cannot change column collation
-CREATE OR REPLACE MATERIALIZED VIEW mvtest_replace AS
- SELECT 12 AS a, 1 AS b, 'x' COLLATE "POSIX" AS c; -- error
-ERROR: cannot change collation of materialized view column "c" from "C" to "POSIX"
-SELECT * FROM mvtest_replace;
- a | b | c
-----+---+---
- 11 | 1 | y
-(1 row)
-
--- cannot drop column
-CREATE OR REPLACE MATERIALIZED VIEW mvtest_replace AS
- SELECT 13 AS a, 1 AS b; -- error
-ERROR: cannot drop columns from materialized view
-SELECT * FROM mvtest_replace;
- a | b | c
-----+---+---
- 11 | 1 | y
-(1 row)
-
--- must target a matview
-CREATE VIEW mvtest_not_mv AS
- SELECT 1 AS a;
-CREATE OR REPLACE MATERIALIZED VIEW mvtest_not_mv AS
- SELECT 1 AS a; -- error
-ERROR: "mvtest_not_mv" is not a materialized view
-DROP VIEW mvtest_not_mv;
--- cannot use OR REPLACE with IF NOT EXISTS
-CREATE OR REPLACE MATERIALIZED VIEW IF NOT EXISTS mvtest_replace AS
- SELECT 1 AS a;
-ERROR: syntax error at or near "NOT"
-LINE 1: CREATE OR REPLACE MATERIALIZED VIEW IF NOT EXISTS mvtest_rep...
- ^
-DROP MATERIALIZED VIEW mvtest_replace;
--- Create new matview WITH OLD DATA. This populates the new matview as if
--- WITH DATA had been specified.
-CREATE OR REPLACE MATERIALIZED VIEW mvtest_replace AS
- SELECT 17 AS a
- WITH OLD DATA;
-SELECT * FROM mvtest_replace;
- a
-----
- 17
-(1 row)
-
+server closed the connection unexpectedly
+	This probably means the server terminated abnormally
+	before or while processing the request.
+connection to server was lost
diff -U3 /Users/admin/pgsql/src/test/regress/expected/generated_stored.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/generated_stored.out
--- /Users/admin/pgsql/src/test/regress/expected/generated_stored.out	2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/generated_stored.out	2024-12-13 13:20:09
@@ -1170,189 +1170,10 @@
   b int GENERATED ALWAYS AS (a * 2) STORED
 );
 CREATE TABLE gtest30_1 () INHERITS (gtest30);
-ALTER TABLE ONLY gtest30 ALTER COLUMN b DROP EXPRESSION; -- error
-ERROR: ALTER TABLE / DROP EXPRESSION must be applied to child tables too
-\d gtest30
- Table "generated_stored_tests.gtest30"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+------------------------------------
- a | integer | | |
- b | integer | | | generated always as (a * 2) stored
-Number of child tables: 1 (Use \d+ to list them.)
- -\d gtest30_1 - Table "generated_stored_tests.gtest30_1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - a | integer | | | - b | integer | | | generated always as (a * 2) stored -Inherits: gtest30 - -ALTER TABLE gtest30_1 ALTER COLUMN b DROP EXPRESSION; -- error -ERROR: cannot drop generation expression from inherited column --- triggers -CREATE TABLE gtest26 ( - a int PRIMARY KEY, - b int GENERATED ALWAYS AS (a * 2) STORED -); -CREATE FUNCTION gtest_trigger_func() RETURNS trigger - LANGUAGE plpgsql -AS $$ -BEGIN - IF tg_op IN ('DELETE', 'UPDATE') THEN - RAISE INFO '%: %: old = %', TG_NAME, TG_WHEN, OLD; - END IF; - IF tg_op IN ('INSERT', 'UPDATE') THEN - RAISE INFO '%: %: new = %', TG_NAME, TG_WHEN, NEW; - END IF; - IF tg_op = 'DELETE' THEN - RETURN OLD; - ELSE - RETURN NEW; - END IF; -END -$$; -CREATE TRIGGER gtest1 BEFORE DELETE OR UPDATE ON gtest26 - FOR EACH ROW - WHEN (OLD.b < 0) -- ok - EXECUTE PROCEDURE gtest_trigger_func(); -CREATE TRIGGER gtest2a BEFORE INSERT OR UPDATE ON gtest26 - FOR EACH ROW - WHEN (NEW.b < 0) -- error - EXECUTE PROCEDURE gtest_trigger_func(); -ERROR: BEFORE trigger's WHEN condition cannot reference NEW generated columns -LINE 3: WHEN (NEW.b < 0) -- error - ^ -DETAIL: Column "b" is a generated column. -CREATE TRIGGER gtest2b BEFORE INSERT OR UPDATE ON gtest26 - FOR EACH ROW - WHEN (NEW.* IS NOT NULL) -- error - EXECUTE PROCEDURE gtest_trigger_func(); -ERROR: BEFORE trigger's WHEN condition cannot reference NEW generated columns -LINE 3: WHEN (NEW.* IS NOT NULL) -- error - ^ -DETAIL: A whole-row reference is used and the table contains generated columns. -CREATE TRIGGER gtest2 BEFORE INSERT ON gtest26 - FOR EACH ROW - WHEN (NEW.a < 0) - EXECUTE PROCEDURE gtest_trigger_func(); -CREATE TRIGGER gtest3 AFTER DELETE OR UPDATE ON gtest26 - FOR EACH ROW - WHEN (OLD.b < 0) -- ok - EXECUTE PROCEDURE gtest_trigger_func(); -CREATE TRIGGER gtest4 AFTER INSERT OR UPDATE ON gtest26 - FOR EACH ROW - WHEN (NEW.b < 0) -- ok - EXECUTE PROCEDURE gtest_trigger_func(); -INSERT INTO gtest26 (a) VALUES (-2), (0), (3); -INFO: gtest2: BEFORE: new = (-2,) -INFO: gtest4: AFTER: new = (-2,-4) -SELECT * FROM gtest26 ORDER BY a; - a | b -----+---- - -2 | -4 - 0 | 0 - 3 | 6 -(3 rows) - -UPDATE gtest26 SET a = a * -2; -INFO: gtest1: BEFORE: old = (-2,-4) -INFO: gtest1: BEFORE: new = (4,) -INFO: gtest3: AFTER: old = (-2,-4) -INFO: gtest3: AFTER: new = (4,8) -INFO: gtest4: AFTER: old = (3,6) -INFO: gtest4: AFTER: new = (-6,-12) -SELECT * FROM gtest26 ORDER BY a; - a | b -----+----- - -6 | -12 - 0 | 0 - 4 | 8 -(3 rows) - -DELETE FROM gtest26 WHERE a = -6; -INFO: gtest1: BEFORE: old = (-6,-12) -INFO: gtest3: AFTER: old = (-6,-12) -SELECT * FROM gtest26 ORDER BY a; - a | b ----+--- - 0 | 0 - 4 | 8 -(2 rows) - -DROP TRIGGER gtest1 ON gtest26; -DROP TRIGGER gtest2 ON gtest26; -DROP TRIGGER gtest3 ON gtest26; --- Check that an UPDATE of "a" fires the trigger for UPDATE OF b, per --- SQL standard. 
-CREATE FUNCTION gtest_trigger_func3() RETURNS trigger - LANGUAGE plpgsql -AS $$ -BEGIN - RAISE NOTICE 'OK'; - RETURN NEW; -END -$$; -CREATE TRIGGER gtest11 BEFORE UPDATE OF b ON gtest26 - FOR EACH ROW - EXECUTE PROCEDURE gtest_trigger_func3(); -UPDATE gtest26 SET a = 1 WHERE a = 0; -NOTICE: OK -DROP TRIGGER gtest11 ON gtest26; -TRUNCATE gtest26; --- check that modifications of stored generated columns in triggers do --- not get propagated -CREATE FUNCTION gtest_trigger_func4() RETURNS trigger - LANGUAGE plpgsql -AS $$ -BEGIN - NEW.a = 10; - NEW.b = 300; - RETURN NEW; -END; -$$; -CREATE TRIGGER gtest12_01 BEFORE UPDATE ON gtest26 - FOR EACH ROW - EXECUTE PROCEDURE gtest_trigger_func(); -CREATE TRIGGER gtest12_02 BEFORE UPDATE ON gtest26 - FOR EACH ROW - EXECUTE PROCEDURE gtest_trigger_func4(); -CREATE TRIGGER gtest12_03 BEFORE UPDATE ON gtest26 - FOR EACH ROW - EXECUTE PROCEDURE gtest_trigger_func(); -INSERT INTO gtest26 (a) VALUES (1); -UPDATE gtest26 SET a = 11 WHERE a = 1; -INFO: gtest12_01: BEFORE: old = (1,2) -INFO: gtest12_01: BEFORE: new = (11,) -INFO: gtest12_03: BEFORE: old = (1,2) -INFO: gtest12_03: BEFORE: new = (10,) -SELECT * FROM gtest26 ORDER BY a; - a | b -----+---- - 10 | 20 -(1 row) - --- LIKE INCLUDING GENERATED and dropped column handling -CREATE TABLE gtest28a ( - a int, - b int, - c int, - x int GENERATED ALWAYS AS (b * 2) STORED -); -ALTER TABLE gtest28a DROP COLUMN a; -CREATE TABLE gtest28b (LIKE gtest28a INCLUDING GENERATED); -\d gtest28* - Table "generated_stored_tests.gtest28a" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - b | integer | | | - c | integer | | | - x | integer | | | generated always as (b * 2) stored - - Table "generated_stored_tests.gtest28b" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - b | integer | | | - c | integer | | | - x | integer | | | generated always as (b * 2) stored - +WARNING: terminating connection because of crash of another server process +DETAIL: The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory. +HINT: In a moment you should be able to reconnect to the database and repeat your command. +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. 
+connection to server was lost diff -U3 /Users/admin/pgsql/src/test/regress/expected/brin_bloom.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/brin_bloom.out --- /Users/admin/pgsql/src/test/regress/expected/brin_bloom.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/brin_bloom.out 2024-12-13 13:20:09 @@ -1,428 +1,2 @@ -CREATE TABLE brintest_bloom (byteacol bytea, - charcol "char", - namecol name, - int8col bigint, - int2col smallint, - int4col integer, - textcol text, - oidcol oid, - float4col real, - float8col double precision, - macaddrcol macaddr, - inetcol inet, - cidrcol cidr, - bpcharcol character, - datecol date, - timecol time without time zone, - timestampcol timestamp without time zone, - timestamptzcol timestamp with time zone, - intervalcol interval, - timetzcol time with time zone, - numericcol numeric, - uuidcol uuid, - lsncol pg_lsn -) WITH (fillfactor=10); -INSERT INTO brintest_bloom SELECT - repeat(stringu1, 8)::bytea, - substr(stringu1, 1, 1)::"char", - stringu1::name, 142857 * tenthous, - thousand, - twothousand, - repeat(stringu1, 8), - unique1::oid, - (four + 1.0)/(hundred+1), - odd::float8 / (tenthous + 1), - format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, - inet '10.2.3.4/24' + tenthous, - cidr '10.2.3/24' + tenthous, - substr(stringu1, 1, 1)::bpchar, - date '1995-08-15' + tenthous, - time '01:20:30' + thousand * interval '18.5 second', - timestamp '1942-07-23 03:05:09' + tenthous * interval '36.38 hours', - timestamptz '1972-10-10 03:00' + thousand * interval '1 hour', - justify_days(justify_hours(tenthous * interval '12 minutes')), - timetz '01:30:20+02' + hundred * interval '15 seconds', - tenthous::numeric(36,30) * fivethous * even / (hundred + 1), - format('%s%s-%s-%s-%s-%s%s%s', to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'))::uuid, - format('%s/%s%s', odd, even, tenthous)::pg_lsn -FROM tenk1 ORDER BY unique2 LIMIT 100; --- throw in some NULL's and different values -INSERT INTO brintest_bloom (inetcol, cidrcol) SELECT - inet 'fe80::6e40:8ff:fea9:8c46' + tenthous, - cidr 'fe80::6e40:8ff:fea9:8c46' + tenthous -FROM tenk1 ORDER BY thousand, tenthous LIMIT 25; --- test bloom specific index options --- ndistinct must be >= -1.0 -CREATE INDEX brinidx_bloom ON brintest_bloom USING brin ( - byteacol bytea_bloom_ops(n_distinct_per_range = -1.1) -); -ERROR: value -1.1 out of bounds for option "n_distinct_per_range" -DETAIL: Valid values are between "-1.000000" and "2147483647.000000". --- false_positive_rate must be between 0.0001 and 0.25 -CREATE INDEX brinidx_bloom ON brintest_bloom USING brin ( - byteacol bytea_bloom_ops(false_positive_rate = 0.00009) -); -ERROR: value 0.00009 out of bounds for option "false_positive_rate" -DETAIL: Valid values are between "0.000100" and "0.250000". -CREATE INDEX brinidx_bloom ON brintest_bloom USING brin ( - byteacol bytea_bloom_ops(false_positive_rate = 0.26) -); -ERROR: value 0.26 out of bounds for option "false_positive_rate" -DETAIL: Valid values are between "0.000100" and "0.250000". 
-CREATE INDEX brinidx_bloom ON brintest_bloom USING brin ( - byteacol bytea_bloom_ops, - charcol char_bloom_ops, - namecol name_bloom_ops, - int8col int8_bloom_ops, - int2col int2_bloom_ops, - int4col int4_bloom_ops, - textcol text_bloom_ops, - oidcol oid_bloom_ops, - float4col float4_bloom_ops, - float8col float8_bloom_ops, - macaddrcol macaddr_bloom_ops, - inetcol inet_bloom_ops, - cidrcol inet_bloom_ops, - bpcharcol bpchar_bloom_ops, - datecol date_bloom_ops, - timecol time_bloom_ops, - timestampcol timestamp_bloom_ops, - timestamptzcol timestamptz_bloom_ops, - intervalcol interval_bloom_ops, - timetzcol timetz_bloom_ops, - numericcol numeric_bloom_ops, - uuidcol uuid_bloom_ops, - lsncol pg_lsn_bloom_ops -) with (pages_per_range = 1); -CREATE TABLE brinopers_bloom (colname name, typ text, - op text[], value text[], matches int[], - check (cardinality(op) = cardinality(value)), - check (cardinality(op) = cardinality(matches))); -INSERT INTO brinopers_bloom VALUES - ('byteacol', 'bytea', - '{=}', - '{BNAAAABNAAAABNAAAABNAAAABNAAAABNAAAABNAAAABNAAAA}', - '{1}'), - ('charcol', '"char"', - '{=}', - '{M}', - '{6}'), - ('namecol', 'name', - '{=}', - '{MAAAAA}', - '{2}'), - ('int2col', 'int2', - '{=}', - '{800}', - '{1}'), - ('int4col', 'int4', - '{=}', - '{800}', - '{1}'), - ('int8col', 'int8', - '{=}', - '{1257141600}', - '{1}'), - ('textcol', 'text', - '{=}', - '{BNAAAABNAAAABNAAAABNAAAABNAAAABNAAAABNAAAABNAAAA}', - '{1}'), - ('oidcol', 'oid', - '{=}', - '{8800}', - '{1}'), - ('float4col', 'float4', - '{=}', - '{1}', - '{4}'), - ('float8col', 'float8', - '{=}', - '{0}', - '{1}'), - ('macaddrcol', 'macaddr', - '{=}', - '{2c:00:2d:00:16:00}', - '{2}'), - ('inetcol', 'inet', - '{=}', - '{10.2.14.231/24}', - '{1}'), - ('inetcol', 'cidr', - '{=}', - '{fe80::6e40:8ff:fea9:8c46}', - '{1}'), - ('cidrcol', 'inet', - '{=}', - '{10.2.14/24}', - '{2}'), - ('cidrcol', 'inet', - '{=}', - '{fe80::6e40:8ff:fea9:8c46}', - '{1}'), - ('cidrcol', 'cidr', - '{=}', - '{10.2.14/24}', - '{2}'), - ('cidrcol', 'cidr', - '{=}', - '{fe80::6e40:8ff:fea9:8c46}', - '{1}'), - ('bpcharcol', 'bpchar', - '{=}', - '{W}', - '{6}'), - ('datecol', 'date', - '{=}', - '{2009-12-01}', - '{1}'), - ('timecol', 'time', - '{=}', - '{02:28:57}', - '{1}'), - ('timestampcol', 'timestamp', - '{=}', - '{1964-03-24 19:26:45}', - '{1}'), - ('timestamptzcol', 'timestamptz', - '{=}', - '{1972-10-19 09:00:00-07}', - '{1}'), - ('intervalcol', 'interval', - '{=}', - '{1 mons 13 days 12:24}', - '{1}'), - ('timetzcol', 'timetz', - '{=}', - '{01:35:50+02}', - '{2}'), - ('numericcol', 'numeric', - '{=}', - '{2268164.347826086956521739130434782609}', - '{1}'), - ('uuidcol', 'uuid', - '{=}', - '{52225222-5222-5222-5222-522252225222}', - '{1}'), - ('lsncol', 'pg_lsn', - '{=, IS, IS NOT}', - '{44/455222, NULL, NULL}', - '{1, 25, 100}'); -DO $x$ -DECLARE - r record; - r2 record; - cond text; - idx_ctids tid[]; - ss_ctids tid[]; - count int; - plan_ok bool; - plan_line text; -BEGIN - FOR r IN SELECT colname, oper, typ, value[ordinality], matches[ordinality] FROM brinopers_bloom, unnest(op) WITH ORDINALITY AS oper LOOP - - -- prepare the condition - IF r.value IS NULL THEN - cond := format('%I %s %L', r.colname, r.oper, r.value); - ELSE - cond := format('%I %s %L::%s', r.colname, r.oper, r.value, r.typ); - END IF; - - -- run the query using the brin index - SET enable_seqscan = 0; - SET enable_bitmapscan = 1; - - plan_ok := false; - FOR plan_line IN EXECUTE format($y$EXPLAIN SELECT array_agg(ctid) FROM brintest_bloom WHERE %s $y$, cond) LOOP - IF plan_line 
LIKE '%Bitmap Heap Scan on brintest_bloom%' THEN - plan_ok := true; - END IF; - END LOOP; - IF NOT plan_ok THEN - RAISE WARNING 'did not get bitmap indexscan plan for %', r; - END IF; - - EXECUTE format($y$SELECT array_agg(ctid) FROM brintest_bloom WHERE %s $y$, cond) - INTO idx_ctids; - - -- run the query using a seqscan - SET enable_seqscan = 1; - SET enable_bitmapscan = 0; - - plan_ok := false; - FOR plan_line IN EXECUTE format($y$EXPLAIN SELECT array_agg(ctid) FROM brintest_bloom WHERE %s $y$, cond) LOOP - IF plan_line LIKE '%Seq Scan on brintest_bloom%' THEN - plan_ok := true; - END IF; - END LOOP; - IF NOT plan_ok THEN - RAISE WARNING 'did not get seqscan plan for %', r; - END IF; - - EXECUTE format($y$SELECT array_agg(ctid) FROM brintest_bloom WHERE %s $y$, cond) - INTO ss_ctids; - - -- make sure both return the same results - count := array_length(idx_ctids, 1); - - IF NOT (count = array_length(ss_ctids, 1) AND - idx_ctids @> ss_ctids AND - idx_ctids <@ ss_ctids) THEN - -- report the results of each scan to make the differences obvious - RAISE WARNING 'something not right in %: count %', r, count; - SET enable_seqscan = 1; - SET enable_bitmapscan = 0; - FOR r2 IN EXECUTE 'SELECT ' || r.colname || ' FROM brintest_bloom WHERE ' || cond LOOP - RAISE NOTICE 'seqscan: %', r2; - END LOOP; - - SET enable_seqscan = 0; - SET enable_bitmapscan = 1; - FOR r2 IN EXECUTE 'SELECT ' || r.colname || ' FROM brintest_bloom WHERE ' || cond LOOP - RAISE NOTICE 'bitmapscan: %', r2; - END LOOP; - END IF; - - -- make sure we found expected number of matches - IF count != r.matches THEN RAISE WARNING 'unexpected number of results % for %', count, r; END IF; - END LOOP; -END; -$x$; -RESET enable_seqscan; -RESET enable_bitmapscan; -INSERT INTO brintest_bloom SELECT - repeat(stringu1, 42)::bytea, - substr(stringu1, 1, 1)::"char", - stringu1::name, 142857 * tenthous, - thousand, - twothousand, - repeat(stringu1, 42), - unique1::oid, - (four + 1.0)/(hundred+1), - odd::float8 / (tenthous + 1), - format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, - inet '10.2.3.4' + tenthous, - cidr '10.2.3/24' + tenthous, - substr(stringu1, 1, 1)::bpchar, - date '1995-08-15' + tenthous, - time '01:20:30' + thousand * interval '18.5 second', - timestamp '1942-07-23 03:05:09' + tenthous * interval '36.38 hours', - timestamptz '1972-10-10 03:00' + thousand * interval '1 hour', - justify_days(justify_hours(tenthous * interval '12 minutes')), - timetz '01:30:20' + hundred * interval '15 seconds', - tenthous::numeric(36,30) * fivethous * even / (hundred + 1), - format('%s%s-%s-%s-%s-%s%s%s', to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'))::uuid, - format('%s/%s%s', odd, even, tenthous)::pg_lsn -FROM tenk1 ORDER BY unique2 LIMIT 5 OFFSET 5; -SELECT brin_desummarize_range('brinidx_bloom', 0); - brin_desummarize_range ------------------------- - -(1 row) - -VACUUM brintest_bloom; -- force a summarization cycle in brinidx -UPDATE brintest_bloom SET int8col = int8col * int4col; -UPDATE brintest_bloom SET textcol = '' WHERE textcol IS NOT NULL; --- Tests for brin_summarize_new_values -SELECT brin_summarize_new_values('brintest_bloom'); -- error, not an index -ERROR: "brintest_bloom" is not an index -SELECT brin_summarize_new_values('tenk1_unique1'); -- error, not a BRIN index -ERROR: "tenk1_unique1" is not a BRIN index -SELECT 
brin_summarize_new_values('brinidx_bloom'); -- ok, no change expected - brin_summarize_new_values ---------------------------- - 0 -(1 row) - --- Tests for brin_desummarize_range -SELECT brin_desummarize_range('brinidx_bloom', -1); -- error, invalid range -ERROR: block number out of range: -1 -SELECT brin_desummarize_range('brinidx_bloom', 0); - brin_desummarize_range ------------------------- - -(1 row) - -SELECT brin_desummarize_range('brinidx_bloom', 0); - brin_desummarize_range ------------------------- - -(1 row) - -SELECT brin_desummarize_range('brinidx_bloom', 100000000); - brin_desummarize_range ------------------------- - -(1 row) - --- Test brin_summarize_range -CREATE TABLE brin_summarize_bloom ( - value int -) WITH (fillfactor=10, autovacuum_enabled=false); -CREATE INDEX brin_summarize_bloom_idx ON brin_summarize_bloom USING brin (value) WITH (pages_per_range=2); --- Fill a few pages -DO $$ -DECLARE curtid tid; -BEGIN - LOOP - INSERT INTO brin_summarize_bloom VALUES (1) RETURNING ctid INTO curtid; - EXIT WHEN curtid > tid '(2, 0)'; - END LOOP; -END; -$$; --- summarize one range -SELECT brin_summarize_range('brin_summarize_bloom_idx', 0); - brin_summarize_range ----------------------- - 0 -(1 row) - --- nothing: already summarized -SELECT brin_summarize_range('brin_summarize_bloom_idx', 1); - brin_summarize_range ----------------------- - 0 -(1 row) - --- summarize one range -SELECT brin_summarize_range('brin_summarize_bloom_idx', 2); - brin_summarize_range ----------------------- - 1 -(1 row) - --- nothing: page doesn't exist in table -SELECT brin_summarize_range('brin_summarize_bloom_idx', 4294967295); - brin_summarize_range ----------------------- - 0 -(1 row) - --- invalid block number values -SELECT brin_summarize_range('brin_summarize_bloom_idx', -1); -ERROR: block number out of range: -1 -SELECT brin_summarize_range('brin_summarize_bloom_idx', 4294967296); -ERROR: block number out of range: 4294967296 --- test brin cost estimates behave sanely based on correlation of values -CREATE TABLE brin_test_bloom (a INT, b INT); -INSERT INTO brin_test_bloom SELECT x/100,x%100 FROM generate_series(1,10000) x(x); -CREATE INDEX brin_test_bloom_a_idx ON brin_test_bloom USING brin (a) WITH (pages_per_range = 2); -CREATE INDEX brin_test_bloom_b_idx ON brin_test_bloom USING brin (b) WITH (pages_per_range = 2); -VACUUM ANALYZE brin_test_bloom; --- Ensure brin index is used when columns are perfectly correlated -EXPLAIN (COSTS OFF) SELECT * FROM brin_test_bloom WHERE a = 1; - QUERY PLAN --------------------------------------------------- - Bitmap Heap Scan on brin_test_bloom - Recheck Cond: (a = 1) - -> Bitmap Index Scan on brin_test_bloom_a_idx - Index Cond: (a = 1) -(4 rows) - --- Ensure brin index is not used when values are not correlated -EXPLAIN (COSTS OFF) SELECT * FROM brin_test_bloom WHERE b = 1; - QUERY PLAN ------------------------------ - Seq Scan on brin_test_bloom - Filter: (b = 1) -(2 rows) - +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? 
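[Editorial aside; not part of the captured regression output. The brin_bloom.out expectations above were lost to the crashed session rather than to a behavioral change, so for orientation here is a minimal sketch of a valid bloom-opclass index, assembled only from opclass names, option names, and bounds quoted in that hunk; the table and index names are hypothetical:
-- n_distinct_per_range must be >= -1.0; false_positive_rate must lie between 0.0001 and 0.25
CREATE TABLE bloom_demo (t text);
CREATE INDEX bloom_demo_idx ON bloom_demo USING brin (
    t text_bloom_ops(n_distinct_per_range = -1.0, false_positive_rate = 0.01)
) WITH (pages_per_range = 1);
pages_per_range = 1 mirrors the lost test's setting, which forces one BRIN summary per heap page.]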
diff -U3 /Users/admin/pgsql/src/test/regress/expected/brin_multi.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/brin_multi.out --- /Users/admin/pgsql/src/test/regress/expected/brin_multi.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/brin_multi.out 2024-12-13 13:20:09 @@ -1,974 +1,2 @@ -CREATE TABLE brintest_multi ( - int8col bigint, - int2col smallint, - int4col integer, - oidcol oid, - tidcol tid, - float4col real, - float8col double precision, - macaddrcol macaddr, - macaddr8col macaddr8, - inetcol inet, - cidrcol cidr, - datecol date, - timecol time without time zone, - timestampcol timestamp without time zone, - timestamptzcol timestamp with time zone, - intervalcol interval, - timetzcol time with time zone, - numericcol numeric, - uuidcol uuid, - lsncol pg_lsn -) WITH (fillfactor=10); -INSERT INTO brintest_multi SELECT - 142857 * tenthous, - thousand, - twothousand, - unique1::oid, - format('(%s,%s)', tenthous, twenty)::tid, - (four + 1.0)/(hundred+1), - odd::float8 / (tenthous + 1), - format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, - substr(fipshash(unique1::text), 1, 16)::macaddr8, - inet '10.2.3.4/24' + tenthous, - cidr '10.2.3/24' + tenthous, - date '1995-08-15' + tenthous, - time '01:20:30' + thousand * interval '18.5 second', - timestamp '1942-07-23 03:05:09' + tenthous * interval '36.38 hours', - timestamptz '1972-10-10 03:00' + thousand * interval '1 hour', - justify_days(justify_hours(tenthous * interval '12 minutes')), - timetz '01:30:20+02' + hundred * interval '15 seconds', - tenthous::numeric(36,30) * fivethous * even / (hundred + 1), - format('%s%s-%s-%s-%s-%s%s%s', to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'))::uuid, - format('%s/%s%s', odd, even, tenthous)::pg_lsn -FROM tenk1 ORDER BY unique2 LIMIT 100; --- throw in some NULL's and different values -INSERT INTO brintest_multi (inetcol, cidrcol) SELECT - inet 'fe80::6e40:8ff:fea9:8c46' + tenthous, - cidr 'fe80::6e40:8ff:fea9:8c46' + tenthous -FROM tenk1 ORDER BY thousand, tenthous LIMIT 25; --- test minmax-multi specific index options --- number of values must be >= 16 -CREATE INDEX brinidx_multi ON brintest_multi USING brin ( - int8col int8_minmax_multi_ops(values_per_range = 7) -); -ERROR: value 7 out of bounds for option "values_per_range" -DETAIL: Valid values are between "8" and "256". --- number of values must be <= 256 -CREATE INDEX brinidx_multi ON brintest_multi USING brin ( - int8col int8_minmax_multi_ops(values_per_range = 257) -); -ERROR: value 257 out of bounds for option "values_per_range" -DETAIL: Valid values are between "8" and "256". 
--- first create an index with a single page range, to force compaction --- due to exceeding the number of values per summary -CREATE INDEX brinidx_multi ON brintest_multi USING brin ( - int8col int8_minmax_multi_ops, - int2col int2_minmax_multi_ops, - int4col int4_minmax_multi_ops, - oidcol oid_minmax_multi_ops, - tidcol tid_minmax_multi_ops, - float4col float4_minmax_multi_ops, - float8col float8_minmax_multi_ops, - macaddrcol macaddr_minmax_multi_ops, - macaddr8col macaddr8_minmax_multi_ops, - inetcol inet_minmax_multi_ops, - cidrcol inet_minmax_multi_ops, - datecol date_minmax_multi_ops, - timecol time_minmax_multi_ops, - timestampcol timestamp_minmax_multi_ops, - timestamptzcol timestamptz_minmax_multi_ops, - intervalcol interval_minmax_multi_ops, - timetzcol timetz_minmax_multi_ops, - numericcol numeric_minmax_multi_ops, - uuidcol uuid_minmax_multi_ops, - lsncol pg_lsn_minmax_multi_ops -); -DROP INDEX brinidx_multi; -CREATE INDEX brinidx_multi ON brintest_multi USING brin ( - int8col int8_minmax_multi_ops, - int2col int2_minmax_multi_ops, - int4col int4_minmax_multi_ops, - oidcol oid_minmax_multi_ops, - tidcol tid_minmax_multi_ops, - float4col float4_minmax_multi_ops, - float8col float8_minmax_multi_ops, - macaddrcol macaddr_minmax_multi_ops, - macaddr8col macaddr8_minmax_multi_ops, - inetcol inet_minmax_multi_ops, - cidrcol inet_minmax_multi_ops, - datecol date_minmax_multi_ops, - timecol time_minmax_multi_ops, - timestampcol timestamp_minmax_multi_ops, - timestamptzcol timestamptz_minmax_multi_ops, - intervalcol interval_minmax_multi_ops, - timetzcol timetz_minmax_multi_ops, - numericcol numeric_minmax_multi_ops, - uuidcol uuid_minmax_multi_ops, - lsncol pg_lsn_minmax_multi_ops -) with (pages_per_range = 1); -CREATE TABLE brinopers_multi (colname name, typ text, - op text[], value text[], matches int[], - check (cardinality(op) = cardinality(value)), - check (cardinality(op) = cardinality(matches))); -INSERT INTO brinopers_multi VALUES - ('int2col', 'int2', - '{>, >=, =, <=, <}', - '{0, 0, 800, 999, 999}', - '{100, 100, 1, 100, 100}'), - ('int2col', 'int4', - '{>, >=, =, <=, <}', - '{0, 0, 800, 999, 1999}', - '{100, 100, 1, 100, 100}'), - ('int2col', 'int8', - '{>, >=, =, <=, <}', - '{0, 0, 800, 999, 1428427143}', - '{100, 100, 1, 100, 100}'), - ('int4col', 'int2', - '{>, >=, =, <=, <}', - '{0, 0, 800, 1999, 1999}', - '{100, 100, 1, 100, 100}'), - ('int4col', 'int4', - '{>, >=, =, <=, <}', - '{0, 0, 800, 1999, 1999}', - '{100, 100, 1, 100, 100}'), - ('int4col', 'int8', - '{>, >=, =, <=, <}', - '{0, 0, 800, 1999, 1428427143}', - '{100, 100, 1, 100, 100}'), - ('int8col', 'int2', - '{>, >=}', - '{0, 0}', - '{100, 100}'), - ('int8col', 'int4', - '{>, >=}', - '{0, 0}', - '{100, 100}'), - ('int8col', 'int8', - '{>, >=, =, <=, <}', - '{0, 0, 1257141600, 1428427143, 1428427143}', - '{100, 100, 1, 100, 100}'), - ('oidcol', 'oid', - '{>, >=, =, <=, <}', - '{0, 0, 8800, 9999, 9999}', - '{100, 100, 1, 100, 100}'), - ('tidcol', 'tid', - '{>, >=, =, <=, <}', - '{"(0,0)", "(0,0)", "(8800,0)", "(9999,19)", "(9999,19)"}', - '{100, 100, 1, 100, 100}'), - ('float4col', 'float4', - '{>, >=, =, <=, <}', - '{0.0103093, 0.0103093, 1, 1, 1}', - '{100, 100, 4, 100, 96}'), - ('float4col', 'float8', - '{>, >=, =, <=, <}', - '{0.0103093, 0.0103093, 1, 1, 1}', - '{100, 100, 4, 100, 96}'), - ('float8col', 'float4', - '{>, >=, =, <=, <}', - '{0, 0, 0, 1.98, 1.98}', - '{99, 100, 1, 100, 100}'), - ('float8col', 'float8', - '{>, >=, =, <=, <}', - '{0, 0, 0, 1.98, 1.98}', - '{99, 100, 1, 100, 100}'), - 
('macaddrcol', 'macaddr', - '{>, >=, =, <=, <}', - '{00:00:01:00:00:00, 00:00:01:00:00:00, 2c:00:2d:00:16:00, ff:fe:00:00:00:00, ff:fe:00:00:00:00}', - '{99, 100, 2, 100, 100}'), - ('macaddr8col', 'macaddr8', - '{>, >=, =, <=, <}', - '{b1:d1:0e:7b:af:a4:42:12, d9:35:91:bd:f7:86:0e:1e, 72:8f:20:6c:2a:01:bf:57, 23:e8:46:63:86:07:ad:cb, 13:16:8e:6a:2e:6c:84:b4}', - '{31, 17, 1, 11, 4}'), - ('inetcol', 'inet', - '{=, <, <=, >, >=}', - '{10.2.14.231/24, 255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0}', - '{1, 100, 100, 125, 125}'), - ('inetcol', 'cidr', - '{<, <=, >, >=}', - '{255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0}', - '{100, 100, 125, 125}'), - ('cidrcol', 'inet', - '{=, <, <=, >, >=}', - '{10.2.14/24, 255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0}', - '{2, 100, 100, 125, 125}'), - ('cidrcol', 'cidr', - '{=, <, <=, >, >=}', - '{10.2.14/24, 255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0}', - '{2, 100, 100, 125, 125}'), - ('datecol', 'date', - '{>, >=, =, <=, <}', - '{1995-08-15, 1995-08-15, 2009-12-01, 2022-12-30, 2022-12-30}', - '{100, 100, 1, 100, 100}'), - ('timecol', 'time', - '{>, >=, =, <=, <}', - '{01:20:30, 01:20:30, 02:28:57, 06:28:31.5, 06:28:31.5}', - '{100, 100, 1, 100, 100}'), - ('timestampcol', 'timestamp', - '{>, >=, =, <=, <}', - '{1942-07-23 03:05:09, 1942-07-23 03:05:09, 1964-03-24 19:26:45, 1984-01-20 22:42:21, 1984-01-20 22:42:21}', - '{100, 100, 1, 100, 100}'), - ('timestampcol', 'timestamptz', - '{>, >=, =, <=, <}', - '{1942-07-23 03:05:09, 1942-07-23 03:05:09, 1964-03-24 19:26:45, 1984-01-20 22:42:21, 1984-01-20 22:42:21}', - '{100, 100, 1, 100, 100}'), - ('timestamptzcol', 'timestamptz', - '{>, >=, =, <=, <}', - '{1972-10-10 03:00:00-04, 1972-10-10 03:00:00-04, 1972-10-19 09:00:00-07, 1972-11-20 19:00:00-03, 1972-11-20 19:00:00-03}', - '{100, 100, 1, 100, 100}'), - ('intervalcol', 'interval', - '{>, >=, =, <=, <}', - '{00:00:00, 00:00:00, 1 mons 13 days 12:24, 2 mons 23 days 07:48:00, 1 year}', - '{100, 100, 1, 100, 100}'), - ('timetzcol', 'timetz', - '{>, >=, =, <=, <}', - '{01:30:20+02, 01:30:20+02, 01:35:50+02, 23:55:05+02, 23:55:05+02}', - '{99, 100, 2, 100, 100}'), - ('numericcol', 'numeric', - '{>, >=, =, <=, <}', - '{0.00, 0.01, 2268164.347826086956521739130434782609, 99470151.9, 99470151.9}', - '{100, 100, 1, 100, 100}'), - ('uuidcol', 'uuid', - '{>, >=, =, <=, <}', - '{00040004-0004-0004-0004-000400040004, 00040004-0004-0004-0004-000400040004, 52225222-5222-5222-5222-522252225222, 99989998-9998-9998-9998-999899989998, 99989998-9998-9998-9998-999899989998}', - '{100, 100, 1, 100, 100}'), - ('lsncol', 'pg_lsn', - '{>, >=, =, <=, <, IS, IS NOT}', - '{0/1200, 0/1200, 44/455222, 198/1999799, 198/1999799, NULL, NULL}', - '{100, 100, 1, 100, 100, 25, 100}'); -DO $x$ -DECLARE - r record; - r2 record; - cond text; - idx_ctids tid[]; - ss_ctids tid[]; - count int; - plan_ok bool; - plan_line text; -BEGIN - FOR r IN SELECT colname, oper, typ, value[ordinality], matches[ordinality] FROM brinopers_multi, unnest(op) WITH ORDINALITY AS oper LOOP - - -- prepare the condition - IF r.value IS NULL THEN - cond := format('%I %s %L', r.colname, r.oper, r.value); - ELSE - cond := format('%I %s %L::%s', r.colname, r.oper, r.value, r.typ); - END IF; - - -- run the query using the brin index - SET enable_seqscan = 0; - SET enable_bitmapscan = 1; - - plan_ok := false; - FOR plan_line IN EXECUTE format($y$EXPLAIN SELECT array_agg(ctid) FROM brintest_multi WHERE %s $y$, cond) LOOP - IF plan_line LIKE '%Bitmap Heap Scan on brintest_multi%' THEN - plan_ok := true; - 
END IF; - END LOOP; - IF NOT plan_ok THEN - RAISE WARNING 'did not get bitmap indexscan plan for %', r; - END IF; - - EXECUTE format($y$SELECT array_agg(ctid) FROM brintest_multi WHERE %s $y$, cond) - INTO idx_ctids; - - -- run the query using a seqscan - SET enable_seqscan = 1; - SET enable_bitmapscan = 0; - - plan_ok := false; - FOR plan_line IN EXECUTE format($y$EXPLAIN SELECT array_agg(ctid) FROM brintest_multi WHERE %s $y$, cond) LOOP - IF plan_line LIKE '%Seq Scan on brintest_multi%' THEN - plan_ok := true; - END IF; - END LOOP; - IF NOT plan_ok THEN - RAISE WARNING 'did not get seqscan plan for %', r; - END IF; - - EXECUTE format($y$SELECT array_agg(ctid) FROM brintest_multi WHERE %s $y$, cond) - INTO ss_ctids; - - -- make sure both return the same results - count := array_length(idx_ctids, 1); - - IF NOT (count = array_length(ss_ctids, 1) AND - idx_ctids @> ss_ctids AND - idx_ctids <@ ss_ctids) THEN - -- report the results of each scan to make the differences obvious - RAISE WARNING 'something not right in %: count %', r, count; - SET enable_seqscan = 1; - SET enable_bitmapscan = 0; - FOR r2 IN EXECUTE 'SELECT ' || r.colname || ' FROM brintest_multi WHERE ' || cond LOOP - RAISE NOTICE 'seqscan: %', r2; - END LOOP; - - SET enable_seqscan = 0; - SET enable_bitmapscan = 1; - FOR r2 IN EXECUTE 'SELECT ' || r.colname || ' FROM brintest_multi WHERE ' || cond LOOP - RAISE NOTICE 'bitmapscan: %', r2; - END LOOP; - END IF; - - -- make sure we found expected number of matches - IF count != r.matches THEN RAISE WARNING 'unexpected number of results % for %', count, r; END IF; - END LOOP; -END; -$x$; -RESET enable_seqscan; -RESET enable_bitmapscan; -INSERT INTO brintest_multi SELECT - 142857 * tenthous, - thousand, - twothousand, - unique1::oid, - format('(%s,%s)', tenthous, twenty)::tid, - (four + 1.0)/(hundred+1), - odd::float8 / (tenthous + 1), - format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, - substr(fipshash(unique1::text), 1, 16)::macaddr8, - inet '10.2.3.4' + tenthous, - cidr '10.2.3/24' + tenthous, - date '1995-08-15' + tenthous, - time '01:20:30' + thousand * interval '18.5 second', - timestamp '1942-07-23 03:05:09' + tenthous * interval '36.38 hours', - timestamptz '1972-10-10 03:00' + thousand * interval '1 hour', - justify_days(justify_hours(tenthous * interval '12 minutes')), - timetz '01:30:20' + hundred * interval '15 seconds', - tenthous::numeric(36,30) * fivethous * even / (hundred + 1), - format('%s%s-%s-%s-%s-%s%s%s', to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'))::uuid, - format('%s/%s%s', odd, even, tenthous)::pg_lsn -FROM tenk1 ORDER BY unique2 LIMIT 5 OFFSET 5; -SELECT brin_desummarize_range('brinidx_multi', 0); - brin_desummarize_range ------------------------- - -(1 row) - -VACUUM brintest_multi; -- force a summarization cycle in brinidx --- Try inserting a values with NaN, to test distance calculation. 
-insert into public.brintest_multi (float4col) values (real 'nan'); -insert into public.brintest_multi (float8col) values (real 'nan'); -UPDATE brintest_multi SET int8col = int8col * int4col; --- Test handling of inet netmasks with inet_minmax_multi_ops -CREATE TABLE brin_test_inet (a inet); -CREATE INDEX ON brin_test_inet USING brin (a inet_minmax_multi_ops); -INSERT INTO brin_test_inet VALUES ('127.0.0.1/0'); -INSERT INTO brin_test_inet VALUES ('0.0.0.0/12'); -DROP TABLE brin_test_inet; --- Tests for brin_summarize_new_values -SELECT brin_summarize_new_values('brintest_multi'); -- error, not an index -ERROR: "brintest_multi" is not an index -SELECT brin_summarize_new_values('tenk1_unique1'); -- error, not a BRIN index -ERROR: "tenk1_unique1" is not a BRIN index -SELECT brin_summarize_new_values('brinidx_multi'); -- ok, no change expected - brin_summarize_new_values ---------------------------- - 0 -(1 row) - --- Tests for brin_desummarize_range -SELECT brin_desummarize_range('brinidx_multi', -1); -- error, invalid range -ERROR: block number out of range: -1 -SELECT brin_desummarize_range('brinidx_multi', 0); - brin_desummarize_range ------------------------- - -(1 row) - -SELECT brin_desummarize_range('brinidx_multi', 0); - brin_desummarize_range ------------------------- - -(1 row) - -SELECT brin_desummarize_range('brinidx_multi', 100000000); - brin_desummarize_range ------------------------- - -(1 row) - --- test building an index with many values, to force compaction of the buffer -CREATE TABLE brin_large_range (a int4); -INSERT INTO brin_large_range SELECT i FROM generate_series(1,10000) s(i); -CREATE INDEX brin_large_range_idx ON brin_large_range USING brin (a int4_minmax_multi_ops); -DROP TABLE brin_large_range; --- Test brin_summarize_range -CREATE TABLE brin_summarize_multi ( - value int -) WITH (fillfactor=10, autovacuum_enabled=false); -CREATE INDEX brin_summarize_multi_idx ON brin_summarize_multi USING brin (value) WITH (pages_per_range=2); --- Fill a few pages -DO $$ -DECLARE curtid tid; -BEGIN - LOOP - INSERT INTO brin_summarize_multi VALUES (1) RETURNING ctid INTO curtid; - EXIT WHEN curtid > tid '(2, 0)'; - END LOOP; -END; -$$; --- summarize one range -SELECT brin_summarize_range('brin_summarize_multi_idx', 0); - brin_summarize_range ----------------------- - 0 -(1 row) - --- nothing: already summarized -SELECT brin_summarize_range('brin_summarize_multi_idx', 1); - brin_summarize_range ----------------------- - 0 -(1 row) - --- summarize one range -SELECT brin_summarize_range('brin_summarize_multi_idx', 2); - brin_summarize_range ----------------------- - 1 -(1 row) - --- nothing: page doesn't exist in table -SELECT brin_summarize_range('brin_summarize_multi_idx', 4294967295); - brin_summarize_range ----------------------- - 0 -(1 row) - --- invalid block number values -SELECT brin_summarize_range('brin_summarize_multi_idx', -1); -ERROR: block number out of range: -1 -SELECT brin_summarize_range('brin_summarize_multi_idx', 4294967296); -ERROR: block number out of range: 4294967296 --- test brin cost estimates behave sanely based on correlation of values -CREATE TABLE brin_test_multi (a INT, b INT); -INSERT INTO brin_test_multi SELECT x/100,x%100 FROM generate_series(1,10000) x(x); -CREATE INDEX brin_test_multi_a_idx ON brin_test_multi USING brin (a) WITH (pages_per_range = 2); -CREATE INDEX brin_test_multi_b_idx ON brin_test_multi USING brin (b) WITH (pages_per_range = 2); -VACUUM ANALYZE brin_test_multi; --- Ensure brin index is used when columns are perfectly correlated 
-EXPLAIN (COSTS OFF) SELECT * FROM brin_test_multi WHERE a = 1; - QUERY PLAN --------------------------------------------------- - Bitmap Heap Scan on brin_test_multi - Recheck Cond: (a = 1) - -> Bitmap Index Scan on brin_test_multi_a_idx - Index Cond: (a = 1) -(4 rows) - --- Ensure brin index is not used when values are not correlated -EXPLAIN (COSTS OFF) SELECT * FROM brin_test_multi WHERE b = 1; - QUERY PLAN ------------------------------ - Seq Scan on brin_test_multi - Filter: (b = 1) -(2 rows) - --- do some inequality tests -CREATE TABLE brin_test_multi_1 (a INT, b BIGINT) WITH (fillfactor=10); -INSERT INTO brin_test_multi_1 -SELECT i/5 + mod(911 * i + 483, 25), - i/10 + mod(751 * i + 221, 41) - FROM generate_series(1,1000) s(i); -CREATE INDEX brin_test_multi_1_idx_1 ON brin_test_multi_1 USING brin (a int4_minmax_multi_ops) WITH (pages_per_range=5); -CREATE INDEX brin_test_multi_1_idx_2 ON brin_test_multi_1 USING brin (b int8_minmax_multi_ops) WITH (pages_per_range=5); -SET enable_seqscan=off; --- int: less than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a < 37; - count -------- - 124 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a < 113; - count -------- - 504 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a <= 177; - count -------- - 829 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a <= 25; - count -------- - 69 -(1 row) - --- int: greater than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a > 120; - count -------- - 456 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a >= 180; - count -------- - 161 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a > 71; - count -------- - 701 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a >= 63; - count -------- - 746 -(1 row) - --- int: equals -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a = 207; - count -------- - 3 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a = 177; - count -------- - 5 -(1 row) - --- bigint: less than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b < 73; - count -------- - 529 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b <= 47; - count -------- - 279 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b < 199; - count -------- - 1000 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b <= 150; - count -------- - 1000 -(1 row) - --- bigint: greater than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b > 93; - count -------- - 261 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b > 37; - count -------- - 821 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b >= 215; - count -------- - 0 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b > 201; - count -------- - 0 -(1 row) - --- bigint: equals -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b = 88; - count -------- - 10 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b = 103; - count -------- - 9 -(1 row) - --- now do the same, but insert the rows with the indexes already created --- so that we don't use the "build callback" and instead use the regular --- approach of adding rows into existing ranges -TRUNCATE brin_test_multi_1; -INSERT INTO brin_test_multi_1 -SELECT i/5 + mod(911 * i + 483, 25), - i/10 + mod(751 * i + 221, 41) - FROM generate_series(1,1000) s(i); --- int: less than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a < 37; - count -------- - 124 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a < 113; - count -------- - 504 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a <= 177; - count -------- - 829 -(1 row) - 
-SELECT COUNT(*) FROM brin_test_multi_1 WHERE a <= 25; - count -------- - 69 -(1 row) - --- int: greater than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a > 120; - count -------- - 456 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a >= 180; - count -------- - 161 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a > 71; - count -------- - 701 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a >= 63; - count -------- - 746 -(1 row) - --- int: equals -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a = 207; - count -------- - 3 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE a = 177; - count -------- - 5 -(1 row) - --- bigint: less than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b < 73; - count -------- - 529 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b <= 47; - count -------- - 279 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b < 199; - count -------- - 1000 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b <= 150; - count -------- - 1000 -(1 row) - --- bigint: greater than -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b > 93; - count -------- - 261 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b > 37; - count -------- - 821 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b >= 215; - count -------- - 0 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b > 201; - count -------- - 0 -(1 row) - --- bigint: equals -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b = 88; - count -------- - 10 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_1 WHERE b = 103; - count -------- - 9 -(1 row) - -DROP TABLE brin_test_multi_1; -RESET enable_seqscan; --- do some inequality tests for varlena data types -CREATE TABLE brin_test_multi_2 (a UUID) WITH (fillfactor=10); -INSERT INTO brin_test_multi_2 -SELECT v::uuid FROM (SELECT row_number() OVER (ORDER BY v) c, v FROM (SELECT fipshash((i/13)::text) AS v FROM generate_series(1,1000) s(i)) foo) bar ORDER BY c + 25 * random(); -CREATE INDEX brin_test_multi_2_idx ON brin_test_multi_2 USING brin (a uuid_minmax_multi_ops) WITH (pages_per_range=5); -SET enable_seqscan=off; -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a < '3d914f93-48c9-cc0f-f8a7-9716700b9fcd'; - count -------- - 195 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a > '3d914f93-48c9-cc0f-f8a7-9716700b9fcd'; - count -------- - 792 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a <= 'f369cb89-fc62-7e66-8987-007d121ed1ea'; - count -------- - 961 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a >= 'aea92132-c4cb-eb26-3e6a-c2bf6c183b5d'; - count -------- - 273 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a = '5feceb66-ffc8-6f38-d952-786c6d696c79'; - count -------- - 12 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a = '86e50149-6586-6131-2a9e-0b35558d84f6'; - count -------- - 13 -(1 row) - --- now do the same, but insert the rows with the indexes already created --- so that we don't use the "build callback" and instead use the regular --- approach of adding rows into existing ranges -TRUNCATE brin_test_multi_2; -INSERT INTO brin_test_multi_2 -SELECT v::uuid FROM (SELECT row_number() OVER (ORDER BY v) c, v FROM (SELECT fipshash((i/13)::text) AS v FROM generate_series(1,1000) s(i)) foo) bar ORDER BY c + 25 * random(); -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a < '3d914f93-48c9-cc0f-f8a7-9716700b9fcd'; - count -------- - 195 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a > '3d914f93-48c9-cc0f-f8a7-9716700b9fcd'; - count -------- - 792 -(1 row) - 
-SELECT COUNT(*) FROM brin_test_multi_2 WHERE a <= 'f369cb89-fc62-7e66-8987-007d121ed1ea'; - count -------- - 961 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a >= 'aea92132-c4cb-eb26-3e6a-c2bf6c183b5d'; - count -------- - 273 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a = '5feceb66-ffc8-6f38-d952-786c6d696c79'; - count -------- - 12 -(1 row) - -SELECT COUNT(*) FROM brin_test_multi_2 WHERE a = '86e50149-6586-6131-2a9e-0b35558d84f6'; - count -------- - 13 -(1 row) - -DROP TABLE brin_test_multi_2; -RESET enable_seqscan; --- test overflows during CREATE INDEX with extreme timestamp values -CREATE TABLE brin_timestamp_test(a TIMESTAMPTZ); -SET datestyle TO iso; --- values close to timestamp minimum -INSERT INTO brin_timestamp_test -SELECT '4713-01-01 00:00:01 BC'::timestamptz + (i || ' seconds')::interval - FROM generate_series(1,30) s(i); --- values close to timestamp maximum -INSERT INTO brin_timestamp_test -SELECT '294276-12-01 00:00:01'::timestamptz + (i || ' seconds')::interval - FROM generate_series(1,30) s(i); -CREATE INDEX ON brin_timestamp_test USING brin (a timestamptz_minmax_multi_ops) WITH (pages_per_range=1); -DROP TABLE brin_timestamp_test; --- test overflows during CREATE INDEX with extreme date values -CREATE TABLE brin_date_test(a DATE); --- insert values close to date minimum -INSERT INTO brin_date_test SELECT '4713-01-01 BC'::date + i FROM generate_series(1, 30) s(i); --- insert values close to date minimum -INSERT INTO brin_date_test SELECT '5874897-12-01'::date + i FROM generate_series(1, 30) s(i); -CREATE INDEX ON brin_date_test USING brin (a date_minmax_multi_ops) WITH (pages_per_range=1); -SET enable_seqscan = off; --- make sure the ranges were built correctly and 2023-01-01 eliminates all -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF, BUFFERS OFF) -SELECT * FROM brin_date_test WHERE a = '2023-01-01'::date; - QUERY PLAN -------------------------------------------------------------------------- - Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1) - Recheck Cond: (a = '2023-01-01'::date) - -> Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '2023-01-01'::date) -(4 rows) - -DROP TABLE brin_date_test; -RESET enable_seqscan; --- test handling of infinite timestamp values -CREATE TABLE brin_timestamp_test(a TIMESTAMP); -INSERT INTO brin_timestamp_test VALUES ('-infinity'), ('infinity'); -INSERT INTO brin_timestamp_test -SELECT i FROM generate_series('2000-01-01'::timestamp, '2000-02-09'::timestamp, '1 day'::interval) s(i); -CREATE INDEX ON brin_timestamp_test USING brin (a timestamp_minmax_multi_ops) WITH (pages_per_range=1); -SET enable_seqscan = off; -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF, BUFFERS OFF) -SELECT * FROM brin_timestamp_test WHERE a = '2023-01-01'::timestamp; - QUERY PLAN ------------------------------------------------------------------------------- - Bitmap Heap Scan on brin_timestamp_test (actual rows=0 loops=1) - Recheck Cond: (a = '2023-01-01 00:00:00'::timestamp without time zone) - -> Bitmap Index Scan on brin_timestamp_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '2023-01-01 00:00:00'::timestamp without time zone) -(4 rows) - -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF, BUFFERS OFF) -SELECT * FROM brin_timestamp_test WHERE a = '1900-01-01'::timestamp; - QUERY PLAN ------------------------------------------------------------------------------- - Bitmap Heap Scan on brin_timestamp_test (actual rows=0 loops=1) - Recheck Cond: (a = '1900-01-01 
00:00:00'::timestamp without time zone) - -> Bitmap Index Scan on brin_timestamp_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '1900-01-01 00:00:00'::timestamp without time zone) -(4 rows) - -DROP TABLE brin_timestamp_test; -RESET enable_seqscan; --- test handling of infinite date values -CREATE TABLE brin_date_test(a DATE); -INSERT INTO brin_date_test VALUES ('-infinity'), ('infinity'); -INSERT INTO brin_date_test SELECT '2000-01-01'::date + i FROM generate_series(1, 40) s(i); -CREATE INDEX ON brin_date_test USING brin (a date_minmax_multi_ops) WITH (pages_per_range=1); -SET enable_seqscan = off; -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF, BUFFERS OFF) -SELECT * FROM brin_date_test WHERE a = '2023-01-01'::date; - QUERY PLAN -------------------------------------------------------------------------- - Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1) - Recheck Cond: (a = '2023-01-01'::date) - -> Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '2023-01-01'::date) -(4 rows) - -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF, BUFFERS OFF) -SELECT * FROM brin_date_test WHERE a = '1900-01-01'::date; - QUERY PLAN -------------------------------------------------------------------------- - Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1) - Recheck Cond: (a = '1900-01-01'::date) - -> Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '1900-01-01'::date) -(4 rows) - -DROP TABLE brin_date_test; -RESET enable_seqscan; -RESET datestyle; --- test handling of overflow for interval values -CREATE TABLE brin_interval_test(a INTERVAL); -INSERT INTO brin_interval_test SELECT (i || ' years')::interval FROM generate_series(-178000000, -177999980) s(i); -INSERT INTO brin_interval_test SELECT (i || ' years')::interval FROM generate_series( 177999980, 178000000) s(i); -CREATE INDEX ON brin_interval_test USING brin (a interval_minmax_multi_ops) WITH (pages_per_range=1); -SET enable_seqscan = off; -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF, BUFFERS OFF) -SELECT * FROM brin_interval_test WHERE a = '-30 years'::interval; - QUERY PLAN ------------------------------------------------------------------------------ - Bitmap Heap Scan on brin_interval_test (actual rows=0 loops=1) - Recheck Cond: (a = '@ 30 years ago'::interval) - -> Bitmap Index Scan on brin_interval_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '@ 30 years ago'::interval) -(4 rows) - -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF, BUFFERS OFF) -SELECT * FROM brin_interval_test WHERE a = '30 years'::interval; - QUERY PLAN ------------------------------------------------------------------------------ - Bitmap Heap Scan on brin_interval_test (actual rows=0 loops=1) - Recheck Cond: (a = '@ 30 years'::interval) - -> Bitmap Index Scan on brin_interval_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '@ 30 years'::interval) -(4 rows) - -DROP TABLE brin_interval_test; -RESET enable_seqscan; --- test handling of infinite interval values -CREATE TABLE brin_interval_test(a INTERVAL); -INSERT INTO brin_interval_test VALUES ('-infinity'), ('infinity'); -INSERT INTO brin_interval_test SELECT (i || ' days')::interval FROM generate_series(100, 140) s(i); -CREATE INDEX ON brin_interval_test USING brin (a interval_minmax_multi_ops) WITH (pages_per_range=1); -SET enable_seqscan = off; -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF, BUFFERS OFF) -SELECT * FROM brin_interval_test WHERE a = '-30 years'::interval; - QUERY PLAN 
------------------------------------------------------------------------------ - Bitmap Heap Scan on brin_interval_test (actual rows=0 loops=1) - Recheck Cond: (a = '@ 30 years ago'::interval) - -> Bitmap Index Scan on brin_interval_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '@ 30 years ago'::interval) -(4 rows) - -EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF, BUFFERS OFF) -SELECT * FROM brin_interval_test WHERE a = '30 years'::interval; - QUERY PLAN ------------------------------------------------------------------------------ - Bitmap Heap Scan on brin_interval_test (actual rows=0 loops=1) - Recheck Cond: (a = '@ 30 years'::interval) - -> Bitmap Index Scan on brin_interval_test_a_idx (actual rows=0 loops=1) - Index Cond: (a = '@ 30 years'::interval) -(4 rows) - -DROP TABLE brin_interval_test; -RESET enable_seqscan; -RESET datestyle; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/create_table_like.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/create_table_like.out --- /Users/admin/pgsql/src/test/regress/expected/create_table_like.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/create_table_like.out 2024-12-13 13:20:10 @@ -1,557 +1,2 @@ -/* Test inheritance of structure (LIKE) */ -CREATE TABLE inhx (xx text DEFAULT 'text'); -/* - * Test double inheritance - * - * Ensure that defaults are NOT included unless - * INCLUDING DEFAULTS is specified - */ -CREATE TABLE ctla (aa TEXT); -CREATE TABLE ctlb (bb TEXT) INHERITS (ctla); -CREATE TABLE foo (LIKE nonexistent); -ERROR: relation "nonexistent" does not exist -LINE 1: CREATE TABLE foo (LIKE nonexistent); - ^ -CREATE TABLE inhe (ee text, LIKE inhx) inherits (ctlb); -INSERT INTO inhe VALUES ('ee-col1', 'ee-col2', DEFAULT, 'ee-col4'); -SELECT * FROM inhe; /* Columns aa, bb, xx value NULL, ee */ - aa | bb | ee | xx ----------+---------+----+--------- - ee-col1 | ee-col2 | | ee-col4 -(1 row) - -SELECT * FROM inhx; /* Empty set since LIKE inherits structure only */ - xx ----- -(0 rows) - -SELECT * FROM ctlb; /* Has ee entry */ - aa | bb ----------+--------- - ee-col1 | ee-col2 -(1 row) - -SELECT * FROM ctla; /* Has ee entry */ - aa ---------- - ee-col1 -(1 row) - -CREATE TABLE inhf (LIKE inhx, LIKE inhx); /* Throw error */ -ERROR: column "xx" specified more than once -CREATE TABLE inhf (LIKE inhx INCLUDING DEFAULTS INCLUDING CONSTRAINTS); -INSERT INTO inhf DEFAULT VALUES; -SELECT * FROM inhf; /* Single entry with value 'text' */ - xx ------- - text -(1 row) - -ALTER TABLE inhx add constraint foo CHECK (xx = 'text'); -ALTER TABLE inhx ADD PRIMARY KEY (xx); -CREATE TABLE inhg (LIKE inhx); /* Doesn't copy constraint */ -INSERT INTO inhg VALUES ('foo'); -DROP TABLE inhg; -CREATE TABLE inhg (x text, LIKE inhx INCLUDING CONSTRAINTS, y text); /* Copies constraints */ -INSERT INTO inhg VALUES ('x', 'text', 'y'); /* Succeeds */ -INSERT INTO inhg VALUES ('x', 'text', 'y'); /* Succeeds -- Unique constraints not copied */ -INSERT INTO inhg VALUES ('x', 'foo', 'y'); /* fails due to constraint */ -ERROR: new row for relation "inhg" violates check constraint "foo" -DETAIL: Failing row contains (x, foo, y). 
-SELECT * FROM inhg; /* Two records with three columns in order x=x, xx=text, y=y */ - x | xx | y ----+------+--- - x | text | y - x | text | y -(2 rows) - -DROP TABLE inhg; -CREATE TABLE test_like_id_1 (a bigint GENERATED ALWAYS AS IDENTITY, b text); -\d test_like_id_1 - Table "public.test_like_id_1" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+------------------------------ - a | bigint | | not null | generated always as identity - b | text | | | - -INSERT INTO test_like_id_1 (b) VALUES ('b1'); -SELECT * FROM test_like_id_1; - a | b ----+---- - 1 | b1 -(1 row) - -CREATE TABLE test_like_id_2 (LIKE test_like_id_1); -\d test_like_id_2 - Table "public.test_like_id_2" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+--------- - a | bigint | | not null | - b | text | | | - -INSERT INTO test_like_id_2 (b) VALUES ('b2'); -ERROR: null value in column "a" of relation "test_like_id_2" violates not-null constraint -DETAIL: Failing row contains (null, b2). -SELECT * FROM test_like_id_2; -- identity was not copied - a | b ----+--- -(0 rows) - -CREATE TABLE test_like_id_3 (LIKE test_like_id_1 INCLUDING IDENTITY); -\d test_like_id_3 - Table "public.test_like_id_3" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+------------------------------ - a | bigint | | not null | generated always as identity - b | text | | | - -INSERT INTO test_like_id_3 (b) VALUES ('b3'); -SELECT * FROM test_like_id_3; -- identity was copied and applied - a | b ----+---- - 1 | b3 -(1 row) - -DROP TABLE test_like_id_1, test_like_id_2, test_like_id_3; -CREATE TABLE test_like_gen_1 (a int, b int GENERATED ALWAYS AS (a * 2) STORED); -\d test_like_gen_1 - Table "public.test_like_gen_1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - a | integer | | | - b | integer | | | generated always as (a * 2) stored - -INSERT INTO test_like_gen_1 (a) VALUES (1); -SELECT * FROM test_like_gen_1; - a | b ----+--- - 1 | 2 -(1 row) - -CREATE TABLE test_like_gen_2 (LIKE test_like_gen_1); -\d test_like_gen_2 - Table "public.test_like_gen_2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | - -INSERT INTO test_like_gen_2 (a) VALUES (1); -SELECT * FROM test_like_gen_2; - a | b ----+--- - 1 | -(1 row) - -CREATE TABLE test_like_gen_3 (LIKE test_like_gen_1 INCLUDING GENERATED); -\d test_like_gen_3 - Table "public.test_like_gen_3" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - a | integer | | | - b | integer | | | generated always as (a * 2) stored - -INSERT INTO test_like_gen_3 (a) VALUES (1); -SELECT * FROM test_like_gen_3; - a | b ----+--- - 1 | 2 -(1 row) - -DROP TABLE test_like_gen_1, test_like_gen_2, test_like_gen_3; --- also test generated column with a "forward" reference (bug #16342) -CREATE TABLE test_like_4 (b int DEFAULT 42, - c int GENERATED ALWAYS AS (a * 2) STORED, - a int CHECK (a > 0)); -\d test_like_4 - Table "public.test_like_4" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - b | integer | | | 42 - c | integer | | | generated always as (a * 2) stored - a | integer | | | -Check constraints: - "test_like_4_a_check" CHECK (a > 0) - -CREATE TABLE test_like_4a (LIKE test_like_4); 
-CREATE TABLE test_like_4b (LIKE test_like_4 INCLUDING DEFAULTS); -CREATE TABLE test_like_4c (LIKE test_like_4 INCLUDING GENERATED); -CREATE TABLE test_like_4d (LIKE test_like_4 INCLUDING DEFAULTS INCLUDING GENERATED); -\d test_like_4a - Table "public.test_like_4a" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - b | integer | | | - c | integer | | | - a | integer | | | - -INSERT INTO test_like_4a (a) VALUES(11); -SELECT a, b, c FROM test_like_4a; - a | b | c -----+---+--- - 11 | | -(1 row) - -\d test_like_4b - Table "public.test_like_4b" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - b | integer | | | 42 - c | integer | | | - a | integer | | | - -INSERT INTO test_like_4b (a) VALUES(11); -SELECT a, b, c FROM test_like_4b; - a | b | c -----+----+--- - 11 | 42 | -(1 row) - -\d test_like_4c - Table "public.test_like_4c" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - b | integer | | | - c | integer | | | generated always as (a * 2) stored - a | integer | | | - -INSERT INTO test_like_4c (a) VALUES(11); -SELECT a, b, c FROM test_like_4c; - a | b | c -----+---+---- - 11 | | 22 -(1 row) - -\d test_like_4d - Table "public.test_like_4d" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - b | integer | | | 42 - c | integer | | | generated always as (a * 2) stored - a | integer | | | - -INSERT INTO test_like_4d (a) VALUES(11); -SELECT a, b, c FROM test_like_4d; - a | b | c -----+----+---- - 11 | 42 | 22 -(1 row) - --- Test renumbering of Vars when combining LIKE with inheritance -CREATE TABLE test_like_5 (x point, y point, z point); -CREATE TABLE test_like_5x (p int CHECK (p > 0), - q int GENERATED ALWAYS AS (p * 2) STORED); -CREATE TABLE test_like_5c (LIKE test_like_4 INCLUDING ALL) - INHERITS (test_like_5, test_like_5x); -\d test_like_5c - Table "public.test_like_5c" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------------ - x | point | | | - y | point | | | - z | point | | | - p | integer | | | - q | integer | | | generated always as (p * 2) stored - b | integer | | | 42 - c | integer | | | generated always as (a * 2) stored - a | integer | | | -Check constraints: - "test_like_4_a_check" CHECK (a > 0) - "test_like_5x_p_check" CHECK (p > 0) -Inherits: test_like_5, - test_like_5x - --- Test updating of column numbers in statistics expressions (bug #18468) -CREATE TABLE test_like_6 (a int, c text, b text); -CREATE STATISTICS ext_stat ON (a || b) FROM test_like_6; -ALTER TABLE test_like_6 DROP COLUMN c; -CREATE TABLE test_like_6c (LIKE test_like_6 INCLUDING ALL); -\d+ test_like_6c - Table "public.test_like_6c" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - a | integer | | | | plain | | - b | text | | | | extended | | -Statistics objects: - "public.test_like_6c_expr_stat" ON (a || b) FROM test_like_6c - -DROP TABLE test_like_4, test_like_4a, test_like_4b, test_like_4c, test_like_4d; -DROP TABLE test_like_5, test_like_5x, test_like_5c; -DROP TABLE test_like_6, test_like_6c; -CREATE TABLE inhg (x text, LIKE inhx INCLUDING INDEXES, y text); /* copies indexes */ -INSERT INTO inhg VALUES (5, 10); -INSERT INTO inhg VALUES (20, 10); -- 
should fail -ERROR: duplicate key value violates unique constraint "inhg_pkey" -DETAIL: Key (xx)=(10) already exists. -DROP TABLE inhg; -/* Multiple primary keys creation should fail */ -CREATE TABLE inhg (x text, LIKE inhx INCLUDING INDEXES, PRIMARY KEY(x)); /* fails */ -ERROR: multiple primary keys for table "inhg" are not allowed -CREATE TABLE inhz (xx text DEFAULT 'text', yy int UNIQUE); -CREATE UNIQUE INDEX inhz_xx_idx on inhz (xx) WHERE xx <> 'test'; -/* Ok to create multiple unique indexes */ -CREATE TABLE inhg (x text UNIQUE, LIKE inhz INCLUDING INDEXES); -INSERT INTO inhg (xx, yy, x) VALUES ('test', 5, 10); -INSERT INTO inhg (xx, yy, x) VALUES ('test', 10, 15); -INSERT INTO inhg (xx, yy, x) VALUES ('foo', 10, 15); -- should fail -ERROR: duplicate key value violates unique constraint "inhg_x_key" -DETAIL: Key (x)=(15) already exists. -DROP TABLE inhg; -DROP TABLE inhz; -/* Use primary key imported by LIKE for self-referential FK constraint */ -CREATE TABLE inhz (x text REFERENCES inhz, LIKE inhx INCLUDING INDEXES); -\d inhz - Table "public.inhz" - Column | Type | Collation | Nullable | Default ---------+------+-----------+----------+--------- - x | text | | | - xx | text | | not null | -Indexes: - "inhz_pkey" PRIMARY KEY, btree (xx) -Foreign-key constraints: - "inhz_x_fkey" FOREIGN KEY (x) REFERENCES inhz(xx) -Referenced by: - TABLE "inhz" CONSTRAINT "inhz_x_fkey" FOREIGN KEY (x) REFERENCES inhz(xx) - -DROP TABLE inhz; --- including storage and comments -CREATE TABLE ctlt1 (a text CHECK (length(a) > 2) PRIMARY KEY, b text); -CREATE INDEX ctlt1_b_key ON ctlt1 (b); -CREATE INDEX ctlt1_fnidx ON ctlt1 ((a || b)); -CREATE STATISTICS ctlt1_a_b_stat ON a,b FROM ctlt1; -CREATE STATISTICS ctlt1_expr_stat ON (a || b) FROM ctlt1; -COMMENT ON STATISTICS ctlt1_a_b_stat IS 'ab stats'; -COMMENT ON STATISTICS ctlt1_expr_stat IS 'ab expr stats'; -COMMENT ON COLUMN ctlt1.a IS 'A'; -COMMENT ON COLUMN ctlt1.b IS 'B'; -COMMENT ON CONSTRAINT ctlt1_a_check ON ctlt1 IS 't1_a_check'; -COMMENT ON INDEX ctlt1_pkey IS 'index pkey'; -COMMENT ON INDEX ctlt1_b_key IS 'index b_key'; -ALTER TABLE ctlt1 ALTER COLUMN a SET STORAGE MAIN; -CREATE TABLE ctlt2 (c text); -ALTER TABLE ctlt2 ALTER COLUMN c SET STORAGE EXTERNAL; -COMMENT ON COLUMN ctlt2.c IS 'C'; -CREATE TABLE ctlt3 (a text CHECK (length(a) < 5), c text CHECK (length(c) < 7)); -ALTER TABLE ctlt3 ALTER COLUMN c SET STORAGE EXTERNAL; -ALTER TABLE ctlt3 ALTER COLUMN a SET STORAGE MAIN; -CREATE INDEX ctlt3_fnidx ON ctlt3 ((a || c)); -COMMENT ON COLUMN ctlt3.a IS 'A3'; -COMMENT ON COLUMN ctlt3.c IS 'C'; -COMMENT ON CONSTRAINT ctlt3_a_check ON ctlt3 IS 't3_a_check'; -CREATE TABLE ctlt4 (a text, c text); -ALTER TABLE ctlt4 ALTER COLUMN c SET STORAGE EXTERNAL; -CREATE TABLE ctlt12_storage (LIKE ctlt1 INCLUDING STORAGE, LIKE ctlt2 INCLUDING STORAGE); -\d+ ctlt12_storage - Table "public.ctlt12_storage" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+------+-----------+----------+---------+----------+--------------+------------- - a | text | | not null | | main | | - b | text | | | | extended | | - c | text | | | | external | | -Not-null constraints: - "ctlt1_a_not_null" NOT NULL "a" - -CREATE TABLE ctlt12_comments (LIKE ctlt1 INCLUDING COMMENTS, LIKE ctlt2 INCLUDING COMMENTS); -\d+ ctlt12_comments - Table "public.ctlt12_comments" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+------+-----------+----------+---------+----------+--------------+------------- - a | text | 
| not null | | extended | | A - b | text | | | | extended | | B - c | text | | | | extended | | C -Not-null constraints: - "ctlt1_a_not_null" NOT NULL "a" - -CREATE TABLE ctlt1_inh (LIKE ctlt1 INCLUDING CONSTRAINTS INCLUDING COMMENTS) INHERITS (ctlt1); -NOTICE: merging column "a" with inherited definition -NOTICE: merging column "b" with inherited definition -NOTICE: merging constraint "ctlt1_a_check" with inherited definition -\d+ ctlt1_inh - Table "public.ctlt1_inh" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+------+-----------+----------+---------+----------+--------------+------------- - a | text | | not null | | main | | A - b | text | | | | extended | | B -Check constraints: - "ctlt1_a_check" CHECK (length(a) > 2) -Not-null constraints: - "ctlt1_a_not_null" NOT NULL "a" (local, inherited) -Inherits: ctlt1 - -SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt1_inh'::regclass; - description -------------- - t1_a_check -(1 row) - -CREATE TABLE ctlt13_inh () INHERITS (ctlt1, ctlt3); -NOTICE: merging multiple inherited definitions of column "a" -\d+ ctlt13_inh - Table "public.ctlt13_inh" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+------+-----------+----------+---------+----------+--------------+------------- - a | text | | not null | | main | | - b | text | | | | extended | | - c | text | | | | external | | -Check constraints: - "ctlt1_a_check" CHECK (length(a) > 2) - "ctlt3_a_check" CHECK (length(a) < 5) - "ctlt3_c_check" CHECK (length(c) < 7) -Not-null constraints: - "ctlt1_a_not_null" NOT NULL "a" (inherited) -Inherits: ctlt1, - ctlt3 - -CREATE TABLE ctlt13_like (LIKE ctlt3 INCLUDING CONSTRAINTS INCLUDING INDEXES INCLUDING COMMENTS INCLUDING STORAGE) INHERITS (ctlt1); -NOTICE: merging column "a" with inherited definition -\d+ ctlt13_like - Table "public.ctlt13_like" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+------+-----------+----------+---------+----------+--------------+------------- - a | text | | not null | | main | | A3 - b | text | | | | extended | | - c | text | | | | external | | C -Indexes: - "ctlt13_like_expr_idx" btree ((a || c)) -Check constraints: - "ctlt1_a_check" CHECK (length(a) > 2) - "ctlt3_a_check" CHECK (length(a) < 5) - "ctlt3_c_check" CHECK (length(c) < 7) -Not-null constraints: - "ctlt1_a_not_null" NOT NULL "a" (inherited) -Inherits: ctlt1 - -SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt13_like'::regclass; - description -------------- - t3_a_check -(1 row) - -CREATE TABLE ctlt_all (LIKE ctlt1 INCLUDING ALL); -\d+ ctlt_all - Table "public.ctlt_all" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+------+-----------+----------+---------+----------+--------------+------------- - a | text | | not null | | main | | A - b | text | | | | extended | | B -Indexes: - "ctlt_all_pkey" PRIMARY KEY, btree (a) - "ctlt_all_b_idx" btree (b) - "ctlt_all_expr_idx" btree ((a || b)) -Check constraints: - "ctlt1_a_check" CHECK (length(a) > 2) -Statistics objects: - "public.ctlt_all_a_b_stat" ON a, b FROM ctlt_all - "public.ctlt_all_expr_stat" ON (a || b) FROM ctlt_all -Not-null constraints: - "ctlt1_a_not_null" NOT NULL "a" - -SELECT c.relname, objsubid, description FROM pg_description, 
pg_index i, pg_class c WHERE classoid = 'pg_class'::regclass AND objoid = i.indexrelid AND c.oid = i.indexrelid AND i.indrelid = 'ctlt_all'::regclass ORDER BY c.relname, objsubid; - relname | objsubid | description -----------------+----------+------------- - ctlt_all_b_idx | 0 | index b_key - ctlt_all_pkey | 0 | index pkey -(2 rows) - -SELECT s.stxname, objsubid, description FROM pg_description, pg_statistic_ext s WHERE classoid = 'pg_statistic_ext'::regclass AND objoid = s.oid AND s.stxrelid = 'ctlt_all'::regclass ORDER BY s.stxname, objsubid; - stxname | objsubid | description ---------------------+----------+--------------- - ctlt_all_a_b_stat | 0 | ab stats - ctlt_all_expr_stat | 0 | ab expr stats -(2 rows) - -CREATE TABLE inh_error1 () INHERITS (ctlt1, ctlt4); -NOTICE: merging multiple inherited definitions of column "a" -ERROR: inherited column "a" has a storage parameter conflict -DETAIL: MAIN versus EXTENDED -CREATE TABLE inh_error2 (LIKE ctlt4 INCLUDING STORAGE) INHERITS (ctlt1); -NOTICE: merging column "a" with inherited definition -ERROR: column "a" has a storage parameter conflict -DETAIL: MAIN versus EXTENDED --- Check that LIKE isn't confused by a system catalog of the same name -CREATE TABLE pg_attrdef (LIKE ctlt1 INCLUDING ALL); -\d+ public.pg_attrdef - Table "public.pg_attrdef" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+------+-----------+----------+---------+----------+--------------+------------- - a | text | | not null | | main | | A - b | text | | | | extended | | B -Indexes: - "pg_attrdef_pkey" PRIMARY KEY, btree (a) - "pg_attrdef_b_idx" btree (b) - "pg_attrdef_expr_idx" btree ((a || b)) -Check constraints: - "ctlt1_a_check" CHECK (length(a) > 2) -Statistics objects: - "public.pg_attrdef_a_b_stat" ON a, b FROM public.pg_attrdef - "public.pg_attrdef_expr_stat" ON (a || b) FROM public.pg_attrdef -Not-null constraints: - "ctlt1_a_not_null" NOT NULL "a" - -DROP TABLE public.pg_attrdef; --- Check that LIKE isn't confused when new table masks the old, either -BEGIN; -CREATE SCHEMA ctl_schema; -SET LOCAL search_path = ctl_schema, public; -CREATE TABLE ctlt1 (LIKE ctlt1 INCLUDING ALL); -\d+ ctlt1 - Table "ctl_schema.ctlt1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+------+-----------+----------+---------+----------+--------------+------------- - a | text | | not null | | main | | A - b | text | | | | extended | | B -Indexes: - "ctlt1_pkey" PRIMARY KEY, btree (a) - "ctlt1_b_idx" btree (b) - "ctlt1_expr_idx" btree ((a || b)) -Check constraints: - "ctlt1_a_check" CHECK (length(a) > 2) -Statistics objects: - "ctl_schema.ctlt1_a_b_stat" ON a, b FROM ctlt1 - "ctl_schema.ctlt1_expr_stat" ON (a || b) FROM ctlt1 -Not-null constraints: - "ctlt1_a_not_null" NOT NULL "a" - -ROLLBACK; -DROP TABLE ctlt1, ctlt2, ctlt3, ctlt4, ctlt12_storage, ctlt12_comments, ctlt1_inh, ctlt13_inh, ctlt13_like, ctlt_all, ctla, ctlb CASCADE; -NOTICE: drop cascades to table inhe --- LIKE must respect NO INHERIT property of constraints -CREATE TABLE noinh_con_copy (a int CHECK (a > 0) NO INHERIT, b int not null, - c int not null no inherit); -CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS); -\d+ noinh_con_copy1 - Table "public.noinh_con_copy1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - a | integer | | | | plain | | - b | integer | | not null 
| | plain | | - c | integer | | not null | | plain | | -Check constraints: - "noinh_con_copy_a_check" CHECK (a > 0) NO INHERIT -Not-null constraints: - "noinh_con_copy_b_not_null" NOT NULL "b" - "noinh_con_copy_c_not_null" NOT NULL "c" NO INHERIT - --- fail, as partitioned tables don't allow NO INHERIT constraints -CREATE TABLE noinh_con_copy1_parted (LIKE noinh_con_copy INCLUDING ALL) - PARTITION BY LIST (a); -ERROR: cannot add NO INHERIT constraint to partitioned table "noinh_con_copy1_parted" -DROP TABLE noinh_con_copy, noinh_con_copy1; -/* LIKE with other relation kinds */ -CREATE TABLE ctlt4 (a int, b text); -CREATE SEQUENCE ctlseq1; -CREATE TABLE ctlt10 (LIKE ctlseq1); -- fail -ERROR: relation "ctlseq1" is invalid in LIKE clause -LINE 1: CREATE TABLE ctlt10 (LIKE ctlseq1); - ^ -DETAIL: This operation is not supported for sequences. -CREATE VIEW ctlv1 AS SELECT * FROM ctlt4; -CREATE TABLE ctlt11 (LIKE ctlv1); -CREATE TABLE ctlt11a (LIKE ctlv1 INCLUDING ALL); -CREATE TYPE ctlty1 AS (a int, b text); -CREATE TABLE ctlt12 (LIKE ctlty1); -DROP SEQUENCE ctlseq1; -DROP TYPE ctlty1; -DROP VIEW ctlv1; -DROP TABLE IF EXISTS ctlt4, ctlt10, ctlt11, ctlt11a, ctlt12; -NOTICE: table "ctlt10" does not exist, skipping +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/alter_generic.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/alter_generic.out --- /Users/admin/pgsql/src/test/regress/expected/alter_generic.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/alter_generic.out 2024-12-13 13:20:09 @@ -1,755 +1,2 @@ --- --- Test for ALTER some_object {RENAME TO, OWNER TO, SET SCHEMA} --- --- directory paths and dlsuffix are passed to us in environment variables -\getenv libdir PG_LIBDIR -\getenv dlsuffix PG_DLSUFFIX -\set regresslib :libdir '/regress' :dlsuffix -CREATE FUNCTION test_opclass_options_func(internal) - RETURNS void - AS :'regresslib', 'test_opclass_options_func' - LANGUAGE C; --- Clean up in case a prior regression run failed -SET client_min_messages TO 'warning'; -DROP ROLE IF EXISTS regress_alter_generic_user1; -DROP ROLE IF EXISTS regress_alter_generic_user2; -DROP ROLE IF EXISTS regress_alter_generic_user3; -RESET client_min_messages; -CREATE USER regress_alter_generic_user3; -CREATE USER regress_alter_generic_user2; -CREATE USER regress_alter_generic_user1 IN ROLE regress_alter_generic_user3; -CREATE SCHEMA alt_nsp1; -CREATE SCHEMA alt_nsp2; -GRANT ALL ON SCHEMA alt_nsp1, alt_nsp2 TO public; -SET search_path = alt_nsp1, public; --- --- Function and Aggregate --- -SET SESSION AUTHORIZATION regress_alter_generic_user1; -CREATE FUNCTION alt_func1(int) RETURNS int LANGUAGE sql - AS 'SELECT $1 + 1'; -CREATE FUNCTION alt_func2(int) RETURNS int LANGUAGE sql - AS 'SELECT $1 - 1'; -CREATE AGGREGATE alt_agg1 ( - sfunc1 = int4pl, basetype = int4, stype1 = int4, initcond = 0 -); -CREATE AGGREGATE alt_agg2 ( - sfunc1 = int4mi, basetype = int4, stype1 = int4, initcond = 0 -); -ALTER AGGREGATE alt_func1(int) RENAME TO alt_func3; -- failed (not aggregate) -ERROR: function alt_func1(integer) is not an aggregate -ALTER AGGREGATE alt_func1(int) OWNER TO regress_alter_generic_user3; -- failed (not aggregate) -ERROR: function alt_func1(integer) is not an aggregate -ALTER AGGREGATE alt_func1(int) SET 
SCHEMA alt_nsp2; -- failed (not aggregate) -ERROR: function alt_func1(integer) is not an aggregate -ALTER FUNCTION alt_func1(int) RENAME TO alt_func2; -- failed (name conflict) -ERROR: function alt_func2(integer) already exists in schema "alt_nsp1" -ALTER FUNCTION alt_func1(int) RENAME TO alt_func3; -- OK -ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" -ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_generic_user3; -- OK -ALTER FUNCTION alt_func2(int) SET SCHEMA alt_nsp1; -- OK, already there -ALTER FUNCTION alt_func2(int) SET SCHEMA alt_nsp2; -- OK -ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg2; -- failed (name conflict) -ERROR: function alt_agg2(integer) already exists in schema "alt_nsp1" -ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg3; -- OK -ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" -ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user3; -- OK -ALTER AGGREGATE alt_agg2(int) SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_generic_user2; -CREATE FUNCTION alt_func1(int) RETURNS int LANGUAGE sql - AS 'SELECT $1 + 2'; -CREATE FUNCTION alt_func2(int) RETURNS int LANGUAGE sql - AS 'SELECT $1 - 2'; -CREATE AGGREGATE alt_agg1 ( - sfunc1 = int4pl, basetype = int4, stype1 = int4, initcond = 100 -); -CREATE AGGREGATE alt_agg2 ( - sfunc1 = int4mi, basetype = int4, stype1 = int4, initcond = -100 -); -ALTER FUNCTION alt_func3(int) RENAME TO alt_func4; -- failed (not owner) -ERROR: must be owner of function alt_func3 -ALTER FUNCTION alt_func1(int) RENAME TO alt_func4; -- OK -ALTER FUNCTION alt_func3(int) OWNER TO regress_alter_generic_user2; -- failed (not owner) -ERROR: must be owner of function alt_func3 -ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_generic_user3; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user3" -ALTER FUNCTION alt_func3(int) SET SCHEMA alt_nsp2; -- failed (not owner) -ERROR: must be owner of function alt_func3 -ALTER FUNCTION alt_func2(int) SET SCHEMA alt_nsp2; -- failed (name conflicts) -ERROR: function alt_func2(integer) already exists in schema "alt_nsp2" -ALTER AGGREGATE alt_agg3(int) RENAME TO alt_agg4; -- failed (not owner) -ERROR: must be owner of function alt_agg3 -ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg4; -- OK -ALTER AGGREGATE alt_agg3(int) OWNER TO regress_alter_generic_user2; -- failed (not owner) -ERROR: must be owner of function alt_agg3 -ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user3; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user3" -ALTER AGGREGATE alt_agg3(int) SET SCHEMA alt_nsp2; -- failed (not owner) -ERROR: must be owner of function alt_agg3 -ALTER AGGREGATE alt_agg2(int) SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: function alt_agg2(integer) already exists in schema "alt_nsp2" -RESET SESSION AUTHORIZATION; -SELECT n.nspname, proname, prorettype::regtype, prokind, a.rolname - FROM pg_proc p, pg_namespace n, pg_authid a - WHERE p.pronamespace = n.oid AND p.proowner = a.oid - AND n.nspname IN ('alt_nsp1', 'alt_nsp2') - ORDER BY nspname, proname; - nspname | proname | prorettype | prokind | rolname -----------+-----------+------------+---------+----------------------------- - alt_nsp1 | alt_agg2 | integer | a | regress_alter_generic_user2 - alt_nsp1 | 
alt_agg3 | integer | a | regress_alter_generic_user1 - alt_nsp1 | alt_agg4 | integer | a | regress_alter_generic_user2 - alt_nsp1 | alt_func2 | integer | f | regress_alter_generic_user2 - alt_nsp1 | alt_func3 | integer | f | regress_alter_generic_user1 - alt_nsp1 | alt_func4 | integer | f | regress_alter_generic_user2 - alt_nsp2 | alt_agg2 | integer | a | regress_alter_generic_user3 - alt_nsp2 | alt_func2 | integer | f | regress_alter_generic_user3 -(8 rows) - --- --- We would test collations here, but it's not possible because the error --- messages tend to be nonportable. --- --- --- Conversion --- -SET SESSION AUTHORIZATION regress_alter_generic_user1; -CREATE CONVERSION alt_conv1 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; -CREATE CONVERSION alt_conv2 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; -ALTER CONVERSION alt_conv1 RENAME TO alt_conv2; -- failed (name conflict) -ERROR: conversion "alt_conv2" already exists in schema "alt_nsp1" -ALTER CONVERSION alt_conv1 RENAME TO alt_conv3; -- OK -ALTER CONVERSION alt_conv2 OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" -ALTER CONVERSION alt_conv2 OWNER TO regress_alter_generic_user3; -- OK -ALTER CONVERSION alt_conv2 SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_generic_user2; -CREATE CONVERSION alt_conv1 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; -CREATE CONVERSION alt_conv2 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; -ALTER CONVERSION alt_conv3 RENAME TO alt_conv4; -- failed (not owner) -ERROR: must be owner of conversion alt_conv3 -ALTER CONVERSION alt_conv1 RENAME TO alt_conv4; -- OK -ALTER CONVERSION alt_conv3 OWNER TO regress_alter_generic_user2; -- failed (not owner) -ERROR: must be owner of conversion alt_conv3 -ALTER CONVERSION alt_conv2 OWNER TO regress_alter_generic_user3; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user3" -ALTER CONVERSION alt_conv3 SET SCHEMA alt_nsp2; -- failed (not owner) -ERROR: must be owner of conversion alt_conv3 -ALTER CONVERSION alt_conv2 SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: conversion "alt_conv2" already exists in schema "alt_nsp2" -RESET SESSION AUTHORIZATION; -SELECT n.nspname, c.conname, a.rolname - FROM pg_conversion c, pg_namespace n, pg_authid a - WHERE c.connamespace = n.oid AND c.conowner = a.oid - AND n.nspname IN ('alt_nsp1', 'alt_nsp2') - ORDER BY nspname, conname; - nspname | conname | rolname -----------+-----------+----------------------------- - alt_nsp1 | alt_conv2 | regress_alter_generic_user2 - alt_nsp1 | alt_conv3 | regress_alter_generic_user1 - alt_nsp1 | alt_conv4 | regress_alter_generic_user2 - alt_nsp2 | alt_conv2 | regress_alter_generic_user3 -(4 rows) - --- --- Foreign Data Wrapper and Foreign Server --- -CREATE FOREIGN DATA WRAPPER alt_fdw1; -CREATE FOREIGN DATA WRAPPER alt_fdw2; -CREATE SERVER alt_fserv1 FOREIGN DATA WRAPPER alt_fdw1; -CREATE SERVER alt_fserv2 FOREIGN DATA WRAPPER alt_fdw2; -ALTER FOREIGN DATA WRAPPER alt_fdw1 RENAME TO alt_fdw2; -- failed (name conflict) -ERROR: foreign-data wrapper "alt_fdw2" already exists -ALTER FOREIGN DATA WRAPPER alt_fdw1 RENAME TO alt_fdw3; -- OK -ALTER SERVER alt_fserv1 RENAME TO alt_fserv2; -- failed (name conflict) -ERROR: server "alt_fserv2" already exists -ALTER SERVER alt_fserv1 RENAME TO alt_fserv3; -- OK -SELECT fdwname FROM pg_foreign_data_wrapper WHERE fdwname like 'alt_fdw%'; - fdwname ----------- - alt_fdw2 - alt_fdw3 -(2 rows) - -SELECT srvname 
FROM pg_foreign_server WHERE srvname like 'alt_fserv%'; - srvname ------------- - alt_fserv2 - alt_fserv3 -(2 rows) - --- --- Procedural Language --- -CREATE LANGUAGE alt_lang1 HANDLER plpgsql_call_handler; -CREATE LANGUAGE alt_lang2 HANDLER plpgsql_call_handler; -ALTER LANGUAGE alt_lang1 OWNER TO regress_alter_generic_user1; -- OK -ALTER LANGUAGE alt_lang2 OWNER TO regress_alter_generic_user2; -- OK -SET SESSION AUTHORIZATION regress_alter_generic_user1; -ALTER LANGUAGE alt_lang1 RENAME TO alt_lang2; -- failed (name conflict) -ERROR: language "alt_lang2" already exists -ALTER LANGUAGE alt_lang2 RENAME TO alt_lang3; -- failed (not owner) -ERROR: must be owner of language alt_lang2 -ALTER LANGUAGE alt_lang1 RENAME TO alt_lang3; -- OK -ALTER LANGUAGE alt_lang2 OWNER TO regress_alter_generic_user3; -- failed (not owner) -ERROR: must be owner of language alt_lang2 -ALTER LANGUAGE alt_lang3 OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" -ALTER LANGUAGE alt_lang3 OWNER TO regress_alter_generic_user3; -- OK -RESET SESSION AUTHORIZATION; -SELECT lanname, a.rolname - FROM pg_language l, pg_authid a - WHERE l.lanowner = a.oid AND l.lanname like 'alt_lang%' - ORDER BY lanname; - lanname | rolname ------------+----------------------------- - alt_lang2 | regress_alter_generic_user2 - alt_lang3 | regress_alter_generic_user3 -(2 rows) - --- --- Operator --- -SET SESSION AUTHORIZATION regress_alter_generic_user1; -CREATE OPERATOR @-@ ( leftarg = int4, rightarg = int4, procedure = int4mi ); -CREATE OPERATOR @+@ ( leftarg = int4, rightarg = int4, procedure = int4pl ); -ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" -ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_generic_user3; -- OK -ALTER OPERATOR @-@(int4, int4) SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_generic_user2; -CREATE OPERATOR @-@ ( leftarg = int4, rightarg = int4, procedure = int4mi ); -ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_generic_user2; -- failed (not owner) -ERROR: must be owner of operator @+@ -ALTER OPERATOR @-@(int4, int4) OWNER TO regress_alter_generic_user3; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user3" -ALTER OPERATOR @+@(int4, int4) SET SCHEMA alt_nsp2; -- failed (not owner) -ERROR: must be owner of operator @+@ --- can't test this: the error message includes the raw oid of namespace --- ALTER OPERATOR @-@(int4, int4) SET SCHEMA alt_nsp2; -- failed (name conflict) -RESET SESSION AUTHORIZATION; -SELECT n.nspname, oprname, a.rolname, - oprleft::regtype, oprright::regtype, oprcode::regproc - FROM pg_operator o, pg_namespace n, pg_authid a - WHERE o.oprnamespace = n.oid AND o.oprowner = a.oid - AND n.nspname IN ('alt_nsp1', 'alt_nsp2') - ORDER BY nspname, oprname; - nspname | oprname | rolname | oprleft | oprright | oprcode -----------+---------+-----------------------------+---------+----------+--------- - alt_nsp1 | @+@ | regress_alter_generic_user3 | integer | integer | int4pl - alt_nsp1 | @-@ | regress_alter_generic_user2 | integer | integer | int4mi - alt_nsp2 | @-@ | regress_alter_generic_user1 | integer | integer | int4mi -(3 rows) - --- --- OpFamily and OpClass --- -CREATE OPERATOR FAMILY alt_opf1 USING hash; -CREATE OPERATOR FAMILY alt_opf2 USING hash; -ALTER OPERATOR FAMILY alt_opf1 USING hash OWNER TO regress_alter_generic_user1; -ALTER 
OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user1; -CREATE OPERATOR CLASS alt_opc1 FOR TYPE uuid USING hash AS STORAGE uuid; -CREATE OPERATOR CLASS alt_opc2 FOR TYPE uuid USING hash AS STORAGE uuid; -ALTER OPERATOR CLASS alt_opc1 USING hash OWNER TO regress_alter_generic_user1; -ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user1; -SET SESSION AUTHORIZATION regress_alter_generic_user1; -ALTER OPERATOR FAMILY alt_opf1 USING hash RENAME TO alt_opf2; -- failed (name conflict) -ERROR: operator family "alt_opf2" for access method "hash" already exists in schema "alt_nsp1" -ALTER OPERATOR FAMILY alt_opf1 USING hash RENAME TO alt_opf3; -- OK -ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" -ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user3; -- OK -ALTER OPERATOR FAMILY alt_opf2 USING hash SET SCHEMA alt_nsp2; -- OK -ALTER OPERATOR CLASS alt_opc1 USING hash RENAME TO alt_opc2; -- failed (name conflict) -ERROR: operator class "alt_opc2" for access method "hash" already exists in schema "alt_nsp1" -ALTER OPERATOR CLASS alt_opc1 USING hash RENAME TO alt_opc3; -- OK -ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" -ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user3; -- OK -ALTER OPERATOR CLASS alt_opc2 USING hash SET SCHEMA alt_nsp2; -- OK -RESET SESSION AUTHORIZATION; -CREATE OPERATOR FAMILY alt_opf1 USING hash; -CREATE OPERATOR FAMILY alt_opf2 USING hash; -ALTER OPERATOR FAMILY alt_opf1 USING hash OWNER TO regress_alter_generic_user2; -ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user2; -CREATE OPERATOR CLASS alt_opc1 FOR TYPE macaddr USING hash AS STORAGE macaddr; -CREATE OPERATOR CLASS alt_opc2 FOR TYPE macaddr USING hash AS STORAGE macaddr; -ALTER OPERATOR CLASS alt_opc1 USING hash OWNER TO regress_alter_generic_user2; -ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user2; -SET SESSION AUTHORIZATION regress_alter_generic_user2; -ALTER OPERATOR FAMILY alt_opf3 USING hash RENAME TO alt_opf4; -- failed (not owner) -ERROR: must be owner of operator family alt_opf3 -ALTER OPERATOR FAMILY alt_opf1 USING hash RENAME TO alt_opf4; -- OK -ALTER OPERATOR FAMILY alt_opf3 USING hash OWNER TO regress_alter_generic_user2; -- failed (not owner) -ERROR: must be owner of operator family alt_opf3 -ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user3; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user3" -ALTER OPERATOR FAMILY alt_opf3 USING hash SET SCHEMA alt_nsp2; -- failed (not owner) -ERROR: must be owner of operator family alt_opf3 -ALTER OPERATOR FAMILY alt_opf2 USING hash SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: operator family "alt_opf2" for access method "hash" already exists in schema "alt_nsp2" -ALTER OPERATOR CLASS alt_opc3 USING hash RENAME TO alt_opc4; -- failed (not owner) -ERROR: must be owner of operator class alt_opc3 -ALTER OPERATOR CLASS alt_opc1 USING hash RENAME TO alt_opc4; -- OK -ALTER OPERATOR CLASS alt_opc3 USING hash OWNER TO regress_alter_generic_user2; -- failed (not owner) -ERROR: must be owner of operator class alt_opc3 -ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user3; -- 
failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user3" -ALTER OPERATOR CLASS alt_opc3 USING hash SET SCHEMA alt_nsp2; -- failed (not owner) -ERROR: must be owner of operator class alt_opc3 -ALTER OPERATOR CLASS alt_opc2 USING hash SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: operator class "alt_opc2" for access method "hash" already exists in schema "alt_nsp2" -RESET SESSION AUTHORIZATION; -SELECT nspname, opfname, amname, rolname - FROM pg_opfamily o, pg_am m, pg_namespace n, pg_authid a - WHERE o.opfmethod = m.oid AND o.opfnamespace = n.oid AND o.opfowner = a.oid - AND n.nspname IN ('alt_nsp1', 'alt_nsp2') - AND NOT opfname LIKE 'alt_opc%' - ORDER BY nspname, opfname; - nspname | opfname | amname | rolname -----------+----------+--------+----------------------------- - alt_nsp1 | alt_opf2 | hash | regress_alter_generic_user2 - alt_nsp1 | alt_opf3 | hash | regress_alter_generic_user1 - alt_nsp1 | alt_opf4 | hash | regress_alter_generic_user2 - alt_nsp2 | alt_opf2 | hash | regress_alter_generic_user3 -(4 rows) - -SELECT nspname, opcname, amname, rolname - FROM pg_opclass o, pg_am m, pg_namespace n, pg_authid a - WHERE o.opcmethod = m.oid AND o.opcnamespace = n.oid AND o.opcowner = a.oid - AND n.nspname IN ('alt_nsp1', 'alt_nsp2') - ORDER BY nspname, opcname; - nspname | opcname | amname | rolname -----------+----------+--------+----------------------------- - alt_nsp1 | alt_opc2 | hash | regress_alter_generic_user2 - alt_nsp1 | alt_opc3 | hash | regress_alter_generic_user1 - alt_nsp1 | alt_opc4 | hash | regress_alter_generic_user2 - alt_nsp2 | alt_opc2 | hash | regress_alter_generic_user3 -(4 rows) - --- ALTER OPERATOR FAMILY ... ADD/DROP --- Should work. Textbook case of CREATE / ALTER ADD / ALTER DROP / DROP -BEGIN TRANSACTION; -CREATE OPERATOR FAMILY alt_opf4 USING btree; -ALTER OPERATOR FAMILY alt_opf4 USING btree ADD - -- int4 vs int2 - OPERATOR 1 < (int4, int2) , - OPERATOR 2 <= (int4, int2) , - OPERATOR 3 = (int4, int2) , - OPERATOR 4 >= (int4, int2) , - OPERATOR 5 > (int4, int2) , - FUNCTION 1 btint42cmp(int4, int2); -ALTER OPERATOR FAMILY alt_opf4 USING btree DROP - -- int4 vs int2 - OPERATOR 1 (int4, int2) , - OPERATOR 2 (int4, int2) , - OPERATOR 3 (int4, int2) , - OPERATOR 4 (int4, int2) , - OPERATOR 5 (int4, int2) , - FUNCTION 1 (int4, int2) ; -DROP OPERATOR FAMILY alt_opf4 USING btree; -ROLLBACK; --- Should fail. Invalid values for ALTER OPERATOR FAMILY .. 
ADD / DROP -CREATE OPERATOR FAMILY alt_opf4 USING btree; -ALTER OPERATOR FAMILY alt_opf4 USING invalid_index_method ADD OPERATOR 1 < (int4, int2); -- invalid indexing_method -ERROR: access method "invalid_index_method" does not exist -ALTER OPERATOR FAMILY alt_opf4 USING btree ADD OPERATOR 6 < (int4, int2); -- operator number should be between 1 and 5 -ERROR: invalid operator number 6, must be between 1 and 5 -ALTER OPERATOR FAMILY alt_opf4 USING btree ADD OPERATOR 0 < (int4, int2); -- operator number should be between 1 and 5 -ERROR: invalid operator number 0, must be between 1 and 5 -ALTER OPERATOR FAMILY alt_opf4 USING btree ADD OPERATOR 1 < ; -- operator without argument types -ERROR: operator argument types must be specified in ALTER OPERATOR FAMILY -ALTER OPERATOR FAMILY alt_opf4 USING btree ADD FUNCTION 0 btint42cmp(int4, int2); -- invalid options parsing function -ERROR: invalid function number 0, must be between 1 and 5 -ALTER OPERATOR FAMILY alt_opf4 USING btree ADD FUNCTION 6 btint42cmp(int4, int2); -- function number should be between 1 and 5 -ERROR: invalid function number 6, must be between 1 and 5 -ALTER OPERATOR FAMILY alt_opf4 USING btree ADD STORAGE invalid_storage; -- Ensure STORAGE is not a part of ALTER OPERATOR FAMILY -ERROR: STORAGE cannot be specified in ALTER OPERATOR FAMILY -DROP OPERATOR FAMILY alt_opf4 USING btree; --- Should fail. Need to be SUPERUSER to do ALTER OPERATOR FAMILY .. ADD / DROP -BEGIN TRANSACTION; -CREATE ROLE regress_alter_generic_user5 NOSUPERUSER; -CREATE OPERATOR FAMILY alt_opf5 USING btree; -SET ROLE regress_alter_generic_user5; -ALTER OPERATOR FAMILY alt_opf5 USING btree ADD OPERATOR 1 < (int4, int2), FUNCTION 1 btint42cmp(int4, int2); -ERROR: must be superuser to alter an operator family -RESET ROLE; -ERROR: current transaction is aborted, commands ignored until end of transaction block -DROP OPERATOR FAMILY alt_opf5 USING btree; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- Should fail. Need rights to namespace for ALTER OPERATOR FAMILY .. ADD / DROP -BEGIN TRANSACTION; -CREATE ROLE regress_alter_generic_user6; -CREATE SCHEMA alt_nsp6; -REVOKE ALL ON SCHEMA alt_nsp6 FROM regress_alter_generic_user6; -CREATE OPERATOR FAMILY alt_nsp6.alt_opf6 USING btree; -SET ROLE regress_alter_generic_user6; -ALTER OPERATOR FAMILY alt_nsp6.alt_opf6 USING btree ADD OPERATOR 1 < (int4, int2); -ERROR: permission denied for schema alt_nsp6 -ROLLBACK; --- Should fail. Only two arguments required for ALTER OPERATOR FAMILY ... DROP OPERATOR -CREATE OPERATOR FAMILY alt_opf7 USING btree; -ALTER OPERATOR FAMILY alt_opf7 USING btree ADD OPERATOR 1 < (int4, int2); -ALTER OPERATOR FAMILY alt_opf7 USING btree DROP OPERATOR 1 (int4, int2, int8); -ERROR: one or two argument types must be specified -DROP OPERATOR FAMILY alt_opf7 USING btree; --- Should work. During ALTER OPERATOR FAMILY ... DROP OPERATOR --- when left type is the same as right type, a DROP with only one argument type should work -CREATE OPERATOR FAMILY alt_opf8 USING btree; -ALTER OPERATOR FAMILY alt_opf8 USING btree ADD OPERATOR 1 < (int4, int4); -DROP OPERATOR FAMILY alt_opf8 USING btree; --- Should work. Textbook case of ALTER OPERATOR FAMILY ... ADD OPERATOR with FOR ORDER BY -CREATE OPERATOR FAMILY alt_opf9 USING gist; -ALTER OPERATOR FAMILY alt_opf9 USING gist ADD OPERATOR 1 < (int4, int4) FOR ORDER BY float_ops; -DROP OPERATOR FAMILY alt_opf9 USING gist; --- Should fail. Ensure correct ordering methods in ALTER OPERATOR FAMILY ... 
ADD OPERATOR .. FOR ORDER BY -CREATE OPERATOR FAMILY alt_opf10 USING btree; -ALTER OPERATOR FAMILY alt_opf10 USING btree ADD OPERATOR 1 < (int4, int4) FOR ORDER BY float_ops; -ERROR: access method "btree" does not support ordering operators -DROP OPERATOR FAMILY alt_opf10 USING btree; --- Should work. Textbook case of ALTER OPERATOR FAMILY ... ADD OPERATOR with FOR ORDER BY -CREATE OPERATOR FAMILY alt_opf11 USING gist; -ALTER OPERATOR FAMILY alt_opf11 USING gist ADD OPERATOR 1 < (int4, int4) FOR ORDER BY float_ops; -ALTER OPERATOR FAMILY alt_opf11 USING gist DROP OPERATOR 1 (int4, int4); -DROP OPERATOR FAMILY alt_opf11 USING gist; --- Should fail. btree comparison functions should return INTEGER in ALTER OPERATOR FAMILY ... ADD FUNCTION -BEGIN TRANSACTION; -CREATE OPERATOR FAMILY alt_opf12 USING btree; -CREATE FUNCTION fn_opf12 (int4, int2) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL; -ALTER OPERATOR FAMILY alt_opf12 USING btree ADD FUNCTION 1 fn_opf12(int4, int2); -ERROR: btree comparison functions must return integer -DROP OPERATOR FAMILY alt_opf12 USING btree; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- Should fail. hash comparison functions should return INTEGER in ALTER OPERATOR FAMILY ... ADD FUNCTION -BEGIN TRANSACTION; -CREATE OPERATOR FAMILY alt_opf13 USING hash; -CREATE FUNCTION fn_opf13 (int4) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL; -ALTER OPERATOR FAMILY alt_opf13 USING hash ADD FUNCTION 1 fn_opf13(int4); -ERROR: hash function 1 must return integer -DROP OPERATOR FAMILY alt_opf13 USING hash; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- Should fail. btree comparison functions should have two arguments in ALTER OPERATOR FAMILY ... ADD FUNCTION -BEGIN TRANSACTION; -CREATE OPERATOR FAMILY alt_opf14 USING btree; -CREATE FUNCTION fn_opf14 (int4) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL; -ALTER OPERATOR FAMILY alt_opf14 USING btree ADD FUNCTION 1 fn_opf14(int4); -ERROR: btree comparison functions must have two arguments -DROP OPERATOR FAMILY alt_opf14 USING btree; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- Should fail. hash comparison functions should have one argument in ALTER OPERATOR FAMILY ... ADD FUNCTION -BEGIN TRANSACTION; -CREATE OPERATOR FAMILY alt_opf15 USING hash; -CREATE FUNCTION fn_opf15 (int4, int2) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL; -ALTER OPERATOR FAMILY alt_opf15 USING hash ADD FUNCTION 1 fn_opf15(int4, int2); -ERROR: hash function 1 must have one argument -DROP OPERATOR FAMILY alt_opf15 USING hash; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- Should fail. In gist throw an error when giving different data types for function argument --- without defining left / right type in ALTER OPERATOR FAMILY ... ADD FUNCTION -CREATE OPERATOR FAMILY alt_opf16 USING gist; -ALTER OPERATOR FAMILY alt_opf16 USING gist ADD FUNCTION 1 btint42cmp(int4, int2); -ERROR: associated data types must be specified for index support function -DROP OPERATOR FAMILY alt_opf16 USING gist; --- Should fail. duplicate operator number / function number in ALTER OPERATOR FAMILY ... 
ADD FUNCTION -CREATE OPERATOR FAMILY alt_opf17 USING btree; -ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4), OPERATOR 1 < (int4, int4); -- operator # appears twice in same statement -ERROR: operator number 1 for (integer,integer) appears more than once -ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4); -- operator 1 requested first-time -ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4); -- operator 1 requested again in separate statement -ERROR: operator 1(integer,integer) already exists in operator family "alt_opf17" -ALTER OPERATOR FAMILY alt_opf17 USING btree ADD - OPERATOR 1 < (int4, int2) , - OPERATOR 2 <= (int4, int2) , - OPERATOR 3 = (int4, int2) , - OPERATOR 4 >= (int4, int2) , - OPERATOR 5 > (int4, int2) , - FUNCTION 1 btint42cmp(int4, int2) , - FUNCTION 1 btint42cmp(int4, int2); -- procedure 1 appears twice in same statement -ERROR: function number 1 for (integer,smallint) appears more than once -ALTER OPERATOR FAMILY alt_opf17 USING btree ADD - OPERATOR 1 < (int4, int2) , - OPERATOR 2 <= (int4, int2) , - OPERATOR 3 = (int4, int2) , - OPERATOR 4 >= (int4, int2) , - OPERATOR 5 > (int4, int2) , - FUNCTION 1 btint42cmp(int4, int2); -- procedure 1 appears first time -ALTER OPERATOR FAMILY alt_opf17 USING btree ADD - OPERATOR 1 < (int4, int2) , - OPERATOR 2 <= (int4, int2) , - OPERATOR 3 = (int4, int2) , - OPERATOR 4 >= (int4, int2) , - OPERATOR 5 > (int4, int2) , - FUNCTION 1 btint42cmp(int4, int2); -- procedure 1 requested again in separate statement -ERROR: operator 1(integer,smallint) already exists in operator family "alt_opf17" -DROP OPERATOR FAMILY alt_opf17 USING btree; --- Should fail. Ensure that DROP requests for missing OPERATOR / FUNCTIONS --- return appropriate message in ALTER OPERATOR FAMILY ... DROP OPERATOR / FUNCTION -CREATE OPERATOR FAMILY alt_opf18 USING btree; -ALTER OPERATOR FAMILY alt_opf18 USING btree DROP OPERATOR 1 (int4, int4); -ERROR: operator 1(integer,integer) does not exist in operator family "alt_opf18" -ALTER OPERATOR FAMILY alt_opf18 USING btree ADD - OPERATOR 1 < (int4, int2) , - OPERATOR 2 <= (int4, int2) , - OPERATOR 3 = (int4, int2) , - OPERATOR 4 >= (int4, int2) , - OPERATOR 5 > (int4, int2) , - FUNCTION 1 btint42cmp(int4, int2); --- Should fail. Not allowed to have cross-type equalimage function. -ALTER OPERATOR FAMILY alt_opf18 USING btree - ADD FUNCTION 4 (int4, int2) btequalimage(oid); -ERROR: btree equal image functions must not be cross-type -ALTER OPERATOR FAMILY alt_opf18 USING btree DROP FUNCTION 2 (int4, int4); -ERROR: function 2(integer,integer) does not exist in operator family "alt_opf18" -DROP OPERATOR FAMILY alt_opf18 USING btree; --- Should fail. Invalid opclass options function (#5) specifications. -CREATE OPERATOR FAMILY alt_opf19 USING btree; -ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 test_opclass_options_func(internal, text[], bool); -ERROR: function test_opclass_options_func(internal, text[], boolean) does not exist -ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 (int4) btint42cmp(int4, int2); -ERROR: invalid operator class options parsing function -HINT: Valid signature of operator class options parsing function is (internal) RETURNS void. 
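For contrast with the negative tests in this hunk, a well-formed ALTER OPERATOR FAMILY ... ADD for btree looks like the following sketch (the family name is hypothetical; btint42cmp is the built-in int4-versus-int2 comparison function the tests themselves use):

CREATE OPERATOR FAMILY sketch_opf USING btree;
ALTER OPERATOR FAMILY sketch_opf USING btree ADD
  OPERATOR 3 = (int4, int2),          -- btree strategy numbers run 1..5
  FUNCTION 1 btint42cmp(int4, int2);  -- support function 1: two arguments, returns integer
DROP OPERATOR FAMILY sketch_opf USING btree;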
-ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 (int4, int2) btint42cmp(int4, int2); -ERROR: left and right associated data types for operator class options parsing functions must match -ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 (int4) test_opclass_options_func(internal); -- Ok -ALTER OPERATOR FAMILY alt_opf19 USING btree DROP FUNCTION 5 (int4, int4); -DROP OPERATOR FAMILY alt_opf19 USING btree; --- --- Statistics --- -SET SESSION AUTHORIZATION regress_alter_generic_user1; -CREATE TABLE alt_regress_1 (a INTEGER, b INTEGER); -CREATE STATISTICS alt_stat1 ON a, b FROM alt_regress_1; -CREATE STATISTICS alt_stat2 ON a, b FROM alt_regress_1; -ALTER STATISTICS alt_stat1 RENAME TO alt_stat2; -- failed (name conflict) -ERROR: statistics object "alt_stat2" already exists in schema "alt_nsp1" -ALTER STATISTICS alt_stat1 RENAME TO alt_stat3; -- OK -ALTER STATISTICS alt_stat2 OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" -ALTER STATISTICS alt_stat2 OWNER TO regress_alter_generic_user3; -- OK -ALTER STATISTICS alt_stat2 SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_generic_user2; -CREATE TABLE alt_regress_2 (a INTEGER, b INTEGER); -CREATE STATISTICS alt_stat1 ON a, b FROM alt_regress_2; -CREATE STATISTICS alt_stat2 ON a, b FROM alt_regress_2; -ALTER STATISTICS alt_stat3 RENAME TO alt_stat4; -- failed (not owner) -ERROR: must be owner of statistics object alt_stat3 -ALTER STATISTICS alt_stat1 RENAME TO alt_stat4; -- OK -ALTER STATISTICS alt_stat3 OWNER TO regress_alter_generic_user2; -- failed (not owner) -ERROR: must be owner of statistics object alt_stat3 -ALTER STATISTICS alt_stat2 OWNER TO regress_alter_generic_user3; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user3" -ALTER STATISTICS alt_stat3 SET SCHEMA alt_nsp2; -- failed (not owner) -ERROR: must be owner of statistics object alt_stat3 -ALTER STATISTICS alt_stat2 SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: statistics object "alt_stat2" already exists in schema "alt_nsp2" -RESET SESSION AUTHORIZATION; -SELECT nspname, stxname, rolname - FROM pg_statistic_ext s, pg_namespace n, pg_authid a - WHERE s.stxnamespace = n.oid AND s.stxowner = a.oid - AND n.nspname in ('alt_nsp1', 'alt_nsp2') - ORDER BY nspname, stxname; - nspname | stxname | rolname -----------+-----------+----------------------------- - alt_nsp1 | alt_stat2 | regress_alter_generic_user2 - alt_nsp1 | alt_stat3 | regress_alter_generic_user1 - alt_nsp1 | alt_stat4 | regress_alter_generic_user2 - alt_nsp2 | alt_stat2 | regress_alter_generic_user3 -(4 rows) - --- --- Text Search Dictionary --- -SET SESSION AUTHORIZATION regress_alter_generic_user1; -CREATE TEXT SEARCH DICTIONARY alt_ts_dict1 (template=simple); -CREATE TEXT SEARCH DICTIONARY alt_ts_dict2 (template=simple); -ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 RENAME TO alt_ts_dict2; -- failed (name conflict) -ERROR: text search dictionary "alt_ts_dict2" already exists in schema "alt_nsp1" -ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 RENAME TO alt_ts_dict3; -- OK -ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" -ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_generic_user3; -- OK -ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_generic_user2; -CREATE 
TEXT SEARCH DICTIONARY alt_ts_dict1 (template=simple); -CREATE TEXT SEARCH DICTIONARY alt_ts_dict2 (template=simple); -ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 RENAME TO alt_ts_dict4; -- failed (not owner) -ERROR: must be owner of text search dictionary alt_ts_dict3 -ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 RENAME TO alt_ts_dict4; -- OK -ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 OWNER TO regress_alter_generic_user2; -- failed (not owner) -ERROR: must be owner of text search dictionary alt_ts_dict3 -ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_generic_user3; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user3" -ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 SET SCHEMA alt_nsp2; -- failed (not owner) -ERROR: must be owner of text search dictionary alt_ts_dict3 -ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: text search dictionary "alt_ts_dict2" already exists in schema "alt_nsp2" -RESET SESSION AUTHORIZATION; -SELECT nspname, dictname, rolname - FROM pg_ts_dict t, pg_namespace n, pg_authid a - WHERE t.dictnamespace = n.oid AND t.dictowner = a.oid - AND n.nspname in ('alt_nsp1', 'alt_nsp2') - ORDER BY nspname, dictname; - nspname | dictname | rolname -----------+--------------+----------------------------- - alt_nsp1 | alt_ts_dict2 | regress_alter_generic_user2 - alt_nsp1 | alt_ts_dict3 | regress_alter_generic_user1 - alt_nsp1 | alt_ts_dict4 | regress_alter_generic_user2 - alt_nsp2 | alt_ts_dict2 | regress_alter_generic_user3 -(4 rows) - --- --- Text Search Configuration --- -SET SESSION AUTHORIZATION regress_alter_generic_user1; -CREATE TEXT SEARCH CONFIGURATION alt_ts_conf1 (copy=english); -CREATE TEXT SEARCH CONFIGURATION alt_ts_conf2 (copy=english); -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 RENAME TO alt_ts_conf2; -- failed (name conflict) -ERROR: text search configuration "alt_ts_conf2" already exists in schema "alt_nsp1" -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 RENAME TO alt_ts_conf3; -- OK -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_generic_user3; -- OK -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_generic_user2; -CREATE TEXT SEARCH CONFIGURATION alt_ts_conf1 (copy=english); -CREATE TEXT SEARCH CONFIGURATION alt_ts_conf2 (copy=english); -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 RENAME TO alt_ts_conf4; -- failed (not owner) -ERROR: must be owner of text search configuration alt_ts_conf3 -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 RENAME TO alt_ts_conf4; -- OK -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 OWNER TO regress_alter_generic_user2; -- failed (not owner) -ERROR: must be owner of text search configuration alt_ts_conf3 -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_generic_user3; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user3" -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 SET SCHEMA alt_nsp2; -- failed (not owner) -ERROR: must be owner of text search configuration alt_ts_conf3 -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: text search configuration "alt_ts_conf2" already exists in schema "alt_nsp2" -RESET SESSION AUTHORIZATION; -SELECT nspname, cfgname, rolname - FROM 
pg_ts_config t, pg_namespace n, pg_authid a - WHERE t.cfgnamespace = n.oid AND t.cfgowner = a.oid - AND n.nspname in ('alt_nsp1', 'alt_nsp2') - ORDER BY nspname, cfgname; - nspname | cfgname | rolname -----------+--------------+----------------------------- - alt_nsp1 | alt_ts_conf2 | regress_alter_generic_user2 - alt_nsp1 | alt_ts_conf3 | regress_alter_generic_user1 - alt_nsp1 | alt_ts_conf4 | regress_alter_generic_user2 - alt_nsp2 | alt_ts_conf2 | regress_alter_generic_user3 -(4 rows) - --- --- Text Search Template --- -CREATE TEXT SEARCH TEMPLATE alt_ts_temp1 (lexize=dsimple_lexize); -CREATE TEXT SEARCH TEMPLATE alt_ts_temp2 (lexize=dsimple_lexize); -ALTER TEXT SEARCH TEMPLATE alt_ts_temp1 RENAME TO alt_ts_temp2; -- failed (name conflict) -ERROR: text search template "alt_ts_temp2" already exists in schema "alt_nsp1" -ALTER TEXT SEARCH TEMPLATE alt_ts_temp1 RENAME TO alt_ts_temp3; -- OK -ALTER TEXT SEARCH TEMPLATE alt_ts_temp2 SET SCHEMA alt_nsp2; -- OK -CREATE TEXT SEARCH TEMPLATE alt_ts_temp2 (lexize=dsimple_lexize); -ALTER TEXT SEARCH TEMPLATE alt_ts_temp2 SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: text search template "alt_ts_temp2" already exists in schema "alt_nsp2" --- invalid: non-lowercase quoted identifiers -CREATE TEXT SEARCH TEMPLATE tstemp_case ("Init" = init_function); -ERROR: text search template parameter "Init" not recognized -SELECT nspname, tmplname - FROM pg_ts_template t, pg_namespace n - WHERE t.tmplnamespace = n.oid AND nspname like 'alt_nsp%' - ORDER BY nspname, tmplname; - nspname | tmplname -----------+-------------- - alt_nsp1 | alt_ts_temp2 - alt_nsp1 | alt_ts_temp3 - alt_nsp2 | alt_ts_temp2 -(3 rows) - --- --- Text Search Parser --- -CREATE TEXT SEARCH PARSER alt_ts_prs1 - (start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype); -CREATE TEXT SEARCH PARSER alt_ts_prs2 - (start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype); -ALTER TEXT SEARCH PARSER alt_ts_prs1 RENAME TO alt_ts_prs2; -- failed (name conflict) -ERROR: text search parser "alt_ts_prs2" already exists in schema "alt_nsp1" -ALTER TEXT SEARCH PARSER alt_ts_prs1 RENAME TO alt_ts_prs3; -- OK -ALTER TEXT SEARCH PARSER alt_ts_prs2 SET SCHEMA alt_nsp2; -- OK -CREATE TEXT SEARCH PARSER alt_ts_prs2 - (start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype); -ALTER TEXT SEARCH PARSER alt_ts_prs2 SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: text search parser "alt_ts_prs2" already exists in schema "alt_nsp2" --- invalid: non-lowercase quoted identifiers -CREATE TEXT SEARCH PARSER tspars_case ("Start" = start_function); -ERROR: text search parser parameter "Start" not recognized -SELECT nspname, prsname - FROM pg_ts_parser t, pg_namespace n - WHERE t.prsnamespace = n.oid AND nspname like 'alt_nsp%' - ORDER BY nspname, prsname; - nspname | prsname -----------+------------- - alt_nsp1 | alt_ts_prs2 - alt_nsp1 | alt_ts_prs3 - alt_nsp2 | alt_ts_prs2 -(3 rows) - ---- ---- Cleanup resources ---- -DROP FOREIGN DATA WRAPPER alt_fdw2 CASCADE; -NOTICE: drop cascades to server alt_fserv2 -DROP FOREIGN DATA WRAPPER alt_fdw3 CASCADE; -NOTICE: drop cascades to server alt_fserv3 -DROP LANGUAGE alt_lang2 CASCADE; -DROP LANGUAGE alt_lang3 CASCADE; -DROP SCHEMA alt_nsp1 CASCADE; -NOTICE: drop cascades to 28 other objects -DETAIL: drop cascades to function alt_func3(integer) -drop cascades to function alt_agg3(integer) -drop cascades to function alt_func4(integer) -drop cascades to function 
alt_func2(integer) -drop cascades to function alt_agg4(integer) -drop cascades to function alt_agg2(integer) -drop cascades to conversion alt_conv3 -drop cascades to conversion alt_conv4 -drop cascades to conversion alt_conv2 -drop cascades to operator @+@(integer,integer) -drop cascades to operator @-@(integer,integer) -drop cascades to operator family alt_opf3 for access method hash -drop cascades to operator family alt_opc1 for access method hash -drop cascades to operator family alt_opc2 for access method hash -drop cascades to operator family alt_opf4 for access method hash -drop cascades to operator family alt_opf2 for access method hash -drop cascades to table alt_regress_1 -drop cascades to table alt_regress_2 -drop cascades to text search dictionary alt_ts_dict3 -drop cascades to text search dictionary alt_ts_dict4 -drop cascades to text search dictionary alt_ts_dict2 -drop cascades to text search configuration alt_ts_conf3 -drop cascades to text search configuration alt_ts_conf4 -drop cascades to text search configuration alt_ts_conf2 -drop cascades to text search template alt_ts_temp3 -drop cascades to text search template alt_ts_temp2 -drop cascades to text search parser alt_ts_prs3 -drop cascades to text search parser alt_ts_prs2 -DROP SCHEMA alt_nsp2 CASCADE; -NOTICE: drop cascades to 9 other objects -DETAIL: drop cascades to function alt_nsp2.alt_func2(integer) -drop cascades to function alt_nsp2.alt_agg2(integer) -drop cascades to conversion alt_nsp2.alt_conv2 -drop cascades to operator alt_nsp2.@-@(integer,integer) -drop cascades to operator family alt_nsp2.alt_opf2 for access method hash -drop cascades to text search dictionary alt_nsp2.alt_ts_dict2 -drop cascades to text search configuration alt_nsp2.alt_ts_conf2 -drop cascades to text search template alt_nsp2.alt_ts_temp2 -drop cascades to text search parser alt_nsp2.alt_ts_prs2 -DROP USER regress_alter_generic_user1; -DROP USER regress_alter_generic_user2; -DROP USER regress_alter_generic_user3; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/alter_operator.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/alter_operator.out --- /Users/admin/pgsql/src/test/regress/expected/alter_operator.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/alter_operator.out 2024-12-13 13:20:10 @@ -1,267 +1,2 @@ -CREATE FUNCTION alter_op_test_fn(boolean, boolean) -RETURNS boolean AS $$ SELECT NULL::BOOLEAN; $$ LANGUAGE sql IMMUTABLE; -CREATE FUNCTION customcontsel(internal, oid, internal, integer) -RETURNS float8 AS 'contsel' LANGUAGE internal STABLE STRICT; -CREATE OPERATOR === ( - LEFTARG = boolean, - RIGHTARG = boolean, - PROCEDURE = alter_op_test_fn, - COMMUTATOR = ===, - NEGATOR = !==, - RESTRICT = customcontsel, - JOIN = contjoinsel, - HASHES, MERGES -); -SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype -FROM pg_depend -WHERE classid = 'pg_operator'::regclass AND - objid = '===(bool,bool)'::regoperator -ORDER BY 1; - ref | deptype --------------------------------------------------------+--------- - function alter_op_test_fn(boolean,boolean) | n - function customcontsel(internal,oid,internal,integer) | n - schema public | n -(3 rows) - --- --- Test resetting and setting restrict and join attributes. 
--- -ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = NONE); -ALTER OPERATOR === (boolean, boolean) SET (JOIN = NONE); -SELECT oprrest, oprjoin FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'boolean'::regtype; - oprrest | oprjoin ----------+--------- - - | - -(1 row) - -SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype -FROM pg_depend -WHERE classid = 'pg_operator'::regclass AND - objid = '===(bool,bool)'::regoperator -ORDER BY 1; - ref | deptype ---------------------------------------------+--------- - function alter_op_test_fn(boolean,boolean) | n - schema public | n -(2 rows) - -ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = contsel); -ALTER OPERATOR === (boolean, boolean) SET (JOIN = contjoinsel); -SELECT oprrest, oprjoin FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'boolean'::regtype; - oprrest | oprjoin ----------+------------- - contsel | contjoinsel -(1 row) - -SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype -FROM pg_depend -WHERE classid = 'pg_operator'::regclass AND - objid = '===(bool,bool)'::regoperator -ORDER BY 1; - ref | deptype ---------------------------------------------+--------- - function alter_op_test_fn(boolean,boolean) | n - schema public | n -(2 rows) - -ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = NONE, JOIN = NONE); -SELECT oprrest, oprjoin FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'boolean'::regtype; - oprrest | oprjoin ----------+--------- - - | - -(1 row) - -SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype -FROM pg_depend -WHERE classid = 'pg_operator'::regclass AND - objid = '===(bool,bool)'::regoperator -ORDER BY 1; - ref | deptype ---------------------------------------------+--------- - function alter_op_test_fn(boolean,boolean) | n - schema public | n -(2 rows) - -ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = customcontsel, JOIN = contjoinsel); -SELECT oprrest, oprjoin FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'boolean'::regtype; - oprrest | oprjoin ----------------+------------- - customcontsel | contjoinsel -(1 row) - -SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype -FROM pg_depend -WHERE classid = 'pg_operator'::regclass AND - objid = '===(bool,bool)'::regoperator -ORDER BY 1; - ref | deptype --------------------------------------------------------+--------- - function alter_op_test_fn(boolean,boolean) | n - function customcontsel(internal,oid,internal,integer) | n - schema public | n -(3 rows) - --- --- Test invalid options. --- -ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = non_existent_func); -ERROR: function non_existent_func(internal, oid, internal, integer) does not exist -ALTER OPERATOR === (boolean, boolean) SET (JOIN = non_existent_func); -ERROR: function non_existent_func(internal, oid, internal, smallint, internal) does not exist --- invalid: non-lowercase quoted identifiers -ALTER OPERATOR & (bit, bit) SET ("Restrict" = _int_contsel, "Join" = _int_contjoinsel); -ERROR: operator attribute "Restrict" not recognized --- --- Test permission check. Must be owner to ALTER OPERATOR. 
--- -CREATE USER regress_alter_op_user; -SET SESSION AUTHORIZATION regress_alter_op_user; -ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = NONE); -ERROR: must be owner of operator === -RESET SESSION AUTHORIZATION; --- --- Test setting commutator, negator, merges, and hashes attributes, --- which can only be set if not already set --- -CREATE FUNCTION alter_op_test_fn_bool_real(boolean, real) -RETURNS boolean AS $$ SELECT NULL::BOOLEAN; $$ LANGUAGE sql IMMUTABLE; -CREATE FUNCTION alter_op_test_fn_real_bool(real, boolean) -RETURNS boolean AS $$ SELECT NULL::BOOLEAN; $$ LANGUAGE sql IMMUTABLE; --- operator -CREATE OPERATOR === ( - LEFTARG = boolean, - RIGHTARG = real, - PROCEDURE = alter_op_test_fn_bool_real -); --- commutator -CREATE OPERATOR ==== ( - LEFTARG = real, - RIGHTARG = boolean, - PROCEDURE = alter_op_test_fn_real_bool -); --- negator -CREATE OPERATOR !==== ( - LEFTARG = boolean, - RIGHTARG = real, - PROCEDURE = alter_op_test_fn_bool_real -); --- No-op setting already false hashes and merges to false works -ALTER OPERATOR === (boolean, real) SET (MERGES = false); -ALTER OPERATOR === (boolean, real) SET (HASHES = false); --- Test setting merges and hashes -ALTER OPERATOR === (boolean, real) SET (MERGES); -ALTER OPERATOR === (boolean, real) SET (HASHES); -SELECT oprcanmerge, oprcanhash -FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'real'::regtype; - oprcanmerge | oprcanhash --------------+------------ - t | t -(1 row) - --- Test setting commutator -ALTER OPERATOR === (boolean, real) SET (COMMUTATOR = ====); --- Check that oprcom has been set on both the operator and commutator, --- that they reference each other, and that the operator used is the existing --- one we created and not a new shell operator. -SELECT op.oprname AS operator_name, com.oprname AS commutator_name, - com.oprcode AS commutator_func - FROM pg_operator op - INNER JOIN pg_operator com ON (op.oid = com.oprcom AND op.oprcom = com.oid) - WHERE op.oprname = '===' - AND op.oprleft = 'boolean'::regtype AND op.oprright = 'real'::regtype; - operator_name | commutator_name | commutator_func ----------------+-----------------+---------------------------- - === | ==== | alter_op_test_fn_real_bool -(1 row) - --- Cannot set self as negator -ALTER OPERATOR === (boolean, real) SET (NEGATOR = ===); -ERROR: operator cannot be its own negator --- Test setting negator -ALTER OPERATOR === (boolean, real) SET (NEGATOR = !====); --- Check that oprnegate has been set on both the operator and negator, --- that they reference each other, and that the operator used is the existing --- one we created and not a new shell operator. 
-SELECT op.oprname AS operator_name, neg.oprname AS negator_name, - neg.oprcode AS negator_func - FROM pg_operator op - INNER JOIN pg_operator neg ON (op.oid = neg.oprnegate AND op.oprnegate = neg.oid) - WHERE op.oprname = '===' - AND op.oprleft = 'boolean'::regtype AND op.oprright = 'real'::regtype; - operator_name | negator_name | negator_func ----------------+--------------+---------------------------- - === | !==== | alter_op_test_fn_bool_real -(1 row) - --- Test that no-op set succeeds -ALTER OPERATOR === (boolean, real) SET (NEGATOR = !====); -ALTER OPERATOR === (boolean, real) SET (COMMUTATOR = ====); -ALTER OPERATOR === (boolean, real) SET (MERGES); -ALTER OPERATOR === (boolean, real) SET (HASHES); --- Check that the final state of the operator is as we expect -SELECT oprcanmerge, oprcanhash, - pg_describe_object('pg_operator'::regclass, oprcom, 0) AS commutator, - pg_describe_object('pg_operator'::regclass, oprnegate, 0) AS negator - FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'real'::regtype; - oprcanmerge | oprcanhash | commutator | negator --------------+------------+-----------------------------+------------------------------ - t | t | operator ====(real,boolean) | operator !====(boolean,real) -(1 row) - --- Cannot change commutator, negator, merges, and hashes when already set -CREATE OPERATOR @= ( - LEFTARG = real, - RIGHTARG = boolean, - PROCEDURE = alter_op_test_fn_real_bool -); -CREATE OPERATOR @!= ( - LEFTARG = boolean, - RIGHTARG = real, - PROCEDURE = alter_op_test_fn_bool_real -); -ALTER OPERATOR === (boolean, real) SET (COMMUTATOR = @=); -ERROR: operator attribute "commutator" cannot be changed if it has already been set -ALTER OPERATOR === (boolean, real) SET (NEGATOR = @!=); -ERROR: operator attribute "negator" cannot be changed if it has already been set -ALTER OPERATOR === (boolean, real) SET (MERGES = false); -ERROR: operator attribute "merges" cannot be changed if it has already been set -ALTER OPERATOR === (boolean, real) SET (HASHES = false); -ERROR: operator attribute "hashes" cannot be changed if it has already been set --- Cannot set an operator that already has a commutator as the commutator -ALTER OPERATOR @=(real, boolean) SET (COMMUTATOR = ===); -ERROR: commutator operator === is already the commutator of operator ==== --- Cannot set an operator that already has a negator as the negator -ALTER OPERATOR @!=(boolean, real) SET (NEGATOR = ===); -ERROR: negator operator === is already the negator of operator !==== --- Check no changes made -SELECT oprcanmerge, oprcanhash, - pg_describe_object('pg_operator'::regclass, oprcom, 0) AS commutator, - pg_describe_object('pg_operator'::regclass, oprnegate, 0) AS negator - FROM pg_operator WHERE oprname = '===' - AND oprleft = 'boolean'::regtype AND oprright = 'real'::regtype; - oprcanmerge | oprcanhash | commutator | negator --------------+------------+-----------------------------+------------------------------ - t | t | operator ====(real,boolean) | operator !====(boolean,real) -(1 row) - --- --- Clean up --- -DROP USER regress_alter_op_user; -DROP OPERATOR === (boolean, boolean); -DROP OPERATOR === (boolean, real); -DROP OPERATOR ==== (real, boolean); -DROP OPERATOR !==== (boolean, real); -DROP OPERATOR @= (real, boolean); -DROP OPERATOR @!= (boolean, real); -DROP FUNCTION customcontsel(internal, oid, internal, integer); -DROP FUNCTION alter_op_test_fn(boolean, boolean); -DROP FUNCTION alter_op_test_fn_bool_real(boolean, real); -DROP FUNCTION 
alter_op_test_fn_real_bool(real, boolean); +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/misc.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/misc.out --- /Users/admin/pgsql/src/test/regress/expected/misc.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/misc.out 2024-12-13 13:20:09 @@ -1,398 +1,2 @@ --- --- MISC --- --- directory paths and dlsuffix are passed to us in environment variables -\getenv abs_srcdir PG_ABS_SRCDIR -\getenv abs_builddir PG_ABS_BUILDDIR -\getenv libdir PG_LIBDIR -\getenv dlsuffix PG_DLSUFFIX -\set regresslib :libdir '/regress' :dlsuffix -CREATE FUNCTION overpaid(emp) - RETURNS bool - AS :'regresslib' - LANGUAGE C STRICT; -CREATE FUNCTION reverse_name(name) - RETURNS name - AS :'regresslib' - LANGUAGE C STRICT; --- --- BTREE --- -UPDATE onek - SET unique1 = onek.unique1 + 1; -UPDATE onek - SET unique1 = onek.unique1 - 1; --- --- BTREE partial --- --- UPDATE onek2 --- SET unique1 = onek2.unique1 + 1; ---UPDATE onek2 --- SET unique1 = onek2.unique1 - 1; --- --- BTREE shutting out non-functional updates --- --- the following two tests seem to take a long time on some --- systems. This non-func update stuff needs to be examined --- more closely. - jolly (2/22/96) --- -SELECT two, stringu1, ten, string4 - INTO TABLE tmp - FROM onek; -UPDATE tmp - SET stringu1 = reverse_name(onek.stringu1) - FROM onek - WHERE onek.stringu1 = 'JBAAAA' and - onek.stringu1 = tmp.stringu1; -UPDATE tmp - SET stringu1 = reverse_name(onek2.stringu1) - FROM onek2 - WHERE onek2.stringu1 = 'JCAAAA' and - onek2.stringu1 = tmp.stringu1; -DROP TABLE tmp; ---UPDATE person* --- SET age = age + 1; ---UPDATE person* --- SET age = age + 3 --- WHERE name = 'linda'; --- --- copy --- -\set filename :abs_builddir '/results/onek.data' -COPY onek TO :'filename'; -CREATE TEMP TABLE onek_copy (LIKE onek); -COPY onek_copy FROM :'filename'; -SELECT * FROM onek EXCEPT ALL SELECT * FROM onek_copy; - unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 ----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- -(0 rows) - -SELECT * FROM onek_copy EXCEPT ALL SELECT * FROM onek; - unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 ----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- -(0 rows) - -\set filename :abs_builddir '/results/stud_emp.data' -COPY BINARY stud_emp TO :'filename'; -CREATE TEMP TABLE stud_emp_copy (LIKE stud_emp); -COPY BINARY stud_emp_copy FROM :'filename'; -SELECT * FROM stud_emp_copy; - name | age | location | salary | manager | gpa | percent --------+-----+------------+--------+---------+-----+--------- - jeff | 23 | (8,7.7) | 600 | sharon | 3.5 | - cim | 30 | (10.5,4.7) | 400 | | 3.4 | - linda | 19 | (0.9,6.1) | 100 | | 2.9 | -(3 rows) - --- --- test data for postquel functions --- -CREATE TABLE hobbies_r ( - name text, - person text -); -CREATE TABLE equipment_r ( - name text, - hobby text -); -INSERT INTO 
hobbies_r (name, person) - SELECT 'posthacking', p.name - FROM person* p - WHERE p.name = 'mike' or p.name = 'jeff'; -INSERT INTO hobbies_r (name, person) - SELECT 'basketball', p.name - FROM person p - WHERE p.name = 'joe' or p.name = 'sally'; -INSERT INTO hobbies_r (name) VALUES ('skywalking'); -INSERT INTO equipment_r (name, hobby) VALUES ('advil', 'posthacking'); -INSERT INTO equipment_r (name, hobby) VALUES ('peet''s coffee', 'posthacking'); -INSERT INTO equipment_r (name, hobby) VALUES ('hightops', 'basketball'); -INSERT INTO equipment_r (name, hobby) VALUES ('guts', 'skywalking'); --- --- postquel functions --- -CREATE FUNCTION hobbies(person) - RETURNS setof hobbies_r - AS 'select * from hobbies_r where person = $1.name' - LANGUAGE SQL; -CREATE FUNCTION hobby_construct(text, text) - RETURNS hobbies_r - AS 'select $1 as name, $2 as hobby' - LANGUAGE SQL; -CREATE FUNCTION hobby_construct_named(name text, hobby text) - RETURNS hobbies_r - AS 'select name, hobby' - LANGUAGE SQL; -CREATE FUNCTION hobbies_by_name(hobbies_r.name%TYPE) - RETURNS hobbies_r.person%TYPE - AS 'select person from hobbies_r where name = $1' - LANGUAGE SQL; -NOTICE: type reference hobbies_r.name%TYPE converted to text -NOTICE: type reference hobbies_r.person%TYPE converted to text -CREATE FUNCTION equipment(hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where hobby = $1.name' - LANGUAGE SQL; -CREATE FUNCTION equipment_named(hobby hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where equipment_r.hobby = equipment_named.hobby.name' - LANGUAGE SQL; -CREATE FUNCTION equipment_named_ambiguous_1a(hobby hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where hobby = equipment_named_ambiguous_1a.hobby.name' - LANGUAGE SQL; -CREATE FUNCTION equipment_named_ambiguous_1b(hobby hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where equipment_r.hobby = hobby.name' - LANGUAGE SQL; -CREATE FUNCTION equipment_named_ambiguous_1c(hobby hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where hobby = hobby.name' - LANGUAGE SQL; -CREATE FUNCTION equipment_named_ambiguous_2a(hobby text) - RETURNS setof equipment_r - AS 'select * from equipment_r where hobby = equipment_named_ambiguous_2a.hobby' - LANGUAGE SQL; -CREATE FUNCTION equipment_named_ambiguous_2b(hobby text) - RETURNS setof equipment_r - AS 'select * from equipment_r where equipment_r.hobby = hobby' - LANGUAGE SQL; --- --- mike does post_hacking, --- joe and sally play basketball, and --- everyone else does nothing. --- -SELECT p.name, name(p.hobbies) FROM ONLY person p; - name | name --------+------------- - mike | posthacking - joe | basketball - sally | basketball -(3 rows) - --- --- as above, but jeff also does post_hacking. --- -SELECT p.name, name(p.hobbies) FROM person* p; - name | name --------+------------- - mike | posthacking - joe | basketball - sally | basketball - jeff | posthacking -(4 rows) - --- --- the next two queries demonstrate how functions generate bogus duplicates. --- this is a "feature" .. 
--- -SELECT DISTINCT hobbies_r.name, name(hobbies_r.equipment) FROM hobbies_r - ORDER BY 1,2; - name | name --------------+--------------- - basketball | hightops - posthacking | advil - posthacking | peet's coffee - skywalking | guts -(4 rows) - -SELECT hobbies_r.name, (hobbies_r.equipment).name FROM hobbies_r; - name | name --------------+--------------- - posthacking | advil - posthacking | peet's coffee - posthacking | advil - posthacking | peet's coffee - basketball | hightops - basketball | hightops - skywalking | guts -(7 rows) - --- --- mike needs advil and peet's coffee, --- joe and sally need hightops, and --- everyone else is fine. --- -SELECT p.name, name(p.hobbies), name(equipment(p.hobbies)) FROM ONLY person p; - name | name | name --------+-------------+--------------- - mike | posthacking | advil - mike | posthacking | peet's coffee - joe | basketball | hightops - sally | basketball | hightops -(4 rows) - --- --- as above, but jeff needs advil and peet's coffee as well. --- -SELECT p.name, name(p.hobbies), name(equipment(p.hobbies)) FROM person* p; - name | name | name --------+-------------+--------------- - mike | posthacking | advil - mike | posthacking | peet's coffee - joe | basketball | hightops - sally | basketball | hightops - jeff | posthacking | advil - jeff | posthacking | peet's coffee -(6 rows) - --- --- just like the last two, but make sure that the target list fixup and --- unflattening is being done correctly. --- -SELECT name(equipment(p.hobbies)), p.name, name(p.hobbies) FROM ONLY person p; - name | name | name ----------------+-------+------------- - advil | mike | posthacking - peet's coffee | mike | posthacking - hightops | joe | basketball - hightops | sally | basketball -(4 rows) - -SELECT (p.hobbies).equipment.name, p.name, name(p.hobbies) FROM person* p; - name | name | name ----------------+-------+------------- - advil | mike | posthacking - peet's coffee | mike | posthacking - hightops | joe | basketball - hightops | sally | basketball - advil | jeff | posthacking - peet's coffee | jeff | posthacking -(6 rows) - -SELECT (p.hobbies).equipment.name, name(p.hobbies), p.name FROM ONLY person p; - name | name | name ----------------+-------------+------- - advil | posthacking | mike - peet's coffee | posthacking | mike - hightops | basketball | joe - hightops | basketball | sally -(4 rows) - -SELECT name(equipment(p.hobbies)), name(p.hobbies), p.name FROM person* p; - name | name | name ----------------+-------------+------- - advil | posthacking | mike - peet's coffee | posthacking | mike - hightops | basketball | joe - hightops | basketball | sally - advil | posthacking | jeff - peet's coffee | posthacking | jeff -(6 rows) - -SELECT name(equipment(hobby_construct(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment(hobby_construct_named(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment_named(hobby_construct_named(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment_named_ambiguous_1a(hobby_construct_named(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment_named_ambiguous_1b(hobby_construct_named(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment_named_ambiguous_1c(hobby_construct_named(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment_named_ambiguous_2a(text 'skywalking')); - name ------- - guts -(1 row) - -SELECT 
name(equipment_named_ambiguous_2b(text 'skywalking')); - name ---------------- - advil - peet's coffee - hightops - guts -(4 rows) - -SELECT hobbies_by_name('basketball'); - hobbies_by_name ------------------ - joe -(1 row) - -SELECT name, overpaid(emp.*) FROM emp; - name | overpaid ---------+---------- - sharon | t - sam | t - bill | t - jeff | f - cim | f - linda | f -(6 rows) - --- --- Try a few cases with SQL-spec row constructor expressions --- -SELECT * FROM equipment(ROW('skywalking', 'mer')); - name | hobby -------+------------ - guts | skywalking -(1 row) - -SELECT name(equipment(ROW('skywalking', 'mer'))); - name ------- - guts -(1 row) - -SELECT *, name(equipment(h.*)) FROM hobbies_r h; - name | person | name --------------+--------+--------------- - posthacking | mike | advil - posthacking | mike | peet's coffee - posthacking | jeff | advil - posthacking | jeff | peet's coffee - basketball | joe | hightops - basketball | sally | hightops - skywalking | | guts -(7 rows) - -SELECT *, (equipment(CAST((h.*) AS hobbies_r))).name FROM hobbies_r h; - name | person | name --------------+--------+--------------- - posthacking | mike | advil - posthacking | mike | peet's coffee - posthacking | jeff | advil - posthacking | jeff | peet's coffee - basketball | joe | hightops - basketball | sally | hightops - skywalking | | guts -(7 rows) - --- --- functional joins --- --- --- instance rules --- --- --- rewrite rules --- +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/async.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/async.out --- /Users/admin/pgsql/src/test/regress/expected/async.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/async.out 2024-12-13 13:20:09 @@ -1,42 +1,2 @@ --- --- ASYNC --- ---Should work. Send a valid message via a valid channel name -SELECT pg_notify('notify_async1','sample message1'); - pg_notify ------------ - -(1 row) - -SELECT pg_notify('notify_async1',''); - pg_notify ------------ - -(1 row) - -SELECT pg_notify('notify_async1',NULL); - pg_notify ------------ - -(1 row) - --- Should fail. Send a valid message via an invalid channel name -SELECT pg_notify('','sample message1'); -ERROR: channel name cannot be empty -SELECT pg_notify(NULL,'sample message1'); -ERROR: channel name cannot be empty -SELECT pg_notify('notify_async_channel_name_too_long______________________________','sample_message1'); -ERROR: channel name too long ---Should work. Valid NOTIFY/LISTEN/UNLISTEN commands -NOTIFY notify_async2; -LISTEN notify_async2; -UNLISTEN notify_async2; -UNLISTEN *; --- Should return zero while there are no pending notifications. --- src/test/isolation/specs/async-notify.spec tests for actual usage. -SELECT pg_notification_queue_usage(); - pg_notification_queue_usage ------------------------------ - 0 -(1 row) - +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? 
diff -U3 /Users/admin/pgsql/src/test/regress/expected/dbsize.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/dbsize.out --- /Users/admin/pgsql/src/test/regress/expected/dbsize.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/dbsize.out 2024-12-13 13:20:09 @@ -1,203 +1,2 @@ -SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM - (VALUES (10::bigint), (1000::bigint), (1000000::bigint), - (1000000000::bigint), (1000000000000::bigint), - (1000000000000000::bigint)) x(size); - size | pg_size_pretty | pg_size_pretty -------------------+----------------+---------------- - 10 | 10 bytes | -10 bytes - 1000 | 1000 bytes | -1000 bytes - 1000000 | 977 kB | -977 kB - 1000000000 | 954 MB | -954 MB - 1000000000000 | 931 GB | -931 GB - 1000000000000000 | 909 TB | -909 TB -(6 rows) - -SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM - (VALUES (10::numeric), (1000::numeric), (1000000::numeric), - (1000000000::numeric), (1000000000000::numeric), - (1000000000000000::numeric), - (10.5::numeric), (1000.5::numeric), (1000000.5::numeric), - (1000000000.5::numeric), (1000000000000.5::numeric), - (1000000000000000.5::numeric)) x(size); - size | pg_size_pretty | pg_size_pretty ---------------------+----------------+---------------- - 10 | 10 bytes | -10 bytes - 1000 | 1000 bytes | -1000 bytes - 1000000 | 977 kB | -977 kB - 1000000000 | 954 MB | -954 MB - 1000000000000 | 931 GB | -931 GB - 1000000000000000 | 909 TB | -909 TB - 10.5 | 10.5 bytes | -10.5 bytes - 1000.5 | 1000.5 bytes | -1000.5 bytes - 1000000.5 | 977 kB | -977 kB - 1000000000.5 | 954 MB | -954 MB - 1000000000000.5 | 931 GB | -931 GB - 1000000000000000.5 | 909 TB | -909 TB -(12 rows) - --- test where units change up -SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM - (VALUES (10239::bigint), (10240::bigint), - (10485247::bigint), (10485248::bigint), - (10736893951::bigint), (10736893952::bigint), - (10994579406847::bigint), (10994579406848::bigint), - (11258449312612351::bigint), (11258449312612352::bigint)) x(size); - size | pg_size_pretty | pg_size_pretty --------------------+----------------+---------------- - 10239 | 10239 bytes | -10239 bytes - 10240 | 10 kB | -10 kB - 10485247 | 10239 kB | -10239 kB - 10485248 | 10 MB | -10 MB - 10736893951 | 10239 MB | -10239 MB - 10736893952 | 10 GB | -10 GB - 10994579406847 | 10239 GB | -10239 GB - 10994579406848 | 10 TB | -10 TB - 11258449312612351 | 10239 TB | -10239 TB - 11258449312612352 | 10 PB | -10 PB -(10 rows) - -SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM - (VALUES (10239::numeric), (10240::numeric), - (10485247::numeric), (10485248::numeric), - (10736893951::numeric), (10736893952::numeric), - (10994579406847::numeric), (10994579406848::numeric), - (11258449312612351::numeric), (11258449312612352::numeric), - (11528652096115048447::numeric), (11528652096115048448::numeric)) x(size); - size | pg_size_pretty | pg_size_pretty -----------------------+----------------+---------------- - 10239 | 10239 bytes | -10239 bytes - 10240 | 10 kB | -10 kB - 10485247 | 10239 kB | -10239 kB - 10485248 | 10 MB | -10 MB - 10736893951 | 10239 MB | -10239 MB - 10736893952 | 10 GB | -10 GB - 10994579406847 | 10239 GB | -10239 GB - 10994579406848 | 10 TB | -10 TB - 11258449312612351 | 10239 TB | -10239 TB - 11258449312612352 | 10 PB | -10 PB - 11528652096115048447 | 10239 PB | -10239 PB - 11528652096115048448 | 10240 PB | -10240 PB -(12 rows) - --- Ensure we get the expected 
results when passing the extremities of bigint -SELECT pg_size_pretty('-9223372036854775808'::bigint), - pg_size_pretty('9223372036854775807'::bigint); - pg_size_pretty | pg_size_pretty -----------------+---------------- - -8192 PB | 8192 PB -(1 row) - --- pg_size_bytes() tests -SELECT size, pg_size_bytes(size) FROM - (VALUES ('1'), ('123bytes'), ('256 B'), ('1kB'), ('1MB'), (' 1 GB'), ('1.5 GB '), - ('1TB'), ('3000 TB'), ('1e6 MB'), ('99 PB')) x(size); - size | pg_size_bytes -----------+-------------------- - 1 | 1 - 123bytes | 123 - 256 B | 256 - 1kB | 1024 - 1MB | 1048576 - 1 GB | 1073741824 - 1.5 GB | 1610612736 - 1TB | 1099511627776 - 3000 TB | 3298534883328000 - 1e6 MB | 1048576000000 - 99 PB | 111464090777419776 -(11 rows) - --- case-insensitive units are supported -SELECT size, pg_size_bytes(size) FROM - (VALUES ('1'), ('123bYteS'), ('1kb'), ('1mb'), (' 1 Gb'), ('1.5 gB '), - ('1tb'), ('3000 tb'), ('1e6 mb'), ('99 pb')) x(size); - size | pg_size_bytes -----------+-------------------- - 1 | 1 - 123bYteS | 123 - 1kb | 1024 - 1mb | 1048576 - 1 Gb | 1073741824 - 1.5 gB | 1610612736 - 1tb | 1099511627776 - 3000 tb | 3298534883328000 - 1e6 mb | 1048576000000 - 99 pb | 111464090777419776 -(10 rows) - --- negative numbers are supported -SELECT size, pg_size_bytes(size) FROM - (VALUES ('-1'), ('-123bytes'), ('-1kb'), ('-1mb'), (' -1 Gb'), ('-1.5 gB '), - ('-1tb'), ('-3000 TB'), ('-10e-1 MB'), ('-99 PB')) x(size); - size | pg_size_bytes ------------+--------------------- - -1 | -1 - -123bytes | -123 - -1kb | -1024 - -1mb | -1048576 - -1 Gb | -1073741824 - -1.5 gB | -1610612736 - -1tb | -1099511627776 - -3000 TB | -3298534883328000 - -10e-1 MB | -1048576 - -99 PB | -111464090777419776 -(10 rows) - --- different cases with allowed points -SELECT size, pg_size_bytes(size) FROM - (VALUES ('-1.'), ('-1.kb'), ('-1. kb'), ('-0. gb'), - ('-.1'), ('-.1kb'), ('-.1 kb'), ('-.0 gb')) x(size); - size | pg_size_bytes ---------+--------------- - -1. | -1 - -1.kb | -1024 - -1. kb | -1024 - -0. gb | 0 - -.1 | 0 - -.1kb | -102 - -.1 kb | -102 - -.0 gb | 0 -(8 rows) - --- invalid inputs -SELECT pg_size_bytes('1 AB'); -ERROR: invalid size: "1 AB" -DETAIL: Invalid size unit: "AB". -HINT: Valid units are "bytes", "B", "kB", "MB", "GB", "TB", and "PB". -SELECT pg_size_bytes('1 AB A'); -ERROR: invalid size: "1 AB A" -DETAIL: Invalid size unit: "AB A". -HINT: Valid units are "bytes", "B", "kB", "MB", "GB", "TB", and "PB". -SELECT pg_size_bytes('1 AB A '); -ERROR: invalid size: "1 AB A " -DETAIL: Invalid size unit: "AB A". -HINT: Valid units are "bytes", "B", "kB", "MB", "GB", "TB", and "PB". -SELECT pg_size_bytes('9223372036854775807.9'); -ERROR: bigint out of range -SELECT pg_size_bytes('1e100'); -ERROR: bigint out of range -SELECT pg_size_bytes('1e1000000000000000000'); -ERROR: value overflows numeric format -SELECT pg_size_bytes('1 byte'); -- the singular "byte" is not supported -ERROR: invalid size: "1 byte" -DETAIL: Invalid size unit: "byte". -HINT: Valid units are "bytes", "B", "kB", "MB", "GB", "TB", and "PB". -SELECT pg_size_bytes(''); -ERROR: invalid size: "" -SELECT pg_size_bytes('kb'); -ERROR: invalid size: "kb" -SELECT pg_size_bytes('..'); -ERROR: invalid size: ".." -SELECT pg_size_bytes('-.'); -ERROR: invalid size: "-." -SELECT pg_size_bytes('-.kb'); -ERROR: invalid size: "-.kb" -SELECT pg_size_bytes('-. kb'); -ERROR: invalid size: "-. kb" -SELECT pg_size_bytes('.+912'); -ERROR: invalid size: ".+912" -SELECT pg_size_bytes('+912+ kB'); -ERROR: invalid size: "+912+ kB" -DETAIL: Invalid size unit: "+ kB". 
-HINT: Valid units are "bytes", "B", "kB", "MB", "GB", "TB", and "PB". -SELECT pg_size_bytes('++123 kB'); -ERROR: invalid size: "++123 kB" +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/merge.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/merge.out --- /Users/admin/pgsql/src/test/regress/expected/merge.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/merge.out 2024-12-13 13:20:09 @@ -1,2773 +1,2 @@ --- --- MERGE --- -CREATE USER regress_merge_privs; -CREATE USER regress_merge_no_privs; -CREATE USER regress_merge_none; -DROP TABLE IF EXISTS target; -NOTICE: table "target" does not exist, skipping -DROP TABLE IF EXISTS source; -NOTICE: table "source" does not exist, skipping -CREATE TABLE target (tid integer, balance integer) - WITH (autovacuum_enabled=off); -CREATE TABLE source (sid integer, delta integer) -- no index - WITH (autovacuum_enabled=off); -INSERT INTO target VALUES (1, 10); -INSERT INTO target VALUES (2, 20); -INSERT INTO target VALUES (3, 30); -SELECT t.ctid is not null as matched, t.*, s.* FROM source s FULL OUTER JOIN target t ON s.sid = t.tid ORDER BY t.tid, s.sid; - matched | tid | balance | sid | delta ----------+-----+---------+-----+------- - t | 1 | 10 | | - t | 2 | 20 | | - t | 3 | 30 | | -(3 rows) - -ALTER TABLE target OWNER TO regress_merge_privs; -ALTER TABLE source OWNER TO regress_merge_privs; -CREATE TABLE target2 (tid integer, balance integer) - WITH (autovacuum_enabled=off); -CREATE TABLE source2 (sid integer, delta integer) - WITH (autovacuum_enabled=off); -ALTER TABLE target2 OWNER TO regress_merge_no_privs; -ALTER TABLE source2 OWNER TO regress_merge_no_privs; -GRANT INSERT ON target TO regress_merge_no_privs; -SET SESSION AUTHORIZATION regress_merge_privs; -EXPLAIN (COSTS OFF) -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DELETE; - QUERY PLAN ----------------------------------------- - Merge on target t - -> Merge Join - Merge Cond: (t.tid = s.sid) - -> Sort - Sort Key: t.tid - -> Seq Scan on target t - -> Sort - Sort Key: s.sid - -> Seq Scan on source s -(9 rows) - --- --- Errors --- -MERGE INTO target t RANDOMWORD -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -ERROR: syntax error at or near "RANDOMWORD" -LINE 1: MERGE INTO target t RANDOMWORD - ^ --- MATCHED/INSERT error -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - INSERT DEFAULT VALUES; -ERROR: syntax error at or near "INSERT" -LINE 5: INSERT DEFAULT VALUES; - ^ --- NOT MATCHED BY SOURCE/INSERT error -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED BY SOURCE THEN - INSERT DEFAULT VALUES; -ERROR: syntax error at or near "INSERT" -LINE 5: INSERT DEFAULT VALUES; - ^ --- incorrectly specifying INTO target -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT INTO target DEFAULT VALUES; -ERROR: syntax error at or near "INTO" -LINE 5: INSERT INTO target DEFAULT VALUES; - ^ --- Multiple VALUES clause -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT VALUES (1,1), (2,2); -ERROR: syntax error at or near "," -LINE 5: INSERT VALUES (1,1), (2,2); - ^ --- SELECT query for INSERT -MERGE INTO target t 
-USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT SELECT (1, 1); -ERROR: syntax error at or near "SELECT" -LINE 5: INSERT SELECT (1, 1); - ^ --- NOT MATCHED/UPDATE -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - UPDATE SET balance = 0; -ERROR: syntax error at or near "UPDATE" -LINE 5: UPDATE SET balance = 0; - ^ --- NOT MATCHED BY TARGET/UPDATE -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED BY TARGET THEN - UPDATE SET balance = 0; -ERROR: syntax error at or near "UPDATE" -LINE 5: UPDATE SET balance = 0; - ^ --- UPDATE tablename -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE target SET balance = 0; -ERROR: syntax error at or near "target" -LINE 5: UPDATE target SET balance = 0; - ^ --- source and target names the same -MERGE INTO target -USING target -ON tid = tid -WHEN MATCHED THEN DO NOTHING; -ERROR: name "target" specified more than once -DETAIL: The name is used both as MERGE target table and data source. --- used in a CTE without RETURNING -WITH foo AS ( - MERGE INTO target USING source ON (true) - WHEN MATCHED THEN DELETE -) SELECT * FROM foo; -ERROR: WITH query "foo" does not have a RETURNING clause -LINE 4: ) SELECT * FROM foo; - ^ --- used in COPY without RETURNING -COPY ( - MERGE INTO target USING source ON (true) - WHEN MATCHED THEN DELETE -) TO stdout; -ERROR: COPY query must have a RETURNING clause --- unsupported relation types --- materialized view -CREATE MATERIALIZED VIEW mv AS SELECT * FROM target; -MERGE INTO mv t -USING source s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT DEFAULT VALUES; -ERROR: cannot execute MERGE on relation "mv" -DETAIL: This operation is not supported for materialized views. -DROP MATERIALIZED VIEW mv; --- permissions -SET SESSION AUTHORIZATION regress_merge_none; -MERGE INTO target -USING (SELECT 1) -ON true -WHEN MATCHED THEN - DO NOTHING; -ERROR: permission denied for table target -SET SESSION AUTHORIZATION regress_merge_privs; -MERGE INTO target -USING source2 -ON target.tid = source2.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -ERROR: permission denied for table source2 -GRANT INSERT ON target TO regress_merge_no_privs; -SET SESSION AUTHORIZATION regress_merge_no_privs; -MERGE INTO target -USING source2 -ON target.tid = source2.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -ERROR: permission denied for table target -GRANT UPDATE ON target2 TO regress_merge_privs; -SET SESSION AUTHORIZATION regress_merge_privs; -MERGE INTO target2 -USING source -ON target2.tid = source.sid -WHEN MATCHED THEN - DELETE; -ERROR: permission denied for table target2 -MERGE INTO target2 -USING source -ON target2.tid = source.sid -WHEN NOT MATCHED THEN - INSERT DEFAULT VALUES; -ERROR: permission denied for table target2 --- check if the target can be accessed from source relation subquery; we should --- not be able to do so -MERGE INTO target t -USING (SELECT * FROM source WHERE t.tid > sid) s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT DEFAULT VALUES; -ERROR: invalid reference to FROM-clause entry for table "t" -LINE 2: USING (SELECT * FROM source WHERE t.tid > sid) s - ^ -DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. 
--- --- initial tests --- --- zero rows in source has no effect -MERGE INTO target -USING source -ON target.tid = source.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DELETE; -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT DEFAULT VALUES; -ROLLBACK; --- insert some non-matching source rows to work from -INSERT INTO source VALUES (4, 40); -SELECT * FROM source ORDER BY sid; - sid | delta ------+------- - 4 | 40 -(1 row) - -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 -(3 rows) - -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - DO NOTHING; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DELETE; -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT DEFAULT VALUES; -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - | -(4 rows) - -ROLLBACK; --- DELETE/INSERT not matched by source/target -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED BY SOURCE THEN - DELETE -WHEN NOT MATCHED BY TARGET THEN - INSERT VALUES (s.sid, s.delta) -RETURNING merge_action(), t.*; - merge_action | tid | balance ---------------+-----+--------- - DELETE | 1 | 10 - DELETE | 2 | 20 - DELETE | 3 | 30 - INSERT | 4 | 40 -(4 rows) - -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 4 | 40 -(1 row) - -ROLLBACK; --- index plans -INSERT INTO target SELECT generate_series(1000,2500), 0; -ALTER TABLE target ADD PRIMARY KEY (tid); -ANALYZE target; -EXPLAIN (COSTS OFF) -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; - QUERY PLAN ----------------------------------------- - Merge on target t - -> Hash Join - Hash Cond: (s.sid = t.tid) - -> Seq Scan on source s - -> Hash - -> Seq Scan on target t -(6 rows) - -EXPLAIN (COSTS OFF) -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DELETE; - QUERY PLAN ----------------------------------------- - Merge on target t - -> Hash Join - Hash Cond: (s.sid = t.tid) - -> Seq Scan on source s - -> Hash - -> Seq Scan on target t -(6 rows) - -EXPLAIN (COSTS OFF) -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT VALUES (4, NULL); - QUERY PLAN ----------------------------------------- - Merge on target t - -> Hash Left Join - Hash Cond: (s.sid = t.tid) - -> Seq Scan on source s - -> Hash - -> Seq Scan on target t -(6 rows) - -DELETE FROM target WHERE tid > 100; -ANALYZE target; --- insert some matching source rows to work from -INSERT INTO source VALUES (2, 5); -INSERT INTO source VALUES (3, 20); -SELECT * FROM source ORDER BY sid; - sid | delta ------+------- - 2 | 5 - 3 | 20 - 4 | 40 -(3 rows) - -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 -(3 rows) - --- equivalent of an UPDATE join -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 0 - 3 | 0 -(3 rows) - -ROLLBACK; --- equivalent of a DELETE join 
-BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DELETE; -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 -(1 row) - -ROLLBACK; -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DO NOTHING; -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 -(3 rows) - -ROLLBACK; -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT VALUES (4, NULL); -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - 4 | -(4 rows) - -ROLLBACK; --- duplicate source row causes multiple target row update ERROR -INSERT INTO source VALUES (2, 5); -SELECT * FROM source ORDER BY sid; - sid | delta ------+------- - 2 | 5 - 2 | 5 - 3 | 20 - 4 | 40 -(4 rows) - -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 -(3 rows) - -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0; -ERROR: MERGE command cannot affect row a second time -HINT: Ensure that not more than one source row matches any one target row. -ROLLBACK; -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - DELETE; -ERROR: MERGE command cannot affect row a second time -HINT: Ensure that not more than one source row matches any one target row. -ROLLBACK; --- remove duplicate MATCHED data from source data -DELETE FROM source WHERE sid = 2; -INSERT INTO source VALUES (2, 5); -SELECT * FROM source ORDER BY sid; - sid | delta ------+------- - 2 | 5 - 3 | 20 - 4 | 40 -(3 rows) - -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 -(3 rows) - --- duplicate source row on INSERT should fail because of target_pkey -INSERT INTO source VALUES (4, 40); -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT VALUES (4, NULL); -ERROR: duplicate key value violates unique constraint "target_pkey" -DETAIL: Key (tid)=(4) already exists. 
-SELECT * FROM target ORDER BY tid; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- remove duplicate NOT MATCHED data from source data -DELETE FROM source WHERE sid = 4; -INSERT INTO source VALUES (4, 40); -SELECT * FROM source ORDER BY sid; - sid | delta ------+------- - 2 | 5 - 3 | 20 - 4 | 40 -(3 rows) - -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 -(3 rows) - --- remove constraints -alter table target drop CONSTRAINT target_pkey; -alter table target alter column tid drop not null; --- multiple actions -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT VALUES (4, 4) -WHEN MATCHED THEN - UPDATE SET balance = 0; -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 0 - 3 | 0 - 4 | 4 -(4 rows) - -ROLLBACK; --- should be equivalent -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = 0 -WHEN NOT MATCHED THEN - INSERT VALUES (4, 4); -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 0 - 3 | 0 - 4 | 4 -(4 rows) - -ROLLBACK; --- column references --- do a simple equivalent of an UPDATE join -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = t.balance + s.delta; -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 25 - 3 | 50 -(3 rows) - -ROLLBACK; --- do a simple equivalent of an INSERT SELECT -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT VALUES (s.sid, s.delta); -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - 4 | 40 -(4 rows) - -ROLLBACK; --- and again with duplicate source rows -INSERT INTO source VALUES (5, 50); -INSERT INTO source VALUES (5, 50); --- do a simple equivalent of an INSERT SELECT -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT VALUES (s.sid, s.delta); -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - 4 | 40 - 5 | 50 - 5 | 50 -(6 rows) - -ROLLBACK; --- removing duplicate source rows -DELETE FROM source WHERE sid = 5; --- and again with explicitly identified column list -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT (tid, balance) VALUES (s.sid, s.delta); -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 20 - 3 | 30 - 4 | 40 -(4 rows) - -ROLLBACK; --- and again with a subtle error: referring to non-existent target row for NOT MATCHED -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT (tid, balance) VALUES (t.tid, s.delta); -ERROR: invalid reference to FROM-clause entry for table "t" -LINE 5: INSERT (tid, balance) VALUES (t.tid, s.delta); - ^ -DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. --- and again with a constant ON clause -BEGIN; -MERGE INTO target t -USING source AS s -ON (SELECT true) -WHEN NOT MATCHED THEN - INSERT (tid, balance) VALUES (t.tid, s.delta); -ERROR: invalid reference to FROM-clause entry for table "t" -LINE 5: INSERT (tid, balance) VALUES (t.tid, s.delta); - ^ -DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. 
-SELECT * FROM target ORDER BY tid; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; --- now the classic UPSERT -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN - UPDATE SET balance = t.balance + s.delta -WHEN NOT MATCHED THEN - INSERT VALUES (s.sid, s.delta); -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 25 - 3 | 50 - 4 | 40 -(4 rows) - -ROLLBACK; --- unreachable WHEN clause should ERROR -BEGIN; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED THEN /* Terminal WHEN clause for MATCHED */ - DELETE -WHEN MATCHED THEN - UPDATE SET balance = t.balance - s.delta; -ERROR: unreachable WHEN clause specified after unconditional WHEN clause -ROLLBACK; --- conditional WHEN clause -CREATE TABLE wq_target (tid integer not null, balance integer DEFAULT -1) - WITH (autovacuum_enabled=off); -CREATE TABLE wq_source (balance integer, sid integer) - WITH (autovacuum_enabled=off); -INSERT INTO wq_source (sid, balance) VALUES (1, 100); -BEGIN; --- try a simple INSERT with default values first -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN NOT MATCHED THEN - INSERT (tid) VALUES (s.sid); -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | -1 -(1 row) - -ROLLBACK; --- this time with a FALSE condition -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN NOT MATCHED AND FALSE THEN - INSERT (tid) VALUES (s.sid); -SELECT * FROM wq_target; - tid | balance ------+--------- -(0 rows) - --- this time with an actual condition which returns false -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN NOT MATCHED AND s.balance <> 100 THEN - INSERT (tid) VALUES (s.sid); -SELECT * FROM wq_target; - tid | balance ------+--------- -(0 rows) - -BEGIN; --- and now with a condition which returns true -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN NOT MATCHED AND s.balance = 100 THEN - INSERT (tid) VALUES (s.sid); -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | -1 -(1 row) - -ROLLBACK; --- conditions in the NOT MATCHED clause can only refer to source columns -BEGIN; -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN NOT MATCHED AND t.balance = 100 THEN - INSERT (tid) VALUES (s.sid); -ERROR: invalid reference to FROM-clause entry for table "t" -LINE 3: WHEN NOT MATCHED AND t.balance = 100 THEN - ^ -DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. -SELECT * FROM wq_target; -ERROR: current transaction is aborted, commands ignored until end of transaction block -ROLLBACK; -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN NOT MATCHED AND s.balance = 100 THEN - INSERT (tid) VALUES (s.sid); -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | -1 -(1 row) - --- conditions in NOT MATCHED BY SOURCE clause can only refer to target columns -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN NOT MATCHED BY SOURCE AND s.balance = 100 THEN - DELETE; -ERROR: invalid reference to FROM-clause entry for table "s" -LINE 3: WHEN NOT MATCHED BY SOURCE AND s.balance = 100 THEN - ^ -DETAIL: There is an entry for table "s", but it cannot be referenced from this part of the query. 
-MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN NOT MATCHED BY SOURCE AND t.balance = 100 THEN - DELETE; --- conditions in MATCHED clause can refer to both source and target -SELECT * FROM wq_source; - balance | sid ----------+----- - 100 | 1 -(1 row) - -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND s.balance = 100 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 99 -(1 row) - -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.balance = 100 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 99 -(1 row) - --- check if AND works -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.balance = 99 AND s.balance > 100 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 99 -(1 row) - -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.balance = 99 AND s.balance = 100 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 199 -(1 row) - --- check if OR works -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.balance = 99 OR s.balance > 100 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 199 -(1 row) - -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.balance = 199 OR s.balance > 100 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 299 -(1 row) - --- check source-side whole-row references -BEGIN; -MERGE INTO wq_target t -USING wq_source s ON (t.tid = s.sid) -WHEN matched and t = s or t.tid = s.sid THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 399 -(1 row) - -ROLLBACK; --- check if subqueries work in the conditions? 
-MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.balance > (SELECT max(balance) FROM target) THEN - UPDATE SET balance = t.balance + s.balance; --- check if we can access system columns in the conditions -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.xmin = t.xmax THEN - UPDATE SET balance = t.balance + s.balance; -ERROR: cannot use system column "xmin" in MERGE WHEN condition -LINE 3: WHEN MATCHED AND t.xmin = t.xmax THEN - ^ -MERGE INTO wq_target t -USING wq_source s ON t.tid = s.sid -WHEN MATCHED AND t.tableoid >= 0 THEN - UPDATE SET balance = t.balance + s.balance; -SELECT * FROM wq_target; - tid | balance ------+--------- - 1 | 499 -(1 row) - -DROP TABLE wq_target, wq_source; --- test triggers -create or replace function merge_trigfunc () returns trigger -language plpgsql as -$$ -DECLARE - line text; -BEGIN - SELECT INTO line format('%s %s %s trigger%s', - TG_WHEN, TG_OP, TG_LEVEL, CASE - WHEN TG_OP = 'INSERT' AND TG_LEVEL = 'ROW' - THEN format(' row: %s', NEW) - WHEN TG_OP = 'UPDATE' AND TG_LEVEL = 'ROW' - THEN format(' row: %s -> %s', OLD, NEW) - WHEN TG_OP = 'DELETE' AND TG_LEVEL = 'ROW' - THEN format(' row: %s', OLD) - END); - - RAISE NOTICE '%', line; - IF (TG_WHEN = 'BEFORE' AND TG_LEVEL = 'ROW') THEN - IF (TG_OP = 'DELETE') THEN - RETURN OLD; - ELSE - RETURN NEW; - END IF; - ELSE - RETURN NULL; - END IF; -END; -$$; -CREATE TRIGGER merge_bsi BEFORE INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_bsu BEFORE UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_bsd BEFORE DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_asi AFTER INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_asu AFTER UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_asd AFTER DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_bri BEFORE INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_bru BEFORE UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_brd BEFORE DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_ari AFTER INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_aru AFTER UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -CREATE TRIGGER merge_ard AFTER DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); --- now the classic UPSERT, with a DELETE -BEGIN; -UPDATE target SET balance = 0 WHERE tid = 3; -NOTICE: BEFORE UPDATE STATEMENT trigger -NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,0) -NOTICE: AFTER UPDATE ROW trigger row: (3,30) -> (3,0) -NOTICE: AFTER UPDATE STATEMENT trigger ---EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED AND t.balance > s.delta THEN - UPDATE SET balance = t.balance - s.delta -WHEN MATCHED THEN - DELETE -WHEN NOT MATCHED THEN - INSERT VALUES (s.sid, s.delta); -NOTICE: BEFORE INSERT STATEMENT trigger -NOTICE: BEFORE UPDATE STATEMENT trigger -NOTICE: BEFORE DELETE STATEMENT trigger -NOTICE: BEFORE DELETE ROW trigger row: (3,0) -NOTICE: BEFORE UPDATE ROW trigger row: (2,20) -> (2,15) -NOTICE: BEFORE INSERT ROW trigger row: (4,40) -NOTICE: AFTER DELETE ROW trigger row: (3,0) -NOTICE: AFTER 
UPDATE ROW trigger row: (2,20) -> (2,15) -NOTICE: AFTER INSERT ROW trigger row: (4,40) -NOTICE: AFTER DELETE STATEMENT trigger -NOTICE: AFTER UPDATE STATEMENT trigger -NOTICE: AFTER INSERT STATEMENT trigger -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 10 - 2 | 15 - 4 | 40 -(3 rows) - -ROLLBACK; --- UPSERT with UPDATE/DELETE when not matched by source -BEGIN; -DELETE FROM SOURCE WHERE sid = 2; -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED AND t.balance > s.delta THEN - UPDATE SET balance = t.balance - s.delta -WHEN MATCHED THEN - UPDATE SET balance = 0 -WHEN NOT MATCHED THEN - INSERT VALUES (s.sid, s.delta) -WHEN NOT MATCHED BY SOURCE AND tid = 1 THEN - UPDATE SET balance = 0 -WHEN NOT MATCHED BY SOURCE THEN - DELETE -RETURNING merge_action(), t.*; -NOTICE: BEFORE INSERT STATEMENT trigger -NOTICE: BEFORE UPDATE STATEMENT trigger -NOTICE: BEFORE DELETE STATEMENT trigger -NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,10) -NOTICE: BEFORE INSERT ROW trigger row: (4,40) -NOTICE: BEFORE DELETE ROW trigger row: (2,20) -NOTICE: BEFORE UPDATE ROW trigger row: (1,10) -> (1,0) -NOTICE: AFTER UPDATE ROW trigger row: (3,30) -> (3,10) -NOTICE: AFTER INSERT ROW trigger row: (4,40) -NOTICE: AFTER DELETE ROW trigger row: (2,20) -NOTICE: AFTER UPDATE ROW trigger row: (1,10) -> (1,0) -NOTICE: AFTER DELETE STATEMENT trigger -NOTICE: AFTER UPDATE STATEMENT trigger -NOTICE: AFTER INSERT STATEMENT trigger - merge_action | tid | balance ---------------+-----+--------- - UPDATE | 3 | 10 - INSERT | 4 | 40 - DELETE | 2 | 20 - UPDATE | 1 | 0 -(4 rows) - -SELECT * FROM target ORDER BY tid; - tid | balance ------+--------- - 1 | 0 - 3 | 10 - 4 | 40 -(3 rows) - -ROLLBACK; --- Test behavior of triggers that turn UPDATE/DELETE into no-ops -create or replace function skip_merge_op() returns trigger -language plpgsql as -$$ -BEGIN - RETURN NULL; -END; -$$; -SELECT * FROM target full outer join source on (sid = tid); - tid | balance | sid | delta ------+---------+-----+------- - 3 | 30 | 3 | 20 - 2 | 20 | 2 | 5 - | | 4 | 40 - 1 | 10 | | -(4 rows) - -create trigger merge_skip BEFORE INSERT OR UPDATE or DELETE - ON target FOR EACH ROW EXECUTE FUNCTION skip_merge_op(); -DO $$ -DECLARE - result integer; -BEGIN -MERGE INTO target t -USING source AS s -ON t.tid = s.sid -WHEN MATCHED AND s.sid = 3 THEN UPDATE SET balance = t.balance + s.delta -WHEN MATCHED THEN DELETE -WHEN NOT MATCHED THEN INSERT VALUES (sid, delta); -IF FOUND THEN - RAISE NOTICE 'Found'; -ELSE - RAISE NOTICE 'Not found'; -END IF; -GET DIAGNOSTICS result := ROW_COUNT; -RAISE NOTICE 'ROW_COUNT = %', result; -END; -$$; -NOTICE: BEFORE INSERT STATEMENT trigger -NOTICE: BEFORE UPDATE STATEMENT trigger -NOTICE: BEFORE DELETE STATEMENT trigger -NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,50) -NOTICE: BEFORE DELETE ROW trigger row: (2,20) -NOTICE: BEFORE INSERT ROW trigger row: (4,40) -NOTICE: AFTER DELETE STATEMENT trigger -NOTICE: AFTER UPDATE STATEMENT trigger -NOTICE: AFTER INSERT STATEMENT trigger -NOTICE: Not found -NOTICE: ROW_COUNT = 0 -SELECT * FROM target FULL OUTER JOIN source ON (sid = tid); - tid | balance | sid | delta ------+---------+-----+------- - 3 | 30 | 3 | 20 - 2 | 20 | 2 | 5 - | | 4 | 40 - 1 | 10 | | -(4 rows) - -DROP TRIGGER merge_skip ON target; -DROP FUNCTION skip_merge_op(); --- test from PL/pgSQL --- make sure MERGE INTO isn't interpreted to mean returning variables like SELECT INTO -BEGIN; -DO LANGUAGE plpgsql $$ -BEGIN -MERGE INTO target t -USING source AS s -ON t.tid = 
-WHEN MATCHED AND t.balance > s.delta THEN
- UPDATE SET balance = t.balance - s.delta;
-END;
-$$;
-NOTICE: BEFORE UPDATE STATEMENT trigger
-NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,10)
-NOTICE: BEFORE UPDATE ROW trigger row: (2,20) -> (2,15)
-NOTICE: AFTER UPDATE ROW trigger row: (3,30) -> (3,10)
-NOTICE: AFTER UPDATE ROW trigger row: (2,20) -> (2,15)
-NOTICE: AFTER UPDATE STATEMENT trigger
-ROLLBACK;
---source constants
-BEGIN;
-MERGE INTO target t
-USING (SELECT 9 AS sid, 57 AS delta) AS s
-ON t.tid = s.sid
-WHEN NOT MATCHED THEN
- INSERT (tid, balance) VALUES (s.sid, s.delta);
-NOTICE: BEFORE INSERT STATEMENT trigger
-NOTICE: BEFORE INSERT ROW trigger row: (9,57)
-NOTICE: AFTER INSERT ROW trigger row: (9,57)
-NOTICE: AFTER INSERT STATEMENT trigger
-SELECT * FROM target ORDER BY tid;
- tid | balance
------+---------
- 1 | 10
- 2 | 20
- 3 | 30
- 9 | 57
-(4 rows)
-
-ROLLBACK;
---source query
-BEGIN;
-MERGE INTO target t
-USING (SELECT sid, delta FROM source WHERE delta > 0) AS s
-ON t.tid = s.sid
-WHEN NOT MATCHED THEN
- INSERT (tid, balance) VALUES (s.sid, s.delta);
-NOTICE: BEFORE INSERT STATEMENT trigger
-NOTICE: BEFORE INSERT ROW trigger row: (4,40)
-NOTICE: AFTER INSERT ROW trigger row: (4,40)
-NOTICE: AFTER INSERT STATEMENT trigger
-SELECT * FROM target ORDER BY tid;
- tid | balance
------+---------
- 1 | 10
- 2 | 20
- 3 | 30
- 4 | 40
-(4 rows)
-
-ROLLBACK;
-BEGIN;
-MERGE INTO target t
-USING (SELECT sid, delta as newname FROM source WHERE delta > 0) AS s
-ON t.tid = s.sid
-WHEN NOT MATCHED THEN
- INSERT (tid, balance) VALUES (s.sid, s.newname);
-NOTICE: BEFORE INSERT STATEMENT trigger
-NOTICE: BEFORE INSERT ROW trigger row: (4,40)
-NOTICE: AFTER INSERT ROW trigger row: (4,40)
-NOTICE: AFTER INSERT STATEMENT trigger
-SELECT * FROM target ORDER BY tid;
- tid | balance
------+---------
- 1 | 10
- 2 | 20
- 3 | 30
- 4 | 40
-(4 rows)
-
-ROLLBACK;
---self-merge
-BEGIN;
-MERGE INTO target t1
-USING target t2
-ON t1.tid = t2.tid
-WHEN MATCHED THEN
- UPDATE SET balance = t1.balance + t2.balance
-WHEN NOT MATCHED THEN
- INSERT VALUES (t2.tid, t2.balance);
-NOTICE: BEFORE INSERT STATEMENT trigger
-NOTICE: BEFORE UPDATE STATEMENT trigger
-NOTICE: BEFORE UPDATE ROW trigger row: (1,10) -> (1,20)
-NOTICE: BEFORE UPDATE ROW trigger row: (2,20) -> (2,40)
-NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,60)
-NOTICE: AFTER UPDATE ROW trigger row: (1,10) -> (1,20)
-NOTICE: AFTER UPDATE ROW trigger row: (2,20) -> (2,40)
-NOTICE: AFTER UPDATE ROW trigger row: (3,30) -> (3,60)
-NOTICE: AFTER UPDATE STATEMENT trigger
-NOTICE: AFTER INSERT STATEMENT trigger
-SELECT * FROM target ORDER BY tid;
- tid | balance
------+---------
- 1 | 20
- 2 | 40
- 3 | 60
-(3 rows)
-
-ROLLBACK;
-BEGIN;
-MERGE INTO target t
-USING (SELECT tid as sid, balance as delta FROM target WHERE balance > 0) AS s
-ON t.tid = s.sid
-WHEN NOT MATCHED THEN
- INSERT (tid, balance) VALUES (s.sid, s.delta);
-NOTICE: BEFORE INSERT STATEMENT trigger
-NOTICE: AFTER INSERT STATEMENT trigger
-SELECT * FROM target ORDER BY tid;
- tid | balance
------+---------
- 1 | 10
- 2 | 20
- 3 | 30
-(3 rows)
-
-ROLLBACK;
-BEGIN;
-MERGE INTO target t
-USING
-(SELECT sid, max(delta) AS delta
- FROM source
- GROUP BY sid
- HAVING count(*) = 1
- ORDER BY sid ASC) AS s
-ON t.tid = s.sid
-WHEN NOT MATCHED THEN
- INSERT (tid, balance) VALUES (s.sid, s.delta);
-NOTICE: BEFORE INSERT STATEMENT trigger
-NOTICE: BEFORE INSERT ROW trigger row: (4,40)
-NOTICE: AFTER INSERT ROW trigger row: (4,40)
-NOTICE: AFTER INSERT STATEMENT trigger
-SELECT * FROM target ORDER BY tid;
- tid | balance
------+---------
- 1 | 10
- 2 | 20
- 3 | 30
- 4 | 40
-(4 rows)
-
-ROLLBACK;
--- plpgsql parameters and results
-BEGIN;
-CREATE FUNCTION merge_func (p_id integer, p_bal integer)
-RETURNS INTEGER
-LANGUAGE plpgsql
-AS $$
-DECLARE
- result integer;
-BEGIN
-MERGE INTO target t
-USING (SELECT p_id AS sid) AS s
-ON t.tid = s.sid
-WHEN MATCHED THEN
- UPDATE SET balance = t.balance - p_bal;
-IF FOUND THEN
- GET DIAGNOSTICS result := ROW_COUNT;
-END IF;
-RETURN result;
-END;
-$$;
-SELECT merge_func(3, 4);
-NOTICE: BEFORE UPDATE STATEMENT trigger
-NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,26)
-NOTICE: AFTER UPDATE ROW trigger row: (3,30) -> (3,26)
-NOTICE: AFTER UPDATE STATEMENT trigger
- merge_func
------------
- 1
-(1 row)
-
-SELECT * FROM target ORDER BY tid;
- tid | balance
------+---------
- 1 | 10
- 2 | 20
- 3 | 26
-(3 rows)
-
-ROLLBACK;
--- PREPARE
-BEGIN;
-prepare foom as merge into target t using (select 1 as sid) s on (t.tid = s.sid) when matched then update set balance = 1;
-execute foom;
-NOTICE: BEFORE UPDATE STATEMENT trigger
-NOTICE: BEFORE UPDATE ROW trigger row: (1,10) -> (1,1)
-NOTICE: AFTER UPDATE ROW trigger row: (1,10) -> (1,1)
-NOTICE: AFTER UPDATE STATEMENT trigger
-SELECT * FROM target ORDER BY tid;
- tid | balance
------+---------
- 1 | 1
- 2 | 20
- 3 | 30
-(3 rows)
-
-ROLLBACK;
-BEGIN;
-PREPARE foom2 (integer, integer) AS
-MERGE INTO target t
-USING (SELECT 1) s
-ON t.tid = $1
-WHEN MATCHED THEN
-UPDATE SET balance = $2;
---EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF)
-execute foom2 (1, 1);
-NOTICE: BEFORE UPDATE STATEMENT trigger
-NOTICE: BEFORE UPDATE ROW trigger row: (1,10) -> (1,1)
-NOTICE: AFTER UPDATE ROW trigger row: (1,10) -> (1,1)
-NOTICE: AFTER UPDATE STATEMENT trigger
-SELECT * FROM target ORDER BY tid;
- tid | balance
------+---------
- 1 | 1
- 2 | 20
- 3 | 30
-(3 rows)
-
-ROLLBACK;
--- subqueries in source relation
-CREATE TABLE sq_target (tid integer NOT NULL, balance integer)
- WITH (autovacuum_enabled=off);
-CREATE TABLE sq_source (delta integer, sid integer, balance integer DEFAULT 0)
- WITH (autovacuum_enabled=off);
-INSERT INTO sq_target(tid, balance) VALUES (1,100), (2,200), (3,300);
-INSERT INTO sq_source(sid, delta) VALUES (1,10), (2,20), (4,40);
-BEGIN;
-MERGE INTO sq_target t
-USING (SELECT * FROM sq_source) s
-ON tid = sid
-WHEN MATCHED AND t.balance > delta THEN
- UPDATE SET balance = t.balance + delta;
-SELECT * FROM sq_target;
- tid | balance
------+---------
- 3 | 300
- 1 | 110
- 2 | 220
-(3 rows)
-
-ROLLBACK;
--- try a view
-CREATE VIEW v AS SELECT * FROM sq_source WHERE sid < 2;
-BEGIN;
-MERGE INTO sq_target
-USING v
-ON tid = sid
-WHEN MATCHED THEN
- UPDATE SET balance = v.balance + delta;
-SELECT * FROM sq_target;
- tid | balance
------+---------
- 2 | 200
- 3 | 300
- 1 | 10
-(3 rows)
-
-ROLLBACK;
--- ambiguous reference to a column
-BEGIN;
-MERGE INTO sq_target
-USING v
-ON tid = sid
-WHEN MATCHED AND tid >= 2 THEN
- UPDATE SET balance = balance + delta
-WHEN NOT MATCHED THEN
- INSERT (balance, tid) VALUES (balance + delta, sid)
-WHEN MATCHED AND tid < 2 THEN
- DELETE;
-ERROR: column reference "balance" is ambiguous
-LINE 5: UPDATE SET balance = balance + delta
- ^
-ROLLBACK;
-BEGIN;
-INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10);
-MERGE INTO sq_target t
-USING v
-ON tid = sid
-WHEN MATCHED AND tid >= 2 THEN
- UPDATE SET balance = t.balance + delta
-WHEN NOT MATCHED THEN
- INSERT (balance, tid) VALUES (balance + delta, sid)
-WHEN MATCHED AND tid < 2 THEN
- DELETE;
-SELECT * FROM sq_target;
- tid | balance
------+---------
- 2 | 200
- 3 | 300
- -1 | -11
-(3 rows)
-
-ROLLBACK;
--- CTEs
-BEGIN;
-INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10);
-WITH targq AS (
- SELECT * FROM v
-)
-MERGE INTO sq_target t
-USING v
-ON tid = sid
-WHEN MATCHED AND tid >= 2 THEN
- UPDATE SET balance = t.balance + delta
-WHEN NOT MATCHED THEN
- INSERT (balance, tid) VALUES (balance + delta, sid)
-WHEN MATCHED AND tid < 2 THEN
- DELETE;
-ROLLBACK;
--- RETURNING
-SELECT * FROM sq_source ORDER BY sid;
- delta | sid | balance
--------+-----+---------
- 10 | 1 | 0
- 20 | 2 | 0
- 40 | 4 | 0
-(3 rows)
-
-SELECT * FROM sq_target ORDER BY tid;
- tid | balance
------+---------
- 1 | 100
- 2 | 200
- 3 | 300
-(3 rows)
-
-BEGIN;
-CREATE TABLE merge_actions(action text, abbrev text);
-INSERT INTO merge_actions VALUES ('INSERT', 'ins'), ('UPDATE', 'upd'), ('DELETE', 'del');
-MERGE INTO sq_target t
-USING sq_source s
-ON tid = sid
-WHEN MATCHED AND tid >= 2 THEN
- UPDATE SET balance = t.balance + delta
-WHEN NOT MATCHED THEN
- INSERT (balance, tid) VALUES (balance + delta, sid)
-WHEN MATCHED AND tid < 2 THEN
- DELETE
-RETURNING (SELECT abbrev FROM merge_actions
- WHERE action = merge_action()) AS action,
- t.*,
- CASE merge_action()
- WHEN 'INSERT' THEN 'Inserted '||t
- WHEN 'UPDATE' THEN 'Added '||delta||' to balance'
- WHEN 'DELETE' THEN 'Removed '||t
- END AS description;
- action | tid | balance | description
---------+-----+---------+---------------------
- del | 1 | 100 | Removed (1,100)
- upd | 2 | 220 | Added 20 to balance
- ins | 4 | 40 | Inserted (4,40)
-(3 rows)
-
-ROLLBACK;
--- error when using merge_action() outside MERGE
-SELECT merge_action() FROM sq_target;
-ERROR: MERGE_ACTION() can only be used in the RETURNING list of a MERGE command
-LINE 1: SELECT merge_action() FROM sq_target;
- ^
-UPDATE sq_target SET balance = balance + 1 RETURNING merge_action();
-ERROR: MERGE_ACTION() can only be used in the RETURNING list of a MERGE command
-LINE 1: ...ATE sq_target SET balance = balance + 1 RETURNING merge_acti...
- ^
--- RETURNING in CTEs
-CREATE TABLE sq_target_merge_log (tid integer NOT NULL, last_change text);
-INSERT INTO sq_target_merge_log VALUES (1, 'Original value');
-BEGIN;
-WITH m AS (
- MERGE INTO sq_target t
- USING sq_source s
- ON tid = sid
- WHEN MATCHED AND tid >= 2 THEN
- UPDATE SET balance = t.balance + delta
- WHEN NOT MATCHED THEN
- INSERT (balance, tid) VALUES (balance + delta, sid)
- WHEN MATCHED AND tid < 2 THEN
- DELETE
- RETURNING merge_action() AS action, t.*,
- CASE merge_action()
- WHEN 'INSERT' THEN 'Inserted '||t
- WHEN 'UPDATE' THEN 'Added '||delta||' to balance'
- WHEN 'DELETE' THEN 'Removed '||t
- END AS description
-), m2 AS (
- MERGE INTO sq_target_merge_log l
- USING m
- ON l.tid = m.tid
- WHEN MATCHED THEN
- UPDATE SET last_change = description
- WHEN NOT MATCHED THEN
- INSERT VALUES (m.tid, description)
- RETURNING action, merge_action() AS log_action, l.*
-)
-SELECT * FROM m2;
- action | log_action | tid | last_change
---------+------------+-----+---------------------
- DELETE | UPDATE | 1 | Removed (1,100)
- UPDATE | INSERT | 2 | Added 20 to balance
- INSERT | INSERT | 4 | Inserted (4,40)
-(3 rows)
-
-SELECT * FROM sq_target_merge_log ORDER BY tid;
- tid | last_change
------+---------------------
- 1 | Removed (1,100)
- 2 | Added 20 to balance
- 4 | Inserted (4,40)
-(3 rows)
-
-ROLLBACK;
--- COPY (MERGE ... RETURNING) TO ...
-BEGIN;
-COPY (
- MERGE INTO sq_target t
- USING sq_source s
- ON tid = sid
- WHEN MATCHED AND tid >= 2 THEN
- UPDATE SET balance = t.balance + delta
- WHEN NOT MATCHED THEN
- INSERT (balance, tid) VALUES (balance + delta, sid)
- WHEN MATCHED AND tid < 2 THEN
- DELETE
- RETURNING merge_action(), t.*
-) TO stdout;
-DELETE 1 100
-UPDATE 2 220
-INSERT 4 40
-ROLLBACK;
--- SQL function with MERGE ... RETURNING
-BEGIN;
-CREATE FUNCTION merge_into_sq_target(sid int, balance int, delta int,
- OUT action text, OUT tid int, OUT new_balance int)
-LANGUAGE sql AS
-$$
- MERGE INTO sq_target t
- USING (VALUES ($1, $2, $3)) AS v(sid, balance, delta)
- ON tid = v.sid
- WHEN MATCHED AND tid >= 2 THEN
- UPDATE SET balance = t.balance + v.delta
- WHEN NOT MATCHED THEN
- INSERT (balance, tid) VALUES (v.balance + v.delta, v.sid)
- WHEN MATCHED AND tid < 2 THEN
- DELETE
- RETURNING merge_action(), t.*;
-$$;
-SELECT m.*
-FROM (VALUES (1, 0, 0), (3, 0, 20), (4, 100, 10)) AS v(sid, balance, delta),
-LATERAL (SELECT action, tid, new_balance FROM merge_into_sq_target(sid, balance, delta)) m;
- action | tid | new_balance
---------+-----+-------------
- DELETE | 1 | 100
- UPDATE | 3 | 320
- INSERT | 4 | 110
-(3 rows)
-
-ROLLBACK;
--- SQL SRF with MERGE ... RETURNING
-BEGIN;
-CREATE FUNCTION merge_sq_source_into_sq_target()
-RETURNS TABLE (action text, tid int, balance int)
-LANGUAGE sql AS
-$$
- MERGE INTO sq_target t
- USING sq_source s
- ON tid = sid
- WHEN MATCHED AND tid >= 2 THEN
- UPDATE SET balance = t.balance + delta
- WHEN NOT MATCHED THEN
- INSERT (balance, tid) VALUES (balance + delta, sid)
- WHEN MATCHED AND tid < 2 THEN
- DELETE
- RETURNING merge_action(), t.*;
-$$;
-SELECT * FROM merge_sq_source_into_sq_target();
- action | tid | balance
---------+-----+---------
- DELETE | 1 | 100
- UPDATE | 2 | 220
- INSERT | 4 | 40
-(3 rows)
-
-ROLLBACK;
--- PL/pgSQL function with MERGE ... RETURNING ... INTO
-BEGIN;
-CREATE FUNCTION merge_into_sq_target(sid int, balance int, delta int,
- OUT r_action text, OUT r_tid int, OUT r_balance int)
-LANGUAGE plpgsql AS
-$$
-BEGIN
- MERGE INTO sq_target t
- USING (VALUES ($1, $2, $3)) AS v(sid, balance, delta)
- ON tid = v.sid
- WHEN MATCHED AND tid >= 2 THEN
- UPDATE SET balance = t.balance + v.delta
- WHEN NOT MATCHED THEN
- INSERT (balance, tid) VALUES (v.balance + v.delta, v.sid)
- WHEN MATCHED AND tid < 2 THEN
- DELETE
- RETURNING merge_action(), t.* INTO r_action, r_tid, r_balance;
-END;
-$$;
-SELECT m.*
-FROM (VALUES (1, 0, 0), (3, 0, 20), (4, 100, 10)) AS v(sid, balance, delta),
-LATERAL (SELECT r_action, r_tid, r_balance FROM merge_into_sq_target(sid, balance, delta)) m;
- r_action | r_tid | r_balance
-----------+-------+-----------
- DELETE | 1 | 100
- UPDATE | 3 | 320
- INSERT | 4 | 110
-(3 rows)
-
-ROLLBACK;
--- EXPLAIN
-CREATE TABLE ex_mtarget (a int, b int)
- WITH (autovacuum_enabled=off);
-CREATE TABLE ex_msource (a int, b int)
- WITH (autovacuum_enabled=off);
-INSERT INTO ex_mtarget SELECT i, i*10 FROM generate_series(1,100,2) i;
-INSERT INTO ex_msource SELECT i, i*10 FROM generate_series(1,100,1) i;
-CREATE FUNCTION explain_merge(query text) RETURNS SETOF text
-LANGUAGE plpgsql AS
-$$
-DECLARE ln text;
-BEGIN
- FOR ln IN
- EXECUTE 'explain (analyze, timing off, summary off, costs off, buffers off) ' ||
- query
- LOOP
- ln := regexp_replace(ln, '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g');
- RETURN NEXT ln;
- END LOOP;
-END;
-$$;
--- only updates
-SELECT explain_merge('
-MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a
-WHEN MATCHED THEN
- UPDATE SET b = t.b + 1');
- explain_merge
----------------------------------------------------------------------
- Merge on ex_mtarget t (actual rows=0 loops=1)
- Tuples: updated=50
- -> Merge Join (actual rows=50 loops=1)
- Merge Cond: (t.a = s.a)
- -> Sort (actual rows=50 loops=1)
- Sort Key: t.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_mtarget t (actual rows=50 loops=1)
- -> Sort (actual rows=100 loops=1)
- Sort Key: s.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_msource s (actual rows=100 loops=1)
-(12 rows)
-
--- only updates to selected tuples
-SELECT explain_merge('
-MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a
-WHEN MATCHED AND t.a < 10 THEN
- UPDATE SET b = t.b + 1');
- explain_merge
----------------------------------------------------------------------
- Merge on ex_mtarget t (actual rows=0 loops=1)
- Tuples: updated=5 skipped=45
- -> Merge Join (actual rows=50 loops=1)
- Merge Cond: (t.a = s.a)
- -> Sort (actual rows=50 loops=1)
- Sort Key: t.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_mtarget t (actual rows=50 loops=1)
- -> Sort (actual rows=100 loops=1)
- Sort Key: s.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_msource s (actual rows=100 loops=1)
-(12 rows)
-
--- updates + deletes
-SELECT explain_merge('
-MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a
-WHEN MATCHED AND t.a < 10 THEN
- UPDATE SET b = t.b + 1
-WHEN MATCHED AND t.a >= 10 AND t.a <= 20 THEN
- DELETE');
- explain_merge
----------------------------------------------------------------------
- Merge on ex_mtarget t (actual rows=0 loops=1)
- Tuples: updated=5 deleted=5 skipped=40
- -> Merge Join (actual rows=50 loops=1)
- Merge Cond: (t.a = s.a)
- -> Sort (actual rows=50 loops=1)
- Sort Key: t.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_mtarget t (actual rows=50 loops=1)
- -> Sort (actual rows=100 loops=1)
- Sort Key: s.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_msource s (actual rows=100 loops=1)
-(12 rows)
-
--- only inserts
-SELECT explain_merge('
-MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a
-WHEN NOT MATCHED AND s.a < 10 THEN
- INSERT VALUES (a, b)');
- explain_merge
----------------------------------------------------------------------
- Merge on ex_mtarget t (actual rows=0 loops=1)
- Tuples: inserted=4 skipped=96
- -> Merge Left Join (actual rows=100 loops=1)
- Merge Cond: (s.a = t.a)
- -> Sort (actual rows=100 loops=1)
- Sort Key: s.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_msource s (actual rows=100 loops=1)
- -> Sort (actual rows=45 loops=1)
- Sort Key: t.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_mtarget t (actual rows=45 loops=1)
-(12 rows)
-
--- all three
-SELECT explain_merge('
-MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a
-WHEN MATCHED AND t.a < 10 THEN
- UPDATE SET b = t.b + 1
-WHEN MATCHED AND t.a >= 30 AND t.a <= 40 THEN
- DELETE
-WHEN NOT MATCHED AND s.a < 20 THEN
- INSERT VALUES (a, b)');
- explain_merge
----------------------------------------------------------------------
- Merge on ex_mtarget t (actual rows=0 loops=1)
- Tuples: inserted=10 updated=9 deleted=5 skipped=76
- -> Merge Left Join (actual rows=100 loops=1)
- Merge Cond: (s.a = t.a)
- -> Sort (actual rows=100 loops=1)
- Sort Key: s.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_msource s (actual rows=100 loops=1)
- -> Sort (actual rows=49 loops=1)
- Sort Key: t.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_mtarget t (actual rows=49 loops=1)
-(12 rows)
-
--- not matched by source
-SELECT explain_merge('
-MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a
-WHEN NOT MATCHED BY SOURCE and t.a < 10 THEN
- DELETE');
- explain_merge
----------------------------------------------------------------------
- Merge on ex_mtarget t (actual rows=0 loops=1)
- Tuples: skipped=54
- -> Merge Left Join (actual rows=54 loops=1)
- Merge Cond: (t.a = s.a)
- -> Sort (actual rows=54 loops=1)
- Sort Key: t.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_mtarget t (actual rows=54 loops=1)
- -> Sort (actual rows=100 loops=1)
- Sort Key: s.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_msource s (actual rows=100 loops=1)
-(12 rows)
-
--- not matched by source and target
-SELECT explain_merge('
-MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a
-WHEN NOT MATCHED BY SOURCE AND t.a < 10 THEN
- DELETE
-WHEN NOT MATCHED BY TARGET AND s.a < 20 THEN
- INSERT VALUES (a, b)');
- explain_merge
----------------------------------------------------------------------
- Merge on ex_mtarget t (actual rows=0 loops=1)
- Tuples: skipped=100
- -> Merge Full Join (actual rows=100 loops=1)
- Merge Cond: (t.a = s.a)
- -> Sort (actual rows=54 loops=1)
- Sort Key: t.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_mtarget t (actual rows=54 loops=1)
- -> Sort (actual rows=100 loops=1)
- Sort Key: s.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_msource s (actual rows=100 loops=1)
-(12 rows)
-
--- nothing
-SELECT explain_merge('
-MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a AND t.a < -1000
-WHEN MATCHED AND t.a < 10 THEN
- DO NOTHING');
- explain_merge
--------------------------------------------------------------------
- Merge on ex_mtarget t (actual rows=0 loops=1)
- -> Merge Join (actual rows=0 loops=1)
- Merge Cond: (t.a = s.a)
- -> Sort (actual rows=0 loops=1)
- Sort Key: t.a
- Sort Method: quicksort Memory: xxx
- -> Seq Scan on ex_mtarget t (actual rows=0 loops=1)
- Filter: (a < '-1000'::integer)
- Rows Removed by Filter: 54
- -> Sort (never executed)
- Sort Key: s.a
- -> Seq Scan on ex_msource s (never executed)
-(12 rows)
-
-DROP TABLE ex_msource, ex_mtarget;
-DROP FUNCTION explain_merge(text);
--- EXPLAIN SubPlans and InitPlans
-CREATE TABLE src (a int, b int, c int, d int);
-CREATE TABLE tgt (a int, b int, c int, d int);
-CREATE TABLE ref (ab int, cd int);
-EXPLAIN (verbose, costs off)
-MERGE INTO tgt t
-USING (SELECT *, (SELECT count(*) FROM ref r
- WHERE r.ab = s.a + s.b
- AND r.cd = s.c - s.d) cnt
- FROM src s) s
-ON t.a = s.a AND t.b < s.cnt
-WHEN MATCHED AND t.c > s.cnt THEN
- UPDATE SET (b, c) = (SELECT s.b, s.cnt);
- QUERY PLAN
--------------------------------------------------------------------------------------
- Merge on public.tgt t
- -> Hash Join
- Output: t.ctid, s.a, s.b, s.c, s.d, s.ctid
- Hash Cond: (t.a = s.a)
- Join Filter: (t.b < (SubPlan 1))
- -> Seq Scan on public.tgt t
- Output: t.ctid, t.a, t.b
- -> Hash
- Output: s.a, s.b, s.c, s.d, s.ctid
- -> Seq Scan on public.src s
- Output: s.a, s.b, s.c, s.d, s.ctid
- SubPlan 1
- -> Aggregate
- Output: count(*)
- -> Seq Scan on public.ref r
- Output: r.ab, r.cd
- Filter: ((r.ab = (s.a + s.b)) AND (r.cd = (s.c - s.d)))
- SubPlan 4
- -> Aggregate
- Output: count(*)
- -> Seq Scan on public.ref r_2
- Output: r_2.ab, r_2.cd
- Filter: ((r_2.ab = (s.a + s.b)) AND (r_2.cd = (s.c - s.d)))
- SubPlan 3
- -> Result
- Output: s.b, (InitPlan 2).col1
- InitPlan 2
- -> Aggregate
- Output: count(*)
- -> Seq Scan on public.ref r_1
- Output: r_1.ab, r_1.cd
- Filter: ((r_1.ab = (s.a + s.b)) AND (r_1.cd = (s.c - s.d)))
-(32 rows)
-
-DROP TABLE src, tgt, ref;
--- Subqueries
-BEGIN;
-MERGE INTO sq_target t
-USING v
-ON tid = sid
-WHEN MATCHED THEN
- UPDATE SET balance = (SELECT count(*) FROM sq_target);
-SELECT * FROM sq_target WHERE tid = 1;
- tid | balance
------+---------
- 1 | 3
-(1 row)
-
-ROLLBACK;
-BEGIN;
-MERGE INTO sq_target t
-USING v
-ON tid = sid
-WHEN MATCHED AND (SELECT count(*) > 0 FROM sq_target) THEN
- UPDATE SET balance = 42;
-SELECT * FROM sq_target WHERE tid = 1;
- tid | balance
------+---------
- 1 | 42
-(1 row)
-
-ROLLBACK;
-BEGIN;
-MERGE INTO sq_target t
-USING v
-ON tid = sid AND (SELECT count(*) > 0 FROM sq_target)
-WHEN MATCHED THEN
- UPDATE SET balance = 42;
-SELECT * FROM sq_target WHERE tid = 1;
- tid | balance
------+---------
- 1 | 42
-(1 row)
-
-ROLLBACK;
-DROP TABLE sq_target, sq_target_merge_log, sq_source CASCADE;
-NOTICE: drop cascades to view v
-CREATE TABLE pa_target (tid integer, balance float, val text)
- PARTITION BY LIST (tid);
-CREATE TABLE part1 PARTITION OF pa_target FOR VALUES IN (1,4)
- WITH (autovacuum_enabled=off);
-CREATE TABLE part2 PARTITION OF pa_target FOR VALUES IN (2,5,6)
- WITH (autovacuum_enabled=off);
-CREATE TABLE part3 PARTITION OF pa_target FOR VALUES IN (3,8,9)
- WITH (autovacuum_enabled=off);
-CREATE TABLE part4 PARTITION OF pa_target DEFAULT
- WITH (autovacuum_enabled=off);
-CREATE TABLE pa_source (sid integer, delta float);
--- insert many rows to the source table
-INSERT INTO pa_source SELECT id, id * 10 FROM generate_series(1,14) AS id;
--- insert a few rows in the target table (odd numbered tid)
-INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,15,2) AS id;
--- try simple MERGE
-BEGIN;
-MERGE INTO pa_target t
- USING pa_source s
- ON t.tid = s.sid
- WHEN MATCHED THEN
- UPDATE SET balance = balance + delta, val = val || ' updated by merge'
- WHEN NOT MATCHED THEN
- INSERT VALUES (sid, delta, 'inserted by merge')
- WHEN NOT MATCHED BY SOURCE THEN
- UPDATE SET val = val || ' not matched by source';
-SELECT * FROM pa_target ORDER BY tid, val;
- tid | balance | val
-----+---------+-------------------------------
- 1 | 110 | initial updated by merge
- 2 | 20 | inserted by merge
- 3 | 330 | initial updated by merge
- 4 | 40 | inserted by merge
- 5 | 550 | initial updated by merge
- 6 | 60 | inserted by merge
- 7 | 770 | initial updated by merge
- 8 | 80 | inserted by merge
- 9 | 990 | initial updated by merge
- 10 | 100 | inserted by merge
- 11 | 1210 | initial updated by merge
- 12 | 120 | inserted by merge
- 13 | 1430 | initial updated by merge
- 14 | 140 | inserted by merge
- 15 | 1500 | initial not matched by source
-(15 rows)
-
-ROLLBACK;
--- same with a constant qual
-BEGIN;
-MERGE INTO pa_target t
- USING pa_source s
- ON t.tid = s.sid AND tid = 1
- WHEN MATCHED THEN
- UPDATE SET balance = balance + delta, val = val || ' updated by merge'
- WHEN NOT MATCHED THEN
- INSERT VALUES (sid, delta, 'inserted by merge')
- WHEN NOT MATCHED BY SOURCE THEN
- UPDATE SET val = val || ' not matched by source';
-SELECT * FROM pa_target ORDER BY tid, val;
- tid | balance | val
-----+---------+-------------------------------
- 1 | 110 | initial updated by merge
- 2 | 20 | inserted by merge
- 3 | 300 | initial not matched by source
- 3 | 30 | inserted by merge
- 4 | 40 | inserted by merge
- 5 | 500 | initial not matched by source
- 5 | 50 | inserted by merge
- 6 | 60 | inserted by merge
- 7 | 700 | initial not matched by source
- 7 | 70 | inserted by merge
- 8 | 80 | inserted by merge
- 9 | 900 | initial not matched by source
- 9 | 90 | inserted by merge
- 10 | 100 | inserted by merge
- 11 | 1100 | initial not matched by source
- 11 | 110 | inserted by merge
- 12 | 120 | inserted by merge
- 13 | 1300 | initial not matched by source
- 13 | 130 | inserted by merge
- 14 | 140 | inserted by merge
- 15 | 1500 | initial not matched by source
-(21 rows)
-
-ROLLBACK;
--- try updating the partition key column
-BEGIN;
-CREATE FUNCTION merge_func() RETURNS integer LANGUAGE plpgsql AS $$
-DECLARE
- result integer;
-BEGIN
-MERGE INTO pa_target t
- USING pa_source s
- ON t.tid = s.sid
- WHEN MATCHED THEN
- UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge'
- WHEN NOT MATCHED THEN
- INSERT VALUES (sid, delta, 'inserted by merge')
- WHEN NOT MATCHED BY SOURCE THEN
- UPDATE SET tid = 1, val = val || ' not matched by source';
-IF FOUND THEN
- GET DIAGNOSTICS result := ROW_COUNT;
-END IF;
-RETURN result;
-END;
-$$;
-SELECT merge_func();
- merge_func
------------
- 15
-(1 row)
-
-SELECT * FROM pa_target ORDER BY tid, val;
- tid | balance | val
-----+---------+-------------------------------
- 1 | 1500 | initial not matched by source
- 2 | 110 | initial updated by merge
- 2 | 20 | inserted by merge
- 4 | 330 | initial updated by merge
- 4 | 40 | inserted by merge
- 6 | 550 | initial updated by merge
- 6 | 60 | inserted by merge
- 8 | 770 | initial updated by merge
- 8 | 80 | inserted by merge
- 10 | 990 | initial updated by merge
- 10 | 100 | inserted by merge
- 12 | 1210 | initial updated by merge
- 12 | 120 | inserted by merge
- 14 | 1430 | initial updated by merge
- 14 | 140 | inserted by merge
-(15 rows)
-
-ROLLBACK;
--- update partition key to partition not initially scanned
-BEGIN;
-MERGE INTO pa_target t
- USING pa_source s
- ON t.tid = s.sid AND t.tid = 1
- WHEN MATCHED THEN
- UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge'
- RETURNING merge_action(), t.*;
- merge_action | tid | balance | val
---------------+-----+---------+--------------------------
- UPDATE | 2 | 110 | initial updated by merge
-(1 row)
-
-SELECT * FROM pa_target ORDER BY tid;
- tid | balance | val
-----+---------+--------------------------
- 2 | 110 | initial updated by merge
- 3 | 300 | initial
- 5 | 500 | initial
- 7 | 700 | initial
- 9 | 900 | initial
- 11 | 1100 | initial
- 13 | 1300 | initial
- 15 | 1500 | initial
-(8 rows)
-
-ROLLBACK;
-DROP TABLE pa_target CASCADE;
--- The target table is partitioned in the same way, but this time by attaching
--- partitions which have columns in different order, dropped columns etc.
-CREATE TABLE pa_target (tid integer, balance float, val text)
- PARTITION BY LIST (tid);
-CREATE TABLE part1 (tid integer, balance float, val text)
- WITH (autovacuum_enabled=off);
-CREATE TABLE part2 (balance float, tid integer, val text)
- WITH (autovacuum_enabled=off);
-CREATE TABLE part3 (tid integer, balance float, val text)
- WITH (autovacuum_enabled=off);
-CREATE TABLE part4 (extraid text, tid integer, balance float, val text)
- WITH (autovacuum_enabled=off);
-ALTER TABLE part4 DROP COLUMN extraid;
-ALTER TABLE pa_target ATTACH PARTITION part1 FOR VALUES IN (1,4);
-ALTER TABLE pa_target ATTACH PARTITION part2 FOR VALUES IN (2,5,6);
-ALTER TABLE pa_target ATTACH PARTITION part3 FOR VALUES IN (3,8,9);
-ALTER TABLE pa_target ATTACH PARTITION part4 DEFAULT;
--- insert a few rows in the target table (odd numbered tid)
-INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,15,2) AS id;
--- try simple MERGE
-BEGIN;
-DO $$
-DECLARE
- result integer;
-BEGIN
-MERGE INTO pa_target t
- USING pa_source s
- ON t.tid = s.sid
- WHEN MATCHED THEN
- UPDATE SET balance = balance + delta, val = val || ' updated by merge'
- WHEN NOT MATCHED THEN
- INSERT VALUES (sid, delta, 'inserted by merge')
- WHEN NOT MATCHED BY SOURCE THEN
- UPDATE SET val = val || ' not matched by source';
-GET DIAGNOSTICS result := ROW_COUNT;
-RAISE NOTICE 'ROW_COUNT = %', result;
-END;
-$$;
-NOTICE: ROW_COUNT = 15
-SELECT * FROM pa_target ORDER BY tid, val;
- tid | balance | val
-----+---------+-------------------------------
- 1 | 110 | initial updated by merge
- 2 | 20 | inserted by merge
- 3 | 330 | initial updated by merge
- 4 | 40 | inserted by merge
- 5 | 550 | initial updated by merge
- 6 | 60 | inserted by merge
- 7 | 770 | initial updated by merge
- 8 | 80 | inserted by merge
- 9 | 990 | initial updated by merge
- 10 | 100 | inserted by merge
- 11 | 1210 | initial updated by merge
- 12 | 120 | inserted by merge
- 13 | 1430 | initial updated by merge
- 14 | 140 | inserted by merge
- 15 | 1500 | initial not matched by source
-(15 rows)
-
-ROLLBACK;
--- same with a constant qual
-BEGIN;
-MERGE INTO pa_target t
- USING pa_source s
- ON t.tid = s.sid AND tid IN (1, 5)
- WHEN MATCHED AND tid % 5 = 0 THEN DELETE
- WHEN MATCHED THEN
- UPDATE SET balance = balance + delta, val = val || ' updated by merge'
- WHEN NOT MATCHED THEN
- INSERT VALUES (sid, delta, 'inserted by merge')
- WHEN NOT MATCHED BY SOURCE THEN
- UPDATE SET val = val || ' not matched by source';
-SELECT * FROM pa_target ORDER BY tid, val;
- tid | balance | val
-----+---------+-------------------------------
- 1 | 110 | initial updated by merge
- 2 | 20 | inserted by merge
- 3 | 300 | initial not matched by source
- 3 | 30 | inserted by merge
- 4 | 40 | inserted by merge
- 6 | 60 | inserted by merge
- 7 | 700 | initial not matched by source
- 7 | 70 | inserted by merge
- 8 | 80 | inserted by merge
- 9 | 900 | initial not matched by source
- 9 | 90 | inserted by merge
- 10 | 100 | inserted by merge
- 11 | 1100 | initial not matched by source
- 11 | 110 | inserted by merge
- 12 | 120 | inserted by merge
- 13 | 1300 | initial not matched by source
- 13 | 130 | inserted by merge
- 14 | 140 | inserted by merge
- 15 | 1500 | initial not matched by source
-(19 rows)
-
-ROLLBACK;
--- try updating the partition key column
-BEGIN;
-DO $$
-DECLARE
- result integer;
-BEGIN
-MERGE INTO pa_target t
- USING pa_source s
- ON t.tid = s.sid
- WHEN MATCHED THEN
- UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge'
- WHEN NOT MATCHED THEN
- INSERT VALUES (sid, delta, 'inserted by merge')
- WHEN NOT MATCHED BY SOURCE THEN
- UPDATE SET tid = 1, val = val || ' not matched by source';
-GET DIAGNOSTICS result := ROW_COUNT;
-RAISE NOTICE 'ROW_COUNT = %', result;
-END;
-$$;
-NOTICE: ROW_COUNT = 15
-SELECT * FROM pa_target ORDER BY tid, val;
- tid | balance | val
-----+---------+-------------------------------
- 1 | 1500 | initial not matched by source
- 2 | 110 | initial updated by merge
- 2 | 20 | inserted by merge
- 4 | 330 | initial updated by merge
- 4 | 40 | inserted by merge
- 6 | 550 | initial updated by merge
- 6 | 60 | inserted by merge
- 8 | 770 | initial updated by merge
- 8 | 80 | inserted by merge
- 10 | 990 | initial updated by merge
- 10 | 100 | inserted by merge
- 12 | 1210 | initial updated by merge
- 12 | 120 | inserted by merge
- 14 | 1430 | initial updated by merge
- 14 | 140 | inserted by merge
-(15 rows)
-
-ROLLBACK;
--- as above, but blocked by BEFORE DELETE ROW trigger
-BEGIN;
-CREATE FUNCTION trig_fn() RETURNS trigger LANGUAGE plpgsql AS
- $$ BEGIN RETURN NULL; END; $$;
-CREATE TRIGGER del_trig BEFORE DELETE ON pa_target
- FOR EACH ROW EXECUTE PROCEDURE trig_fn();
-DO $$
-DECLARE
- result integer;
-BEGIN
-MERGE INTO pa_target t
- USING pa_source s
- ON t.tid = s.sid
- WHEN MATCHED THEN
- UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge'
- WHEN NOT MATCHED THEN
- INSERT VALUES (sid, delta, 'inserted by merge')
- WHEN NOT MATCHED BY SOURCE THEN
- UPDATE SET val = val || ' not matched by source';
-GET DIAGNOSTICS result := ROW_COUNT;
-RAISE NOTICE 'ROW_COUNT = %', result;
-END;
-$$;
-NOTICE: ROW_COUNT = 11
-SELECT * FROM pa_target ORDER BY tid, val;
- tid | balance | val
-----+---------+-------------------------------
- 1 | 100 | initial
- 2 | 20 | inserted by merge
- 3 | 300 | initial
- 4 | 40 | inserted by merge
- 6 | 550 | initial updated by merge
- 6 | 60 | inserted by merge
- 7 | 700 | initial
- 8 | 80 | inserted by merge
- 9 | 900 | initial
- 10 | 100 | inserted by merge
- 12 | 1210 | initial updated by merge
- 12 | 120 | inserted by merge
- 14 | 1430 | initial updated by merge
- 14 | 140 | inserted by merge
- 15 | 1500 | initial not matched by source
-(15 rows)
-
-ROLLBACK;
--- as above, but blocked by BEFORE INSERT ROW trigger
-BEGIN;
-CREATE FUNCTION trig_fn() RETURNS trigger LANGUAGE plpgsql AS
- $$ BEGIN RETURN NULL; END; $$;
-CREATE TRIGGER ins_trig BEFORE INSERT ON pa_target
- FOR EACH ROW EXECUTE PROCEDURE trig_fn();
-DO $$
-DECLARE
- result integer;
-BEGIN
-MERGE INTO pa_target t
- USING pa_source s
- ON t.tid = s.sid
- WHEN MATCHED THEN
- UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge'
- WHEN NOT MATCHED THEN
- INSERT VALUES (sid, delta, 'inserted by merge')
- WHEN NOT MATCHED BY SOURCE THEN
- UPDATE SET val = val || ' not matched by source';
-GET DIAGNOSTICS result := ROW_COUNT;
-RAISE NOTICE 'ROW_COUNT = %', result;
-END;
-$$;
-NOTICE: ROW_COUNT = 4
-SELECT * FROM pa_target ORDER BY tid, val;
- tid | balance | val
-----+---------+-------------------------------
- 6 | 550 | initial updated by merge
- 12 | 1210 | initial updated by merge
- 14 | 1430 | initial updated by merge
- 15 | 1500 | initial not matched by source
-(4 rows)
-
-ROLLBACK;
--- test RLS enforcement
-BEGIN;
-ALTER TABLE pa_target ENABLE ROW LEVEL SECURITY;
-ALTER TABLE pa_target FORCE ROW LEVEL SECURITY;
-CREATE POLICY pa_target_pol ON pa_target USING (tid != 0);
-MERGE INTO pa_target t
- USING pa_source s
- ON t.tid = s.sid AND t.tid IN (1,2,3,4)
- WHEN MATCHED THEN
- UPDATE SET tid = tid - 1;
-ERROR: new row violates row-level security policy for table "pa_target"
-ROLLBACK;
-DROP TABLE pa_source;
-DROP TABLE pa_target CASCADE;
--- Sub-partitioning
-CREATE TABLE pa_target (logts timestamp, tid integer, balance float, val text)
- PARTITION BY RANGE (logts);
-CREATE TABLE part_m01 PARTITION OF pa_target
- FOR VALUES FROM ('2017-01-01') TO ('2017-02-01')
- PARTITION BY LIST (tid);
-CREATE TABLE part_m01_odd PARTITION OF part_m01
- FOR VALUES IN (1,3,5,7,9) WITH (autovacuum_enabled=off);
-CREATE TABLE part_m01_even PARTITION OF part_m01
- FOR VALUES IN (2,4,6,8) WITH (autovacuum_enabled=off);
-CREATE TABLE part_m02 PARTITION OF pa_target
- FOR VALUES FROM ('2017-02-01') TO ('2017-03-01')
- PARTITION BY LIST (tid);
-CREATE TABLE part_m02_odd PARTITION OF part_m02
- FOR VALUES IN (1,3,5,7,9) WITH (autovacuum_enabled=off);
-CREATE TABLE part_m02_even PARTITION OF part_m02
- FOR VALUES IN (2,4,6,8) WITH (autovacuum_enabled=off);
-CREATE TABLE pa_source (sid integer, delta float)
- WITH (autovacuum_enabled=off);
--- insert many rows to the source table
-INSERT INTO pa_source SELECT id, id * 10 FROM generate_series(1,14) AS id;
--- insert a few rows in the target table (odd numbered tid)
-INSERT INTO pa_target SELECT '2017-01-31', id, id * 100, 'initial' FROM generate_series(1,9,3) AS id;
-INSERT INTO pa_target SELECT '2017-02-28', id, id * 100, 'initial' FROM generate_series(2,9,3) AS id;
--- try simple MERGE
-BEGIN;
-MERGE INTO pa_target t
- USING (SELECT '2017-01-15' AS slogts, * FROM pa_source WHERE sid < 10) s
- ON t.tid = s.sid
- WHEN MATCHED THEN
- UPDATE SET balance = balance + delta, val = val || ' updated by merge'
- WHEN NOT MATCHED THEN
- INSERT VALUES (slogts::timestamp, sid, delta, 'inserted by merge')
- RETURNING merge_action(), t.*;
- merge_action | logts | tid | balance | val
---------------+--------------------------+-----+---------+--------------------------
- UPDATE | Tue Jan 31 00:00:00 2017 | 1 | 110 | initial updated by merge
- UPDATE | Tue Feb 28 00:00:00 2017 | 2 | 220 | initial updated by merge
- INSERT | Sun Jan 15 00:00:00 2017 | 3 | 30 | inserted by merge
- UPDATE | Tue Jan 31 00:00:00 2017 | 4 | 440 | initial updated by merge
- UPDATE | Tue Feb 28 00:00:00 2017 | 5 | 550 | initial updated by merge
- INSERT | Sun Jan 15 00:00:00 2017 | 6 | 60 | inserted by merge
- UPDATE | Tue Jan 31 00:00:00 2017 | 7 | 770 | initial updated by merge
- UPDATE | Tue Feb 28 00:00:00 2017 | 8 | 880 | initial updated by merge
- INSERT | Sun Jan 15 00:00:00 2017 | 9 | 90 | inserted by merge
-(9 rows)
-
-SELECT * FROM pa_target ORDER BY tid;
- logts | tid | balance | val
---------------------------+-----+---------+--------------------------
- Tue Jan 31 00:00:00 2017 | 1 | 110 | initial updated by merge
- Tue Feb 28 00:00:00 2017 | 2 | 220 | initial updated by merge
- Sun Jan 15 00:00:00 2017 | 3 | 30 | inserted by merge
- Tue Jan 31 00:00:00 2017 | 4 | 440 | initial updated by merge
- Tue Feb 28 00:00:00 2017 | 5 | 550 | initial updated by merge
- Sun Jan 15 00:00:00 2017 | 6 | 60 | inserted by merge
- Tue Jan 31 00:00:00 2017 | 7 | 770 | initial updated by merge
- Tue Feb 28 00:00:00 2017 | 8 | 880 | initial updated by merge
- Sun Jan 15 00:00:00 2017 | 9 | 90 | inserted by merge
-(9 rows)
-
-ROLLBACK;
-DROP TABLE pa_source;
-DROP TABLE pa_target CASCADE;
--- Partitioned table with primary key
-CREATE TABLE pa_target (tid integer PRIMARY KEY) PARTITION BY LIST (tid);
-CREATE TABLE pa_targetp PARTITION OF pa_target DEFAULT;
-CREATE TABLE pa_source (sid integer);
-INSERT INTO pa_source VALUES (1), (2);
-EXPLAIN (VERBOSE, COSTS OFF)
-MERGE INTO pa_target t USING pa_source s ON t.tid = s.sid
- WHEN NOT MATCHED THEN INSERT VALUES (s.sid);
- QUERY PLAN
-------------------------------------------------------------
- Merge on public.pa_target t
- Merge on public.pa_targetp t_1
- -> Hash Left Join
- Output: s.sid, s.ctid, t_1.tableoid, t_1.ctid
- Inner Unique: true
- Hash Cond: (s.sid = t_1.tid)
- -> Seq Scan on public.pa_source s
- Output: s.sid, s.ctid
- -> Hash
- Output: t_1.tid, t_1.tableoid, t_1.ctid
- -> Seq Scan on public.pa_targetp t_1
- Output: t_1.tid, t_1.tableoid, t_1.ctid
-(12 rows)
-
-MERGE INTO pa_target t USING pa_source s ON t.tid = s.sid
- WHEN NOT MATCHED THEN INSERT VALUES (s.sid);
-TABLE pa_target;
- tid
-----
- 1
- 2
-(2 rows)
-
--- Partition-less partitioned table
--- (the bug we are checking for appeared only if table had partitions before)
-DROP TABLE pa_targetp;
-EXPLAIN (VERBOSE, COSTS OFF)
-MERGE INTO pa_target t USING pa_source s ON t.tid = s.sid
- WHEN NOT MATCHED THEN INSERT VALUES (s.sid);
- QUERY PLAN
--------------------------------------------
- Merge on public.pa_target t
- -> Hash Left Join
- Output: s.sid, s.ctid, t.ctid
- Inner Unique: true
- Hash Cond: (s.sid = t.tid)
- -> Seq Scan on public.pa_source s
- Output: s.sid, s.ctid
- -> Hash
- Output: t.tid, t.ctid
- -> Result
- Output: t.tid, t.ctid
- One-Time Filter: false
-(12 rows)
-
-MERGE INTO pa_target t USING pa_source s ON t.tid = s.sid
- WHEN NOT MATCHED THEN INSERT VALUES (s.sid);
-ERROR: no partition of relation "pa_target" found for row
-DETAIL: Partition key of the failing row contains (tid) = (1).
-DROP TABLE pa_source;
-DROP TABLE pa_target CASCADE;
--- some complex joins on the source side
-CREATE TABLE cj_target (tid integer, balance float, val text)
- WITH (autovacuum_enabled=off);
-CREATE TABLE cj_source1 (sid1 integer, scat integer, delta integer)
- WITH (autovacuum_enabled=off);
-CREATE TABLE cj_source2 (sid2 integer, sval text)
- WITH (autovacuum_enabled=off);
-INSERT INTO cj_source1 VALUES (1, 10, 100);
-INSERT INTO cj_source1 VALUES (1, 20, 200);
-INSERT INTO cj_source1 VALUES (2, 20, 300);
-INSERT INTO cj_source1 VALUES (3, 10, 400);
-INSERT INTO cj_source2 VALUES (1, 'initial source2');
-INSERT INTO cj_source2 VALUES (2, 'initial source2');
-INSERT INTO cj_source2 VALUES (3, 'initial source2');
--- source relation is an unaliased join
-MERGE INTO cj_target t
-USING cj_source1 s1
- INNER JOIN cj_source2 s2 ON sid1 = sid2
-ON t.tid = sid1
-WHEN NOT MATCHED THEN
- INSERT VALUES (sid1, delta, sval);
--- try accessing columns from either side of the source join
-MERGE INTO cj_target t
-USING cj_source2 s2
- INNER JOIN cj_source1 s1 ON sid1 = sid2 AND scat = 20
-ON t.tid = sid1
-WHEN NOT MATCHED THEN
- INSERT VALUES (sid2, delta, sval)
-WHEN MATCHED THEN
- DELETE;
--- some simple expressions in INSERT targetlist
-MERGE INTO cj_target t
-USING cj_source2 s2
- INNER JOIN cj_source1 s1 ON sid1 = sid2
-ON t.tid = sid1
-WHEN NOT MATCHED THEN
- INSERT VALUES (sid2, delta + scat, sval)
-WHEN MATCHED THEN
- UPDATE SET val = val || ' updated by merge';
-MERGE INTO cj_target t
-USING cj_source2 s2
- INNER JOIN cj_source1 s1 ON sid1 = sid2 AND scat = 20
-ON t.tid = sid1
-WHEN MATCHED THEN
- UPDATE SET val = val || ' ' || delta::text;
-SELECT * FROM cj_target;
- tid | balance | val
-----+---------+----------------------------------
- 3 | 400 | initial source2 updated by merge
- 1 | 220 | initial source2 200
- 1 | 110 | initial source2 200
- 2 | 320 | initial source2 300
-(4 rows)
-
--- try it with an outer join and PlaceHolderVar
-MERGE INTO cj_target t
-USING (SELECT *, 'join input'::text AS phv FROM cj_source1) fj
- FULL JOIN cj_source2 fj2 ON fj.scat = fj2.sid2 * 10
-ON t.tid = fj.scat
-WHEN NOT MATCHED THEN
- INSERT (tid, balance, val) VALUES (fj.scat, fj.delta, fj.phv);
-SELECT * FROM cj_target;
- tid | balance | val
-----+---------+----------------------------------
- 3 | 400 | initial source2 updated by merge
- 1 | 220 | initial source2 200
- 1 | 110 | initial source2 200
- 2 | 320 | initial source2 300
- 10 | 100 | join input
- 10 | 400 | join input
- 20 | 200 | join input
- 20 | 300 | join input
- | |
-(9 rows)
-
-ALTER TABLE cj_source1 RENAME COLUMN sid1 TO sid;
-ALTER TABLE cj_source2 RENAME COLUMN sid2 TO sid;
-TRUNCATE cj_target;
-MERGE INTO cj_target t
-USING cj_source1 s1
- INNER JOIN cj_source2 s2 ON s1.sid = s2.sid
-ON t.tid = s1.sid
-WHEN NOT MATCHED THEN
- INSERT VALUES (s2.sid, delta, sval);
-DROP TABLE cj_source2, cj_source1, cj_target;
--- Function scans
-CREATE TABLE fs_target (a int, b int, c text)
- WITH (autovacuum_enabled=off);
-MERGE INTO fs_target t
-USING generate_series(1,100,1) AS id
-ON t.a = id
-WHEN MATCHED THEN
- UPDATE SET b = b + id
-WHEN NOT MATCHED THEN
- INSERT VALUES (id, -1);
-MERGE INTO fs_target t
-USING generate_series(1,100,2) AS id
-ON t.a = id
-WHEN MATCHED THEN
- UPDATE SET b = b + id, c = 'updated '|| id.*::text
-WHEN NOT MATCHED THEN
- INSERT VALUES (id, -1, 'inserted ' || id.*::text);
-SELECT count(*) FROM fs_target;
- count
-------
- 100
-(1 row)
-
-DROP TABLE fs_target;
--- SERIALIZABLE test
--- handled in isolation tests
--- Inheritance-based partitioning
-CREATE TABLE measurement (
- city_id int not null,
- logdate date not null,
- peaktemp int,
- unitsales int
-) WITH (autovacuum_enabled=off);
-CREATE TABLE measurement_y2006m02 (
- CHECK ( logdate >= DATE '2006-02-01' AND logdate < DATE '2006-03-01' )
-) INHERITS (measurement) WITH (autovacuum_enabled=off);
-CREATE TABLE measurement_y2006m03 (
- CHECK ( logdate >= DATE '2006-03-01' AND logdate < DATE '2006-04-01' )
-) INHERITS (measurement) WITH (autovacuum_enabled=off);
-CREATE TABLE measurement_y2007m01 (
- filler text,
- peaktemp int,
- logdate date not null,
- city_id int not null,
- unitsales int
- CHECK ( logdate >= DATE '2007-01-01' AND logdate < DATE '2007-02-01')
-) WITH (autovacuum_enabled=off);
-ALTER TABLE measurement_y2007m01 DROP COLUMN filler;
-ALTER TABLE measurement_y2007m01 INHERIT measurement;
-INSERT INTO measurement VALUES (0, '2005-07-21', 5, 15);
-CREATE OR REPLACE FUNCTION measurement_insert_trigger()
-RETURNS TRIGGER AS $$
-BEGIN
- IF ( NEW.logdate >= DATE '2006-02-01' AND
- NEW.logdate < DATE '2006-03-01' ) THEN
- INSERT INTO measurement_y2006m02 VALUES (NEW.*);
- ELSIF ( NEW.logdate >= DATE '2006-03-01' AND
- NEW.logdate < DATE '2006-04-01' ) THEN
- INSERT INTO measurement_y2006m03 VALUES (NEW.*);
- ELSIF ( NEW.logdate >= DATE '2007-01-01' AND
- NEW.logdate < DATE '2007-02-01' ) THEN
- INSERT INTO measurement_y2007m01 (city_id, logdate, peaktemp, unitsales)
- VALUES (NEW.*);
- ELSE
- RAISE EXCEPTION 'Date out of range. Fix the measurement_insert_trigger() function!';
- END IF;
- RETURN NULL;
-END;
-$$ LANGUAGE plpgsql ;
-CREATE TRIGGER insert_measurement_trigger
- BEFORE INSERT ON measurement
- FOR EACH ROW EXECUTE PROCEDURE measurement_insert_trigger();
-INSERT INTO measurement VALUES (1, '2006-02-10', 35, 10);
-INSERT INTO measurement VALUES (1, '2006-02-16', 45, 20);
-INSERT INTO measurement VALUES (1, '2006-03-17', 25, 10);
-INSERT INTO measurement VALUES (1, '2006-03-27', 15, 40);
-INSERT INTO measurement VALUES (1, '2007-01-15', 10, 10);
-INSERT INTO measurement VALUES (1, '2007-01-17', 10, 10);
-SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate;
- tableoid | city_id | logdate | peaktemp | unitsales
-----------------------+---------+------------+----------+-----------
- measurement | 0 | 07-21-2005 | 5 | 15
- measurement_y2006m02 | 1 | 02-10-2006 | 35 | 10
- measurement_y2006m02 | 1 | 02-16-2006 | 45 | 20
- measurement_y2006m03 | 1 | 03-17-2006 | 25 | 10
- measurement_y2006m03 | 1 | 03-27-2006 | 15 | 40
- measurement_y2007m01 | 1 | 01-15-2007 | 10 | 10
- measurement_y2007m01 | 1 | 01-17-2007 | 10 | 10
-(7 rows)
-
-CREATE TABLE new_measurement (LIKE measurement) WITH (autovacuum_enabled=off);
-INSERT INTO new_measurement VALUES (0, '2005-07-21', 25, 20);
-INSERT INTO new_measurement VALUES (1, '2006-03-01', 20, 10);
-INSERT INTO new_measurement VALUES (1, '2006-02-16', 50, 10);
-INSERT INTO new_measurement VALUES (2, '2006-02-10', 20, 20);
-INSERT INTO new_measurement VALUES (1, '2006-03-27', NULL, NULL);
-INSERT INTO new_measurement VALUES (1, '2007-01-17', NULL, NULL);
-INSERT INTO new_measurement VALUES (1, '2007-01-15', 5, NULL);
-INSERT INTO new_measurement VALUES (1, '2007-01-16', 10, 10);
-BEGIN;
-MERGE INTO ONLY measurement m
- USING new_measurement nm ON
- (m.city_id = nm.city_id and m.logdate=nm.logdate)
-WHEN MATCHED AND nm.peaktemp IS NULL THEN DELETE
-WHEN MATCHED THEN UPDATE
- SET peaktemp = greatest(m.peaktemp, nm.peaktemp),
- unitsales = m.unitsales + coalesce(nm.unitsales, 0)
-WHEN NOT MATCHED THEN INSERT
- (city_id, logdate, peaktemp, unitsales)
- VALUES (city_id, logdate, peaktemp, unitsales);
-SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate, peaktemp;
- tableoid | city_id | logdate | peaktemp | unitsales
-----------------------+---------+------------+----------+-----------
- measurement | 0 | 07-21-2005 | 25 | 35
- measurement_y2006m02 | 1 | 02-10-2006 | 35 | 10
- measurement_y2006m02 | 1 | 02-16-2006 | 45 | 20
- measurement_y2006m02 | 1 | 02-16-2006 | 50 | 10
- measurement_y2006m03 | 1 | 03-01-2006 | 20 | 10
- measurement_y2006m03 | 1 | 03-17-2006 | 25 | 10
- measurement_y2006m03 | 1 | 03-27-2006 | 15 | 40
- measurement_y2006m03 | 1 | 03-27-2006 | |
- measurement_y2007m01 | 1 | 01-15-2007 | 5 |
- measurement_y2007m01 | 1 | 01-15-2007 | 10 | 10
- measurement_y2007m01 | 1 | 01-16-2007 | 10 | 10
- measurement_y2007m01 | 1 | 01-17-2007 | 10 | 10
- measurement_y2007m01 | 1 | 01-17-2007 | |
- measurement_y2006m02 | 2 | 02-10-2006 | 20 | 20
-(14 rows)
-
-ROLLBACK;
-MERGE into measurement m
- USING new_measurement nm ON
- (m.city_id = nm.city_id and m.logdate=nm.logdate)
-WHEN MATCHED AND nm.peaktemp IS NULL THEN DELETE
-WHEN MATCHED THEN UPDATE
- SET peaktemp = greatest(m.peaktemp, nm.peaktemp),
- unitsales = m.unitsales + coalesce(nm.unitsales, 0)
-WHEN NOT MATCHED THEN INSERT
- (city_id, logdate, peaktemp, unitsales)
- VALUES (city_id, logdate, peaktemp, unitsales);
-SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate;
- tableoid | city_id | logdate | peaktemp | unitsales
-----------------------+---------+------------+----------+-----------
- measurement | 0 | 07-21-2005 | 25 | 35
- measurement_y2006m02 | 1 | 02-10-2006 | 35 | 10
- measurement_y2006m02 | 1 | 02-16-2006 | 50 | 30
- measurement_y2006m03 | 1 | 03-01-2006 | 20 | 10
- measurement_y2006m03 | 1 | 03-17-2006 | 25 | 10
- measurement_y2007m01 | 1 | 01-15-2007 | 10 | 10
- measurement_y2007m01 | 1 | 01-16-2007 | 10 | 10
- measurement_y2006m02 | 2 | 02-10-2006 | 20 | 20
-(8 rows)
-
-BEGIN;
-MERGE INTO new_measurement nm
- USING ONLY measurement m ON
- (nm.city_id = m.city_id and nm.logdate=m.logdate)
-WHEN MATCHED THEN DELETE;
-SELECT * FROM new_measurement ORDER BY city_id, logdate;
- city_id | logdate | peaktemp | unitsales
---------+------------+----------+-----------
- 1 | 02-16-2006 | 50 | 10
- 1 | 03-01-2006 | 20 | 10
- 1 | 03-27-2006 | |
- 1 | 01-15-2007 | 5 |
- 1 | 01-16-2007 | 10 | 10
- 1 | 01-17-2007 | |
- 2 | 02-10-2006 | 20 | 20
-(7 rows)
-
-ROLLBACK;
-MERGE INTO new_measurement nm
- USING measurement m ON
- (nm.city_id = m.city_id and nm.logdate=m.logdate)
-WHEN MATCHED THEN DELETE;
-SELECT * FROM new_measurement ORDER BY city_id, logdate;
- city_id | logdate | peaktemp | unitsales
---------+------------+----------+-----------
- 1 | 03-27-2006 | |
- 1 | 01-17-2007 | |
-(2 rows)
-
-DROP TABLE measurement, new_measurement CASCADE;
-NOTICE: drop cascades to 3 other objects
-DETAIL: drop cascades to table measurement_y2006m02
-drop cascades to table measurement_y2006m03
-drop cascades to table measurement_y2007m01
-DROP FUNCTION measurement_insert_trigger();
---
--- test non-strict join clause
---
-CREATE TABLE src (a int, b text);
-INSERT INTO src VALUES (1, 'src row');
-CREATE TABLE tgt (a int, b text);
-INSERT INTO tgt VALUES (NULL, 'tgt row');
-MERGE INTO tgt USING src ON tgt.a IS NOT DISTINCT FROM src.a
- WHEN MATCHED THEN UPDATE SET a = src.a, b = src.b
- WHEN NOT MATCHED BY SOURCE THEN DELETE
- RETURNING merge_action(), src.*, tgt.*;
- merge_action | a | b | a | b
---------------+---+---+---+---------
- DELETE | | | | tgt row
-(1 row)
-
-SELECT * FROM tgt;
- a | b
----+---
-(0 rows)
-
-DROP TABLE src, tgt;
---
--- test for bug #18634 (wrong varnullingrels error)
---
-CREATE TABLE bug18634t (a int, b int, c text);
-INSERT INTO bug18634t VALUES(1, 10, 'tgt1'), (2, 20, 'tgt2');
-CREATE VIEW bug18634v AS
- SELECT * FROM bug18634t WHERE EXISTS (SELECT 1 FROM bug18634t);
-CREATE TABLE bug18634s (a int, b int, c text);
-INSERT INTO bug18634s VALUES (1, 2, 'src1');
-MERGE INTO bug18634v t USING bug18634s s ON s.a = t.a
- WHEN MATCHED THEN UPDATE SET b = s.b
- WHEN NOT MATCHED BY SOURCE THEN DELETE
- RETURNING merge_action(), s.c, t.*;
- merge_action | c | a | b | c
---------------+------+---+----+------
- UPDATE | src1 | 1 | 2 | tgt1
- DELETE | | 2 | 20 | tgt2
-(2 rows)
-
-SELECT * FROM bug18634t;
- a | b | c
----+---+------
- 1 | 2 | tgt1
-(1 row)
-
-DROP TABLE bug18634t CASCADE;
-NOTICE: drop cascades to view bug18634v
-DROP TABLE bug18634s;
--- prepare
-RESET SESSION AUTHORIZATION;
--- try a system catalog
-MERGE INTO pg_class c
-USING (SELECT 'pg_depend'::regclass AS oid) AS j
-ON j.oid = c.oid
-WHEN MATCHED THEN
- UPDATE SET reltuples = reltuples + 1
-RETURNING j.oid;
- oid
-----------
- pg_depend
-(1 row)
-
-CREATE VIEW classv AS SELECT * FROM pg_class;
-MERGE INTO classv c
-USING pg_namespace n
-ON n.oid = c.relnamespace
-WHEN MATCHED AND c.oid = 'pg_depend'::regclass THEN
- UPDATE SET reltuples = reltuples - 1
-RETURNING c.oid;
- oid
------
- 2608
-(1 row)
-
-DROP TABLE target, target2;
-DROP TABLE source, source2;
-DROP FUNCTION merge_trigfunc();
-DROP USER regress_merge_privs;
-DROP USER regress_merge_no_privs;
-DROP USER regress_merge_none;
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /Users/admin/pgsql/src/test/regress/expected/misc_functions.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/misc_functions.out
--- /Users/admin/pgsql/src/test/regress/expected/misc_functions.out 2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/misc_functions.out 2024-12-13 13:20:09
@@ -1,905 +1,2 @@
--- directory paths and dlsuffix are passed to us in environment variables
-\getenv libdir PG_LIBDIR
-\getenv dlsuffix PG_DLSUFFIX
-\set regresslib :libdir '/regress' :dlsuffix
--- Function to assist with verifying EXPLAIN which includes costs. A series
--- of bool flags allows control over which portions are masked out
-CREATE FUNCTION explain_mask_costs(query text, do_analyze bool,
- hide_costs bool, hide_row_est bool, hide_width bool) RETURNS setof text
-LANGUAGE plpgsql AS
-$$
-DECLARE
- ln text;
- analyze_str text;
-BEGIN
- IF do_analyze = true THEN
- analyze_str := 'on';
- ELSE
- analyze_str := 'off';
- END IF;
-
- -- avoid jit related output by disabling it
- SET LOCAL jit = 0;
-
- FOR ln IN
- EXECUTE format('explain (analyze %s, costs on, summary off, timing off, buffers off) %s',
- analyze_str, query)
- LOOP
- IF hide_costs = true THEN
- ln := regexp_replace(ln, 'cost=\d+\.\d\d\.\.\d+\.\d\d', 'cost=N..N');
- END IF;
-
- IF hide_row_est = true THEN
- -- don't use 'g' so that we leave the actual rows intact
- ln := regexp_replace(ln, 'rows=\d+', 'rows=N');
- END IF;
-
- IF hide_width = true THEN
- ln := regexp_replace(ln, 'width=\d+', 'width=N');
- END IF;
-
- RETURN NEXT ln;
- END LOOP;
-END;
-$$;
---
--- num_nulls()
---
-SELECT num_nonnulls(NULL);
- num_nonnulls
--------------
- 0
-(1 row)
-
-SELECT num_nonnulls('1');
- num_nonnulls
--------------
- 1
-(1 row)
-
-SELECT num_nonnulls(NULL::text);
- num_nonnulls
--------------
- 0
-(1 row)
-
-SELECT num_nonnulls(NULL::text, NULL::int);
- num_nonnulls
--------------
- 0
-(1 row)
-
-SELECT num_nonnulls(1, 2, NULL::text, NULL::point, '', int8 '9', 1.0 / NULL);
- num_nonnulls
--------------
- 4
-(1 row)
-
-SELECT num_nonnulls(VARIADIC '{1,2,NULL,3}'::int[]);
- num_nonnulls
--------------
- 3
-(1 row)
-
-SELECT num_nonnulls(VARIADIC '{"1","2","3","4"}'::text[]);
- num_nonnulls
--------------
- 4
-(1 row)
-
-SELECT num_nonnulls(VARIADIC ARRAY(SELECT CASE WHEN i <> 40 THEN i END FROM generate_series(1, 100) i));
- num_nonnulls
--------------
- 99
-(1 row)
-
-SELECT num_nulls(NULL);
- num_nulls
----------
- 1
-(1 row)
-
-SELECT num_nulls('1');
- num_nulls
----------
- 0
-(1 row)
-
-SELECT num_nulls(NULL::text);
- num_nulls
----------
- 1
-(1 row)
-
-SELECT num_nulls(NULL::text, NULL::int);
- num_nulls
----------
- 2
-(1 row)
-
-SELECT num_nulls(1, 2, NULL::text, NULL::point, '', int8 '9', 1.0 / NULL);
- num_nulls
----------
- 3
-(1 row)
-
-SELECT num_nulls(VARIADIC '{1,2,NULL,3}'::int[]);
- num_nulls
----------
- 1
-(1 row)
-
-SELECT num_nulls(VARIADIC '{"1","2","3","4"}'::text[]);
- num_nulls
----------
- 0
-(1 row)
-
-SELECT num_nulls(VARIADIC ARRAY(SELECT CASE WHEN i <> 40 THEN i END FROM generate_series(1, 100) i));
- num_nulls
----------
- 1
-(1 row)
-
--- special cases
-SELECT num_nonnulls(VARIADIC NULL::text[]);
- num_nonnulls
--------------
-
-(1 row)
-
-SELECT num_nonnulls(VARIADIC '{}'::int[]);
- num_nonnulls
--------------
- 0
-(1 row)
-
-SELECT num_nulls(VARIADIC NULL::text[]);
- num_nulls
----------
-
-(1 row)
-
-SELECT num_nulls(VARIADIC '{}'::int[]);
- num_nulls
----------
- 0
-(1 row)
-
--- should fail, one or more arguments is required
-SELECT num_nonnulls();
-ERROR: function num_nonnulls() does not exist
-LINE 1: SELECT num_nonnulls();
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-SELECT num_nulls();
-ERROR: function num_nulls() does not exist
-LINE 1: SELECT num_nulls();
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
---
--- canonicalize_path()
---
-CREATE FUNCTION test_canonicalize_path(text)
- RETURNS text
- AS :'regresslib'
- LANGUAGE C STRICT IMMUTABLE;
-SELECT test_canonicalize_path('/');
- test_canonicalize_path
------------------------
- /
-(1 row)
-
-SELECT test_canonicalize_path('/./abc/def/');
- test_canonicalize_path
------------------------
- /abc/def
-(1 row)
-
-SELECT test_canonicalize_path('/./../abc/def');
- test_canonicalize_path
------------------------
- /abc/def
-(1 row)
-
-SELECT test_canonicalize_path('/./../../abc/def/');
- test_canonicalize_path
------------------------
- /abc/def
-(1 row)
-
-SELECT test_canonicalize_path('/abc/.././def/ghi');
- test_canonicalize_path
------------------------
- /def/ghi
-(1 row)
-
-SELECT test_canonicalize_path('/abc/./../def/ghi//');
- test_canonicalize_path
------------------------
- /def/ghi
-(1 row)
-
-SELECT test_canonicalize_path('/abc/def/../..');
- test_canonicalize_path
------------------------
- /
-(1 row)
-
-SELECT test_canonicalize_path('/abc/def/../../..');
- test_canonicalize_path
------------------------
- /
-(1 row)
-
-SELECT test_canonicalize_path('/abc/def/../../../../ghi/jkl');
- test_canonicalize_path
------------------------
- /ghi/jkl
-(1 row)
-
-SELECT test_canonicalize_path('.');
- test_canonicalize_path
------------------------
- .
-(1 row)
-
-SELECT test_canonicalize_path('./');
- test_canonicalize_path
------------------------
- .
-(1 row)
-
-SELECT test_canonicalize_path('./abc/..');
- test_canonicalize_path
------------------------
- .
-(1 row)
-
-SELECT test_canonicalize_path('abc/../');
- test_canonicalize_path
------------------------
- .
-(1 row)
-
-SELECT test_canonicalize_path('abc/../def');
- test_canonicalize_path
------------------------
- def
-(1 row)
-
-SELECT test_canonicalize_path('..');
- test_canonicalize_path
------------------------
- ..
-(1 row)
-
-SELECT test_canonicalize_path('../abc/def');
- test_canonicalize_path
------------------------
- ../abc/def
-(1 row)
-
-SELECT test_canonicalize_path('../abc/..');
- test_canonicalize_path
------------------------
- ..
-(1 row)
-
-SELECT test_canonicalize_path('../abc/../def');
- test_canonicalize_path
------------------------
- ../def
-(1 row)
-
-SELECT test_canonicalize_path('../abc/../../def/ghi');
- test_canonicalize_path
------------------------
- ../../def/ghi
-(1 row)
-
-SELECT test_canonicalize_path('./abc/./def/.');
- test_canonicalize_path
------------------------
- abc/def
-(1 row)
-
-SELECT test_canonicalize_path('./abc/././def/.');
- test_canonicalize_path
------------------------
- abc/def
-(1 row)
-
-SELECT test_canonicalize_path('./abc/./def/.././ghi/../../../jkl/mno');
- test_canonicalize_path
------------------------
- ../jkl/mno
-(1 row)
-
---
--- pg_log_backend_memory_contexts()
---
--- Memory contexts are logged and they are not returned to the function.
--- Furthermore, their contents can vary depending on the timing. However,
--- we can at least verify that the code doesn't fail, and that the
--- permissions are set properly.
--- -SELECT pg_log_backend_memory_contexts(pg_backend_pid()); - pg_log_backend_memory_contexts --------------------------------- - t -(1 row) - -SELECT pg_log_backend_memory_contexts(pid) FROM pg_stat_activity - WHERE backend_type = 'checkpointer'; - pg_log_backend_memory_contexts --------------------------------- - t -(1 row) - -CREATE ROLE regress_log_memory; -SELECT has_function_privilege('regress_log_memory', - 'pg_log_backend_memory_contexts(integer)', 'EXECUTE'); -- no - has_function_privilege ------------------------- - f -(1 row) - -GRANT EXECUTE ON FUNCTION pg_log_backend_memory_contexts(integer) - TO regress_log_memory; -SELECT has_function_privilege('regress_log_memory', - 'pg_log_backend_memory_contexts(integer)', 'EXECUTE'); -- yes - has_function_privilege ------------------------- - t -(1 row) - -SET ROLE regress_log_memory; -SELECT pg_log_backend_memory_contexts(pg_backend_pid()); - pg_log_backend_memory_contexts --------------------------------- - t -(1 row) - -RESET ROLE; -REVOKE EXECUTE ON FUNCTION pg_log_backend_memory_contexts(integer) - FROM regress_log_memory; -DROP ROLE regress_log_memory; --- --- Test some built-in SRFs --- --- The outputs of these are variable, so we can't just print their results --- directly, but we can at least verify that the code doesn't fail. --- -select setting as segsize -from pg_settings where name = 'wal_segment_size' -\gset -select count(*) > 0 as ok from pg_ls_waldir(); - ok ----- - t -(1 row) - --- Test ProjectSet as well as FunctionScan -select count(*) > 0 as ok from (select pg_ls_waldir()) ss; - ok ----- - t -(1 row) - --- Test not-run-to-completion cases. -select * from pg_ls_waldir() limit 0; - name | size | modification -------+------+-------------- -(0 rows) - -select count(*) > 0 as ok from (select * from pg_ls_waldir() limit 1) ss; - ok ----- - t -(1 row) - -select (w).size = :segsize as ok -from (select pg_ls_waldir() w) ss where length((w).name) = 24 limit 1; - ok ----- - t -(1 row) - -select count(*) >= 0 as ok from pg_ls_archive_statusdir(); - ok ----- - t -(1 row) - -select count(*) >= 0 as ok from pg_ls_summariesdir(); - ok ----- - t -(1 row) - --- pg_read_file() -select length(pg_read_file('postmaster.pid')) > 20; - ?column? ----------- - t -(1 row) - -select length(pg_read_file('postmaster.pid', 1, 20)); - length --------- - 20 -(1 row) - --- Test missing_ok -select pg_read_file('does not exist'); -- error -ERROR: could not open file "does not exist" for reading: No such file or directory -select pg_read_file('does not exist', true) IS NULL; -- ok - ?column? ----------- - t -(1 row) - --- Test invalid argument -select pg_read_file('does not exist', 0, -1); -- error -ERROR: requested length cannot be negative -select pg_read_file('does not exist', 0, -1, true); -- error -ERROR: requested length cannot be negative --- pg_read_binary_file() -select length(pg_read_binary_file('postmaster.pid')) > 20; - ?column? ----------- - t -(1 row) - -select length(pg_read_binary_file('postmaster.pid', 1, 20)); - length --------- - 20 -(1 row) - --- Test missing_ok -select pg_read_binary_file('does not exist'); -- error -ERROR: could not open file "does not exist" for reading: No such file or directory -select pg_read_binary_file('does not exist', true) IS NULL; -- ok - ?column? 
----------- - t -(1 row) - --- Test invalid argument -select pg_read_binary_file('does not exist', 0, -1); -- error -ERROR: requested length cannot be negative -select pg_read_binary_file('does not exist', 0, -1, true); -- error -ERROR: requested length cannot be negative --- pg_stat_file() -select size > 20, isdir from pg_stat_file('postmaster.pid'); - ?column? | isdir -----------+------- - t | f -(1 row) - --- pg_ls_dir() -select * from (select pg_ls_dir('.') a) a where a = 'base' limit 1; - a ------- - base -(1 row) - --- Test missing_ok (second argument) -select pg_ls_dir('does not exist', false, false); -- error -ERROR: could not open directory "does not exist": No such file or directory -select pg_ls_dir('does not exist', true, false); -- ok - pg_ls_dir ------------ -(0 rows) - --- Test include_dot_dirs (third argument) -select count(*) = 1 as dot_found - from pg_ls_dir('.', false, true) as ls where ls = '.'; - dot_found ------------ - t -(1 row) - -select count(*) = 1 as dot_found - from pg_ls_dir('.', false, false) as ls where ls = '.'; - dot_found ------------ - f -(1 row) - --- pg_timezone_names() -select * from (select (pg_timezone_names()).name) ptn where name='UTC' limit 1; - name ------- - UTC -(1 row) - --- pg_tablespace_databases() -select count(*) > 0 from - (select pg_tablespace_databases(oid) as pts from pg_tablespace - where spcname = 'pg_default') pts - join pg_database db on pts.pts = db.oid; - ?column? ----------- - t -(1 row) - --- --- Test replication slot directory functions --- -CREATE ROLE regress_slot_dir_funcs; --- Not available by default. -SELECT has_function_privilege('regress_slot_dir_funcs', - 'pg_ls_logicalsnapdir()', 'EXECUTE'); - has_function_privilege ------------------------- - f -(1 row) - -SELECT has_function_privilege('regress_slot_dir_funcs', - 'pg_ls_logicalmapdir()', 'EXECUTE'); - has_function_privilege ------------------------- - f -(1 row) - -SELECT has_function_privilege('regress_slot_dir_funcs', - 'pg_ls_replslotdir(text)', 'EXECUTE'); - has_function_privilege ------------------------- - f -(1 row) - -GRANT pg_monitor TO regress_slot_dir_funcs; --- Role is now part of pg_monitor, so these are available. 
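The GRANT above takes effect because EXECUTE on these directory functions is granted to the predefined role pg_monitor, and privileges flow to its members; as a sketch, the membership itself can be confirmed with:

    SELECT pg_has_role('regress_slot_dir_funcs', 'pg_monitor', 'MEMBER');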
-SELECT has_function_privilege('regress_slot_dir_funcs', - 'pg_ls_logicalsnapdir()', 'EXECUTE'); - has_function_privilege ------------------------- - t -(1 row) - -SELECT has_function_privilege('regress_slot_dir_funcs', - 'pg_ls_logicalmapdir()', 'EXECUTE'); - has_function_privilege ------------------------- - t -(1 row) - -SELECT has_function_privilege('regress_slot_dir_funcs', - 'pg_ls_replslotdir(text)', 'EXECUTE'); - has_function_privilege ------------------------- - t -(1 row) - -DROP ROLE regress_slot_dir_funcs; --- --- Test adding a support function to a subject function --- -CREATE FUNCTION my_int_eq(int, int) RETURNS bool - LANGUAGE internal STRICT IMMUTABLE PARALLEL SAFE - AS $$int4eq$$; --- By default, planner does not think that's selective -EXPLAIN (COSTS OFF) -SELECT * FROM tenk1 a JOIN tenk1 b ON a.unique1 = b.unique1 -WHERE my_int_eq(a.unique2, 42); - QUERY PLAN ----------------------------------------------- - Hash Join - Hash Cond: (b.unique1 = a.unique1) - -> Seq Scan on tenk1 b - -> Hash - -> Seq Scan on tenk1 a - Filter: my_int_eq(unique2, 42) -(6 rows) - --- With support function that knows it's int4eq, we get a different plan -CREATE FUNCTION test_support_func(internal) - RETURNS internal - AS :'regresslib', 'test_support_func' - LANGUAGE C STRICT; -ALTER FUNCTION my_int_eq(int, int) SUPPORT test_support_func; -EXPLAIN (COSTS OFF) -SELECT * FROM tenk1 a JOIN tenk1 b ON a.unique1 = b.unique1 -WHERE my_int_eq(a.unique2, 42); - QUERY PLAN -------------------------------------------------- - Nested Loop - -> Seq Scan on tenk1 a - Filter: my_int_eq(unique2, 42) - -> Index Scan using tenk1_unique1 on tenk1 b - Index Cond: (unique1 = a.unique1) -(5 rows) - --- Also test non-default rowcount estimate -CREATE FUNCTION my_gen_series(int, int) RETURNS SETOF integer - LANGUAGE internal STRICT IMMUTABLE PARALLEL SAFE - AS $$generate_series_int4$$ - SUPPORT test_support_func; -EXPLAIN (COSTS OFF) -SELECT * FROM tenk1 a JOIN my_gen_series(1,1000) g ON a.unique1 = g; - QUERY PLAN ----------------------------------------- - Hash Join - Hash Cond: (g.g = a.unique1) - -> Function Scan on my_gen_series g - -> Hash - -> Seq Scan on tenk1 a -(5 rows) - -EXPLAIN (COSTS OFF) -SELECT * FROM tenk1 a JOIN my_gen_series(1,10) g ON a.unique1 = g; - QUERY PLAN -------------------------------------------------- - Nested Loop - -> Function Scan on my_gen_series g - -> Index Scan using tenk1_unique1 on tenk1 a - Index Cond: (unique1 = g.g) -(4 rows) - --- --- Test the SupportRequestRows support function for generate_series_timestamp() --- --- Ensure the row estimate matches the actual rows -SELECT explain_mask_costs($$ -SELECT * FROM generate_series(TIMESTAMPTZ '2024-02-01', TIMESTAMPTZ '2024-03-01', INTERVAL '1 day') g(s);$$, -true, true, false, true); - explain_mask_costs ------------------------------------------------------------------------------------------- - Function Scan on generate_series g (cost=N..N rows=30 width=N) (actual rows=30 loops=1) -(1 row) - --- As above but with generate_series_timestamp -SELECT explain_mask_costs($$ -SELECT * FROM generate_series(TIMESTAMP '2024-02-01', TIMESTAMP '2024-03-01', INTERVAL '1 day') g(s);$$, -true, true, false, true); - explain_mask_costs ------------------------------------------------------------------------------------------- - Function Scan on generate_series g (cost=N..N rows=30 width=N) (actual rows=30 loops=1) -(1 row) - --- As above but with generate_series_timestamptz_at_zone() -SELECT explain_mask_costs($$ -SELECT * FROM 
generate_series(TIMESTAMPTZ '2024-02-01', TIMESTAMPTZ '2024-03-01', INTERVAL '1 day', 'UTC') g(s);$$, -true, true, false, true); - explain_mask_costs ------------------------------------------------------------------------------------------- - Function Scan on generate_series g (cost=N..N rows=30 width=N) (actual rows=30 loops=1) -(1 row) - --- Ensure the estimated and actual row counts match when the range isn't --- evenly divisible by the step -SELECT explain_mask_costs($$ -SELECT * FROM generate_series(TIMESTAMPTZ '2024-02-01', TIMESTAMPTZ '2024-03-01', INTERVAL '7 day') g(s);$$, -true, true, false, true); - explain_mask_costs ----------------------------------------------------------------------------------------- - Function Scan on generate_series g (cost=N..N rows=5 width=N) (actual rows=5 loops=1) -(1 row) - --- Ensure the estimates match when step is decreasing -SELECT explain_mask_costs($$ -SELECT * FROM generate_series(TIMESTAMPTZ '2024-03-01', TIMESTAMPTZ '2024-02-01', INTERVAL '-1 day') g(s);$$, -true, true, false, true); - explain_mask_costs ------------------------------------------------------------------------------------------- - Function Scan on generate_series g (cost=N..N rows=30 width=N) (actual rows=30 loops=1) -(1 row) - --- Ensure an empty range estimates 1 row -SELECT explain_mask_costs($$ -SELECT * FROM generate_series(TIMESTAMPTZ '2024-03-01', TIMESTAMPTZ '2024-02-01', INTERVAL '1 day') g(s);$$, -true, true, false, true); - explain_mask_costs ----------------------------------------------------------------------------------------- - Function Scan on generate_series g (cost=N..N rows=1 width=N) (actual rows=0 loops=1) -(1 row) - --- Ensure we get the default row estimate for infinity values -SELECT explain_mask_costs($$ -SELECT * FROM generate_series(TIMESTAMPTZ '-infinity', TIMESTAMPTZ 'infinity', INTERVAL '1 day') g(s);$$, -false, true, false, true); - explain_mask_costs -------------------------------------------------------------------- - Function Scan on generate_series g (cost=N..N rows=1000 width=N) -(1 row) - --- Ensure the row estimate behaves correctly when step size is zero. --- We expect generate_series_timestamp() to throw the error rather than in --- the support function. 
-SELECT * FROM generate_series(TIMESTAMPTZ '2024-02-01', TIMESTAMPTZ '2024-03-01', INTERVAL '0 day') g(s); -ERROR: step size cannot equal zero --- --- Test the SupportRequestRows support function for generate_series_numeric() --- --- Ensure the row estimate matches the actual rows -SELECT explain_mask_costs($$ -SELECT * FROM generate_series(1.0, 25.0) g(s);$$, -true, true, false, true); - explain_mask_costs ------------------------------------------------------------------------------------------- - Function Scan on generate_series g (cost=N..N rows=25 width=N) (actual rows=25 loops=1) -(1 row) - --- As above but with non-default step -SELECT explain_mask_costs($$ -SELECT * FROM generate_series(1.0, 25.0, 2.0) g(s);$$, -true, true, false, true); - explain_mask_costs ------------------------------------------------------------------------------------------- - Function Scan on generate_series g (cost=N..N rows=13 width=N) (actual rows=13 loops=1) -(1 row) - --- Ensure the estimates match when step is decreasing -SELECT explain_mask_costs($$ -SELECT * FROM generate_series(25.0, 1.0, -1.0) g(s);$$, -true, true, false, true); - explain_mask_costs ------------------------------------------------------------------------------------------- - Function Scan on generate_series g (cost=N..N rows=25 width=N) (actual rows=25 loops=1) -(1 row) - --- Ensure an empty range estimates 1 row -SELECT explain_mask_costs($$ -SELECT * FROM generate_series(25.0, 1.0, 1.0) g(s);$$, -true, true, false, true); - explain_mask_costs ----------------------------------------------------------------------------------------- - Function Scan on generate_series g (cost=N..N rows=1 width=N) (actual rows=0 loops=1) -(1 row) - --- Ensure we get the default row estimate for error cases (infinity/NaN values --- and zero step size) -SELECT explain_mask_costs($$ -SELECT * FROM generate_series('-infinity'::NUMERIC, 'infinity'::NUMERIC, 1.0) g(s);$$, -false, true, false, true); - explain_mask_costs -------------------------------------------------------------------- - Function Scan on generate_series g (cost=N..N rows=1000 width=N) -(1 row) - -SELECT explain_mask_costs($$ -SELECT * FROM generate_series(1.0, 25.0, 'NaN'::NUMERIC) g(s);$$, -false, true, false, true); - explain_mask_costs -------------------------------------------------------------------- - Function Scan on generate_series g (cost=N..N rows=1000 width=N) -(1 row) - -SELECT explain_mask_costs($$ -SELECT * FROM generate_series(25.0, 2.0, 0.0) g(s);$$, -false, true, false, true); - explain_mask_costs -------------------------------------------------------------------- - Function Scan on generate_series g (cost=N..N rows=1000 width=N) -(1 row) - --- Test functions for control data -SELECT count(*) > 0 AS ok FROM pg_control_checkpoint(); - ok ----- - t -(1 row) - -SELECT count(*) > 0 AS ok FROM pg_control_init(); - ok ----- - t -(1 row) - -SELECT count(*) > 0 AS ok FROM pg_control_recovery(); - ok ----- - t -(1 row) - -SELECT count(*) > 0 AS ok FROM pg_control_system(); - ok ----- - t -(1 row) - --- pg_split_walfile_name, pg_walfile_name & pg_walfile_name_offset -SELECT * FROM pg_split_walfile_name(NULL); - segment_number | timeline_id -----------------+------------- - | -(1 row) - -SELECT * FROM pg_split_walfile_name('invalid'); -ERROR: invalid WAL file name "invalid" -SELECT segment_number > 0 AS ok_segment_number, timeline_id - FROM pg_split_walfile_name('000000010000000100000000'); - ok_segment_number | timeline_id --------------------+------------- - t | 1 -(1 row) - 
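pg_split_walfile_name() decomposes the names produced by pg_walfile_name() and pg_walfile_name_offset(), so the two directions can be round-tripped; a sketch against the current insert position (not available on a standby in recovery):

    SELECT segment_number, timeline_id, file_offset
    FROM pg_walfile_name_offset(pg_current_wal_lsn()),
         pg_split_walfile_name(file_name);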
-SELECT segment_number > 0 AS ok_segment_number, timeline_id - FROM pg_split_walfile_name('ffffffFF00000001000000af'); - ok_segment_number | timeline_id --------------------+------------- - t | 4294967295 -(1 row) - -SELECT setting::int8 AS segment_size -FROM pg_settings -WHERE name = 'wal_segment_size' -\gset -SELECT segment_number, file_offset -FROM pg_walfile_name_offset('0/0'::pg_lsn + :segment_size), - pg_split_walfile_name(file_name); - segment_number | file_offset -----------------+------------- - 1 | 0 -(1 row) - -SELECT segment_number, file_offset -FROM pg_walfile_name_offset('0/0'::pg_lsn + :segment_size + 1), - pg_split_walfile_name(file_name); - segment_number | file_offset -----------------+------------- - 1 | 1 -(1 row) - -SELECT segment_number, file_offset = :segment_size - 1 -FROM pg_walfile_name_offset('0/0'::pg_lsn + :segment_size - 1), - pg_split_walfile_name(file_name); - segment_number | ?column? -----------------+---------- - 0 | t -(1 row) - --- pg_current_logfile -CREATE ROLE regress_current_logfile; --- not available by default -SELECT has_function_privilege('regress_current_logfile', - 'pg_current_logfile()', 'EXECUTE'); - has_function_privilege ------------------------- - f -(1 row) - -GRANT pg_monitor TO regress_current_logfile; --- role has privileges of pg_monitor and can execute the function -SELECT has_function_privilege('regress_current_logfile', - 'pg_current_logfile()', 'EXECUTE'); - has_function_privilege ------------------------- - t -(1 row) - -DROP ROLE regress_current_logfile; --- pg_column_toast_chunk_id -CREATE TABLE test_chunk_id (a TEXT, b TEXT STORAGE EXTERNAL); -INSERT INTO test_chunk_id VALUES ('x', repeat('x', 8192)); -SELECT t.relname AS toastrel FROM pg_class c - LEFT JOIN pg_class t ON c.reltoastrelid = t.oid - WHERE c.relname = 'test_chunk_id' -\gset -SELECT pg_column_toast_chunk_id(a) IS NULL, - pg_column_toast_chunk_id(b) IN (SELECT chunk_id FROM pg_toast.:toastrel) - FROM test_chunk_id; - ?column? | ?column? -----------+---------- - t | t -(1 row) - -DROP TABLE test_chunk_id; -DROP FUNCTION explain_mask_costs(text, bool, bool, bool, bool); --- test stratnum support functions -SELECT gist_stratnum_identity(3::smallint); - gist_stratnum_identity ------------------------- - 3 -(1 row) - -SELECT gist_stratnum_identity(18::smallint); - gist_stratnum_identity ------------------------- - 18 -(1 row) - +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/sysviews.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/sysviews.out --- /Users/admin/pgsql/src/test/regress/expected/sysviews.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/sysviews.out 2024-12-13 13:20:10 @@ -1,225 +1,2 @@ --- --- Test assorted system views --- --- This test is mainly meant to provide some code coverage for the --- set-returning functions that underlie certain system views. --- The output of most of these functions is very environment-dependent, --- so our ability to test with fixed expected output is pretty limited; --- but even a trivial check of count(*) will exercise the normal code path --- through the SRF. 
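The same count(*) >= 0 smoke test generalizes to any SRF-backed catalog view whose contents are too environment-dependent to pin down; for example (a sketch, not part of the original test):

    select count(*) >= 0 as ok from pg_shmem_allocations;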
-select count(*) >= 0 as ok from pg_available_extension_versions; - ok ----- - t -(1 row) - -select count(*) >= 0 as ok from pg_available_extensions; - ok ----- - t -(1 row) - --- The entire output of pg_backend_memory_contexts is not stable, --- we test only the existence and basic condition of TopMemoryContext. -select type, name, ident, level, total_bytes >= free_bytes - from pg_backend_memory_contexts where level = 1; - type | name | ident | level | ?column? -----------+------------------+-------+-------+---------- - AllocSet | TopMemoryContext | | 1 | t -(1 row) - --- We can exercise some MemoryContext type stats functions. Most of the --- column values are too platform-dependant to display. --- Ensure stats from the bump allocator look sane. Bump isn't a commonly --- used context, but it is used in tuplesort.c, so open a cursor to keep --- the tuplesort alive long enough for us to query the context stats. -begin; -declare cur cursor for select left(a,10), b - from (values(repeat('a', 512 * 1024),1),(repeat('b', 512),2)) v(a,b) - order by v.a desc; -fetch 1 from cur; - left | b -------------+--- - bbbbbbbbbb | 2 -(1 row) - -select type, name, total_bytes > 0, total_nblocks, free_bytes > 0, free_chunks -from pg_backend_memory_contexts where name = 'Caller tuples'; - type | name | ?column? | total_nblocks | ?column? | free_chunks -------+---------------+----------+---------------+----------+------------- - Bump | Caller tuples | t | 2 | t | 0 -(1 row) - -rollback; --- Further sanity checks on pg_backend_memory_contexts. We expect --- CacheMemoryContext to have multiple children. Ensure that's the case. -with contexts as ( - select * from pg_backend_memory_contexts -) -select count(*) > 1 -from contexts c1, contexts c2 -where c2.name = 'CacheMemoryContext' -and c1.path[c2.level] = c2.path[c2.level]; - ?column? ----------- - t -(1 row) - --- At introduction, pg_config had 23 entries; it may grow -select count(*) > 20 as ok from pg_config; - ok ----- - t -(1 row) - --- We expect no cursors in this test; see also portals.sql -select count(*) = 0 as ok from pg_cursors; - ok ----- - t -(1 row) - -select count(*) >= 0 as ok from pg_file_settings; - ok ----- - t -(1 row) - --- There will surely be at least one rule, with no errors. -select count(*) > 0 as ok, count(*) FILTER (WHERE error IS NOT NULL) = 0 AS no_err - from pg_hba_file_rules; - ok | no_err -----+-------- - t | t -(1 row) - --- There may be no rules, and there should be no errors. -select count(*) >= 0 as ok, count(*) FILTER (WHERE error IS NOT NULL) = 0 AS no_err - from pg_ident_file_mappings; - ok | no_err -----+-------- - t | t -(1 row) - --- There will surely be at least one active lock -select count(*) > 0 as ok from pg_locks; - ok ----- - t -(1 row) - --- We expect no prepared statements in this test; see also prepare.sql -select count(*) = 0 as ok from pg_prepared_statements; - ok ----- - t -(1 row) - --- See also prepared_xacts.sql -select count(*) >= 0 as ok from pg_prepared_xacts; - ok ----- - t -(1 row) - --- There will surely be at least one SLRU cache -select count(*) > 0 as ok from pg_stat_slru; - ok ----- - t -(1 row) - --- There must be only one record -select count(*) = 1 as ok from pg_stat_wal; - ok ----- - t -(1 row) - --- We expect no walreceiver running in this test -select count(*) = 0 as ok from pg_stat_wal_receiver; - ok ----- - t -(1 row) - --- This is to record the prevailing planner enable_foo settings during --- a regression test run. 
-select name, setting from pg_settings where name like 'enable%'; - name | setting ---------------------------------+--------- - enable_async_append | on - enable_bitmapscan | on - enable_distinct_reordering | on - enable_gathermerge | on - enable_group_by_reordering | on - enable_hashagg | on - enable_hashjoin | on - enable_incremental_sort | on - enable_indexonlyscan | on - enable_indexscan | on - enable_material | on - enable_memoize | on - enable_mergejoin | on - enable_nestloop | on - enable_parallel_append | on - enable_parallel_hash | on - enable_partition_pruning | on - enable_partitionwise_aggregate | off - enable_partitionwise_join | off - enable_presorted_aggregate | on - enable_seqscan | on - enable_sort | on - enable_tidscan | on -(23 rows) - --- There are always wait event descriptions for various types. InjectionPoint --- may be present or absent, depending on history since last postmaster start. -select type, count(*) > 0 as ok FROM pg_wait_events - where type <> 'InjectionPoint' group by type order by type COLLATE "C"; - type | ok ------------+---- - Activity | t - BufferPin | t - Client | t - Extension | t - IO | t - IPC | t - LWLock | t - Lock | t - Timeout | t -(9 rows) - --- Test that the pg_timezone_names and pg_timezone_abbrevs views are --- more-or-less working. We can't test their contents in any great detail --- without the outputs changing anytime IANA updates the underlying data, --- but it seems reasonable to expect at least one entry per major meridian. --- (At the time of writing, the actual counts are around 38 because of --- zones using fractional GMT offsets, so this is a pretty loose test.) -select count(distinct utc_offset) >= 24 as ok from pg_timezone_names; - ok ----- - t -(1 row) - -select count(distinct utc_offset) >= 24 as ok from pg_timezone_abbrevs; - ok ----- - t -(1 row) - --- Let's check the non-default timezone abbreviation sets, too -set timezone_abbreviations = 'Australia'; -select count(distinct utc_offset) >= 24 as ok from pg_timezone_abbrevs; - ok ----- - t -(1 row) - -set timezone_abbreviations = 'India'; -select count(distinct utc_offset) >= 24 as ok from pg_timezone_abbrevs; - ok ----- - t -(1 row) - +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? 
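For completeness, timezone_abbreviations is an ordinary GUC, so the non-default 'Australia' and 'India' sets tried above can be undone with a plain RESET; a sketch:

    reset timezone_abbreviations;
    select count(distinct utc_offset) >= 24 as ok from pg_timezone_abbrevs;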
diff -U3 /Users/admin/pgsql/src/test/regress/expected/tsrf.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/tsrf.out --- /Users/admin/pgsql/src/test/regress/expected/tsrf.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/tsrf.out 2024-12-13 13:20:10 @@ -1,712 +1,2 @@ --- --- tsrf - targetlist set returning function tests --- --- simple srf -SELECT generate_series(1, 3); - generate_series ------------------ - 1 - 2 - 3 -(3 rows) - --- parallel iteration -SELECT generate_series(1, 3), generate_series(3,5); - generate_series | generate_series ------------------+----------------- - 1 | 3 - 2 | 4 - 3 | 5 -(3 rows) - --- parallel iteration, different number of rows -SELECT generate_series(1, 2), generate_series(1,4); - generate_series | generate_series ------------------+----------------- - 1 | 1 - 2 | 2 - | 3 - | 4 -(4 rows) - --- srf, with SRF argument -SELECT generate_series(1, generate_series(1, 3)); - generate_series ------------------ - 1 - 1 - 2 - 1 - 2 - 3 -(6 rows) - --- but we've traditionally rejected the same in FROM -SELECT * FROM generate_series(1, generate_series(1, 3)); -ERROR: set-returning functions must appear at top level of FROM -LINE 1: SELECT * FROM generate_series(1, generate_series(1, 3)); - ^ --- srf, with two SRF arguments -SELECT generate_series(generate_series(1,3), generate_series(2, 4)); - generate_series ------------------ - 1 - 2 - 2 - 3 - 3 - 4 -(6 rows) - --- check proper nesting of SRFs in different expressions -explain (verbose, costs off) -SELECT generate_series(1, generate_series(1, 3)), generate_series(2, 4); - QUERY PLAN --------------------------------------------------------------------------------- - ProjectSet - Output: generate_series(1, (generate_series(1, 3))), (generate_series(2, 4)) - -> ProjectSet - Output: generate_series(1, 3), generate_series(2, 4) - -> Result -(5 rows) - -SELECT generate_series(1, generate_series(1, 3)), generate_series(2, 4); - generate_series | generate_series ------------------+----------------- - 1 | 2 - 1 | 3 - 2 | 3 - 1 | 4 - 2 | 4 - 3 | 4 -(6 rows) - -CREATE TABLE few(id int, dataa text, datab text); -INSERT INTO few VALUES(1, 'a', 'foo'),(2, 'a', 'bar'),(3, 'b', 'bar'); --- SRF with a provably-dummy relation -explain (verbose, costs off) -SELECT unnest(ARRAY[1, 2]) FROM few WHERE false; - QUERY PLAN --------------------------------------- - ProjectSet - Output: unnest('{1,2}'::integer[]) - -> Result - One-Time Filter: false -(4 rows) - -SELECT unnest(ARRAY[1, 2]) FROM few WHERE false; - unnest --------- -(0 rows) - --- SRF shouldn't prevent upper query from recognizing lower as dummy -explain (verbose, costs off) -SELECT * FROM few f1, - (SELECT unnest(ARRAY[1,2]) FROM few f2 WHERE false OFFSET 0) ss; - QUERY PLAN ------------------------------------------------- - Result - Output: f1.id, f1.dataa, f1.datab, ss.unnest - One-Time Filter: false -(3 rows) - -SELECT * FROM few f1, - (SELECT unnest(ARRAY[1,2]) FROM few f2 WHERE false OFFSET 0) ss; - id | dataa | datab | unnest -----+-------+-------+-------- -(0 rows) - --- SRF output order of sorting is maintained, if SRF is not referenced -SELECT few.id, generate_series(1,3) g FROM few ORDER BY id DESC; - id | g -----+--- - 3 | 1 - 3 | 2 - 3 | 3 - 2 | 1 - 2 | 2 - 2 | 3 - 1 | 1 - 1 | 2 - 1 | 3 -(9 rows) - --- but SRFs can be referenced in sort -SELECT few.id, generate_series(1,3) g FROM few ORDER BY id, g DESC; - id | g -----+--- - 1 | 3 - 1 | 2 - 1 | 1 - 2 | 3 - 2 | 2 - 2 | 1 - 3 | 3 - 3 | 2 - 3 
| 1 -(9 rows) - -SELECT few.id, generate_series(1,3) g FROM few ORDER BY id, generate_series(1,3) DESC; - id | g -----+--- - 1 | 3 - 1 | 2 - 1 | 1 - 2 | 3 - 2 | 2 - 2 | 1 - 3 | 3 - 3 | 2 - 3 | 1 -(9 rows) - --- it's weird to have ORDER BYs that increase the number of results -SELECT few.id FROM few ORDER BY id, generate_series(1,3) DESC; - id ----- - 1 - 1 - 1 - 2 - 2 - 2 - 3 - 3 - 3 -(9 rows) - --- SRFs are computed after aggregation -SET enable_hashagg TO 0; -- stable output order -SELECT few.dataa, count(*), min(id), max(id), unnest('{1,1,3}'::int[]) FROM few WHERE few.id = 1 GROUP BY few.dataa; - dataa | count | min | max | unnest --------+-------+-----+-----+-------- - a | 1 | 1 | 1 | 1 - a | 1 | 1 | 1 | 1 - a | 1 | 1 | 1 | 3 -(3 rows) - --- unless referenced in GROUP BY clause -SELECT few.dataa, count(*), min(id), max(id), unnest('{1,1,3}'::int[]) FROM few WHERE few.id = 1 GROUP BY few.dataa, unnest('{1,1,3}'::int[]); - dataa | count | min | max | unnest --------+-------+-----+-----+-------- - a | 2 | 1 | 1 | 1 - a | 1 | 1 | 1 | 3 -(2 rows) - -SELECT few.dataa, count(*), min(id), max(id), unnest('{1,1,3}'::int[]) FROM few WHERE few.id = 1 GROUP BY few.dataa, 5; - dataa | count | min | max | unnest --------+-------+-----+-----+-------- - a | 2 | 1 | 1 | 1 - a | 1 | 1 | 1 | 3 -(2 rows) - -RESET enable_hashagg; --- check HAVING works when GROUP BY does [not] reference SRF output -SELECT dataa, generate_series(1,1), count(*) FROM few GROUP BY 1 HAVING count(*) > 1; - dataa | generate_series | count --------+-----------------+------- - a | 1 | 2 -(1 row) - -SELECT dataa, generate_series(1,1), count(*) FROM few GROUP BY 1, 2 HAVING count(*) > 1; - dataa | generate_series | count --------+-----------------+------- - a | 1 | 2 -(1 row) - --- it's weird to have GROUP BYs that increase the number of results -SELECT few.dataa, count(*) FROM few WHERE dataa = 'a' GROUP BY few.dataa ORDER BY 2; - dataa | count --------+------- - a | 2 -(1 row) - -SELECT few.dataa, count(*) FROM few WHERE dataa = 'a' GROUP BY few.dataa, unnest('{1,1,3}'::int[]) ORDER BY 2; - dataa | count --------+------- - a | 2 - a | 4 -(2 rows) - --- SRFs are not allowed if they'd need to be conditionally executed -SELECT q1, case when q1 > 0 then generate_series(1,3) else 0 end FROM int8_tbl; -ERROR: set-returning functions are not allowed in CASE -LINE 1: SELECT q1, case when q1 > 0 then generate_series(1,3) else 0... - ^ -HINT: You might be able to move the set-returning function into a LATERAL FROM item. -SELECT q1, coalesce(generate_series(1,3), 0) FROM int8_tbl; -ERROR: set-returning functions are not allowed in COALESCE -LINE 1: SELECT q1, coalesce(generate_series(1,3), 0) FROM int8_tbl; - ^ -HINT: You might be able to move the set-returning function into a LATERAL FROM item. --- SRFs are not allowed in aggregate arguments -SELECT min(generate_series(1, 3)) FROM few; -ERROR: aggregate function calls cannot contain set-returning function calls -LINE 1: SELECT min(generate_series(1, 3)) FROM few; - ^ -HINT: You might be able to move the set-returning function into a LATERAL FROM item. --- ... 
unless they're within a sub-select -SELECT sum((3 = ANY(SELECT generate_series(1,4)))::int); - sum ------ - 1 -(1 row) - -SELECT sum((3 = ANY(SELECT lag(x) over(order by x) - FROM generate_series(1,4) x))::int); - sum ------ - 1 -(1 row) - --- SRFs are not allowed in window function arguments, either -SELECT min(generate_series(1, 3)) OVER() FROM few; -ERROR: window function calls cannot contain set-returning function calls -LINE 1: SELECT min(generate_series(1, 3)) OVER() FROM few; - ^ -HINT: You might be able to move the set-returning function into a LATERAL FROM item. --- SRFs are normally computed after window functions -SELECT id,lag(id) OVER(), count(*) OVER(), generate_series(1,3) FROM few; - id | lag | count | generate_series -----+-----+-------+----------------- - 1 | | 3 | 1 - 1 | | 3 | 2 - 1 | | 3 | 3 - 2 | 1 | 3 | 1 - 2 | 1 | 3 | 2 - 2 | 1 | 3 | 3 - 3 | 2 | 3 | 1 - 3 | 2 | 3 | 2 - 3 | 2 | 3 | 3 -(9 rows) - --- unless referencing SRFs -SELECT SUM(count(*)) OVER(PARTITION BY generate_series(1,3) ORDER BY generate_series(1,3)), generate_series(1,3) g FROM few GROUP BY g; - sum | g ------+--- - 3 | 1 - 3 | 2 - 3 | 3 -(3 rows) - --- sorting + grouping -SELECT few.dataa, count(*), min(id), max(id), generate_series(1,3) FROM few GROUP BY few.dataa ORDER BY 5, 1; - dataa | count | min | max | generate_series --------+-------+-----+-----+----------------- - a | 2 | 1 | 2 | 1 - b | 1 | 3 | 3 | 1 - a | 2 | 1 | 2 | 2 - b | 1 | 3 | 3 | 2 - a | 2 | 1 | 2 | 3 - b | 1 | 3 | 3 | 3 -(6 rows) - --- grouping sets are a bit special, they produce NULLs in columns not actually NULL -set enable_hashagg = false; -SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab); - dataa | b | g | count --------+-----+---+------- - a | bar | 1 | 1 - a | bar | 2 | 1 - a | foo | 1 | 1 - a | foo | 2 | 1 - a | | 1 | 2 - a | | 2 | 2 - b | bar | 1 | 1 - b | bar | 2 | 1 - b | | 1 | 1 - b | | 2 | 1 - | | 1 | 3 - | | 2 | 3 - | bar | 1 | 2 - | bar | 2 | 2 - | foo | 1 | 1 - | foo | 2 | 1 -(16 rows) - -SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab) ORDER BY dataa; - dataa | b | g | count --------+-----+---+------- - a | bar | 1 | 1 - a | bar | 2 | 1 - a | foo | 1 | 1 - a | foo | 2 | 1 - a | | 1 | 2 - a | | 2 | 2 - b | bar | 1 | 1 - b | bar | 2 | 1 - b | | 1 | 1 - b | | 2 | 1 - | | 1 | 3 - | | 2 | 3 - | bar | 1 | 2 - | bar | 2 | 2 - | foo | 1 | 1 - | foo | 2 | 1 -(16 rows) - -SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab) ORDER BY g; - dataa | b | g | count --------+-----+---+------- - a | bar | 1 | 1 - a | foo | 1 | 1 - a | | 1 | 2 - b | bar | 1 | 1 - b | | 1 | 1 - | | 1 | 3 - | bar | 1 | 2 - | foo | 1 | 1 - | foo | 2 | 1 - a | bar | 2 | 1 - b | | 2 | 1 - a | foo | 2 | 1 - | bar | 2 | 2 - a | | 2 | 2 - | | 2 | 3 - b | bar | 2 | 1 -(16 rows) - -SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g); - dataa | b | g | count --------+-----+---+------- - a | bar | 1 | 1 - a | bar | 2 | 1 - a | bar | | 2 - a | foo | 1 | 1 - a | foo | 2 | 1 - a | foo | | 2 - a | | | 4 - b | bar | 1 | 1 - b | bar | 2 | 1 - b | bar | | 2 - b | | | 2 - | | | 6 - | bar | 1 | 2 - | bar | 2 | 2 - | bar | | 4 - | foo | 1 | 1 - | foo | 2 | 1 - | foo | | 2 - a | | 1 | 2 - b | | 1 | 1 - | | 1 | 3 - a | | 2 | 2 - b | | 2 | 1 - | | 2 | 3 -(24 rows) - -SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g) ORDER BY dataa; - dataa | b | g | count 
--------+-----+---+------- - a | foo | | 2 - a | | | 4 - a | | 2 | 2 - a | bar | 1 | 1 - a | bar | 2 | 1 - a | bar | | 2 - a | foo | 1 | 1 - a | foo | 2 | 1 - a | | 1 | 2 - b | bar | 1 | 1 - b | | | 2 - b | | 1 | 1 - b | bar | 2 | 1 - b | bar | | 2 - b | | 2 | 1 - | | 2 | 3 - | | | 6 - | bar | 1 | 2 - | bar | 2 | 2 - | bar | | 4 - | foo | 1 | 1 - | foo | 2 | 1 - | foo | | 2 - | | 1 | 3 -(24 rows) - -SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g) ORDER BY g; - dataa | b | g | count --------+-----+---+------- - a | bar | 1 | 1 - a | foo | 1 | 1 - b | bar | 1 | 1 - | bar | 1 | 2 - | foo | 1 | 1 - a | | 1 | 2 - b | | 1 | 1 - | | 1 | 3 - a | | 2 | 2 - b | | 2 | 1 - | bar | 2 | 2 - | | 2 | 3 - | foo | 2 | 1 - a | bar | 2 | 1 - a | foo | 2 | 1 - b | bar | 2 | 1 - a | | | 4 - b | bar | | 2 - b | | | 2 - | | | 6 - a | foo | | 2 - a | bar | | 2 - | bar | | 4 - | foo | | 2 -(24 rows) - -reset enable_hashagg; --- case with degenerate ORDER BY -explain (verbose, costs off) -select 'foo' as f, generate_series(1,2) as g from few order by 1; - QUERY PLAN ----------------------------------------------- - ProjectSet - Output: 'foo'::text, generate_series(1, 2) - -> Seq Scan on public.few - Output: id, dataa, datab -(4 rows) - -select 'foo' as f, generate_series(1,2) as g from few order by 1; - f | g ------+--- - foo | 1 - foo | 2 - foo | 1 - foo | 2 - foo | 1 - foo | 2 -(6 rows) - --- data modification -CREATE TABLE fewmore AS SELECT generate_series(1,3) AS data; -INSERT INTO fewmore VALUES(generate_series(4,5)); -SELECT * FROM fewmore; - data ------- - 1 - 2 - 3 - 4 - 5 -(5 rows) - --- SRFs are not allowed in UPDATE (they once were, but it was nonsense) -UPDATE fewmore SET data = generate_series(4,9); -ERROR: set-returning functions are not allowed in UPDATE -LINE 1: UPDATE fewmore SET data = generate_series(4,9); - ^ --- SRFs are not allowed in RETURNING -INSERT INTO fewmore VALUES(1) RETURNING generate_series(1,3); -ERROR: set-returning functions are not allowed in RETURNING -LINE 1: INSERT INTO fewmore VALUES(1) RETURNING generate_series(1,3)... - ^ --- nor standalone VALUES (but surely this is a bug?) -VALUES(1, generate_series(1,2)); -ERROR: set-returning functions are not allowed in VALUES -LINE 1: VALUES(1, generate_series(1,2)); - ^ --- We allow tSRFs that are not at top level -SELECT int4mul(generate_series(1,2), 10); - int4mul ---------- - 10 - 20 -(2 rows) - -SELECT generate_series(1,3) IS DISTINCT FROM 2; - ?column? ----------- - t - f - t -(3 rows) - --- but SRFs in function RTEs must be at top level (annoying restriction) -SELECT * FROM int4mul(generate_series(1,2), 10); -ERROR: set-returning functions must appear at top level of FROM -LINE 1: SELECT * FROM int4mul(generate_series(1,2), 10); - ^ --- DISTINCT ON is evaluated before tSRF evaluation if SRF is not --- referenced either in ORDER BY or in the DISTINCT ON list. The ORDER --- BY reference can be implicitly generated, if there's no other ORDER BY. 
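Before the examples below, the rule restated as code: to force the SRF to run after DISTINCT ON regardless of ORDER BY, apply it to the already-distinct rows via a function item in FROM (implicitly LATERAL); a sketch using the same VALUES list:

    SELECT d.a, d.b, g
    FROM (SELECT DISTINCT ON (a) a, b
          FROM (VALUES (3,2),(3,1),(1,1),(1,4),(5,3),(5,1)) AS t(a,b)) d,
         generate_series(1,3) g;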
--- implicit reference (via implicit ORDER) to all columns -SELECT DISTINCT ON (a) a, b, generate_series(1,3) g -FROM (VALUES (3, 2), (3,1), (1,1), (1,4), (5,3), (5,1)) AS t(a, b); - a | b | g ----+---+--- - 1 | 1 | 1 - 3 | 2 | 1 - 5 | 3 | 1 -(3 rows) - --- unreferenced in DISTINCT ON or ORDER BY -SELECT DISTINCT ON (a) a, b, generate_series(1,3) g -FROM (VALUES (3, 2), (3,1), (1,1), (1,4), (5,3), (5,1)) AS t(a, b) -ORDER BY a, b DESC; - a | b | g ----+---+--- - 1 | 4 | 1 - 1 | 4 | 2 - 1 | 4 | 3 - 3 | 2 | 1 - 3 | 2 | 2 - 3 | 2 | 3 - 5 | 3 | 1 - 5 | 3 | 2 - 5 | 3 | 3 -(9 rows) - --- referenced in ORDER BY -SELECT DISTINCT ON (a) a, b, generate_series(1,3) g -FROM (VALUES (3, 2), (3,1), (1,1), (1,4), (5,3), (5,1)) AS t(a, b) -ORDER BY a, b DESC, g DESC; - a | b | g ----+---+--- - 1 | 4 | 3 - 3 | 2 | 3 - 5 | 3 | 3 -(3 rows) - --- referenced in ORDER BY and DISTINCT ON -SELECT DISTINCT ON (a, b, g) a, b, generate_series(1,3) g -FROM (VALUES (3, 2), (3,1), (1,1), (1,4), (5,3), (5,1)) AS t(a, b) -ORDER BY a, b DESC, g DESC; - a | b | g ----+---+--- - 1 | 4 | 3 - 1 | 4 | 2 - 1 | 4 | 1 - 1 | 1 | 3 - 1 | 1 | 2 - 1 | 1 | 1 - 3 | 2 | 3 - 3 | 2 | 2 - 3 | 2 | 1 - 3 | 1 | 3 - 3 | 1 | 2 - 3 | 1 | 1 - 5 | 3 | 3 - 5 | 3 | 2 - 5 | 3 | 1 - 5 | 1 | 3 - 5 | 1 | 2 - 5 | 1 | 1 -(18 rows) - --- only SRF mentioned in DISTINCT ON -SELECT DISTINCT ON (g) a, b, generate_series(1,3) g -FROM (VALUES (3, 2), (3,1), (1,1), (1,4), (5,3), (5,1)) AS t(a, b); - a | b | g ----+---+--- - 3 | 2 | 1 - 5 | 1 | 2 - 3 | 1 | 3 -(3 rows) - --- LIMIT / OFFSET is evaluated after SRF evaluation -SELECT a, generate_series(1,2) FROM (VALUES(1),(2),(3)) r(a) LIMIT 2 OFFSET 2; - a | generate_series ----+----------------- - 2 | 1 - 2 | 2 -(2 rows) - --- SRFs are not allowed in LIMIT. -SELECT 1 LIMIT generate_series(1,3); -ERROR: set-returning functions are not allowed in LIMIT -LINE 1: SELECT 1 LIMIT generate_series(1,3); - ^ --- tSRF in correlated subquery, referencing table outside -SELECT (SELECT generate_series(1,3) LIMIT 1 OFFSET few.id) FROM few; - generate_series ------------------ - 2 - 3 - -(3 rows) - --- tSRF in correlated subquery, referencing SRF outside -SELECT (SELECT generate_series(1,3) LIMIT 1 OFFSET g.i) FROM generate_series(0,3) g(i); - generate_series ------------------ - 1 - 2 - 3 - -(4 rows) - --- Operators can return sets too -CREATE OPERATOR |@| (PROCEDURE = unnest, RIGHTARG = ANYARRAY); -SELECT |@|ARRAY[1,2,3]; - ?column? ----------- - 1 - 2 - 3 -(3 rows) - --- Some fun cases involving duplicate SRF calls -explain (verbose, costs off) -select generate_series(1,3) as x, generate_series(1,3) + 1 as xp1; - QUERY PLAN ------------------------------------------------------------------- - Result - Output: (generate_series(1, 3)), ((generate_series(1, 3)) + 1) - -> ProjectSet - Output: generate_series(1, 3) - -> Result -(5 rows) - -select generate_series(1,3) as x, generate_series(1,3) + 1 as xp1; - x | xp1 ----+----- - 1 | 2 - 2 | 3 - 3 | 4 -(3 rows) - -explain (verbose, costs off) -select generate_series(1,3)+1 order by generate_series(1,3); - QUERY PLAN ------------------------------------------------------------------------- - Sort - Output: (((generate_series(1, 3)) + 1)), (generate_series(1, 3)) - Sort Key: (generate_series(1, 3)) - -> Result - Output: ((generate_series(1, 3)) + 1), (generate_series(1, 3)) - -> ProjectSet - Output: generate_series(1, 3) - -> Result -(8 rows) - -select generate_series(1,3)+1 order by generate_series(1,3); - ?column? 
----------- - 2 - 3 - 4 -(3 rows) - --- Check that SRFs of same nesting level run in lockstep -explain (verbose, costs off) -select generate_series(1,3) as x, generate_series(3,6) + 1 as y; - QUERY PLAN ------------------------------------------------------------------- - Result - Output: (generate_series(1, 3)), ((generate_series(3, 6)) + 1) - -> ProjectSet - Output: generate_series(1, 3), generate_series(3, 6) - -> Result -(5 rows) - -select generate_series(1,3) as x, generate_series(3,6) + 1 as y; - x | y ----+--- - 1 | 4 - 2 | 5 - 3 | 6 - | 7 -(4 rows) - --- Clean up -DROP TABLE few; -DROP TABLE fewmore; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/tid.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/tid.out --- /Users/admin/pgsql/src/test/regress/expected/tid.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/tid.out 2024-12-13 13:20:10 @@ -1,121 +1,2 @@ --- basic tests for the TID data type -SELECT - '(0,0)'::tid as tid00, - '(0,1)'::tid as tid01, - '(-1,0)'::tid as tidm10, - '(4294967295,65535)'::tid as tidmax; - tid00 | tid01 | tidm10 | tidmax --------+-------+----------------+-------------------- - (0,0) | (0,1) | (4294967295,0) | (4294967295,65535) -(1 row) - -SELECT '(4294967296,1)'::tid; -- error -ERROR: invalid input syntax for type tid: "(4294967296,1)" -LINE 1: SELECT '(4294967296,1)'::tid; - ^ -SELECT '(1,65536)'::tid; -- error -ERROR: invalid input syntax for type tid: "(1,65536)" -LINE 1: SELECT '(1,65536)'::tid; - ^ --- Also try it with non-error-throwing API -SELECT pg_input_is_valid('(0)', 'tid'); - pg_input_is_valid -------------------- - f -(1 row) - -SELECT * FROM pg_input_error_info('(0)', 'tid'); - message | detail | hint | sql_error_code -------------------------------------------+--------+------+---------------- - invalid input syntax for type tid: "(0)" | | | 22P02 -(1 row) - -SELECT pg_input_is_valid('(0,-1)', 'tid'); - pg_input_is_valid -------------------- - f -(1 row) - -SELECT * FROM pg_input_error_info('(0,-1)', 'tid'); - message | detail | hint | sql_error_code ----------------------------------------------+--------+------+---------------- - invalid input syntax for type tid: "(0,-1)" | | | 22P02 -(1 row) - --- tests for functions related to TID handling -CREATE TABLE tid_tab (a int); --- min() and max() for TIDs -INSERT INTO tid_tab VALUES (1), (2); -SELECT min(ctid) FROM tid_tab; - min -------- - (0,1) -(1 row) - -SELECT max(ctid) FROM tid_tab; - max -------- - (0,2) -(1 row) - -TRUNCATE tid_tab; --- Tests for currtid2() with various relation kinds --- Materialized view -CREATE MATERIALIZED VIEW tid_matview AS SELECT a FROM tid_tab; -SELECT currtid2('tid_matview'::text, '(0,1)'::tid); -- fails -ERROR: tid (0, 1) is not valid for relation "tid_matview" -INSERT INTO tid_tab VALUES (1); -REFRESH MATERIALIZED VIEW tid_matview; -SELECT currtid2('tid_matview'::text, '(0,1)'::tid); -- ok - currtid2 ----------- - (0,1) -(1 row) - -DROP MATERIALIZED VIEW tid_matview; -TRUNCATE tid_tab; --- Sequence -CREATE SEQUENCE tid_seq; -SELECT currtid2('tid_seq'::text, '(0,1)'::tid); -- ok - currtid2 ----------- - (0,1) -(1 row) - -DROP SEQUENCE tid_seq; --- Index, fails with incorrect relation type -CREATE INDEX tid_ind ON tid_tab(a); -SELECT 
currtid2('tid_ind'::text, '(0,1)'::tid); -- fails -ERROR: cannot open relation "tid_ind" -DETAIL: This operation is not supported for indexes. -DROP INDEX tid_ind; --- Partitioned table, no storage -CREATE TABLE tid_part (a int) PARTITION BY RANGE (a); -SELECT currtid2('tid_part'::text, '(0,1)'::tid); -- fails -ERROR: cannot look at latest visible tid for relation "public.tid_part" -DROP TABLE tid_part; --- Views --- ctid not defined in the view -CREATE VIEW tid_view_no_ctid AS SELECT a FROM tid_tab; -SELECT currtid2('tid_view_no_ctid'::text, '(0,1)'::tid); -- fails -ERROR: currtid cannot handle views with no CTID -DROP VIEW tid_view_no_ctid; --- ctid fetched directly from the source table. -CREATE VIEW tid_view_with_ctid AS SELECT ctid, a FROM tid_tab; -SELECT currtid2('tid_view_with_ctid'::text, '(0,1)'::tid); -- fails -ERROR: tid (0, 1) is not valid for relation "tid_tab" -INSERT INTO tid_tab VALUES (1); -SELECT currtid2('tid_view_with_ctid'::text, '(0,1)'::tid); -- ok - currtid2 ----------- - (0,1) -(1 row) - -DROP VIEW tid_view_with_ctid; -TRUNCATE tid_tab; --- ctid attribute with incorrect data type -CREATE VIEW tid_view_fake_ctid AS SELECT 1 AS ctid, 2 AS a; -SELECT currtid2('tid_view_fake_ctid'::text, '(0,1)'::tid); -- fails -ERROR: ctid isn't of type TID -DROP VIEW tid_view_fake_ctid; -DROP TABLE tid_tab CASCADE; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/tidscan.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/tidscan.out --- /Users/admin/pgsql/src/test/regress/expected/tidscan.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/tidscan.out 2024-12-13 13:20:10 @@ -1,296 +1,2 @@ --- tests for tidscans -CREATE TABLE tidscan(id integer); --- only insert a few rows, we don't want to spill onto a second table page -INSERT INTO tidscan VALUES (1), (2), (3); --- show ctids -SELECT ctid, * FROM tidscan; - ctid | id --------+---- - (0,1) | 1 - (0,2) | 2 - (0,3) | 3 -(3 rows) - --- ctid equality - implemented as tidscan -EXPLAIN (COSTS OFF) -SELECT ctid, * FROM tidscan WHERE ctid = '(0,1)'; - QUERY PLAN ------------------------------------ - Tid Scan on tidscan - TID Cond: (ctid = '(0,1)'::tid) -(2 rows) - -SELECT ctid, * FROM tidscan WHERE ctid = '(0,1)'; - ctid | id --------+---- - (0,1) | 1 -(1 row) - -EXPLAIN (COSTS OFF) -SELECT ctid, * FROM tidscan WHERE '(0,1)' = ctid; - QUERY PLAN ------------------------------------ - Tid Scan on tidscan - TID Cond: ('(0,1)'::tid = ctid) -(2 rows) - -SELECT ctid, * FROM tidscan WHERE '(0,1)' = ctid; - ctid | id --------+---- - (0,1) | 1 -(1 row) - --- OR'd clauses -EXPLAIN (COSTS OFF) -SELECT ctid, * FROM tidscan WHERE ctid = '(0,2)' OR '(0,1)' = ctid; - QUERY PLAN --------------------------------------------------------------- - Tid Scan on tidscan - TID Cond: ((ctid = '(0,2)'::tid) OR ('(0,1)'::tid = ctid)) -(2 rows) - -SELECT ctid, * FROM tidscan WHERE ctid = '(0,2)' OR '(0,1)' = ctid; - ctid | id --------+---- - (0,1) | 1 - (0,2) | 2 -(2 rows) - --- ctid = ScalarArrayOp - implemented as tidscan -EXPLAIN (COSTS OFF) -SELECT ctid, * FROM tidscan WHERE ctid = ANY(ARRAY['(0,1)', '(0,2)']::tid[]); - QUERY PLAN -------------------------------------------------------- - Tid Scan on tidscan - TID Cond: (ctid = ANY 
('{"(0,1)","(0,2)"}'::tid[])) -(2 rows) - -SELECT ctid, * FROM tidscan WHERE ctid = ANY(ARRAY['(0,1)', '(0,2)']::tid[]); - ctid | id --------+---- - (0,1) | 1 - (0,2) | 2 -(2 rows) - --- ctid != ScalarArrayOp - can't be implemented as tidscan -EXPLAIN (COSTS OFF) -SELECT ctid, * FROM tidscan WHERE ctid != ANY(ARRAY['(0,1)', '(0,2)']::tid[]); - QUERY PLAN ------------------------------------------------------- - Seq Scan on tidscan - Filter: (ctid <> ANY ('{"(0,1)","(0,2)"}'::tid[])) -(2 rows) - -SELECT ctid, * FROM tidscan WHERE ctid != ANY(ARRAY['(0,1)', '(0,2)']::tid[]); - ctid | id --------+---- - (0,1) | 1 - (0,2) | 2 - (0,3) | 3 -(3 rows) - --- tid equality extracted from sub-AND clauses -EXPLAIN (COSTS OFF) -SELECT ctid, * FROM tidscan -WHERE (id = 3 AND ctid IN ('(0,2)', '(0,3)')) OR (ctid = '(0,1)' AND id = 1); - QUERY PLAN --------------------------------------------------------------------------------------------------------------- - Tid Scan on tidscan - TID Cond: ((ctid = ANY ('{"(0,2)","(0,3)"}'::tid[])) OR (ctid = '(0,1)'::tid)) - Filter: (((id = 3) AND (ctid = ANY ('{"(0,2)","(0,3)"}'::tid[]))) OR ((ctid = '(0,1)'::tid) AND (id = 1))) -(3 rows) - -SELECT ctid, * FROM tidscan -WHERE (id = 3 AND ctid IN ('(0,2)', '(0,3)')) OR (ctid = '(0,1)' AND id = 1); - ctid | id --------+---- - (0,1) | 1 - (0,3) | 3 -(2 rows) - --- nestloop-with-inner-tidscan joins on tid -SET enable_hashjoin TO off; -- otherwise hash join might win -EXPLAIN (COSTS OFF) -SELECT t1.ctid, t1.*, t2.ctid, t2.* -FROM tidscan t1 JOIN tidscan t2 ON t1.ctid = t2.ctid WHERE t1.id = 1; - QUERY PLAN ------------------------------------- - Nested Loop - -> Seq Scan on tidscan t1 - Filter: (id = 1) - -> Tid Scan on tidscan t2 - TID Cond: (t1.ctid = ctid) -(5 rows) - -SELECT t1.ctid, t1.*, t2.ctid, t2.* -FROM tidscan t1 JOIN tidscan t2 ON t1.ctid = t2.ctid WHERE t1.id = 1; - ctid | id | ctid | id --------+----+-------+---- - (0,1) | 1 | (0,1) | 1 -(1 row) - -EXPLAIN (COSTS OFF) -SELECT t1.ctid, t1.*, t2.ctid, t2.* -FROM tidscan t1 LEFT JOIN tidscan t2 ON t1.ctid = t2.ctid WHERE t1.id = 1; - QUERY PLAN ------------------------------------- - Nested Loop Left Join - -> Seq Scan on tidscan t1 - Filter: (id = 1) - -> Tid Scan on tidscan t2 - TID Cond: (t1.ctid = ctid) -(5 rows) - -SELECT t1.ctid, t1.*, t2.ctid, t2.* -FROM tidscan t1 LEFT JOIN tidscan t2 ON t1.ctid = t2.ctid WHERE t1.id = 1; - ctid | id | ctid | id --------+----+-------+---- - (0,1) | 1 | (0,1) | 1 -(1 row) - -RESET enable_hashjoin; --- exercise backward scan and rewind -BEGIN; -DECLARE c CURSOR FOR -SELECT ctid, * FROM tidscan WHERE ctid = ANY(ARRAY['(0,1)', '(0,2)']::tid[]); -FETCH ALL FROM c; - ctid | id --------+---- - (0,1) | 1 - (0,2) | 2 -(2 rows) - -FETCH BACKWARD 1 FROM c; - ctid | id --------+---- - (0,2) | 2 -(1 row) - -FETCH FIRST FROM c; - ctid | id --------+---- - (0,1) | 1 -(1 row) - -ROLLBACK; --- tidscan via CURRENT OF -BEGIN; -DECLARE c CURSOR FOR SELECT ctid, * FROM tidscan; -FETCH NEXT FROM c; -- skip one row - ctid | id --------+---- - (0,1) | 1 -(1 row) - -FETCH NEXT FROM c; - ctid | id --------+---- - (0,2) | 2 -(1 row) - --- perform update -EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) -UPDATE tidscan SET id = -id WHERE CURRENT OF c RETURNING *; - QUERY PLAN ---------------------------------------------------- - Update on tidscan (actual rows=1 loops=1) - -> Tid Scan on tidscan (actual rows=1 loops=1) - TID Cond: CURRENT OF c -(3 rows) - -FETCH NEXT FROM c; - ctid | id --------+---- - (0,3) | 3 -(1 row) - --- 
perform update -EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) -UPDATE tidscan SET id = -id WHERE CURRENT OF c RETURNING *; - QUERY PLAN ---------------------------------------------------- - Update on tidscan (actual rows=1 loops=1) - -> Tid Scan on tidscan (actual rows=1 loops=1) - TID Cond: CURRENT OF c -(3 rows) - -SELECT * FROM tidscan; - id ----- - 1 - -2 - -3 -(3 rows) - --- position cursor past any rows -FETCH NEXT FROM c; - ctid | id -------+---- -(0 rows) - --- should error out -EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) -UPDATE tidscan SET id = -id WHERE CURRENT OF c RETURNING *; -ERROR: cursor "c" is not positioned on a row -ROLLBACK; --- bulk joins on CTID --- (these plans don't use TID scans, but this still seems like an --- appropriate place for these tests) -EXPLAIN (COSTS OFF) -SELECT count(*) FROM tenk1 t1 JOIN tenk1 t2 ON t1.ctid = t2.ctid; - QUERY PLAN ----------------------------------------- - Aggregate - -> Hash Join - Hash Cond: (t1.ctid = t2.ctid) - -> Seq Scan on tenk1 t1 - -> Hash - -> Seq Scan on tenk1 t2 -(6 rows) - -SELECT count(*) FROM tenk1 t1 JOIN tenk1 t2 ON t1.ctid = t2.ctid; - count -------- - 10000 -(1 row) - -SET enable_hashjoin TO off; -EXPLAIN (COSTS OFF) -SELECT count(*) FROM tenk1 t1 JOIN tenk1 t2 ON t1.ctid = t2.ctid; - QUERY PLAN ------------------------------------------ - Aggregate - -> Merge Join - Merge Cond: (t1.ctid = t2.ctid) - -> Sort - Sort Key: t1.ctid - -> Seq Scan on tenk1 t1 - -> Sort - Sort Key: t2.ctid - -> Seq Scan on tenk1 t2 -(9 rows) - -SELECT count(*) FROM tenk1 t1 JOIN tenk1 t2 ON t1.ctid = t2.ctid; - count -------- - 10000 -(1 row) - -RESET enable_hashjoin; --- check predicate lock on CTID -BEGIN ISOLATION LEVEL SERIALIZABLE; -SELECT * FROM tidscan WHERE ctid = '(0,1)'; - id ----- - 1 -(1 row) - --- locktype should be 'tuple' -SELECT locktype, mode FROM pg_locks WHERE pid = pg_backend_pid() AND mode = 'SIReadLock'; - locktype | mode -----------+------------ - tuple | SIReadLock -(1 row) - -ROLLBACK; -DROP TABLE tidscan; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? 
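The WHERE CURRENT OF updates exercised above follow the usual cursor pattern; in miniature, assuming a hypothetical table t(id int) with a few rows:

    BEGIN;
    DECLARE c CURSOR FOR SELECT id FROM t;
    FETCH NEXT FROM c;                         -- position the cursor on a row
    UPDATE t SET id = -id WHERE CURRENT OF c;  -- tid scan on exactly that row
    COMMIT;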
diff -U3 /Users/admin/pgsql/src/test/regress/expected/tidrangescan.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/tidrangescan.out
--- /Users/admin/pgsql/src/test/regress/expected/tidrangescan.out 2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/tidrangescan.out 2024-12-13 13:20:10
@@ -1,300 +1,2 @@
--- tests for tidrangescans
-SET enable_seqscan TO off;
-CREATE TABLE tidrangescan(id integer, data text);
--- empty table
-EXPLAIN (COSTS OFF)
-SELECT ctid FROM tidrangescan WHERE ctid < '(1, 0)';
-            QUERY PLAN             
------------------------------------
- Tid Range Scan on tidrangescan
-   TID Cond: (ctid < '(1,0)'::tid)
-(2 rows)
-
-SELECT ctid FROM tidrangescan WHERE ctid < '(1, 0)';
- ctid 
-------
-(0 rows)
-
-EXPLAIN (COSTS OFF)
-SELECT ctid FROM tidrangescan WHERE ctid > '(9, 0)';
-            QUERY PLAN             
------------------------------------
- Tid Range Scan on tidrangescan
-   TID Cond: (ctid > '(9,0)'::tid)
-(2 rows)
-
-SELECT ctid FROM tidrangescan WHERE ctid > '(9, 0)';
- ctid 
-------
-(0 rows)
-
--- insert enough tuples to fill at least two pages
-INSERT INTO tidrangescan SELECT i,repeat('x', 100) FROM generate_series(1,200) AS s(i);
--- remove all tuples after the 10th tuple on each page. Trying to ensure
--- we get the same layout with all CPU architectures and smaller than standard
--- page sizes.
-DELETE FROM tidrangescan
-WHERE substring(ctid::text FROM ',(\d+)\)')::integer > 10 OR substring(ctid::text FROM '\((\d+),')::integer > 2;
-VACUUM tidrangescan;
--- range scans with upper bound
-EXPLAIN (COSTS OFF)
-SELECT ctid FROM tidrangescan WHERE ctid < '(1,0)';
-            QUERY PLAN             
------------------------------------
- Tid Range Scan on tidrangescan
-   TID Cond: (ctid < '(1,0)'::tid)
-(2 rows)
-
-SELECT ctid FROM tidrangescan WHERE ctid < '(1,0)';
-  ctid  
---------
- (0,1)
- (0,2)
- (0,3)
- (0,4)
- (0,5)
- (0,6)
- (0,7)
- (0,8)
- (0,9)
- (0,10)
-(10 rows)
-
-EXPLAIN (COSTS OFF)
-SELECT ctid FROM tidrangescan WHERE ctid <= '(1,5)';
-             QUERY PLAN             
-------------------------------------
- Tid Range Scan on tidrangescan
-   TID Cond: (ctid <= '(1,5)'::tid)
-(2 rows)
-
-SELECT ctid FROM tidrangescan WHERE ctid <= '(1,5)';
-  ctid  
---------
- (0,1)
- (0,2)
- (0,3)
- (0,4)
- (0,5)
- (0,6)
- (0,7)
- (0,8)
- (0,9)
- (0,10)
- (1,1)
- (1,2)
- (1,3)
- (1,4)
- (1,5)
-(15 rows)
-
-EXPLAIN (COSTS OFF)
-SELECT ctid FROM tidrangescan WHERE ctid < '(0,0)';
-            QUERY PLAN             
------------------------------------
- Tid Range Scan on tidrangescan
-   TID Cond: (ctid < '(0,0)'::tid)
-(2 rows)
-
-SELECT ctid FROM tidrangescan WHERE ctid < '(0,0)';
- ctid 
-------
-(0 rows)
-
--- range scans with lower bound
-EXPLAIN (COSTS OFF)
-SELECT ctid FROM tidrangescan WHERE ctid > '(2,8)';
-            QUERY PLAN             
------------------------------------
- Tid Range Scan on tidrangescan
-   TID Cond: (ctid > '(2,8)'::tid)
-(2 rows)
-
-SELECT ctid FROM tidrangescan WHERE ctid > '(2,8)';
-  ctid  
---------
- (2,9)
- (2,10)
-(2 rows)
-
-EXPLAIN (COSTS OFF)
-SELECT ctid FROM tidrangescan WHERE '(2,8)' < ctid;
-            QUERY PLAN             
------------------------------------
- Tid Range Scan on tidrangescan
-   TID Cond: ('(2,8)'::tid < ctid)
-(2 rows)
-
-SELECT ctid FROM tidrangescan WHERE '(2,8)' < ctid;
-  ctid  
---------
- (2,9)
- (2,10)
-(2 rows)
-
-EXPLAIN (COSTS OFF)
-SELECT ctid FROM tidrangescan WHERE ctid >= '(2,8)';
-             QUERY PLAN             
-------------------------------------
- Tid Range Scan on tidrangescan
-   TID Cond: (ctid >= '(2,8)'::tid)
-(2 rows)
-
-SELECT ctid FROM tidrangescan WHERE ctid >= '(2,8)';
-  ctid  
---------
- (2,8)
- (2,9)
- (2,10)
-(3 rows)
-
-EXPLAIN (COSTS OFF)
-SELECT ctid FROM tidrangescan WHERE ctid >= '(100,0)';
-              QUERY PLAN              
---------------------------------------
- Tid Range Scan on tidrangescan
-   TID Cond: (ctid >= '(100,0)'::tid)
-(2 rows)
-
-SELECT ctid FROM tidrangescan WHERE ctid >= '(100,0)';
- ctid 
-------
-(0 rows)
-
--- range scans with both bounds
-EXPLAIN (COSTS OFF)
-SELECT ctid FROM tidrangescan WHERE ctid > '(1,4)' AND '(1,7)' >= ctid;
-                           QUERY PLAN                           
-----------------------------------------------------------------
- Tid Range Scan on tidrangescan
-   TID Cond: ((ctid > '(1,4)'::tid) AND ('(1,7)'::tid >= ctid))
-(2 rows)
-
-SELECT ctid FROM tidrangescan WHERE ctid > '(1,4)' AND '(1,7)' >= ctid;
- ctid  
--------
- (1,5)
- (1,6)
- (1,7)
-(3 rows)
-
-EXPLAIN (COSTS OFF)
-SELECT ctid FROM tidrangescan WHERE '(1,7)' >= ctid AND ctid > '(1,4)';
-                           QUERY PLAN                           
-----------------------------------------------------------------
- Tid Range Scan on tidrangescan
-   TID Cond: (('(1,7)'::tid >= ctid) AND (ctid > '(1,4)'::tid))
-(2 rows)
-
-SELECT ctid FROM tidrangescan WHERE '(1,7)' >= ctid AND ctid > '(1,4)';
- ctid  
--------
- (1,5)
- (1,6)
- (1,7)
-(3 rows)
-
--- extreme offsets
-SELECT ctid FROM tidrangescan WHERE ctid > '(0,65535)' AND ctid < '(1,0)' LIMIT 1;
- ctid 
-------
-(0 rows)
-
-SELECT ctid FROM tidrangescan WHERE ctid < '(0,0)' LIMIT 1;
- ctid 
-------
-(0 rows)
-
-SELECT ctid FROM tidrangescan WHERE ctid > '(4294967295,65535)';
- ctid 
-------
-(0 rows)
-
-SELECT ctid FROM tidrangescan WHERE ctid < '(0,0)';
- ctid 
-------
-(0 rows)
-
--- NULLs in the range cannot return tuples
-SELECT ctid FROM tidrangescan WHERE ctid >= (SELECT NULL::tid);
- ctid 
-------
-(0 rows)
-
--- rescans
-EXPLAIN (COSTS OFF)
-SELECT t.ctid,t2.c FROM tidrangescan t,
-LATERAL (SELECT count(*) c FROM tidrangescan t2 WHERE t2.ctid <= t.ctid) t2
-WHERE t.ctid < '(1,0)';
-                  QUERY PLAN                   
------------------------------------------------
- Nested Loop
-   ->  Tid Range Scan on tidrangescan t
-         TID Cond: (ctid < '(1,0)'::tid)
-   ->  Aggregate
-         ->  Tid Range Scan on tidrangescan t2
-               TID Cond: (ctid <= t.ctid)
-(6 rows)
-
-SELECT t.ctid,t2.c FROM tidrangescan t,
-LATERAL (SELECT count(*) c FROM tidrangescan t2 WHERE t2.ctid <= t.ctid) t2
-WHERE t.ctid < '(1,0)';
-  ctid  | c  
---------+----
- (0,1)  |  1
- (0,2)  |  2
- (0,3)  |  3
- (0,4)  |  4
- (0,5)  |  5
- (0,6)  |  6
- (0,7)  |  7
- (0,8)  |  8
- (0,9)  |  9
- (0,10) | 10
-(10 rows)
-
--- cursors
--- Ensure we get a TID Range scan without a Materialize node.
-EXPLAIN (COSTS OFF)
-DECLARE c SCROLL CURSOR FOR SELECT ctid FROM tidrangescan WHERE ctid < '(1,0)';
-            QUERY PLAN             
------------------------------------
- Tid Range Scan on tidrangescan
-   TID Cond: (ctid < '(1,0)'::tid)
-(2 rows)
-
-BEGIN;
-DECLARE c SCROLL CURSOR FOR SELECT ctid FROM tidrangescan WHERE ctid < '(1,0)';
-FETCH NEXT c;
- ctid  
--------
- (0,1)
-(1 row)
-
-FETCH NEXT c;
- ctid  
--------
- (0,2)
-(1 row)
-
-FETCH PRIOR c;
- ctid  
--------
- (0,1)
-(1 row)
-
-FETCH FIRST c;
- ctid  
--------
- (0,1)
-(1 row)
-
-FETCH LAST c;
-  ctid  
---------
- (0,10)
-(1 row)
-
-COMMIT;
-DROP TABLE tidrangescan;
-RESET enable_seqscan;
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+	Is the server running locally and accepting connections on that socket?
diff -U3 /Users/admin/pgsql/src/test/regress/expected/collate.utf8_1.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/collate.utf8.out
--- /Users/admin/pgsql/src/test/regress/expected/collate.utf8_1.out 2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/collate.utf8.out 2024-12-13 13:20:10
@@ -1,8 +1,2 @@
-/*
- * This test is for collations and character operations when using the
- * builtin provider with the C.UTF-8 locale.
- */
-/* skip test if not UTF8 server encoding */
-SELECT getdatabaseencoding() <> 'UTF8' AS skip_test \gset
-\if :skip_test
-\quit
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+	Is the server running locally and accepting connections on that socket?
diff -U3 /Users/admin/pgsql/src/test/regress/expected/collate.icu.utf8_1.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/collate.icu.utf8.out
--- /Users/admin/pgsql/src/test/regress/expected/collate.icu.utf8_1.out 2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/collate.icu.utf8.out 2024-12-13 13:20:10
@@ -1,9 +1,2 @@
-/*
- * This test is for ICU collations.
- */
-/* skip test if not UTF8 server encoding or no ICU collations installed */
-SELECT getdatabaseencoding() <> 'UTF8' OR
-  (SELECT count(*) FROM pg_collation WHERE collprovider = 'i' AND collname <> 'unicode') = 0
-  AS skip_test \gset
-\if :skip_test
-\quit
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+	Is the server running locally and accepting connections on that socket?
diff -U3 /Users/admin/pgsql/src/test/regress/expected/incremental_sort.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/incremental_sort.out
--- /Users/admin/pgsql/src/test/regress/expected/incremental_sort.out 2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/incremental_sort.out 2024-12-13 13:20:10
@@ -1,1724 +1,2 @@
--- When there is a LIMIT clause, incremental sort is beneficial because
--- it only has to sort some of the groups, and not the entire table.
-explain (costs off)
-select * from (select * from tenk1 order by four) t order by four, ten
-limit 1;
-               QUERY PLAN                
------------------------------------------
- Limit
-   ->  Incremental Sort
-         Sort Key: tenk1.four, tenk1.ten
-         Presorted Key: tenk1.four
-         ->  Sort
-               Sort Key: tenk1.four
-               ->  Seq Scan on tenk1
-(7 rows)
-
--- When work_mem is not enough to sort the entire table, incremental sort
--- may be faster if individual groups still fit into work_mem.
-set work_mem to '2MB'; -explain (costs off) -select * from (select * from tenk1 order by four) t order by four, ten; - QUERY PLAN ------------------------------------ - Incremental Sort - Sort Key: tenk1.four, tenk1.ten - Presorted Key: tenk1.four - -> Sort - Sort Key: tenk1.four - -> Seq Scan on tenk1 -(6 rows) - -reset work_mem; -create table t(a integer, b integer); -create or replace function explain_analyze_without_memory(query text) -returns table (out_line text) language plpgsql -as -$$ -declare - line text; -begin - for line in - execute 'explain (analyze, costs off, summary off, timing off, buffers off) ' || query - loop - out_line := regexp_replace(line, '\d+kB', 'NNkB', 'g'); - return next; - end loop; -end; -$$; -create or replace function explain_analyze_inc_sort_nodes(query text) -returns jsonb language plpgsql -as -$$ -declare - elements jsonb; - element jsonb; - matching_nodes jsonb := '[]'::jsonb; -begin - execute 'explain (analyze, costs off, summary off, timing off, buffers off, format ''json'') ' || query into strict elements; - while jsonb_array_length(elements) > 0 loop - element := elements->0; - elements := elements - 0; - case jsonb_typeof(element) - when 'array' then - if jsonb_array_length(element) > 0 then - elements := elements || element; - end if; - when 'object' then - if element ? 'Plan' then - elements := elements || jsonb_build_array(element->'Plan'); - element := element - 'Plan'; - else - if element ? 'Plans' then - elements := elements || jsonb_build_array(element->'Plans'); - element := element - 'Plans'; - end if; - if (element->>'Node Type')::text = 'Incremental Sort' then - matching_nodes := matching_nodes || element; - end if; - end if; - end case; - end loop; - return matching_nodes; -end; -$$; -create or replace function explain_analyze_inc_sort_nodes_without_memory(query text) -returns jsonb language plpgsql -as -$$ -declare - nodes jsonb := '[]'::jsonb; - node jsonb; - group_key text; - space_key text; -begin - for node in select * from jsonb_array_elements(explain_analyze_inc_sort_nodes(query)) t loop - for group_key in select unnest(array['Full-sort Groups', 'Pre-sorted Groups']::text[]) t loop - for space_key in select unnest(array['Sort Space Memory', 'Sort Space Disk']::text[]) t loop - node := jsonb_set(node, array[group_key, space_key, 'Average Sort Space Used'], '"NN"', false); - node := jsonb_set(node, array[group_key, space_key, 'Peak Sort Space Used'], '"NN"', false); - end loop; - end loop; - nodes := nodes || node; - end loop; - return nodes; -end; -$$; -create or replace function explain_analyze_inc_sort_nodes_verify_invariants(query text) -returns bool language plpgsql -as -$$ -declare - node jsonb; - group_stats jsonb; - group_key text; - space_key text; -begin - for node in select * from jsonb_array_elements(explain_analyze_inc_sort_nodes(query)) t loop - for group_key in select unnest(array['Full-sort Groups', 'Pre-sorted Groups']::text[]) t loop - group_stats := node->group_key; - for space_key in select unnest(array['Sort Space Memory', 'Sort Space Disk']::text[]) t loop - if (group_stats->space_key->'Peak Sort Space Used')::bigint < (group_stats->space_key->'Peak Sort Space Used')::bigint then - raise exception '% has invalid max space < average space', group_key; - end if; - end loop; - end loop; - end loop; - return true; -end; -$$; --- A single large group tested around each mode transition point. 
-insert into t(a, b) select i/100 + 1, i + 1 from generate_series(0, 999) n(i); -analyze t; -explain (costs off) select * from (select * from t order by a) s order by a, b limit 31; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 31; - a | b ----+---- - 1 | 1 - 1 | 2 - 1 | 3 - 1 | 4 - 1 | 5 - 1 | 6 - 1 | 7 - 1 | 8 - 1 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 1 | 20 - 1 | 21 - 1 | 22 - 1 | 23 - 1 | 24 - 1 | 25 - 1 | 26 - 1 | 27 - 1 | 28 - 1 | 29 - 1 | 30 - 1 | 31 -(31 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 32; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 32; - a | b ----+---- - 1 | 1 - 1 | 2 - 1 | 3 - 1 | 4 - 1 | 5 - 1 | 6 - 1 | 7 - 1 | 8 - 1 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 1 | 20 - 1 | 21 - 1 | 22 - 1 | 23 - 1 | 24 - 1 | 25 - 1 | 26 - 1 | 27 - 1 | 28 - 1 | 29 - 1 | 30 - 1 | 31 - 1 | 32 -(32 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 33; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 33; - a | b ----+---- - 1 | 1 - 1 | 2 - 1 | 3 - 1 | 4 - 1 | 5 - 1 | 6 - 1 | 7 - 1 | 8 - 1 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 1 | 20 - 1 | 21 - 1 | 22 - 1 | 23 - 1 | 24 - 1 | 25 - 1 | 26 - 1 | 27 - 1 | 28 - 1 | 29 - 1 | 30 - 1 | 31 - 1 | 32 - 1 | 33 -(33 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 65; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 65; - a | b ----+---- - 1 | 1 - 1 | 2 - 1 | 3 - 1 | 4 - 1 | 5 - 1 | 6 - 1 | 7 - 1 | 8 - 1 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 1 | 20 - 1 | 21 - 1 | 22 - 1 | 23 - 1 | 24 - 1 | 25 - 1 | 26 - 1 | 27 - 1 | 28 - 1 | 29 - 1 | 30 - 1 | 31 - 1 | 32 - 1 | 33 - 1 | 34 - 1 | 35 - 1 | 36 - 1 | 37 - 1 | 38 - 1 | 39 - 1 | 40 - 1 | 41 - 1 | 42 - 1 | 43 - 1 | 44 - 1 | 45 - 1 | 46 - 1 | 47 - 1 | 48 - 1 | 49 - 1 | 50 - 1 | 51 - 1 | 52 - 1 | 53 - 1 | 54 - 1 | 55 - 1 | 56 - 1 | 57 - 1 | 58 - 1 | 59 - 1 | 60 - 1 | 61 - 1 | 62 - 1 | 63 - 1 | 64 - 1 | 65 -(65 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 66; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 66; - a | b ----+---- - 1 | 1 - 1 | 2 - 1 | 3 - 1 | 4 - 1 | 5 - 1 | 6 - 1 | 7 - 1 | 8 - 1 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 1 | 20 - 1 | 21 - 1 | 22 - 1 | 23 - 1 | 24 - 1 | 25 - 1 | 
26 - 1 | 27 - 1 | 28 - 1 | 29 - 1 | 30 - 1 | 31 - 1 | 32 - 1 | 33 - 1 | 34 - 1 | 35 - 1 | 36 - 1 | 37 - 1 | 38 - 1 | 39 - 1 | 40 - 1 | 41 - 1 | 42 - 1 | 43 - 1 | 44 - 1 | 45 - 1 | 46 - 1 | 47 - 1 | 48 - 1 | 49 - 1 | 50 - 1 | 51 - 1 | 52 - 1 | 53 - 1 | 54 - 1 | 55 - 1 | 56 - 1 | 57 - 1 | 58 - 1 | 59 - 1 | 60 - 1 | 61 - 1 | 62 - 1 | 63 - 1 | 64 - 1 | 65 - 1 | 66 -(66 rows) - -delete from t; --- An initial large group followed by a small group. -insert into t(a, b) select i/50 + 1, i + 1 from generate_series(0, 999) n(i); -analyze t; -explain (costs off) select * from (select * from t order by a) s order by a, b limit 55; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 55; - a | b ----+---- - 1 | 1 - 1 | 2 - 1 | 3 - 1 | 4 - 1 | 5 - 1 | 6 - 1 | 7 - 1 | 8 - 1 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 1 | 20 - 1 | 21 - 1 | 22 - 1 | 23 - 1 | 24 - 1 | 25 - 1 | 26 - 1 | 27 - 1 | 28 - 1 | 29 - 1 | 30 - 1 | 31 - 1 | 32 - 1 | 33 - 1 | 34 - 1 | 35 - 1 | 36 - 1 | 37 - 1 | 38 - 1 | 39 - 1 | 40 - 1 | 41 - 1 | 42 - 1 | 43 - 1 | 44 - 1 | 45 - 1 | 46 - 1 | 47 - 1 | 48 - 1 | 49 - 1 | 50 - 2 | 51 - 2 | 52 - 2 | 53 - 2 | 54 - 2 | 55 -(55 rows) - --- Test EXPLAIN ANALYZE with only a fullsort group. -select explain_analyze_without_memory('select * from (select * from t order by a) s order by a, b limit 55'); - explain_analyze_without_memory ---------------------------------------------------------------------------------------------------------------- - Limit (actual rows=55 loops=1) - -> Incremental Sort (actual rows=55 loops=1) - Sort Key: t.a, t.b - Presorted Key: t.a - Full-sort Groups: 2 Sort Methods: top-N heapsort, quicksort Average Memory: NNkB Peak Memory: NNkB - -> Sort (actual rows=101 loops=1) - Sort Key: t.a - Sort Method: quicksort Memory: NNkB - -> Seq Scan on t (actual rows=1000 loops=1) -(9 rows) - -select jsonb_pretty(explain_analyze_inc_sort_nodes_without_memory('select * from (select * from t order by a) s order by a, b limit 55')); - jsonb_pretty -------------------------------------------------- - [ + - { + - "Disabled": false, + - "Sort Key": [ + - "t.a", + - "t.b" + - ], + - "Node Type": "Incremental Sort", + - "Actual Rows": 55, + - "Actual Loops": 1, + - "Async Capable": false, + - "Presorted Key": [ + - "t.a" + - ], + - "Parallel Aware": false, + - "Full-sort Groups": { + - "Group Count": 2, + - "Sort Methods Used": [ + - "top-N heapsort", + - "quicksort" + - ], + - "Sort Space Memory": { + - "Peak Sort Space Used": "NN", + - "Average Sort Space Used": "NN"+ - } + - }, + - "Parent Relationship": "Outer" + - } + - ] -(1 row) - -select explain_analyze_inc_sort_nodes_verify_invariants('select * from (select * from t order by a) s order by a, b limit 55'); - explain_analyze_inc_sort_nodes_verify_invariants --------------------------------------------------- - t -(1 row) - -delete from t; --- An initial small group followed by a large group. 
-insert into t(a, b) select (case when i < 5 then i else 9 end), i from generate_series(1, 1000) n(i); -analyze t; -explain (costs off) select * from (select * from t order by a) s order by a, b limit 70; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 70; - a | b ----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 9 | 5 - 9 | 6 - 9 | 7 - 9 | 8 - 9 | 9 - 9 | 10 - 9 | 11 - 9 | 12 - 9 | 13 - 9 | 14 - 9 | 15 - 9 | 16 - 9 | 17 - 9 | 18 - 9 | 19 - 9 | 20 - 9 | 21 - 9 | 22 - 9 | 23 - 9 | 24 - 9 | 25 - 9 | 26 - 9 | 27 - 9 | 28 - 9 | 29 - 9 | 30 - 9 | 31 - 9 | 32 - 9 | 33 - 9 | 34 - 9 | 35 - 9 | 36 - 9 | 37 - 9 | 38 - 9 | 39 - 9 | 40 - 9 | 41 - 9 | 42 - 9 | 43 - 9 | 44 - 9 | 45 - 9 | 46 - 9 | 47 - 9 | 48 - 9 | 49 - 9 | 50 - 9 | 51 - 9 | 52 - 9 | 53 - 9 | 54 - 9 | 55 - 9 | 56 - 9 | 57 - 9 | 58 - 9 | 59 - 9 | 60 - 9 | 61 - 9 | 62 - 9 | 63 - 9 | 64 - 9 | 65 - 9 | 66 - 9 | 67 - 9 | 68 - 9 | 69 - 9 | 70 -(70 rows) - --- Checks case where we hit a group boundary at the last tuple of a batch. --- Because the full sort state is bounded, we scan 64 tuples (the mode --- transition point) but only retain 5. Thus when we transition modes, all --- tuples in the full sort state have different prefix keys. -explain (costs off) select * from (select * from t order by a) s order by a, b limit 5; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 5; - a | b ----+--- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 9 | 5 -(5 rows) - --- Test rescan. -begin; --- We force the planner to choose a plan with incremental sort on the right side --- of a nested loop join node. That way we trigger the rescan code path. -set local enable_hashjoin = off; -set local enable_mergejoin = off; -set local enable_material = off; -set local enable_sort = off; -explain (costs off) select * from t left join (select * from (select * from t order by a) v order by a, b) s on s.a = t.a where t.a in (1, 2); - QUERY PLAN ------------------------------------------------- - Nested Loop Left Join - Join Filter: (t_1.a = t.a) - -> Seq Scan on t - Filter: (a = ANY ('{1,2}'::integer[])) - -> Incremental Sort - Sort Key: t_1.a, t_1.b - Presorted Key: t_1.a - -> Sort - Disabled: true - Sort Key: t_1.a - -> Seq Scan on t t_1 -(11 rows) - -select * from t left join (select * from (select * from t order by a) v order by a, b) s on s.a = t.a where t.a in (1, 2); - a | b | a | b ----+---+---+--- - 1 | 1 | 1 | 1 - 2 | 2 | 2 | 2 -(2 rows) - -rollback; --- Test EXPLAIN ANALYZE with both fullsort and presorted groups. 
-select explain_analyze_without_memory('select * from (select * from t order by a) s order by a, b limit 70'); - explain_analyze_without_memory ----------------------------------------------------------------------------------------------------------------- - Limit (actual rows=70 loops=1) - -> Incremental Sort (actual rows=70 loops=1) - Sort Key: t.a, t.b - Presorted Key: t.a - Full-sort Groups: 1 Sort Method: quicksort Average Memory: NNkB Peak Memory: NNkB - Pre-sorted Groups: 5 Sort Methods: top-N heapsort, quicksort Average Memory: NNkB Peak Memory: NNkB - -> Sort (actual rows=1000 loops=1) - Sort Key: t.a - Sort Method: quicksort Memory: NNkB - -> Seq Scan on t (actual rows=1000 loops=1) -(10 rows) - -select jsonb_pretty(explain_analyze_inc_sort_nodes_without_memory('select * from (select * from t order by a) s order by a, b limit 70')); - jsonb_pretty -------------------------------------------------- - [ + - { + - "Disabled": false, + - "Sort Key": [ + - "t.a", + - "t.b" + - ], + - "Node Type": "Incremental Sort", + - "Actual Rows": 70, + - "Actual Loops": 1, + - "Async Capable": false, + - "Presorted Key": [ + - "t.a" + - ], + - "Parallel Aware": false, + - "Full-sort Groups": { + - "Group Count": 1, + - "Sort Methods Used": [ + - "quicksort" + - ], + - "Sort Space Memory": { + - "Peak Sort Space Used": "NN", + - "Average Sort Space Used": "NN"+ - } + - }, + - "Pre-sorted Groups": { + - "Group Count": 5, + - "Sort Methods Used": [ + - "top-N heapsort", + - "quicksort" + - ], + - "Sort Space Memory": { + - "Peak Sort Space Used": "NN", + - "Average Sort Space Used": "NN"+ - } + - }, + - "Parent Relationship": "Outer" + - } + - ] -(1 row) - -select explain_analyze_inc_sort_nodes_verify_invariants('select * from (select * from t order by a) s order by a, b limit 70'); - explain_analyze_inc_sort_nodes_verify_invariants --------------------------------------------------- - t -(1 row) - -delete from t; --- Small groups of 10 tuples each tested around each mode transition point. 
-insert into t(a, b) select i / 10, i from generate_series(1, 1000) n(i); -analyze t; -explain (costs off) select * from (select * from t order by a) s order by a, b limit 31; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 31; - a | b ----+---- - 0 | 1 - 0 | 2 - 0 | 3 - 0 | 4 - 0 | 5 - 0 | 6 - 0 | 7 - 0 | 8 - 0 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 2 | 20 - 2 | 21 - 2 | 22 - 2 | 23 - 2 | 24 - 2 | 25 - 2 | 26 - 2 | 27 - 2 | 28 - 2 | 29 - 3 | 30 - 3 | 31 -(31 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 32; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 32; - a | b ----+---- - 0 | 1 - 0 | 2 - 0 | 3 - 0 | 4 - 0 | 5 - 0 | 6 - 0 | 7 - 0 | 8 - 0 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 2 | 20 - 2 | 21 - 2 | 22 - 2 | 23 - 2 | 24 - 2 | 25 - 2 | 26 - 2 | 27 - 2 | 28 - 2 | 29 - 3 | 30 - 3 | 31 - 3 | 32 -(32 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 33; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 33; - a | b ----+---- - 0 | 1 - 0 | 2 - 0 | 3 - 0 | 4 - 0 | 5 - 0 | 6 - 0 | 7 - 0 | 8 - 0 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 2 | 20 - 2 | 21 - 2 | 22 - 2 | 23 - 2 | 24 - 2 | 25 - 2 | 26 - 2 | 27 - 2 | 28 - 2 | 29 - 3 | 30 - 3 | 31 - 3 | 32 - 3 | 33 -(33 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 65; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 65; - a | b ----+---- - 0 | 1 - 0 | 2 - 0 | 3 - 0 | 4 - 0 | 5 - 0 | 6 - 0 | 7 - 0 | 8 - 0 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 2 | 20 - 2 | 21 - 2 | 22 - 2 | 23 - 2 | 24 - 2 | 25 - 2 | 26 - 2 | 27 - 2 | 28 - 2 | 29 - 3 | 30 - 3 | 31 - 3 | 32 - 3 | 33 - 3 | 34 - 3 | 35 - 3 | 36 - 3 | 37 - 3 | 38 - 3 | 39 - 4 | 40 - 4 | 41 - 4 | 42 - 4 | 43 - 4 | 44 - 4 | 45 - 4 | 46 - 4 | 47 - 4 | 48 - 4 | 49 - 5 | 50 - 5 | 51 - 5 | 52 - 5 | 53 - 5 | 54 - 5 | 55 - 5 | 56 - 5 | 57 - 5 | 58 - 5 | 59 - 6 | 60 - 6 | 61 - 6 | 62 - 6 | 63 - 6 | 64 - 6 | 65 -(65 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 66; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 66; - a | b ----+---- - 0 | 1 - 0 | 2 - 0 | 3 - 0 | 4 - 0 | 5 - 0 | 6 - 0 | 7 - 0 | 8 - 0 | 9 - 1 | 10 - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 2 | 20 - 2 | 21 - 2 | 22 - 2 | 23 - 2 | 24 - 2 | 25 - 2 | 26 - 2 
| 27 - 2 | 28 - 2 | 29 - 3 | 30 - 3 | 31 - 3 | 32 - 3 | 33 - 3 | 34 - 3 | 35 - 3 | 36 - 3 | 37 - 3 | 38 - 3 | 39 - 4 | 40 - 4 | 41 - 4 | 42 - 4 | 43 - 4 | 44 - 4 | 45 - 4 | 46 - 4 | 47 - 4 | 48 - 4 | 49 - 5 | 50 - 5 | 51 - 5 | 52 - 5 | 53 - 5 | 54 - 5 | 55 - 5 | 56 - 5 | 57 - 5 | 58 - 5 | 59 - 6 | 60 - 6 | 61 - 6 | 62 - 6 | 63 - 6 | 64 - 6 | 65 - 6 | 66 -(66 rows) - -delete from t; --- Small groups of only 1 tuple each tested around each mode transition point. -insert into t(a, b) select i, i from generate_series(1, 1000) n(i); -analyze t; -explain (costs off) select * from (select * from t order by a) s order by a, b limit 31; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 31; - a | b -----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 - 6 | 6 - 7 | 7 - 8 | 8 - 9 | 9 - 10 | 10 - 11 | 11 - 12 | 12 - 13 | 13 - 14 | 14 - 15 | 15 - 16 | 16 - 17 | 17 - 18 | 18 - 19 | 19 - 20 | 20 - 21 | 21 - 22 | 22 - 23 | 23 - 24 | 24 - 25 | 25 - 26 | 26 - 27 | 27 - 28 | 28 - 29 | 29 - 30 | 30 - 31 | 31 -(31 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 32; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 32; - a | b -----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 - 6 | 6 - 7 | 7 - 8 | 8 - 9 | 9 - 10 | 10 - 11 | 11 - 12 | 12 - 13 | 13 - 14 | 14 - 15 | 15 - 16 | 16 - 17 | 17 - 18 | 18 - 19 | 19 - 20 | 20 - 21 | 21 - 22 | 22 - 23 | 23 - 24 | 24 - 25 | 25 - 26 | 26 - 27 | 27 - 28 | 28 - 29 | 29 - 30 | 30 - 31 | 31 - 32 | 32 -(32 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 33; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 33; - a | b -----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 - 6 | 6 - 7 | 7 - 8 | 8 - 9 | 9 - 10 | 10 - 11 | 11 - 12 | 12 - 13 | 13 - 14 | 14 - 15 | 15 - 16 | 16 - 17 | 17 - 18 | 18 - 19 | 19 - 20 | 20 - 21 | 21 - 22 | 22 - 23 | 23 - 24 | 24 - 25 | 25 - 26 | 26 - 27 | 27 - 28 | 28 - 29 | 29 - 30 | 30 - 31 | 31 - 32 | 32 - 33 | 33 -(33 rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 65; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 65; - a | b -----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 - 6 | 6 - 7 | 7 - 8 | 8 - 9 | 9 - 10 | 10 - 11 | 11 - 12 | 12 - 13 | 13 - 14 | 14 - 15 | 15 - 16 | 16 - 17 | 17 - 18 | 18 - 19 | 19 - 20 | 20 - 21 | 21 - 22 | 22 - 23 | 23 - 24 | 24 - 25 | 25 - 26 | 26 - 27 | 27 - 28 | 28 - 29 | 29 - 30 | 30 - 31 | 31 - 32 | 32 - 33 | 33 - 34 | 34 - 35 | 35 - 36 | 36 - 37 | 37 - 38 | 38 - 39 | 39 - 40 | 40 - 41 | 41 - 42 | 42 - 43 | 43 - 44 | 44 - 45 | 45 - 46 | 46 - 47 | 47 - 48 | 48 - 49 | 49 - 50 | 50 - 51 | 51 - 52 | 52 - 53 | 53 - 54 | 54 - 55 | 55 - 56 | 56 - 57 | 57 - 58 | 58 - 59 | 59 - 60 | 60 - 61 | 61 - 62 | 62 - 63 | 63 - 64 | 64 - 65 | 65 -(65 
rows) - -explain (costs off) select * from (select * from t order by a) s order by a, b limit 66; - QUERY PLAN ---------------------------------- - Limit - -> Incremental Sort - Sort Key: t.a, t.b - Presorted Key: t.a - -> Sort - Sort Key: t.a - -> Seq Scan on t -(7 rows) - -select * from (select * from t order by a) s order by a, b limit 66; - a | b -----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 - 6 | 6 - 7 | 7 - 8 | 8 - 9 | 9 - 10 | 10 - 11 | 11 - 12 | 12 - 13 | 13 - 14 | 14 - 15 | 15 - 16 | 16 - 17 | 17 - 18 | 18 - 19 | 19 - 20 | 20 - 21 | 21 - 22 | 22 - 23 | 23 - 24 | 24 - 25 | 25 - 26 | 26 - 27 | 27 - 28 | 28 - 29 | 29 - 30 | 30 - 31 | 31 - 32 | 32 - 33 | 33 - 34 | 34 - 35 | 35 - 36 | 36 - 37 | 37 - 38 | 38 - 39 | 39 - 40 | 40 - 41 | 41 - 42 | 42 - 43 | 43 - 44 | 44 - 45 | 45 - 46 | 46 - 47 | 47 - 48 | 48 - 49 | 49 - 50 | 50 - 51 | 51 - 52 | 52 - 53 | 53 - 54 | 54 - 55 | 55 - 56 | 56 - 57 | 57 - 58 | 58 - 59 | 59 - 60 | 60 - 61 | 61 - 62 | 62 - 63 | 63 - 64 | 64 - 65 | 65 - 66 | 66 -(66 rows) - -delete from t; -drop table t; --- Incremental sort vs. parallel queries -set min_parallel_table_scan_size = '1kB'; -set min_parallel_index_scan_size = '1kB'; -set parallel_setup_cost = 0; -set parallel_tuple_cost = 0; -set max_parallel_workers_per_gather = 2; -create table t (a int, b int, c int); -insert into t select mod(i,10),mod(i,10),i from generate_series(1,10000) s(i); -create index on t (a); -analyze t; -set enable_incremental_sort = off; -explain (costs off) select a,b,sum(c) from t group by 1,2 order by 1,2,3 limit 1; - QUERY PLAN ------------------------------------------------------- - Limit - -> Sort - Sort Key: a, b, (sum(c)) - -> Finalize HashAggregate - Group Key: a, b - -> Gather - Workers Planned: 2 - -> Partial HashAggregate - Group Key: a, b - -> Parallel Seq Scan on t -(10 rows) - -set enable_incremental_sort = on; -explain (costs off) select a,b,sum(c) from t group by 1,2 order by 1,2,3 limit 1; - QUERY PLAN ----------------------------------------------------------------------- - Limit - -> Incremental Sort - Sort Key: a, b, (sum(c)) - Presorted Key: a, b - -> GroupAggregate - Group Key: a, b - -> Gather Merge - Workers Planned: 2 - -> Incremental Sort - Sort Key: a, b - Presorted Key: a - -> Parallel Index Scan using t_a_idx on t -(12 rows) - --- Incremental sort vs. set operations with varno 0 -set enable_hashagg to off; -explain (costs off) select * from t union select * from t order by 1,3; - QUERY PLAN ----------------------------------------------------------- - Incremental Sort - Sort Key: t.a, t.c - Presorted Key: t.a - -> Unique - -> Merge Append - Sort Key: t.a, t.b, t.c - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: t.a, t.b, t.c - -> Parallel Seq Scan on t - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: t_1.a, t_1.b, t_1.c - -> Parallel Seq Scan on t t_1 -(16 rows) - --- Full sort, not just incremental sort can be pushed below a gather merge path --- by generate_useful_gather_paths. -explain (costs off) select distinct a,b from t; - QUERY PLAN ------------------------------------------------- - Unique - -> Gather Merge - Workers Planned: 2 - -> Unique - -> Sort - Sort Key: a, b - -> Parallel Seq Scan on t -(7 rows) - -drop table t; --- Sort pushdown can't go below where expressions are part of the rel target. --- In particular this is interesting for volatile expressions which have to --- go above joins since otherwise we'll incorrectly use expression evaluations --- across multiple rows. 
-set enable_hashagg=off; -set enable_seqscan=off; -set enable_incremental_sort = off; -set parallel_tuple_cost=0; -set parallel_setup_cost=0; -set min_parallel_table_scan_size = 0; -set min_parallel_index_scan_size = 0; --- Parallel sort below join. -explain (costs off) select distinct sub.unique1, stringu1 -from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub; - QUERY PLAN --------------------------------------------------------------------------- - Unique - -> Nested Loop - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: tenk1.unique1, tenk1.stringu1 - -> Parallel Index Scan using tenk1_unique1 on tenk1 - -> Function Scan on generate_series -(8 rows) - -explain (costs off) select sub.unique1, stringu1 -from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub -order by 1, 2; - QUERY PLAN --------------------------------------------------------------------- - Nested Loop - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: tenk1.unique1, tenk1.stringu1 - -> Parallel Index Scan using tenk1_unique1 on tenk1 - -> Function Scan on generate_series -(7 rows) - --- Parallel sort but with expression that can be safely generated at the base rel. -explain (costs off) select distinct sub.unique1, md5(stringu1) -from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub; - QUERY PLAN ----------------------------------------------------------------------------------------- - Unique - -> Nested Loop - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: tenk1.unique1, (md5((tenk1.stringu1)::text)) COLLATE "C" - -> Parallel Index Scan using tenk1_unique1 on tenk1 - -> Function Scan on generate_series -(8 rows) - -explain (costs off) select sub.unique1, md5(stringu1) -from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub -order by 1, 2; - QUERY PLAN ----------------------------------------------------------------------------------- - Nested Loop - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: tenk1.unique1, (md5((tenk1.stringu1)::text)) COLLATE "C" - -> Parallel Index Scan using tenk1_unique1 on tenk1 - -> Function Scan on generate_series -(7 rows) - --- Parallel sort with an aggregate that can be safely generated in parallel, --- but we can't sort by partial aggregate values. -explain (costs off) select count(*) -from tenk1 t1 -join tenk1 t2 on t1.unique1 = t2.unique2 -join tenk1 t3 on t2.unique1 = t3.unique1 -order by count(*); - QUERY PLAN ------------------------------------------------------------------------------------------------ - Sort - Sort Key: (count(*)) - -> Finalize Aggregate - -> Gather - Workers Planned: 2 - -> Partial Aggregate - -> Parallel Hash Join - Hash Cond: (t2.unique1 = t3.unique1) - -> Parallel Hash Join - Hash Cond: (t1.unique1 = t2.unique2) - -> Parallel Index Only Scan using tenk1_unique1 on tenk1 t1 - -> Parallel Hash - -> Parallel Index Scan using tenk1_unique2 on tenk1 t2 - -> Parallel Hash - -> Parallel Index Only Scan using tenk1_unique1 on tenk1 t3 -(15 rows) - --- Parallel sort but with expression (correlated subquery) that --- is prohibited in parallel plans. 
-explain (costs off) select distinct - unique1, - (select t.unique1 from tenk1 where tenk1.unique1 = t.unique1) -from tenk1 t, generate_series(1, 1000); - QUERY PLAN ---------------------------------------------------------------------------------- - Unique - -> Sort - Sort Key: t.unique1, ((SubPlan 1)) - -> Gather - Workers Planned: 2 - -> Nested Loop - -> Parallel Index Only Scan using tenk1_unique1 on tenk1 t - -> Function Scan on generate_series - SubPlan 1 - -> Index Only Scan using tenk1_unique1 on tenk1 - Index Cond: (unique1 = t.unique1) -(11 rows) - -explain (costs off) select - unique1, - (select t.unique1 from tenk1 where tenk1.unique1 = t.unique1) -from tenk1 t, generate_series(1, 1000) -order by 1, 2; - QUERY PLAN ---------------------------------------------------------------------------- - Sort - Sort Key: t.unique1, ((SubPlan 1)) - -> Gather - Workers Planned: 2 - -> Nested Loop - -> Parallel Index Only Scan using tenk1_unique1 on tenk1 t - -> Function Scan on generate_series - SubPlan 1 - -> Index Only Scan using tenk1_unique1 on tenk1 - Index Cond: (unique1 = t.unique1) -(10 rows) - --- Parallel sort but with expression not available until the upper rel. -explain (costs off) select distinct sub.unique1, stringu1 || random()::text -from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub; - QUERY PLAN ---------------------------------------------------------------------------------------------- - Unique - -> Sort - Sort Key: tenk1.unique1, (((tenk1.stringu1)::text || (random())::text)) COLLATE "C" - -> Gather - Workers Planned: 2 - -> Nested Loop - -> Parallel Index Scan using tenk1_unique1 on tenk1 - -> Function Scan on generate_series -(8 rows) - -explain (costs off) select sub.unique1, stringu1 || random()::text -from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub -order by 1, 2; - QUERY PLAN ---------------------------------------------------------------------------------------- - Sort - Sort Key: tenk1.unique1, (((tenk1.stringu1)::text || (random())::text)) COLLATE "C" - -> Gather - Workers Planned: 2 - -> Nested Loop - -> Parallel Index Scan using tenk1_unique1 on tenk1 - -> Function Scan on generate_series -(7 rows) - -reset enable_hashagg; -reset enable_seqscan; -reset enable_incremental_sort; -reset parallel_tuple_cost; -reset parallel_setup_cost; -reset min_parallel_table_scan_size; -reset min_parallel_index_scan_size; --- Ensure incremental sorts work for amcanorderbyop type indexes -create table point_table (a point, b int); -create index point_table_a_idx on point_table using gist(a); --- Ensure we get an incremental sort plan for both of the following queries -explain (costs off) select a, b, a <-> point(5, 5) dist from point_table order by dist, b limit 1; - QUERY PLAN ---------------------------------------------------------------- - Limit - -> Incremental Sort - Sort Key: ((a <-> '(5,5)'::point)), b - Presorted Key: ((a <-> '(5,5)'::point)) - -> Index Scan using point_table_a_idx on point_table - Order By: (a <-> '(5,5)'::point) -(6 rows) - -explain (costs off) select a, b, a <-> point(5, 5) dist from point_table order by dist, b desc limit 1; - QUERY PLAN ---------------------------------------------------------------- - Limit - -> Incremental Sort - Sort Key: ((a <-> '(5,5)'::point)), b DESC - Presorted Key: ((a <-> '(5,5)'::point)) - -> Index Scan using point_table_a_idx on point_table - Order By: (a <-> '(5,5)'::point) -(6 rows) - --- Ensure we get an incremental sort on the outer side of the mergejoin 
-explain (costs off)
-select * from
-  (select * from tenk1 order by four) t1 join tenk1 t2 on t1.four = t2.four and t1.two = t2.two
-order by t1.four, t1.two limit 1;
-                              QUERY PLAN                               
------------------------------------------------------------------------
- Limit
-   ->  Merge Join
-         Merge Cond: ((tenk1.four = t2.four) AND (tenk1.two = t2.two))
-         ->  Incremental Sort
-               Sort Key: tenk1.four, tenk1.two
-               Presorted Key: tenk1.four
-               ->  Sort
-                     Sort Key: tenk1.four
-                     ->  Seq Scan on tenk1
-         ->  Sort
-               Sort Key: t2.four, t2.two
-               ->  Seq Scan on tenk1 t2
-(12 rows)
-
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+	Is the server running locally and accepting connections on that socket?
diff -U3 /Users/admin/pgsql/src/test/regress/expected/create_role.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/create_role.out
--- /Users/admin/pgsql/src/test/regress/expected/create_role.out 2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/create_role.out 2024-12-13 13:20:10
@@ -1,261 +1,2 @@
--- ok, superuser can create users with any set of privileges
-CREATE ROLE regress_role_super SUPERUSER;
-CREATE ROLE regress_role_admin CREATEDB CREATEROLE REPLICATION BYPASSRLS;
-GRANT CREATE ON DATABASE regression TO regress_role_admin WITH GRANT OPTION;
-CREATE ROLE regress_role_limited_admin CREATEROLE;
-CREATE ROLE regress_role_normal;
--- fail, CREATEROLE user can't give away role attributes without having them
-SET SESSION AUTHORIZATION regress_role_limited_admin;
-CREATE ROLE regress_nosuch_superuser SUPERUSER;
-ERROR: permission denied to create role
-DETAIL: Only roles with the SUPERUSER attribute may create roles with the SUPERUSER attribute.
-CREATE ROLE regress_nosuch_replication_bypassrls REPLICATION BYPASSRLS;
-ERROR: permission denied to create role
-DETAIL: Only roles with the REPLICATION attribute may create roles with the REPLICATION attribute.
-CREATE ROLE regress_nosuch_replication REPLICATION;
-ERROR: permission denied to create role
-DETAIL: Only roles with the REPLICATION attribute may create roles with the REPLICATION attribute.
-CREATE ROLE regress_nosuch_bypassrls BYPASSRLS;
-ERROR: permission denied to create role
-DETAIL: Only roles with the BYPASSRLS attribute may create roles with the BYPASSRLS attribute.
-CREATE ROLE regress_nosuch_createdb CREATEDB;
-ERROR: permission denied to create role
-DETAIL: Only roles with the CREATEDB attribute may create roles with the CREATEDB attribute.
--- ok, can create a role without any special attributes
-CREATE ROLE regress_role_limited;
--- fail, can't give it in any of the restricted attributes
-ALTER ROLE regress_role_limited SUPERUSER;
-ERROR: permission denied to alter role
-DETAIL: Only roles with the SUPERUSER attribute may change the SUPERUSER attribute.
-ALTER ROLE regress_role_limited REPLICATION;
-ERROR: permission denied to alter role
-DETAIL: Only roles with the REPLICATION attribute may change the REPLICATION attribute.
-ALTER ROLE regress_role_limited CREATEDB;
-ERROR: permission denied to alter role
-DETAIL: Only roles with the CREATEDB attribute may change the CREATEDB attribute.
-ALTER ROLE regress_role_limited BYPASSRLS;
-ERROR: permission denied to alter role
-DETAIL: Only roles with the BYPASSRLS attribute may change the BYPASSRLS attribute.
-DROP ROLE regress_role_limited;
--- ok, can give away these role attributes if you have them
-SET SESSION AUTHORIZATION regress_role_admin;
-CREATE ROLE regress_replication_bypassrls REPLICATION BYPASSRLS;
-CREATE ROLE regress_replication REPLICATION;
-CREATE ROLE regress_bypassrls BYPASSRLS;
-CREATE ROLE regress_createdb CREATEDB;
--- ok, can toggle these role attributes off and on if you have them
-ALTER ROLE regress_replication NOREPLICATION;
-ALTER ROLE regress_replication REPLICATION;
-ALTER ROLE regress_bypassrls NOBYPASSRLS;
-ALTER ROLE regress_bypassrls BYPASSRLS;
-ALTER ROLE regress_createdb NOCREATEDB;
-ALTER ROLE regress_createdb CREATEDB;
--- fail, can't toggle SUPERUSER
-ALTER ROLE regress_createdb SUPERUSER;
-ERROR: permission denied to alter role
-DETAIL: Only roles with the SUPERUSER attribute may change the SUPERUSER attribute.
-ALTER ROLE regress_createdb NOSUPERUSER;
-ERROR: permission denied to alter role
-DETAIL: Only roles with the SUPERUSER attribute may change the SUPERUSER attribute.
--- ok, having CREATEROLE is enough to create users with these privileges
-CREATE ROLE regress_createrole CREATEROLE NOINHERIT;
-GRANT CREATE ON DATABASE regression TO regress_createrole WITH GRANT OPTION;
-CREATE ROLE regress_login LOGIN;
-CREATE ROLE regress_inherit INHERIT;
-CREATE ROLE regress_connection_limit CONNECTION LIMIT 5;
-CREATE ROLE regress_encrypted_password ENCRYPTED PASSWORD 'foo';
-CREATE ROLE regress_password_null PASSWORD NULL;
--- ok, backwards compatible noise words should be ignored
-CREATE ROLE regress_noiseword SYSID 12345;
-NOTICE: SYSID can no longer be specified
--- fail, cannot grant membership in superuser role
-CREATE ROLE regress_nosuch_super IN ROLE regress_role_super;
-ERROR: permission denied to grant role "regress_role_super"
-DETAIL: Only roles with the SUPERUSER attribute may grant roles with the SUPERUSER attribute.
--- fail, database owner cannot have members
-CREATE ROLE regress_nosuch_dbowner IN ROLE pg_database_owner;
-ERROR: role "pg_database_owner" cannot have explicit members
--- ok, can grant other users into a role
-CREATE ROLE regress_inroles ROLE
-  regress_role_super, regress_createdb, regress_createrole, regress_login,
-  regress_inherit, regress_connection_limit, regress_encrypted_password, regress_password_null;
--- fail, cannot grant a role into itself
-CREATE ROLE regress_nosuch_recursive ROLE regress_nosuch_recursive;
-ERROR: role "regress_nosuch_recursive" is a member of role "regress_nosuch_recursive"
--- ok, can grant other users into a role with admin option
-CREATE ROLE regress_adminroles ADMIN
-  regress_role_super, regress_createdb, regress_createrole, regress_login,
-  regress_inherit, regress_connection_limit, regress_encrypted_password, regress_password_null;
--- fail, cannot grant a role into itself with admin option
-CREATE ROLE regress_nosuch_admin_recursive ADMIN regress_nosuch_admin_recursive;
-ERROR: role "regress_nosuch_admin_recursive" is a member of role "regress_nosuch_admin_recursive"
--- fail, regress_createrole does not have CREATEDB privilege
-SET SESSION AUTHORIZATION regress_createrole;
-CREATE DATABASE regress_nosuch_db;
-ERROR: permission denied to create database
--- ok, regress_createrole can create new roles
-CREATE ROLE regress_plainrole;
--- ok, roles with CREATEROLE can create new roles with it
-CREATE ROLE regress_rolecreator CREATEROLE;
--- ok, roles with CREATEROLE can create new roles with different role
--- attributes, including CREATEROLE
-CREATE ROLE regress_hasprivs CREATEROLE LOGIN INHERIT CONNECTION LIMIT 5;
--- ok, we should be able to modify a role we created
-COMMENT ON ROLE regress_hasprivs IS 'some comment';
-ALTER ROLE regress_hasprivs RENAME TO regress_tenant;
-ALTER ROLE regress_tenant NOINHERIT NOLOGIN CONNECTION LIMIT 7;
--- fail, we should be unable to modify a role we did not create
-COMMENT ON ROLE regress_role_normal IS 'some comment';
-ERROR: permission denied
-DETAIL: The current user must have the ADMIN option on role "regress_role_normal".
-ALTER ROLE regress_role_normal RENAME TO regress_role_abnormal;
-ERROR: permission denied to rename role
-DETAIL: Only roles with the CREATEROLE attribute and the ADMIN option on role "regress_role_normal" may rename this role.
-ALTER ROLE regress_role_normal NOINHERIT NOLOGIN CONNECTION LIMIT 7;
-ERROR: permission denied to alter role
-DETAIL: Only roles with the CREATEROLE attribute and the ADMIN option on role "regress_role_normal" may alter this role.
--- ok, regress_tenant can create objects within the database
-SET SESSION AUTHORIZATION regress_tenant;
-CREATE TABLE tenant_table (i integer);
-CREATE INDEX tenant_idx ON tenant_table(i);
-CREATE VIEW tenant_view AS SELECT * FROM pg_catalog.pg_class;
-REVOKE ALL PRIVILEGES ON tenant_table FROM PUBLIC;
--- fail, these objects belonging to regress_tenant
-SET SESSION AUTHORIZATION regress_createrole;
-DROP INDEX tenant_idx;
-ERROR: must be owner of index tenant_idx
-ALTER TABLE tenant_table ADD COLUMN t text;
-ERROR: must be owner of table tenant_table
-DROP TABLE tenant_table;
-ERROR: must be owner of table tenant_table
-ALTER VIEW tenant_view OWNER TO regress_role_admin;
-ERROR: must be owner of view tenant_view
-DROP VIEW tenant_view;
-ERROR: must be owner of view tenant_view
--- fail, can't create objects owned as regress_tenant
-CREATE SCHEMA regress_tenant_schema AUTHORIZATION regress_tenant;
-ERROR: must be able to SET ROLE "regress_tenant"
--- fail, we don't inherit permissions from regress_tenant
-REASSIGN OWNED BY regress_tenant TO regress_createrole;
-ERROR: permission denied to reassign objects
-DETAIL: Only roles with privileges of role "regress_tenant" may reassign objects owned by it.
--- ok, create a role with a value for createrole_self_grant
-SET createrole_self_grant = 'set, inherit';
-CREATE ROLE regress_tenant2;
-GRANT CREATE ON DATABASE regression TO regress_tenant2;
--- ok, regress_tenant2 can create objects within the database
-SET SESSION AUTHORIZATION regress_tenant2;
-CREATE TABLE tenant2_table (i integer);
-REVOKE ALL PRIVILEGES ON tenant2_table FROM PUBLIC;
--- ok, because we have SET and INHERIT on regress_tenant2
-SET SESSION AUTHORIZATION regress_createrole;
-CREATE SCHEMA regress_tenant2_schema AUTHORIZATION regress_tenant2;
-ALTER SCHEMA regress_tenant2_schema OWNER TO regress_createrole;
-ALTER TABLE tenant2_table OWNER TO regress_createrole;
-ALTER TABLE tenant2_table OWNER TO regress_tenant2;
--- with SET but not INHERIT, we can give away objects but not take them
-REVOKE INHERIT OPTION FOR regress_tenant2 FROM regress_createrole;
-ALTER SCHEMA regress_tenant2_schema OWNER TO regress_tenant2;
-ALTER TABLE tenant2_table OWNER TO regress_createrole;
-ERROR: must be owner of table tenant2_table
--- with INHERIT but not SET, we can take objects but not give them away
-GRANT regress_tenant2 TO regress_createrole WITH INHERIT TRUE, SET FALSE;
-ALTER TABLE tenant2_table OWNER TO regress_createrole;
-ALTER TABLE tenant2_table OWNER TO regress_tenant2;
-ERROR: must be able to SET ROLE "regress_tenant2"
-DROP TABLE tenant2_table;
--- fail, CREATEROLE is not enough to create roles in privileged roles
-CREATE ROLE regress_read_all_data IN ROLE pg_read_all_data;
-ERROR: permission denied to grant role "pg_read_all_data"
-DETAIL: Only roles with the ADMIN option on role "pg_read_all_data" may grant this role.
-CREATE ROLE regress_write_all_data IN ROLE pg_write_all_data;
-ERROR: permission denied to grant role "pg_write_all_data"
-DETAIL: Only roles with the ADMIN option on role "pg_write_all_data" may grant this role.
-CREATE ROLE regress_monitor IN ROLE pg_monitor;
-ERROR: permission denied to grant role "pg_monitor"
-DETAIL: Only roles with the ADMIN option on role "pg_monitor" may grant this role.
-CREATE ROLE regress_read_all_settings IN ROLE pg_read_all_settings;
-ERROR: permission denied to grant role "pg_read_all_settings"
-DETAIL: Only roles with the ADMIN option on role "pg_read_all_settings" may grant this role.
-CREATE ROLE regress_read_all_stats IN ROLE pg_read_all_stats;
-ERROR: permission denied to grant role "pg_read_all_stats"
-DETAIL: Only roles with the ADMIN option on role "pg_read_all_stats" may grant this role.
-CREATE ROLE regress_stat_scan_tables IN ROLE pg_stat_scan_tables;
-ERROR: permission denied to grant role "pg_stat_scan_tables"
-DETAIL: Only roles with the ADMIN option on role "pg_stat_scan_tables" may grant this role.
-CREATE ROLE regress_read_server_files IN ROLE pg_read_server_files;
-ERROR: permission denied to grant role "pg_read_server_files"
-DETAIL: Only roles with the ADMIN option on role "pg_read_server_files" may grant this role.
-CREATE ROLE regress_write_server_files IN ROLE pg_write_server_files;
-ERROR: permission denied to grant role "pg_write_server_files"
-DETAIL: Only roles with the ADMIN option on role "pg_write_server_files" may grant this role.
-CREATE ROLE regress_execute_server_program IN ROLE pg_execute_server_program;
-ERROR: permission denied to grant role "pg_execute_server_program"
-DETAIL: Only roles with the ADMIN option on role "pg_execute_server_program" may grant this role.
-CREATE ROLE regress_signal_backend IN ROLE pg_signal_backend;
-ERROR: permission denied to grant role "pg_signal_backend"
-DETAIL: Only roles with the ADMIN option on role "pg_signal_backend" may grant this role.
--- fail, role still owns database objects
-DROP ROLE regress_tenant;
-ERROR: role "regress_tenant" cannot be dropped because some objects depend on it
-DETAIL: owner of table tenant_table
-owner of view tenant_view
--- fail, creation of these roles failed above so they do not now exist
-SET SESSION AUTHORIZATION regress_role_admin;
-DROP ROLE regress_nosuch_superuser;
-ERROR: role "regress_nosuch_superuser" does not exist
-DROP ROLE regress_nosuch_replication_bypassrls;
-ERROR: role "regress_nosuch_replication_bypassrls" does not exist
-DROP ROLE regress_nosuch_replication;
-ERROR: role "regress_nosuch_replication" does not exist
-DROP ROLE regress_nosuch_bypassrls;
-ERROR: role "regress_nosuch_bypassrls" does not exist
-DROP ROLE regress_nosuch_super;
-ERROR: role "regress_nosuch_super" does not exist
-DROP ROLE regress_nosuch_dbowner;
-ERROR: role "regress_nosuch_dbowner" does not exist
-DROP ROLE regress_nosuch_recursive;
-ERROR: role "regress_nosuch_recursive" does not exist
-DROP ROLE regress_nosuch_admin_recursive;
-ERROR: role "regress_nosuch_admin_recursive" does not exist
-DROP ROLE regress_plainrole;
--- must revoke privileges before dropping role
-REVOKE CREATE ON DATABASE regression FROM regress_createrole CASCADE;
--- ok, should be able to drop non-superuser roles we created
-DROP ROLE regress_replication_bypassrls;
-DROP ROLE regress_replication;
-DROP ROLE regress_bypassrls;
-DROP ROLE regress_createdb;
-DROP ROLE regress_createrole;
-DROP ROLE regress_login;
-DROP ROLE regress_inherit;
-DROP ROLE regress_connection_limit;
-DROP ROLE regress_encrypted_password;
-DROP ROLE regress_password_null;
-DROP ROLE regress_noiseword;
-DROP ROLE regress_inroles;
-DROP ROLE regress_adminroles;
--- fail, cannot drop ourself, nor superusers or roles we lack ADMIN for
-DROP ROLE regress_role_super;
-ERROR: permission denied to drop role
-DETAIL: Only roles with the SUPERUSER attribute may drop roles with the SUPERUSER attribute.
-DROP ROLE regress_role_admin;
-ERROR: current user cannot be dropped
-DROP ROLE regress_rolecreator;
-ERROR: permission denied to drop role
-DETAIL: Only roles with the CREATEROLE attribute and the ADMIN option on role "regress_rolecreator" may drop this role.
--- ok
-RESET SESSION AUTHORIZATION;
-REVOKE CREATE ON DATABASE regression FROM regress_role_admin CASCADE;
-DROP INDEX tenant_idx;
-DROP TABLE tenant_table;
-DROP VIEW tenant_view;
-DROP SCHEMA regress_tenant2_schema;
--- check for duplicated drop
-DROP ROLE regress_tenant, regress_tenant;
-DROP ROLE regress_tenant2;
-DROP ROLE regress_rolecreator;
-DROP ROLE regress_role_admin;
-DROP ROLE regress_role_limited_admin;
-DROP ROLE regress_role_super;
-DROP ROLE regress_role_normal;
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+	Is the server running locally and accepting connections on that socket?
diff -U3 /Users/admin/pgsql/src/test/regress/expected/without_overlaps.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/without_overlaps.out
--- /Users/admin/pgsql/src/test/regress/expected/without_overlaps.out 2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/without_overlaps.out 2024-12-13 13:20:10
@@ -1,2635 +1,2 @@
--- Tests for WITHOUT OVERLAPS.
---
--- We leave behind several tables to test pg_dump etc:
--- temporal_rng, temporal_rng2,
--- temporal_fk_rng2rng.
-SET datestyle TO ISO, YMD;
---
--- test input parser
---
--- PK with no columns just WITHOUT OVERLAPS:
-CREATE TABLE temporal_rng (
-  valid_at daterange,
-  CONSTRAINT temporal_rng_pk PRIMARY KEY (valid_at WITHOUT OVERLAPS)
-);
-ERROR: constraint using WITHOUT OVERLAPS needs at least two columns
--- PK with a range column/PERIOD that isn't there:
-CREATE TABLE temporal_rng (
-  id INTEGER,
-  CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-);
-ERROR: column "valid_at" named in key does not exist
-LINE 3:   CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHO...
-                                                      ^
--- PK with a non-range column:
-CREATE TABLE temporal_rng (
-  id int4range,
-  valid_at TEXT,
-  CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-);
-ERROR: column "valid_at" in WITHOUT OVERLAPS is not a range or multirange type
-LINE 4:   CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHO...
-                                                      ^
--- PK with one column plus a range:
-CREATE TABLE temporal_rng (
-  -- Since we can't depend on having btree_gist here,
-  -- use an int4range instead of an int.
-  -- (The rangetypes regression test uses the same trick.)
-  id int4range,
-  valid_at daterange,
-  CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-);
-\d temporal_rng
-             Table "public.temporal_rng"
-  Column  |   Type    | Collation | Nullable | Default
-----------+-----------+-----------+----------+---------
- id       | int4range |           | not null |
- valid_at | daterange |           | not null |
-Indexes:
-    "temporal_rng_pk" PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-
-SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_rng_pk';
-            pg_get_constraintdef
----------------------------------------------
- PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-(1 row)
-
-SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE conname = 'temporal_rng_pk';
-                                pg_get_indexdef
--------------------------------------------------------------------------------
- CREATE UNIQUE INDEX temporal_rng_pk ON temporal_rng USING gist (id, valid_at)
-(1 row)
-
--- PK from LIKE:
-CREATE TABLE temporal_rng2 (LIKE temporal_rng INCLUDING ALL);
-\d temporal_rng2
-            Table "public.temporal_rng2"
-  Column  |   Type    | Collation | Nullable | Default
-----------+-----------+-----------+----------+---------
- id       | int4range |           | not null |
- valid_at | daterange |           | not null |
-Indexes:
-    "temporal_rng2_pkey" PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-
-DROP TABLE temporal_rng2;
--- no PK from INHERITS:
-CREATE TABLE temporal_rng2 () INHERITS (temporal_rng);
-\d temporal_rng2
-            Table "public.temporal_rng2"
-  Column  |   Type    | Collation | Nullable | Default
-----------+-----------+-----------+----------+---------
- id       | int4range |           | not null |
- valid_at | daterange |           | not null |
-Inherits: temporal_rng
-
-DROP TABLE temporal_rng2;
-DROP TABLE temporal_rng;
--- PK in inheriting table:
-CREATE TABLE temporal_rng (
-  id int4range,
-  valid_at daterange
-);
-CREATE TABLE temporal_rng2 (
-  CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-) INHERITS (temporal_rng);
-\d temporal_rng2
-            Table "public.temporal_rng2"
-  Column  |   Type    | Collation | Nullable | Default
-----------+-----------+-----------+----------+---------
- id       | int4range |           | not null |
- valid_at | daterange |           | not null |
-Indexes:
-    "temporal_rng_pk" PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-Inherits: temporal_rng
-
-DROP TABLE temporal_rng CASCADE;
-NOTICE: drop cascades to table temporal_rng2
--- Add PK to already inheriting table:
-CREATE TABLE temporal_rng (
-  id int4range,
-  valid_at daterange
-);
-CREATE TABLE temporal_rng2 () INHERITS (temporal_rng);
-ALTER TABLE temporal_rng2
-  ADD CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS);
-\d temporal_rng2
-            Table "public.temporal_rng2"
-  Column  |   Type    | Collation | Nullable | Default
-----------+-----------+-----------+----------+---------
- id       | int4range |           | not null |
- valid_at | daterange |           | not null |
-Indexes:
-    "temporal_rng_pk" PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-Inherits: temporal_rng
-
-DROP TABLE temporal_rng2;
-DROP TABLE temporal_rng;
--- PK with two columns plus a range:
-CREATE TABLE temporal_rng2 (
-  id1 int4range,
-  id2 int4range,
-  valid_at daterange,
-  CONSTRAINT temporal_rng2_pk PRIMARY KEY (id1, id2, valid_at WITHOUT OVERLAPS)
-);
-\d temporal_rng2
-            Table "public.temporal_rng2"
-  Column  |   Type    | Collation | Nullable | Default
-----------+-----------+-----------+----------+---------
- id1      | int4range |           | not null |
- id2      | int4range |           | not null |
- valid_at | daterange |           | not null |
-Indexes:
-    "temporal_rng2_pk" PRIMARY KEY (id1, id2, valid_at WITHOUT OVERLAPS)
-
-SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_rng2_pk';
-               pg_get_constraintdef
----------------------------------------------------
- PRIMARY KEY (id1, id2, valid_at WITHOUT OVERLAPS)
-(1 row)
-
-SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE conname = 'temporal_rng2_pk';
-                                    pg_get_indexdef
-----------------------------------------------------------------------------------------
- CREATE UNIQUE INDEX temporal_rng2_pk ON temporal_rng2 USING gist (id1, id2, valid_at)
-(1 row)
-
--- PK with a custom range type:
-CREATE TYPE textrange2 AS range (subtype=text, collation="C");
-CREATE TABLE temporal_rng3 (
-  id int4range,
-  valid_at textrange2,
-  CONSTRAINT temporal_rng3_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-);
-ALTER TABLE temporal_rng3 DROP CONSTRAINT temporal_rng3_pk;
-DROP TABLE temporal_rng3;
-DROP TYPE textrange2;
--- PK with one column plus a multirange:
-CREATE TABLE temporal_mltrng (
-  id int4range,
-  valid_at datemultirange,
-  CONSTRAINT temporal_mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-);
-\d temporal_mltrng
-              Table "public.temporal_mltrng"
-  Column  |      Type      | Collation | Nullable | Default
-----------+----------------+-----------+----------+---------
- id       | int4range      |           | not null |
- valid_at | datemultirange |           | not null |
-Indexes:
-    "temporal_mltrng_pk" PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-
-SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_mltrng_pk';
-            pg_get_constraintdef
----------------------------------------------
- PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-(1 row)
-
-SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE conname = 'temporal_mltrng_pk';
-                                   pg_get_indexdef
--------------------------------------------------------------------------------------
- CREATE UNIQUE INDEX temporal_mltrng_pk ON temporal_mltrng USING gist (id, valid_at)
-(1 row)
-
--- PK with two columns plus a multirange:
-CREATE TABLE temporal_mltrng2 (
-  id1 int4range,
-  id2 int4range,
-  valid_at datemultirange,
-  CONSTRAINT temporal_mltrng2_pk PRIMARY KEY (id1, id2, valid_at WITHOUT OVERLAPS)
-);
-\d temporal_mltrng2
-             Table "public.temporal_mltrng2"
-  Column  |      Type      | Collation | Nullable | Default
-----------+----------------+-----------+----------+---------
- id1      | int4range      |           | not null |
- id2      | int4range      |           | not null |
- valid_at | datemultirange |           | not null |
-Indexes:
-    "temporal_mltrng2_pk" PRIMARY KEY (id1, id2, valid_at WITHOUT OVERLAPS)
-
-SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_mltrng2_pk';
-               pg_get_constraintdef
----------------------------------------------------
- PRIMARY KEY (id1, id2, valid_at WITHOUT OVERLAPS)
-(1 row)
-
-SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE conname = 'temporal_mltrng2_pk';
-                                       pg_get_indexdef
----------------------------------------------------------------------------------------------
- CREATE UNIQUE INDEX temporal_mltrng2_pk ON temporal_mltrng2 USING gist (id1, id2, valid_at)
-(1 row)
-
--- UNIQUE with no columns just WITHOUT OVERLAPS:
-CREATE TABLE temporal_rng3 (
-  valid_at daterange,
-  CONSTRAINT temporal_rng3_uq UNIQUE (valid_at WITHOUT OVERLAPS)
-);
-ERROR: constraint using WITHOUT OVERLAPS needs at least two columns
--- UNIQUE with a range column/PERIOD that isn't there:
-CREATE TABLE temporal_rng3 (
-  id INTEGER,
-  CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS)
-);
-ERROR: column "valid_at" named in key does not exist
-LINE 3: CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT O...
-                                                ^
--- UNIQUE with a non-range column:
-CREATE TABLE temporal_rng3 (
-  id int4range,
-  valid_at TEXT,
-  CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS)
-);
-ERROR: column "valid_at" in WITHOUT OVERLAPS is not a range or multirange type
-LINE 4: CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT O...
-                                                ^
--- UNIQUE with one column plus a range:
-CREATE TABLE temporal_rng3 (
-  id int4range,
-  valid_at daterange,
-  CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS)
-);
-\d temporal_rng3
-            Table "public.temporal_rng3"
-  Column  |   Type    | Collation | Nullable | Default
-----------+-----------+-----------+----------+---------
- id       | int4range |           |          |
- valid_at | daterange |           |          |
-Indexes:
-    "temporal_rng3_uq" UNIQUE (id, valid_at WITHOUT OVERLAPS)
-
-SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_rng3_uq';
-          pg_get_constraintdef
-----------------------------------------
- UNIQUE (id, valid_at WITHOUT OVERLAPS)
-(1 row)
-
-SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE conname = 'temporal_rng3_uq';
-                                 pg_get_indexdef
---------------------------------------------------------------------------------
- CREATE UNIQUE INDEX temporal_rng3_uq ON temporal_rng3 USING gist (id, valid_at)
-(1 row)
-
-DROP TABLE temporal_rng3;
--- UNIQUE with two columns plus a range:
-CREATE TABLE temporal_rng3 (
-  id1 int4range,
-  id2 int4range,
-  valid_at daterange,
-  CONSTRAINT temporal_rng3_uq UNIQUE (id1, id2, valid_at WITHOUT OVERLAPS)
-);
-\d temporal_rng3
-            Table "public.temporal_rng3"
-  Column  |   Type    | Collation | Nullable | Default
-----------+-----------+-----------+----------+---------
- id1      | int4range |           |          |
- id2      | int4range |           |          |
- valid_at | daterange |           |          |
-Indexes:
-    "temporal_rng3_uq" UNIQUE (id1, id2, valid_at WITHOUT OVERLAPS)
-
-SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_rng3_uq';
-            pg_get_constraintdef
----------------------------------------------
- UNIQUE (id1, id2, valid_at WITHOUT OVERLAPS)
-(1 row)
-
-SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE conname = 'temporal_rng3_uq';
-                                    pg_get_indexdef
----------------------------------------------------------------------------------------
- CREATE UNIQUE INDEX temporal_rng3_uq ON temporal_rng3 USING gist (id1, id2, valid_at)
-(1 row)
-
-DROP TABLE temporal_rng3;
--- UNIQUE with a custom range type:
-CREATE TYPE textrange2 AS range (subtype=text, collation="C");
-CREATE TABLE temporal_rng3 (
-  id int4range,
-  valid_at textrange2,
-  CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS)
-);
-ALTER TABLE temporal_rng3 DROP CONSTRAINT temporal_rng3_uq;
-DROP TABLE temporal_rng3;
-DROP TYPE textrange2;
---
--- test ALTER TABLE ADD CONSTRAINT
---
-CREATE TABLE temporal_rng (
-  id int4range,
-  valid_at daterange
-);
-ALTER TABLE temporal_rng
-  ADD CONSTRAINT temporal_rng_pk
-  PRIMARY KEY (id, valid_at WITHOUT OVERLAPS);
--- PK with USING INDEX (not possible):
-CREATE TABLE temporal3 (
-  id int4range,
-  valid_at daterange
-);
-CREATE INDEX idx_temporal3_uq ON temporal3 USING gist (id, valid_at);
-ALTER TABLE temporal3
-  ADD CONSTRAINT temporal3_pk
-  PRIMARY KEY USING INDEX idx_temporal3_uq;
-ERROR: "idx_temporal3_uq" is not a unique index
-LINE 2: ADD CONSTRAINT temporal3_pk
-        ^
-DETAIL: Cannot create a primary key or unique constraint using such an index.
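
The USING INDEX failure above is inherent to temporal keys: they are backed by GiST indexes, and a GiST index can never be declared UNIQUE, so there is no pre-built index that ADD CONSTRAINT ... USING INDEX could adopt. A minimal sketch of the supported path instead, reusing the test's table (not part of the expected output):

    -- Sketch: let ADD CONSTRAINT build its own GiST index,
    -- rather than trying to adopt an existing one.
    ALTER TABLE temporal3
      ADD CONSTRAINT temporal3_pk
      PRIMARY KEY (id, valid_at WITHOUT OVERLAPS);
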
-DROP TABLE temporal3;
--- UNIQUE with USING INDEX (not possible):
-CREATE TABLE temporal3 (
-  id int4range,
-  valid_at daterange
-);
-CREATE INDEX idx_temporal3_uq ON temporal3 USING gist (id, valid_at);
-ALTER TABLE temporal3
-  ADD CONSTRAINT temporal3_uq
-  UNIQUE USING INDEX idx_temporal3_uq;
-ERROR: "idx_temporal3_uq" is not a unique index
-LINE 2: ADD CONSTRAINT temporal3_uq
-        ^
-DETAIL: Cannot create a primary key or unique constraint using such an index.
-DROP TABLE temporal3;
--- UNIQUE with USING [UNIQUE] INDEX (possible but not a temporal constraint):
-CREATE TABLE temporal3 (
-  id int4range,
-  valid_at daterange
-);
-CREATE UNIQUE INDEX idx_temporal3_uq ON temporal3 (id, valid_at);
-ALTER TABLE temporal3
-  ADD CONSTRAINT temporal3_uq
-  UNIQUE USING INDEX idx_temporal3_uq;
-NOTICE: ALTER TABLE / ADD CONSTRAINT USING INDEX will rename index "idx_temporal3_uq" to "temporal3_uq"
-DROP TABLE temporal3;
--- Add range column and the PK at the same time
-CREATE TABLE temporal3 (
-  id int4range
-);
-ALTER TABLE temporal3
-  ADD COLUMN valid_at daterange,
-  ADD CONSTRAINT temporal3_pk
-  PRIMARY KEY (id, valid_at WITHOUT OVERLAPS);
-DROP TABLE temporal3;
--- Add range column and UNIQUE constraint at the same time
-CREATE TABLE temporal3 (
-  id int4range
-);
-ALTER TABLE temporal3
-  ADD COLUMN valid_at daterange,
-  ADD CONSTRAINT temporal3_uq
-  UNIQUE (id, valid_at WITHOUT OVERLAPS);
-DROP TABLE temporal3;
---
--- range PK: test with existing rows
---
-ALTER TABLE temporal_rng DROP CONSTRAINT temporal_rng_pk;
--- okay:
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2018-01-02', '2018-02-03'));
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2018-03-03', '2018-04-04'));
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[2,3)', daterange('2018-01-01', '2018-01-05'));
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[3,4)', daterange('2018-01-01', NULL));
-ALTER TABLE temporal_rng ADD CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS);
-ALTER TABLE temporal_rng DROP CONSTRAINT temporal_rng_pk;
--- should fail:
-BEGIN;
-  INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2018-01-01', '2018-01-05'));
-  ALTER TABLE temporal_rng ADD CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS);
-ERROR: could not create exclusion constraint "temporal_rng_pk"
-DETAIL: Key (id, valid_at)=([1,2), [2018-01-02,2018-02-03)) conflicts with key (id, valid_at)=([1,2), [2018-01-01,2018-01-05)).
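
ADD CONSTRAINT validates every existing row, which is why the transaction above fails on the pre-existing overlap. A hedged sketch of a query that would locate the offending pairs beforehand (the ctid comparison only keeps each pair from being reported twice; not part of the expected output):

    -- Sketch: find pairs of rows with the same id and overlapping periods.
    SELECT a.id, a.valid_at AS first_row, b.valid_at AS second_row
    FROM temporal_rng a
    JOIN temporal_rng b
      ON a.id = b.id
     AND a.valid_at && b.valid_at
     AND a.ctid < b.ctid;
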
-ROLLBACK;
--- rejects empty:
-BEGIN;
-  INSERT INTO temporal_rng (id, valid_at) VALUES ('[3,4)', 'empty');
-  ALTER TABLE temporal_rng ADD CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS);
-ERROR: empty WITHOUT OVERLAPS value found in column "valid_at" in relation "temporal_rng"
-ROLLBACK;
-ALTER TABLE temporal_rng ADD CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS);
-DELETE FROM temporal_rng;
---
--- range PK: test inserts
---
--- okay:
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2018-01-02', '2018-02-03'));
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2018-03-03', '2018-04-04'));
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[2,3)', daterange('2018-01-01', '2018-01-05'));
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[3,4)', daterange('2018-01-01', NULL));
--- should fail:
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2018-01-01', '2018-01-05'));
-ERROR: conflicting key value violates exclusion constraint "temporal_rng_pk"
-DETAIL: Key (id, valid_at)=([1,2), [2018-01-01,2018-01-05)) conflicts with existing key (id, valid_at)=([1,2), [2018-01-02,2018-02-03)).
-INSERT INTO temporal_rng (id, valid_at) VALUES (NULL, daterange('2018-01-01', '2018-01-05'));
-ERROR: null value in column "id" of relation "temporal_rng" violates not-null constraint
-DETAIL: Failing row contains (null, [2018-01-01,2018-01-05)).
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[3,4)', NULL);
-ERROR: null value in column "valid_at" of relation "temporal_rng" violates not-null constraint
-DETAIL: Failing row contains ([3,4), null).
--- rejects empty:
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[3,4)', 'empty');
-ERROR: empty WITHOUT OVERLAPS value found in column "valid_at" in relation "temporal_rng"
-SELECT * FROM temporal_rng ORDER BY id, valid_at;
-  id   |        valid_at
--------+-------------------------
- [1,2) | [2018-01-02,2018-02-03)
- [1,2) | [2018-03-03,2018-04-04)
- [2,3) | [2018-01-01,2018-01-05)
- [3,4) | [2018-01-01,)
-(4 rows)
-
---
--- range PK: test updates
---
--- update the scalar part
-UPDATE temporal_rng
-SET id = '[11,12)'
-WHERE id = '[1,2)'
-AND valid_at @> '2018-01-15'::date;
--- update the range part
-UPDATE temporal_rng
-SET valid_at = '[2020-01-01,2021-01-01)'
-WHERE id = '[11,12)'
-AND valid_at @> '2018-01-15'::date;
--- update both at once
-UPDATE temporal_rng
-SET id = '[21,22)',
-    valid_at = '[2018-01-02,2018-02-03)'
-WHERE id = '[11,12)'
-AND valid_at @> '2020-01-15'::date;
-SELECT * FROM temporal_rng ORDER BY id, valid_at;
-   id    |        valid_at
----------+-------------------------
- [1,2)   | [2018-03-03,2018-04-04)
- [2,3)   | [2018-01-01,2018-01-05)
- [3,4)   | [2018-01-01,)
- [21,22) | [2018-01-02,2018-02-03)
-(4 rows)
-
--- should fail:
-UPDATE temporal_rng
-SET id = '[1,2)',
-    valid_at = daterange('2018-03-05', '2018-05-05')
-WHERE id = '[21,22)';
-ERROR: conflicting key value violates exclusion constraint "temporal_rng_pk"
-DETAIL: Key (id, valid_at)=([1,2), [2018-03-05,2018-05-05)) conflicts with existing key (id, valid_at)=([1,2), [2018-03-03,2018-04-04)).
--- set the scalar part to NULL
-UPDATE temporal_rng
-SET id = NULL,
-    valid_at = daterange('2018-03-05', '2018-05-05')
-WHERE id = '[21,22)';
-ERROR: null value in column "id" of relation "temporal_rng" violates not-null constraint
-DETAIL: Failing row contains (null, [2018-03-05,2018-05-05)).
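
The NULL rejections above come from the implicit NOT NULL that PRIMARY KEY places on every key column, the range part included. A quick catalog check, as a sketch (not part of the expected output):

    -- Sketch: both PK columns should show attnotnull = true.
    SELECT attname, attnotnull
    FROM pg_attribute
    WHERE attrelid = 'temporal_rng'::regclass
      AND attnum > 0 AND NOT attisdropped;
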
--- set the range part to NULL
-UPDATE temporal_rng
-SET id = '[1,2)',
-    valid_at = NULL
-WHERE id = '[21,22)';
-ERROR: null value in column "valid_at" of relation "temporal_rng" violates not-null constraint
-DETAIL: Failing row contains ([1,2), null).
--- rejects empty:
-UPDATE temporal_rng
-SET id = '[1,2)',
-    valid_at = 'empty'
-WHERE id = '[21,22)';
-ERROR: empty WITHOUT OVERLAPS value found in column "valid_at" in relation "temporal_rng"
-SELECT * FROM temporal_rng ORDER BY id, valid_at;
-   id    |        valid_at
----------+-------------------------
- [1,2)   | [2018-03-03,2018-04-04)
- [2,3)   | [2018-01-01,2018-01-05)
- [3,4)   | [2018-01-01,)
- [21,22) | [2018-01-02,2018-02-03)
-(4 rows)
-
---
--- range UQ: test with existing rows
---
-CREATE TABLE temporal_rng3 (
-  id int4range,
-  valid_at daterange
-);
--- okay:
-INSERT INTO temporal_rng3 (id, valid_at) VALUES ('[1,2)', daterange('2018-01-02', '2018-02-03'));
-INSERT INTO temporal_rng3 (id, valid_at) VALUES ('[1,2)', daterange('2018-03-03', '2018-04-04'));
-INSERT INTO temporal_rng3 (id, valid_at) VALUES ('[2,3)', daterange('2018-01-01', '2018-01-05'));
-INSERT INTO temporal_rng3 (id, valid_at) VALUES ('[3,4)', daterange('2018-01-01', NULL));
-INSERT INTO temporal_rng3 (id, valid_at) VALUES (NULL, daterange('2018-01-01', '2018-01-05'));
-INSERT INTO temporal_rng3 (id, valid_at) VALUES ('[3,4)', NULL);
-ALTER TABLE temporal_rng3 ADD CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS);
-ALTER TABLE temporal_rng3 DROP CONSTRAINT temporal_rng3_uq;
--- should fail:
-BEGIN;
-  INSERT INTO temporal_rng3 (id, valid_at) VALUES ('[1,2)', daterange('2018-01-01', '2018-01-05'));
-  ALTER TABLE temporal_rng3 ADD CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS);
-ERROR: could not create exclusion constraint "temporal_rng3_uq"
-DETAIL: Key (id, valid_at)=([1,2), [2018-01-02,2018-02-03)) conflicts with key (id, valid_at)=([1,2), [2018-01-01,2018-01-05)).
-ROLLBACK;
--- rejects empty:
-BEGIN;
-  INSERT INTO temporal_rng3 (id, valid_at) VALUES ('[3,4)', 'empty');
-  ALTER TABLE temporal_rng3 ADD CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS);
-ERROR: empty WITHOUT OVERLAPS value found in column "valid_at" in relation "temporal_rng3"
-ROLLBACK;
-ALTER TABLE temporal_rng3 ADD CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS);
-DELETE FROM temporal_rng3;
---
--- range UQ: test inserts
---
--- okay:
-INSERT INTO temporal_rng3 (id, valid_at) VALUES ('[1,2)', daterange('2018-01-02', '2018-02-03'));
-INSERT INTO temporal_rng3 (id, valid_at) VALUES ('[1,2)', daterange('2018-03-03', '2018-04-04'));
-INSERT INTO temporal_rng3 (id, valid_at) VALUES ('[2,3)', daterange('2018-01-01', '2018-01-05'));
-INSERT INTO temporal_rng3 (id, valid_at) VALUES ('[3,4)', daterange('2018-01-01', NULL));
-INSERT INTO temporal_rng3 (id, valid_at) VALUES (NULL, daterange('2018-01-01', '2018-01-05'));
-INSERT INTO temporal_rng3 (id, valid_at) VALUES ('[3,4)', NULL);
--- should fail:
-INSERT INTO temporal_rng3 (id, valid_at) VALUES ('[1,2)', daterange('2018-01-01', '2018-01-05'));
-ERROR: conflicting key value violates exclusion constraint "temporal_rng3_uq"
-DETAIL: Key (id, valid_at)=([1,2), [2018-01-01,2018-01-05)) conflicts with existing key (id, valid_at)=([1,2), [2018-01-02,2018-02-03)).
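
Note how the UNIQUE runs differ from the PK runs: rows with a NULL id or a NULL valid_at are accepted, because UNIQUE adds no NOT NULL and a NULL key part never conflicts with anything, just as with ordinary unique indexes. A sketch with hypothetical values (not part of the expected output):

    -- Sketch: both rows are accepted, since the NULL ids are not treated
    -- as equal and so the overlapping ranges never come into play.
    INSERT INTO temporal_rng3 (id, valid_at) VALUES
      (NULL, daterange('2019-01-01', '2019-02-01')),
      (NULL, daterange('2019-01-01', '2019-02-01'));
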
--- rejects empty:
-INSERT INTO temporal_rng3 (id, valid_at) VALUES ('[3,4)', 'empty');
-ERROR: empty WITHOUT OVERLAPS value found in column "valid_at" in relation "temporal_rng3"
-SELECT * FROM temporal_rng3 ORDER BY id, valid_at;
-  id   |        valid_at
--------+-------------------------
- [1,2) | [2018-01-02,2018-02-03)
- [1,2) | [2018-03-03,2018-04-04)
- [2,3) | [2018-01-01,2018-01-05)
- [3,4) | [2018-01-01,)
- [3,4) |
-       | [2018-01-01,2018-01-05)
-(6 rows)
-
---
--- range UQ: test updates
---
--- update the scalar part
-UPDATE temporal_rng3
-SET id = '[11,12)'
-WHERE id = '[1,2)'
-AND valid_at @> '2018-01-15'::date;
--- update the range part
-UPDATE temporal_rng3
-SET valid_at = '[2020-01-01,2021-01-01)'
-WHERE id = '[11,12)'
-AND valid_at @> '2018-01-15'::date;
--- update both at once
-UPDATE temporal_rng3
-SET id = '[21,22)',
-    valid_at = '[2018-01-02,2018-02-03)'
-WHERE id = '[11,12)'
-AND valid_at @> '2020-01-15'::date;
--- set the scalar part to NULL
-UPDATE temporal_rng3
-SET id = NULL,
-    valid_at = daterange('2020-01-01', '2021-01-01')
-WHERE id = '[21,22)';
--- set the range part to NULL
-UPDATE temporal_rng3
-SET id = '[1,2)',
-    valid_at = NULL
-WHERE id IS NULL AND valid_at @> '2020-06-01'::date;
-SELECT * FROM temporal_rng3 ORDER BY id, valid_at;
-  id   |        valid_at
--------+-------------------------
- [1,2) | [2018-03-03,2018-04-04)
- [1,2) |
- [2,3) | [2018-01-01,2018-01-05)
- [3,4) | [2018-01-01,)
- [3,4) |
-       | [2018-01-01,2018-01-05)
-(6 rows)
-
--- should fail:
-UPDATE temporal_rng3
-SET valid_at = daterange('2018-03-01', '2018-05-05')
-WHERE id = '[1,2)' AND valid_at IS NULL;
-ERROR: conflicting key value violates exclusion constraint "temporal_rng3_uq"
-DETAIL: Key (id, valid_at)=([1,2), [2018-03-01,2018-05-05)) conflicts with existing key (id, valid_at)=([1,2), [2018-03-03,2018-04-04)).
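
The conflict reported above has exclusion-constraint semantics: equality on the scalar part, overlap on the range part. Setting aside the extra NOT NULL and empty-value checks, the temporal key behaves roughly like this hand-written constraint (a sketch, not what the test actually creates):

    -- Sketch: an approximate equivalent of UNIQUE (id, valid_at WITHOUT OVERLAPS).
    ALTER TABLE temporal_rng3
      ADD CONSTRAINT temporal_rng3_excl
      EXCLUDE USING gist (id WITH =, valid_at WITH &&);
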
--- rejects empty:
-UPDATE temporal_rng3
-SET valid_at = 'empty'
-WHERE id = '[1,2)' AND valid_at IS NULL;
-ERROR: empty WITHOUT OVERLAPS value found in column "valid_at" in relation "temporal_rng3"
--- still rejects empty when scalar part is NULL:
-UPDATE temporal_rng3
-SET id = NULL,
-    valid_at = 'empty'
-WHERE id = '[1,2)' AND valid_at IS NULL;
-ERROR: empty WITHOUT OVERLAPS value found in column "valid_at" in relation "temporal_rng3"
-SELECT * FROM temporal_rng3 ORDER BY id, valid_at;
-  id   |        valid_at
--------+-------------------------
- [1,2) | [2018-03-03,2018-04-04)
- [1,2) |
- [2,3) | [2018-01-01,2018-01-05)
- [3,4) | [2018-01-01,)
- [3,4) |
-       | [2018-01-01,2018-01-05)
-(6 rows)
-
-DROP TABLE temporal_rng3;
---
--- multirange PK: test with existing rows
---
-ALTER TABLE temporal_mltrng DROP CONSTRAINT temporal_mltrng_pk;
--- okay:
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2018-01-02', '2018-02-03')));
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2018-03-03', '2018-04-04')));
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[2,3)', datemultirange(daterange('2018-01-01', '2018-01-05')));
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[3,4)', datemultirange(daterange('2018-01-01', NULL)));
-ALTER TABLE temporal_mltrng ADD CONSTRAINT temporal_mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS);
-ALTER TABLE temporal_mltrng DROP CONSTRAINT temporal_mltrng_pk;
--- should fail:
-BEGIN;
-  INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2018-01-01', '2018-01-05')));
-  ALTER TABLE temporal_mltrng ADD CONSTRAINT temporal_mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS);
-ERROR: could not create exclusion constraint "temporal_mltrng_pk"
-DETAIL: Key (id, valid_at)=([1,2), {[2018-01-02,2018-02-03)}) conflicts with key (id, valid_at)=([1,2), {[2018-01-01,2018-01-05)}).
-ROLLBACK;
--- rejects empty:
-BEGIN;
-  INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[3,4)', '{}');
-  ALTER TABLE temporal_mltrng ADD CONSTRAINT temporal_mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS);
-ERROR: empty WITHOUT OVERLAPS value found in column "valid_at" in relation "temporal_mltrng"
-ROLLBACK;
-ALTER TABLE temporal_mltrng ADD CONSTRAINT temporal_mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS);
-DELETE FROM temporal_mltrng;
---
--- multirange PK: test inserts
---
--- okay:
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2018-01-02', '2018-02-03')));
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2018-03-03', '2018-04-04')));
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[2,3)', datemultirange(daterange('2018-01-01', '2018-01-05')));
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[3,4)', datemultirange(daterange('2018-01-01', NULL)));
--- should fail:
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2018-01-01', '2018-01-05')));
-ERROR: conflicting key value violates exclusion constraint "temporal_mltrng_pk"
-DETAIL: Key (id, valid_at)=([1,2), {[2018-01-01,2018-01-05)}) conflicts with existing key (id, valid_at)=([1,2), {[2018-01-02,2018-02-03)}).
-INSERT INTO temporal_mltrng (id, valid_at) VALUES (NULL, datemultirange(daterange('2018-01-01', '2018-01-05')));
-ERROR: null value in column "id" of relation "temporal_mltrng" violates not-null constraint
-DETAIL: Failing row contains (null, {[2018-01-01,2018-01-05)}).
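
One capability the multirange flavor adds over plain ranges: a single row can carry a discontiguous validity period, and every part of it participates in the overlap check. A sketch with hypothetical values (not part of the expected output):

    -- Sketch: one row, two separate validity windows; a later insert for
    -- id [5,6) conflicts if it overlaps either window.
    INSERT INTO temporal_mltrng (id, valid_at)
    VALUES ('[5,6)', datemultirange(daterange('2018-01-01', '2018-02-01'),
                                    daterange('2018-06-01', '2018-07-01')));
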
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[3,4)', NULL);
-ERROR: null value in column "valid_at" of relation "temporal_mltrng" violates not-null constraint
-DETAIL: Failing row contains ([3,4), null).
--- rejects empty:
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[3,4)', '{}');
-ERROR: empty WITHOUT OVERLAPS value found in column "valid_at" in relation "temporal_mltrng"
-SELECT * FROM temporal_mltrng ORDER BY id, valid_at;
-  id   |         valid_at
--------+---------------------------
- [1,2) | {[2018-01-02,2018-02-03)}
- [1,2) | {[2018-03-03,2018-04-04)}
- [2,3) | {[2018-01-01,2018-01-05)}
- [3,4) | {[2018-01-01,)}
-(4 rows)
-
---
--- multirange PK: test updates
---
--- update the scalar part
-UPDATE temporal_mltrng
-SET id = '[11,12)'
-WHERE id = '[1,2)'
-AND valid_at @> '2018-01-15'::date;
--- update the multirange part
-UPDATE temporal_mltrng
-SET valid_at = '{[2020-01-01,2021-01-01)}'
-WHERE id = '[11,12)'
-AND valid_at @> '2018-01-15'::date;
--- update both at once
-UPDATE temporal_mltrng
-SET id = '[21,22)',
-    valid_at = '{[2018-01-02,2018-02-03)}'
-WHERE id = '[11,12)'
-AND valid_at @> '2020-01-15'::date;
-SELECT * FROM temporal_mltrng ORDER BY id, valid_at;
-   id    |         valid_at
----------+---------------------------
- [1,2)   | {[2018-03-03,2018-04-04)}
- [2,3)   | {[2018-01-01,2018-01-05)}
- [3,4)   | {[2018-01-01,)}
- [21,22) | {[2018-01-02,2018-02-03)}
-(4 rows)
-
--- should fail:
-UPDATE temporal_mltrng
-SET id = '[1,2)',
-    valid_at = datemultirange(daterange('2018-03-05', '2018-05-05'))
-WHERE id = '[21,22)';
-ERROR: conflicting key value violates exclusion constraint "temporal_mltrng_pk"
-DETAIL: Key (id, valid_at)=([1,2), {[2018-03-05,2018-05-05)}) conflicts with existing key (id, valid_at)=([1,2), {[2018-03-03,2018-04-04)}).
--- set the scalar part to NULL
-UPDATE temporal_mltrng
-SET id = NULL,
-    valid_at = datemultirange(daterange('2018-03-05', '2018-05-05'))
-WHERE id = '[21,22)';
-ERROR: null value in column "id" of relation "temporal_mltrng" violates not-null constraint
-DETAIL: Failing row contains (null, {[2018-03-05,2018-05-05)}).
--- set the multirange part to NULL
-UPDATE temporal_mltrng
-SET id = '[1,2)',
-    valid_at = NULL
-WHERE id = '[21,22)';
-ERROR: null value in column "valid_at" of relation "temporal_mltrng" violates not-null constraint
-DETAIL: Failing row contains ([1,2), null).
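
The WHERE clauses in these update tests lean on multirange operators behaving like their range counterparts: @> tests containment of a single date, && tests overlap. Standalone checks, as a sketch (not part of the expected output):

    -- Sketch: containment of an element, and overlap of two multiranges.
    SELECT datemultirange(daterange('2018-01-01', '2018-02-01')) @> '2018-01-15'::date;  -- true
    SELECT datemultirange(daterange('2018-01-01', '2018-02-01'))
        && datemultirange(daterange('2018-01-15', '2018-03-01'));                        -- true
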
--- rejects empty:
-UPDATE temporal_mltrng
-SET id = '[1,2)',
-    valid_at = '{}'
-WHERE id = '[21,22)';
-ERROR: empty WITHOUT OVERLAPS value found in column "valid_at" in relation "temporal_mltrng"
-SELECT * FROM temporal_mltrng ORDER BY id, valid_at;
-   id    |         valid_at
----------+---------------------------
- [1,2)   | {[2018-03-03,2018-04-04)}
- [2,3)   | {[2018-01-01,2018-01-05)}
- [3,4)   | {[2018-01-01,)}
- [21,22) | {[2018-01-02,2018-02-03)}
-(4 rows)
-
---
--- multirange UQ: test with existing rows
---
-CREATE TABLE temporal_mltrng3 (
-  id int4range,
-  valid_at datemultirange
-);
--- okay:
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2018-01-02', '2018-02-03')));
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2018-03-03', '2018-04-04')));
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[2,3)', datemultirange(daterange('2018-01-01', '2018-01-05')));
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[3,4)', datemultirange(daterange('2018-01-01', NULL)));
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES (NULL, datemultirange(daterange('2018-01-01', '2018-01-05')));
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[3,4)', NULL);
-ALTER TABLE temporal_mltrng3 ADD CONSTRAINT temporal_mltrng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS);
-ALTER TABLE temporal_mltrng3 DROP CONSTRAINT temporal_mltrng3_uq;
--- should fail:
-BEGIN;
-  INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2018-01-01', '2018-01-05')));
-  ALTER TABLE temporal_mltrng3 ADD CONSTRAINT temporal_mltrng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS);
-ERROR: could not create exclusion constraint "temporal_mltrng3_uq"
-DETAIL: Key (id, valid_at)=([1,2), {[2018-01-02,2018-02-03)}) conflicts with key (id, valid_at)=([1,2), {[2018-01-01,2018-01-05)}).
-ROLLBACK;
--- rejects empty:
-BEGIN;
-  INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[3,4)', '{}');
-  ALTER TABLE temporal_mltrng3 ADD CONSTRAINT temporal_mltrng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS);
-ERROR: empty WITHOUT OVERLAPS value found in column "valid_at" in relation "temporal_mltrng3"
-ROLLBACK;
-ALTER TABLE temporal_mltrng3 ADD CONSTRAINT temporal_mltrng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS);
-DELETE FROM temporal_mltrng3;
---
--- multirange UQ: test inserts
---
--- okay:
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2018-01-02', '2018-02-03')));
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2018-03-03', '2018-04-04')));
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[2,3)', datemultirange(daterange('2018-01-01', '2018-01-05')));
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[3,4)', datemultirange(daterange('2018-01-01', NULL)));
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES (NULL, datemultirange(daterange('2018-01-01', '2018-01-05')));
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[3,4)', NULL);
--- should fail:
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2018-01-01', '2018-01-05')));
-ERROR: conflicting key value violates exclusion constraint "temporal_mltrng3_uq"
-DETAIL: Key (id, valid_at)=([1,2), {[2018-01-01,2018-01-05)}) conflicts with existing key (id, valid_at)=([1,2), {[2018-01-02,2018-02-03)}).
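
For both the range and multirange flavors, the catalogs record that a key is temporal. Assuming a server new enough to carry the pg_constraint.conperiod flag (introduced together with WITHOUT OVERLAPS), a sketch:

    -- Sketch: conperiod marks WITHOUT OVERLAPS / PERIOD constraints
    -- (assumes pg_constraint.conperiod exists on this server version).
    SELECT conname, contype, conperiod
    FROM pg_constraint
    WHERE conrelid = 'temporal_mltrng3'::regclass;
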
--- rejects empty:
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[3,4)', '{}');
-ERROR: empty WITHOUT OVERLAPS value found in column "valid_at" in relation "temporal_mltrng3"
-SELECT * FROM temporal_mltrng3 ORDER BY id, valid_at;
-  id   |         valid_at
--------+---------------------------
- [1,2) | {[2018-01-02,2018-02-03)}
- [1,2) | {[2018-03-03,2018-04-04)}
- [2,3) | {[2018-01-01,2018-01-05)}
- [3,4) | {[2018-01-01,)}
- [3,4) |
-       | {[2018-01-01,2018-01-05)}
-(6 rows)
-
---
--- multirange UQ: test updates
---
--- update the scalar part
-UPDATE temporal_mltrng3
-SET id = '[11,12)'
-WHERE id = '[1,2)'
-AND valid_at @> '2018-01-15'::date;
--- update the multirange part
-UPDATE temporal_mltrng3
-SET valid_at = '{[2020-01-01,2021-01-01)}'
-WHERE id = '[11,12)'
-AND valid_at @> '2018-01-15'::date;
--- update both at once
-UPDATE temporal_mltrng3
-SET id = '[21,22)',
-    valid_at = '{[2018-01-02,2018-02-03)}'
-WHERE id = '[11,12)'
-AND valid_at @> '2020-01-15'::date;
--- set the scalar part to NULL
-UPDATE temporal_mltrng3
-SET id = NULL,
-    valid_at = datemultirange(daterange('2020-01-01', '2021-01-01'))
-WHERE id = '[21,22)';
--- set the multirange part to NULL
-UPDATE temporal_mltrng3
-SET id = '[1,2)',
-    valid_at = NULL
-WHERE id IS NULL AND valid_at @> '2020-06-01'::date;
-SELECT * FROM temporal_mltrng3 ORDER BY id, valid_at;
-  id   |         valid_at
--------+---------------------------
- [1,2) | {[2018-03-03,2018-04-04)}
- [1,2) |
- [2,3) | {[2018-01-01,2018-01-05)}
- [3,4) | {[2018-01-01,)}
- [3,4) |
-       | {[2018-01-01,2018-01-05)}
-(6 rows)
-
--- should fail:
-UPDATE temporal_mltrng3
-SET valid_at = datemultirange(daterange('2018-03-01', '2018-05-05'))
-WHERE id = '[1,2)' AND valid_at IS NULL;
-ERROR: conflicting key value violates exclusion constraint "temporal_mltrng3_uq"
-DETAIL: Key (id, valid_at)=([1,2), {[2018-03-01,2018-05-05)}) conflicts with existing key (id, valid_at)=([1,2), {[2018-03-03,2018-04-04)}).
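
The recurring "rejects empty" cases exist because an empty range or multirange overlaps nothing: if such a value were allowed into the key, the row could never conflict with anything and uniqueness would silently stop holding for it. The building blocks, as standalone checks (a sketch):

    -- Sketch: empty values never overlap, hence they must be rejected.
    SELECT isempty('empty'::daterange);                                           -- true
    SELECT 'empty'::daterange && '[2018-01-01,2019-01-01)'::daterange;            -- false
    SELECT '{}'::datemultirange && '{[2018-01-01,2019-01-01)}'::datemultirange;   -- false
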
--- rejects empty:
-UPDATE temporal_mltrng3
-SET valid_at = '{}'
-WHERE id = '[1,2)' AND valid_at IS NULL;
-ERROR: empty WITHOUT OVERLAPS value found in column "valid_at" in relation "temporal_mltrng3"
--- still rejects empty when scalar part is NULL:
-UPDATE temporal_mltrng3
-SET id = NULL,
-    valid_at = '{}'
-WHERE id = '[1,2)' AND valid_at IS NULL;
-ERROR: empty WITHOUT OVERLAPS value found in column "valid_at" in relation "temporal_mltrng3"
-SELECT * FROM temporal_mltrng3 ORDER BY id, valid_at;
-  id   |         valid_at
--------+---------------------------
- [1,2) | {[2018-03-03,2018-04-04)}
- [1,2) |
- [2,3) | {[2018-01-01,2018-01-05)}
- [3,4) | {[2018-01-01,)}
- [3,4) |
-       | {[2018-01-01,2018-01-05)}
-(6 rows)
-
-DROP TABLE temporal_mltrng3;
---
--- test a range with both a PK and a UNIQUE constraint
---
-CREATE TABLE temporal3 (
-  id int4range,
-  valid_at daterange,
-  id2 int8range,
-  name TEXT,
-  CONSTRAINT temporal3_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal3_uniq UNIQUE (id2, valid_at WITHOUT OVERLAPS)
-);
-INSERT INTO temporal3 (id, valid_at, id2, name)
-  VALUES
-  ('[1,2)', daterange('2000-01-01', '2010-01-01'), '[7,8)', 'foo'),
-  ('[2,3)', daterange('2000-01-01', '2010-01-01'), '[9,10)', 'bar')
-;
-DROP TABLE temporal3;
---
--- test changing the PK's dependencies
---
-CREATE TABLE temporal3 (
-  id int4range,
-  valid_at daterange,
-  CONSTRAINT temporal3_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-);
-ALTER TABLE temporal3 ALTER COLUMN valid_at DROP NOT NULL;
-ERROR: column "valid_at" is in a primary key
-ALTER TABLE temporal3 ALTER COLUMN valid_at TYPE tstzrange USING tstzrange(lower(valid_at), upper(valid_at));
-ALTER TABLE temporal3 RENAME COLUMN valid_at TO valid_thru;
-ALTER TABLE temporal3 DROP COLUMN valid_thru;
-DROP TABLE temporal3;
---
--- test PARTITION BY for ranges
---
--- temporal PRIMARY KEY:
-CREATE TABLE temporal_partitioned (
-  id int4range,
-  valid_at daterange,
-  name text,
-  CONSTRAINT temporal_paritioned_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-) PARTITION BY LIST (id);
-CREATE TABLE tp1 PARTITION OF temporal_partitioned FOR VALUES IN ('[1,2)', '[2,3)');
-CREATE TABLE tp2 PARTITION OF temporal_partitioned FOR VALUES IN ('[3,4)', '[4,5)');
-INSERT INTO temporal_partitioned (id, valid_at, name) VALUES
-  ('[1,2)', daterange('2000-01-01', '2000-02-01'), 'one'),
-  ('[1,2)', daterange('2000-02-01', '2000-03-01'), 'one'),
-  ('[3,4)', daterange('2000-01-01', '2010-01-01'), 'three');
-SELECT * FROM temporal_partitioned ORDER BY id, valid_at;
-  id   |        valid_at         | name
--------+-------------------------+-------
- [1,2) | [2000-01-01,2000-02-01) | one
- [1,2) | [2000-02-01,2000-03-01) | one
- [3,4) | [2000-01-01,2010-01-01) | three
-(3 rows)
-
-SELECT * FROM tp1 ORDER BY id, valid_at;
-  id   |        valid_at         | name
--------+-------------------------+------
- [1,2) | [2000-01-01,2000-02-01) | one
- [1,2) | [2000-02-01,2000-03-01) | one
-(2 rows)
-
-SELECT * FROM tp2 ORDER BY id, valid_at;
-  id   |        valid_at         | name
--------+-------------------------+-------
- [3,4) | [2000-01-01,2010-01-01) | three
-(1 row)
-
-DROP TABLE temporal_partitioned;
--- temporal UNIQUE:
-CREATE TABLE temporal_partitioned (
-  id int4range,
-  valid_at daterange,
-  name text,
-  CONSTRAINT temporal_paritioned_uq UNIQUE (id, valid_at WITHOUT OVERLAPS)
-) PARTITION BY LIST (id);
-CREATE TABLE tp1 PARTITION OF temporal_partitioned FOR VALUES IN ('[1,2)', '[2,3)');
-CREATE TABLE tp2 PARTITION OF temporal_partitioned FOR VALUES IN ('[3,4)', '[4,5)');
-INSERT INTO temporal_partitioned (id, valid_at, name) VALUES
-  ('[1,2)', daterange('2000-01-01', '2000-02-01'), 'one'),
-  ('[1,2)', daterange('2000-02-01', '2000-03-01'), 'one'),
-  ('[3,4)', daterange('2000-01-01', '2010-01-01'), 'three');
-SELECT * FROM temporal_partitioned ORDER BY id, valid_at;
-  id   |        valid_at         | name
--------+-------------------------+-------
- [1,2) | [2000-01-01,2000-02-01) | one
- [1,2) | [2000-02-01,2000-03-01) | one
- [3,4) | [2000-01-01,2010-01-01) | three
-(3 rows)
-
-SELECT * FROM tp1 ORDER BY id, valid_at;
-  id   |        valid_at         | name
--------+-------------------------+------
- [1,2) | [2000-01-01,2000-02-01) | one
- [1,2) | [2000-02-01,2000-03-01) | one
-(2 rows)
-
-SELECT * FROM tp2 ORDER BY id, valid_at;
-  id   |        valid_at         | name
--------+-------------------------+-------
- [3,4) | [2000-01-01,2010-01-01) | three
-(1 row)
-
-DROP TABLE temporal_partitioned;
--- ALTER TABLE REPLICA IDENTITY
-\d temporal_rng
-             Table "public.temporal_rng"
-  Column  |   Type    | Collation | Nullable | Default
-----------+-----------+-----------+----------+---------
- id       | int4range |           | not null |
- valid_at | daterange |           | not null |
-Indexes:
-    "temporal_rng_pk" PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-
-ALTER TABLE temporal_rng REPLICA IDENTITY USING INDEX temporal_rng_pk;
-\d temporal_rng
-             Table "public.temporal_rng"
-  Column  |   Type    | Collation | Nullable | Default
-----------+-----------+-----------+----------+---------
- id       | int4range |           | not null |
- valid_at | daterange |           | not null |
-Indexes:
-    "temporal_rng_pk" PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) REPLICA IDENTITY
-
---
--- ON CONFLICT: ranges
---
-TRUNCATE temporal_rng;
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2000-01-01', '2010-01-01'));
--- with a conflict
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2005-01-01', '2006-01-01')) ON CONFLICT DO NOTHING;
--- id matches but no conflict
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2010-01-01', '2020-01-01')) ON CONFLICT DO NOTHING;
--- date matches but no conflict
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[2,3)', daterange('2005-01-01', '2006-01-01')) ON CONFLICT DO NOTHING;
-SELECT * FROM temporal_rng ORDER BY id, valid_at;
-  id   |        valid_at
--------+-------------------------
- [1,2) | [2000-01-01,2010-01-01)
- [1,2) | [2010-01-01,2020-01-01)
- [2,3) | [2005-01-01,2006-01-01)
-(3 rows)
-
-TRUNCATE temporal_rng;
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2000-01-01', '2010-01-01'));
--- with a conflict
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2005-01-01', '2006-01-01')) ON CONFLICT (id, valid_at) DO NOTHING;
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
--- id matches but no conflict
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2010-01-01', '2020-01-01')) ON CONFLICT (id, valid_at) DO NOTHING;
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
--- date matches but no conflict
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[2,3)', daterange('2005-01-01', '2006-01-01')) ON CONFLICT (id, valid_at) DO NOTHING;
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
-SELECT * FROM temporal_rng ORDER BY id, valid_at;
-  id   |        valid_at
--------+-------------------------
- [1,2) | [2000-01-01,2010-01-01)
-(1 row)
-
-TRUNCATE temporal_rng;
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2000-01-01', '2010-01-01'));
--- with a conflict
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2005-01-01', '2006-01-01')) ON CONFLICT ON CONSTRAINT temporal_rng_pk DO NOTHING;
--- id matches but no conflict
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2010-01-01', '2020-01-01')) ON CONFLICT ON CONSTRAINT temporal_rng_pk DO NOTHING;
--- date matches but no conflict
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[2,3)', daterange('2005-01-01', '2006-01-01')) ON CONFLICT ON CONSTRAINT temporal_rng_pk DO NOTHING;
-SELECT * FROM temporal_rng ORDER BY id, valid_at;
-  id   |        valid_at
--------+-------------------------
- [1,2) | [2000-01-01,2010-01-01)
- [1,2) | [2010-01-01,2020-01-01)
- [2,3) | [2005-01-01,2006-01-01)
-(3 rows)
-
-TRUNCATE temporal_rng;
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2000-01-01', '2010-01-01'));
--- with a conflict
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2005-01-01', '2006-01-01')) ON CONFLICT (id, valid_at) DO UPDATE SET id = EXCLUDED.id + '[2,3)';
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
--- id matches but no conflict
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2010-01-01', '2020-01-01')) ON CONFLICT (id, valid_at) DO UPDATE SET id = EXCLUDED.id + '[3,4)';
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
--- date matches but no conflict
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[2,3)', daterange('2005-01-01', '2006-01-01')) ON CONFLICT (id, valid_at) DO UPDATE SET id = EXCLUDED.id + '[4,5)';
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
-SELECT * FROM temporal_rng ORDER BY id, valid_at;
-  id   |        valid_at
--------+-------------------------
- [1,2) | [2000-01-01,2010-01-01)
-(1 row)
-
-TRUNCATE temporal_rng;
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2000-01-01', '2010-01-01'));
--- with a conflict
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2005-01-01', '2006-01-01')) ON CONFLICT ON CONSTRAINT temporal_rng_pk DO UPDATE SET id = EXCLUDED.id + '[2,3)';
-ERROR: ON CONFLICT DO UPDATE not supported with exclusion constraints
--- id matches but no conflict
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2010-01-01', '2020-01-01')) ON CONFLICT ON CONSTRAINT temporal_rng_pk DO UPDATE SET id = EXCLUDED.id + '[3,4)';
-ERROR: ON CONFLICT DO UPDATE not supported with exclusion constraints
--- date matches but no conflict
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[2,3)', daterange('2005-01-01', '2006-01-01')) ON CONFLICT ON CONSTRAINT temporal_rng_pk DO UPDATE SET id = EXCLUDED.id + '[4,5)';
-ERROR: ON CONFLICT DO UPDATE not supported with exclusion constraints
-SELECT * FROM temporal_rng ORDER BY id, valid_at;
-  id   |        valid_at
--------+-------------------------
- [1,2) | [2000-01-01,2010-01-01)
-(1 row)
-
--- with a UNIQUE constraint:
-CREATE TABLE temporal3 (
-  id int4range,
-  valid_at daterange,
-  CONSTRAINT temporal3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS)
-);
-TRUNCATE temporal3;
-INSERT INTO temporal3 (id, valid_at) VALUES ('[1,2)', daterange('2000-01-01', '2010-01-01'));
--- with a conflict
-INSERT INTO temporal3 (id, valid_at) VALUES ('[1,2)', daterange('2005-01-01', '2006-01-01')) ON CONFLICT DO NOTHING;
--- id matches but no conflict
-INSERT INTO temporal3 (id, valid_at) VALUES ('[1,2)', daterange('2010-01-01', '2020-01-01')) ON CONFLICT DO NOTHING;
--- date matches but no conflict
-INSERT INTO temporal3 (id, valid_at) VALUES ('[2,3)', daterange('2005-01-01', '2006-01-01')) ON CONFLICT DO NOTHING;
-SELECT * FROM temporal3 ORDER BY id, valid_at;
-  id   |        valid_at
--------+-------------------------
- [1,2) | [2000-01-01,2010-01-01)
- [1,2) | [2010-01-01,2020-01-01)
- [2,3) | [2005-01-01,2006-01-01)
-(3 rows)
-
-TRUNCATE temporal3;
-INSERT INTO temporal3 (id, valid_at) VALUES ('[1,2)', daterange('2000-01-01', '2010-01-01'));
--- with a conflict
-INSERT INTO temporal3 (id, valid_at) VALUES ('[1,2)', daterange('2005-01-01', '2006-01-01')) ON CONFLICT (id, valid_at) DO NOTHING;
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
--- id matches but no conflict
-INSERT INTO temporal3 (id, valid_at) VALUES ('[1,2)', daterange('2010-01-01', '2020-01-01')) ON CONFLICT (id, valid_at) DO NOTHING;
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
--- date matches but no conflict
-INSERT INTO temporal3 (id, valid_at) VALUES ('[2,3)', daterange('2005-01-01', '2006-01-01')) ON CONFLICT (id, valid_at) DO NOTHING;
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
-SELECT * FROM temporal3 ORDER BY id, valid_at;
-  id   |        valid_at
--------+-------------------------
- [1,2) | [2000-01-01,2010-01-01)
-(1 row)
-
-TRUNCATE temporal3;
-INSERT INTO temporal3 (id, valid_at) VALUES ('[1,2)', daterange('2000-01-01', '2010-01-01'));
--- with a conflict
-INSERT INTO temporal3 (id, valid_at) VALUES ('[1,2)', daterange('2005-01-01', '2006-01-01')) ON CONFLICT ON CONSTRAINT temporal3_uq DO NOTHING;
--- id matches but no conflict
-INSERT INTO temporal3 (id, valid_at) VALUES ('[1,2)', daterange('2010-01-01', '2020-01-01')) ON CONFLICT ON CONSTRAINT temporal3_uq DO NOTHING;
--- date matches but no conflict
-INSERT INTO temporal3 (id, valid_at) VALUES ('[2,3)', daterange('2005-01-01', '2006-01-01')) ON CONFLICT ON CONSTRAINT temporal3_uq DO NOTHING;
-SELECT * FROM temporal3 ORDER BY id, valid_at;
-  id   |        valid_at
--------+-------------------------
- [1,2) | [2000-01-01,2010-01-01)
- [1,2) | [2010-01-01,2020-01-01)
- [2,3) | [2005-01-01,2006-01-01)
-(3 rows)
-
-TRUNCATE temporal3;
-INSERT INTO temporal3 (id, valid_at) VALUES ('[1,2)', daterange('2000-01-01', '2010-01-01'));
--- with a conflict
-INSERT INTO temporal3 (id, valid_at) VALUES ('[1,2)', daterange('2005-01-01', '2006-01-01')) ON CONFLICT (id, valid_at) DO UPDATE SET id = EXCLUDED.id + '[2,3)';
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
--- id matches but no conflict
-INSERT INTO temporal3 (id, valid_at) VALUES ('[1,2)', daterange('2010-01-01', '2020-01-01')) ON CONFLICT (id, valid_at) DO UPDATE SET id = EXCLUDED.id + '[3,4)';
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
--- date matches but no conflict
-INSERT INTO temporal3 (id, valid_at) VALUES ('[2,3)', daterange('2005-01-01', '2006-01-01')) ON CONFLICT (id, valid_at) DO UPDATE SET id = EXCLUDED.id + '[4,5)';
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
-SELECT * FROM temporal3 ORDER BY id, valid_at;
-  id   |        valid_at
--------+-------------------------
- [1,2) | [2000-01-01,2010-01-01)
-(1 row)
-
-TRUNCATE temporal3;
-INSERT INTO temporal3 (id, valid_at) VALUES ('[1,2)', daterange('2000-01-01', '2010-01-01'));
--- with a conflict
-INSERT INTO temporal3 (id, valid_at) VALUES ('[1,2)', daterange('2005-01-01', '2006-01-01')) ON CONFLICT ON CONSTRAINT temporal3_uq DO UPDATE SET id = EXCLUDED.id + '[2,3)';
-ERROR: ON CONFLICT DO UPDATE not supported with exclusion constraints
--- id matches but no conflict
-INSERT INTO temporal3 (id, valid_at) VALUES ('[1,2)', daterange('2010-01-01', '2020-01-01')) ON CONFLICT ON CONSTRAINT temporal3_uq DO UPDATE SET id = EXCLUDED.id + '[3,4)';
-ERROR: ON CONFLICT DO UPDATE not supported with exclusion constraints
--- date matches but no conflict
-INSERT INTO temporal3 (id, valid_at) VALUES ('[2,3)', daterange('2005-01-01', '2006-01-01')) ON CONFLICT ON CONSTRAINT temporal3_uq DO UPDATE SET id = EXCLUDED.id + '[4,5)';
-ERROR: ON CONFLICT DO UPDATE not supported with exclusion constraints
-SELECT * FROM temporal3 ORDER BY id, valid_at;
-  id   |        valid_at
--------+-------------------------
- [1,2) | [2000-01-01,2010-01-01)
-(1 row)
-
-DROP TABLE temporal3;
---
--- ON CONFLICT: multiranges
---
-TRUNCATE temporal_mltrng;
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2000-01-01', '2010-01-01')));
--- with a conflict
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2005-01-01', '2006-01-01'))) ON CONFLICT DO NOTHING;
--- id matches but no conflict
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2010-01-01', '2020-01-01'))) ON CONFLICT DO NOTHING;
--- date matches but no conflict
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[2,3)', datemultirange(daterange('2005-01-01', '2006-01-01'))) ON CONFLICT DO NOTHING;
-SELECT * FROM temporal_mltrng ORDER BY id, valid_at;
-  id   |         valid_at
--------+---------------------------
- [1,2) | {[2000-01-01,2010-01-01)}
- [1,2) | {[2010-01-01,2020-01-01)}
- [2,3) | {[2005-01-01,2006-01-01)}
-(3 rows)
-
-TRUNCATE temporal_mltrng;
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2000-01-01', '2010-01-01')));
--- with a conflict
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2005-01-01', '2006-01-01'))) ON CONFLICT (id, valid_at) DO NOTHING;
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
--- id matches but no conflict
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2010-01-01', '2020-01-01'))) ON CONFLICT (id, valid_at) DO NOTHING;
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
--- date matches but no conflict
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[2,3)', datemultirange(daterange('2005-01-01', '2006-01-01'))) ON CONFLICT (id, valid_at) DO NOTHING;
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
-SELECT * FROM temporal_mltrng ORDER BY id, valid_at;
-  id   |         valid_at
--------+---------------------------
- [1,2) | {[2000-01-01,2010-01-01)}
-(1 row)
-
-TRUNCATE temporal_mltrng;
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2000-01-01', '2010-01-01')));
--- with a conflict
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2005-01-01', '2006-01-01'))) ON CONFLICT ON CONSTRAINT temporal_mltrng_pk DO NOTHING;
--- id matches but no conflict
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2010-01-01', '2020-01-01'))) ON CONFLICT ON CONSTRAINT temporal_mltrng_pk DO NOTHING;
--- date matches but no conflict
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[2,3)', datemultirange(daterange('2005-01-01', '2006-01-01'))) ON CONFLICT ON CONSTRAINT temporal_mltrng_pk DO NOTHING;
-SELECT * FROM temporal_mltrng ORDER BY id, valid_at;
-  id   |         valid_at
--------+---------------------------
- [1,2) | {[2000-01-01,2010-01-01)}
- [1,2) | {[2010-01-01,2020-01-01)}
- [2,3) | {[2005-01-01,2006-01-01)}
-(3 rows)
-
-TRUNCATE temporal_mltrng;
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2000-01-01', '2010-01-01')));
--- with a conflict
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2005-01-01', '2006-01-01'))) ON CONFLICT (id, valid_at) DO UPDATE SET id = EXCLUDED.id + '[2,3)';
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
--- id matches but no conflict
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2010-01-01', '2020-01-01'))) ON CONFLICT (id, valid_at) DO UPDATE SET id = EXCLUDED.id + '[3,4)';
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
--- date matches but no conflict
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[2,3)', datemultirange(daterange('2005-01-01', '2006-01-01'))) ON CONFLICT (id, valid_at) DO UPDATE SET id = EXCLUDED.id + '[4,5)';
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
-SELECT * FROM temporal_mltrng ORDER BY id, valid_at;
-  id   |         valid_at
--------+---------------------------
- [1,2) | {[2000-01-01,2010-01-01)}
-(1 row)
-
-TRUNCATE temporal_mltrng;
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2000-01-01', '2010-01-01')));
--- with a conflict
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2005-01-01', '2006-01-01'))) ON CONFLICT ON CONSTRAINT temporal_mltrng_pk DO UPDATE SET id = EXCLUDED.id + '[2,3)';
-ERROR: ON CONFLICT DO UPDATE not supported with exclusion constraints
--- id matches but no conflict
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2010-01-01', '2020-01-01'))) ON CONFLICT ON CONSTRAINT temporal_mltrng_pk DO UPDATE SET id = EXCLUDED.id + '[3,4)';
-ERROR: ON CONFLICT DO UPDATE not supported with exclusion constraints
--- date matches but no conflict
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[2,3)', datemultirange(daterange('2005-01-01', '2006-01-01'))) ON CONFLICT ON CONSTRAINT temporal_mltrng_pk DO UPDATE SET id = EXCLUDED.id + '[4,5)';
-ERROR: ON CONFLICT DO UPDATE not supported with exclusion constraints
-SELECT * FROM temporal_mltrng ORDER BY id, valid_at;
-  id   |         valid_at
--------+---------------------------
- [1,2) | {[2000-01-01,2010-01-01)}
-(1 row)
-
--- with a UNIQUE constraint:
-CREATE TABLE temporal_mltrng3 (
-  id int4range,
-  valid_at datemultirange,
-  CONSTRAINT temporal_mltrng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS)
-);
-TRUNCATE temporal_mltrng3;
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2000-01-01', '2010-01-01')));
--- with a conflict
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2005-01-01', '2006-01-01'))) ON CONFLICT DO NOTHING;
--- id matches but no conflict
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2010-01-01', '2020-01-01'))) ON CONFLICT DO NOTHING;
--- date matches but no conflict
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[2,3)', datemultirange(daterange('2005-01-01', '2006-01-01'))) ON CONFLICT DO NOTHING;
-SELECT * FROM temporal_mltrng3 ORDER BY id, valid_at;
-  id   |         valid_at
--------+---------------------------
- [1,2) | {[2000-01-01,2010-01-01)}
- [1,2) | {[2010-01-01,2020-01-01)}
- [2,3) | {[2005-01-01,2006-01-01)}
-(3 rows)
-
-TRUNCATE temporal_mltrng3;
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2000-01-01', '2010-01-01')));
--- with a conflict
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2005-01-01', '2006-01-01'))) ON CONFLICT (id, valid_at) DO NOTHING;
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
--- id matches but no conflict
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2010-01-01', '2020-01-01'))) ON CONFLICT (id, valid_at) DO NOTHING;
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
--- date matches but no conflict
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[2,3)', datemultirange(daterange('2005-01-01', '2006-01-01'))) ON CONFLICT (id, valid_at) DO NOTHING;
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
-SELECT * FROM temporal_mltrng3 ORDER BY id, valid_at;
-  id   |         valid_at
--------+---------------------------
- [1,2) | {[2000-01-01,2010-01-01)}
-(1 row)
-
-TRUNCATE temporal_mltrng3;
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2000-01-01', '2010-01-01')));
--- with a conflict
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2005-01-01', '2006-01-01'))) ON CONFLICT ON CONSTRAINT temporal_mltrng3_uq DO NOTHING;
--- id matches but no conflict
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2010-01-01', '2020-01-01'))) ON CONFLICT ON CONSTRAINT temporal_mltrng3_uq DO NOTHING;
--- date matches but no conflict
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[2,3)', datemultirange(daterange('2005-01-01', '2006-01-01'))) ON CONFLICT ON CONSTRAINT temporal_mltrng3_uq DO NOTHING;
-SELECT * FROM temporal_mltrng3 ORDER BY id, valid_at;
-  id   |         valid_at
--------+---------------------------
- [1,2) | {[2000-01-01,2010-01-01)}
- [1,2) | {[2010-01-01,2020-01-01)}
- [2,3) | {[2005-01-01,2006-01-01)}
-(3 rows)
-
-TRUNCATE temporal_mltrng3;
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2000-01-01', '2010-01-01')));
--- with a conflict
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2005-01-01', '2006-01-01'))) ON CONFLICT (id, valid_at) DO UPDATE SET id = EXCLUDED.id + '[2,3)';
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
--- id matches but no conflict
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2010-01-01', '2020-01-01'))) ON CONFLICT (id, valid_at) DO UPDATE SET id = EXCLUDED.id + '[3,4)';
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
--- date matches but no conflict
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[2,3)', datemultirange(daterange('2005-01-01', '2006-01-01'))) ON CONFLICT (id, valid_at) DO UPDATE SET id = EXCLUDED.id + '[4,5)';
-ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
-SELECT * FROM temporal_mltrng3 ORDER BY id, valid_at;
-  id   |         valid_at
--------+---------------------------
- [1,2) | {[2000-01-01,2010-01-01)}
-(1 row)
-
-TRUNCATE temporal_mltrng3;
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2000-01-01', '2010-01-01')));
--- with a conflict
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2005-01-01', '2006-01-01'))) ON CONFLICT ON CONSTRAINT temporal_mltrng3_uq DO UPDATE SET id = EXCLUDED.id + '[2,3)';
-ERROR: ON CONFLICT DO UPDATE not supported with exclusion constraints
--- id matches but no conflict
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2010-01-01', '2020-01-01'))) ON CONFLICT ON CONSTRAINT temporal_mltrng3_uq DO UPDATE SET id = EXCLUDED.id + '[3,4)';
-ERROR: ON CONFLICT DO UPDATE not supported with exclusion constraints
--- date matches but no conflict
-INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[2,3)', datemultirange(daterange('2005-01-01', '2006-01-01'))) ON CONFLICT ON CONSTRAINT temporal_mltrng3_uq DO UPDATE SET id = EXCLUDED.id + '[4,5)';
-ERROR: ON CONFLICT DO UPDATE not supported with exclusion constraints
-SELECT * FROM temporal_mltrng3 ORDER BY id, valid_at;
-  id   |         valid_at
--------+---------------------------
- [1,2) | {[2000-01-01,2010-01-01)}
-(1 row)
-
-DROP TABLE temporal_mltrng3;
---
--- test FK dependencies
---
--- can't drop a range referenced by an FK, unless with CASCADE
-CREATE TABLE temporal3 (
-  id int4range,
-  valid_at daterange,
-  CONSTRAINT temporal3_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-);
-CREATE TABLE temporal_fk_rng2rng (
-  id int4range,
-  valid_at daterange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, PERIOD valid_at)
-    REFERENCES temporal3 (id, PERIOD valid_at)
-);
-ALTER TABLE temporal3 DROP COLUMN valid_at;
-ERROR: cannot drop column valid_at of table temporal3 because other objects depend on it
-DETAIL: constraint temporal_fk_rng2rng_fk on table temporal_fk_rng2rng depends on column valid_at of table temporal3
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-ALTER TABLE temporal3 DROP COLUMN valid_at CASCADE;
-NOTICE: drop cascades to constraint temporal_fk_rng2rng_fk on table temporal_fk_rng2rng
-DROP TABLE temporal_fk_rng2rng;
-DROP TABLE temporal3;
---
--- test FOREIGN KEY, range references range
---
--- test table setup
-DROP TABLE temporal_rng;
-CREATE TABLE temporal_rng (id int4range, valid_at daterange);
-ALTER TABLE temporal_rng
-  ADD CONSTRAINT temporal_rng_pk
-  PRIMARY KEY (id, valid_at WITHOUT OVERLAPS);
--- Can't create a FK with a mismatched range type
-CREATE TABLE temporal_fk_rng2rng (
-  id int4range,
-  valid_at int4range,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_rng2rng_pk2 PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_rng2rng_fk2 FOREIGN KEY (parent_id, PERIOD valid_at)
-    REFERENCES temporal_rng (id, PERIOD valid_at)
-);
-ERROR: foreign key constraint "temporal_fk_rng2rng_fk2" cannot be implemented
-DETAIL: Key columns "valid_at" of the referencing table and "valid_at" of the referenced table are of incompatible types: int4range and daterange.
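
Beyond requiring matching range types, a temporal foreign key also differs in what it enforces: the referencing period must be covered by the combined periods of the matching referenced rows, not necessarily by any single row. Conceptually the check resembles this query (a sketch with hypothetical values, not the internal implementation):

    -- Sketch: is the child period [2018-01-05,2018-01-10) fully covered
    -- by the union of parent rows for id [1,2)?
    SELECT range_agg(p.valid_at) @> daterange('2018-01-05', '2018-01-10') AS covered
    FROM temporal_rng p
    WHERE p.id = '[1,2)';
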
--- works: PERIOD for both referenced and referencing
-CREATE TABLE temporal_fk_rng2rng (
-  id int4range,
-  valid_at daterange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, PERIOD valid_at)
-    REFERENCES temporal_rng (id, PERIOD valid_at)
-);
-DROP TABLE temporal_fk_rng2rng;
--- with mismatched PERIOD columns:
--- (parent_id, PERIOD valid_at) REFERENCES (id, valid_at)
--- REFERENCES part should specify PERIOD
-CREATE TABLE temporal_fk_rng2rng (
-  id int4range,
-  valid_at daterange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, PERIOD valid_at)
-    REFERENCES temporal_rng (id, valid_at)
-);
-ERROR: foreign key uses PERIOD on the referencing table but not the referenced table
--- (parent_id, valid_at) REFERENCES (id, valid_at)
--- both should specify PERIOD:
-CREATE TABLE temporal_fk_rng2rng (
-  id int4range,
-  valid_at daterange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, valid_at)
-    REFERENCES temporal_rng (id, valid_at)
-);
-ERROR: foreign key must use PERIOD when referencing a primary using WITHOUT OVERLAPS
--- (parent_id, valid_at) REFERENCES (id, PERIOD valid_at)
--- FOREIGN KEY part should specify PERIOD
-CREATE TABLE temporal_fk_rng2rng (
-  id int4range,
-  valid_at daterange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, valid_at)
-    REFERENCES temporal_rng (id, PERIOD valid_at)
-);
-ERROR: foreign key uses PERIOD on the referenced table but not the referencing table
--- (parent_id, valid_at) REFERENCES [implicit]
--- FOREIGN KEY part should specify PERIOD
-CREATE TABLE temporal_fk_rng2rng (
-  id int4range,
-  valid_at daterange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, valid_at)
-    REFERENCES temporal_rng
-);
-ERROR: foreign key uses PERIOD on the referenced table but not the referencing table
--- (parent_id, PERIOD valid_at) REFERENCES (id)
-CREATE TABLE temporal_fk_rng2rng (
-  id int4range,
-  valid_at daterange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, PERIOD valid_at)
-    REFERENCES temporal_rng (id)
-);
-ERROR: foreign key uses PERIOD on the referencing table but not the referenced table
--- (parent_id) REFERENCES (id, PERIOD valid_at)
-CREATE TABLE temporal_fk_rng2rng (
-  id int4range,
-  valid_at daterange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id)
-    REFERENCES temporal_rng (id, PERIOD valid_at)
-);
-ERROR: foreign key uses PERIOD on the referenced table but not the referencing table
--- with inferred PK on the referenced table:
--- (parent_id, PERIOD valid_at) REFERENCES [implicit]
-CREATE TABLE temporal_fk_rng2rng (
-  id int4range,
-  valid_at daterange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, PERIOD valid_at)
-    REFERENCES temporal_rng
-);
-DROP TABLE temporal_fk_rng2rng;
--- (parent_id) REFERENCES [implicit]
-CREATE TABLE temporal_fk_rng2rng (
-  id int4range,
-  valid_at daterange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id)
-    REFERENCES temporal_rng
-);
-ERROR: foreign key uses PERIOD on the referenced table but not the referencing table
--- should fail because of duplicate referenced columns:
-CREATE TABLE temporal_fk_rng2rng (
-  id int4range,
-  valid_at daterange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, PERIOD parent_id)
-    REFERENCES temporal_rng (id, PERIOD id)
-);
-ERROR: foreign key referenced-columns list must not contain duplicates
--- Two scalar columns
-DROP TABLE temporal_rng2;
-CREATE TABLE temporal_rng2 (
-  id1 int4range,
-  id2 int4range,
-  valid_at daterange,
-  CONSTRAINT temporal_rng2_pk PRIMARY KEY (id1, id2, valid_at WITHOUT OVERLAPS)
-);
-CREATE TABLE temporal_fk2_rng2rng (
-  id int4range,
-  valid_at daterange,
-  parent_id1 int4range,
-  parent_id2 int4range,
-  CONSTRAINT temporal_fk2_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk2_rng2rng_fk FOREIGN KEY (parent_id1, parent_id2, PERIOD valid_at)
-    REFERENCES temporal_rng2 (id1, id2, PERIOD valid_at)
-);
-\d temporal_fk2_rng2rng
-          Table "public.temporal_fk2_rng2rng"
-   Column   |   Type    | Collation | Nullable | Default 
-------------+-----------+-----------+----------+---------
- id         | int4range |           | not null | 
- valid_at   | daterange |           | not null | 
- parent_id1 | int4range |           |          | 
- parent_id2 | int4range |           |          | 
-Indexes:
-    "temporal_fk2_rng2rng_pk" PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-Foreign-key constraints:
-    "temporal_fk2_rng2rng_fk" FOREIGN KEY (parent_id1, parent_id2, PERIOD valid_at) REFERENCES temporal_rng2(id1, id2, PERIOD valid_at)
-
-DROP TABLE temporal_fk2_rng2rng;
---
--- test ALTER TABLE ADD CONSTRAINT
---
-CREATE TABLE temporal_fk_rng2rng (
-  id int4range,
-  valid_at daterange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-);
-ALTER TABLE temporal_fk_rng2rng
-  ADD CONSTRAINT temporal_fk_rng2rng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_rng (id, PERIOD valid_at);
--- Two scalar columns:
-CREATE TABLE temporal_fk2_rng2rng (
-  id int4range,
-  valid_at daterange,
-  parent_id1 int4range,
-  parent_id2 int4range,
-  CONSTRAINT temporal_fk2_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-);
-ALTER TABLE temporal_fk2_rng2rng
-  ADD CONSTRAINT temporal_fk2_rng2rng_fk
-  FOREIGN KEY (parent_id1, parent_id2, PERIOD valid_at)
-  REFERENCES temporal_rng2 (id1, id2, PERIOD valid_at);
-\d temporal_fk2_rng2rng
-          Table "public.temporal_fk2_rng2rng"
-   Column   |   Type    | Collation | Nullable | Default 
-------------+-----------+-----------+----------+---------
- id         | int4range |           | not null | 
- valid_at   | daterange |           | not null | 
- parent_id1 | int4range |           |          | 
- parent_id2 | int4range |           |          | 
-Indexes:
-    "temporal_fk2_rng2rng_pk" PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-Foreign-key constraints:
-    "temporal_fk2_rng2rng_fk" FOREIGN KEY (parent_id1, parent_id2, PERIOD valid_at) REFERENCES temporal_rng2(id1, id2, PERIOD valid_at)
-
--- with inferred PK on the referenced table, and wrong column type:
-ALTER TABLE temporal_fk_rng2rng
-  DROP CONSTRAINT temporal_fk_rng2rng_fk,
-  ALTER COLUMN valid_at TYPE tsrange USING tsrange(lower(valid_at), upper(valid_at));
-ALTER TABLE temporal_fk_rng2rng
-  ADD CONSTRAINT temporal_fk_rng2rng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_rng;
-ERROR: foreign key constraint "temporal_fk_rng2rng_fk" cannot be implemented
-DETAIL: Key columns "valid_at" of the referencing table and "valid_at" of the referenced table are of incompatible types: tsrange and daterange.
-ALTER TABLE temporal_fk_rng2rng
-  ALTER COLUMN valid_at TYPE daterange USING daterange(lower(valid_at)::date, upper(valid_at)::date);
--- with inferred PK on the referenced table:
-ALTER TABLE temporal_fk_rng2rng
-  ADD CONSTRAINT temporal_fk_rng2rng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_rng;
--- should fail because of duplicate referenced columns:
-ALTER TABLE temporal_fk_rng2rng
-  ADD CONSTRAINT temporal_fk_rng2rng_fk2
-  FOREIGN KEY (parent_id, PERIOD parent_id)
-  REFERENCES temporal_rng (id, PERIOD id);
-ERROR: foreign key referenced-columns list must not contain duplicates
---
--- test with rows already
---
-DELETE FROM temporal_fk_rng2rng;
-DELETE FROM temporal_rng;
-INSERT INTO temporal_rng (id, valid_at) VALUES
-  ('[1,2)', daterange('2018-01-02', '2018-02-03')),
-  ('[1,2)', daterange('2018-03-03', '2018-04-04')),
-  ('[2,3)', daterange('2018-01-01', '2018-01-05')),
-  ('[3,4)', daterange('2018-01-01', NULL));
-ALTER TABLE temporal_fk_rng2rng
-  DROP CONSTRAINT temporal_fk_rng2rng_fk;
-INSERT INTO temporal_fk_rng2rng (id, valid_at, parent_id) VALUES ('[1,2)', daterange('2018-01-02', '2018-02-01'), '[1,2)');
-ALTER TABLE temporal_fk_rng2rng
-  ADD CONSTRAINT temporal_fk_rng2rng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_rng;
-ALTER TABLE temporal_fk_rng2rng
-  DROP CONSTRAINT temporal_fk_rng2rng_fk;
-INSERT INTO temporal_fk_rng2rng (id, valid_at, parent_id) VALUES ('[2,3)', daterange('2018-01-02', '2018-04-01'), '[1,2)');
--- should fail:
-ALTER TABLE temporal_fk_rng2rng
-  ADD CONSTRAINT temporal_fk_rng2rng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_rng;
-ERROR: insert or update on table "temporal_fk_rng2rng" violates foreign key constraint "temporal_fk_rng2rng_fk"
-DETAIL: Key (parent_id, valid_at)=([1,2), [2018-01-02,2018-04-01)) is not present in table "temporal_rng".
--- okay again:
-DELETE FROM temporal_fk_rng2rng;
-ALTER TABLE temporal_fk_rng2rng
-  ADD CONSTRAINT temporal_fk_rng2rng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_rng;
---
--- test pg_get_constraintdef
---
-SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_fk_rng2rng_fk';
-                                 pg_get_constraintdef                                  
----------------------------------------------------------------------------------------
- FOREIGN KEY (parent_id, PERIOD valid_at) REFERENCES temporal_rng(id, PERIOD valid_at)
-(1 row)
-
---
--- test FK referencing inserts
---
-INSERT INTO temporal_fk_rng2rng (id, valid_at, parent_id) VALUES ('[1,2)', daterange('2018-01-02', '2018-02-01'), '[1,2)');
--- should fail:
-INSERT INTO temporal_fk_rng2rng (id, valid_at, parent_id) VALUES ('[2,3)', daterange('2018-01-02', '2018-04-01'), '[1,2)');
-ERROR: insert or update on table "temporal_fk_rng2rng" violates foreign key constraint "temporal_fk_rng2rng_fk"
-DETAIL: Key (parent_id, valid_at)=([1,2), [2018-01-02,2018-04-01)) is not present in table "temporal_rng".
--- now it should work:
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)', daterange('2018-02-03', '2018-03-03'));
-INSERT INTO temporal_fk_rng2rng (id, valid_at, parent_id) VALUES ('[2,3)', daterange('2018-01-02', '2018-04-01'), '[1,2)');
---
--- test FK referencing updates
---
--- slide the edge across a referenced transition:
-UPDATE temporal_fk_rng2rng SET valid_at = daterange('2018-01-02', '2018-02-20') WHERE id = '[1,2)';
--- should fail:
-UPDATE temporal_fk_rng2rng SET valid_at = daterange('2018-01-02', '2018-05-01') WHERE id = '[1,2)';
-ERROR: insert or update on table "temporal_fk_rng2rng" violates foreign key constraint "temporal_fk_rng2rng_fk"
-DETAIL: Key (parent_id, valid_at)=([1,2), [2018-01-02,2018-05-01)) is not present in table "temporal_rng".
-UPDATE temporal_fk_rng2rng SET parent_id = '[8,9)' WHERE id = '[1,2)';
-ERROR: insert or update on table "temporal_fk_rng2rng" violates foreign key constraint "temporal_fk_rng2rng_fk"
-DETAIL: Key (parent_id, valid_at)=([8,9), [2018-01-02,2018-02-20)) is not present in table "temporal_rng".
--- ALTER FK DEFERRABLE
-BEGIN;
-  INSERT INTO temporal_rng (id, valid_at) VALUES
-    ('[5,6)', daterange('2018-01-01', '2018-02-01')),
-    ('[5,6)', daterange('2018-02-01', '2018-03-01'));
-  INSERT INTO temporal_fk_rng2rng (id, valid_at, parent_id) VALUES
-    ('[3,4)', daterange('2018-01-05', '2018-01-10'), '[5,6)');
-  ALTER TABLE temporal_fk_rng2rng
-    ALTER CONSTRAINT temporal_fk_rng2rng_fk
-    DEFERRABLE INITIALLY DEFERRED;
-  DELETE FROM temporal_rng WHERE id = '[5,6)'; --should not fail yet.
-COMMIT; -- should fail here.
-ERROR: update or delete on table "temporal_rng" violates foreign key constraint "temporal_fk_rng2rng_fk" on table "temporal_fk_rng2rng"
-DETAIL: Key (id, valid_at)=([5,6), [2018-01-01,2018-02-01)) is still referenced from table "temporal_fk_rng2rng".
---
--- test FK referenced updates NO ACTION
---
-TRUNCATE temporal_rng, temporal_fk_rng2rng;
-ALTER TABLE temporal_fk_rng2rng
-  DROP CONSTRAINT temporal_fk_rng2rng_fk;
-ALTER TABLE temporal_fk_rng2rng
-  ADD CONSTRAINT temporal_fk_rng2rng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_rng
-  ON UPDATE NO ACTION;
--- a PK update that succeeds because the numeric id isn't referenced:
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[5,6)', daterange('2018-01-01', '2018-02-01'));
-UPDATE temporal_rng SET valid_at = daterange('2016-01-01', '2016-02-01') WHERE id = '[5,6)';
--- a PK update that succeeds even though the numeric id is referenced because the range isn't:
-DELETE FROM temporal_rng WHERE id = '[5,6)';
-INSERT INTO temporal_rng (id, valid_at) VALUES
-  ('[5,6)', daterange('2018-01-01', '2018-02-01')),
-  ('[5,6)', daterange('2018-02-01', '2018-03-01'));
-INSERT INTO temporal_fk_rng2rng (id, valid_at, parent_id) VALUES ('[3,4)', daterange('2018-01-05', '2018-01-10'), '[5,6)');
-UPDATE temporal_rng SET valid_at = daterange('2016-02-01', '2016-03-01')
-WHERE id = '[5,6)' AND valid_at = daterange('2018-02-01', '2018-03-01');
--- A PK update sliding the edge between two referenced rows:
-INSERT INTO temporal_rng (id, valid_at) VALUES
-  ('[6,7)', daterange('2018-01-01', '2018-02-01')),
-  ('[6,7)', daterange('2018-02-01', '2018-03-01'));
-INSERT INTO temporal_fk_rng2rng (id, valid_at, parent_id) VALUES
-  ('[4,5)', daterange('2018-01-15', '2018-02-15'), '[6,7)');
-UPDATE temporal_rng
-SET valid_at = CASE WHEN lower(valid_at) = '2018-01-01' THEN daterange('2018-01-01', '2018-01-05')
-                    WHEN lower(valid_at) = '2018-02-01' THEN daterange('2018-01-05', '2018-03-01') END
-WHERE id = '[6,7)';
--- a PK update that fails because both are referenced:
-UPDATE temporal_rng SET valid_at = daterange('2016-01-01', '2016-02-01')
-WHERE id = '[5,6)' AND valid_at = daterange('2018-01-01', '2018-02-01');
-ERROR: update or delete on table "temporal_rng" violates foreign key constraint "temporal_fk_rng2rng_fk" on table "temporal_fk_rng2rng"
-DETAIL: Key (id, valid_at)=([5,6), [2018-01-01,2018-02-01)) is still referenced from table "temporal_fk_rng2rng".
--- a PK update that fails because both are referenced, but not 'til commit:
-BEGIN;
-  ALTER TABLE temporal_fk_rng2rng
-    ALTER CONSTRAINT temporal_fk_rng2rng_fk
-    DEFERRABLE INITIALLY DEFERRED;
-  UPDATE temporal_rng SET valid_at = daterange('2016-01-01', '2016-02-01')
-  WHERE id = '[5,6)' AND valid_at = daterange('2018-01-01', '2018-02-01');
-COMMIT;
-ERROR: update or delete on table "temporal_rng" violates foreign key constraint "temporal_fk_rng2rng_fk" on table "temporal_fk_rng2rng"
-DETAIL: Key (id, valid_at)=([5,6), [2018-01-01,2018-02-01)) is still referenced from table "temporal_fk_rng2rng".
--- changing the scalar part fails:
-UPDATE temporal_rng SET id = '[7,8)'
-WHERE id = '[5,6)' AND valid_at = daterange('2018-01-01', '2018-02-01');
-ERROR: update or delete on table "temporal_rng" violates foreign key constraint "temporal_fk_rng2rng_fk" on table "temporal_fk_rng2rng"
-DETAIL: Key (id, valid_at)=([5,6), [2018-01-01,2018-02-01)) is still referenced from table "temporal_fk_rng2rng".
--- then delete the objecting FK record and the same PK update succeeds:
-DELETE FROM temporal_fk_rng2rng WHERE id = '[3,4)';
-UPDATE temporal_rng SET valid_at = daterange('2016-01-01', '2016-02-01')
-WHERE id = '[5,6)' AND valid_at = daterange('2018-01-01', '2018-02-01');
---
--- test FK referenced updates RESTRICT
---
-TRUNCATE temporal_rng, temporal_fk_rng2rng;
-ALTER TABLE temporal_fk_rng2rng
-  DROP CONSTRAINT temporal_fk_rng2rng_fk;
-ALTER TABLE temporal_fk_rng2rng
-  ADD CONSTRAINT temporal_fk_rng2rng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_rng
-  ON UPDATE RESTRICT;
--- a PK update that succeeds because the numeric id isn't referenced:
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[5,6)', daterange('2018-01-01', '2018-02-01'));
-UPDATE temporal_rng SET valid_at = daterange('2016-01-01', '2016-02-01') WHERE id = '[5,6)';
--- a PK update that succeeds even though the numeric id is referenced because the range isn't:
-DELETE FROM temporal_rng WHERE id = '[5,6)';
-INSERT INTO temporal_rng (id, valid_at) VALUES
-  ('[5,6)', daterange('2018-01-01', '2018-02-01')),
-  ('[5,6)', daterange('2018-02-01', '2018-03-01'));
-INSERT INTO temporal_fk_rng2rng (id, valid_at, parent_id) VALUES ('[3,4)', daterange('2018-01-05', '2018-01-10'), '[5,6)');
-UPDATE temporal_rng SET valid_at = daterange('2016-02-01', '2016-03-01')
-WHERE id = '[5,6)' AND valid_at = daterange('2018-02-01', '2018-03-01');
--- A PK update sliding the edge between two referenced rows:
-INSERT INTO temporal_rng (id, valid_at) VALUES
-  ('[6,7)', daterange('2018-01-01', '2018-02-01')),
-  ('[6,7)', daterange('2018-02-01', '2018-03-01'));
-INSERT INTO temporal_fk_rng2rng (id, valid_at, parent_id) VALUES
-  ('[4,5)', daterange('2018-01-15', '2018-02-15'), '[6,7)');
-UPDATE temporal_rng
-SET valid_at = CASE WHEN lower(valid_at) = '2018-01-01' THEN daterange('2018-01-01', '2018-01-05')
-                    WHEN lower(valid_at) = '2018-02-01' THEN daterange('2018-01-05', '2018-03-01') END
-WHERE id = '[6,7)';
-ERROR: update or delete on table "temporal_rng" violates RESTRICT setting of foreign key constraint "temporal_fk_rng2rng_fk" on table "temporal_fk_rng2rng"
-DETAIL: Key (id, valid_at)=([6,7), [2018-01-01,2018-02-01)) is referenced from table "temporal_fk_rng2rng".
--- a PK update that fails because both are referenced (even before commit):
-BEGIN;
-  ALTER TABLE temporal_fk_rng2rng
-    ALTER CONSTRAINT temporal_fk_rng2rng_fk
-    DEFERRABLE INITIALLY DEFERRED;
-  UPDATE temporal_rng SET valid_at = daterange('2016-01-01', '2016-02-01')
-  WHERE id = '[5,6)' AND valid_at = daterange('2018-01-01', '2018-02-01');
-ERROR: update or delete on table "temporal_rng" violates RESTRICT setting of foreign key constraint "temporal_fk_rng2rng_fk" on table "temporal_fk_rng2rng"
-DETAIL: Key (id, valid_at)=([5,6), [2018-01-01,2018-02-01)) is referenced from table "temporal_fk_rng2rng".
-ROLLBACK;
--- changing the scalar part fails:
-UPDATE temporal_rng SET id = '[7,8)'
-WHERE id = '[5,6)' AND valid_at = daterange('2018-01-01', '2018-02-01');
-ERROR: update or delete on table "temporal_rng" violates RESTRICT setting of foreign key constraint "temporal_fk_rng2rng_fk" on table "temporal_fk_rng2rng"
-DETAIL: Key (id, valid_at)=([5,6), [2018-01-01,2018-02-01)) is referenced from table "temporal_fk_rng2rng".
--- then delete the objecting FK record and the same PK update succeeds:
-DELETE FROM temporal_fk_rng2rng WHERE id = '[3,4)';
-UPDATE temporal_rng SET valid_at = daterange('2016-01-01', '2016-02-01')
-WHERE id = '[5,6)' AND valid_at = daterange('2018-01-01', '2018-02-01');
---
--- test FK referenced deletes NO ACTION
---
-TRUNCATE temporal_rng, temporal_fk_rng2rng;
-ALTER TABLE temporal_fk_rng2rng
-  DROP CONSTRAINT temporal_fk_rng2rng_fk;
-ALTER TABLE temporal_fk_rng2rng
-  ADD CONSTRAINT temporal_fk_rng2rng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_rng;
--- a PK delete that succeeds because the numeric id isn't referenced:
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[5,6)', daterange('2018-01-01', '2018-02-01'));
-DELETE FROM temporal_rng WHERE id = '[5,6)';
--- a PK delete that succeeds even though the numeric id is referenced because the range isn't:
-INSERT INTO temporal_rng (id, valid_at) VALUES
-  ('[5,6)', daterange('2018-01-01', '2018-02-01')),
-  ('[5,6)', daterange('2018-02-01', '2018-03-01'));
-INSERT INTO temporal_fk_rng2rng (id, valid_at, parent_id) VALUES ('[3,4)', daterange('2018-01-05', '2018-01-10'), '[5,6)');
-DELETE FROM temporal_rng WHERE id = '[5,6)' AND valid_at = daterange('2018-02-01', '2018-03-01');
--- a PK delete that fails because both are referenced:
-DELETE FROM temporal_rng WHERE id = '[5,6)' AND valid_at = daterange('2018-01-01', '2018-02-01');
-ERROR: update or delete on table "temporal_rng" violates foreign key constraint "temporal_fk_rng2rng_fk" on table "temporal_fk_rng2rng"
-DETAIL: Key (id, valid_at)=([5,6), [2018-01-01,2018-02-01)) is still referenced from table "temporal_fk_rng2rng".
--- a PK delete that fails because both are referenced, but not 'til commit:
-BEGIN;
-  ALTER TABLE temporal_fk_rng2rng
-    ALTER CONSTRAINT temporal_fk_rng2rng_fk
-    DEFERRABLE INITIALLY DEFERRED;
-  DELETE FROM temporal_rng WHERE id = '[5,6)' AND valid_at = daterange('2018-01-01', '2018-02-01');
-COMMIT;
-ERROR: update or delete on table "temporal_rng" violates foreign key constraint "temporal_fk_rng2rng_fk" on table "temporal_fk_rng2rng"
-DETAIL: Key (id, valid_at)=([5,6), [2018-01-01,2018-02-01)) is still referenced from table "temporal_fk_rng2rng".
--- then delete the objecting FK record and the same PK delete succeeds:
-DELETE FROM temporal_fk_rng2rng WHERE id = '[3,4)';
-DELETE FROM temporal_rng WHERE id = '[5,6)' AND valid_at = daterange('2018-01-01', '2018-02-01');
---
--- test FK referenced deletes RESTRICT
---
-TRUNCATE temporal_rng, temporal_fk_rng2rng;
-ALTER TABLE temporal_fk_rng2rng
-  DROP CONSTRAINT temporal_fk_rng2rng_fk;
-ALTER TABLE temporal_fk_rng2rng
-  ADD CONSTRAINT temporal_fk_rng2rng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_rng
-  ON DELETE RESTRICT;
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[5,6)', daterange('2018-01-01', '2018-02-01'));
-DELETE FROM temporal_rng WHERE id = '[5,6)';
--- a PK delete that succeeds even though the numeric id is referenced because the range isn't:
-INSERT INTO temporal_rng (id, valid_at) VALUES
-  ('[5,6)', daterange('2018-01-01', '2018-02-01')),
-  ('[5,6)', daterange('2018-02-01', '2018-03-01'));
-INSERT INTO temporal_fk_rng2rng (id, valid_at, parent_id) VALUES ('[3,4)', daterange('2018-01-05', '2018-01-10'), '[5,6)');
-DELETE FROM temporal_rng WHERE id = '[5,6)' AND valid_at = daterange('2018-02-01', '2018-03-01');
--- a PK delete that fails because both are referenced (even before commit):
-BEGIN;
-  ALTER TABLE temporal_fk_rng2rng
-    ALTER CONSTRAINT temporal_fk_rng2rng_fk
-    DEFERRABLE INITIALLY DEFERRED;
-  DELETE FROM temporal_rng WHERE id = '[5,6)' AND valid_at = daterange('2018-01-01', '2018-02-01');
-ERROR: update or delete on table "temporal_rng" violates RESTRICT setting of foreign key constraint "temporal_fk_rng2rng_fk" on table "temporal_fk_rng2rng"
-DETAIL: Key (id, valid_at)=([5,6), [2018-01-01,2018-02-01)) is referenced from table "temporal_fk_rng2rng".
-ROLLBACK;
--- then delete the objecting FK record and the same PK delete succeeds:
-DELETE FROM temporal_fk_rng2rng WHERE id = '[3,4)';
-DELETE FROM temporal_rng WHERE id = '[5,6)' AND valid_at = daterange('2018-01-01', '2018-02-01');
---
--- test ON UPDATE/DELETE options
---
--- test FK referenced updates CASCADE
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[6,7)', daterange('2018-01-01', '2021-01-01'));
-INSERT INTO temporal_fk_rng2rng (id, valid_at, parent_id) VALUES ('[4,5)', daterange('2018-01-01', '2021-01-01'), '[6,7)');
-ALTER TABLE temporal_fk_rng2rng
-  DROP CONSTRAINT temporal_fk_rng2rng_fk,
-  ADD CONSTRAINT temporal_fk_rng2rng_fk
-    FOREIGN KEY (parent_id, PERIOD valid_at)
-    REFERENCES temporal_rng
-    ON DELETE CASCADE ON UPDATE CASCADE;
-ERROR: unsupported ON UPDATE action for foreign key constraint using PERIOD
--- test FK referenced updates SET NULL
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[9,10)', daterange('2018-01-01', '2021-01-01'));
-INSERT INTO temporal_fk_rng2rng (id, valid_at, parent_id) VALUES ('[6,7)', daterange('2018-01-01', '2021-01-01'), '[9,10)');
-ALTER TABLE temporal_fk_rng2rng
-  DROP CONSTRAINT temporal_fk_rng2rng_fk,
-  ADD CONSTRAINT temporal_fk_rng2rng_fk
-    FOREIGN KEY (parent_id, PERIOD valid_at)
-    REFERENCES temporal_rng
-    ON DELETE SET NULL ON UPDATE SET NULL;
-ERROR: unsupported ON UPDATE action for foreign key constraint using PERIOD
--- test FK referenced updates SET DEFAULT
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[-1,-1]', daterange(null, null));
-INSERT INTO temporal_rng (id, valid_at) VALUES ('[12,13)', daterange('2018-01-01', '2021-01-01'));
-INSERT INTO temporal_fk_rng2rng (id, valid_at, parent_id) VALUES ('[8,9)', daterange('2018-01-01', '2021-01-01'), '[12,13)');
-ALTER TABLE temporal_fk_rng2rng
-  ALTER COLUMN parent_id SET DEFAULT '[-1,-1]',
-  DROP CONSTRAINT temporal_fk_rng2rng_fk,
-  ADD CONSTRAINT temporal_fk_rng2rng_fk
-    FOREIGN KEY (parent_id, PERIOD valid_at)
-    REFERENCES temporal_rng
-    ON DELETE SET DEFAULT ON UPDATE SET DEFAULT;
-ERROR: unsupported ON UPDATE action for foreign key constraint using PERIOD
---
--- test FOREIGN KEY, multirange references multirange
---
--- test table setup
-DROP TABLE temporal_mltrng;
-CREATE TABLE temporal_mltrng ( id int4range, valid_at datemultirange);
-ALTER TABLE temporal_mltrng
-  ADD CONSTRAINT temporal_mltrng_pk
-  PRIMARY KEY (id, valid_at WITHOUT OVERLAPS);
--- Can't create a FK with a mismatched multirange type
-CREATE TABLE temporal_fk_mltrng2mltrng (
-  id int4range,
-  valid_at int4multirange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_mltrng2mltrng_pk2 PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_mltrng2mltrng_fk2 FOREIGN KEY (parent_id, PERIOD valid_at)
-    REFERENCES temporal_mltrng (id, PERIOD valid_at)
-);
-ERROR: foreign key constraint "temporal_fk_mltrng2mltrng_fk2" cannot be implemented
-DETAIL: Key columns "valid_at" of the referencing table and "valid_at" of the referenced table are of incompatible types: int4multirange and datemultirange.
-CREATE TABLE temporal_fk_mltrng2mltrng (
-  id int4range,
-  valid_at datemultirange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_mltrng2mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_mltrng2mltrng_fk FOREIGN KEY (parent_id, PERIOD valid_at)
-    REFERENCES temporal_mltrng (id, PERIOD valid_at)
-);
-DROP TABLE temporal_fk_mltrng2mltrng;
--- with mismatched PERIOD columns:
--- (parent_id, PERIOD valid_at) REFERENCES (id, valid_at)
--- REFERENCES part should specify PERIOD
-CREATE TABLE temporal_fk_mltrng2mltrng (
-  id int4range,
-  valid_at datemultirange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_mltrng2mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_mltrng2mltrng_fk FOREIGN KEY (parent_id, PERIOD valid_at)
-    REFERENCES temporal_mltrng (id, valid_at)
-);
-ERROR: foreign key uses PERIOD on the referencing table but not the referenced table
--- (parent_id, valid_at) REFERENCES (id, valid_at)
--- both should specify PERIOD:
-CREATE TABLE temporal_fk_mltrng2mltrng (
-  id int4range,
-  valid_at datemultirange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_mltrng2mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_mltrng2mltrng_fk FOREIGN KEY (parent_id, valid_at)
-    REFERENCES temporal_mltrng (id, valid_at)
-);
-ERROR: foreign key must use PERIOD when referencing a primary using WITHOUT OVERLAPS
--- (parent_id, valid_at) REFERENCES (id, PERIOD valid_at)
--- FOREIGN KEY part should specify PERIOD
-CREATE TABLE temporal_fk_mltrng2mltrng (
-  id int4range,
-  valid_at datemultirange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_mltrng2mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_mltrng2mltrng_fk FOREIGN KEY (parent_id, valid_at)
-    REFERENCES temporal_mltrng (id, PERIOD valid_at)
-);
-ERROR: foreign key uses PERIOD on the referenced table but not the referencing table
--- (parent_id, valid_at) REFERENCES [implicit]
--- FOREIGN KEY part should specify PERIOD
-CREATE TABLE temporal_fk_mltrng2mltrng (
-  id int4range,
-  valid_at datemultirange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_mltrng2mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_mltrng2mltrng_fk FOREIGN KEY (parent_id, valid_at)
-    REFERENCES temporal_mltrng
-);
-ERROR: foreign key uses PERIOD on the referenced table but not the referencing table
--- (parent_id, PERIOD valid_at) REFERENCES (id)
-CREATE TABLE temporal_fk_mltrng2mltrng (
-  id int4range,
-  valid_at datemultirange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_mltrng2mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_mltrng2mltrng_fk FOREIGN KEY (parent_id, PERIOD valid_at)
-    REFERENCES temporal_mltrng (id)
-);
-ERROR: foreign key uses PERIOD on the referencing table but not the referenced table
--- (parent_id) REFERENCES (id, PERIOD valid_at)
-CREATE TABLE temporal_fk_mltrng2mltrng (
-  id int4range,
-  valid_at datemultirange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_mltrng2mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_mltrng2mltrng_fk FOREIGN KEY (parent_id)
-    REFERENCES temporal_mltrng (id, PERIOD valid_at)
-);
-ERROR: foreign key uses PERIOD on the referenced table but not the referencing table
--- with inferred PK on the referenced table:
--- (parent_id, PERIOD valid_at) REFERENCES [implicit]
-CREATE TABLE temporal_fk_mltrng2mltrng (
-  id int4range,
-  valid_at datemultirange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_mltrng2mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_mltrng2mltrng_fk FOREIGN KEY (parent_id, PERIOD valid_at)
-    REFERENCES temporal_mltrng
-);
-DROP TABLE temporal_fk_mltrng2mltrng;
--- (parent_id) REFERENCES [implicit]
-CREATE TABLE temporal_fk_mltrng2mltrng (
-  id int4range,
-  valid_at datemultirange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_mltrng2mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_mltrng2mltrng_fk FOREIGN KEY (parent_id)
-    REFERENCES temporal_mltrng
-);
-ERROR: foreign key uses PERIOD on the referenced table but not the referencing table
--- should fail because of duplicate referenced columns:
-CREATE TABLE temporal_fk_mltrng2mltrng (
-  id int4range,
-  valid_at datemultirange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_mltrng2mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk_mltrng2mltrng_fk FOREIGN KEY (parent_id, PERIOD parent_id)
-    REFERENCES temporal_mltrng (id, PERIOD id)
-);
-ERROR: foreign key referenced-columns list must not contain duplicates
--- Two scalar columns
-DROP TABLE temporal_mltrng2;
-CREATE TABLE temporal_mltrng2 (
-  id1 int4range,
-  id2 int4range,
-  valid_at datemultirange,
-  CONSTRAINT temporal_mltrng2_pk PRIMARY KEY (id1, id2, valid_at WITHOUT OVERLAPS)
-);
-CREATE TABLE temporal_fk2_mltrng2mltrng (
-  id int4range,
-  valid_at datemultirange,
-  parent_id1 int4range,
-  parent_id2 int4range,
-  CONSTRAINT temporal_fk2_mltrng2mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_fk2_mltrng2mltrng_fk FOREIGN KEY (parent_id1, parent_id2, PERIOD valid_at)
-    REFERENCES temporal_mltrng2 (id1, id2, PERIOD valid_at)
-);
-\d temporal_fk2_mltrng2mltrng
-          Table "public.temporal_fk2_mltrng2mltrng"
-   Column   |      Type      | Collation | Nullable | Default 
-------------+----------------+-----------+----------+---------
- id         | int4range      |           | not null | 
- valid_at   | datemultirange |           | not null | 
- parent_id1 | int4range      |           |          | 
- parent_id2 | int4range      |           |          | 
-Indexes:
-    "temporal_fk2_mltrng2mltrng_pk" PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-Foreign-key constraints:
-    "temporal_fk2_mltrng2mltrng_fk" FOREIGN KEY (parent_id1, parent_id2, PERIOD valid_at) REFERENCES temporal_mltrng2(id1, id2, PERIOD valid_at)
-
-DROP TABLE temporal_fk2_mltrng2mltrng;
---
--- test ALTER TABLE ADD CONSTRAINT
---
-CREATE TABLE temporal_fk_mltrng2mltrng (
-  id int4range,
-  valid_at datemultirange,
-  parent_id int4range,
-  CONSTRAINT temporal_fk_mltrng2mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-);
-ALTER TABLE temporal_fk_mltrng2mltrng
-  ADD CONSTRAINT temporal_fk_mltrng2mltrng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_mltrng (id, PERIOD valid_at);
--- Two scalar columns:
-CREATE TABLE temporal_fk2_mltrng2mltrng (
-  id int4range,
-  valid_at datemultirange,
-  parent_id1 int4range,
-  parent_id2 int4range,
-  CONSTRAINT temporal_fk2_mltrng2mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-);
-ALTER TABLE temporal_fk2_mltrng2mltrng
-  ADD CONSTRAINT temporal_fk2_mltrng2mltrng_fk
-  FOREIGN KEY (parent_id1, parent_id2, PERIOD valid_at)
-  REFERENCES temporal_mltrng2 (id1, id2, PERIOD valid_at);
-\d temporal_fk2_mltrng2mltrng
-          Table "public.temporal_fk2_mltrng2mltrng"
-   Column   |      Type      | Collation | Nullable | Default 
-------------+----------------+-----------+----------+---------
- id         | int4range      |           | not null | 
- valid_at   | datemultirange |           | not null | 
- parent_id1 | int4range      |           |          | 
- parent_id2 | int4range      |           |          | 
-Indexes:
-    "temporal_fk2_mltrng2mltrng_pk" PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-Foreign-key constraints:
-    "temporal_fk2_mltrng2mltrng_fk" FOREIGN KEY (parent_id1, parent_id2, PERIOD valid_at) REFERENCES temporal_mltrng2(id1, id2, PERIOD valid_at)
-
--- should fail because of duplicate referenced columns:
-ALTER TABLE temporal_fk_mltrng2mltrng
-  ADD CONSTRAINT temporal_fk_mltrng2mltrng_fk2
-  FOREIGN KEY (parent_id, PERIOD parent_id)
-  REFERENCES temporal_mltrng (id, PERIOD id);
-ERROR: foreign key referenced-columns list must not contain duplicates
---
--- test with rows already
---
-DELETE FROM temporal_fk_mltrng2mltrng;
-INSERT INTO temporal_mltrng (id, valid_at) VALUES
-  ('[1,2)', datemultirange(daterange('2018-01-02', '2018-02-03'))),
-  ('[1,2)', datemultirange(daterange('2018-03-03', '2018-04-04'))),
-  ('[2,3)', datemultirange(daterange('2018-01-01', '2018-01-05'))),
-  ('[3,4)', datemultirange(daterange('2018-01-01', NULL)));
-ALTER TABLE temporal_fk_mltrng2mltrng
-  DROP CONSTRAINT temporal_fk_mltrng2mltrng_fk;
-INSERT INTO temporal_fk_mltrng2mltrng (id, valid_at, parent_id) VALUES ('[1,2)', datemultirange(daterange('2018-01-02', '2018-02-01')), '[1,2)');
-ALTER TABLE temporal_fk_mltrng2mltrng
-  ADD CONSTRAINT temporal_fk_mltrng2mltrng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_mltrng (id, PERIOD valid_at);
-ALTER TABLE temporal_fk_mltrng2mltrng
-  DROP CONSTRAINT temporal_fk_mltrng2mltrng_fk;
-INSERT INTO temporal_fk_mltrng2mltrng (id, valid_at, parent_id) VALUES ('[2,3)', datemultirange(daterange('2018-01-02', '2018-04-01')), '[1,2)');
--- should fail:
-ALTER TABLE temporal_fk_mltrng2mltrng
-  ADD CONSTRAINT temporal_fk_mltrng2mltrng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_mltrng (id, PERIOD valid_at);
-ERROR: insert or update on table "temporal_fk_mltrng2mltrng" violates foreign key constraint "temporal_fk_mltrng2mltrng_fk"
-DETAIL: Key (parent_id, valid_at)=([1,2), {[2018-01-02,2018-04-01)}) is not present in table "temporal_mltrng".
--- okay again:
-DELETE FROM temporal_fk_mltrng2mltrng;
-ALTER TABLE temporal_fk_mltrng2mltrng
-  ADD CONSTRAINT temporal_fk_mltrng2mltrng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_mltrng (id, PERIOD valid_at);
---
--- test pg_get_constraintdef
---
-SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_fk_mltrng2mltrng_fk';
-                                   pg_get_constraintdef                                   
-------------------------------------------------------------------------------------------
- FOREIGN KEY (parent_id, PERIOD valid_at) REFERENCES temporal_mltrng(id, PERIOD valid_at)
-(1 row)
-
---
--- test FK referencing inserts
---
-INSERT INTO temporal_fk_mltrng2mltrng (id, valid_at, parent_id) VALUES ('[1,2)', datemultirange(daterange('2018-01-02', '2018-02-01')), '[1,2)');
--- should fail:
-INSERT INTO temporal_fk_mltrng2mltrng (id, valid_at, parent_id) VALUES ('[2,3)', datemultirange(daterange('2018-01-02', '2018-04-01')), '[1,2)');
-ERROR: insert or update on table "temporal_fk_mltrng2mltrng" violates foreign key constraint "temporal_fk_mltrng2mltrng_fk"
-DETAIL: Key (parent_id, valid_at)=([1,2), {[2018-01-02,2018-04-01)}) is not present in table "temporal_mltrng".
--- now it should work:
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2018-02-03', '2018-03-03')));
-INSERT INTO temporal_fk_mltrng2mltrng (id, valid_at, parent_id) VALUES ('[2,3)', datemultirange(daterange('2018-01-02', '2018-04-01')), '[1,2)');
---
--- test FK referencing updates
---
--- slide the edge across a referenced transition:
-UPDATE temporal_fk_mltrng2mltrng SET valid_at = datemultirange(daterange('2018-01-02', '2018-02-20')) WHERE id = '[1,2)';
--- should fail:
-UPDATE temporal_fk_mltrng2mltrng SET valid_at = datemultirange(daterange('2018-01-02', '2018-05-01')) WHERE id = '[1,2)';
-ERROR: insert or update on table "temporal_fk_mltrng2mltrng" violates foreign key constraint "temporal_fk_mltrng2mltrng_fk"
-DETAIL: Key (parent_id, valid_at)=([1,2), {[2018-01-02,2018-05-01)}) is not present in table "temporal_mltrng".
-UPDATE temporal_fk_mltrng2mltrng SET parent_id = '[8,9)' WHERE id = '[1,2)';
-ERROR: insert or update on table "temporal_fk_mltrng2mltrng" violates foreign key constraint "temporal_fk_mltrng2mltrng_fk"
-DETAIL: Key (parent_id, valid_at)=([8,9), {[2018-01-02,2018-02-20)}) is not present in table "temporal_mltrng".
--- ALTER FK DEFERRABLE
-BEGIN;
-  INSERT INTO temporal_mltrng (id, valid_at) VALUES
-    ('[5,6)', datemultirange(daterange('2018-01-01', '2018-02-01'))),
-    ('[5,6)', datemultirange(daterange('2018-02-01', '2018-03-01')));
-  INSERT INTO temporal_fk_mltrng2mltrng (id, valid_at, parent_id) VALUES
-    ('[3,4)', datemultirange(daterange('2018-01-05', '2018-01-10')), '[5,6)');
-  ALTER TABLE temporal_fk_mltrng2mltrng
-    ALTER CONSTRAINT temporal_fk_mltrng2mltrng_fk
-    DEFERRABLE INITIALLY DEFERRED;
-  DELETE FROM temporal_mltrng WHERE id = '[5,6)'; --should not fail yet.
-COMMIT; -- should fail here.
-ERROR: update or delete on table "temporal_mltrng" violates foreign key constraint "temporal_fk_mltrng2mltrng_fk" on table "temporal_fk_mltrng2mltrng"
-DETAIL: Key (id, valid_at)=([5,6), {[2018-01-01,2018-02-01)}) is still referenced from table "temporal_fk_mltrng2mltrng".
---
--- test FK referenced updates NO ACTION
---
-TRUNCATE temporal_mltrng, temporal_fk_mltrng2mltrng;
-ALTER TABLE temporal_fk_mltrng2mltrng
-  DROP CONSTRAINT temporal_fk_mltrng2mltrng_fk;
-ALTER TABLE temporal_fk_mltrng2mltrng
-  ADD CONSTRAINT temporal_fk_mltrng2mltrng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_mltrng (id, PERIOD valid_at)
-  ON UPDATE NO ACTION;
--- a PK update that succeeds because the numeric id isn't referenced:
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[5,6)', datemultirange(daterange('2018-01-01', '2018-02-01')));
-UPDATE temporal_mltrng SET valid_at = datemultirange(daterange('2016-01-01', '2016-02-01')) WHERE id = '[5,6)';
--- a PK update that succeeds even though the numeric id is referenced because the range isn't:
-DELETE FROM temporal_mltrng WHERE id = '[5,6)';
-INSERT INTO temporal_mltrng (id, valid_at) VALUES
-  ('[5,6)', datemultirange(daterange('2018-01-01', '2018-02-01'))),
-  ('[5,6)', datemultirange(daterange('2018-02-01', '2018-03-01')));
-INSERT INTO temporal_fk_mltrng2mltrng (id, valid_at, parent_id) VALUES ('[3,4)', datemultirange(daterange('2018-01-05', '2018-01-10')), '[5,6)');
-UPDATE temporal_mltrng SET valid_at = datemultirange(daterange('2016-02-01', '2016-03-01'))
-WHERE id = '[5,6)' AND valid_at = datemultirange(daterange('2018-02-01', '2018-03-01'));
--- A PK update sliding the edge between two referenced rows:
-INSERT INTO temporal_mltrng (id, valid_at) VALUES
-  ('[6,7)', datemultirange(daterange('2018-01-01', '2018-02-01'))),
-  ('[6,7)', datemultirange(daterange('2018-02-01', '2018-03-01')));
-INSERT INTO temporal_fk_mltrng2mltrng (id, valid_at, parent_id) VALUES
-  ('[4,5)', datemultirange(daterange('2018-01-15', '2018-02-15')), '[6,7)');
-UPDATE temporal_mltrng
-SET valid_at = CASE WHEN lower(valid_at) = '2018-01-01' THEN datemultirange(daterange('2018-01-01', '2018-01-05'))
-                    WHEN lower(valid_at) = '2018-02-01' THEN datemultirange(daterange('2018-01-05', '2018-03-01')) END
-WHERE id = '[6,7)';
--- a PK update that fails because both are referenced:
-UPDATE temporal_mltrng SET valid_at = datemultirange(daterange('2016-01-01', '2016-02-01'))
-WHERE id = '[5,6)' AND valid_at = datemultirange(daterange('2018-01-01', '2018-02-01'));
-ERROR: update or delete on table "temporal_mltrng" violates foreign key constraint "temporal_fk_mltrng2mltrng_fk" on table "temporal_fk_mltrng2mltrng"
-DETAIL: Key (id, valid_at)=([5,6), {[2018-01-01,2018-02-01)}) is still referenced from table "temporal_fk_mltrng2mltrng".
--- a PK update that fails because both are referenced, but not 'til commit:
-BEGIN;
-  ALTER TABLE temporal_fk_mltrng2mltrng
-    ALTER CONSTRAINT temporal_fk_mltrng2mltrng_fk
-    DEFERRABLE INITIALLY DEFERRED;
-  UPDATE temporal_mltrng SET valid_at = datemultirange(daterange('2016-01-01', '2016-02-01'))
-  WHERE id = '[5,6)' AND valid_at = datemultirange(daterange('2018-01-01', '2018-02-01'));
-COMMIT;
-ERROR: update or delete on table "temporal_mltrng" violates foreign key constraint "temporal_fk_mltrng2mltrng_fk" on table "temporal_fk_mltrng2mltrng"
-DETAIL: Key (id, valid_at)=([5,6), {[2018-01-01,2018-02-01)}) is still referenced from table "temporal_fk_mltrng2mltrng".
--- changing the scalar part fails:
-UPDATE temporal_mltrng SET id = '[7,8)'
-WHERE id = '[5,6)' AND valid_at = datemultirange(daterange('2018-01-01', '2018-02-01'));
-ERROR: update or delete on table "temporal_mltrng" violates foreign key constraint "temporal_fk_mltrng2mltrng_fk" on table "temporal_fk_mltrng2mltrng"
-DETAIL: Key (id, valid_at)=([5,6), {[2018-01-01,2018-02-01)}) is still referenced from table "temporal_fk_mltrng2mltrng".
---
--- test FK referenced updates RESTRICT
---
-TRUNCATE temporal_mltrng, temporal_fk_mltrng2mltrng;
-ALTER TABLE temporal_fk_mltrng2mltrng
-  DROP CONSTRAINT temporal_fk_mltrng2mltrng_fk;
-ALTER TABLE temporal_fk_mltrng2mltrng
-  ADD CONSTRAINT temporal_fk_mltrng2mltrng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_mltrng (id, PERIOD valid_at)
-  ON UPDATE RESTRICT;
--- a PK update that succeeds because the numeric id isn't referenced:
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[5,6)', datemultirange(daterange('2018-01-01', '2018-02-01')));
-UPDATE temporal_mltrng SET valid_at = datemultirange(daterange('2016-01-01', '2016-02-01')) WHERE id = '[5,6)';
--- a PK update that succeeds even though the numeric id is referenced because the range isn't:
-DELETE FROM temporal_mltrng WHERE id = '[5,6)';
-INSERT INTO temporal_mltrng (id, valid_at) VALUES
-  ('[5,6)', datemultirange(daterange('2018-01-01', '2018-02-01'))),
-  ('[5,6)', datemultirange(daterange('2018-02-01', '2018-03-01')));
-INSERT INTO temporal_fk_mltrng2mltrng (id, valid_at, parent_id) VALUES ('[3,4)', datemultirange(daterange('2018-01-05', '2018-01-10')), '[5,6)');
-UPDATE temporal_mltrng SET valid_at = datemultirange(daterange('2016-02-01', '2016-03-01'))
-WHERE id = '[5,6)' AND valid_at = datemultirange(daterange('2018-02-01', '2018-03-01'));
--- A PK update sliding the edge between two referenced rows:
-INSERT INTO temporal_mltrng (id, valid_at) VALUES
-  ('[6,7)', datemultirange(daterange('2018-01-01', '2018-02-01'))),
-  ('[6,7)', datemultirange(daterange('2018-02-01', '2018-03-01')));
-INSERT INTO temporal_fk_mltrng2mltrng (id, valid_at, parent_id) VALUES
-  ('[4,5)', datemultirange(daterange('2018-01-15', '2018-02-15')), '[6,7)');
-UPDATE temporal_mltrng
-SET valid_at = CASE WHEN lower(valid_at) = '2018-01-01' THEN datemultirange(daterange('2018-01-01', '2018-01-05'))
-                    WHEN lower(valid_at) = '2018-02-01' THEN datemultirange(daterange('2018-01-05', '2018-03-01')) END
-WHERE id = '[6,7)';
-ERROR: update or delete on table "temporal_mltrng" violates RESTRICT setting of foreign key constraint "temporal_fk_mltrng2mltrng_fk" on table "temporal_fk_mltrng2mltrng"
-DETAIL: Key (id, valid_at)=([6,7), {[2018-01-01,2018-02-01)}) is referenced from table "temporal_fk_mltrng2mltrng".
--- a PK update that fails because both are referenced (even before commit):
-BEGIN;
-  ALTER TABLE temporal_fk_mltrng2mltrng
-    ALTER CONSTRAINT temporal_fk_mltrng2mltrng_fk
-    DEFERRABLE INITIALLY DEFERRED;
-  UPDATE temporal_mltrng SET valid_at = datemultirange(daterange('2016-01-01', '2016-02-01'))
-  WHERE id = '[5,6)' AND valid_at = datemultirange(daterange('2018-01-01', '2018-02-01'));
-ERROR: update or delete on table "temporal_mltrng" violates RESTRICT setting of foreign key constraint "temporal_fk_mltrng2mltrng_fk" on table "temporal_fk_mltrng2mltrng"
-DETAIL: Key (id, valid_at)=([5,6), {[2018-01-01,2018-02-01)}) is referenced from table "temporal_fk_mltrng2mltrng".
-ROLLBACK;
--- changing the scalar part fails:
-UPDATE temporal_mltrng SET id = '[7,8)'
-WHERE id = '[5,6)' AND valid_at = datemultirange(daterange('2018-01-01', '2018-02-01'));
-ERROR: update or delete on table "temporal_mltrng" violates RESTRICT setting of foreign key constraint "temporal_fk_mltrng2mltrng_fk" on table "temporal_fk_mltrng2mltrng"
-DETAIL: Key (id, valid_at)=([5,6), {[2018-01-01,2018-02-01)}) is referenced from table "temporal_fk_mltrng2mltrng".
---
--- test FK referenced deletes NO ACTION
---
-TRUNCATE temporal_mltrng, temporal_fk_mltrng2mltrng;
-ALTER TABLE temporal_fk_mltrng2mltrng
-  DROP CONSTRAINT temporal_fk_mltrng2mltrng_fk;
-ALTER TABLE temporal_fk_mltrng2mltrng
-  ADD CONSTRAINT temporal_fk_mltrng2mltrng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_mltrng (id, PERIOD valid_at);
--- a PK delete that succeeds because the numeric id isn't referenced:
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[5,6)', datemultirange(daterange('2018-01-01', '2018-02-01')));
-DELETE FROM temporal_mltrng WHERE id = '[5,6)';
--- a PK delete that succeeds even though the numeric id is referenced because the range isn't:
-INSERT INTO temporal_mltrng (id, valid_at) VALUES
-  ('[5,6)', datemultirange(daterange('2018-01-01', '2018-02-01'))),
-  ('[5,6)', datemultirange(daterange('2018-02-01', '2018-03-01')));
-INSERT INTO temporal_fk_mltrng2mltrng (id, valid_at, parent_id) VALUES ('[3,4)', datemultirange(daterange('2018-01-05', '2018-01-10')), '[5,6)');
-DELETE FROM temporal_mltrng WHERE id = '[5,6)' AND valid_at = datemultirange(daterange('2018-02-01', '2018-03-01'));
--- a PK delete that fails because both are referenced:
-DELETE FROM temporal_mltrng WHERE id = '[5,6)' AND valid_at = datemultirange(daterange('2018-01-01', '2018-02-01'));
-ERROR: update or delete on table "temporal_mltrng" violates foreign key constraint "temporal_fk_mltrng2mltrng_fk" on table "temporal_fk_mltrng2mltrng"
-DETAIL: Key (id, valid_at)=([5,6), {[2018-01-01,2018-02-01)}) is still referenced from table "temporal_fk_mltrng2mltrng".
--- a PK delete that fails because both are referenced, but not 'til commit:
-BEGIN;
-  ALTER TABLE temporal_fk_mltrng2mltrng
-    ALTER CONSTRAINT temporal_fk_mltrng2mltrng_fk
-    DEFERRABLE INITIALLY DEFERRED;
-  DELETE FROM temporal_mltrng WHERE id = '[5,6)' AND valid_at = datemultirange(daterange('2018-01-01', '2018-02-01'));
-COMMIT;
-ERROR: update or delete on table "temporal_mltrng" violates foreign key constraint "temporal_fk_mltrng2mltrng_fk" on table "temporal_fk_mltrng2mltrng"
-DETAIL: Key (id, valid_at)=([5,6), {[2018-01-01,2018-02-01)}) is still referenced from table "temporal_fk_mltrng2mltrng".
---
--- test FK referenced deletes RESTRICT
---
-TRUNCATE temporal_mltrng, temporal_fk_mltrng2mltrng;
-ALTER TABLE temporal_fk_mltrng2mltrng
-  DROP CONSTRAINT temporal_fk_mltrng2mltrng_fk;
-ALTER TABLE temporal_fk_mltrng2mltrng
-  ADD CONSTRAINT temporal_fk_mltrng2mltrng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_mltrng (id, PERIOD valid_at)
-  ON DELETE RESTRICT;
-INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[5,6)', datemultirange(daterange('2018-01-01', '2018-02-01')));
-DELETE FROM temporal_mltrng WHERE id = '[5,6)';
--- a PK delete that succeeds even though the numeric id is referenced because the range isn't:
-INSERT INTO temporal_mltrng (id, valid_at) VALUES
-  ('[5,6)', datemultirange(daterange('2018-01-01', '2018-02-01'))),
-  ('[5,6)', datemultirange(daterange('2018-02-01', '2018-03-01')));
-INSERT INTO temporal_fk_mltrng2mltrng (id, valid_at, parent_id) VALUES ('[3,4)', datemultirange(daterange('2018-01-05', '2018-01-10')), '[5,6)');
-DELETE FROM temporal_mltrng WHERE id = '[5,6)' AND valid_at = datemultirange(daterange('2018-02-01', '2018-03-01'));
--- a PK delete that fails because both are referenced (even before commit):
-BEGIN;
-  ALTER TABLE temporal_fk_mltrng2mltrng
-    ALTER CONSTRAINT temporal_fk_mltrng2mltrng_fk
-    DEFERRABLE INITIALLY DEFERRED;
-  DELETE FROM temporal_mltrng WHERE id = '[5,6)' AND valid_at = datemultirange(daterange('2018-01-01', '2018-02-01'));
-ERROR: update or delete on table "temporal_mltrng" violates RESTRICT setting of foreign key constraint "temporal_fk_mltrng2mltrng_fk" on table "temporal_fk_mltrng2mltrng"
-DETAIL: Key (id, valid_at)=([5,6), {[2018-01-01,2018-02-01)}) is referenced from table "temporal_fk_mltrng2mltrng".
-ROLLBACK;
---
--- FK between partitioned tables: ranges
---
-CREATE TABLE temporal_partitioned_rng (
-  id int4range,
-  valid_at daterange,
-  name text,
-  CONSTRAINT temporal_paritioned_rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)
-) PARTITION BY LIST (id);
-CREATE TABLE tp1 partition OF temporal_partitioned_rng FOR VALUES IN ('[1,2)', '[3,4)', '[5,6)', '[7,8)', '[9,10)', '[11,12)');
-CREATE TABLE tp2 partition OF temporal_partitioned_rng FOR VALUES IN ('[2,3)', '[4,5)', '[6,7)', '[8,9)', '[10,11)', '[12,13)');
-INSERT INTO temporal_partitioned_rng (id, valid_at, name) VALUES
-  ('[1,2)', daterange('2000-01-01', '2000-02-01'), 'one'),
-  ('[1,2)', daterange('2000-02-01', '2000-03-01'), 'one'),
-  ('[2,3)', daterange('2000-01-01', '2010-01-01'), 'two');
-CREATE TABLE temporal_partitioned_fk_rng2rng (
-  id int4range,
-  valid_at daterange,
-  parent_id int4range,
-  CONSTRAINT temporal_partitioned_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_partitioned_fk_rng2rng_fk FOREIGN KEY (parent_id, PERIOD valid_at)
-    REFERENCES temporal_partitioned_rng (id, PERIOD valid_at)
-) PARTITION BY LIST (id);
-CREATE TABLE tfkp1 partition OF temporal_partitioned_fk_rng2rng FOR VALUES IN ('[1,2)', '[3,4)', '[5,6)', '[7,8)', '[9,10)', '[11,12)');
-CREATE TABLE tfkp2 partition OF temporal_partitioned_fk_rng2rng FOR VALUES IN ('[2,3)', '[4,5)', '[6,7)', '[8,9)', '[10,11)', '[12,13)');
---
--- partitioned FK referencing inserts
---
-INSERT INTO temporal_partitioned_fk_rng2rng (id, valid_at, parent_id) VALUES
-  ('[1,2)', daterange('2000-01-01', '2000-02-15'), '[1,2)'),
-  ('[1,2)', daterange('2001-01-01', '2002-01-01'), '[2,3)'),
-  ('[2,3)', daterange('2000-01-01', '2000-02-15'), '[1,2)');
--- should fail:
-INSERT INTO temporal_partitioned_fk_rng2rng (id, valid_at, parent_id) VALUES
-  ('[3,4)', daterange('2010-01-01', '2010-02-15'), '[1,2)');
-ERROR: insert or update on table "tfkp1" violates foreign key constraint "temporal_partitioned_fk_rng2rng_fk"
-DETAIL: Key (parent_id, valid_at)=([1,2), [2010-01-01,2010-02-15)) is not present in table "temporal_partitioned_rng".
-INSERT INTO temporal_partitioned_fk_rng2rng (id, valid_at, parent_id) VALUES
-  ('[3,4)', daterange('2000-01-01', '2000-02-15'), '[3,4)');
-ERROR: insert or update on table "tfkp1" violates foreign key constraint "temporal_partitioned_fk_rng2rng_fk"
-DETAIL: Key (parent_id, valid_at)=([3,4), [2000-01-01,2000-02-15)) is not present in table "temporal_partitioned_rng".
---
--- partitioned FK referencing updates
---
-UPDATE temporal_partitioned_fk_rng2rng SET valid_at = daterange('2000-01-01', '2000-02-13') WHERE id = '[2,3)';
--- move a row from the first partition to the second
-UPDATE temporal_partitioned_fk_rng2rng SET id = '[4,5)' WHERE id = '[1,2)';
--- move a row from the second partition to the first
-UPDATE temporal_partitioned_fk_rng2rng SET id = '[1,2)' WHERE id = '[4,5)';
--- should fail:
-UPDATE temporal_partitioned_fk_rng2rng SET valid_at = daterange('2000-01-01', '2000-04-01') WHERE id = '[1,2)';
-ERROR: conflicting key value violates exclusion constraint "tfkp1_pkey"
-DETAIL: Key (id, valid_at)=([1,2), [2000-01-01,2000-04-01)) conflicts with existing key (id, valid_at)=([1,2), [2000-01-01,2000-04-01)).
---
--- partitioned FK referenced updates NO ACTION
---
-TRUNCATE temporal_partitioned_rng, temporal_partitioned_fk_rng2rng;
-INSERT INTO temporal_partitioned_rng (id, valid_at) VALUES ('[5,6)', daterange('2016-01-01', '2016-02-01'));
-UPDATE temporal_partitioned_rng SET valid_at = daterange('2018-01-01', '2018-02-01') WHERE id = '[5,6)';
-INSERT INTO temporal_partitioned_rng (id, valid_at) VALUES ('[5,6)', daterange('2018-02-01', '2018-03-01'));
-INSERT INTO temporal_partitioned_fk_rng2rng (id, valid_at, parent_id) VALUES ('[3,4)', daterange('2018-01-05', '2018-01-10'), '[5,6)');
-UPDATE temporal_partitioned_rng SET valid_at = daterange('2016-02-01', '2016-03-01')
-  WHERE id = '[5,6)' AND valid_at = daterange('2018-02-01', '2018-03-01');
--- should fail:
-UPDATE temporal_partitioned_rng SET valid_at = daterange('2016-01-01', '2016-02-01')
-  WHERE id = '[5,6)' AND valid_at = daterange('2018-01-01', '2018-02-01');
-ERROR: update or delete on table "tp1" violates foreign key constraint "temporal_partitioned_fk_rng2rng_parent_id_valid_at_fkey" on table "temporal_partitioned_fk_rng2rng"
-DETAIL: Key (id, valid_at)=([5,6), [2018-01-01,2018-02-01)) is still referenced from table "temporal_partitioned_fk_rng2rng".
---
--- partitioned FK referenced deletes NO ACTION
---
-TRUNCATE temporal_partitioned_rng, temporal_partitioned_fk_rng2rng;
-INSERT INTO temporal_partitioned_rng (id, valid_at) VALUES ('[5,6)', daterange('2018-01-01', '2018-02-01'));
-INSERT INTO temporal_partitioned_rng (id, valid_at) VALUES ('[5,6)', daterange('2018-02-01', '2018-03-01'));
-INSERT INTO temporal_partitioned_fk_rng2rng (id, valid_at, parent_id) VALUES ('[3,4)', daterange('2018-01-05', '2018-01-10'), '[5,6)');
-DELETE FROM temporal_partitioned_rng WHERE id = '[5,6)' AND valid_at = daterange('2018-02-01', '2018-03-01');
--- should fail:
-DELETE FROM temporal_partitioned_rng WHERE id = '[5,6)' AND valid_at = daterange('2018-01-01', '2018-02-01');
-ERROR: update or delete on table "tp1" violates foreign key constraint "temporal_partitioned_fk_rng2rng_parent_id_valid_at_fkey" on table "temporal_partitioned_fk_rng2rng"
-DETAIL: Key (id, valid_at)=([5,6), [2018-01-01,2018-02-01)) is still referenced from table "temporal_partitioned_fk_rng2rng".
---
--- partitioned FK referenced updates RESTRICT
---
-TRUNCATE temporal_partitioned_rng, temporal_partitioned_fk_rng2rng;
-ALTER TABLE temporal_partitioned_fk_rng2rng
-  DROP CONSTRAINT temporal_partitioned_fk_rng2rng_fk;
-ALTER TABLE temporal_partitioned_fk_rng2rng
-  ADD CONSTRAINT temporal_partitioned_fk_rng2rng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_partitioned_rng
-  ON DELETE RESTRICT;
-INSERT INTO temporal_partitioned_rng (id, valid_at) VALUES ('[5,6)', daterange('2016-01-01', '2016-02-01'));
-UPDATE temporal_partitioned_rng SET valid_at = daterange('2018-01-01', '2018-02-01') WHERE id = '[5,6)';
-INSERT INTO temporal_partitioned_rng (id, valid_at) VALUES ('[5,6)', daterange('2018-02-01', '2018-03-01'));
-INSERT INTO temporal_partitioned_fk_rng2rng (id, valid_at, parent_id) VALUES ('[3,4)', daterange('2018-01-05', '2018-01-10'), '[5,6)');
-UPDATE temporal_partitioned_rng SET valid_at = daterange('2016-02-01', '2016-03-01')
-  WHERE id = '[5,6)' AND valid_at = daterange('2018-02-01', '2018-03-01');
--- should fail:
-UPDATE temporal_partitioned_rng SET valid_at = daterange('2016-01-01', '2016-02-01')
-  WHERE id = '[5,6)' AND valid_at = daterange('2018-01-01', '2018-02-01');
-ERROR: update or delete on table "tp1" violates foreign key constraint "temporal_partitioned_fk_rng2rng_parent_id_valid_at_fkey" on table "temporal_partitioned_fk_rng2rng"
-DETAIL: Key (id, valid_at)=([5,6), [2018-01-01,2018-02-01)) is still referenced from table "temporal_partitioned_fk_rng2rng".
--- --- partitioned FK referenced deletes RESTRICT --- -TRUNCATE temporal_partitioned_rng, temporal_partitioned_fk_rng2rng; -INSERT INTO temporal_partitioned_rng (id, valid_at) VALUES ('[5,6)', daterange('2018-01-01', '2018-02-01')); -INSERT INTO temporal_partitioned_rng (id, valid_at) VALUES ('[5,6)', daterange('2018-02-01', '2018-03-01')); -INSERT INTO temporal_partitioned_fk_rng2rng (id, valid_at, parent_id) VALUES ('[3,4)', daterange('2018-01-05', '2018-01-10'), '[5,6)'); -DELETE FROM temporal_partitioned_rng WHERE id = '[5,6)' AND valid_at = daterange('2018-02-01', '2018-03-01'); --- should fail: -DELETE FROM temporal_partitioned_rng WHERE id = '[5,6)' AND valid_at = daterange('2018-01-01', '2018-02-01'); -ERROR: update or delete on table "tp1" violates RESTRICT setting of foreign key constraint "temporal_partitioned_fk_rng2rng_parent_id_valid_at_fkey" on table "temporal_partitioned_fk_rng2rng" -DETAIL: Key (id, valid_at)=([5,6), [2018-01-01,2018-02-01)) is referenced from table "temporal_partitioned_fk_rng2rng". --- --- partitioned FK referenced updates CASCADE --- -ALTER TABLE temporal_partitioned_fk_rng2rng - DROP CONSTRAINT temporal_partitioned_fk_rng2rng_fk, - ADD CONSTRAINT temporal_partitioned_fk_rng2rng_fk - FOREIGN KEY (parent_id, PERIOD valid_at) - REFERENCES temporal_partitioned_rng - ON DELETE CASCADE ON UPDATE CASCADE; -ERROR: unsupported ON UPDATE action for foreign key constraint using PERIOD --- --- partitioned FK referenced deletes CASCADE --- --- --- partitioned FK referenced updates SET NULL --- -ALTER TABLE temporal_partitioned_fk_rng2rng - DROP CONSTRAINT temporal_partitioned_fk_rng2rng_fk, - ADD CONSTRAINT temporal_partitioned_fk_rng2rng_fk - FOREIGN KEY (parent_id, PERIOD valid_at) - REFERENCES temporal_partitioned_rng - ON DELETE SET NULL ON UPDATE SET NULL; -ERROR: unsupported ON UPDATE action for foreign key constraint using PERIOD --- --- partitioned FK referenced deletes SET NULL --- --- --- partitioned FK referenced updates SET DEFAULT --- -ALTER TABLE temporal_partitioned_fk_rng2rng - ALTER COLUMN parent_id SET DEFAULT '[-1,-1]', - DROP CONSTRAINT temporal_partitioned_fk_rng2rng_fk, - ADD CONSTRAINT temporal_partitioned_fk_rng2rng_fk - FOREIGN KEY (parent_id, PERIOD valid_at) - REFERENCES temporal_partitioned_rng - ON DELETE SET DEFAULT ON UPDATE SET DEFAULT; -ERROR: unsupported ON UPDATE action for foreign key constraint using PERIOD --- --- partitioned FK referenced deletes SET DEFAULT --- -DROP TABLE temporal_partitioned_fk_rng2rng; -DROP TABLE temporal_partitioned_rng; --- --- FK between partitioned tables: multiranges --- -CREATE TABLE temporal_partitioned_mltrng ( - id int4range, - valid_at datemultirange, - name text, - CONSTRAINT temporal_paritioned_mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) -) PARTITION BY LIST (id); -CREATE TABLE tp1 PARTITION OF temporal_partitioned_mltrng FOR VALUES IN ('[1,2)', '[3,4)', '[5,6)', '[7,8)', '[9,10)', '[11,12)', '[13,14)', '[15,16)', '[17,18)', '[19,20)', '[21,22)', '[23,24)'); -CREATE TABLE tp2 PARTITION OF temporal_partitioned_mltrng FOR VALUES IN ('[0,1)', '[2,3)', '[4,5)', '[6,7)', '[8,9)', '[10,11)', '[12,13)', '[14,15)', '[16,17)', '[18,19)', '[20,21)', '[22,23)', '[24,25)'); -INSERT INTO temporal_partitioned_mltrng (id, valid_at, name) VALUES - ('[1,2)', datemultirange(daterange('2000-01-01', '2000-02-01')), 'one'), - ('[1,2)', datemultirange(daterange('2000-02-01', '2000-03-01')), 'one'), - ('[2,3)', datemultirange(daterange('2000-01-01', '2010-01-01')), 'two'); -CREATE TABLE 
-  id int4range,
-  valid_at datemultirange,
-  parent_id int4range,
-  CONSTRAINT temporal_partitioned_fk_mltrng2mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),
-  CONSTRAINT temporal_partitioned_fk_mltrng2mltrng_fk FOREIGN KEY (parent_id, PERIOD valid_at)
-    REFERENCES temporal_partitioned_mltrng (id, PERIOD valid_at)
-) PARTITION BY LIST (id);
-CREATE TABLE tfkp1 PARTITION OF temporal_partitioned_fk_mltrng2mltrng FOR VALUES IN ('[1,2)', '[3,4)', '[5,6)', '[7,8)', '[9,10)', '[11,12)', '[13,14)', '[15,16)', '[17,18)', '[19,20)', '[21,22)', '[23,24)');
-CREATE TABLE tfkp2 PARTITION OF temporal_partitioned_fk_mltrng2mltrng FOR VALUES IN ('[0,1)', '[2,3)', '[4,5)', '[6,7)', '[8,9)', '[10,11)', '[12,13)', '[14,15)', '[16,17)', '[18,19)', '[20,21)', '[22,23)', '[24,25)');
---
--- partitioned FK referencing inserts
---
-INSERT INTO temporal_partitioned_fk_mltrng2mltrng (id, valid_at, parent_id) VALUES
-  ('[1,2)', datemultirange(daterange('2000-01-01', '2000-02-15')), '[1,2)'),
-  ('[1,2)', datemultirange(daterange('2001-01-01', '2002-01-01')), '[2,3)'),
-  ('[2,3)', datemultirange(daterange('2000-01-01', '2000-02-15')), '[1,2)');
--- should fail:
-INSERT INTO temporal_partitioned_fk_mltrng2mltrng (id, valid_at, parent_id) VALUES
-  ('[3,4)', datemultirange(daterange('2010-01-01', '2010-02-15')), '[1,2)');
-ERROR: insert or update on table "tfkp1" violates foreign key constraint "temporal_partitioned_fk_mltrng2mltrng_fk"
-DETAIL: Key (parent_id, valid_at)=([1,2), {[2010-01-01,2010-02-15)}) is not present in table "temporal_partitioned_mltrng".
-INSERT INTO temporal_partitioned_fk_mltrng2mltrng (id, valid_at, parent_id) VALUES
-  ('[3,4)', datemultirange(daterange('2000-01-01', '2000-02-15')), '[3,4)');
-ERROR: insert or update on table "tfkp1" violates foreign key constraint "temporal_partitioned_fk_mltrng2mltrng_fk"
-DETAIL: Key (parent_id, valid_at)=([3,4), {[2000-01-01,2000-02-15)}) is not present in table "temporal_partitioned_mltrng".
---
--- partitioned FK referencing updates
---
-UPDATE temporal_partitioned_fk_mltrng2mltrng SET valid_at = datemultirange(daterange('2000-01-01', '2000-02-13')) WHERE id = '[2,3)';
--- move a row from the first partition to the second
-UPDATE temporal_partitioned_fk_mltrng2mltrng SET id = '[4,5)' WHERE id = '[1,2)';
--- move a row from the second partition to the first
-UPDATE temporal_partitioned_fk_mltrng2mltrng SET id = '[1,2)' WHERE id = '[4,5)';
--- should fail:
-UPDATE temporal_partitioned_fk_mltrng2mltrng SET valid_at = datemultirange(daterange('2000-01-01', '2000-04-01')) WHERE id = '[1,2)';
-ERROR: conflicting key value violates exclusion constraint "tfkp1_pkey"
-DETAIL: Key (id, valid_at)=([1,2), {[2000-01-01,2000-04-01)}) conflicts with existing key (id, valid_at)=([1,2), {[2000-01-01,2000-04-01)}).
---
--- partitioned FK referenced updates NO ACTION
---
-TRUNCATE temporal_partitioned_mltrng, temporal_partitioned_fk_mltrng2mltrng;
-INSERT INTO temporal_partitioned_mltrng (id, valid_at) VALUES ('[5,6)', datemultirange(daterange('2016-01-01', '2016-02-01')));
-UPDATE temporal_partitioned_mltrng SET valid_at = datemultirange(daterange('2018-01-01', '2018-02-01')) WHERE id = '[5,6)';
-INSERT INTO temporal_partitioned_mltrng (id, valid_at) VALUES ('[5,6)', datemultirange(daterange('2018-02-01', '2018-03-01')));
-INSERT INTO temporal_partitioned_fk_mltrng2mltrng (id, valid_at, parent_id) VALUES ('[3,4)', datemultirange(daterange('2018-01-05', '2018-01-10')), '[5,6)');
-UPDATE temporal_partitioned_mltrng SET valid_at = datemultirange(daterange('2016-02-01', '2016-03-01'))
-  WHERE id = '[5,6)' AND valid_at = datemultirange(daterange('2018-02-01', '2018-03-01'));
--- should fail:
-UPDATE temporal_partitioned_mltrng SET valid_at = datemultirange(daterange('2016-01-01', '2016-02-01'))
-  WHERE id = '[5,6)' AND valid_at = datemultirange(daterange('2018-01-01', '2018-02-01'));
-ERROR: update or delete on table "tp1" violates foreign key constraint "temporal_partitioned_fk_mltrng2mltrng_parent_id_valid_at_fkey1" on table "temporal_partitioned_fk_mltrng2mltrng"
-DETAIL: Key (id, valid_at)=([5,6), {[2018-01-01,2018-02-01)}) is still referenced from table "temporal_partitioned_fk_mltrng2mltrng".
---
--- partitioned FK referenced deletes NO ACTION
---
-TRUNCATE temporal_partitioned_mltrng, temporal_partitioned_fk_mltrng2mltrng;
-INSERT INTO temporal_partitioned_mltrng (id, valid_at) VALUES ('[5,6)', datemultirange(daterange('2018-01-01', '2018-02-01')));
-INSERT INTO temporal_partitioned_mltrng (id, valid_at) VALUES ('[5,6)', datemultirange(daterange('2018-02-01', '2018-03-01')));
-INSERT INTO temporal_partitioned_fk_mltrng2mltrng (id, valid_at, parent_id) VALUES ('[3,4)', datemultirange(daterange('2018-01-05', '2018-01-10')), '[5,6)');
-DELETE FROM temporal_partitioned_mltrng WHERE id = '[5,6)' AND valid_at = datemultirange(daterange('2018-02-01', '2018-03-01'));
--- should fail:
-DELETE FROM temporal_partitioned_mltrng WHERE id = '[5,6)' AND valid_at = datemultirange(daterange('2018-01-01', '2018-02-01'));
-ERROR: update or delete on table "tp1" violates foreign key constraint "temporal_partitioned_fk_mltrng2mltrng_parent_id_valid_at_fkey1" on table "temporal_partitioned_fk_mltrng2mltrng"
-DETAIL: Key (id, valid_at)=([5,6), {[2018-01-01,2018-02-01)}) is still referenced from table "temporal_partitioned_fk_mltrng2mltrng".
---
--- partitioned FK referenced updates RESTRICT
---
-TRUNCATE temporal_partitioned_mltrng, temporal_partitioned_fk_mltrng2mltrng;
-ALTER TABLE temporal_partitioned_fk_mltrng2mltrng
-  DROP CONSTRAINT temporal_partitioned_fk_mltrng2mltrng_fk;
-ALTER TABLE temporal_partitioned_fk_mltrng2mltrng
-  ADD CONSTRAINT temporal_partitioned_fk_mltrng2mltrng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_partitioned_mltrng
-  ON DELETE RESTRICT;
-INSERT INTO temporal_partitioned_mltrng (id, valid_at) VALUES ('[5,6)', datemultirange(daterange('2016-01-01', '2016-02-01')));
-UPDATE temporal_partitioned_mltrng SET valid_at = datemultirange(daterange('2018-01-01', '2018-02-01')) WHERE id = '[5,6)';
-INSERT INTO temporal_partitioned_mltrng (id, valid_at) VALUES ('[5,6)', datemultirange(daterange('2018-02-01', '2018-03-01')));
-INSERT INTO temporal_partitioned_fk_mltrng2mltrng (id, valid_at, parent_id) VALUES ('[3,4)', datemultirange(daterange('2018-01-05', '2018-01-10')), '[5,6)');
-UPDATE temporal_partitioned_mltrng SET valid_at = datemultirange(daterange('2016-02-01', '2016-03-01'))
-  WHERE id = '[5,6)' AND valid_at = datemultirange(daterange('2018-02-01', '2018-03-01'));
--- should fail:
-UPDATE temporal_partitioned_mltrng SET valid_at = datemultirange(daterange('2016-01-01', '2016-02-01'))
-  WHERE id = '[5,6)' AND valid_at = datemultirange(daterange('2018-01-01', '2018-02-01'));
-ERROR: update or delete on table "tp1" violates foreign key constraint "temporal_partitioned_fk_mltrng2mltrng_parent_id_valid_at_fkey1" on table "temporal_partitioned_fk_mltrng2mltrng"
-DETAIL: Key (id, valid_at)=([5,6), {[2018-01-01,2018-02-01)}) is still referenced from table "temporal_partitioned_fk_mltrng2mltrng".
---
--- partitioned FK referenced deletes RESTRICT
---
-TRUNCATE temporal_partitioned_mltrng, temporal_partitioned_fk_mltrng2mltrng;
-INSERT INTO temporal_partitioned_mltrng (id, valid_at) VALUES ('[5,6)', datemultirange(daterange('2018-01-01', '2018-02-01')));
-INSERT INTO temporal_partitioned_mltrng (id, valid_at) VALUES ('[5,6)', datemultirange(daterange('2018-02-01', '2018-03-01')));
-INSERT INTO temporal_partitioned_fk_mltrng2mltrng (id, valid_at, parent_id) VALUES ('[3,4)', datemultirange(daterange('2018-01-05', '2018-01-10')), '[5,6)');
-DELETE FROM temporal_partitioned_mltrng WHERE id = '[5,6)' AND valid_at = datemultirange(daterange('2018-02-01', '2018-03-01'));
--- should fail:
-DELETE FROM temporal_partitioned_mltrng WHERE id = '[5,6)' AND valid_at = datemultirange(daterange('2018-01-01', '2018-02-01'));
-ERROR: update or delete on table "tp1" violates RESTRICT setting of foreign key constraint "temporal_partitioned_fk_mltrng2mltrng_parent_id_valid_at_fkey1" on table "temporal_partitioned_fk_mltrng2mltrng"
-DETAIL: Key (id, valid_at)=([5,6), {[2018-01-01,2018-02-01)}) is referenced from table "temporal_partitioned_fk_mltrng2mltrng".
---
--- partitioned FK referenced updates CASCADE
---
-ALTER TABLE temporal_partitioned_fk_mltrng2mltrng
-  DROP CONSTRAINT temporal_partitioned_fk_mltrng2mltrng_fk,
-  ADD CONSTRAINT temporal_partitioned_fk_mltrng2mltrng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_partitioned_mltrng
-  ON DELETE CASCADE ON UPDATE CASCADE;
-ERROR: unsupported ON UPDATE action for foreign key constraint using PERIOD
---
--- partitioned FK referenced deletes CASCADE
---
---
--- partitioned FK referenced updates SET NULL
---
-ALTER TABLE temporal_partitioned_fk_mltrng2mltrng
-  DROP CONSTRAINT temporal_partitioned_fk_mltrng2mltrng_fk,
-  ADD CONSTRAINT temporal_partitioned_fk_mltrng2mltrng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_partitioned_mltrng
-  ON DELETE SET NULL ON UPDATE SET NULL;
-ERROR: unsupported ON UPDATE action for foreign key constraint using PERIOD
---
--- partitioned FK referenced deletes SET NULL
---
---
--- partitioned FK referenced updates SET DEFAULT
---
-ALTER TABLE temporal_partitioned_fk_mltrng2mltrng
-  ALTER COLUMN parent_id SET DEFAULT '[0,1)',
-  DROP CONSTRAINT temporal_partitioned_fk_mltrng2mltrng_fk,
-  ADD CONSTRAINT temporal_partitioned_fk_mltrng2mltrng_fk
-  FOREIGN KEY (parent_id, PERIOD valid_at)
-  REFERENCES temporal_partitioned_mltrng
-  ON DELETE SET DEFAULT ON UPDATE SET DEFAULT;
-ERROR: unsupported ON UPDATE action for foreign key constraint using PERIOD
---
--- partitioned FK referenced deletes SET DEFAULT
---
-DROP TABLE temporal_partitioned_fk_mltrng2mltrng;
-DROP TABLE temporal_partitioned_mltrng;
-RESET datestyle;
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+	Is the server running locally and accepting connections on that socket?
diff -U3 /Users/admin/pgsql/src/test/regress/expected/rules.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/rules.out
--- /Users/admin/pgsql/src/test/regress/expected/rules.out	2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/rules.out	2024-12-13 13:20:10
@@ -1,3849 +1,2 @@
---
--- RULES
--- From Jan's original setup_ruletest.sql and run_ruletest.sql
--- - thomas 1998-09-13
---
---
--- Tables and rules for the view test
---
-create table rtest_t1 (a int4, b int4);
-create table rtest_t2 (a int4, b int4);
-create table rtest_t3 (a int4, b int4);
-create view rtest_v1 as select * from rtest_t1;
-create rule rtest_v1_ins as on insert to rtest_v1 do instead
-  insert into rtest_t1 values (new.a, new.b);
-create rule rtest_v1_upd as on update to rtest_v1 do instead
-  update rtest_t1 set a = new.a, b = new.b
-  where a = old.a;
-create rule rtest_v1_del as on delete to rtest_v1 do instead
-  delete from rtest_t1 where a = old.a;
--- Test comments
-COMMENT ON RULE rtest_v1_bad ON rtest_v1 IS 'bad rule';
-ERROR: rule "rtest_v1_bad" for relation "rtest_v1" does not exist
-COMMENT ON RULE rtest_v1_del ON rtest_v1 IS 'delete rule';
-COMMENT ON RULE rtest_v1_del ON rtest_v1 IS NULL;
---
--- Tables and rules for the constraint update/delete test
---
--- Note:
---  Now that we have multiple action rule support, we check
---  both possible syntaxes to define them (The last action
---  can but must not have a semicolon at the end).
---
-create table rtest_system (sysname text, sysdesc text);
-create table rtest_interface (sysname text, ifname text);
-create table rtest_person (pname text, pdesc text);
-create table rtest_admin (pname text, sysname text);
-create rule rtest_sys_upd as on update to rtest_system do also (
-  update rtest_interface set sysname = new.sysname
-    where sysname = old.sysname;
-  update rtest_admin set sysname = new.sysname
-    where sysname = old.sysname
-  );
-create rule rtest_sys_del as on delete to rtest_system do also (
-  delete from rtest_interface where sysname = old.sysname;
-  delete from rtest_admin where sysname = old.sysname;
-  );
-create rule rtest_pers_upd as on update to rtest_person do also
-  update rtest_admin set pname = new.pname where pname = old.pname;
-create rule rtest_pers_del as on delete to rtest_person do also
-  delete from rtest_admin where pname = old.pname;
---
--- Tables and rules for the logging test
---
-create table rtest_emp (ename char(20), salary numeric);
-create table rtest_emplog (ename char(20), who name, action char(10), newsal numeric, oldsal numeric);
-create table rtest_empmass (ename char(20), salary numeric);
-create rule rtest_emp_ins as on insert to rtest_emp do
-  insert into rtest_emplog values (new.ename, current_user,
-    'hired', new.salary, '0.00');
-create rule rtest_emp_upd as on update to rtest_emp where new.salary != old.salary do
-  insert into rtest_emplog values (new.ename, current_user,
-    'honored', new.salary, old.salary);
-create rule rtest_emp_del as on delete to rtest_emp do
-  insert into rtest_emplog values (old.ename, current_user,
-    'fired', '0.00', old.salary);
---
--- Tables and rules for the multiple cascaded qualified instead
--- rule test
---
-create table rtest_t4 (a int4, b text);
-create table rtest_t5 (a int4, b text);
-create table rtest_t6 (a int4, b text);
-create table rtest_t7 (a int4, b text);
-create table rtest_t8 (a int4, b text);
-create table rtest_t9 (a int4, b text);
-create rule rtest_t4_ins1 as on insert to rtest_t4
-  where new.a >= 10 and new.a < 20 do instead
-  insert into rtest_t5 values (new.a, new.b);
-create rule rtest_t4_ins2 as on insert to rtest_t4
-  where new.a >= 20 and new.a < 30 do
-  insert into rtest_t6 values (new.a, new.b);
-create rule rtest_t5_ins as on insert to rtest_t5
-  where new.a > 15 do
-  insert into rtest_t7 values (new.a, new.b);
-create rule rtest_t6_ins as on insert to rtest_t6
-  where new.a > 25 do instead
-  insert into rtest_t8 values (new.a, new.b);
---
--- Tables and rules for the rule fire order test
---
--- As of PG 7.3, the rules should fire in order by name, regardless
--- of INSTEAD attributes or creation order.
---
-create table rtest_order1 (a int4);
-create table rtest_order2 (a int4, b int4, c text);
-create sequence rtest_seq;
-create rule rtest_order_r3 as on insert to rtest_order1 do instead
-  insert into rtest_order2 values (new.a, nextval('rtest_seq'),
-    'rule 3 - this should run 3rd');
-create rule rtest_order_r4 as on insert to rtest_order1
-  where a < 100 do instead
-  insert into rtest_order2 values (new.a, nextval('rtest_seq'),
-    'rule 4 - this should run 4th');
-create rule rtest_order_r2 as on insert to rtest_order1 do
-  insert into rtest_order2 values (new.a, nextval('rtest_seq'),
-    'rule 2 - this should run 2nd');
-create rule rtest_order_r1 as on insert to rtest_order1 do instead
-  insert into rtest_order2 values (new.a, nextval('rtest_seq'),
-    'rule 1 - this should run 1st');
---
--- Tables and rules for the instead nothing test
---
-create table rtest_nothn1 (a int4, b text);
-create table rtest_nothn2 (a int4, b text);
-create table rtest_nothn3 (a int4, b text);
-create table rtest_nothn4 (a int4, b text);
-create rule rtest_nothn_r1 as on insert to rtest_nothn1
-  where new.a >= 10 and new.a < 20 do instead nothing;
-create rule rtest_nothn_r2 as on insert to rtest_nothn1
-  where new.a >= 30 and new.a < 40 do instead nothing;
-create rule rtest_nothn_r3 as on insert to rtest_nothn2
-  where new.a >= 100 do instead
-  insert into rtest_nothn3 values (new.a, new.b);
-create rule rtest_nothn_r4 as on insert to rtest_nothn2
-  do instead nothing;
---
--- Tests on a view that is select * of a table
--- and has insert/update/delete instead rules to
--- behave close like the real table.
---
---
--- We need test date later
---
-insert into rtest_t2 values (1, 21);
-insert into rtest_t2 values (2, 22);
-insert into rtest_t2 values (3, 23);
-insert into rtest_t3 values (1, 31);
-insert into rtest_t3 values (2, 32);
-insert into rtest_t3 values (3, 33);
-insert into rtest_t3 values (4, 34);
-insert into rtest_t3 values (5, 35);
--- insert values
-insert into rtest_v1 values (1, 11);
-insert into rtest_v1 values (2, 12);
-select * from rtest_v1;
- a | b 
----+----
- 1 | 11
- 2 | 12
-(2 rows)
-
--- delete with constant expression
-delete from rtest_v1 where a = 1;
-select * from rtest_v1;
- a | b 
----+----
- 2 | 12
-(1 row)
-
-insert into rtest_v1 values (1, 11);
-delete from rtest_v1 where b = 12;
-select * from rtest_v1;
- a | b 
----+----
- 1 | 11
-(1 row)
-
-insert into rtest_v1 values (2, 12);
-insert into rtest_v1 values (2, 13);
-select * from rtest_v1;
- a | b 
----+----
- 1 | 11
- 2 | 12
- 2 | 13
-(3 rows)
-
-** Remember the delete rule on rtest_v1: It says
-** DO INSTEAD DELETE FROM rtest_t1 WHERE a = old.a
-** So this time both rows with a = 2 must get deleted
-\p
-** Remember the delete rule on rtest_v1: It says
-** DO INSTEAD DELETE FROM rtest_t1 WHERE a = old.a
-** So this time both rows with a = 2 must get deleted
-\r
-delete from rtest_v1 where b = 12;
-select * from rtest_v1;
- a | b 
----+----
- 1 | 11
-(1 row)
-
-delete from rtest_v1;
--- insert select
-insert into rtest_v1 select * from rtest_t2;
-select * from rtest_v1;
- a | b 
----+----
- 1 | 21
- 2 | 22
- 3 | 23
-(3 rows)
-
-delete from rtest_v1;
--- same with swapped targetlist
-insert into rtest_v1 (b, a) select b, a from rtest_t2;
-select * from rtest_v1;
- a | b 
----+----
- 1 | 21
- 2 | 22
- 3 | 23
-(3 rows)
-
--- now with only one target attribute
-insert into rtest_v1 (a) select a from rtest_t3;
-select * from rtest_v1;
- a | b 
----+----
- 1 | 21
- 2 | 22
- 3 | 23
- 1 | 
- 2 | 
- 3 | 
- 4 | 
- 5 | 
-(8 rows)
-
-select * from rtest_v1 where b isnull;
- a | b 
----+---
- 1 | 
- 2 | 
- 3 | 
- 4 | 
- 5 | 
-(5 rows)
-
--- let attribute a differ (must be done on rtest_t1 - see above)
-update rtest_t1 set a = a + 10 where b isnull;
-delete from rtest_v1 where b isnull;
-select * from rtest_v1;
- a | b 
----+----
- 1 | 21
- 2 | 22
- 3 | 23
-(3 rows)
-
--- now updates with constant expression
-update rtest_v1 set b = 42 where a = 2;
-select * from rtest_v1;
- a | b 
----+----
- 1 | 21
- 3 | 23
- 2 | 42
-(3 rows)
-
-update rtest_v1 set b = 99 where b = 42;
-select * from rtest_v1;
- a | b 
----+----
- 1 | 21
- 3 | 23
- 2 | 99
-(3 rows)
-
-update rtest_v1 set b = 88 where b < 50;
-select * from rtest_v1;
- a | b 
----+----
- 2 | 99
- 1 | 88
- 3 | 88
-(3 rows)
-
-delete from rtest_v1;
-insert into rtest_v1 select rtest_t2.a, rtest_t3.b
-  from rtest_t2, rtest_t3
-  where rtest_t2.a = rtest_t3.a;
-select * from rtest_v1;
- a | b 
----+----
- 1 | 31
- 2 | 32
- 3 | 33
-(3 rows)
-
--- updates in a mergejoin
-update rtest_v1 set b = rtest_t2.b from rtest_t2 where rtest_v1.a = rtest_t2.a;
-select * from rtest_v1;
- a | b 
----+----
- 1 | 21
- 2 | 22
- 3 | 23
-(3 rows)
-
-insert into rtest_v1 select * from rtest_t3;
-select * from rtest_v1;
- a | b 
----+----
- 1 | 21
- 2 | 22
- 3 | 23
- 1 | 31
- 2 | 32
- 3 | 33
- 4 | 34
- 5 | 35
-(8 rows)
-
-update rtest_t1 set a = a + 10 where b > 30;
-select * from rtest_v1;
- a  | b 
-----+----
- 1 | 21
- 2 | 22
- 3 | 23
- 11 | 31
- 12 | 32
- 13 | 33
- 14 | 34
- 15 | 35
-(8 rows)
-
-update rtest_v1 set a = rtest_t3.a + 20 from rtest_t3 where rtest_v1.b = rtest_t3.b;
-select * from rtest_v1;
- a  | b 
-----+----
- 1 | 21
- 2 | 22
- 3 | 23
- 21 | 31
- 22 | 32
- 23 | 33
- 24 | 34
- 25 | 35
-(8 rows)
-
---
--- Test for constraint updates/deletes
---
-insert into rtest_system values ('orion', 'Linux Jan Wieck');
-insert into rtest_system values ('notjw', 'WinNT Jan Wieck (notebook)');
-insert into rtest_system values ('neptun', 'Fileserver');
-insert into rtest_interface values ('orion', 'eth0');
-insert into rtest_interface values ('orion', 'eth1');
-insert into rtest_interface values ('notjw', 'eth0');
-insert into rtest_interface values ('neptun', 'eth0');
-insert into rtest_person values ('jw', 'Jan Wieck');
-insert into rtest_person values ('bm', 'Bruce Momjian');
-insert into rtest_admin values ('jw', 'orion');
-insert into rtest_admin values ('jw', 'notjw');
-insert into rtest_admin values ('bm', 'neptun');
-update rtest_system set sysname = 'pluto' where sysname = 'neptun';
-select * from rtest_interface;
- sysname | ifname 
----------+--------
- orion | eth0
- orion | eth1
- notjw | eth0
- pluto | eth0
-(4 rows)
-
-select * from rtest_admin;
- pname | sysname 
--------+---------
- jw | orion
- jw | notjw
- bm | pluto
-(3 rows)
-
-update rtest_person set pname = 'jwieck' where pdesc = 'Jan Wieck';
--- Note: use ORDER BY here to ensure consistent output across all systems.
--- The above UPDATE affects two rows with equal keys, so they could be
--- updated in either order depending on the whim of the local qsort().
-select * from rtest_admin order by pname, sysname;
- pname | sysname 
---------+---------
- bm | pluto
- jwieck | notjw
- jwieck | orion
-(3 rows)
-
-delete from rtest_system where sysname = 'orion';
-select * from rtest_interface;
- sysname | ifname 
----------+--------
- notjw | eth0
- pluto | eth0
-(2 rows)
-
-select * from rtest_admin;
- pname | sysname 
---------+---------
- bm | pluto
- jwieck | notjw
-(2 rows)
-
---
--- Rule qualification test
---
-insert into rtest_emp values ('wiecc', '5000.00');
-insert into rtest_emp values ('gates', '80000.00');
-update rtest_emp set ename = 'wiecx' where ename = 'wiecc';
-update rtest_emp set ename = 'wieck', salary = '6000.00' where ename = 'wiecx';
-update rtest_emp set salary = '7000.00' where ename = 'wieck';
-delete from rtest_emp where ename = 'gates';
-select ename, who = current_user as "matches user", action, newsal, oldsal from rtest_emplog order by ename, action, newsal;
- ename | matches user | action | newsal | oldsal 
-----------------------+--------------+------------+----------+----------
- gates | t | fired | 0.00 | 80000.00
- gates | t | hired | 80000.00 | 0.00
- wiecc | t | hired | 5000.00 | 0.00
- wieck | t | honored | 6000.00 | 5000.00
- wieck | t | honored | 7000.00 | 6000.00
-(5 rows)
-
-insert into rtest_empmass values ('meyer', '4000.00');
-insert into rtest_empmass values ('maier', '5000.00');
-insert into rtest_empmass values ('mayr', '6000.00');
-insert into rtest_emp select * from rtest_empmass;
-select ename, who = current_user as "matches user", action, newsal, oldsal from rtest_emplog order by ename, action, newsal;
- ename | matches user | action | newsal | oldsal 
-----------------------+--------------+------------+----------+----------
- gates | t | fired | 0.00 | 80000.00
- gates | t | hired | 80000.00 | 0.00
- maier | t | hired | 5000.00 | 0.00
- mayr | t | hired | 6000.00 | 0.00
- meyer | t | hired | 4000.00 | 0.00
- wiecc | t | hired | 5000.00 | 0.00
- wieck | t | honored | 6000.00 | 5000.00
- wieck | t | honored | 7000.00 | 6000.00
-(8 rows)
-
-update rtest_empmass set salary = salary + '1000.00';
-update rtest_emp set salary = rtest_empmass.salary from rtest_empmass where rtest_emp.ename = rtest_empmass.ename;
-select ename, who = current_user as "matches user", action, newsal, oldsal from rtest_emplog order by ename, action, newsal;
- ename | matches user | action | newsal | oldsal 
-----------------------+--------------+------------+----------+----------
- gates | t | fired | 0.00 | 80000.00
- gates | t | hired | 80000.00 | 0.00
- maier | t | hired | 5000.00 | 0.00
- maier | t | honored | 6000.00 | 5000.00
- mayr | t | hired | 6000.00 | 0.00
- mayr | t | honored | 7000.00 | 6000.00
- meyer | t | hired | 4000.00 | 0.00
- meyer | t | honored | 5000.00 | 4000.00
- wiecc | t | hired | 5000.00 | 0.00
- wieck | t | honored | 6000.00 | 5000.00
- wieck | t | honored | 7000.00 | 6000.00
-(11 rows)
-
-delete from rtest_emp using rtest_empmass where rtest_emp.ename = rtest_empmass.ename;
-select ename, who = current_user as "matches user", action, newsal, oldsal from rtest_emplog order by ename, action, newsal;
- ename | matches user | action | newsal | oldsal 
-----------------------+--------------+------------+----------+----------
- gates | t | fired | 0.00 | 80000.00
- gates | t | hired | 80000.00 | 0.00
- maier | t | fired | 0.00 | 6000.00
- maier | t | hired | 5000.00 | 0.00
- maier | t | honored | 6000.00 | 5000.00
- mayr | t | fired | 0.00 | 7000.00
- mayr | t | hired | 6000.00 | 0.00
- mayr | t | honored | 7000.00 | 6000.00
- meyer | t | fired | 0.00 | 5000.00
- meyer | t | hired | 4000.00 | 0.00
- meyer | t | honored | 5000.00 | 4000.00
- wiecc | t | hired | 5000.00 | 0.00
- wieck | t | honored | 6000.00 | 5000.00
- wieck | t | honored | 7000.00 | 6000.00
-(14 rows)
-
---
--- Multiple cascaded qualified instead rule test
---
-insert into rtest_t4 values (1, 'Record should go to rtest_t4');
-insert into rtest_t4 values (2, 'Record should go to rtest_t4');
-insert into rtest_t4 values (10, 'Record should go to rtest_t5');
-insert into rtest_t4 values (15, 'Record should go to rtest_t5');
-insert into rtest_t4 values (19, 'Record should go to rtest_t5 and t7');
-insert into rtest_t4 values (20, 'Record should go to rtest_t4 and t6');
-insert into rtest_t4 values (26, 'Record should go to rtest_t4 and t8');
-insert into rtest_t4 values (28, 'Record should go to rtest_t4 and t8');
-insert into rtest_t4 values (30, 'Record should go to rtest_t4');
-insert into rtest_t4 values (40, 'Record should go to rtest_t4');
-select * from rtest_t4;
- a | b 
-----+-------------------------------------
- 1 | Record should go to rtest_t4
- 2 | Record should go to rtest_t4
- 20 | Record should go to rtest_t4 and t6
- 26 | Record should go to rtest_t4 and t8
- 28 | Record should go to rtest_t4 and t8
- 30 | Record should go to rtest_t4
- 40 | Record should go to rtest_t4
-(7 rows)
-
-select * from rtest_t5;
- a | b 
-----+-------------------------------------
- 10 | Record should go to rtest_t5
- 15 | Record should go to rtest_t5
- 19 | Record should go to rtest_t5 and t7
-(3 rows)
-
-select * from rtest_t6;
- a | b 
-----+-------------------------------------
- 20 | Record should go to rtest_t4 and t6
-(1 row)
-
-select * from rtest_t7;
- a | b 
-----+-------------------------------------
- 19 | Record should go to rtest_t5 and t7
-(1 row)
-
-select * from rtest_t8;
- a | b 
-----+-------------------------------------
- 26 | Record should go to rtest_t4 and t8
- 28 | Record should go to rtest_t4 and t8
-(2 rows)
-
-delete from rtest_t4;
-delete from rtest_t5;
-delete from rtest_t6;
-delete from rtest_t7;
-delete from rtest_t8;
-insert into rtest_t9 values (1, 'Record should go to rtest_t4');
-insert into rtest_t9 values (2, 'Record should go to rtest_t4');
-insert into rtest_t9 values (10, 'Record should go to rtest_t5');
-insert into rtest_t9 values (15, 'Record should go to rtest_t5');
-insert into rtest_t9 values (19, 'Record should go to rtest_t5 and t7');
-insert into rtest_t9 values (20, 'Record should go to rtest_t4 and t6');
-insert into rtest_t9 values (26, 'Record should go to rtest_t4 and t8');
-insert into rtest_t9 values (28, 'Record should go to rtest_t4 and t8');
-insert into rtest_t9 values (30, 'Record should go to rtest_t4');
-insert into rtest_t9 values (40, 'Record should go to rtest_t4');
-insert into rtest_t4 select * from rtest_t9 where a < 20;
-select * from rtest_t4;
- a | b 
----+------------------------------
- 1 | Record should go to rtest_t4
- 2 | Record should go to rtest_t4
-(2 rows)
-
-select * from rtest_t5;
- a | b 
-----+-------------------------------------
- 10 | Record should go to rtest_t5
- 15 | Record should go to rtest_t5
- 19 | Record should go to rtest_t5 and t7
-(3 rows)
-
-select * from rtest_t6;
- a | b 
----+---
-(0 rows)
-
-select * from rtest_t7;
- a | b 
-----+-------------------------------------
- 19 | Record should go to rtest_t5 and t7
-(1 row)
-
-select * from rtest_t8;
- a | b 
----+---
-(0 rows)
-
-insert into rtest_t4 select * from rtest_t9 where b ~ 'and t8';
-select * from rtest_t4;
- a | b 
-----+-------------------------------------
- 1 | Record should go to rtest_t4
- 2 | Record should go to rtest_t4
- 26 | Record should go to rtest_t4 and t8
- 28 | Record should go to rtest_t4 and t8
-(4 rows)
-
-select * from rtest_t5;
- a | b 
-----+-------------------------------------
- 10 | Record should go to rtest_t5
- 15 | Record should go to rtest_t5
- 19 | Record should go to rtest_t5 and t7
-(3 rows)
-
-select * from rtest_t6;
- a | b 
----+---
-(0 rows)
-
-select * from rtest_t7;
- a | b 
-----+-------------------------------------
- 19 | Record should go to rtest_t5 and t7
-(1 row)
-
-select * from rtest_t8;
- a | b 
-----+-------------------------------------
- 26 | Record should go to rtest_t4 and t8
- 28 | Record should go to rtest_t4 and t8
-(2 rows)
-
-insert into rtest_t4 select a + 1, b from rtest_t9 where a in (20, 30, 40);
-select * from rtest_t4;
- a | b 
-----+-------------------------------------
- 1 | Record should go to rtest_t4
- 2 | Record should go to rtest_t4
- 26 | Record should go to rtest_t4 and t8
- 28 | Record should go to rtest_t4 and t8
- 21 | Record should go to rtest_t4 and t6
- 31 | Record should go to rtest_t4
- 41 | Record should go to rtest_t4
-(7 rows)
-
-select * from rtest_t5;
- a | b 
-----+-------------------------------------
- 10 | Record should go to rtest_t5
- 15 | Record should go to rtest_t5
- 19 | Record should go to rtest_t5 and t7
-(3 rows)
-
-select * from rtest_t6;
- a | b 
-----+-------------------------------------
- 21 | Record should go to rtest_t4 and t6
-(1 row)
-
-select * from rtest_t7;
- a | b 
-----+-------------------------------------
- 19 | Record should go to rtest_t5 and t7
-(1 row)
-
-select * from rtest_t8;
- a | b 
-----+-------------------------------------
- 26 | Record should go to rtest_t4 and t8
- 28 | Record should go to rtest_t4 and t8
-(2 rows)
-
---
--- Check that the ordering of rules fired is correct
---
-insert into rtest_order1 values (1);
-select * from rtest_order2;
- a | b | c 
----+---+------------------------------
- 1 | 1 | rule 1 - this should run 1st
- 1 | 2 | rule 2 - this should run 2nd
- 1 | 3 | rule 3 - this should run 3rd
- 1 | 4 | rule 4 - this should run 4th
-(4 rows)
-
---
--- Check if instead nothing w/without qualification works
---
-insert into rtest_nothn1 values (1, 'want this');
-insert into rtest_nothn1 values (2, 'want this');
-insert into rtest_nothn1 values (10, 'don''t want this');
-insert into rtest_nothn1 values (19, 'don''t want this');
-insert into rtest_nothn1 values (20, 'want this');
-insert into rtest_nothn1 values (29, 'want this');
-insert into rtest_nothn1 values (30, 'don''t want this');
-insert into rtest_nothn1 values (39, 'don''t want this');
-insert into rtest_nothn1 values (40, 'want this');
-insert into rtest_nothn1 values (50, 'want this');
-insert into rtest_nothn1 values (60, 'want this');
-select * from rtest_nothn1;
- a | b 
-----+-----------
- 1 | want this
- 2 | want this
- 20 | want this
- 29 | want this
- 40 | want this
- 50 | want this
- 60 | want this
-(7 rows)
-
-insert into rtest_nothn2 values (10, 'too small');
-insert into rtest_nothn2 values (50, 'too small');
-insert into rtest_nothn2 values (100, 'OK');
-insert into rtest_nothn2 values (200, 'OK');
-select * from rtest_nothn2;
- a | b 
----+---
-(0 rows)
-
-select * from rtest_nothn3;
- a | b 
------+----
- 100 | OK
- 200 | OK
-(2 rows)
-
-delete from rtest_nothn1;
-delete from rtest_nothn2;
-delete from rtest_nothn3;
-insert into rtest_nothn4 values (1, 'want this');
-insert into rtest_nothn4 values (2, 'want this');
-insert into rtest_nothn4 values (10, 'don''t want this');
-insert into rtest_nothn4 values (19, 'don''t want this');
-insert into rtest_nothn4 values (20, 'want this');
-insert into rtest_nothn4 values (29, 'want this');
-insert into rtest_nothn4 values (30, 'don''t want this');
-insert into rtest_nothn4 values (39, 'don''t want this');
-insert into rtest_nothn4 values (40, 'want this');
-insert into rtest_nothn4 values (50, 'want this');
-insert into rtest_nothn4 values (60, 'want this');
-insert into rtest_nothn1 select * from rtest_nothn4;
-select * from rtest_nothn1;
- a | b 
-----+-----------
- 1 | want this
- 2 | want this
- 20 | want this
- 29 | want this
- 40 | want this
- 50 | want this
- 60 | want this
-(7 rows)
-
-delete from rtest_nothn4;
-insert into rtest_nothn4 values (10, 'too small');
-insert into rtest_nothn4 values (50, 'too small');
-insert into rtest_nothn4 values (100, 'OK');
-insert into rtest_nothn4 values (200, 'OK');
-insert into rtest_nothn2 select * from rtest_nothn4;
-select * from rtest_nothn2;
- a | b 
----+---
-(0 rows)
-
-select * from rtest_nothn3;
- a | b 
------+----
- 100 | OK
- 200 | OK
-(2 rows)
-
-create table rtest_view1 (a int4, b text, v bool);
-create table rtest_view2 (a int4);
-create table rtest_view3 (a int4, b text);
-create table rtest_view4 (a int4, b text, c int4);
-create view rtest_vview1 as select a, b from rtest_view1 X
-  where 0 < (select count(*) from rtest_view2 Y where Y.a = X.a);
-create view rtest_vview2 as select a, b from rtest_view1 where v;
-create view rtest_vview3 as select a, b from rtest_vview2 X
-  where 0 < (select count(*) from rtest_view2 Y where Y.a = X.a);
-create view rtest_vview4 as select X.a, X.b, count(Y.a) as refcount
-  from rtest_view1 X, rtest_view2 Y
-  where X.a = Y.a
-  group by X.a, X.b;
-create function rtest_viewfunc1(int4) returns int4 as
-  'select count(*)::int4 from rtest_view2 where a = $1'
-  language sql;
-create view rtest_vview5 as select a, b, rtest_viewfunc1(a) as refcount
-  from rtest_view1;
-insert into rtest_view1 values (1, 'item 1', 't');
-insert into rtest_view1 values (2, 'item 2', 't');
-insert into rtest_view1 values (3, 'item 3', 't');
-insert into rtest_view1 values (4, 'item 4', 'f');
-insert into rtest_view1 values (5, 'item 5', 't');
-insert into rtest_view1 values (6, 'item 6', 'f');
-insert into rtest_view1 values (7, 'item 7', 't');
-insert into rtest_view1 values (8, 'item 8', 't');
-insert into rtest_view2 values (2);
-insert into rtest_view2 values (2);
-insert into rtest_view2 values (4);
-insert into rtest_view2 values (5);
-insert into rtest_view2 values (7);
-insert into rtest_view2 values (7);
-insert into rtest_view2 values (7);
-insert into rtest_view2 values (7);
-select * from rtest_vview1;
- a | b 
----+--------
- 2 | item 2
- 4 | item 4
- 5 | item 5
- 7 | item 7
-(4 rows)
-
-select * from rtest_vview2;
- a | b 
----+--------
- 1 | item 1
- 2 | item 2
- 3 | item 3
- 5 | item 5
- 7 | item 7
- 8 | item 8
-(6 rows)
-
-select * from rtest_vview3;
- a | b 
----+--------
- 2 | item 2
- 5 | item 5
- 7 | item 7
-(3 rows)
-
-select * from rtest_vview4 order by a, b;
- a | b | refcount 
----+--------+----------
- 2 | item 2 | 2
- 4 | item 4 | 1
- 5 | item 5 | 1
- 7 | item 7 | 4
-(4 rows)
-
-select * from rtest_vview5;
- a | b | refcount 
----+--------+----------
- 1 | item 1 | 0
- 2 | item 2 | 2
- 3 | item 3 | 0
- 4 | item 4 | 1
- 5 | item 5 | 1
- 6 | item 6 | 0
- 7 | item 7 | 4
- 8 | item 8 | 0
-(8 rows)
-
-insert into rtest_view3 select * from rtest_vview1 where a < 7;
-select * from rtest_view3;
- a | b 
----+--------
- 2 | item 2
- 4 | item 4
- 5 | item 5
-(3 rows)
-
-delete from rtest_view3;
-insert into rtest_view3 select * from rtest_vview2 where a != 5 and b !~ '2';
-select * from rtest_view3;
- a | b 
----+--------
- 1 | item 1
- 3 | item 3
- 7 | item 7
- 8 | item 8
-(4 rows)
-
-delete from rtest_view3;
-insert into rtest_view3 select * from rtest_vview3;
-select * from rtest_view3;
- a | b 
----+--------
- 2 | item 2
- 5 | item 5
- 7 | item 7
-(3 rows)
-
-delete from rtest_view3;
-insert into rtest_view4 select * from rtest_vview4 where 3 > refcount;
-select * from rtest_view4 order by a, b;
- a | b | c 
----+--------+---
- 2 | item 2 | 2
- 4 | item 4 | 1
- 5 | item 5 | 1
-(3 rows)
-
-delete from rtest_view4;
-insert into rtest_view4 select * from rtest_vview5 where a > 2 and refcount = 0;
-select * from rtest_view4;
- a | b | c 
----+--------+---
- 3 | item 3 | 0
- 6 | item 6 | 0
- 8 | item 8 | 0
-(3 rows)
-
-delete from rtest_view4;
---
--- Test for computations in views
---
-create table rtest_comp (
-  part text,
-  unit char(4),
-  size float
-);
-create table rtest_unitfact (
-  unit char(4),
-  factor float
-);
-create view rtest_vcomp as
-  select X.part, (X.size * Y.factor) as size_in_cm
-  from rtest_comp X, rtest_unitfact Y
-  where X.unit = Y.unit;
-insert into rtest_unitfact values ('m', 100.0);
-insert into rtest_unitfact values ('cm', 1.0);
-insert into rtest_unitfact values ('inch', 2.54);
-insert into rtest_comp values ('p1', 'm', 5.0);
-insert into rtest_comp values ('p2', 'm', 3.0);
-insert into rtest_comp values ('p3', 'cm', 5.0);
-insert into rtest_comp values ('p4', 'cm', 15.0);
-insert into rtest_comp values ('p5', 'inch', 7.0);
-insert into rtest_comp values ('p6', 'inch', 4.4);
-select * from rtest_vcomp order by part;
- part | size_in_cm 
-------+--------------------
- p1 | 500
- p2 | 300
- p3 | 5
- p4 | 15
- p5 | 17.78
- p6 | 11.176000000000002
-(6 rows)
-
-select * from rtest_vcomp where size_in_cm > 10.0 order by size_in_cm using >;
- part | size_in_cm 
-------+--------------------
- p1 | 500
- p2 | 300
- p5 | 17.78
- p4 | 15
- p6 | 11.176000000000002
-(5 rows)
-
---
--- In addition run the (slightly modified) queries from the
--- programmers manual section on the rule system.
---
-CREATE TABLE shoe_data (
-    shoename   char(10),      -- primary key
-    sh_avail   integer,       -- available # of pairs
-    slcolor    char(10),      -- preferred shoelace color
-    slminlen   float,         -- minimum shoelace length
-    slmaxlen   float,         -- maximum shoelace length
-    slunit     char(8)        -- length unit
-);
-CREATE TABLE shoelace_data (
-    sl_name    char(10),      -- primary key
-    sl_avail   integer,       -- available # of pairs
-    sl_color   char(10),      -- shoelace color
-    sl_len     float,         -- shoelace length
-    sl_unit    char(8)        -- length unit
-);
-CREATE TABLE unit (
-    un_name    char(8),       -- the primary key
-    un_fact    float          -- factor to transform to cm
-);
-CREATE VIEW shoe AS
-    SELECT sh.shoename,
-           sh.sh_avail,
-           sh.slcolor,
-           sh.slminlen,
-           sh.slminlen * un.un_fact AS slminlen_cm,
-           sh.slmaxlen,
-           sh.slmaxlen * un.un_fact AS slmaxlen_cm,
-           sh.slunit
-      FROM shoe_data sh, unit un
-     WHERE sh.slunit = un.un_name;
-CREATE VIEW shoelace AS
-    SELECT s.sl_name,
-           s.sl_avail,
-           s.sl_color,
-           s.sl_len,
-           s.sl_unit,
-           s.sl_len * u.un_fact AS sl_len_cm
-      FROM shoelace_data s, unit u
-     WHERE s.sl_unit = u.un_name;
-CREATE VIEW shoe_ready AS
-    SELECT rsh.shoename,
-           rsh.sh_avail,
-           rsl.sl_name,
-           rsl.sl_avail,
-           int4smaller(rsh.sh_avail, rsl.sl_avail) AS total_avail
-      FROM shoe rsh, shoelace rsl
-     WHERE rsl.sl_color = rsh.slcolor
-       AND rsl.sl_len_cm >= rsh.slminlen_cm
-       AND rsl.sl_len_cm <= rsh.slmaxlen_cm;
-INSERT INTO unit VALUES ('cm', 1.0);
-INSERT INTO unit VALUES ('m', 100.0);
-INSERT INTO unit VALUES ('inch', 2.54);
-INSERT INTO shoe_data VALUES ('sh1', 2, 'black', 70.0, 90.0, 'cm');
-INSERT INTO shoe_data VALUES ('sh2', 0, 'black', 30.0, 40.0, 'inch');
-INSERT INTO shoe_data VALUES ('sh3', 4, 'brown', 50.0, 65.0, 'cm');
-INSERT INTO shoe_data VALUES ('sh4', 3, 'brown', 40.0, 50.0, 'inch');
-INSERT INTO shoelace_data VALUES ('sl1', 5, 'black', 80.0, 'cm');
-INSERT INTO shoelace_data VALUES ('sl2', 6, 'black', 100.0, 'cm');
-INSERT INTO shoelace_data VALUES ('sl3', 0, 'black', 35.0 , 'inch');
-INSERT INTO shoelace_data VALUES ('sl4', 8, 'black', 40.0 , 'inch');
-INSERT INTO shoelace_data VALUES ('sl5', 4, 'brown', 1.0 , 'm');
-INSERT INTO shoelace_data VALUES ('sl6', 0, 'brown', 0.9 , 'm');
-INSERT INTO shoelace_data VALUES ('sl7', 7, 'brown', 60 , 'cm');
-INSERT INTO shoelace_data VALUES ('sl8', 1, 'brown', 40 , 'inch');
--- SELECTs in doc
-SELECT * FROM shoelace ORDER BY sl_name;
- sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm 
-------------+----------+------------+--------+----------+-----------
- sl1 | 5 | black | 80 | cm | 80
- sl2 | 6 | black | 100 | cm | 100
- sl3 | 0 | black | 35 | inch | 88.9
- sl4 | 8 | black | 40 | inch | 101.6
- sl5 | 4 | brown | 1 | m | 100
- sl6 | 0 | brown | 0.9 | m | 90
- sl7 | 7 | brown | 60 | cm | 60
- sl8 | 1 | brown | 40 | inch | 101.6
-(8 rows)
-
-SELECT * FROM shoe_ready WHERE total_avail >= 2 ORDER BY 1;
- shoename | sh_avail | sl_name | sl_avail | total_avail 
------------+----------+------------+----------+-------------
- sh1 | 2 | sl1 | 5 | 2
- sh3 | 4 | sl7 | 7 | 4
-(2 rows)
-
- CREATE TABLE shoelace_log (
-     sl_name    char(10),     -- shoelace changed
-     sl_avail   integer,      -- new available value
-     log_who    name,         -- who did it
-     log_when   timestamp     -- when
- );
--- Want "log_who" to be CURRENT_USER,
--- but that is non-portable for the regression test
--- - thomas 1999-02-21
- CREATE RULE log_shoelace AS ON UPDATE TO shoelace_data
-     WHERE NEW.sl_avail != OLD.sl_avail
-     DO INSERT INTO shoelace_log VALUES (
-         NEW.sl_name,
-         NEW.sl_avail,
-         'Al Bundy',
-         'epoch'
-     );
-UPDATE shoelace_data SET sl_avail = 6 WHERE sl_name = 'sl7';
-SELECT * FROM shoelace_log;
- sl_name | sl_avail | log_who | log_when 
------------+----------+----------+--------------------------
- sl7 | 6 | Al Bundy | Thu Jan 01 00:00:00 1970
-(1 row)
-
- CREATE RULE shoelace_ins AS ON INSERT TO shoelace
-     DO INSTEAD
-     INSERT INTO shoelace_data VALUES (
-         NEW.sl_name,
-         NEW.sl_avail,
-         NEW.sl_color,
-         NEW.sl_len,
-         NEW.sl_unit);
- CREATE RULE shoelace_upd AS ON UPDATE TO shoelace
-     DO INSTEAD
-     UPDATE shoelace_data SET
-         sl_name = NEW.sl_name,
-         sl_avail = NEW.sl_avail,
-         sl_color = NEW.sl_color,
-         sl_len = NEW.sl_len,
-         sl_unit = NEW.sl_unit
-     WHERE sl_name = OLD.sl_name;
- CREATE RULE shoelace_del AS ON DELETE TO shoelace
-     DO INSTEAD
-     DELETE FROM shoelace_data
-     WHERE sl_name = OLD.sl_name;
- CREATE TABLE shoelace_arrive (
-     arr_name    char(10),
-     arr_quant   integer
- );
- CREATE TABLE shoelace_ok (
-     ok_name     char(10),
-     ok_quant    integer
- );
- CREATE RULE shoelace_ok_ins AS ON INSERT TO shoelace_ok
-     DO INSTEAD
-     UPDATE shoelace SET
-         sl_avail = sl_avail + NEW.ok_quant
-     WHERE sl_name = NEW.ok_name;
-INSERT INTO shoelace_arrive VALUES ('sl3', 10);
-INSERT INTO shoelace_arrive VALUES ('sl6', 20);
-INSERT INTO shoelace_arrive VALUES ('sl8', 20);
-SELECT * FROM shoelace ORDER BY sl_name;
- sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm 
-------------+----------+------------+--------+----------+-----------
- sl1 | 5 | black | 80 | cm | 80
- sl2 | 6 | black | 100 | cm | 100
- sl3 | 0 | black | 35 | inch | 88.9
- sl4 | 8 | black | 40 | inch | 101.6
- sl5 | 4 | brown | 1 | m | 100
- sl6 | 0 | brown | 0.9 | m | 90
- sl7 | 6 | brown | 60 | cm | 60
- sl8 | 1 | brown | 40 | inch | 101.6
-(8 rows)
-
-insert into shoelace_ok select * from shoelace_arrive;
-SELECT * FROM shoelace ORDER BY sl_name;
- sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm 
-------------+----------+------------+--------+----------+-----------
- sl1 | 5 | black | 80 | cm | 80
- sl2 | 6 | black | 100 | cm | 100
- sl3 | 10 | black | 35 | inch | 88.9
- sl4 | 8 | black | 40 | inch | 101.6
- sl5 | 4 | brown | 1 | m | 100
- sl6 | 20 | brown | 0.9 | m | 90
- sl7 | 6 | brown | 60 | cm | 60
- sl8 | 21 | brown | 40 | inch | 101.6
-(8 rows)
-
-SELECT * FROM shoelace_log ORDER BY sl_name;
- sl_name | sl_avail | log_who | log_when 
------------+----------+----------+--------------------------
- sl3 | 10 | Al Bundy | Thu Jan 01 00:00:00 1970
- sl6 | 20 | Al Bundy | Thu Jan 01 00:00:00 1970
- sl7 | 6 | Al Bundy | Thu Jan 01 00:00:00 1970
- sl8 | 21 | Al Bundy | Thu Jan 01 00:00:00 1970
-(4 rows)
-
- CREATE VIEW shoelace_obsolete AS
-     SELECT * FROM shoelace WHERE NOT EXISTS
-         (SELECT shoename FROM shoe WHERE slcolor = sl_color);
- CREATE VIEW shoelace_candelete AS
-     SELECT * FROM shoelace_obsolete WHERE sl_avail = 0;
-insert into shoelace values ('sl9', 0, 'pink', 35.0, 'inch', 0.0);
-insert into shoelace values ('sl10', 1000, 'magenta', 40.0, 'inch', 0.0);
--- Unsupported (even though a similar updatable view construct is)
-insert into shoelace values ('sl10', 1000, 'magenta', 40.0, 'inch', 0.0)
-  on conflict do nothing;
-ERROR: INSERT with ON CONFLICT clause cannot be used with table that has INSERT or UPDATE rules
-SELECT * FROM shoelace_obsolete ORDER BY sl_len_cm;
- sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm 
-------------+----------+------------+--------+----------+-----------
- sl9 | 0 | pink | 35 | inch | 88.9
- sl10 | 1000 | magenta | 40 | inch | 101.6
-(2 rows)
-
-SELECT * FROM shoelace_candelete;
- sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm 
-------------+----------+------------+--------+----------+-----------
- sl9 | 0 | pink | 35 | inch | 88.9
-(1 row)
-
-DELETE FROM shoelace WHERE EXISTS
-    (SELECT * FROM shoelace_candelete
-     WHERE sl_name = shoelace.sl_name);
-SELECT * FROM shoelace ORDER BY sl_name;
- sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm 
-------------+----------+------------+--------+----------+-----------
- sl1 | 5 | black | 80 | cm | 80
- sl10 | 1000 | magenta | 40 | inch | 101.6
- sl2 | 6 | black | 100 | cm | 100
- sl3 | 10 | black | 35 | inch | 88.9
- sl4 | 8 | black | 40 | inch | 101.6
- sl5 | 4 | brown | 1 | m | 100
- sl6 | 20 | brown | 0.9 | m | 90
- sl7 | 6 | brown | 60 | cm | 60
- sl8 | 21 | brown | 40 | inch | 101.6
-(9 rows)
-
-SELECT * FROM shoe ORDER BY shoename;
- shoename | sh_avail | slcolor | slminlen | slminlen_cm | slmaxlen | slmaxlen_cm | slunit 
------------+----------+------------+----------+-------------+----------+-------------+----------
- sh1 | 2 | black | 70 | 70 | 90 | 90 | cm
- sh2 | 0 | black | 30 | 76.2 | 40 | 101.6 | inch
- sh3 | 4 | brown | 50 | 50 | 65 | 65 | cm
- sh4 | 3 | brown | 40 | 101.6 | 50 | 127 | inch
-(4 rows)
-
-SELECT count(*) FROM shoe;
- count 
--------
- 4
-(1 row)
-
---
--- Simple test of qualified ON INSERT ... this did not work in 7.0 ...
---
-create table rules_foo (f1 int);
-create table rules_foo2 (f1 int);
-create rule rules_foorule as on insert to rules_foo where f1 < 100
-do instead nothing;
-insert into rules_foo values(1);
-insert into rules_foo values(1001);
-select * from rules_foo;
- f1 
------
- 1001
-(1 row)
-
-drop rule rules_foorule on rules_foo;
--- this should fail because f1 is not exposed for unqualified reference:
-create rule rules_foorule as on insert to rules_foo where f1 < 100
-do instead insert into rules_foo2 values (f1);
-ERROR: column "f1" does not exist
-LINE 2: do instead insert into rules_foo2 values (f1);
-                                                  ^
-DETAIL: There are columns named "f1", but they are in tables that cannot be referenced from this part of the query.
-HINT: Try using a table-qualified name.
--- this is the correct way:
-create rule rules_foorule as on insert to rules_foo where f1 < 100
-do instead insert into rules_foo2 values (new.f1);
-insert into rules_foo values(2);
-insert into rules_foo values(100);
-select * from rules_foo;
- f1 
------
- 1001
- 100
-(2 rows)
-
-select * from rules_foo2;
- f1 
----
- 2
-(1 row)
-
-drop rule rules_foorule on rules_foo;
-drop table rules_foo;
-drop table rules_foo2;
---
--- Test rules containing INSERT ... SELECT, which is a very ugly special
--- case as of 7.1.  Example is based on bug report from Joel Burton.
---
-create table pparent (pid int, txt text);
-insert into pparent values (1,'parent1');
-insert into pparent values (2,'parent2');
-create table cchild (pid int, descrip text);
-insert into cchild values (1,'descrip1');
-create view vview as
-  select pparent.pid, txt, descrip from
-    pparent left join cchild using (pid);
-create rule rrule as
-  on update to vview do instead
-(
-  insert into cchild (pid, descrip)
-    select old.pid, new.descrip where old.descrip isnull;
-  update cchild set descrip = new.descrip where cchild.pid = old.pid;
-);
-select * from vview;
- pid | txt | descrip 
------+---------+----------
- 1 | parent1 | descrip1
- 2 | parent2 | 
-(2 rows)
-
-update vview set descrip='test1' where pid=1;
-select * from vview;
- pid | txt | descrip 
------+---------+---------
- 1 | parent1 | test1
- 2 | parent2 | 
-(2 rows)
-
-update vview set descrip='test2' where pid=2;
-select * from vview;
- pid | txt | descrip 
------+---------+---------
- 1 | parent1 | test1
- 2 | parent2 | test2
-(2 rows)
-
-update vview set descrip='test3' where pid=3;
-select * from vview;
- pid | txt | descrip 
------+---------+---------
- 1 | parent1 | test1
- 2 | parent2 | test2
-(2 rows)
-
-select * from cchild;
- pid | descrip 
------+---------
- 1 | test1
- 2 | test2
-(2 rows)
-
-drop rule rrule on vview;
-drop view vview;
-drop table pparent;
-drop table cchild;
---
--- Check that ruleutils are working
---
--- temporarily disable fancy output, so view changes create less diff noise
-\a\t
-SELECT viewname, definition FROM pg_views
-WHERE schemaname = 'pg_catalog'
-ORDER BY viewname;
-pg_available_extension_versions| SELECT e.name,
-    e.version,
-    (x.extname IS NOT NULL) AS installed,
-    e.superuser,
-    e.trusted,
-    e.relocatable,
-    e.schema,
-    e.requires,
-    e.comment
-   FROM (pg_available_extension_versions() e(name, version, superuser, trusted, relocatable, schema, requires, comment)
-     LEFT JOIN pg_extension x ON (((e.name = x.extname) AND (e.version = x.extversion))));
-pg_available_extensions| SELECT e.name,
-    e.default_version,
-    x.extversion AS installed_version,
-    e.comment
-   FROM (pg_available_extensions() e(name, default_version, comment)
-     LEFT JOIN pg_extension x ON ((e.name = x.extname)));
-pg_backend_memory_contexts| SELECT name,
-    ident,
-    type,
-    level,
-    path,
-    total_bytes,
-    total_nblocks,
-    free_bytes,
-    free_chunks,
-    used_bytes
-   FROM pg_get_backend_memory_contexts() pg_get_backend_memory_contexts(name, ident, type, level, path, total_bytes, total_nblocks, free_bytes, free_chunks, used_bytes);
-pg_config| SELECT name,
-    setting
-   FROM pg_config() pg_config(name, setting);
-pg_cursors| SELECT name,
-    statement,
-    is_holdable,
-    is_binary,
-    is_scrollable,
-    creation_time
-   FROM pg_cursor() c(name, statement, is_holdable, is_binary, is_scrollable, creation_time);
-pg_file_settings| SELECT sourcefile,
-    sourceline,
-    seqno,
-    name,
-    setting,
-    applied,
-    error
-   FROM pg_show_all_file_settings() a(sourcefile, sourceline, seqno, name, setting, applied, error);
-pg_group| SELECT rolname AS groname,
-    oid AS grosysid,
-    ARRAY( SELECT pg_auth_members.member
-           FROM pg_auth_members
-          WHERE (pg_auth_members.roleid = pg_authid.oid)) AS grolist
-   FROM pg_authid
-  WHERE (NOT rolcanlogin);
-pg_hba_file_rules| SELECT rule_number,
-    file_name,
-    line_number,
-    type,
-    database,
-    user_name,
-    address,
-    netmask,
-    auth_method,
-    options,
-    error
-   FROM pg_hba_file_rules() a(rule_number, file_name, line_number, type, database, user_name, address, netmask, auth_method, options, error);
-pg_ident_file_mappings| SELECT map_number,
-    file_name,
-    line_number,
-    map_name,
-    sys_name,
-    pg_username,
-    error
-   FROM pg_ident_file_mappings() a(map_number, file_name, line_number, map_name, sys_name, pg_username, error);
-pg_indexes| SELECT n.nspname AS schemaname,
-    c.relname AS tablename,
-    i.relname AS indexname,
-    t.spcname AS tablespace,
-    pg_get_indexdef(i.oid) AS indexdef
-   FROM ((((pg_index x
-     JOIN pg_class c ON ((c.oid = x.indrelid)))
-     JOIN pg_class i ON ((i.oid = x.indexrelid)))
-     LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace)))
-     LEFT JOIN pg_tablespace t ON ((t.oid = i.reltablespace)))
-  WHERE ((c.relkind = ANY (ARRAY['r'::"char", 'm'::"char", 'p'::"char"])) AND (i.relkind = ANY (ARRAY['i'::"char", 'I'::"char"])));
-pg_locks| SELECT locktype,
-    database,
-    relation,
-    page,
-    tuple,
-    virtualxid,
-    transactionid,
-    classid,
-    objid,
-    objsubid,
-    virtualtransaction,
-    pid,
-    mode,
-    granted,
-    fastpath,
-    waitstart
-   FROM pg_lock_status() l(locktype, database, relation, page, tuple, virtualxid, transactionid, classid, objid, objsubid, virtualtransaction, pid, mode, granted, fastpath, waitstart);
-pg_matviews| SELECT n.nspname AS schemaname,
-    c.relname AS matviewname,
-    pg_get_userbyid(c.relowner) AS matviewowner,
-    t.spcname AS tablespace,
-    c.relhasindex AS hasindexes,
-    c.relispopulated AS ispopulated,
-    pg_get_viewdef(c.oid) AS definition
-   FROM ((pg_class c
-     LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace)))
-     LEFT JOIN pg_tablespace t ON ((t.oid = c.reltablespace)))
-  WHERE (c.relkind = 'm'::"char");
-pg_policies| SELECT n.nspname AS schemaname,
-    c.relname AS tablename,
-    pol.polname AS policyname,
-        CASE
-            WHEN pol.polpermissive THEN 'PERMISSIVE'::text
-            ELSE 'RESTRICTIVE'::text
-        END AS permissive,
-        CASE
-            WHEN (pol.polroles = '{0}'::oid[]) THEN (string_to_array('public'::text, ''::text))::name[]
-            ELSE ARRAY( SELECT pg_authid.rolname
-               FROM pg_authid
-              WHERE (pg_authid.oid = ANY (pol.polroles))
-              ORDER BY pg_authid.rolname)
-        END AS roles,
-        CASE pol.polcmd
-            WHEN 'r'::"char" THEN 'SELECT'::text
-            WHEN 'a'::"char" THEN 'INSERT'::text
-            WHEN 'w'::"char" THEN 'UPDATE'::text
-            WHEN 'd'::"char" THEN 'DELETE'::text
-            WHEN '*'::"char" THEN 'ALL'::text
-            ELSE NULL::text
-        END AS cmd,
-    pg_get_expr(pol.polqual, pol.polrelid) AS qual,
-    pg_get_expr(pol.polwithcheck, pol.polrelid) AS with_check
-   FROM ((pg_policy pol
-     JOIN pg_class c ON ((c.oid = pol.polrelid)))
-     LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace)));
-pg_prepared_statements| SELECT name,
-    statement,
-    prepare_time,
-    parameter_types,
-    result_types,
-    from_sql,
-    generic_plans,
-    custom_plans
-   FROM pg_prepared_statement() p(name, statement, prepare_time, parameter_types, result_types, from_sql, generic_plans, custom_plans);
-pg_prepared_xacts| SELECT p.transaction,
-    p.gid,
-    p.prepared,
-    u.rolname AS owner,
-    d.datname AS database
-   FROM ((pg_prepared_xact() p(transaction, gid, prepared, ownerid, dbid)
-     LEFT JOIN pg_authid u ON ((p.ownerid = u.oid)))
-     LEFT JOIN pg_database d ON ((p.dbid = d.oid)));
-pg_publication_tables| SELECT p.pubname,
-    n.nspname AS schemaname,
-    c.relname AS tablename,
-    ( SELECT array_agg(a.attname ORDER BY a.attnum) AS array_agg
-           FROM pg_attribute a
-          WHERE ((a.attrelid = gpt.relid) AND (a.attnum = ANY ((gpt.attrs)::smallint[])))) AS attnames,
-    pg_get_expr(gpt.qual, gpt.relid) AS rowfilter
-   FROM pg_publication p,
-    LATERAL pg_get_publication_tables(VARIADIC ARRAY[(p.pubname)::text]) gpt(pubid, relid, attrs, qual),
-    (pg_class c
-     JOIN pg_namespace n ON ((n.oid = c.relnamespace)))
-  WHERE (c.oid = gpt.relid);
-pg_replication_origin_status| SELECT local_id,
-    external_id,
-    remote_lsn,
-    local_lsn
-   FROM pg_show_replication_origin_status() pg_show_replication_origin_status(local_id, external_id, remote_lsn, local_lsn);
-pg_replication_slots| SELECT l.slot_name,
-    l.plugin,
-    l.slot_type,
-    l.datoid,
-    d.datname AS database,
-    l.temporary,
-    l.active,
-    l.active_pid,
-    l.xmin,
-    l.catalog_xmin,
-    l.restart_lsn,
-    l.confirmed_flush_lsn,
-    l.wal_status,
-    l.safe_wal_size,
-    l.two_phase,
-    l.inactive_since,
-    l.conflicting,
-    l.invalidation_reason,
-    l.failover,
-    l.synced
-   FROM (pg_get_replication_slots() l(slot_name, plugin, slot_type, datoid, temporary, active, active_pid, xmin, catalog_xmin, restart_lsn, confirmed_flush_lsn, wal_status, safe_wal_size, two_phase, inactive_since, conflicting, invalidation_reason, failover, synced)
-     LEFT JOIN pg_database d ON ((l.datoid = d.oid)));
-pg_roles| SELECT pg_authid.rolname,
-    pg_authid.rolsuper,
-    pg_authid.rolinherit,
-    pg_authid.rolcreaterole,
-    pg_authid.rolcreatedb,
-    pg_authid.rolcanlogin,
-    pg_authid.rolreplication,
-    pg_authid.rolconnlimit,
-    '********'::text AS rolpassword,
-    pg_authid.rolvaliduntil,
-    pg_authid.rolbypassrls,
-    s.setconfig AS rolconfig,
-    pg_authid.oid
-   FROM (pg_authid
-     LEFT JOIN pg_db_role_setting s ON (((pg_authid.oid = s.setrole) AND (s.setdatabase = (0)::oid))));
-pg_rules| SELECT n.nspname AS schemaname,
-    c.relname AS tablename,
-    r.rulename,
-    pg_get_ruledef(r.oid) AS definition
-   FROM ((pg_rewrite r
-     JOIN pg_class c ON ((c.oid = r.ev_class)))
-     LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace)))
-  WHERE (r.rulename <> '_RETURN'::name);
-pg_seclabels| SELECT l.objoid,
-    l.classoid,
-    l.objsubid,
-        CASE
-            WHEN (rel.relkind = ANY (ARRAY['r'::"char", 'p'::"char"])) THEN 'table'::text
-            WHEN (rel.relkind = 'v'::"char") THEN 'view'::text
-            WHEN (rel.relkind = 'm'::"char") THEN 'materialized view'::text
-            WHEN (rel.relkind = 'S'::"char") THEN 'sequence'::text
-            WHEN (rel.relkind = 'f'::"char") THEN 'foreign table'::text
-            ELSE NULL::text
-        END AS objtype,
-    rel.relnamespace AS objnamespace,
-        CASE
-            WHEN pg_table_is_visible(rel.oid) THEN quote_ident((rel.relname)::text)
-            ELSE ((quote_ident((nsp.nspname)::text) || '.'::text) || quote_ident((rel.relname)::text))
-        END AS objname,
-    l.provider,
-    l.label
-   FROM ((pg_seclabel l
-     JOIN pg_class rel ON (((l.classoid = rel.tableoid) AND (l.objoid = rel.oid))))
-     JOIN pg_namespace nsp ON ((rel.relnamespace = nsp.oid)))
-  WHERE (l.objsubid = 0)
-UNION ALL
- SELECT l.objoid,
-    l.classoid,
-    l.objsubid,
-    'column'::text AS objtype,
-    rel.relnamespace AS objnamespace,
-    ((
-        CASE
-            WHEN pg_table_is_visible(rel.oid) THEN quote_ident((rel.relname)::text)
-            ELSE ((quote_ident((nsp.nspname)::text) || '.'::text) || quote_ident((rel.relname)::text))
-        END || '.'::text) || (att.attname)::text) AS objname,
-    l.provider,
-    l.label
-   FROM (((pg_seclabel l
-     JOIN pg_class rel ON (((l.classoid = rel.tableoid) AND (l.objoid = rel.oid))))
-     JOIN pg_attribute att ON (((rel.oid = att.attrelid) AND (l.objsubid = att.attnum))))
-     JOIN pg_namespace nsp ON ((rel.relnamespace = nsp.oid)))
-  WHERE (l.objsubid <> 0)
-UNION ALL
- SELECT l.objoid,
-    l.classoid,
-    l.objsubid,
-        CASE pro.prokind
-            WHEN 'a'::"char" THEN 'aggregate'::text
-            WHEN 'f'::"char" THEN 'function'::text
-            WHEN 'p'::"char" THEN 'procedure'::text
-            WHEN 'w'::"char" THEN 'window'::text
-            ELSE NULL::text
-        END AS objtype,
-    pro.pronamespace AS objnamespace,
-    (((
-        CASE
-            WHEN pg_function_is_visible(pro.oid) THEN quote_ident((pro.proname)::text)
pg_function_is_visible(pro.oid) THEN quote_ident((pro.proname)::text) - ELSE ((quote_ident((nsp.nspname)::text) || '.'::text) || quote_ident((pro.proname)::text)) - END || '('::text) || pg_get_function_arguments(pro.oid)) || ')'::text) AS objname, - l.provider, - l.label - FROM ((pg_seclabel l - JOIN pg_proc pro ON (((l.classoid = pro.tableoid) AND (l.objoid = pro.oid)))) - JOIN pg_namespace nsp ON ((pro.pronamespace = nsp.oid))) - WHERE (l.objsubid = 0) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - CASE - WHEN (typ.typtype = 'd'::"char") THEN 'domain'::text - ELSE 'type'::text - END AS objtype, - typ.typnamespace AS objnamespace, - CASE - WHEN pg_type_is_visible(typ.oid) THEN quote_ident((typ.typname)::text) - ELSE ((quote_ident((nsp.nspname)::text) || '.'::text) || quote_ident((typ.typname)::text)) - END AS objname, - l.provider, - l.label - FROM ((pg_seclabel l - JOIN pg_type typ ON (((l.classoid = typ.tableoid) AND (l.objoid = typ.oid)))) - JOIN pg_namespace nsp ON ((typ.typnamespace = nsp.oid))) - WHERE (l.objsubid = 0) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - 'large object'::text AS objtype, - NULL::oid AS objnamespace, - (l.objoid)::text AS objname, - l.provider, - l.label - FROM (pg_seclabel l - JOIN pg_largeobject_metadata lom ON ((l.objoid = lom.oid))) - WHERE ((l.classoid = ('pg_largeobject'::regclass)::oid) AND (l.objsubid = 0)) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - 'language'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((lan.lanname)::text) AS objname, - l.provider, - l.label - FROM (pg_seclabel l - JOIN pg_language lan ON (((l.classoid = lan.tableoid) AND (l.objoid = lan.oid)))) - WHERE (l.objsubid = 0) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - 'schema'::text AS objtype, - nsp.oid AS objnamespace, - quote_ident((nsp.nspname)::text) AS objname, - l.provider, - l.label - FROM (pg_seclabel l - JOIN pg_namespace nsp ON (((l.classoid = nsp.tableoid) AND (l.objoid = nsp.oid)))) - WHERE (l.objsubid = 0) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - 'event trigger'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((evt.evtname)::text) AS objname, - l.provider, - l.label - FROM (pg_seclabel l - JOIN pg_event_trigger evt ON (((l.classoid = evt.tableoid) AND (l.objoid = evt.oid)))) - WHERE (l.objsubid = 0) -UNION ALL - SELECT l.objoid, - l.classoid, - l.objsubid, - 'publication'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((p.pubname)::text) AS objname, - l.provider, - l.label - FROM (pg_seclabel l - JOIN pg_publication p ON (((l.classoid = p.tableoid) AND (l.objoid = p.oid)))) - WHERE (l.objsubid = 0) -UNION ALL - SELECT l.objoid, - l.classoid, - 0 AS objsubid, - 'subscription'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((s.subname)::text) AS objname, - l.provider, - l.label - FROM (pg_shseclabel l - JOIN pg_subscription s ON (((l.classoid = s.tableoid) AND (l.objoid = s.oid)))) -UNION ALL - SELECT l.objoid, - l.classoid, - 0 AS objsubid, - 'database'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((dat.datname)::text) AS objname, - l.provider, - l.label - FROM (pg_shseclabel l - JOIN pg_database dat ON (((l.classoid = dat.tableoid) AND (l.objoid = dat.oid)))) -UNION ALL - SELECT l.objoid, - l.classoid, - 0 AS objsubid, - 'tablespace'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((spc.spcname)::text) AS objname, - l.provider, - l.label - FROM (pg_shseclabel l - JOIN pg_tablespace spc ON (((l.classoid = 
spc.tableoid) AND (l.objoid = spc.oid)))) -UNION ALL - SELECT l.objoid, - l.classoid, - 0 AS objsubid, - 'role'::text AS objtype, - NULL::oid AS objnamespace, - quote_ident((rol.rolname)::text) AS objname, - l.provider, - l.label - FROM (pg_shseclabel l - JOIN pg_authid rol ON (((l.classoid = rol.tableoid) AND (l.objoid = rol.oid)))); -pg_sequences| SELECT n.nspname AS schemaname, - c.relname AS sequencename, - pg_get_userbyid(c.relowner) AS sequenceowner, - (s.seqtypid)::regtype AS data_type, - s.seqstart AS start_value, - s.seqmin AS min_value, - s.seqmax AS max_value, - s.seqincrement AS increment_by, - s.seqcycle AS cycle, - s.seqcache AS cache_size, - pg_sequence_last_value((c.oid)::regclass) AS last_value - FROM ((pg_sequence s - JOIN pg_class c ON ((c.oid = s.seqrelid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE ((NOT pg_is_other_temp_schema(n.oid)) AND (c.relkind = 'S'::"char")); -pg_settings| SELECT name, - setting, - unit, - category, - short_desc, - extra_desc, - context, - vartype, - source, - min_val, - max_val, - enumvals, - boot_val, - reset_val, - sourcefile, - sourceline, - pending_restart - FROM pg_show_all_settings() a(name, setting, unit, category, short_desc, extra_desc, context, vartype, source, min_val, max_val, enumvals, boot_val, reset_val, sourcefile, sourceline, pending_restart); -pg_shadow| SELECT pg_authid.rolname AS usename, - pg_authid.oid AS usesysid, - pg_authid.rolcreatedb AS usecreatedb, - pg_authid.rolsuper AS usesuper, - pg_authid.rolreplication AS userepl, - pg_authid.rolbypassrls AS usebypassrls, - pg_authid.rolpassword AS passwd, - pg_authid.rolvaliduntil AS valuntil, - s.setconfig AS useconfig - FROM (pg_authid - LEFT JOIN pg_db_role_setting s ON (((pg_authid.oid = s.setrole) AND (s.setdatabase = (0)::oid)))) - WHERE pg_authid.rolcanlogin; -pg_shmem_allocations| SELECT name, - off, - size, - allocated_size - FROM pg_get_shmem_allocations() pg_get_shmem_allocations(name, off, size, allocated_size); -pg_stat_activity| SELECT s.datid, - d.datname, - s.pid, - s.leader_pid, - s.usesysid, - u.rolname AS usename, - s.application_name, - s.client_addr, - s.client_hostname, - s.client_port, - s.backend_start, - s.xact_start, - s.query_start, - s.state_change, - s.wait_event_type, - s.wait_event, - s.state, - s.backend_xid, - s.backend_xmin, - s.query_id, - s.query, - s.backend_type - FROM ((pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion, sslcipher, sslbits, ssl_client_dn, ssl_client_serial, ssl_issuer_dn, gss_auth, gss_princ, gss_enc, gss_delegation, leader_pid, query_id) - LEFT JOIN pg_database d ON ((s.datid = d.oid))) - LEFT JOIN pg_authid u ON ((s.usesysid = u.oid))); -pg_stat_all_indexes| SELECT c.oid AS relid, - i.oid AS indexrelid, - n.nspname AS schemaname, - c.relname, - i.relname AS indexrelname, - pg_stat_get_numscans(i.oid) AS idx_scan, - pg_stat_get_lastscan(i.oid) AS last_idx_scan, - pg_stat_get_tuples_returned(i.oid) AS idx_tup_read, - pg_stat_get_tuples_fetched(i.oid) AS idx_tup_fetch - FROM (((pg_class c - JOIN pg_index x ON ((c.oid = x.indrelid))) - JOIN pg_class i ON ((i.oid = x.indexrelid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char"])); -pg_stat_all_tables| SELECT c.oid AS relid, - n.nspname AS schemaname, 
- c.relname, - pg_stat_get_numscans(c.oid) AS seq_scan, - pg_stat_get_lastscan(c.oid) AS last_seq_scan, - pg_stat_get_tuples_returned(c.oid) AS seq_tup_read, - (sum(pg_stat_get_numscans(i.indexrelid)))::bigint AS idx_scan, - max(pg_stat_get_lastscan(i.indexrelid)) AS last_idx_scan, - ((sum(pg_stat_get_tuples_fetched(i.indexrelid)))::bigint + pg_stat_get_tuples_fetched(c.oid)) AS idx_tup_fetch, - pg_stat_get_tuples_inserted(c.oid) AS n_tup_ins, - pg_stat_get_tuples_updated(c.oid) AS n_tup_upd, - pg_stat_get_tuples_deleted(c.oid) AS n_tup_del, - pg_stat_get_tuples_hot_updated(c.oid) AS n_tup_hot_upd, - pg_stat_get_tuples_newpage_updated(c.oid) AS n_tup_newpage_upd, - pg_stat_get_live_tuples(c.oid) AS n_live_tup, - pg_stat_get_dead_tuples(c.oid) AS n_dead_tup, - pg_stat_get_mod_since_analyze(c.oid) AS n_mod_since_analyze, - pg_stat_get_ins_since_vacuum(c.oid) AS n_ins_since_vacuum, - pg_stat_get_last_vacuum_time(c.oid) AS last_vacuum, - pg_stat_get_last_autovacuum_time(c.oid) AS last_autovacuum, - pg_stat_get_last_analyze_time(c.oid) AS last_analyze, - pg_stat_get_last_autoanalyze_time(c.oid) AS last_autoanalyze, - pg_stat_get_vacuum_count(c.oid) AS vacuum_count, - pg_stat_get_autovacuum_count(c.oid) AS autovacuum_count, - pg_stat_get_analyze_count(c.oid) AS analyze_count, - pg_stat_get_autoanalyze_count(c.oid) AS autoanalyze_count - FROM ((pg_class c - LEFT JOIN pg_index i ON ((c.oid = i.indrelid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char", 'p'::"char"])) - GROUP BY c.oid, n.nspname, c.relname; -pg_stat_archiver| SELECT archived_count, - last_archived_wal, - last_archived_time, - failed_count, - last_failed_wal, - last_failed_time, - stats_reset - FROM pg_stat_get_archiver() s(archived_count, last_archived_wal, last_archived_time, failed_count, last_failed_wal, last_failed_time, stats_reset); -pg_stat_bgwriter| SELECT pg_stat_get_bgwriter_buf_written_clean() AS buffers_clean, - pg_stat_get_bgwriter_maxwritten_clean() AS maxwritten_clean, - pg_stat_get_buf_alloc() AS buffers_alloc, - pg_stat_get_bgwriter_stat_reset_time() AS stats_reset; -pg_stat_checkpointer| SELECT pg_stat_get_checkpointer_num_timed() AS num_timed, - pg_stat_get_checkpointer_num_requested() AS num_requested, - pg_stat_get_checkpointer_num_performed() AS num_done, - pg_stat_get_checkpointer_restartpoints_timed() AS restartpoints_timed, - pg_stat_get_checkpointer_restartpoints_requested() AS restartpoints_req, - pg_stat_get_checkpointer_restartpoints_performed() AS restartpoints_done, - pg_stat_get_checkpointer_write_time() AS write_time, - pg_stat_get_checkpointer_sync_time() AS sync_time, - pg_stat_get_checkpointer_buffers_written() AS buffers_written, - pg_stat_get_checkpointer_slru_written() AS slru_written, - pg_stat_get_checkpointer_stat_reset_time() AS stats_reset; -pg_stat_database| SELECT oid AS datid, - datname, - CASE - WHEN (oid = (0)::oid) THEN 0 - ELSE pg_stat_get_db_numbackends(oid) - END AS numbackends, - pg_stat_get_db_xact_commit(oid) AS xact_commit, - pg_stat_get_db_xact_rollback(oid) AS xact_rollback, - (pg_stat_get_db_blocks_fetched(oid) - pg_stat_get_db_blocks_hit(oid)) AS blks_read, - pg_stat_get_db_blocks_hit(oid) AS blks_hit, - pg_stat_get_db_tuples_returned(oid) AS tup_returned, - pg_stat_get_db_tuples_fetched(oid) AS tup_fetched, - pg_stat_get_db_tuples_inserted(oid) AS tup_inserted, - pg_stat_get_db_tuples_updated(oid) AS tup_updated, - pg_stat_get_db_tuples_deleted(oid) AS tup_deleted, - 
pg_stat_get_db_conflict_all(oid) AS conflicts, - pg_stat_get_db_temp_files(oid) AS temp_files, - pg_stat_get_db_temp_bytes(oid) AS temp_bytes, - pg_stat_get_db_deadlocks(oid) AS deadlocks, - pg_stat_get_db_checksum_failures(oid) AS checksum_failures, - pg_stat_get_db_checksum_last_failure(oid) AS checksum_last_failure, - pg_stat_get_db_blk_read_time(oid) AS blk_read_time, - pg_stat_get_db_blk_write_time(oid) AS blk_write_time, - pg_stat_get_db_session_time(oid) AS session_time, - pg_stat_get_db_active_time(oid) AS active_time, - pg_stat_get_db_idle_in_transaction_time(oid) AS idle_in_transaction_time, - pg_stat_get_db_sessions(oid) AS sessions, - pg_stat_get_db_sessions_abandoned(oid) AS sessions_abandoned, - pg_stat_get_db_sessions_fatal(oid) AS sessions_fatal, - pg_stat_get_db_sessions_killed(oid) AS sessions_killed, - pg_stat_get_db_parallel_workers_to_launch(oid) AS parallel_workers_to_launch, - pg_stat_get_db_parallel_workers_launched(oid) AS parallel_workers_launched, - pg_stat_get_db_stat_reset_time(oid) AS stats_reset - FROM ( SELECT 0 AS oid, - NULL::name AS datname - UNION ALL - SELECT pg_database.oid, - pg_database.datname - FROM pg_database) d; -pg_stat_database_conflicts| SELECT oid AS datid, - datname, - pg_stat_get_db_conflict_tablespace(oid) AS confl_tablespace, - pg_stat_get_db_conflict_lock(oid) AS confl_lock, - pg_stat_get_db_conflict_snapshot(oid) AS confl_snapshot, - pg_stat_get_db_conflict_bufferpin(oid) AS confl_bufferpin, - pg_stat_get_db_conflict_startup_deadlock(oid) AS confl_deadlock, - pg_stat_get_db_conflict_logicalslot(oid) AS confl_active_logicalslot - FROM pg_database d; -pg_stat_gssapi| SELECT pid, - gss_auth AS gss_authenticated, - gss_princ AS principal, - gss_enc AS encrypted, - gss_delegation AS credentials_delegated - FROM pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion, sslcipher, sslbits, ssl_client_dn, ssl_client_serial, ssl_issuer_dn, gss_auth, gss_princ, gss_enc, gss_delegation, leader_pid, query_id) - WHERE (client_port IS NOT NULL); -pg_stat_io| SELECT backend_type, - object, - context, - reads, - read_time, - writes, - write_time, - writebacks, - writeback_time, - extends, - extend_time, - op_bytes, - hits, - evictions, - reuses, - fsyncs, - fsync_time, - stats_reset - FROM pg_stat_get_io() b(backend_type, object, context, reads, read_time, writes, write_time, writebacks, writeback_time, extends, extend_time, op_bytes, hits, evictions, reuses, fsyncs, fsync_time, stats_reset); -pg_stat_progress_analyze| SELECT s.pid, - s.datid, - d.datname, - s.relid, - CASE s.param1 - WHEN 0 THEN 'initializing'::text - WHEN 1 THEN 'acquiring sample rows'::text - WHEN 2 THEN 'acquiring inherited sample rows'::text - WHEN 3 THEN 'computing statistics'::text - WHEN 4 THEN 'computing extended statistics'::text - WHEN 5 THEN 'finalizing analyze'::text - ELSE NULL::text - END AS phase, - s.param2 AS sample_blks_total, - s.param3 AS sample_blks_scanned, - s.param4 AS ext_stats_total, - s.param5 AS ext_stats_computed, - s.param6 AS child_tables_total, - s.param7 AS child_tables_done, - (s.param8)::oid AS current_child_table_relid - FROM (pg_stat_get_progress_info('ANALYZE'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, 
param18, param19, param20) - LEFT JOIN pg_database d ON ((s.datid = d.oid))); -pg_stat_progress_basebackup| SELECT pid, - CASE param1 - WHEN 0 THEN 'initializing'::text - WHEN 1 THEN 'waiting for checkpoint to finish'::text - WHEN 2 THEN 'estimating backup size'::text - WHEN 3 THEN 'streaming database files'::text - WHEN 4 THEN 'waiting for wal archiving to finish'::text - WHEN 5 THEN 'transferring wal files'::text - ELSE NULL::text - END AS phase, - CASE param2 - WHEN '-1'::integer THEN NULL::bigint - ELSE param2 - END AS backup_total, - param3 AS backup_streamed, - param4 AS tablespaces_total, - param5 AS tablespaces_streamed - FROM pg_stat_get_progress_info('BASEBACKUP'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20); -pg_stat_progress_cluster| SELECT s.pid, - s.datid, - d.datname, - s.relid, - CASE s.param1 - WHEN 1 THEN 'CLUSTER'::text - WHEN 2 THEN 'VACUUM FULL'::text - ELSE NULL::text - END AS command, - CASE s.param2 - WHEN 0 THEN 'initializing'::text - WHEN 1 THEN 'seq scanning heap'::text - WHEN 2 THEN 'index scanning heap'::text - WHEN 3 THEN 'sorting tuples'::text - WHEN 4 THEN 'writing new heap'::text - WHEN 5 THEN 'swapping relation files'::text - WHEN 6 THEN 'rebuilding index'::text - WHEN 7 THEN 'performing final cleanup'::text - ELSE NULL::text - END AS phase, - (s.param3)::oid AS cluster_index_relid, - s.param4 AS heap_tuples_scanned, - s.param5 AS heap_tuples_written, - s.param6 AS heap_blks_total, - s.param7 AS heap_blks_scanned, - s.param8 AS index_rebuild_count - FROM (pg_stat_get_progress_info('CLUSTER'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) - LEFT JOIN pg_database d ON ((s.datid = d.oid))); -pg_stat_progress_copy| SELECT s.pid, - s.datid, - d.datname, - s.relid, - CASE s.param5 - WHEN 1 THEN 'COPY FROM'::text - WHEN 2 THEN 'COPY TO'::text - ELSE NULL::text - END AS command, - CASE s.param6 - WHEN 1 THEN 'FILE'::text - WHEN 2 THEN 'PROGRAM'::text - WHEN 3 THEN 'PIPE'::text - WHEN 4 THEN 'CALLBACK'::text - ELSE NULL::text - END AS type, - s.param1 AS bytes_processed, - s.param2 AS bytes_total, - s.param3 AS tuples_processed, - s.param4 AS tuples_excluded, - s.param7 AS tuples_skipped - FROM (pg_stat_get_progress_info('COPY'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) - LEFT JOIN pg_database d ON ((s.datid = d.oid))); -pg_stat_progress_create_index| SELECT s.pid, - s.datid, - d.datname, - s.relid, - (s.param7)::oid AS index_relid, - CASE s.param1 - WHEN 1 THEN 'CREATE INDEX'::text - WHEN 2 THEN 'CREATE INDEX CONCURRENTLY'::text - WHEN 3 THEN 'REINDEX'::text - WHEN 4 THEN 'REINDEX CONCURRENTLY'::text - ELSE NULL::text - END AS command, - CASE s.param10 - WHEN 0 THEN 'initializing'::text - WHEN 1 THEN 'waiting for writers before build'::text - WHEN 2 THEN ('building index'::text || COALESCE((': '::text || pg_indexam_progress_phasename((s.param9)::oid, s.param11)), ''::text)) - WHEN 3 THEN 'waiting for writers before validation'::text - WHEN 4 THEN 'index validation: scanning index'::text - WHEN 5 THEN 'index validation: sorting tuples'::text - WHEN 6 THEN 'index validation: scanning table'::text - WHEN 7 THEN 
'waiting for old snapshots'::text - WHEN 8 THEN 'waiting for readers before marking dead'::text - WHEN 9 THEN 'waiting for readers before dropping'::text - ELSE NULL::text - END AS phase, - s.param4 AS lockers_total, - s.param5 AS lockers_done, - s.param6 AS current_locker_pid, - s.param16 AS blocks_total, - s.param17 AS blocks_done, - s.param12 AS tuples_total, - s.param13 AS tuples_done, - s.param14 AS partitions_total, - s.param15 AS partitions_done - FROM (pg_stat_get_progress_info('CREATE INDEX'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) - LEFT JOIN pg_database d ON ((s.datid = d.oid))); -pg_stat_progress_vacuum| SELECT s.pid, - s.datid, - d.datname, - s.relid, - CASE s.param1 - WHEN 0 THEN 'initializing'::text - WHEN 1 THEN 'scanning heap'::text - WHEN 2 THEN 'vacuuming indexes'::text - WHEN 3 THEN 'vacuuming heap'::text - WHEN 4 THEN 'cleaning up indexes'::text - WHEN 5 THEN 'truncating heap'::text - WHEN 6 THEN 'performing final cleanup'::text - ELSE NULL::text - END AS phase, - s.param2 AS heap_blks_total, - s.param3 AS heap_blks_scanned, - s.param4 AS heap_blks_vacuumed, - s.param5 AS index_vacuum_count, - s.param6 AS max_dead_tuple_bytes, - s.param7 AS dead_tuple_bytes, - s.param8 AS num_dead_item_ids, - s.param9 AS indexes_total, - s.param10 AS indexes_processed - FROM (pg_stat_get_progress_info('VACUUM'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) - LEFT JOIN pg_database d ON ((s.datid = d.oid))); -pg_stat_recovery_prefetch| SELECT stats_reset, - prefetch, - hit, - skip_init, - skip_new, - skip_fpw, - skip_rep, - wal_distance, - block_distance, - io_depth - FROM pg_stat_get_recovery_prefetch() s(stats_reset, prefetch, hit, skip_init, skip_new, skip_fpw, skip_rep, wal_distance, block_distance, io_depth); -pg_stat_replication| SELECT s.pid, - s.usesysid, - u.rolname AS usename, - s.application_name, - s.client_addr, - s.client_hostname, - s.client_port, - s.backend_start, - s.backend_xmin, - w.state, - w.sent_lsn, - w.write_lsn, - w.flush_lsn, - w.replay_lsn, - w.write_lag, - w.flush_lag, - w.replay_lag, - w.sync_priority, - w.sync_state, - w.reply_time - FROM ((pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion, sslcipher, sslbits, ssl_client_dn, ssl_client_serial, ssl_issuer_dn, gss_auth, gss_princ, gss_enc, gss_delegation, leader_pid, query_id) - JOIN pg_stat_get_wal_senders() w(pid, state, sent_lsn, write_lsn, flush_lsn, replay_lsn, write_lag, flush_lag, replay_lag, sync_priority, sync_state, reply_time) ON ((s.pid = w.pid))) - LEFT JOIN pg_authid u ON ((s.usesysid = u.oid))); -pg_stat_replication_slots| SELECT s.slot_name, - s.spill_txns, - s.spill_count, - s.spill_bytes, - s.stream_txns, - s.stream_count, - s.stream_bytes, - s.total_txns, - s.total_bytes, - s.stats_reset - FROM pg_replication_slots r, - LATERAL pg_stat_get_replication_slot((r.slot_name)::text) s(slot_name, spill_txns, spill_count, spill_bytes, stream_txns, stream_count, stream_bytes, total_txns, total_bytes, stats_reset) - WHERE (r.datoid IS NOT NULL); -pg_stat_slru| SELECT name, - 
blks_zeroed, - blks_hit, - blks_read, - blks_written, - blks_exists, - flushes, - truncates, - stats_reset - FROM pg_stat_get_slru() s(name, blks_zeroed, blks_hit, blks_read, blks_written, blks_exists, flushes, truncates, stats_reset); -pg_stat_ssl| SELECT pid, - ssl, - sslversion AS version, - sslcipher AS cipher, - sslbits AS bits, - ssl_client_dn AS client_dn, - ssl_client_serial AS client_serial, - ssl_issuer_dn AS issuer_dn - FROM pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion, sslcipher, sslbits, ssl_client_dn, ssl_client_serial, ssl_issuer_dn, gss_auth, gss_princ, gss_enc, gss_delegation, leader_pid, query_id) - WHERE (client_port IS NOT NULL); -pg_stat_subscription| SELECT su.oid AS subid, - su.subname, - st.worker_type, - st.pid, - st.leader_pid, - st.relid, - st.received_lsn, - st.last_msg_send_time, - st.last_msg_receipt_time, - st.latest_end_lsn, - st.latest_end_time - FROM (pg_subscription su - LEFT JOIN pg_stat_get_subscription(NULL::oid) st(subid, relid, pid, leader_pid, received_lsn, last_msg_send_time, last_msg_receipt_time, latest_end_lsn, latest_end_time, worker_type) ON ((st.subid = su.oid))); -pg_stat_subscription_stats| SELECT ss.subid, - s.subname, - ss.apply_error_count, - ss.sync_error_count, - ss.confl_insert_exists, - ss.confl_update_origin_differs, - ss.confl_update_exists, - ss.confl_update_missing, - ss.confl_delete_origin_differs, - ss.confl_delete_missing, - ss.stats_reset - FROM pg_subscription s, - LATERAL pg_stat_get_subscription_stats(s.oid) ss(subid, apply_error_count, sync_error_count, confl_insert_exists, confl_update_origin_differs, confl_update_exists, confl_update_missing, confl_delete_origin_differs, confl_delete_missing, stats_reset); -pg_stat_sys_indexes| SELECT relid, - indexrelid, - schemaname, - relname, - indexrelname, - idx_scan, - last_idx_scan, - idx_tup_read, - idx_tup_fetch - FROM pg_stat_all_indexes - WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); -pg_stat_sys_tables| SELECT relid, - schemaname, - relname, - seq_scan, - last_seq_scan, - seq_tup_read, - idx_scan, - last_idx_scan, - idx_tup_fetch, - n_tup_ins, - n_tup_upd, - n_tup_del, - n_tup_hot_upd, - n_tup_newpage_upd, - n_live_tup, - n_dead_tup, - n_mod_since_analyze, - n_ins_since_vacuum, - last_vacuum, - last_autovacuum, - last_analyze, - last_autoanalyze, - vacuum_count, - autovacuum_count, - analyze_count, - autoanalyze_count - FROM pg_stat_all_tables - WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); -pg_stat_user_functions| SELECT p.oid AS funcid, - n.nspname AS schemaname, - p.proname AS funcname, - pg_stat_get_function_calls(p.oid) AS calls, - pg_stat_get_function_total_time(p.oid) AS total_time, - pg_stat_get_function_self_time(p.oid) AS self_time - FROM (pg_proc p - LEFT JOIN pg_namespace n ON ((n.oid = p.pronamespace))) - WHERE ((p.prolang <> (12)::oid) AND (pg_stat_get_function_calls(p.oid) IS NOT NULL)); -pg_stat_user_indexes| SELECT relid, - indexrelid, - schemaname, - relname, - indexrelname, - idx_scan, - last_idx_scan, - idx_tup_read, - idx_tup_fetch - FROM pg_stat_all_indexes - WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); 
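Note that the pg_stat_sys_* / pg_stat_user_* pairs above are thin filters over the shared pg_stat_all_* views; the only difference is whether the schemaname predicate keeps or excludes pg_catalog, information_schema, and the pg_toast* schemas. Because the two predicates are complements, the pair should partition the parent view exactly. A quick sanity check, as a sketch that assumes no relation reports a NULL schemaname:

SELECT (SELECT count(*) FROM pg_stat_sys_tables)
     + (SELECT count(*) FROM pg_stat_user_tables)
     = (SELECT count(*) FROM pg_stat_all_tables) AS split_is_exhaustive;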
-pg_stat_user_tables| SELECT relid, - schemaname, - relname, - seq_scan, - last_seq_scan, - seq_tup_read, - idx_scan, - last_idx_scan, - idx_tup_fetch, - n_tup_ins, - n_tup_upd, - n_tup_del, - n_tup_hot_upd, - n_tup_newpage_upd, - n_live_tup, - n_dead_tup, - n_mod_since_analyze, - n_ins_since_vacuum, - last_vacuum, - last_autovacuum, - last_analyze, - last_autoanalyze, - vacuum_count, - autovacuum_count, - analyze_count, - autoanalyze_count - FROM pg_stat_all_tables - WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); -pg_stat_wal| SELECT wal_records, - wal_fpi, - wal_bytes, - wal_buffers_full, - wal_write, - wal_sync, - wal_write_time, - wal_sync_time, - stats_reset - FROM pg_stat_get_wal() w(wal_records, wal_fpi, wal_bytes, wal_buffers_full, wal_write, wal_sync, wal_write_time, wal_sync_time, stats_reset); -pg_stat_wal_receiver| SELECT pid, - status, - receive_start_lsn, - receive_start_tli, - written_lsn, - flushed_lsn, - received_tli, - last_msg_send_time, - last_msg_receipt_time, - latest_end_lsn, - latest_end_time, - slot_name, - sender_host, - sender_port, - conninfo - FROM pg_stat_get_wal_receiver() s(pid, status, receive_start_lsn, receive_start_tli, written_lsn, flushed_lsn, received_tli, last_msg_send_time, last_msg_receipt_time, latest_end_lsn, latest_end_time, slot_name, sender_host, sender_port, conninfo) - WHERE (pid IS NOT NULL); -pg_stat_xact_all_tables| SELECT c.oid AS relid, - n.nspname AS schemaname, - c.relname, - pg_stat_get_xact_numscans(c.oid) AS seq_scan, - pg_stat_get_xact_tuples_returned(c.oid) AS seq_tup_read, - (sum(pg_stat_get_xact_numscans(i.indexrelid)))::bigint AS idx_scan, - ((sum(pg_stat_get_xact_tuples_fetched(i.indexrelid)))::bigint + pg_stat_get_xact_tuples_fetched(c.oid)) AS idx_tup_fetch, - pg_stat_get_xact_tuples_inserted(c.oid) AS n_tup_ins, - pg_stat_get_xact_tuples_updated(c.oid) AS n_tup_upd, - pg_stat_get_xact_tuples_deleted(c.oid) AS n_tup_del, - pg_stat_get_xact_tuples_hot_updated(c.oid) AS n_tup_hot_upd, - pg_stat_get_xact_tuples_newpage_updated(c.oid) AS n_tup_newpage_upd - FROM ((pg_class c - LEFT JOIN pg_index i ON ((c.oid = i.indrelid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char", 'p'::"char"])) - GROUP BY c.oid, n.nspname, c.relname; -pg_stat_xact_sys_tables| SELECT relid, - schemaname, - relname, - seq_scan, - seq_tup_read, - idx_scan, - idx_tup_fetch, - n_tup_ins, - n_tup_upd, - n_tup_del, - n_tup_hot_upd, - n_tup_newpage_upd - FROM pg_stat_xact_all_tables - WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); -pg_stat_xact_user_functions| SELECT p.oid AS funcid, - n.nspname AS schemaname, - p.proname AS funcname, - pg_stat_get_xact_function_calls(p.oid) AS calls, - pg_stat_get_xact_function_total_time(p.oid) AS total_time, - pg_stat_get_xact_function_self_time(p.oid) AS self_time - FROM (pg_proc p - LEFT JOIN pg_namespace n ON ((n.oid = p.pronamespace))) - WHERE ((p.prolang <> (12)::oid) AND (pg_stat_get_xact_function_calls(p.oid) IS NOT NULL)); -pg_stat_xact_user_tables| SELECT relid, - schemaname, - relname, - seq_scan, - seq_tup_read, - idx_scan, - idx_tup_fetch, - n_tup_ins, - n_tup_upd, - n_tup_del, - n_tup_hot_upd, - n_tup_newpage_upd - FROM pg_stat_xact_all_tables - WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); 
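Unlike the cumulative pg_stat_* views, the pg_stat_xact_* views defined above read the current backend's per-transaction counters, so they reflect work done by the still-open transaction before any statistics are flushed. A minimal sketch (xact_demo is a throwaway name; a temporary table lands in the "user" variant because its pg_temp_N schema matches neither schema filter):

BEGIN;
CREATE TEMP TABLE xact_demo (i int);
INSERT INTO xact_demo SELECT generate_series(1, 100);
SELECT relname, n_tup_ins      -- already shows 100 inside the transaction
  FROM pg_stat_xact_user_tables
 WHERE relname = 'xact_demo';
COMMIT;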
-pg_statio_all_indexes| SELECT c.oid AS relid, - i.oid AS indexrelid, - n.nspname AS schemaname, - c.relname, - i.relname AS indexrelname, - (pg_stat_get_blocks_fetched(i.oid) - pg_stat_get_blocks_hit(i.oid)) AS idx_blks_read, - pg_stat_get_blocks_hit(i.oid) AS idx_blks_hit - FROM (((pg_class c - JOIN pg_index x ON ((c.oid = x.indrelid))) - JOIN pg_class i ON ((i.oid = x.indexrelid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char"])); -pg_statio_all_sequences| SELECT c.oid AS relid, - n.nspname AS schemaname, - c.relname, - (pg_stat_get_blocks_fetched(c.oid) - pg_stat_get_blocks_hit(c.oid)) AS blks_read, - pg_stat_get_blocks_hit(c.oid) AS blks_hit - FROM (pg_class c - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE (c.relkind = 'S'::"char"); -pg_statio_all_tables| SELECT c.oid AS relid, - n.nspname AS schemaname, - c.relname, - (pg_stat_get_blocks_fetched(c.oid) - pg_stat_get_blocks_hit(c.oid)) AS heap_blks_read, - pg_stat_get_blocks_hit(c.oid) AS heap_blks_hit, - i.idx_blks_read, - i.idx_blks_hit, - (pg_stat_get_blocks_fetched(t.oid) - pg_stat_get_blocks_hit(t.oid)) AS toast_blks_read, - pg_stat_get_blocks_hit(t.oid) AS toast_blks_hit, - x.idx_blks_read AS tidx_blks_read, - x.idx_blks_hit AS tidx_blks_hit - FROM ((((pg_class c - LEFT JOIN pg_class t ON ((c.reltoastrelid = t.oid))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - LEFT JOIN LATERAL ( SELECT (sum((pg_stat_get_blocks_fetched(pg_index.indexrelid) - pg_stat_get_blocks_hit(pg_index.indexrelid))))::bigint AS idx_blks_read, - (sum(pg_stat_get_blocks_hit(pg_index.indexrelid)))::bigint AS idx_blks_hit - FROM pg_index - WHERE (pg_index.indrelid = c.oid)) i ON (true)) - LEFT JOIN LATERAL ( SELECT (sum((pg_stat_get_blocks_fetched(pg_index.indexrelid) - pg_stat_get_blocks_hit(pg_index.indexrelid))))::bigint AS idx_blks_read, - (sum(pg_stat_get_blocks_hit(pg_index.indexrelid)))::bigint AS idx_blks_hit - FROM pg_index - WHERE (pg_index.indrelid = t.oid)) x ON (true)) - WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char"])); -pg_statio_sys_indexes| SELECT relid, - indexrelid, - schemaname, - relname, - indexrelname, - idx_blks_read, - idx_blks_hit - FROM pg_statio_all_indexes - WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); -pg_statio_sys_sequences| SELECT relid, - schemaname, - relname, - blks_read, - blks_hit - FROM pg_statio_all_sequences - WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); -pg_statio_sys_tables| SELECT relid, - schemaname, - relname, - heap_blks_read, - heap_blks_hit, - idx_blks_read, - idx_blks_hit, - toast_blks_read, - toast_blks_hit, - tidx_blks_read, - tidx_blks_hit - FROM pg_statio_all_tables - WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); -pg_statio_user_indexes| SELECT relid, - indexrelid, - schemaname, - relname, - indexrelname, - idx_blks_read, - idx_blks_hit - FROM pg_statio_all_indexes - WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); -pg_statio_user_sequences| SELECT relid, - schemaname, - relname, - blks_read, - blks_hit - FROM pg_statio_all_sequences - WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); -pg_statio_user_tables| 
SELECT relid, - schemaname, - relname, - heap_blks_read, - heap_blks_hit, - idx_blks_read, - idx_blks_hit, - toast_blks_read, - toast_blks_hit, - tidx_blks_read, - tidx_blks_hit - FROM pg_statio_all_tables - WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); -pg_stats| SELECT n.nspname AS schemaname, - c.relname AS tablename, - a.attname, - s.stainherit AS inherited, - s.stanullfrac AS null_frac, - s.stawidth AS avg_width, - s.stadistinct AS n_distinct, - CASE - WHEN (s.stakind1 = 1) THEN s.stavalues1 - WHEN (s.stakind2 = 1) THEN s.stavalues2 - WHEN (s.stakind3 = 1) THEN s.stavalues3 - WHEN (s.stakind4 = 1) THEN s.stavalues4 - WHEN (s.stakind5 = 1) THEN s.stavalues5 - ELSE NULL::anyarray - END AS most_common_vals, - CASE - WHEN (s.stakind1 = 1) THEN s.stanumbers1 - WHEN (s.stakind2 = 1) THEN s.stanumbers2 - WHEN (s.stakind3 = 1) THEN s.stanumbers3 - WHEN (s.stakind4 = 1) THEN s.stanumbers4 - WHEN (s.stakind5 = 1) THEN s.stanumbers5 - ELSE NULL::real[] - END AS most_common_freqs, - CASE - WHEN (s.stakind1 = 2) THEN s.stavalues1 - WHEN (s.stakind2 = 2) THEN s.stavalues2 - WHEN (s.stakind3 = 2) THEN s.stavalues3 - WHEN (s.stakind4 = 2) THEN s.stavalues4 - WHEN (s.stakind5 = 2) THEN s.stavalues5 - ELSE NULL::anyarray - END AS histogram_bounds, - CASE - WHEN (s.stakind1 = 3) THEN s.stanumbers1[1] - WHEN (s.stakind2 = 3) THEN s.stanumbers2[1] - WHEN (s.stakind3 = 3) THEN s.stanumbers3[1] - WHEN (s.stakind4 = 3) THEN s.stanumbers4[1] - WHEN (s.stakind5 = 3) THEN s.stanumbers5[1] - ELSE NULL::real - END AS correlation, - CASE - WHEN (s.stakind1 = 4) THEN s.stavalues1 - WHEN (s.stakind2 = 4) THEN s.stavalues2 - WHEN (s.stakind3 = 4) THEN s.stavalues3 - WHEN (s.stakind4 = 4) THEN s.stavalues4 - WHEN (s.stakind5 = 4) THEN s.stavalues5 - ELSE NULL::anyarray - END AS most_common_elems, - CASE - WHEN (s.stakind1 = 4) THEN s.stanumbers1 - WHEN (s.stakind2 = 4) THEN s.stanumbers2 - WHEN (s.stakind3 = 4) THEN s.stanumbers3 - WHEN (s.stakind4 = 4) THEN s.stanumbers4 - WHEN (s.stakind5 = 4) THEN s.stanumbers5 - ELSE NULL::real[] - END AS most_common_elem_freqs, - CASE - WHEN (s.stakind1 = 5) THEN s.stanumbers1 - WHEN (s.stakind2 = 5) THEN s.stanumbers2 - WHEN (s.stakind3 = 5) THEN s.stanumbers3 - WHEN (s.stakind4 = 5) THEN s.stanumbers4 - WHEN (s.stakind5 = 5) THEN s.stanumbers5 - ELSE NULL::real[] - END AS elem_count_histogram, - CASE - WHEN (s.stakind1 = 6) THEN s.stavalues1 - WHEN (s.stakind2 = 6) THEN s.stavalues2 - WHEN (s.stakind3 = 6) THEN s.stavalues3 - WHEN (s.stakind4 = 6) THEN s.stavalues4 - WHEN (s.stakind5 = 6) THEN s.stavalues5 - ELSE NULL::anyarray - END AS range_length_histogram, - CASE - WHEN (s.stakind1 = 6) THEN s.stanumbers1[1] - WHEN (s.stakind2 = 6) THEN s.stanumbers2[1] - WHEN (s.stakind3 = 6) THEN s.stanumbers3[1] - WHEN (s.stakind4 = 6) THEN s.stanumbers4[1] - WHEN (s.stakind5 = 6) THEN s.stanumbers5[1] - ELSE NULL::real - END AS range_empty_frac, - CASE - WHEN (s.stakind1 = 7) THEN s.stavalues1 - WHEN (s.stakind2 = 7) THEN s.stavalues2 - WHEN (s.stakind3 = 7) THEN s.stavalues3 - WHEN (s.stakind4 = 7) THEN s.stavalues4 - WHEN (s.stakind5 = 7) THEN s.stavalues5 - ELSE NULL::anyarray - END AS range_bounds_histogram - FROM (((pg_statistic s - JOIN pg_class c ON ((c.oid = s.starelid))) - JOIN pg_attribute a ON (((c.oid = a.attrelid) AND (a.attnum = s.staattnum)))) - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE ((NOT a.attisdropped) AND has_column_privilege(c.oid, a.attnum, 'select'::text) AND 
((c.relrowsecurity = false) OR (NOT row_security_active(c.oid)))); -pg_stats_ext| SELECT cn.nspname AS schemaname, - c.relname AS tablename, - sn.nspname AS statistics_schemaname, - s.stxname AS statistics_name, - pg_get_userbyid(s.stxowner) AS statistics_owner, - ( SELECT array_agg(a.attname ORDER BY a.attnum) AS array_agg - FROM (unnest(s.stxkeys) k(k) - JOIN pg_attribute a ON (((a.attrelid = s.stxrelid) AND (a.attnum = k.k))))) AS attnames, - pg_get_statisticsobjdef_expressions(s.oid) AS exprs, - s.stxkind AS kinds, - sd.stxdinherit AS inherited, - sd.stxdndistinct AS n_distinct, - sd.stxddependencies AS dependencies, - m.most_common_vals, - m.most_common_val_nulls, - m.most_common_freqs, - m.most_common_base_freqs - FROM (((((pg_statistic_ext s - JOIN pg_class c ON ((c.oid = s.stxrelid))) - JOIN pg_statistic_ext_data sd ON ((s.oid = sd.stxoid))) - LEFT JOIN pg_namespace cn ON ((cn.oid = c.relnamespace))) - LEFT JOIN pg_namespace sn ON ((sn.oid = s.stxnamespace))) - LEFT JOIN LATERAL ( SELECT array_agg(pg_mcv_list_items."values") AS most_common_vals, - array_agg(pg_mcv_list_items.nulls) AS most_common_val_nulls, - array_agg(pg_mcv_list_items.frequency) AS most_common_freqs, - array_agg(pg_mcv_list_items.base_frequency) AS most_common_base_freqs - FROM pg_mcv_list_items(sd.stxdmcv) pg_mcv_list_items(index, "values", nulls, frequency, base_frequency)) m ON ((sd.stxdmcv IS NOT NULL))) - WHERE (pg_has_role(c.relowner, 'USAGE'::text) AND ((c.relrowsecurity = false) OR (NOT row_security_active(c.oid)))); -pg_stats_ext_exprs| SELECT cn.nspname AS schemaname, - c.relname AS tablename, - sn.nspname AS statistics_schemaname, - s.stxname AS statistics_name, - pg_get_userbyid(s.stxowner) AS statistics_owner, - stat.expr, - sd.stxdinherit AS inherited, - (stat.a).stanullfrac AS null_frac, - (stat.a).stawidth AS avg_width, - (stat.a).stadistinct AS n_distinct, - CASE - WHEN ((stat.a).stakind1 = 1) THEN (stat.a).stavalues1 - WHEN ((stat.a).stakind2 = 1) THEN (stat.a).stavalues2 - WHEN ((stat.a).stakind3 = 1) THEN (stat.a).stavalues3 - WHEN ((stat.a).stakind4 = 1) THEN (stat.a).stavalues4 - WHEN ((stat.a).stakind5 = 1) THEN (stat.a).stavalues5 - ELSE NULL::anyarray - END AS most_common_vals, - CASE - WHEN ((stat.a).stakind1 = 1) THEN (stat.a).stanumbers1 - WHEN ((stat.a).stakind2 = 1) THEN (stat.a).stanumbers2 - WHEN ((stat.a).stakind3 = 1) THEN (stat.a).stanumbers3 - WHEN ((stat.a).stakind4 = 1) THEN (stat.a).stanumbers4 - WHEN ((stat.a).stakind5 = 1) THEN (stat.a).stanumbers5 - ELSE NULL::real[] - END AS most_common_freqs, - CASE - WHEN ((stat.a).stakind1 = 2) THEN (stat.a).stavalues1 - WHEN ((stat.a).stakind2 = 2) THEN (stat.a).stavalues2 - WHEN ((stat.a).stakind3 = 2) THEN (stat.a).stavalues3 - WHEN ((stat.a).stakind4 = 2) THEN (stat.a).stavalues4 - WHEN ((stat.a).stakind5 = 2) THEN (stat.a).stavalues5 - ELSE NULL::anyarray - END AS histogram_bounds, - CASE - WHEN ((stat.a).stakind1 = 3) THEN (stat.a).stanumbers1[1] - WHEN ((stat.a).stakind2 = 3) THEN (stat.a).stanumbers2[1] - WHEN ((stat.a).stakind3 = 3) THEN (stat.a).stanumbers3[1] - WHEN ((stat.a).stakind4 = 3) THEN (stat.a).stanumbers4[1] - WHEN ((stat.a).stakind5 = 3) THEN (stat.a).stanumbers5[1] - ELSE NULL::real - END AS correlation, - CASE - WHEN ((stat.a).stakind1 = 4) THEN (stat.a).stavalues1 - WHEN ((stat.a).stakind2 = 4) THEN (stat.a).stavalues2 - WHEN ((stat.a).stakind3 = 4) THEN (stat.a).stavalues3 - WHEN ((stat.a).stakind4 = 4) THEN (stat.a).stavalues4 - WHEN ((stat.a).stakind5 = 4) THEN (stat.a).stavalues5 - ELSE NULL::anyarray - 
END AS most_common_elems, - CASE - WHEN ((stat.a).stakind1 = 4) THEN (stat.a).stanumbers1 - WHEN ((stat.a).stakind2 = 4) THEN (stat.a).stanumbers2 - WHEN ((stat.a).stakind3 = 4) THEN (stat.a).stanumbers3 - WHEN ((stat.a).stakind4 = 4) THEN (stat.a).stanumbers4 - WHEN ((stat.a).stakind5 = 4) THEN (stat.a).stanumbers5 - ELSE NULL::real[] - END AS most_common_elem_freqs, - CASE - WHEN ((stat.a).stakind1 = 5) THEN (stat.a).stanumbers1 - WHEN ((stat.a).stakind2 = 5) THEN (stat.a).stanumbers2 - WHEN ((stat.a).stakind3 = 5) THEN (stat.a).stanumbers3 - WHEN ((stat.a).stakind4 = 5) THEN (stat.a).stanumbers4 - WHEN ((stat.a).stakind5 = 5) THEN (stat.a).stanumbers5 - ELSE NULL::real[] - END AS elem_count_histogram - FROM (((((pg_statistic_ext s - JOIN pg_class c ON ((c.oid = s.stxrelid))) - LEFT JOIN pg_statistic_ext_data sd ON ((s.oid = sd.stxoid))) - LEFT JOIN pg_namespace cn ON ((cn.oid = c.relnamespace))) - LEFT JOIN pg_namespace sn ON ((sn.oid = s.stxnamespace))) - JOIN LATERAL ( SELECT unnest(pg_get_statisticsobjdef_expressions(s.oid)) AS expr, - unnest(sd.stxdexpr) AS a) stat ON ((stat.expr IS NOT NULL))) - WHERE (pg_has_role(c.relowner, 'USAGE'::text) AND ((c.relrowsecurity = false) OR (NOT row_security_active(c.oid)))); -pg_tables| SELECT n.nspname AS schemaname, - c.relname AS tablename, - pg_get_userbyid(c.relowner) AS tableowner, - t.spcname AS tablespace, - c.relhasindex AS hasindexes, - c.relhasrules AS hasrules, - c.relhastriggers AS hastriggers, - c.relrowsecurity AS rowsecurity - FROM ((pg_class c - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - LEFT JOIN pg_tablespace t ON ((t.oid = c.reltablespace))) - WHERE (c.relkind = ANY (ARRAY['r'::"char", 'p'::"char"])); -pg_timezone_abbrevs| SELECT abbrev, - utc_offset, - is_dst - FROM pg_timezone_abbrevs() pg_timezone_abbrevs(abbrev, utc_offset, is_dst); -pg_timezone_names| SELECT name, - abbrev, - utc_offset, - is_dst - FROM pg_timezone_names() pg_timezone_names(name, abbrev, utc_offset, is_dst); -pg_user| SELECT usename, - usesysid, - usecreatedb, - usesuper, - userepl, - usebypassrls, - '********'::text AS passwd, - valuntil, - useconfig - FROM pg_shadow; -pg_user_mappings| SELECT u.oid AS umid, - s.oid AS srvid, - s.srvname, - u.umuser, - CASE - WHEN (u.umuser = (0)::oid) THEN 'public'::name - ELSE a.rolname - END AS usename, - CASE - WHEN (((u.umuser <> (0)::oid) AND (a.rolname = CURRENT_USER) AND (pg_has_role(s.srvowner, 'USAGE'::text) OR has_server_privilege(s.oid, 'USAGE'::text))) OR ((u.umuser = (0)::oid) AND pg_has_role(s.srvowner, 'USAGE'::text)) OR ( SELECT pg_authid.rolsuper - FROM pg_authid - WHERE (pg_authid.rolname = CURRENT_USER))) THEN u.umoptions - ELSE NULL::text[] - END AS umoptions - FROM ((pg_user_mapping u - JOIN pg_foreign_server s ON ((u.umserver = s.oid))) - LEFT JOIN pg_authid a ON ((a.oid = u.umuser))); -pg_views| SELECT n.nspname AS schemaname, - c.relname AS viewname, - pg_get_userbyid(c.relowner) AS viewowner, - pg_get_viewdef(c.oid) AS definition - FROM (pg_class c - LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE (c.relkind = 'v'::"char"); -pg_wait_events| SELECT type, - name, - description - FROM pg_get_wait_events() pg_get_wait_events(type, name, description); -SELECT tablename, rulename, definition FROM pg_rules -WHERE schemaname = 'pg_catalog' -ORDER BY tablename, rulename; -pg_settings|pg_settings_n|CREATE RULE pg_settings_n AS - ON UPDATE TO pg_catalog.pg_settings DO INSTEAD NOTHING; -pg_settings|pg_settings_u|CREATE RULE pg_settings_u AS - ON UPDATE TO 
pg_catalog.pg_settings - WHERE (new.name = old.name) DO SELECT set_config(old.name, new.setting, false) AS set_config; --- restore normal output mode -\a\t --- --- CREATE OR REPLACE RULE --- -CREATE TABLE ruletest_tbl (a int, b int); -CREATE TABLE ruletest_tbl2 (a int, b int); -CREATE OR REPLACE RULE myrule AS ON INSERT TO ruletest_tbl - DO INSTEAD INSERT INTO ruletest_tbl2 VALUES (10, 10); -INSERT INTO ruletest_tbl VALUES (99, 99); -CREATE OR REPLACE RULE myrule AS ON INSERT TO ruletest_tbl - DO INSTEAD INSERT INTO ruletest_tbl2 VALUES (1000, 1000); -INSERT INTO ruletest_tbl VALUES (99, 99); -SELECT * FROM ruletest_tbl2; - a | b -------+------ - 10 | 10 - 1000 | 1000 -(2 rows) - --- Check that rewrite rules splitting one INSERT into multiple --- conditional statements does not disable FK checking. -create table rule_and_refint_t1 ( - id1a integer, - id1b integer, - primary key (id1a, id1b) -); -create table rule_and_refint_t2 ( - id2a integer, - id2c integer, - primary key (id2a, id2c) -); -create table rule_and_refint_t3 ( - id3a integer, - id3b integer, - id3c integer, - data text, - primary key (id3a, id3b, id3c), - foreign key (id3a, id3b) references rule_and_refint_t1 (id1a, id1b), - foreign key (id3a, id3c) references rule_and_refint_t2 (id2a, id2c) -); -insert into rule_and_refint_t1 values (1, 11); -insert into rule_and_refint_t1 values (1, 12); -insert into rule_and_refint_t1 values (2, 21); -insert into rule_and_refint_t1 values (2, 22); -insert into rule_and_refint_t2 values (1, 11); -insert into rule_and_refint_t2 values (1, 12); -insert into rule_and_refint_t2 values (2, 21); -insert into rule_and_refint_t2 values (2, 22); -insert into rule_and_refint_t3 values (1, 11, 11, 'row1'); -insert into rule_and_refint_t3 values (1, 11, 12, 'row2'); -insert into rule_and_refint_t3 values (1, 12, 11, 'row3'); -insert into rule_and_refint_t3 values (1, 12, 12, 'row4'); -insert into rule_and_refint_t3 values (1, 11, 13, 'row5'); -ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3c_fkey" -DETAIL: Key (id3a, id3c)=(1, 13) is not present in table "rule_and_refint_t2". -insert into rule_and_refint_t3 values (1, 13, 11, 'row6'); -ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3b_fkey" -DETAIL: Key (id3a, id3b)=(1, 13) is not present in table "rule_and_refint_t1". --- Ordinary table -insert into rule_and_refint_t3 values (1, 13, 11, 'row6') - on conflict do nothing; -ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3b_fkey" -DETAIL: Key (id3a, id3b)=(1, 13) is not present in table "rule_and_refint_t1". --- rule not fired, so fk violation -insert into rule_and_refint_t3 values (1, 13, 11, 'row6') - on conflict (id3a, id3b, id3c) do update - set id3b = excluded.id3b; -ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3b_fkey" -DETAIL: Key (id3a, id3b)=(1, 13) is not present in table "rule_and_refint_t1". 
--- rule fired, so unsupported -insert into shoelace values ('sl9', 0, 'pink', 35.0, 'inch', 0.0) - on conflict (sl_name) do update - set sl_avail = excluded.sl_avail; -ERROR: INSERT with ON CONFLICT clause cannot be used with table that has INSERT or UPDATE rules -create rule rule_and_refint_t3_ins as on insert to rule_and_refint_t3 - where (exists (select 1 from rule_and_refint_t3 - where (((rule_and_refint_t3.id3a = new.id3a) - and (rule_and_refint_t3.id3b = new.id3b)) - and (rule_and_refint_t3.id3c = new.id3c)))) - do instead update rule_and_refint_t3 set data = new.data - where (((rule_and_refint_t3.id3a = new.id3a) - and (rule_and_refint_t3.id3b = new.id3b)) - and (rule_and_refint_t3.id3c = new.id3c)); -insert into rule_and_refint_t3 values (1, 11, 13, 'row7'); -ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3c_fkey" -DETAIL: Key (id3a, id3c)=(1, 13) is not present in table "rule_and_refint_t2". -insert into rule_and_refint_t3 values (1, 13, 11, 'row8'); -ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3b_fkey" -DETAIL: Key (id3a, id3b)=(1, 13) is not present in table "rule_and_refint_t1". --- --- disallow dropping a view's rule (bug #5072) --- -create view rules_fooview as select 'rules_foo'::text; -drop rule "_RETURN" on rules_fooview; -ERROR: cannot drop rule _RETURN on view rules_fooview because view rules_fooview requires it -HINT: You can drop view rules_fooview instead. -drop view rules_fooview; --- --- We used to allow converting a table to a view by creating a "_RETURN" --- rule for it, but no more. --- -create table rules_fooview (x int, y text); -create rule "_RETURN" as on select to rules_fooview do instead - select 1 as x, 'aaa'::text as y; -ERROR: relation "rules_fooview" cannot have ON SELECT rules -DETAIL: This operation is not supported for tables. -drop table rules_fooview; --- likewise, converting a partitioned table or partition to view is not allowed -create table rules_fooview (x int, y text) partition by list (x); -create rule "_RETURN" as on select to rules_fooview do instead - select 1 as x, 'aaa'::text as y; -ERROR: relation "rules_fooview" cannot have ON SELECT rules -DETAIL: This operation is not supported for partitioned tables. -create table rules_fooview_part partition of rules_fooview for values in (1); -create rule "_RETURN" as on select to rules_fooview_part do instead - select 1 as x, 'aaa'::text as y; -ERROR: relation "rules_fooview_part" cannot have ON SELECT rules -DETAIL: This operation is not supported for tables. 
-drop table rules_fooview; --- --- check for planner problems with complex inherited UPDATES --- -create table id (id serial primary key, name text); --- currently, must respecify PKEY for each inherited subtable -create table test_1 (id integer primary key) inherits (id); -NOTICE: merging column "id" with inherited definition -create table test_2 (id integer primary key) inherits (id); -NOTICE: merging column "id" with inherited definition -create table test_3 (id integer primary key) inherits (id); -NOTICE: merging column "id" with inherited definition -insert into test_1 (name) values ('Test 1'); -insert into test_1 (name) values ('Test 2'); -insert into test_2 (name) values ('Test 3'); -insert into test_2 (name) values ('Test 4'); -insert into test_3 (name) values ('Test 5'); -insert into test_3 (name) values ('Test 6'); -create view id_ordered as select * from id order by id; -create rule update_id_ordered as on update to id_ordered - do instead update id set name = new.name where id = old.id; -select * from id_ordered; - id | name -----+-------- - 1 | Test 1 - 2 | Test 2 - 3 | Test 3 - 4 | Test 4 - 5 | Test 5 - 6 | Test 6 -(6 rows) - -update id_ordered set name = 'update 2' where id = 2; -update id_ordered set name = 'update 4' where id = 4; -update id_ordered set name = 'update 5' where id = 5; -select * from id_ordered; - id | name -----+---------- - 1 | Test 1 - 2 | update 2 - 3 | Test 3 - 4 | update 4 - 5 | update 5 - 6 | Test 6 -(6 rows) - -drop table id cascade; -NOTICE: drop cascades to 4 other objects -DETAIL: drop cascades to table test_1 -drop cascades to table test_2 -drop cascades to table test_3 -drop cascades to view id_ordered --- --- check corner case where an entirely-dummy subplan is created by --- constraint exclusion --- -create temp table t1 (a integer primary key); -create temp table t1_1 (check (a >= 0 and a < 10)) inherits (t1); -create temp table t1_2 (check (a >= 10 and a < 20)) inherits (t1); -create rule t1_ins_1 as on insert to t1 - where new.a >= 0 and new.a < 10 - do instead - insert into t1_1 values (new.a); -create rule t1_ins_2 as on insert to t1 - where new.a >= 10 and new.a < 20 - do instead - insert into t1_2 values (new.a); -create rule t1_upd_1 as on update to t1 - where old.a >= 0 and old.a < 10 - do instead - update t1_1 set a = new.a where a = old.a; -create rule t1_upd_2 as on update to t1 - where old.a >= 10 and old.a < 20 - do instead - update t1_2 set a = new.a where a = old.a; -set constraint_exclusion = on; -insert into t1 select * from generate_series(5,19,1) g; -update t1 set a = 4 where a = 5; -select * from only t1; - a ---- -(0 rows) - -select * from only t1_1; - a ---- - 6 - 7 - 8 - 9 - 4 -(5 rows) - -select * from only t1_2; - a ----- - 10 - 11 - 12 - 13 - 14 - 15 - 16 - 17 - 18 - 19 -(10 rows) - -reset constraint_exclusion; --- test FOR UPDATE in rules -create table rules_base(f1 int, f2 int); -insert into rules_base values(1,2), (11,12); -create rule r1 as on update to rules_base do instead - select * from rules_base where f1 = 1 for update; -update rules_base set f2 = f2 + 1; - f1 | f2 -----+---- - 1 | 2 -(1 row) - -create or replace rule r1 as on update to rules_base do instead - select * from rules_base where f1 = 11 for update of rules_base; -update rules_base set f2 = f2 + 1; - f1 | f2 -----+---- - 11 | 12 -(1 row) - -create or replace rule r1 as on update to rules_base do instead - select * from rules_base where f1 = 11 for update of old; -- error -ERROR: relation "old" in FOR UPDATE clause not found in FROM clause -LINE 
2: select * from rules_base where f1 = 11 for update of old; - ^ -drop table rules_base; --- test various flavors of pg_get_viewdef() -select pg_get_viewdef('shoe'::regclass) as unpretty; - unpretty ------------------------------------------------- - SELECT sh.shoename, + - sh.sh_avail, + - sh.slcolor, + - sh.slminlen, + - (sh.slminlen * un.un_fact) AS slminlen_cm,+ - sh.slmaxlen, + - (sh.slmaxlen * un.un_fact) AS slmaxlen_cm,+ - sh.slunit + - FROM shoe_data sh, + - unit un + - WHERE (sh.slunit = un.un_name); -(1 row) - -select pg_get_viewdef('shoe'::regclass,true) as pretty; - pretty ----------------------------------------------- - SELECT sh.shoename, + - sh.sh_avail, + - sh.slcolor, + - sh.slminlen, + - sh.slminlen * un.un_fact AS slminlen_cm,+ - sh.slmaxlen, + - sh.slmaxlen * un.un_fact AS slmaxlen_cm,+ - sh.slunit + - FROM shoe_data sh, + - unit un + - WHERE sh.slunit = un.un_name; -(1 row) - -select pg_get_viewdef('shoe'::regclass,0) as prettier; - prettier ----------------------------------------------- - SELECT sh.shoename, + - sh.sh_avail, + - sh.slcolor, + - sh.slminlen, + - sh.slminlen * un.un_fact AS slminlen_cm,+ - sh.slmaxlen, + - sh.slmaxlen * un.un_fact AS slmaxlen_cm,+ - sh.slunit + - FROM shoe_data sh, + - unit un + - WHERE sh.slunit = un.un_name; -(1 row) - --- --- check multi-row VALUES in rules --- -create table rules_src(f1 int, f2 int default 0); -create table rules_log(f1 int, f2 int, tag text, id serial); -insert into rules_src values(1,2), (11,12); -create rule r1 as on update to rules_src do also - insert into rules_log values(old.*, 'old', default), (new.*, 'new', default); -update rules_src set f2 = f2 + 1; -update rules_src set f2 = f2 * 10; -select * from rules_src; - f1 | f2 -----+----- - 1 | 30 - 11 | 130 -(2 rows) - -select * from rules_log; - f1 | f2 | tag | id -----+-----+-----+---- - 1 | 2 | old | 1 - 1 | 3 | new | 2 - 11 | 12 | old | 3 - 11 | 13 | new | 4 - 1 | 3 | old | 5 - 1 | 30 | new | 6 - 11 | 13 | old | 7 - 11 | 130 | new | 8 -(8 rows) - -create rule r2 as on update to rules_src do also - values(old.*, 'old'), (new.*, 'new'); -update rules_src set f2 = f2 / 10; - column1 | column2 | column3 ----------+---------+--------- - 1 | 30 | old - 1 | 3 | new - 11 | 130 | old - 11 | 13 | new -(4 rows) - -create rule r3 as on insert to rules_src do also - insert into rules_log values(null, null, '-', default), (new.*, 'new', default); -insert into rules_src values(22,23), (33,default); -select * from rules_src; - f1 | f2 -----+---- - 1 | 3 - 11 | 13 - 22 | 23 - 33 | 0 -(4 rows) - -select * from rules_log; - f1 | f2 | tag | id -----+-----+-----+---- - 1 | 2 | old | 1 - 1 | 3 | new | 2 - 11 | 12 | old | 3 - 11 | 13 | new | 4 - 1 | 3 | old | 5 - 1 | 30 | new | 6 - 11 | 13 | old | 7 - 11 | 130 | new | 8 - 1 | 30 | old | 9 - 1 | 3 | new | 10 - 11 | 130 | old | 11 - 11 | 13 | new | 12 - | | - | 13 - 22 | 23 | new | 14 - | | - | 15 - 33 | 0 | new | 16 -(16 rows) - -create rule r4 as on delete to rules_src do notify rules_src_deletion; --- --- Ensure an aliased target relation for insert is correctly deparsed. --- -create rule r5 as on insert to rules_src do instead insert into rules_log AS trgt SELECT NEW.* RETURNING trgt.f1, trgt.f2; -create rule r6 as on update to rules_src do instead UPDATE rules_log AS trgt SET tag = 'updated' WHERE trgt.f1 = new.f1; --- --- Check deparse disambiguation of INSERT/UPDATE/DELETE targets. 
--- -create rule r7 as on delete to rules_src do instead - with wins as (insert into int4_tbl as trgt values (0) returning *), - wupd as (update int4_tbl trgt set f1 = f1+1 returning *), - wdel as (delete from int4_tbl trgt where f1 = 0 returning *) - insert into rules_log AS trgt select old.* from wins, wupd, wdel - returning trgt.f1, trgt.f2; --- check display of all rules added above -\d+ rules_src - Table "public.rules_src" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - f1 | integer | | | | plain | | - f2 | integer | | | 0 | plain | | -Rules: - r1 AS - ON UPDATE TO rules_src DO INSERT INTO rules_log (f1, f2, tag, id) VALUES (old.f1,old.f2,'old'::text,DEFAULT), (new.f1,new.f2,'new'::text,DEFAULT) - r2 AS - ON UPDATE TO rules_src DO VALUES (old.f1,old.f2,'old'::text), (new.f1,new.f2,'new'::text) - r3 AS - ON INSERT TO rules_src DO INSERT INTO rules_log (f1, f2, tag, id) VALUES (NULL::integer,NULL::integer,'-'::text,DEFAULT), (new.f1,new.f2,'new'::text,DEFAULT) - r4 AS - ON DELETE TO rules_src DO - NOTIFY rules_src_deletion - r5 AS - ON INSERT TO rules_src DO INSTEAD INSERT INTO rules_log AS trgt (f1, f2) SELECT new.f1, - new.f2 - RETURNING trgt.f1, - trgt.f2 - r6 AS - ON UPDATE TO rules_src DO INSTEAD UPDATE rules_log trgt SET tag = 'updated'::text - WHERE trgt.f1 = new.f1 - r7 AS - ON DELETE TO rules_src DO INSTEAD WITH wins AS ( - INSERT INTO int4_tbl AS trgt_1 (f1) - VALUES (0) - RETURNING trgt_1.f1 - ), wupd AS ( - UPDATE int4_tbl trgt_1 SET f1 = trgt_1.f1 + 1 - RETURNING trgt_1.f1 - ), wdel AS ( - DELETE FROM int4_tbl trgt_1 - WHERE trgt_1.f1 = 0 - RETURNING trgt_1.f1 - ) - INSERT INTO rules_log AS trgt (f1, f2) SELECT old.f1, - old.f2 - FROM wins, - wupd, - wdel - RETURNING trgt.f1, - trgt.f2 - --- --- Also check multiassignment deparsing. 
--- -create table rule_t1(f1 int, f2 int); -create table rule_dest(f1 int, f2 int[], tag text); -create rule rr as on update to rule_t1 do instead UPDATE rule_dest trgt - SET (f2[1], f1, tag) = (SELECT new.f2, new.f1, 'updated'::varchar) - WHERE trgt.f1 = new.f1 RETURNING new.*; -\d+ rule_t1 - Table "public.rule_t1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - f1 | integer | | | | plain | | - f2 | integer | | | | plain | | -Rules: - rr AS - ON UPDATE TO rule_t1 DO INSTEAD UPDATE rule_dest trgt SET (f2[1], f1, tag) = ( SELECT new.f2, - new.f1, - 'updated'::character varying AS "varchar") - WHERE trgt.f1 = new.f1 - RETURNING new.f1, - new.f2 - -drop table rule_t1, rule_dest; --- --- Test implicit LATERAL references to old/new in rules --- -CREATE TABLE rule_t1(a int, b text DEFAULT 'xxx', c int); -CREATE VIEW rule_v1 AS SELECT * FROM rule_t1; -CREATE RULE v1_ins AS ON INSERT TO rule_v1 - DO ALSO INSERT INTO rule_t1 - SELECT * FROM (SELECT a + 10 FROM rule_t1 WHERE a = NEW.a) tt; -CREATE RULE v1_upd AS ON UPDATE TO rule_v1 - DO ALSO UPDATE rule_t1 t - SET c = tt.a * 10 - FROM (SELECT a FROM rule_t1 WHERE a = OLD.a) tt WHERE t.a = tt.a; -INSERT INTO rule_v1 VALUES (1, 'a'), (2, 'b'); -UPDATE rule_v1 SET b = upper(b); -SELECT * FROM rule_t1; - a | b | c -----+-----+----- - 1 | A | 10 - 2 | B | 20 - 11 | XXX | 110 - 12 | XXX | 120 -(4 rows) - -DROP TABLE rule_t1 CASCADE; -NOTICE: drop cascades to view rule_v1 --- --- check alter rename rule --- -CREATE TABLE rule_t1 (a INT); -CREATE VIEW rule_v1 AS SELECT * FROM rule_t1; -CREATE RULE InsertRule AS - ON INSERT TO rule_v1 - DO INSTEAD - INSERT INTO rule_t1 VALUES(new.a); -ALTER RULE InsertRule ON rule_v1 RENAME to NewInsertRule; -INSERT INTO rule_v1 VALUES(1); -SELECT * FROM rule_v1; - a ---- - 1 -(1 row) - -\d+ rule_v1 - View "public.rule_v1" - Column | Type | Collation | Nullable | Default | Storage | Description ---------+---------+-----------+----------+---------+---------+------------- - a | integer | | | | plain | -View definition: - SELECT a - FROM rule_t1; -Rules: - newinsertrule AS - ON INSERT TO rule_v1 DO INSTEAD INSERT INTO rule_t1 (a) - VALUES (new.a) - --- --- error conditions for alter rename rule --- -ALTER RULE InsertRule ON rule_v1 RENAME TO NewInsertRule; -- doesn't exist -ERROR: rule "insertrule" for relation "rule_v1" does not exist -ALTER RULE NewInsertRule ON rule_v1 RENAME TO "_RETURN"; -- already exists -ERROR: rule "_RETURN" for relation "rule_v1" already exists -ALTER RULE "_RETURN" ON rule_v1 RENAME TO abc; -- ON SELECT rule cannot be renamed -ERROR: renaming an ON SELECT rule is not allowed -DROP VIEW rule_v1; -DROP TABLE rule_t1; --- --- check display of VALUES in view definitions --- -create view rule_v1 as values(1,2); -\d+ rule_v1 - View "public.rule_v1" - Column | Type | Collation | Nullable | Default | Storage | Description ----------+---------+-----------+----------+---------+---------+------------- - column1 | integer | | | | plain | - column2 | integer | | | | plain | -View definition: - VALUES (1,2); - -alter table rule_v1 rename column column2 to q2; -\d+ rule_v1 - View "public.rule_v1" - Column | Type | Collation | Nullable | Default | Storage | Description ----------+---------+-----------+----------+---------+---------+------------- - column1 | integer | | | | plain | - q2 | integer | | | | plain | -View definition: - SELECT column1, - column2 AS q2 - FROM (VALUES (1,2)) 
"*VALUES*"; - -drop view rule_v1; -create view rule_v1(x) as values(1,2); -\d+ rule_v1 - View "public.rule_v1" - Column | Type | Collation | Nullable | Default | Storage | Description ----------+---------+-----------+----------+---------+---------+------------- - x | integer | | | | plain | - column2 | integer | | | | plain | -View definition: - SELECT column1 AS x, - column2 - FROM (VALUES (1,2)) "*VALUES*"; - -drop view rule_v1; -create view rule_v1(x) as select * from (values(1,2)) v; -\d+ rule_v1 - View "public.rule_v1" - Column | Type | Collation | Nullable | Default | Storage | Description ----------+---------+-----------+----------+---------+---------+------------- - x | integer | | | | plain | - column2 | integer | | | | plain | -View definition: - SELECT column1 AS x, - column2 - FROM ( VALUES (1,2)) v; - -drop view rule_v1; -create view rule_v1(x) as select * from (values(1,2)) v(q,w); -\d+ rule_v1 - View "public.rule_v1" - Column | Type | Collation | Nullable | Default | Storage | Description ---------+---------+-----------+----------+---------+---------+------------- - x | integer | | | | plain | - w | integer | | | | plain | -View definition: - SELECT q AS x, - w - FROM ( VALUES (1,2)) v(q, w); - -drop view rule_v1; --- --- Check DO INSTEAD rules with ON CONFLICT --- -CREATE TABLE hats ( - hat_name char(10) primary key, - hat_color char(10) -- hat color -); -CREATE TABLE hat_data ( - hat_name char(10), - hat_color char(10) -- hat color -); -create unique index hat_data_unique_idx - on hat_data (hat_name COLLATE "C" bpchar_pattern_ops); --- DO NOTHING with ON CONFLICT -CREATE RULE hat_nosert AS ON INSERT TO hats - DO INSTEAD - INSERT INTO hat_data VALUES ( - NEW.hat_name, - NEW.hat_color) - ON CONFLICT (hat_name COLLATE "C" bpchar_pattern_ops) WHERE hat_color = 'green' - DO NOTHING - RETURNING *; -SELECT definition FROM pg_rules WHERE tablename = 'hats' ORDER BY rulename; - definition ---------------------------------------------------------------------------------------------- - CREATE RULE hat_nosert AS + - ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + - VALUES (new.hat_name, new.hat_color) ON CONFLICT(hat_name COLLATE "C" bpchar_pattern_ops)+ - WHERE (hat_color = 'green'::bpchar) DO NOTHING + - RETURNING hat_data.hat_name, + - hat_data.hat_color; -(1 row) - --- Works (projects row) -INSERT INTO hats VALUES ('h7', 'black') RETURNING *; - hat_name | hat_color -------------+------------ - h7 | black -(1 row) - --- Works (does nothing) -INSERT INTO hats VALUES ('h7', 'black') RETURNING *; - hat_name | hat_color -----------+----------- -(0 rows) - -SELECT tablename, rulename, definition FROM pg_rules - WHERE tablename = 'hats'; - tablename | rulename | definition ------------+------------+--------------------------------------------------------------------------------------------- - hats | hat_nosert | CREATE RULE hat_nosert AS + - | | ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + - | | VALUES (new.hat_name, new.hat_color) ON CONFLICT(hat_name COLLATE "C" bpchar_pattern_ops)+ - | | WHERE (hat_color = 'green'::bpchar) DO NOTHING + - | | RETURNING hat_data.hat_name, + - | | hat_data.hat_color; -(1 row) - -DROP RULE hat_nosert ON hats; --- DO NOTHING without ON CONFLICT -CREATE RULE hat_nosert_all AS ON INSERT TO hats - DO INSTEAD - INSERT INTO hat_data VALUES ( - NEW.hat_name, - NEW.hat_color) - ON CONFLICT - DO NOTHING - RETURNING *; -SELECT definition FROM pg_rules WHERE tablename = 'hats' ORDER BY rulename; - 
definition -------------------------------------------------------------------------------------- - CREATE RULE hat_nosert_all AS + - ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color)+ - VALUES (new.hat_name, new.hat_color) ON CONFLICT DO NOTHING + - RETURNING hat_data.hat_name, + - hat_data.hat_color; -(1 row) - -DROP RULE hat_nosert_all ON hats; --- Works (does nothing) -INSERT INTO hats VALUES ('h7', 'black') RETURNING *; - hat_name | hat_color -------------+------------ - h7 | black -(1 row) - --- DO UPDATE with a WHERE clause -CREATE RULE hat_upsert AS ON INSERT TO hats - DO INSTEAD - INSERT INTO hat_data VALUES ( - NEW.hat_name, - NEW.hat_color) - ON CONFLICT (hat_name) - DO UPDATE - SET hat_name = hat_data.hat_name, hat_color = excluded.hat_color - WHERE excluded.hat_color <> 'forbidden' AND hat_data.* != excluded.* - RETURNING *; -SELECT definition FROM pg_rules WHERE tablename = 'hats' ORDER BY rulename; - definition ------------------------------------------------------------------------------------------------------------------------------------------ - CREATE RULE hat_upsert AS + - ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + - VALUES (new.hat_name, new.hat_color) ON CONFLICT(hat_name) DO UPDATE SET hat_name = hat_data.hat_name, hat_color = excluded.hat_color+ - WHERE ((excluded.hat_color <> 'forbidden'::bpchar) AND (hat_data.* <> excluded.*)) + - RETURNING hat_data.hat_name, + - hat_data.hat_color; -(1 row) - --- Works (does upsert) -INSERT INTO hats VALUES ('h8', 'black') RETURNING *; - hat_name | hat_color -------------+------------ - h8 | black -(1 row) - -SELECT * FROM hat_data WHERE hat_name = 'h8'; - hat_name | hat_color -------------+------------ - h8 | black -(1 row) - -INSERT INTO hats VALUES ('h8', 'white') RETURNING *; - hat_name | hat_color -------------+------------ - h8 | white -(1 row) - -SELECT * FROM hat_data WHERE hat_name = 'h8'; - hat_name | hat_color -------------+------------ - h8 | white -(1 row) - -INSERT INTO hats VALUES ('h8', 'forbidden') RETURNING *; - hat_name | hat_color -----------+----------- -(0 rows) - -SELECT * FROM hat_data WHERE hat_name = 'h8'; - hat_name | hat_color -------------+------------ - h8 | white -(1 row) - -SELECT tablename, rulename, definition FROM pg_rules - WHERE tablename = 'hats'; - tablename | rulename | definition ------------+------------+----------------------------------------------------------------------------------------------------------------------------------------- - hats | hat_upsert | CREATE RULE hat_upsert AS + - | | ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + - | | VALUES (new.hat_name, new.hat_color) ON CONFLICT(hat_name) DO UPDATE SET hat_name = hat_data.hat_name, hat_color = excluded.hat_color+ - | | WHERE ((excluded.hat_color <> 'forbidden'::bpchar) AND (hat_data.* <> excluded.*)) + - | | RETURNING hat_data.hat_name, + - | | hat_data.hat_color; -(1 row) - --- ensure explain works for on insert conflict rules -explain (costs off) INSERT INTO hats VALUES ('h8', 'forbidden') RETURNING *; - QUERY PLAN -------------------------------------------------------------------------------------------------- - Insert on hat_data - Conflict Resolution: UPDATE - Conflict Arbiter Indexes: hat_data_unique_idx - Conflict Filter: ((excluded.hat_color <> 'forbidden'::bpchar) AND (hat_data.* <> excluded.*)) - -> Result -(5 rows) - --- ensure upserting into a rule, with a CTE (different offsets!) 
works -WITH data(hat_name, hat_color) AS MATERIALIZED ( - VALUES ('h8', 'green'), - ('h9', 'blue'), - ('h7', 'forbidden') -) -INSERT INTO hats - SELECT * FROM data -RETURNING *; - hat_name | hat_color -------------+------------ - h8 | green - h9 | blue -(2 rows) - -EXPLAIN (costs off) -WITH data(hat_name, hat_color) AS MATERIALIZED ( - VALUES ('h8', 'green'), - ('h9', 'blue'), - ('h7', 'forbidden') -) -INSERT INTO hats - SELECT * FROM data -RETURNING *; - QUERY PLAN -------------------------------------------------------------------------------------------------- - Insert on hat_data - Conflict Resolution: UPDATE - Conflict Arbiter Indexes: hat_data_unique_idx - Conflict Filter: ((excluded.hat_color <> 'forbidden'::bpchar) AND (hat_data.* <> excluded.*)) - CTE data - -> Values Scan on "*VALUES*" - -> CTE Scan on data -(7 rows) - -SELECT * FROM hat_data WHERE hat_name IN ('h8', 'h9', 'h7') ORDER BY hat_name; - hat_name | hat_color -------------+------------ - h7 | black - h8 | green - h9 | blue -(3 rows) - -DROP RULE hat_upsert ON hats; -drop table hats; -drop table hat_data; --- test for pg_get_functiondef properly regurgitating SET parameters --- Note that the function is kept around to stress pg_dump. -CREATE FUNCTION func_with_set_params() RETURNS integer - AS 'select 1;' - LANGUAGE SQL - SET search_path TO PG_CATALOG - SET extra_float_digits TO 2 - SET work_mem TO '4MB' - SET datestyle to iso, mdy - SET local_preload_libraries TO "Mixed/Case", 'c:/''a"/path', '', '0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789' - IMMUTABLE STRICT; -SELECT pg_get_functiondef('func_with_set_params()'::regprocedure); - pg_get_functiondef --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - CREATE OR REPLACE FUNCTION public.func_with_set_params() + - RETURNS integer + - LANGUAGE sql + - IMMUTABLE STRICT + - SET search_path TO 'pg_catalog' + - SET extra_float_digits TO '2' + - SET work_mem TO '4MB' + - SET "DateStyle" TO 'iso, mdy' + - SET local_preload_libraries TO 'Mixed/Case', 'c:/''a"/path', '', '0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789'+ - AS $function$select 1;$function$ + - -(1 row) - --- tests for pg_get_*def with invalid objects -SELECT pg_get_constraintdef(0); - pg_get_constraintdef ----------------------- - -(1 row) - -SELECT pg_get_functiondef(0); - pg_get_functiondef --------------------- - -(1 row) - -SELECT pg_get_indexdef(0); - pg_get_indexdef ------------------ - -(1 row) - -SELECT pg_get_ruledef(0); - pg_get_ruledef ----------------- - -(1 row) - -SELECT pg_get_statisticsobjdef(0); - pg_get_statisticsobjdef -------------------------- - -(1 row) - -SELECT pg_get_triggerdef(0); - pg_get_triggerdef -------------------- - -(1 row) - -SELECT pg_get_viewdef(0); - pg_get_viewdef ----------------- - -(1 row) - -SELECT pg_get_function_arguments(0); - pg_get_function_arguments ---------------------------- - -(1 row) - -SELECT pg_get_function_identity_arguments(0); - pg_get_function_identity_arguments ------------------------------------- - -(1 row) - -SELECT pg_get_function_result(0); - pg_get_function_result ------------------------- - -(1 row) - -SELECT pg_get_function_arg_default(0, 0); - pg_get_function_arg_default ------------------------------ - -(1 row) - -SELECT pg_get_function_arg_default('pg_class'::regclass, 0); - pg_get_function_arg_default 
------------------------------ - -(1 row) - -SELECT pg_get_partkeydef(0); - pg_get_partkeydef -------------------- - -(1 row) - --- test rename for a rule defined on a partitioned table -CREATE TABLE rules_parted_table (a int) PARTITION BY LIST (a); -CREATE TABLE rules_parted_table_1 PARTITION OF rules_parted_table FOR VALUES IN (1); -CREATE RULE rules_parted_table_insert AS ON INSERT to rules_parted_table - DO INSTEAD INSERT INTO rules_parted_table_1 VALUES (NEW.*); -ALTER RULE rules_parted_table_insert ON rules_parted_table RENAME TO rules_parted_table_insert_redirect; -DROP TABLE rules_parted_table; --- --- test MERGE --- -CREATE TABLE rule_merge1 (a int, b text); -CREATE TABLE rule_merge2 (a int, b text); -CREATE RULE rule1 AS ON INSERT TO rule_merge1 - DO INSTEAD INSERT INTO rule_merge2 VALUES (NEW.*); -CREATE RULE rule2 AS ON UPDATE TO rule_merge1 - DO INSTEAD UPDATE rule_merge2 SET a = NEW.a, b = NEW.b - WHERE a = OLD.a; -CREATE RULE rule3 AS ON DELETE TO rule_merge1 - DO INSTEAD DELETE FROM rule_merge2 WHERE a = OLD.a; --- MERGE not supported for table with rules -MERGE INTO rule_merge1 t USING (SELECT 1 AS a) s - ON t.a = s.a - WHEN MATCHED AND t.a < 2 THEN - UPDATE SET b = b || ' updated by merge' - WHEN MATCHED AND t.a > 2 THEN - DELETE - WHEN NOT MATCHED THEN - INSERT VALUES (s.a, ''); -ERROR: cannot execute MERGE on relation "rule_merge1" -DETAIL: MERGE is not supported for relations with rules. --- should be ok with the other table though -MERGE INTO rule_merge2 t USING (SELECT 1 AS a) s - ON t.a = s.a - WHEN MATCHED AND t.a < 2 THEN - UPDATE SET b = b || ' updated by merge' - WHEN MATCHED AND t.a > 2 THEN - DELETE - WHEN NOT MATCHED THEN - INSERT VALUES (s.a, ''); --- also ok if the rules are disabled -ALTER TABLE rule_merge1 DISABLE RULE rule1; -ALTER TABLE rule_merge1 DISABLE RULE rule2; -ALTER TABLE rule_merge1 DISABLE RULE rule3; -MERGE INTO rule_merge1 t USING (SELECT 1 AS a) s - ON t.a = s.a - WHEN MATCHED AND t.a < 2 THEN - UPDATE SET b = b || ' updated by merge' - WHEN MATCHED AND t.a > 2 THEN - DELETE - WHEN NOT MATCHED THEN - INSERT VALUES (s.a, ''); --- test deparsing -CREATE TABLE sf_target(id int, data text, filling int[]); -CREATE FUNCTION merge_sf_test() - RETURNS TABLE(action text, a int, b text, id int, data text, filling int[]) - LANGUAGE sql -BEGIN ATOMIC - MERGE INTO sf_target t - USING rule_merge1 s - ON (s.a = t.id) -WHEN MATCHED - AND (s.a + t.id) = 42 - THEN UPDATE SET data = repeat(t.data, s.a) || s.b, id = length(s.b) -WHEN NOT MATCHED - AND (s.b IS NOT NULL) - THEN INSERT (data, id) - VALUES (s.b, s.a) -WHEN MATCHED - AND length(s.b || t.data) > 10 - THEN UPDATE SET data = s.b -WHEN MATCHED - AND s.a > 200 - THEN UPDATE SET filling[s.a] = t.id -WHEN MATCHED - AND s.a > 100 - THEN DELETE -WHEN MATCHED - THEN DO NOTHING -WHEN NOT MATCHED - AND s.a > 200 - THEN INSERT DEFAULT VALUES -WHEN NOT MATCHED - AND s.a > 100 - THEN INSERT (id, data) OVERRIDING USER VALUE - VALUES (s.a, DEFAULT) -WHEN NOT MATCHED - AND s.a > 0 - THEN INSERT - VALUES (s.a, s.b, DEFAULT) -WHEN NOT MATCHED - THEN INSERT (filling[1], id) - VALUES (s.a, s.a) -RETURNING - merge_action() AS action, *; -END; -\sf merge_sf_test -CREATE OR REPLACE FUNCTION public.merge_sf_test() - RETURNS TABLE(action text, a integer, b text, id integer, data text, filling integer[]) - LANGUAGE sql -BEGIN ATOMIC - MERGE INTO sf_target t - USING rule_merge1 s - ON (s.a = t.id) - WHEN MATCHED - AND ((s.a + t.id) = 42) - THEN UPDATE SET data = (repeat(t.data, s.a) || s.b), id = length(s.b) - WHEN NOT 
MATCHED - AND (s.b IS NOT NULL) - THEN INSERT (data, id) - VALUES (s.b, s.a) - WHEN MATCHED - AND (length((s.b || t.data)) > 10) - THEN UPDATE SET data = s.b - WHEN MATCHED - AND (s.a > 200) - THEN UPDATE SET filling[s.a] = t.id - WHEN MATCHED - AND (s.a > 100) - THEN DELETE - WHEN MATCHED - THEN DO NOTHING - WHEN NOT MATCHED - AND (s.a > 200) - THEN INSERT DEFAULT VALUES - WHEN NOT MATCHED - AND (s.a > 100) - THEN INSERT (id, data) OVERRIDING USER VALUE - VALUES (s.a, DEFAULT) - WHEN NOT MATCHED - AND (s.a > 0) - THEN INSERT (id, data, filling) - VALUES (s.a, s.b, DEFAULT) - WHEN NOT MATCHED - THEN INSERT (filling[1], id) - VALUES (s.a, s.a) - RETURNING MERGE_ACTION() AS action, - s.a, - s.b, - t.id, - t.data, - t.filling; -END -CREATE FUNCTION merge_sf_test2() - RETURNS void - LANGUAGE sql -BEGIN ATOMIC - MERGE INTO sf_target t - USING rule_merge1 s - ON (s.a = t.id) -WHEN NOT MATCHED - THEN INSERT (data, id) - VALUES (s.a, s.a) -WHEN MATCHED - THEN UPDATE SET data = s.b -WHEN NOT MATCHED BY SOURCE - THEN DELETE; -END; -\sf merge_sf_test2 -CREATE OR REPLACE FUNCTION public.merge_sf_test2() - RETURNS void - LANGUAGE sql -BEGIN ATOMIC - MERGE INTO sf_target t - USING rule_merge1 s - ON (s.a = t.id) - WHEN NOT MATCHED BY TARGET - THEN INSERT (data, id) - VALUES (s.a, s.a) - WHEN MATCHED - THEN UPDATE SET data = s.b - WHEN NOT MATCHED BY SOURCE - THEN DELETE; -END -DROP FUNCTION merge_sf_test; -DROP FUNCTION merge_sf_test2; -DROP TABLE sf_target; --- --- Test enabling/disabling --- -CREATE TABLE ruletest1 (a int); -CREATE TABLE ruletest2 (b int); -CREATE RULE rule1 AS ON INSERT TO ruletest1 - DO INSTEAD INSERT INTO ruletest2 VALUES (NEW.*); -INSERT INTO ruletest1 VALUES (1); -ALTER TABLE ruletest1 DISABLE RULE rule1; -INSERT INTO ruletest1 VALUES (2); -ALTER TABLE ruletest1 ENABLE RULE rule1; -SET session_replication_role = replica; -INSERT INTO ruletest1 VALUES (3); -ALTER TABLE ruletest1 ENABLE REPLICA RULE rule1; -INSERT INTO ruletest1 VALUES (4); -RESET session_replication_role; -INSERT INTO ruletest1 VALUES (5); -SELECT * FROM ruletest1; - a ---- - 2 - 3 - 5 -(3 rows) - -SELECT * FROM ruletest2; - b ---- - 1 - 4 -(2 rows) - -DROP TABLE ruletest1; -DROP TABLE ruletest2; --- --- Test non-SELECT rule on security invoker view. --- Should use view owner's permissions. --- -CREATE USER regress_rule_user1; -CREATE TABLE ruletest_t1 (x int); -CREATE TABLE ruletest_t2 (x int); -CREATE VIEW ruletest_v1 WITH (security_invoker=true) AS - SELECT * FROM ruletest_t1; -GRANT INSERT ON ruletest_v1 TO regress_rule_user1; -CREATE RULE rule1 AS ON INSERT TO ruletest_v1 - DO INSTEAD INSERT INTO ruletest_t2 VALUES (NEW.*); -SET SESSION AUTHORIZATION regress_rule_user1; -INSERT INTO ruletest_v1 VALUES (1); -RESET SESSION AUTHORIZATION; --- Test that main query's relation's permissions are checked before --- the rule action's relation's. 
-CREATE TABLE ruletest_t3 (x int); -CREATE RULE rule2 AS ON UPDATE TO ruletest_t1 - DO INSTEAD INSERT INTO ruletest_t2 VALUES (OLD.*); -REVOKE ALL ON ruletest_t2 FROM regress_rule_user1; -REVOKE ALL ON ruletest_t3 FROM regress_rule_user1; -ALTER TABLE ruletest_t1 OWNER TO regress_rule_user1; -SET SESSION AUTHORIZATION regress_rule_user1; -UPDATE ruletest_t1 t1 SET x = 0 FROM ruletest_t3 t3 WHERE t1.x = t3.x; -ERROR: permission denied for table ruletest_t3 -RESET SESSION AUTHORIZATION; -SELECT * FROM ruletest_t1; - x ---- -(0 rows) - -SELECT * FROM ruletest_t2; - x ---- - 1 -(1 row) - -DROP VIEW ruletest_v1; -DROP RULE rule2 ON ruletest_t1; -DROP TABLE ruletest_t3; -DROP TABLE ruletest_t2; -DROP TABLE ruletest_t1; -DROP USER regress_rule_user1; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/psql.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/psql.out --- /Users/admin/pgsql/src/test/regress/expected/psql.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/psql.out 2024-12-13 13:20:10 @@ -1,6830 +1,2 @@ --- --- Tests for psql features that aren't closely connected to any --- specific server features --- --- \set --- fail: invalid name -\set invalid/name foo -invalid variable name: "invalid/name" --- fail: invalid value for special variable -\set AUTOCOMMIT foo -unrecognized value "foo" for "AUTOCOMMIT": Boolean expected -\set FETCH_COUNT foo -invalid value "foo" for "FETCH_COUNT": integer expected --- check handling of built-in boolean variable -\echo :ON_ERROR_ROLLBACK -off -\set ON_ERROR_ROLLBACK -\echo :ON_ERROR_ROLLBACK -on -\set ON_ERROR_ROLLBACK foo -unrecognized value "foo" for "ON_ERROR_ROLLBACK" -Available values are: on, off, interactive. -\echo :ON_ERROR_ROLLBACK -on -\set ON_ERROR_ROLLBACK on -\echo :ON_ERROR_ROLLBACK -on -\unset ON_ERROR_ROLLBACK -\echo :ON_ERROR_ROLLBACK -off --- \g and \gx -SELECT 1 as one, 2 as two \g - one | two ------+----- - 1 | 2 -(1 row) - -\gx --[ RECORD 1 ] -one | 1 -two | 2 - -SELECT 3 as three, 4 as four \gx --[ RECORD 1 ] -three | 3 -four | 4 - -\g - three | four --------+------ - 3 | 4 -(1 row) - --- \gx should work in FETCH_COUNT mode too -\set FETCH_COUNT 1 -SELECT 1 as one, 2 as two \g - one | two ------+----- - 1 | 2 -(1 row) - -\gx --[ RECORD 1 ] -one | 1 -two | 2 - -SELECT 3 as three, 4 as four \gx --[ RECORD 1 ] -three | 3 -four | 4 - -\g - three | four --------+------ - 3 | 4 -(1 row) - -\unset FETCH_COUNT --- \g/\gx with pset options -SELECT 1 as one, 2 as two \g (format=csv csv_fieldsep='\t') -one two -1 2 -\g - one | two ------+----- - 1 | 2 -(1 row) - -SELECT 1 as one, 2 as two \gx (title='foo bar') -foo bar --[ RECORD 1 ] -one | 1 -two | 2 - -\g - one | two ------+----- - 1 | 2 -(1 row) - --- \parse (extended query protocol) -\parse -\parse: missing required argument -SELECT 1 \parse '' -SELECT 2 \parse stmt1 -SELECT $1 \parse stmt2 -SELECT $1, $2 \parse stmt3 --- \bind_named (extended query protocol) -\bind_named -\bind_named: missing required argument -\bind_named '' \g - ?column? ----------- - 1 -(1 row) - -\bind_named stmt1 \g - ?column? ----------- - 2 -(1 row) - -\bind_named stmt2 'foo' \g - ?column? ----------- - foo -(1 row) - -\bind_named stmt3 'foo' 'bar' \g - ?column? | ?column? 
-----------+---------- - foo | bar -(1 row) - --- Repeated calls. The second call generates an error, cleaning up the --- statement name set by the first call. -\bind_named stmt4 -\bind_named -\bind_named: missing required argument -\g -ERROR: there is no parameter $1 -LINE 1: SELECT $1, $2 - ^ --- Last \bind_named wins -\bind_named stmt2 'foo' \bind_named stmt3 'foo2' 'bar2' \g - ?column? | ?column? -----------+---------- - foo2 | bar2 -(1 row) - --- Multiple \g calls mean multiple executions -\bind_named stmt2 'foo3' \g \bind_named stmt3 'foo4' 'bar4' \g - ?column? ----------- - foo3 -(1 row) - - ?column? | ?column? -----------+---------- - foo4 | bar4 -(1 row) - --- \close (extended query protocol) -\close -\close: missing required argument -\close '' -\close stmt2 -\close stmt2 -SELECT name, statement FROM pg_prepared_statements ORDER BY name; - name | statement --------+---------------- - stmt1 | SELECT 2 - stmt3 | SELECT $1, $2 -(2 rows) - --- \bind (extended query protocol) -SELECT 1 \bind \g - ?column? ----------- - 1 -(1 row) - -SELECT $1 \bind 'foo' \g - ?column? ----------- - foo -(1 row) - -SELECT $1, $2 \bind 'foo' 'bar' \g - ?column? | ?column? -----------+---------- - foo | bar -(1 row) - --- last \bind wins -select $1::int as col \bind 'foo' \bind 2 \g - col ------ - 2 -(1 row) - --- Multiple \g calls mean multiple executions -select $1::int as col \bind 1 \g \bind 2 \g - col ------ - 1 -(1 row) - - col ------ - 2 -(1 row) - --- errors --- parse error -SELECT foo \bind \g -ERROR: column "foo" does not exist -LINE 1: SELECT foo - ^ --- tcop error -SELECT 1 \; SELECT 2 \bind \g -ERROR: cannot insert multiple commands into a prepared statement --- bind error -SELECT $1, $2 \bind 'foo' \g -ERROR: bind message supplies 1 parameters, but prepared statement "" requires 2 --- bind_named error -\bind_named stmt2 'baz' \g -ERROR: prepared statement "stmt2" does not exist -\bind_named stmt3 'baz' \g -ERROR: bind message supplies 1 parameters, but prepared statement "stmt3" requires 2 --- \gset -select 10 as test01, 20 as test02, 'Hello' as test03 \gset pref01_ -\echo :pref01_test01 :pref01_test02 :pref01_test03 -10 20 Hello --- should fail: bad variable name -select 10 as "bad name" -\gset -invalid variable name: "bad name" -select 97 as "EOF", 'ok' as _foo \gset IGNORE -attempt to \gset into specially treated variable "IGNOREEOF" ignored -\echo :IGNORE_foo :IGNOREEOF -ok 0 --- multiple backslash commands in one line -select 1 as x, 2 as y \gset pref01_ \\ \echo :pref01_x -1 -select 3 as x, 4 as y \gset pref01_ \echo :pref01_x \echo :pref01_y -3 -4 -select 5 as x, 6 as y \gset pref01_ \\ \g \echo :pref01_x :pref01_y - x | y ----+--- - 5 | 6 -(1 row) - -5 6 -select 7 as x, 8 as y \g \gset pref01_ \echo :pref01_x :pref01_y - x | y ----+--- - 7 | 8 -(1 row) - -7 8 --- NULL should unset the variable -\set var2 xyz -select 1 as var1, NULL as var2, 3 as var3 \gset -\echo :var1 :var2 :var3 -1 :var2 3 --- \gset requires just one tuple -select 10 as test01, 20 as test02 from generate_series(1,3) \gset -more than one row returned for \gset -select 10 as test01, 20 as test02 from generate_series(1,0) \gset -no rows returned for \gset --- \gset returns no tuples -select a from generate_series(1, 10) as a where a = 11 \gset -no rows returned for \gset -\echo :ROW_COUNT -0 --- \gset should work in FETCH_COUNT mode too -\set FETCH_COUNT 1 -select 1 as x, 2 as y \gset pref01_ \\ \echo :pref01_x -1 -select 3 as x, 4 as y \gset pref01_ \echo :pref01_x \echo :pref01_y -3 -4 -select 10 as test01, 20 as 
test02 from generate_series(1,3) \gset -more than one row returned for \gset -select 10 as test01, 20 as test02 from generate_series(1,0) \gset -no rows returned for \gset -\unset FETCH_COUNT --- \gdesc -SELECT - NULL AS zero, - 1 AS one, - 2.0 AS two, - 'three' AS three, - $1 AS four, - sin($2) as five, - 'foo'::varchar(4) as six, - CURRENT_DATE AS now -\gdesc - Column | Type ---------+---------------------- - zero | text - one | integer - two | numeric - three | text - four | text - five | double precision - six | character varying(4) - now | date -(8 rows) - --- should work with tuple-returning utilities, such as EXECUTE -PREPARE test AS SELECT 1 AS first, 2 AS second; -EXECUTE test \gdesc - Column | Type ---------+--------- - first | integer - second | integer -(2 rows) - -EXPLAIN EXECUTE test \gdesc - Column | Type -------------+------ - QUERY PLAN | text -(1 row) - --- should fail cleanly - syntax error -SELECT 1 + \gdesc -ERROR: syntax error at end of input -LINE 1: SELECT 1 + - ^ --- check behavior with empty results -SELECT \gdesc -The command has no result, or the result has no columns. -CREATE TABLE bububu(a int) \gdesc -The command has no result, or the result has no columns. --- subject command should not have executed -TABLE bububu; -- fail -ERROR: relation "bububu" does not exist -LINE 1: TABLE bububu; - ^ --- query buffer should remain unchanged -SELECT 1 AS x, 'Hello', 2 AS y, true AS "dirty\name" -\gdesc - Column | Type -------------+--------- - x | integer - ?column? | text - y | integer - dirty\name | boolean -(4 rows) - -\g - x | ?column? | y | dirty\name ----+----------+---+------------ - 1 | Hello | 2 | t -(1 row) - --- all on one line -SELECT 3 AS x, 'Hello', 4 AS y, true AS "dirty\name" \gdesc \g - Column | Type -------------+--------- - x | integer - ?column? | text - y | integer - dirty\name | boolean -(4 rows) - - x | ?column? | y | dirty\name ----+----------+---+------------ - 3 | Hello | 4 | t -(1 row) - --- test for server bug #17983 with empty statement in aborted transaction -set search_path = default; -begin; -bogus; -ERROR: syntax error at or near "bogus" -LINE 1: bogus; - ^ -; -\gdesc -The command has no result, or the result has no columns. 
-rollback; --- \gexec -create temporary table gexec_test(a int, b text, c date, d float); -select format('create index on gexec_test(%I)', attname) -from pg_attribute -where attrelid = 'gexec_test'::regclass and attnum > 0 -order by attnum -\gexec -create index on gexec_test(a) -create index on gexec_test(b) -create index on gexec_test(c) -create index on gexec_test(d) --- \gexec should work in FETCH_COUNT mode too --- (though the fetch limit applies to the executed queries not the meta query) -\set FETCH_COUNT 1 -select 'select 1 as ones', 'select x.y, x.y*2 as double from generate_series(1,4) as x(y)' -union all -select 'drop table gexec_test', NULL -union all -select 'drop table gexec_test', 'select ''2000-01-01''::date as party_over' -\gexec -select 1 as ones - ones ------- - 1 -(1 row) - -select x.y, x.y*2 as double from generate_series(1,4) as x(y) - y | double ----+-------- - 1 | 2 - 2 | 4 - 3 | 6 - 4 | 8 -(4 rows) - -drop table gexec_test -drop table gexec_test -ERROR: table "gexec_test" does not exist -select '2000-01-01'::date as party_over - party_over ------------- - 01-01-2000 -(1 row) - -\unset FETCH_COUNT --- \setenv, \getenv --- ensure MYVAR isn't set -\setenv MYVAR --- in which case, reading it doesn't change the target -\getenv res MYVAR -\echo :res -:res --- now set it -\setenv MYVAR 'environment value' -\getenv res MYVAR -\echo :res -environment value --- show all pset options -\pset -border 1 -columns 0 -csv_fieldsep ',' -expanded off -fieldsep '|' -fieldsep_zero off -footer on -format aligned -linestyle ascii -null '' -numericlocale off -pager 1 -pager_min_lines 0 -recordsep '\n' -recordsep_zero off -tableattr -title -tuples_only off -unicode_border_linestyle single -unicode_column_linestyle single -unicode_header_linestyle single -xheader_width full --- test multi-line headers, wrapping, and newline indicators --- in aligned, unaligned, and wrapped formats -prepare q as select array_to_string(array_agg(repeat('x',2*n)),E'\n') as "ab - -c", array_to_string(array_agg(repeat('y',20-2*n)),E'\n') as "a -bc" from generate_series(1,10) as n(n) group by n>1 order by n>1; -\pset linestyle ascii -\pset expanded off -\pset columns 40 -\pset border 0 -\pset format unaligned -execute q; -ab - -c|a -bc -xx|yyyyyyyyyyyyyyyyyy -xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -(2 rows) -\pset format aligned -execute q; - ab + a + - + bc - c --------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx +yyyyyyyyyyyyyyyy + -xxxxxx +yyyyyyyyyyyyyy + -xxxxxxxx +yyyyyyyyyyyy + -xxxxxxxxxx +yyyyyyyyyy + -xxxxxxxxxxxx +yyyyyyyy + -xxxxxxxxxxxxxx +yyyyyy + -xxxxxxxxxxxxxxxx +yyyy + -xxxxxxxxxxxxxxxxxx +yy + -xxxxxxxxxxxxxxxxxxxx -(2 rows) - -\pset format wrapped -execute q; - ab + a + - + bc - c --------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx +yyyyyyyyyyyyyyyy + -xxxxxx +yyyyyyyyyyyyyy + -xxxxxxxx +yyyyyyyyyyyy + -xxxxxxxxxx +yyyyyyyyyy + -xxxxxxxxxxxx +yyyyyyyy + -xxxxxxxxxxxxxx +yyyyyy + -xxxxxxxxxxxxxxxx +yyyy + -xxxxxxxxxxxxxxxxxx +yy + -xxxxxxxxxxxxxxxxxxxx -(2 rows) - -\pset border 1 -\pset format unaligned -execute q; -ab - -c|a -bc -xx|yyyyyyyyyyyyyyyyyy -xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -(2 rows) -\pset format 
aligned -execute q; - ab +| a + - +| bc - c | -----------------------+-------------------- - xx | yyyyyyyyyyyyyyyyyy - xxxx +| yyyyyyyyyyyyyyyy + - xxxxxx +| yyyyyyyyyyyyyy + - xxxxxxxx +| yyyyyyyyyyyy + - xxxxxxxxxx +| yyyyyyyyyy + - xxxxxxxxxxxx +| yyyyyyyy + - xxxxxxxxxxxxxx +| yyyyyy + - xxxxxxxxxxxxxxxx +| yyyy + - xxxxxxxxxxxxxxxxxx +| yy + - xxxxxxxxxxxxxxxxxxxx | -(2 rows) - -\pset format wrapped -execute q; - ab +| a + - +| bc - c | --------------------+-------------------- - xx | yyyyyyyyyyyyyyyyyy - xxxx +| yyyyyyyyyyyyyyyy + - xxxxxx +| yyyyyyyyyyyyyy + - xxxxxxxx +| yyyyyyyyyyyy + - xxxxxxxxxx +| yyyyyyyyyy + - xxxxxxxxxxxx +| yyyyyyyy + - xxxxxxxxxxxxxx +| yyyyyy + - xxxxxxxxxxxxxxxx +| yyyy + - xxxxxxxxxxxxxxxxx.| yy + -.x +| - xxxxxxxxxxxxxxxxx.| -.xxx | -(2 rows) - -\pset border 2 -\pset format unaligned -execute q; -ab - -c|a -bc -xx|yyyyyyyyyyyyyyyyyy -xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -(2 rows) -\pset format aligned -execute q; -+----------------------+--------------------+ -| ab +| a +| -| +| bc | -| c | | -+----------------------+--------------------+ -| xx | yyyyyyyyyyyyyyyyyy | -| xxxx +| yyyyyyyyyyyyyyyy +| -| xxxxxx +| yyyyyyyyyyyyyy +| -| xxxxxxxx +| yyyyyyyyyyyy +| -| xxxxxxxxxx +| yyyyyyyyyy +| -| xxxxxxxxxxxx +| yyyyyyyy +| -| xxxxxxxxxxxxxx +| yyyyyy +| -| xxxxxxxxxxxxxxxx +| yyyy +| -| xxxxxxxxxxxxxxxxxx +| yy +| -| xxxxxxxxxxxxxxxxxxxx | | -+----------------------+--------------------+ -(2 rows) - -\pset format wrapped -execute q; -+-----------------+--------------------+ -| ab +| a +| -| +| bc | -| c | | -+-----------------+--------------------+ -| xx | yyyyyyyyyyyyyyyyyy | -| xxxx +| yyyyyyyyyyyyyyyy +| -| xxxxxx +| yyyyyyyyyyyyyy +| -| xxxxxxxx +| yyyyyyyyyyyy +| -| xxxxxxxxxx +| yyyyyyyyyy +| -| xxxxxxxxxxxx +| yyyyyyyy +| -| xxxxxxxxxxxxxx +| yyyyyy +| -| xxxxxxxxxxxxxxx.| yyyy +| -|.x +| yy +| -| xxxxxxxxxxxxxxx.| | -|.xxx +| | -| xxxxxxxxxxxxxxx.| | -|.xxxxx | | -+-----------------+--------------------+ -(2 rows) - -\pset expanded on -\pset columns 20 -\pset border 0 -\pset format unaligned -execute q; -ab - -c|xx -a -bc|yyyyyyyyyyyyyyyyyy - -ab - -c|xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx -a -bc|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -\pset format aligned -execute q; -* Record 1 -ab+ xx - + -c -a + yyyyyyyyyyyyyyyyyy -bc -* Record 2 -ab+ xxxx + - + xxxxxx + -c xxxxxxxx + - xxxxxxxxxx + - xxxxxxxxxxxx + - xxxxxxxxxxxxxx + - xxxxxxxxxxxxxxxx + - xxxxxxxxxxxxxxxxxx + - xxxxxxxxxxxxxxxxxxxx -a + yyyyyyyyyyyyyyyy + -bc yyyyyyyyyyyyyy + - yyyyyyyyyyyy + - yyyyyyyyyy + - yyyyyyyy + - yyyyyy + - yyyy + - yy + - - -\pset format wrapped -execute q; -* Record 1 -ab+ xx - + -c -a + yyyyyyyyyyyyyyy. -bc .yyy -* Record 2 -ab+ xxxx + - + xxxxxx + -c xxxxxxxx + - xxxxxxxxxx + - xxxxxxxxxxxx + - xxxxxxxxxxxxxx + - xxxxxxxxxxxxxxx. - .x + - xxxxxxxxxxxxxxx. - .xxx + - xxxxxxxxxxxxxxx. - .xxxxx -a + yyyyyyyyyyyyyyy. 
-bc .y + - yyyyyyyyyyyyyy + - yyyyyyyyyyyy + - yyyyyyyyyy + - yyyyyyyy + - yyyyyy + - yyyy + - yy + - - -\pset border 1 -\pset format unaligned -execute q; -ab - -c|xx -a -bc|yyyyyyyyyyyyyyyyyy - -ab - -c|xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx -a -bc|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -\pset format aligned -execute q; --[ RECORD 1 ]------------ -ab+| xx - +| -c | -a +| yyyyyyyyyyyyyyyyyy -bc | --[ RECORD 2 ]------------ -ab+| xxxx + - +| xxxxxx + -c | xxxxxxxx + - | xxxxxxxxxx + - | xxxxxxxxxxxx + - | xxxxxxxxxxxxxx + - | xxxxxxxxxxxxxxxx + - | xxxxxxxxxxxxxxxxxx + - | xxxxxxxxxxxxxxxxxxxx -a +| yyyyyyyyyyyyyyyy + -bc | yyyyyyyyyyyyyy + - | yyyyyyyyyyyy + - | yyyyyyyyyy + - | yyyyyyyy + - | yyyyyy + - | yyyy + - | yy + - | - -\pset format wrapped -execute q; --[ RECORD 1 ]------ -ab+| xx - +| -c | -a +| yyyyyyyyyyyyyy. -bc |.yyyy --[ RECORD 2 ]------ -ab+| xxxx + - +| xxxxxx + -c | xxxxxxxx + - | xxxxxxxxxx + - | xxxxxxxxxxxx + - | xxxxxxxxxxxxxx+ - | xxxxxxxxxxxxxx. - |.xx + - | xxxxxxxxxxxxxx. - |.xxxx + - | xxxxxxxxxxxxxx. - |.xxxxxx -a +| yyyyyyyyyyyyyy. -bc |.yy + - | yyyyyyyyyyyyyy+ - | yyyyyyyyyyyy + - | yyyyyyyyyy + - | yyyyyyyy + - | yyyyyy + - | yyyy + - | yy + - | - -\pset border 2 -\pset format unaligned -execute q; -ab - -c|xx -a -bc|yyyyyyyyyyyyyyyyyy - -ab - -c|xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx -a -bc|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -\pset format aligned -execute q; -+-[ RECORD 1 ]--------------+ -| ab+| xx | -| +| | -| c | | -| a +| yyyyyyyyyyyyyyyyyy | -| bc | | -+-[ RECORD 2 ]--------------+ -| ab+| xxxx +| -| +| xxxxxx +| -| c | xxxxxxxx +| -| | xxxxxxxxxx +| -| | xxxxxxxxxxxx +| -| | xxxxxxxxxxxxxx +| -| | xxxxxxxxxxxxxxxx +| -| | xxxxxxxxxxxxxxxxxx +| -| | xxxxxxxxxxxxxxxxxxxx | -| a +| yyyyyyyyyyyyyyyy +| -| bc | yyyyyyyyyyyyyy +| -| | yyyyyyyyyyyy +| -| | yyyyyyyyyy +| -| | yyyyyyyy +| -| | yyyyyy +| -| | yyyy +| -| | yy +| -| | | -+----+----------------------+ - -\pset format wrapped -execute q; -+-[ RECORD 1 ]-----+ -| ab+| xx | -| +| | -| c | | -| a +| yyyyyyyyyyy.| -| bc |.yyyyyyy | -+-[ RECORD 2 ]-----+ -| ab+| xxxx +| -| +| xxxxxx +| -| c | xxxxxxxx +| -| | xxxxxxxxxx +| -| | xxxxxxxxxxx.| -| |.x +| -| | xxxxxxxxxxx.| -| |.xxx +| -| | xxxxxxxxxxx.| -| |.xxxxx +| -| | xxxxxxxxxxx.| -| |.xxxxxxx +| -| | xxxxxxxxxxx.| -| |.xxxxxxxxx | -| a +| yyyyyyyyyyy.| -| bc |.yyyyy +| -| | yyyyyyyyyyy.| -| |.yyy +| -| | yyyyyyyyyyy.| -| |.y +| -| | yyyyyyyyyy +| -| | yyyyyyyy +| -| | yyyyyy +| -| | yyyy +| -| | yy +| -| | | -+----+-------------+ - -\pset linestyle old-ascii -\pset expanded off -\pset columns 40 -\pset border 0 -\pset format unaligned -execute q; -ab - -c|a -bc -xx|yyyyyyyyyyyyyyyyyy -xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -(2 rows) -\pset format aligned -execute q; - ab a - + bc - c + --------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx yyyyyyyyyyyyyyyy -xxxxxx yyyyyyyyyyyyyy -xxxxxxxx yyyyyyyyyyyy -xxxxxxxxxx yyyyyyyyyy -xxxxxxxxxxxx yyyyyyyy -xxxxxxxxxxxxxx yyyyyy -xxxxxxxxxxxxxxxx yyyy -xxxxxxxxxxxxxxxxxx yy -xxxxxxxxxxxxxxxxxxxx -(2 rows) - -\pset format wrapped -execute q; - ab a - + 
bc - c + --------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx yyyyyyyyyyyyyyyy -xxxxxx yyyyyyyyyyyyyy -xxxxxxxx yyyyyyyyyyyy -xxxxxxxxxx yyyyyyyyyy -xxxxxxxxxxxx yyyyyyyy -xxxxxxxxxxxxxx yyyyyy -xxxxxxxxxxxxxxxx yyyy -xxxxxxxxxxxxxxxxxx yy -xxxxxxxxxxxxxxxxxxxx -(2 rows) - -\pset border 1 -\pset format unaligned -execute q; -ab - -c|a -bc -xx|yyyyyyyyyyyyyyyyyy -xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -(2 rows) -\pset format aligned -execute q; - ab | a -+ |+ bc -+ c |+ -----------------------+-------------------- - xx | yyyyyyyyyyyyyyyyyy - xxxx | yyyyyyyyyyyyyyyy - xxxxxx : yyyyyyyyyyyyyy - xxxxxxxx : yyyyyyyyyyyy - xxxxxxxxxx : yyyyyyyyyy - xxxxxxxxxxxx : yyyyyyyy - xxxxxxxxxxxxxx : yyyyyy - xxxxxxxxxxxxxxxx : yyyy - xxxxxxxxxxxxxxxxxx : yy - xxxxxxxxxxxxxxxxxxxx : -(2 rows) - -\pset format wrapped -execute q; - ab | a -+ |+ bc -+ c |+ --------------------+-------------------- - xx | yyyyyyyyyyyyyyyyyy - xxxx | yyyyyyyyyyyyyyyy - xxxxxx : yyyyyyyyyyyyyy - xxxxxxxx : yyyyyyyyyyyy - xxxxxxxxxx : yyyyyyyyyy - xxxxxxxxxxxx : yyyyyyyy - xxxxxxxxxxxxxx : yyyyyy - xxxxxxxxxxxxxxxx : yyyy - xxxxxxxxxxxxxxxxx : yy - x : - xxxxxxxxxxxxxxxxx - xxx -(2 rows) - -\pset border 2 -\pset format unaligned -execute q; -ab - -c|a -bc -xx|yyyyyyyyyyyyyyyyyy -xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -(2 rows) -\pset format aligned -execute q; -+----------------------+--------------------+ -| ab | a | -|+ |+ bc | -|+ c |+ | -+----------------------+--------------------+ -| xx | yyyyyyyyyyyyyyyyyy | -| xxxx | yyyyyyyyyyyyyyyy | -| xxxxxx : yyyyyyyyyyyyyy | -| xxxxxxxx : yyyyyyyyyyyy | -| xxxxxxxxxx : yyyyyyyyyy | -| xxxxxxxxxxxx : yyyyyyyy | -| xxxxxxxxxxxxxx : yyyyyy | -| xxxxxxxxxxxxxxxx : yyyy | -| xxxxxxxxxxxxxxxxxx : yy | -| xxxxxxxxxxxxxxxxxxxx : | -+----------------------+--------------------+ -(2 rows) - -\pset format wrapped -execute q; -+-----------------+--------------------+ -| ab | a | -|+ |+ bc | -|+ c |+ | -+-----------------+--------------------+ -| xx | yyyyyyyyyyyyyyyyyy | -| xxxx | yyyyyyyyyyyyyyyy | -| xxxxxx : yyyyyyyyyyyyyy | -| xxxxxxxx : yyyyyyyyyyyy | -| xxxxxxxxxx : yyyyyyyyyy | -| xxxxxxxxxxxx : yyyyyyyy | -| xxxxxxxxxxxxxx : yyyyyy | -| xxxxxxxxxxxxxxx : yyyy | -| x : yy | -| xxxxxxxxxxxxxxx : | -| xxx | -| xxxxxxxxxxxxxxx | -| xxxxx | -+-----------------+--------------------+ -(2 rows) - -\pset expanded on -\pset columns 20 -\pset border 0 -\pset format unaligned -execute q; -ab - -c|xx -a -bc|yyyyyyyyyyyyyyyyyy - -ab - -c|xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx -a -bc|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -\pset format aligned -execute q; -* Record 1 - ab xx -+ -+c - a yyyyyyyyyyyyyyyyyy -+bc -* Record 2 - ab xxxx -+ xxxxxx -+c xxxxxxxx - xxxxxxxxxx - xxxxxxxxxxxx - xxxxxxxxxxxxxx - xxxxxxxxxxxxxxxx - xxxxxxxxxxxxxxxxxx - xxxxxxxxxxxxxxxxxxxx - a yyyyyyyyyyyyyyyy -+bc yyyyyyyyyyyyyy - yyyyyyyyyyyy - yyyyyyyyyy - yyyyyyyy - yyyyyy - yyyy - yy - - -\pset format wrapped -execute q; -* Record 1 - ab xx -+ -+c - a yyyyyyyyyyyyyyyy -+bc yy -* Record 2 - ab xxxx -+ xxxxxx -+c xxxxxxxx - xxxxxxxxxx 
- xxxxxxxxxxxx - xxxxxxxxxxxxxx - xxxxxxxxxxxxxxxx - xxxxxxxxxxxxxxxx - xx - xxxxxxxxxxxxxxxx - xxxx - a yyyyyyyyyyyyyyyy -+bc yyyyyyyyyyyyyy - yyyyyyyyyyyy - yyyyyyyyyy - yyyyyyyy - yyyyyy - yyyy - yy - - -\pset border 1 -\pset format unaligned -execute q; -ab - -c|xx -a -bc|yyyyyyyyyyyyyyyyyy - -ab - -c|xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx -a -bc|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -\pset format aligned -execute q; --[ RECORD 1 ]------------- - ab | xx -+ ; -+c ; - a | yyyyyyyyyyyyyyyyyy -+bc ; --[ RECORD 2 ]------------- - ab | xxxx -+ : xxxxxx -+c : xxxxxxxx - : xxxxxxxxxx - : xxxxxxxxxxxx - : xxxxxxxxxxxxxx - : xxxxxxxxxxxxxxxx - : xxxxxxxxxxxxxxxxxx - : xxxxxxxxxxxxxxxxxxxx - a | yyyyyyyyyyyyyyyy -+bc : yyyyyyyyyyyyyy - : yyyyyyyyyyyy - : yyyyyyyyyy - : yyyyyyyy - : yyyyyy - : yyyy - : yy - : - -\pset format wrapped -execute q; --[ RECORD 1 ]------- - ab | xx -+ ; -+c ; - a | yyyyyyyyyyyyyy -+bc ; yyyy --[ RECORD 2 ]------- - ab | xxxx -+ : xxxxxx -+c : xxxxxxxx - : xxxxxxxxxx - : xxxxxxxxxxxx - : xxxxxxxxxxxxxx - : xxxxxxxxxxxxxx - ; xx - : xxxxxxxxxxxxxx - ; xxxx - : xxxxxxxxxxxxxx - ; xxxxxx - a | yyyyyyyyyyyyyy -+bc ; yy - : yyyyyyyyyyyyyy - : yyyyyyyyyyyy - : yyyyyyyyyy - : yyyyyyyy - : yyyyyy - : yyyy - : yy - : - -\pset border 2 -\pset format unaligned -execute q; -ab - -c|xx -a -bc|yyyyyyyyyyyyyyyyyy - -ab - -c|xxxx -xxxxxx -xxxxxxxx -xxxxxxxxxx -xxxxxxxxxxxx -xxxxxxxxxxxxxx -xxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxx -a -bc|yyyyyyyyyyyyyyyy -yyyyyyyyyyyyyy -yyyyyyyyyyyy -yyyyyyyyyy -yyyyyyyy -yyyyyy -yyyy -yy - -\pset format aligned -execute q; -+-[ RECORD 1 ]--------------+ -| ab | xx | -|+ ; | -|+c ; | -| a | yyyyyyyyyyyyyyyyyy | -|+bc ; | -+-[ RECORD 2 ]--------------+ -| ab | xxxx | -|+ : xxxxxx | -|+c : xxxxxxxx | -| : xxxxxxxxxx | -| : xxxxxxxxxxxx | -| : xxxxxxxxxxxxxx | -| : xxxxxxxxxxxxxxxx | -| : xxxxxxxxxxxxxxxxxx | -| : xxxxxxxxxxxxxxxxxxxx | -| a | yyyyyyyyyyyyyyyy | -|+bc : yyyyyyyyyyyyyy | -| : yyyyyyyyyyyy | -| : yyyyyyyyyy | -| : yyyyyyyy | -| : yyyyyy | -| : yyyy | -| : yy | -| : | -+----+----------------------+ - -\pset format wrapped -execute q; -+-[ RECORD 1 ]-----+ -| ab | xx | -|+ ; | -|+c ; | -| a | yyyyyyyyyyy | -|+bc ; yyyyyyy | -+-[ RECORD 2 ]-----+ -| ab | xxxx | -|+ : xxxxxx | -|+c : xxxxxxxx | -| : xxxxxxxxxx | -| : xxxxxxxxxxx | -| ; x | -| : xxxxxxxxxxx | -| ; xxx | -| : xxxxxxxxxxx | -| ; xxxxx | -| : xxxxxxxxxxx | -| ; xxxxxxx | -| : xxxxxxxxxxx | -| ; xxxxxxxxx | -| a | yyyyyyyyyyy | -|+bc ; yyyyy | -| : yyyyyyyyyyy | -| ; yyy | -| : yyyyyyyyyyy | -| ; y | -| : yyyyyyyyyy | -| : yyyyyyyy | -| : yyyyyy | -| : yyyy | -| : yy | -| : | -+----+-------------+ - -deallocate q; --- test single-line header and data -prepare q as select repeat('x',2*n) as "0123456789abcdef", repeat('y',20-2*n) as "0123456789" from generate_series(1,10) as n; -\pset linestyle ascii -\pset expanded off -\pset columns 40 -\pset border 0 -\pset format unaligned -execute q; -0123456789abcdef|0123456789 -xx|yyyyyyyyyyyyyyyyyy -xxxx|yyyyyyyyyyyyyyyy -xxxxxx|yyyyyyyyyyyyyy -xxxxxxxx|yyyyyyyyyyyy -xxxxxxxxxx|yyyyyyyyyy -xxxxxxxxxxxx|yyyyyyyy -xxxxxxxxxxxxxx|yyyyyy -xxxxxxxxxxxxxxxx|yyyy -xxxxxxxxxxxxxxxxxx|yy -xxxxxxxxxxxxxxxxxxxx| -(10 rows) -\pset format aligned -execute q; - 0123456789abcdef 0123456789 --------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx yyyyyyyyyyyyyyyy -xxxxxx yyyyyyyyyyyyyy 
-xxxxxxxx yyyyyyyyyyyy -xxxxxxxxxx yyyyyyyyyy -xxxxxxxxxxxx yyyyyyyy -xxxxxxxxxxxxxx yyyyyy -xxxxxxxxxxxxxxxx yyyy -xxxxxxxxxxxxxxxxxx yy -xxxxxxxxxxxxxxxxxxxx -(10 rows) - -\pset format wrapped -execute q; - 0123456789abcdef 0123456789 --------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx yyyyyyyyyyyyyyyy -xxxxxx yyyyyyyyyyyyyy -xxxxxxxx yyyyyyyyyyyy -xxxxxxxxxx yyyyyyyyyy -xxxxxxxxxxxx yyyyyyyy -xxxxxxxxxxxxxx yyyyyy -xxxxxxxxxxxxxxxx yyyy -xxxxxxxxxxxxxxxxxx yy -xxxxxxxxxxxxxxxxxxxx -(10 rows) - -\pset border 1 -\pset format unaligned -execute q; -0123456789abcdef|0123456789 -xx|yyyyyyyyyyyyyyyyyy -xxxx|yyyyyyyyyyyyyyyy -xxxxxx|yyyyyyyyyyyyyy -xxxxxxxx|yyyyyyyyyyyy -xxxxxxxxxx|yyyyyyyyyy -xxxxxxxxxxxx|yyyyyyyy -xxxxxxxxxxxxxx|yyyyyy -xxxxxxxxxxxxxxxx|yyyy -xxxxxxxxxxxxxxxxxx|yy -xxxxxxxxxxxxxxxxxxxx| -(10 rows) -\pset format aligned -execute q; - 0123456789abcdef | 0123456789 -----------------------+-------------------- - xx | yyyyyyyyyyyyyyyyyy - xxxx | yyyyyyyyyyyyyyyy - xxxxxx | yyyyyyyyyyyyyy - xxxxxxxx | yyyyyyyyyyyy - xxxxxxxxxx | yyyyyyyyyy - xxxxxxxxxxxx | yyyyyyyy - xxxxxxxxxxxxxx | yyyyyy - xxxxxxxxxxxxxxxx | yyyy - xxxxxxxxxxxxxxxxxx | yy - xxxxxxxxxxxxxxxxxxxx | -(10 rows) - -\pset format wrapped -execute q; - 0123456789abcdef | 0123456789 ----------------------+------------------ - xx | yyyyyyyyyyyyyyyy. - |.yy - xxxx | yyyyyyyyyyyyyyyy - xxxxxx | yyyyyyyyyyyyyy - xxxxxxxx | yyyyyyyyyyyy - xxxxxxxxxx | yyyyyyyyyy - xxxxxxxxxxxx | yyyyyyyy - xxxxxxxxxxxxxx | yyyyyy - xxxxxxxxxxxxxxxx | yyyy - xxxxxxxxxxxxxxxxxx | yy - xxxxxxxxxxxxxxxxxxx.| -.x | -(10 rows) - -\pset border 2 -\pset format unaligned -execute q; -0123456789abcdef|0123456789 -xx|yyyyyyyyyyyyyyyyyy -xxxx|yyyyyyyyyyyyyyyy -xxxxxx|yyyyyyyyyyyyyy -xxxxxxxx|yyyyyyyyyyyy -xxxxxxxxxx|yyyyyyyyyy -xxxxxxxxxxxx|yyyyyyyy -xxxxxxxxxxxxxx|yyyyyy -xxxxxxxxxxxxxxxx|yyyy -xxxxxxxxxxxxxxxxxx|yy -xxxxxxxxxxxxxxxxxxxx| -(10 rows) -\pset format aligned -execute q; -+----------------------+--------------------+ -| 0123456789abcdef | 0123456789 | -+----------------------+--------------------+ -| xx | yyyyyyyyyyyyyyyyyy | -| xxxx | yyyyyyyyyyyyyyyy | -| xxxxxx | yyyyyyyyyyyyyy | -| xxxxxxxx | yyyyyyyyyyyy | -| xxxxxxxxxx | yyyyyyyyyy | -| xxxxxxxxxxxx | yyyyyyyy | -| xxxxxxxxxxxxxx | yyyyyy | -| xxxxxxxxxxxxxxxx | yyyy | -| xxxxxxxxxxxxxxxxxx | yy | -| xxxxxxxxxxxxxxxxxxxx | | -+----------------------+--------------------+ -(10 rows) - -\pset format wrapped -execute q; -+--------------------+-----------------+ -| 0123456789abcdef | 0123456789 | -+--------------------+-----------------+ -| xx | yyyyyyyyyyyyyyy.| -| |.yyy | -| xxxx | yyyyyyyyyyyyyyy.| -| |.y | -| xxxxxx | yyyyyyyyyyyyyy | -| xxxxxxxx | yyyyyyyyyyyy | -| xxxxxxxxxx | yyyyyyyyyy | -| xxxxxxxxxxxx | yyyyyyyy | -| xxxxxxxxxxxxxx | yyyyyy | -| xxxxxxxxxxxxxxxx | yyyy | -| xxxxxxxxxxxxxxxxxx | yy | -| xxxxxxxxxxxxxxxxxx.| | -|.xx | | -+--------------------+-----------------+ -(10 rows) - -\pset expanded on -\pset columns 30 -\pset border 0 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - 
-0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; -* Record 1 -0123456789abcdef xx -0123456789 yyyyyyyyyyyyyyyyyy -* Record 2 -0123456789abcdef xxxx -0123456789 yyyyyyyyyyyyyyyy -* Record 3 -0123456789abcdef xxxxxx -0123456789 yyyyyyyyyyyyyy -* Record 4 -0123456789abcdef xxxxxxxx -0123456789 yyyyyyyyyyyy -* Record 5 -0123456789abcdef xxxxxxxxxx -0123456789 yyyyyyyyyy -* Record 6 -0123456789abcdef xxxxxxxxxxxx -0123456789 yyyyyyyy -* Record 7 -0123456789abcdef xxxxxxxxxxxxxx -0123456789 yyyyyy -* Record 8 -0123456789abcdef xxxxxxxxxxxxxxxx -0123456789 yyyy -* Record 9 -0123456789abcdef xxxxxxxxxxxxxxxxxx -0123456789 yy -* Record 10 -0123456789abcdef xxxxxxxxxxxxxxxxxxxx -0123456789 - -\pset format wrapped -execute q; -* Record 1 -0123456789abcdef xx -0123456789 yyyyyyyyyyyy. - .yyyyyy -* Record 2 -0123456789abcdef xxxx -0123456789 yyyyyyyyyyyy. - .yyyy -* Record 3 -0123456789abcdef xxxxxx -0123456789 yyyyyyyyyyyy. - .yy -* Record 4 -0123456789abcdef xxxxxxxx -0123456789 yyyyyyyyyyyy -* Record 5 -0123456789abcdef xxxxxxxxxx -0123456789 yyyyyyyyyy -* Record 6 -0123456789abcdef xxxxxxxxxxxx -0123456789 yyyyyyyy -* Record 7 -0123456789abcdef xxxxxxxxxxxx. - .xx -0123456789 yyyyyy -* Record 8 -0123456789abcdef xxxxxxxxxxxx. - .xxxx -0123456789 yyyy -* Record 9 -0123456789abcdef xxxxxxxxxxxx. - .xxxxxx -0123456789 yy -* Record 10 -0123456789abcdef xxxxxxxxxxxx. - .xxxxxxxx -0123456789 - -\pset border 1 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; --[ RECORD 1 ]----+--------------------- -0123456789abcdef | xx -0123456789 | yyyyyyyyyyyyyyyyyy --[ RECORD 2 ]----+--------------------- -0123456789abcdef | xxxx -0123456789 | yyyyyyyyyyyyyyyy --[ RECORD 3 ]----+--------------------- -0123456789abcdef | xxxxxx -0123456789 | yyyyyyyyyyyyyy --[ RECORD 4 ]----+--------------------- -0123456789abcdef | xxxxxxxx -0123456789 | yyyyyyyyyyyy --[ RECORD 5 ]----+--------------------- -0123456789abcdef | xxxxxxxxxx -0123456789 | yyyyyyyyyy --[ RECORD 6 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxx -0123456789 | yyyyyyyy --[ RECORD 7 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxx -0123456789 | yyyyyy --[ RECORD 8 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxx -0123456789 | yyyy --[ RECORD 9 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxx -0123456789 | yy --[ RECORD 10 ]---+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxxxx -0123456789 | - -\pset format wrapped -execute q; --[ RECORD 1 ]----+----------- -0123456789abcdef | xx -0123456789 | yyyyyyyyyy. - |.yyyyyyyy --[ RECORD 2 ]----+----------- -0123456789abcdef | xxxx -0123456789 | yyyyyyyyyy. - |.yyyyyy --[ RECORD 3 ]----+----------- -0123456789abcdef | xxxxxx -0123456789 | yyyyyyyyyy. - |.yyyy --[ RECORD 4 ]----+----------- -0123456789abcdef | xxxxxxxx -0123456789 | yyyyyyyyyy. 
- |.yy --[ RECORD 5 ]----+----------- -0123456789abcdef | xxxxxxxxxx -0123456789 | yyyyyyyyyy --[ RECORD 6 ]----+----------- -0123456789abcdef | xxxxxxxxxx. - |.xx -0123456789 | yyyyyyyy --[ RECORD 7 ]----+----------- -0123456789abcdef | xxxxxxxxxx. - |.xxxx -0123456789 | yyyyyy --[ RECORD 8 ]----+----------- -0123456789abcdef | xxxxxxxxxx. - |.xxxxxx -0123456789 | yyyy --[ RECORD 9 ]----+----------- -0123456789abcdef | xxxxxxxxxx. - |.xxxxxxxx -0123456789 | yy --[ RECORD 10 ]---+----------- -0123456789abcdef | xxxxxxxxxx. - |.xxxxxxxxxx -0123456789 | - -\pset border 2 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; -+-[ RECORD 1 ]-----+----------------------+ -| 0123456789abcdef | xx | -| 0123456789 | yyyyyyyyyyyyyyyyyy | -+-[ RECORD 2 ]-----+----------------------+ -| 0123456789abcdef | xxxx | -| 0123456789 | yyyyyyyyyyyyyyyy | -+-[ RECORD 3 ]-----+----------------------+ -| 0123456789abcdef | xxxxxx | -| 0123456789 | yyyyyyyyyyyyyy | -+-[ RECORD 4 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxx | -| 0123456789 | yyyyyyyyyyyy | -+-[ RECORD 5 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxx | -| 0123456789 | yyyyyyyyyy | -+-[ RECORD 6 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxx | -| 0123456789 | yyyyyyyy | -+-[ RECORD 7 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxx | -| 0123456789 | yyyyyy | -+-[ RECORD 8 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxx | -| 0123456789 | yyyy | -+-[ RECORD 9 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxxx | -| 0123456789 | yy | -+-[ RECORD 10 ]----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxxxxx | -| 0123456789 | | -+------------------+----------------------+ - -\pset format wrapped -execute q; -+-[ RECORD 1 ]-----+---------+ -| 0123456789abcdef | xx | -| 0123456789 | yyyyyyy.| -| |.yyyyyyy.| -| |.yyyy | -+-[ RECORD 2 ]-----+---------+ -| 0123456789abcdef | xxxx | -| 0123456789 | yyyyyyy.| -| |.yyyyyyy.| -| |.yy | -+-[ RECORD 3 ]-----+---------+ -| 0123456789abcdef | xxxxxx | -| 0123456789 | yyyyyyy.| -| |.yyyyyyy | -+-[ RECORD 4 ]-----+---------+ -| 0123456789abcdef | xxxxxxx.| -| |.x | -| 0123456789 | yyyyyyy.| -| |.yyyyy | -+-[ RECORD 5 ]-----+---------+ -| 0123456789abcdef | xxxxxxx.| -| |.xxx | -| 0123456789 | yyyyyyy.| -| |.yyy | -+-[ RECORD 6 ]-----+---------+ -| 0123456789abcdef | xxxxxxx.| -| |.xxxxx | -| 0123456789 | yyyyyyy.| -| |.y | -+-[ RECORD 7 ]-----+---------+ -| 0123456789abcdef | xxxxxxx.| -| |.xxxxxxx | -| 0123456789 | yyyyyy | -+-[ RECORD 8 ]-----+---------+ -| 0123456789abcdef | xxxxxxx.| -| |.xxxxxxx.| -| |.xx | -| 0123456789 | yyyy | -+-[ RECORD 9 ]-----+---------+ -| 0123456789abcdef | xxxxxxx.| -| |.xxxxxxx.| -| |.xxxx | -| 0123456789 | yy | -+-[ RECORD 10 ]----+---------+ -| 0123456789abcdef | xxxxxxx.| -| |.xxxxxxx.| -| |.xxxxxx | -| 0123456789 | | -+------------------+---------+ - -\pset expanded on -\pset columns 20 -\pset border 0 -\pset 
format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; -* Record 1 -0123456789abcdef xx -0123456789 yyyyyyyyyyyyyyyyyy -* Record 2 -0123456789abcdef xxxx -0123456789 yyyyyyyyyyyyyyyy -* Record 3 -0123456789abcdef xxxxxx -0123456789 yyyyyyyyyyyyyy -* Record 4 -0123456789abcdef xxxxxxxx -0123456789 yyyyyyyyyyyy -* Record 5 -0123456789abcdef xxxxxxxxxx -0123456789 yyyyyyyyyy -* Record 6 -0123456789abcdef xxxxxxxxxxxx -0123456789 yyyyyyyy -* Record 7 -0123456789abcdef xxxxxxxxxxxxxx -0123456789 yyyyyy -* Record 8 -0123456789abcdef xxxxxxxxxxxxxxxx -0123456789 yyyy -* Record 9 -0123456789abcdef xxxxxxxxxxxxxxxxxx -0123456789 yy -* Record 10 -0123456789abcdef xxxxxxxxxxxxxxxxxxxx -0123456789 - -\pset format wrapped -execute q; -* Record 1 -0123456789abcdef xx -0123456789 yyy. - .yyy. - .yyy. - .yyy. - .yyy. - .yyy -* Record 2 -0123456789abcdef xxx. - .x -0123456789 yyy. - .yyy. - .yyy. - .yyy. - .yyy. - .y -* Record 3 -0123456789abcdef xxx. - .xxx -0123456789 yyy. - .yyy. - .yyy. - .yyy. - .yy -* Record 4 -0123456789abcdef xxx. - .xxx. - .xx -0123456789 yyy. - .yyy. - .yyy. - .yyy -* Record 5 -0123456789abcdef xxx. - .xxx. - .xxx. - .x -0123456789 yyy. - .yyy. - .yyy. - .y -* Record 6 -0123456789abcdef xxx. - .xxx. - .xxx. - .xxx -0123456789 yyy. - .yyy. - .yy -* Record 7 -0123456789abcdef xxx. - .xxx. - .xxx. - .xxx. - .xx -0123456789 yyy. - .yyy -* Record 8 -0123456789abcdef xxx. - .xxx. - .xxx. - .xxx. - .xxx. - .x -0123456789 yyy. - .y -* Record 9 -0123456789abcdef xxx. - .xxx. - .xxx. - .xxx. - .xxx. - .xxx -0123456789 yy -* Record 10 -0123456789abcdef xxx. - .xxx. - .xxx. - .xxx. - .xxx. - .xxx. 
- .xx -0123456789 - -\pset border 1 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; --[ RECORD 1 ]----+--------------------- -0123456789abcdef | xx -0123456789 | yyyyyyyyyyyyyyyyyy --[ RECORD 2 ]----+--------------------- -0123456789abcdef | xxxx -0123456789 | yyyyyyyyyyyyyyyy --[ RECORD 3 ]----+--------------------- -0123456789abcdef | xxxxxx -0123456789 | yyyyyyyyyyyyyy --[ RECORD 4 ]----+--------------------- -0123456789abcdef | xxxxxxxx -0123456789 | yyyyyyyyyyyy --[ RECORD 5 ]----+--------------------- -0123456789abcdef | xxxxxxxxxx -0123456789 | yyyyyyyyyy --[ RECORD 6 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxx -0123456789 | yyyyyyyy --[ RECORD 7 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxx -0123456789 | yyyyyy --[ RECORD 8 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxx -0123456789 | yyyy --[ RECORD 9 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxx -0123456789 | yy --[ RECORD 10 ]---+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxxxx -0123456789 | - -\pset format wrapped -execute q; --[ RECORD 1 ]----+---- -0123456789abcdef | xx -0123456789 | yyy. - |.yyy. - |.yyy. - |.yyy. - |.yyy. - |.yyy --[ RECORD 2 ]----+---- -0123456789abcdef | xxx. - |.x -0123456789 | yyy. - |.yyy. - |.yyy. - |.yyy. - |.yyy. - |.y --[ RECORD 3 ]----+---- -0123456789abcdef | xxx. - |.xxx -0123456789 | yyy. - |.yyy. - |.yyy. - |.yyy. - |.yy --[ RECORD 4 ]----+---- -0123456789abcdef | xxx. - |.xxx. - |.xx -0123456789 | yyy. - |.yyy. - |.yyy. - |.yyy --[ RECORD 5 ]----+---- -0123456789abcdef | xxx. - |.xxx. - |.xxx. - |.x -0123456789 | yyy. - |.yyy. - |.yyy. - |.y --[ RECORD 6 ]----+---- -0123456789abcdef | xxx. - |.xxx. - |.xxx. - |.xxx -0123456789 | yyy. - |.yyy. - |.yy --[ RECORD 7 ]----+---- -0123456789abcdef | xxx. - |.xxx. - |.xxx. - |.xxx. - |.xx -0123456789 | yyy. - |.yyy --[ RECORD 8 ]----+---- -0123456789abcdef | xxx. - |.xxx. - |.xxx. - |.xxx. - |.xxx. - |.x -0123456789 | yyy. - |.y --[ RECORD 9 ]----+---- -0123456789abcdef | xxx. - |.xxx. - |.xxx. - |.xxx. - |.xxx. - |.xxx -0123456789 | yy --[ RECORD 10 ]---+---- -0123456789abcdef | xxx. - |.xxx. - |.xxx. - |.xxx. - |.xxx. - |.xxx. 
- |.xx -0123456789 | - -\pset border 2 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; -+-[ RECORD 1 ]-----+----------------------+ -| 0123456789abcdef | xx | -| 0123456789 | yyyyyyyyyyyyyyyyyy | -+-[ RECORD 2 ]-----+----------------------+ -| 0123456789abcdef | xxxx | -| 0123456789 | yyyyyyyyyyyyyyyy | -+-[ RECORD 3 ]-----+----------------------+ -| 0123456789abcdef | xxxxxx | -| 0123456789 | yyyyyyyyyyyyyy | -+-[ RECORD 4 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxx | -| 0123456789 | yyyyyyyyyyyy | -+-[ RECORD 5 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxx | -| 0123456789 | yyyyyyyyyy | -+-[ RECORD 6 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxx | -| 0123456789 | yyyyyyyy | -+-[ RECORD 7 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxx | -| 0123456789 | yyyyyy | -+-[ RECORD 8 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxx | -| 0123456789 | yyyy | -+-[ RECORD 9 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxxx | -| 0123456789 | yy | -+-[ RECORD 10 ]----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxxxxx | -| 0123456789 | | -+------------------+----------------------+ - -\pset format wrapped -execute q; -+-[ RECORD 1 ]-----+-----+ -| 0123456789abcdef | xx | -| 0123456789 | yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy | -+-[ RECORD 2 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.x | -| 0123456789 | yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy.| -| |.y | -+-[ RECORD 3 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx | -| 0123456789 | yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy.| -| |.yy | -+-[ RECORD 4 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xx | -| 0123456789 | yyy.| -| |.yyy.| -| |.yyy.| -| |.yyy | -+-[ RECORD 5 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xxx.| -| |.x | -| 0123456789 | yyy.| -| |.yyy.| -| |.yyy.| -| |.y | -+-[ RECORD 6 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx | -| 0123456789 | yyy.| -| |.yyy.| -| |.yy | -+-[ RECORD 7 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xx | -| 0123456789 | yyy.| -| |.yyy | -+-[ RECORD 8 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.x | -| 0123456789 | yyy.| -| |.y | -+-[ RECORD 9 ]-----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx | -| 0123456789 | yy | -+-[ RECORD 10 ]----+-----+ -| 0123456789abcdef | xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xxx.| -| |.xx | -| 0123456789 | | -+------------------+-----+ - -\pset linestyle old-ascii -\pset expanded off -\pset columns 40 -\pset border 0 -\pset format unaligned -execute q; -0123456789abcdef|0123456789 -xx|yyyyyyyyyyyyyyyyyy -xxxx|yyyyyyyyyyyyyyyy -xxxxxx|yyyyyyyyyyyyyy -xxxxxxxx|yyyyyyyyyyyy -xxxxxxxxxx|yyyyyyyyyy -xxxxxxxxxxxx|yyyyyyyy -xxxxxxxxxxxxxx|yyyyyy -xxxxxxxxxxxxxxxx|yyyy -xxxxxxxxxxxxxxxxxx|yy 
-xxxxxxxxxxxxxxxxxxxx| -(10 rows) -\pset format aligned -execute q; - 0123456789abcdef 0123456789 --------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx yyyyyyyyyyyyyyyy -xxxxxx yyyyyyyyyyyyyy -xxxxxxxx yyyyyyyyyyyy -xxxxxxxxxx yyyyyyyyyy -xxxxxxxxxxxx yyyyyyyy -xxxxxxxxxxxxxx yyyyyy -xxxxxxxxxxxxxxxx yyyy -xxxxxxxxxxxxxxxxxx yy -xxxxxxxxxxxxxxxxxxxx -(10 rows) - -\pset format wrapped -execute q; - 0123456789abcdef 0123456789 --------------------- ------------------ -xx yyyyyyyyyyyyyyyyyy -xxxx yyyyyyyyyyyyyyyy -xxxxxx yyyyyyyyyyyyyy -xxxxxxxx yyyyyyyyyyyy -xxxxxxxxxx yyyyyyyyyy -xxxxxxxxxxxx yyyyyyyy -xxxxxxxxxxxxxx yyyyyy -xxxxxxxxxxxxxxxx yyyy -xxxxxxxxxxxxxxxxxx yy -xxxxxxxxxxxxxxxxxxxx -(10 rows) - -\pset border 1 -\pset format unaligned -execute q; -0123456789abcdef|0123456789 -xx|yyyyyyyyyyyyyyyyyy -xxxx|yyyyyyyyyyyyyyyy -xxxxxx|yyyyyyyyyyyyyy -xxxxxxxx|yyyyyyyyyyyy -xxxxxxxxxx|yyyyyyyyyy -xxxxxxxxxxxx|yyyyyyyy -xxxxxxxxxxxxxx|yyyyyy -xxxxxxxxxxxxxxxx|yyyy -xxxxxxxxxxxxxxxxxx|yy -xxxxxxxxxxxxxxxxxxxx| -(10 rows) -\pset format aligned -execute q; - 0123456789abcdef | 0123456789 -----------------------+-------------------- - xx | yyyyyyyyyyyyyyyyyy - xxxx | yyyyyyyyyyyyyyyy - xxxxxx | yyyyyyyyyyyyyy - xxxxxxxx | yyyyyyyyyyyy - xxxxxxxxxx | yyyyyyyyyy - xxxxxxxxxxxx | yyyyyyyy - xxxxxxxxxxxxxx | yyyyyy - xxxxxxxxxxxxxxxx | yyyy - xxxxxxxxxxxxxxxxxx | yy - xxxxxxxxxxxxxxxxxxxx | -(10 rows) - -\pset format wrapped -execute q; - 0123456789abcdef | 0123456789 ----------------------+------------------ - xx | yyyyyyyyyyyyyyyy - ; yy - xxxx | yyyyyyyyyyyyyyyy - xxxxxx | yyyyyyyyyyyyyy - xxxxxxxx | yyyyyyyyyyyy - xxxxxxxxxx | yyyyyyyyyy - xxxxxxxxxxxx | yyyyyyyy - xxxxxxxxxxxxxx | yyyyyy - xxxxxxxxxxxxxxxx | yyyy - xxxxxxxxxxxxxxxxxx | yy - xxxxxxxxxxxxxxxxxxx | - x -(10 rows) - -\pset border 2 -\pset format unaligned -execute q; -0123456789abcdef|0123456789 -xx|yyyyyyyyyyyyyyyyyy -xxxx|yyyyyyyyyyyyyyyy -xxxxxx|yyyyyyyyyyyyyy -xxxxxxxx|yyyyyyyyyyyy -xxxxxxxxxx|yyyyyyyyyy -xxxxxxxxxxxx|yyyyyyyy -xxxxxxxxxxxxxx|yyyyyy -xxxxxxxxxxxxxxxx|yyyy -xxxxxxxxxxxxxxxxxx|yy -xxxxxxxxxxxxxxxxxxxx| -(10 rows) -\pset format aligned -execute q; -+----------------------+--------------------+ -| 0123456789abcdef | 0123456789 | -+----------------------+--------------------+ -| xx | yyyyyyyyyyyyyyyyyy | -| xxxx | yyyyyyyyyyyyyyyy | -| xxxxxx | yyyyyyyyyyyyyy | -| xxxxxxxx | yyyyyyyyyyyy | -| xxxxxxxxxx | yyyyyyyyyy | -| xxxxxxxxxxxx | yyyyyyyy | -| xxxxxxxxxxxxxx | yyyyyy | -| xxxxxxxxxxxxxxxx | yyyy | -| xxxxxxxxxxxxxxxxxx | yy | -| xxxxxxxxxxxxxxxxxxxx | | -+----------------------+--------------------+ -(10 rows) - -\pset format wrapped -execute q; -+--------------------+-----------------+ -| 0123456789abcdef | 0123456789 | -+--------------------+-----------------+ -| xx | yyyyyyyyyyyyyyy | -| ; yyy | -| xxxx | yyyyyyyyyyyyyyy | -| ; y | -| xxxxxx | yyyyyyyyyyyyyy | -| xxxxxxxx | yyyyyyyyyyyy | -| xxxxxxxxxx | yyyyyyyyyy | -| xxxxxxxxxxxx | yyyyyyyy | -| xxxxxxxxxxxxxx | yyyyyy | -| xxxxxxxxxxxxxxxx | yyyy | -| xxxxxxxxxxxxxxxxxx | yy | -| xxxxxxxxxxxxxxxxxx | | -| xx | -+--------------------+-----------------+ -(10 rows) - -\pset expanded on -\pset border 0 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx 
-0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; -* Record 1 -0123456789abcdef xx -0123456789 yyyyyyyyyyyyyyyyyy -* Record 2 -0123456789abcdef xxxx -0123456789 yyyyyyyyyyyyyyyy -* Record 3 -0123456789abcdef xxxxxx -0123456789 yyyyyyyyyyyyyy -* Record 4 -0123456789abcdef xxxxxxxx -0123456789 yyyyyyyyyyyy -* Record 5 -0123456789abcdef xxxxxxxxxx -0123456789 yyyyyyyyyy -* Record 6 -0123456789abcdef xxxxxxxxxxxx -0123456789 yyyyyyyy -* Record 7 -0123456789abcdef xxxxxxxxxxxxxx -0123456789 yyyyyy -* Record 8 -0123456789abcdef xxxxxxxxxxxxxxxx -0123456789 yyyy -* Record 9 -0123456789abcdef xxxxxxxxxxxxxxxxxx -0123456789 yy -* Record 10 -0123456789abcdef xxxxxxxxxxxxxxxxxxxx -0123456789 - -\pset format wrapped -execute q; -* Record 1 -0123456789abcdef xx -0123456789 yyyyyyyyyyyyyyyyyy -* Record 2 -0123456789abcdef xxxx -0123456789 yyyyyyyyyyyyyyyy -* Record 3 -0123456789abcdef xxxxxx -0123456789 yyyyyyyyyyyyyy -* Record 4 -0123456789abcdef xxxxxxxx -0123456789 yyyyyyyyyyyy -* Record 5 -0123456789abcdef xxxxxxxxxx -0123456789 yyyyyyyyyy -* Record 6 -0123456789abcdef xxxxxxxxxxxx -0123456789 yyyyyyyy -* Record 7 -0123456789abcdef xxxxxxxxxxxxxx -0123456789 yyyyyy -* Record 8 -0123456789abcdef xxxxxxxxxxxxxxxx -0123456789 yyyy -* Record 9 -0123456789abcdef xxxxxxxxxxxxxxxxxx -0123456789 yy -* Record 10 -0123456789abcdef xxxxxxxxxxxxxxxxxxxx -0123456789 - -\pset border 1 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; --[ RECORD 1 ]----+--------------------- -0123456789abcdef | xx -0123456789 | yyyyyyyyyyyyyyyyyy --[ RECORD 2 ]----+--------------------- -0123456789abcdef | xxxx -0123456789 | yyyyyyyyyyyyyyyy --[ RECORD 3 ]----+--------------------- -0123456789abcdef | xxxxxx -0123456789 | yyyyyyyyyyyyyy --[ RECORD 4 ]----+--------------------- -0123456789abcdef | xxxxxxxx -0123456789 | yyyyyyyyyyyy --[ RECORD 5 ]----+--------------------- -0123456789abcdef | xxxxxxxxxx -0123456789 | yyyyyyyyyy --[ RECORD 6 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxx -0123456789 | yyyyyyyy --[ RECORD 7 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxx -0123456789 | yyyyyy --[ RECORD 8 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxx -0123456789 | yyyy --[ RECORD 9 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxx -0123456789 | yy --[ RECORD 10 ]---+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxxxx -0123456789 | - -\pset format wrapped -execute q; --[ RECORD 1 ]----+--------------------- -0123456789abcdef | xx -0123456789 | yyyyyyyyyyyyyyyyyy --[ RECORD 2 ]----+--------------------- -0123456789abcdef | xxxx -0123456789 | yyyyyyyyyyyyyyyy --[ RECORD 3 ]----+--------------------- -0123456789abcdef | xxxxxx -0123456789 | yyyyyyyyyyyyyy --[ RECORD 4 ]----+--------------------- 
-0123456789abcdef | xxxxxxxx -0123456789 | yyyyyyyyyyyy --[ RECORD 5 ]----+--------------------- -0123456789abcdef | xxxxxxxxxx -0123456789 | yyyyyyyyyy --[ RECORD 6 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxx -0123456789 | yyyyyyyy --[ RECORD 7 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxx -0123456789 | yyyyyy --[ RECORD 8 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxx -0123456789 | yyyy --[ RECORD 9 ]----+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxx -0123456789 | yy --[ RECORD 10 ]---+--------------------- -0123456789abcdef | xxxxxxxxxxxxxxxxxxxx -0123456789 | - -\pset border 2 -\pset format unaligned -execute q; -0123456789abcdef|xx -0123456789|yyyyyyyyyyyyyyyyyy - -0123456789abcdef|xxxx -0123456789|yyyyyyyyyyyyyyyy - -0123456789abcdef|xxxxxx -0123456789|yyyyyyyyyyyyyy - -0123456789abcdef|xxxxxxxx -0123456789|yyyyyyyyyyyy - -0123456789abcdef|xxxxxxxxxx -0123456789|yyyyyyyyyy - -0123456789abcdef|xxxxxxxxxxxx -0123456789|yyyyyyyy - -0123456789abcdef|xxxxxxxxxxxxxx -0123456789|yyyyyy - -0123456789abcdef|xxxxxxxxxxxxxxxx -0123456789|yyyy - -0123456789abcdef|xxxxxxxxxxxxxxxxxx -0123456789|yy - -0123456789abcdef|xxxxxxxxxxxxxxxxxxxx -0123456789| -\pset format aligned -execute q; -+-[ RECORD 1 ]-----+----------------------+ -| 0123456789abcdef | xx | -| 0123456789 | yyyyyyyyyyyyyyyyyy | -+-[ RECORD 2 ]-----+----------------------+ -| 0123456789abcdef | xxxx | -| 0123456789 | yyyyyyyyyyyyyyyy | -+-[ RECORD 3 ]-----+----------------------+ -| 0123456789abcdef | xxxxxx | -| 0123456789 | yyyyyyyyyyyyyy | -+-[ RECORD 4 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxx | -| 0123456789 | yyyyyyyyyyyy | -+-[ RECORD 5 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxx | -| 0123456789 | yyyyyyyyyy | -+-[ RECORD 6 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxx | -| 0123456789 | yyyyyyyy | -+-[ RECORD 7 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxx | -| 0123456789 | yyyyyy | -+-[ RECORD 8 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxx | -| 0123456789 | yyyy | -+-[ RECORD 9 ]-----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxxx | -| 0123456789 | yy | -+-[ RECORD 10 ]----+----------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxxxxx | -| 0123456789 | | -+------------------+----------------------+ - -\pset format wrapped -execute q; -+-[ RECORD 1 ]-----+-------------------+ -| 0123456789abcdef | xx | -| 0123456789 | yyyyyyyyyyyyyyyyy | -| ; y | -+-[ RECORD 2 ]-----+-------------------+ -| 0123456789abcdef | xxxx | -| 0123456789 | yyyyyyyyyyyyyyyy | -+-[ RECORD 3 ]-----+-------------------+ -| 0123456789abcdef | xxxxxx | -| 0123456789 | yyyyyyyyyyyyyy | -+-[ RECORD 4 ]-----+-------------------+ -| 0123456789abcdef | xxxxxxxx | -| 0123456789 | yyyyyyyyyyyy | -+-[ RECORD 5 ]-----+-------------------+ -| 0123456789abcdef | xxxxxxxxxx | -| 0123456789 | yyyyyyyyyy | -+-[ RECORD 6 ]-----+-------------------+ -| 0123456789abcdef | xxxxxxxxxxxx | -| 0123456789 | yyyyyyyy | -+-[ RECORD 7 ]-----+-------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxx | -| 0123456789 | yyyyyy | -+-[ RECORD 8 ]-----+-------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxx | -| 0123456789 | yyyy | -+-[ RECORD 9 ]-----+-------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxx | -| ; x | -| 0123456789 | yy | -+-[ RECORD 10 ]----+-------------------+ -| 0123456789abcdef | xxxxxxxxxxxxxxxxx | -| ; xxx | -| 0123456789 | | 
-+------------------+-------------------+ - -deallocate q; -\pset linestyle ascii -\pset border 1 --- support table for output-format tests (useful to create a footer) -create table psql_serial_tab (id serial); --- test header/footer/tuples_only behavior in aligned/unaligned/wrapped cases -\pset format aligned -\pset expanded off -\d psql_serial_tab_id_seq - Sequence "public.psql_serial_tab_id_seq" - Type | Start | Minimum | Maximum | Increment | Cycles? | Cache ----------+-------+---------+------------+-----------+---------+------- - integer | 1 | 1 | 2147483647 | 1 | no | 1 -Owned by: public.psql_serial_tab.id - -\pset tuples_only true -\df exp - pg_catalog | exp | double precision | double precision | func - pg_catalog | exp | numeric | numeric | func - -\pset tuples_only false -\pset expanded on -\d psql_serial_tab_id_seq -Sequence "public.psql_serial_tab_id_seq" --[ RECORD 1 ]--------- -Type | integer -Start | 1 -Minimum | 1 -Maximum | 2147483647 -Increment | 1 -Cycles? | no -Cache | 1 - -Owned by: public.psql_serial_tab.id - -\pset tuples_only true -\df exp -Schema | pg_catalog -Name | exp -Result data type | double precision -Argument data types | double precision -Type | func ---------------------+----------------- -Schema | pg_catalog -Name | exp -Result data type | numeric -Argument data types | numeric -Type | func - -\pset tuples_only false --- empty table is a special case for this format -select 1 where false; -(0 rows) - -\pset format unaligned -\pset expanded off -\d psql_serial_tab_id_seq -Sequence "public.psql_serial_tab_id_seq" -Type|Start|Minimum|Maximum|Increment|Cycles?|Cache -integer|1|1|2147483647|1|no|1 -Owned by: public.psql_serial_tab.id -\pset tuples_only true -\df exp -pg_catalog|exp|double precision|double precision|func -pg_catalog|exp|numeric|numeric|func -\pset tuples_only false -\pset expanded on -\d psql_serial_tab_id_seq -Sequence "public.psql_serial_tab_id_seq" - -Type|integer -Start|1 -Minimum|1 -Maximum|2147483647 -Increment|1 -Cycles?|no -Cache|1 - -Owned by: public.psql_serial_tab.id -\pset tuples_only true -\df exp -Schema|pg_catalog -Name|exp -Result data type|double precision -Argument data types|double precision -Type|func - -Schema|pg_catalog -Name|exp -Result data type|numeric -Argument data types|numeric -Type|func -\pset tuples_only false -\pset format wrapped -\pset expanded off -\d psql_serial_tab_id_seq - Sequence "public.psql_serial_tab_id_seq" - Type | Start | Minimum | Maximum | Increment | Cycles? | Cache ----------+-------+---------+------------+-----------+---------+------- - integer | 1 | 1 | 2147483647 | 1 | no | 1 -Owned by: public.psql_serial_tab.id - -\pset tuples_only true -\df exp - pg_catalog | exp | double precision | double precision | func - pg_catalog | exp | numeric | numeric | func - -\pset tuples_only false -\pset expanded on -\d psql_serial_tab_id_seq -Sequence "public.psql_serial_tab_id_seq" --[ RECORD 1 ]--------- -Type | integer -Start | 1 -Minimum | 1 -Maximum | 2147483647 -Increment | 1 -Cycles? 
| no -Cache | 1 - -Owned by: public.psql_serial_tab.id - -\pset tuples_only true -\df exp -Schema | pg_catalog -Name | exp -Result data type | double precision -Argument data types | double precision -Type | func ---------------------+----------------- -Schema | pg_catalog -Name | exp -Result data type | numeric -Argument data types | numeric -Type | func - -\pset tuples_only false --- check conditional am display -\pset expanded off -CREATE SCHEMA tableam_display; -CREATE ROLE regress_display_role; -ALTER SCHEMA tableam_display OWNER TO regress_display_role; -SET search_path TO tableam_display; -CREATE ACCESS METHOD heap_psql TYPE TABLE HANDLER heap_tableam_handler; -SET ROLE TO regress_display_role; --- Use only relations with a physical size of zero. -CREATE TABLE tbl_heap_psql(f1 int, f2 char(100)) using heap_psql; -CREATE TABLE tbl_heap(f1 int, f2 char(100)) using heap; -CREATE VIEW view_heap_psql AS SELECT f1 from tbl_heap_psql; -CREATE MATERIALIZED VIEW mat_view_heap_psql USING heap_psql AS SELECT f1 from tbl_heap_psql; -\d+ tbl_heap_psql - Table "tableam_display.tbl_heap_psql" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+----------------+-----------+----------+---------+----------+--------------+------------- - f1 | integer | | | | plain | | - f2 | character(100) | | | | extended | | - -\d+ tbl_heap - Table "tableam_display.tbl_heap" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+----------------+-----------+----------+---------+----------+--------------+------------- - f1 | integer | | | | plain | | - f2 | character(100) | | | | extended | | - -\set HIDE_TABLEAM off -\d+ tbl_heap_psql - Table "tableam_display.tbl_heap_psql" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+----------------+-----------+----------+---------+----------+--------------+------------- - f1 | integer | | | | plain | | - f2 | character(100) | | | | extended | | -Access method: heap_psql - -\d+ tbl_heap - Table "tableam_display.tbl_heap" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+----------------+-----------+----------+---------+----------+--------------+------------- - f1 | integer | | | | plain | | - f2 | character(100) | | | | extended | | -Access method: heap - --- AM is displayed for tables, indexes and materialized views. 
-\d+ - List of relations - Schema | Name | Type | Owner | Persistence | Access method | Size | Description ------------------+--------------------+-------------------+----------------------+-------------+---------------+---------+------------- - tableam_display | mat_view_heap_psql | materialized view | regress_display_role | permanent | heap_psql | 0 bytes | - tableam_display | tbl_heap | table | regress_display_role | permanent | heap | 0 bytes | - tableam_display | tbl_heap_psql | table | regress_display_role | permanent | heap_psql | 0 bytes | - tableam_display | view_heap_psql | view | regress_display_role | permanent | | 0 bytes | -(4 rows) - -\dt+ - List of relations - Schema | Name | Type | Owner | Persistence | Access method | Size | Description ------------------+---------------+-------+----------------------+-------------+---------------+---------+------------- - tableam_display | tbl_heap | table | regress_display_role | permanent | heap | 0 bytes | - tableam_display | tbl_heap_psql | table | regress_display_role | permanent | heap_psql | 0 bytes | -(2 rows) - -\dm+ - List of relations - Schema | Name | Type | Owner | Persistence | Access method | Size | Description ------------------+--------------------+-------------------+----------------------+-------------+---------------+---------+------------- - tableam_display | mat_view_heap_psql | materialized view | regress_display_role | permanent | heap_psql | 0 bytes | -(1 row) - --- But not for views and sequences. -\dv+ - List of relations - Schema | Name | Type | Owner | Persistence | Size | Description ------------------+----------------+------+----------------------+-------------+---------+------------- - tableam_display | view_heap_psql | view | regress_display_role | permanent | 0 bytes | -(1 row) - -\set HIDE_TABLEAM on -\d+ - List of relations - Schema | Name | Type | Owner | Persistence | Size | Description ------------------+--------------------+-------------------+----------------------+-------------+---------+------------- - tableam_display | mat_view_heap_psql | materialized view | regress_display_role | permanent | 0 bytes | - tableam_display | tbl_heap | table | regress_display_role | permanent | 0 bytes | - tableam_display | tbl_heap_psql | table | regress_display_role | permanent | 0 bytes | - tableam_display | view_heap_psql | view | regress_display_role | permanent | 0 bytes | -(4 rows) - -RESET ROLE; -RESET search_path; -DROP SCHEMA tableam_display CASCADE; -NOTICE: drop cascades to 4 other objects -DETAIL: drop cascades to table tableam_display.tbl_heap_psql -drop cascades to table tableam_display.tbl_heap -drop cascades to view tableam_display.view_heap_psql -drop cascades to materialized view tableam_display.mat_view_heap_psql -DROP ACCESS METHOD heap_psql; -DROP ROLE regress_display_role; --- test numericlocale (as best we can without control of psql's locale) -\pset format aligned -\pset expanded off -\pset numericlocale true -select n, -n as m, n * 111 as x, '1e90'::float8 as f -from generate_series(0,3) n; - n | m | x | f ----+----+-----+------- - 0 | 0 | 0 | 1e+90 - 1 | -1 | 111 | 1e+90 - 2 | -2 | 222 | 1e+90 - 3 | -3 | 333 | 1e+90 -(4 rows) - -\pset numericlocale false --- test asciidoc output format -\pset format asciidoc -\pset border 1 -\pset expanded off -\d psql_serial_tab_id_seq - -.Sequence "public.psql_serial_tab_id_seq" -[options="header",cols="l,>l,>l,>l,l",frame="none"] -|==== -^l|Type ^l|Start ^l|Minimum ^l|Maximum ^l|Increment ^l|Cycles? 
^l|Cache -|integer |1 |1 |2147483647 |1 |no |1 -|==== - -.... -Owned by: public.psql_serial_tab.id -.... -\pset tuples_only true -\df exp - -[cols="l|1 -l|1 -l|2147483647 -l|1 -l|1 -|==== - -.... -Owned by: public.psql_serial_tab.id -.... -\pset tuples_only true -\df exp - -[cols="h,l",frame="none"] -|==== -2+| -l|1 -2+^|Record 2 -l|2 -|==== -\pset border 1 -execute q; - -[cols="h,l",frame="none"] -|==== -2+^|Record 1 -l|1 -2+^|Record 2 -l|2 -|==== -\pset border 2 -execute q; - -[cols="h,l",frame="all",grid="all"] -|==== -2+^|Record 1 -l|1 -2+^|Record 2 -l|2 -|==== -deallocate q; --- test csv output format -\pset format csv -\pset border 1 -\pset expanded off -\d psql_serial_tab_id_seq -Type,Start,Minimum,Maximum,Increment,Cycles?,Cache -integer,1,1,2147483647,1,no,1 -\pset tuples_only true -\df exp -pg_catalog,exp,double precision,double precision,func -pg_catalog,exp,numeric,numeric,func -\pset tuples_only false -\pset expanded on -\d psql_serial_tab_id_seq -Type,integer -Start,1 -Minimum,1 -Maximum,2147483647 -Increment,1 -Cycles?,no -Cache,1 -\pset tuples_only true -\df exp -Schema,pg_catalog -Name,exp -Result data type,double precision -Argument data types,double precision -Type,func -Schema,pg_catalog -Name,exp -Result data type,numeric -Argument data types,numeric -Type,func -\pset tuples_only false -prepare q as - select 'some"text' as "a""title", E' \n' as "junk", - ' ' as "empty", n as int - from generate_series(1,2) as n; -\pset expanded off -execute q; -"a""title",junk,empty,int -"some""text"," -", ,1 -"some""text"," -", ,2 -\pset expanded on -execute q; -"a""title","some""text" -junk," -" -empty, -int,1 -"a""title","some""text" -junk," -" -empty, -int,2 -deallocate q; --- special cases -\pset expanded off -select 'comma,comma' as comma, 'semi;semi' as semi; -comma,semi -"comma,comma",semi;semi -\pset csv_fieldsep ';' -select 'comma,comma' as comma, 'semi;semi' as semi; -comma;semi -comma,comma;"semi;semi" -select '\.' as data; -data -"\." -\pset csv_fieldsep '.' -select '\' as d1, '' as d2; -"d1"."d2" -"\"."" --- illegal csv separators -\pset csv_fieldsep '' -\pset: csv_fieldsep must be a single one-byte character -\pset csv_fieldsep '\0' -\pset: csv_fieldsep must be a single one-byte character -\pset csv_fieldsep '\n' -\pset: csv_fieldsep cannot be a double quote, a newline, or a carriage return -\pset csv_fieldsep '\r' -\pset: csv_fieldsep cannot be a double quote, a newline, or a carriage return -\pset csv_fieldsep '"' -\pset: csv_fieldsep cannot be a double quote, a newline, or a carriage return -\pset csv_fieldsep ',,' -\pset: csv_fieldsep must be a single one-byte character -\pset csv_fieldsep ',' --- test html output format -\pset format html -\pset border 1 -\pset expanded off -\d psql_serial_tab_id_seq - - - - - - - - - - - - - - - - - - - - -
Sequence "public.psql_serial_tab_id_seq"
TypeStartMinimumMaximumIncrementCycles?Cache
integer1121474836471no1
-

Owned by: public.psql_serial_tab.id
-

-\pset tuples_only true -\df exp - - - - - - - - - - - - - - - -
pg_catalogexpdouble precisiondouble precisionfunc
pg_catalogexpnumericnumericfunc
- -\pset tuples_only false -\pset expanded on -\d psql_serial_tab_id_seq - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Sequence "public.psql_serial_tab_id_seq"
Record 1
Typeinteger
Start1
Minimum1
Maximum2147483647
Increment1
Cycles?no
Cache1
-

Owned by: public.psql_serial_tab.id
-

-\pset tuples_only true -\df exp - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 
Schemapg_catalog
Nameexp
Result data typedouble precision
Argument data typesdouble precision
Typefunc
 
Schemapg_catalog
Nameexp
Result data typenumeric
Argument data typesnumeric
Typefunc
- -\pset tuples_only false -prepare q as - select 'some"text' as "a&title", E' \n' as "junk", - ' ' as "empty", n as int - from generate_series(1,2) as n; -\pset expanded off -\pset border 0 -execute q; - - - - - - - - - - - - - - - - - - - -
a&titlejunkemptyint
some"text  <foo>
-<bar>
  1
some"text  <foo>
-<bar>
  2
-

(2 rows)
-

-\pset border 1 -execute q; - - - - - - - - - - - - - - - - - - - -
a&titlejunkemptyint
some"text  <foo>
-<bar>
  1
some"text  <foo>
-<bar>
  2
-

(2 rows)
-

-\pset tableattr foobar -execute q; - - - - - - - - - - - - - - - - - - - -
a&titlejunkemptyint
some"text  <foo>
-<bar>
  1
some"text  <foo>
-<bar>
  2
-

(2 rows)
-

-\pset tableattr -\pset expanded on -\pset border 0 -execute q; - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Record 1
a&titlesome"text
junk  <foo>
-<bar>
empty 
int1
Record 2
a&titlesome"text
junk  <foo>
-<bar>
empty 
int2
- -\pset border 1 -execute q; - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Record 1
a&titlesome"text
junk  <foo>
-<bar>
empty 
int1
Record 2
a&titlesome"text
junk  <foo>
-<bar>
empty 
int2
- -\pset tableattr foobar -execute q; - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Record 1
a&titlesome"text
junk  <foo>
-<bar>
empty 
int1
Record 2
a&titlesome"text
junk  <foo>
-<bar>
empty 
int2
- -\pset tableattr -deallocate q; --- test latex output format -\pset format latex -\pset border 1 -\pset expanded off -\d psql_serial_tab_id_seq -\begin{center} -Sequence "public.psql\_serial\_tab\_id\_seq" -\end{center} - -\begin{tabular}{l | r | r | r | r | l | r} -\textit{Type} & \textit{Start} & \textit{Minimum} & \textit{Maximum} & \textit{Increment} & \textit{Cycles?} & \textit{Cache} \\ -\hline -integer & 1 & 1 & 2147483647 & 1 & no & 1 \\ -\end{tabular} - -\noindent Owned by: public.psql\_serial\_tab.id \\ - -\pset tuples_only true -\df exp -\begin{tabular}{l | l | l | l | l} -pg\_catalog & exp & double precision & double precision & func \\ -pg\_catalog & exp & numeric & numeric & func \\ -\end{tabular} - -\noindent -\pset tuples_only false -\pset expanded on -\d psql_serial_tab_id_seq -\begin{center} -Sequence "public.psql\_serial\_tab\_id\_seq" -\end{center} - -\begin{tabular}{c|l} -\multicolumn{2}{c}{\textit{Record 1}} \\ -\hline -Type & integer \\ -Start & 1 \\ -Minimum & 1 \\ -Maximum & 2147483647 \\ -Increment & 1 \\ -Cycles? & no \\ -Cache & 1 \\ -\end{tabular} - -\noindent Owned by: public.psql\_serial\_tab.id \\ - -\pset tuples_only true -\df exp -\begin{tabular}{c|l} -\hline -Schema & pg\_catalog \\ -Name & exp \\ -Result data type & double precision \\ -Argument data types & double precision \\ -Type & func \\ -\hline -Schema & pg\_catalog \\ -Name & exp \\ -Result data type & numeric \\ -Argument data types & numeric \\ -Type & func \\ -\end{tabular} - -\noindent -\pset tuples_only false -prepare q as - select 'some\more_text' as "a$title", E' #%&^~|\n{bar}' as "junk", - ' ' as "empty", n as int - from generate_series(1,2) as n; -\pset expanded off -\pset border 0 -execute q; -\begin{tabular}{lllr} -\textit{a\$title} & \textit{junk} & \textit{empty} & \textit{int} \\ -\hline -some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 1 \\ -some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 2 \\ -\end{tabular} - -\noindent (2 rows) \\ - -\pset border 1 -execute q; -\begin{tabular}{l | l | l | r} -\textit{a\$title} & \textit{junk} & \textit{empty} & \textit{int} \\ -\hline -some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 1 \\ -some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 2 \\ -\end{tabular} - -\noindent (2 rows) \\ - -\pset border 2 -execute q; -\begin{tabular}{| l | l | l | r |} -\hline -\textit{a\$title} & \textit{junk} & \textit{empty} & \textit{int} \\ -\hline -some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 1 \\ -some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 2 \\ -\hline -\end{tabular} - -\noindent (2 rows) \\ - -\pset border 3 -execute q; -\begin{tabular}{| l | l | l | r |} -\hline -\textit{a\$title} & \textit{junk} & \textit{empty} & \textit{int} \\ -\hline -some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 1 \\ -\hline -some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 2 \\ -\hline -\end{tabular} - -\noindent (2 rows) \\ - -\pset expanded on -\pset border 0 -execute q; -\begin{tabular}{cl} -\multicolumn{2}{c}{\textit{Record 1}} \\ -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ 
-\multicolumn{2}{c}{\textit{Record 2}} \\ -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\end{tabular} - -\noindent -\pset border 1 -execute q; -\begin{tabular}{c|l} -\multicolumn{2}{c}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\multicolumn{2}{c}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\end{tabular} - -\noindent -\pset border 2 -execute q; -\begin{tabular}{|c|l|} -\hline -\multicolumn{2}{|c|}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\hline -\multicolumn{2}{|c|}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\hline -\end{tabular} - -\noindent -\pset border 3 -execute q; -\begin{tabular}{|c|l|} -\hline -\multicolumn{2}{|c|}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\hline -\multicolumn{2}{|c|}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\hline -\end{tabular} - -\noindent -deallocate q; --- test latex-longtable output format -\pset format latex-longtable -\pset border 1 -\pset expanded off -\d psql_serial_tab_id_seq -\begin{longtable}{l | r | r | r | r | l | r} -\small\textbf{\textit{Type}} & \small\textbf{\textit{Start}} & \small\textbf{\textit{Minimum}} & \small\textbf{\textit{Maximum}} & \small\textbf{\textit{Increment}} & \small\textbf{\textit{Cycles?}} & \small\textbf{\textit{Cache}} \\ -\midrule -\endfirsthead -\small\textbf{\textit{Type}} & \small\textbf{\textit{Start}} & \small\textbf{\textit{Minimum}} & \small\textbf{\textit{Maximum}} & \small\textbf{\textit{Increment}} & \small\textbf{\textit{Cycles?}} & \small\textbf{\textit{Cache}} \\ -\midrule -\endhead -\caption[Sequence "public.psql\_serial\_tab\_id\_seq" (Continued)]{Sequence "public.psql\_serial\_tab\_id\_seq"} -\endfoot -\caption[Sequence "public.psql\_serial\_tab\_id\_seq"]{Sequence "public.psql\_serial\_tab\_id\_seq"} -\endlastfoot -\raggedright{integer} -& -\raggedright{1} -& -\raggedright{1} -& -\raggedright{2147483647} -& -\raggedright{1} -& -\raggedright{no} -& -\raggedright{1} \tabularnewline -\end{longtable} -\pset tuples_only true -\df exp -\begin{longtable}{l | l | l | l | l} -\raggedright{pg\_catalog} -& -\raggedright{exp} -& -\raggedright{double precision} -& -\raggedright{double precision} -& -\raggedright{func} \tabularnewline -\raggedright{pg\_catalog} -& -\raggedright{exp} -& -\raggedright{numeric} -& -\raggedright{numeric} -& -\raggedright{func} \tabularnewline -\end{longtable} -\pset tuples_only false -\pset expanded on -\d psql_serial_tab_id_seq -\begin{center} -Sequence "public.psql\_serial\_tab\_id\_seq" -\end{center} - -\begin{tabular}{c|l} -\multicolumn{2}{c}{\textit{Record 1}} \\ -\hline -Type & integer \\ -Start & 1 \\ -Minimum & 1 \\ -Maximum & 2147483647 \\ -Increment & 1 \\ -Cycles? 
& no \\ -Cache & 1 \\ -\end{tabular} - -\noindent Owned by: public.psql\_serial\_tab.id \\ - -\pset tuples_only true -\df exp -\begin{tabular}{c|l} -\hline -Schema & pg\_catalog \\ -Name & exp \\ -Result data type & double precision \\ -Argument data types & double precision \\ -Type & func \\ -\hline -Schema & pg\_catalog \\ -Name & exp \\ -Result data type & numeric \\ -Argument data types & numeric \\ -Type & func \\ -\end{tabular} - -\noindent -\pset tuples_only false -prepare q as - select 'some\more_text' as "a$title", E' #%&^~|\n{bar}' as "junk", - ' ' as "empty", n as int - from generate_series(1,2) as n; -\pset expanded off -\pset border 0 -execute q; -\begin{longtable}{lllr} -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endfirsthead -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endhead -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{1} \tabularnewline -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{2} \tabularnewline -\end{longtable} -\pset border 1 -execute q; -\begin{longtable}{l | l | l | r} -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endfirsthead -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endhead -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{1} \tabularnewline -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{2} \tabularnewline -\end{longtable} -\pset border 2 -execute q; -\begin{longtable}{| l | l | l | r |} -\toprule -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endfirsthead -\toprule -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endhead -\bottomrule -\endfoot -\bottomrule -\endlastfoot -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{1} \tabularnewline -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{2} \tabularnewline -\end{longtable} -\pset border 3 -execute q; -\begin{longtable}{| l | l | l | r |} -\toprule -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endfirsthead -\toprule -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\endhead -\bottomrule -\endfoot -\bottomrule -\endlastfoot -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{1} 
\tabularnewline - \hline -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{2} \tabularnewline - \hline -\end{longtable} -\pset tableattr lr -execute q; -\begin{longtable}{| p{lr\textwidth} | p{lr\textwidth} | p{lr\textwidth} | r |} -\toprule -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\midrule -\endfirsthead -\toprule -\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ -\endhead -\bottomrule -\endfoot -\bottomrule -\endlastfoot -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{1} \tabularnewline - \hline -\raggedright{some\textbackslash{}more\_text} -& -\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} -& -\raggedright{ } -& -\raggedright{2} \tabularnewline - \hline -\end{longtable} -\pset tableattr -\pset expanded on -\pset border 0 -execute q; -\begin{tabular}{cl} -\multicolumn{2}{c}{\textit{Record 1}} \\ -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\multicolumn{2}{c}{\textit{Record 2}} \\ -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\end{tabular} - -\noindent -\pset border 1 -execute q; -\begin{tabular}{c|l} -\multicolumn{2}{c}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\multicolumn{2}{c}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\end{tabular} - -\noindent -\pset border 2 -execute q; -\begin{tabular}{|c|l|} -\hline -\multicolumn{2}{|c|}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\hline -\multicolumn{2}{|c|}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\hline -\end{tabular} - -\noindent -\pset border 3 -execute q; -\begin{tabular}{|c|l|} -\hline -\multicolumn{2}{|c|}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\hline -\multicolumn{2}{|c|}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\hline -\end{tabular} - -\noindent -\pset tableattr lr -execute q; -\begin{tabular}{|c|l|} -\hline -\multicolumn{2}{|c|}{\textit{Record 1}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 1 \\ -\hline -\multicolumn{2}{|c|}{\textit{Record 2}} \\ -\hline -a\$title & some\textbackslash{}more\_text \\ -junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ -empty & \\ -int & 2 \\ -\hline -\end{tabular} - -\noindent -\pset 
tableattr -deallocate q; --- test troff-ms output format -\pset format troff-ms -\pset border 1 -\pset expanded off -\d psql_serial_tab_id_seq -.LP -.DS C -Sequence "public.psql_serial_tab_id_seq" -.DE -.LP -.TS -center; -l | r | r | r | r | l | r. -\fIType\fP \fIStart\fP \fIMinimum\fP \fIMaximum\fP \fIIncrement\fP \fICycles?\fP \fICache\fP -_ -integer 1 1 2147483647 1 no 1 -.TE -.DS L -Owned by: public.psql_serial_tab.id -.DE -\pset tuples_only true -\df exp -.LP -.TS -center; -l | l | l | l | l. -pg_catalog exp double precision double precision func -pg_catalog exp numeric numeric func -.TE -.DS L -.DE -\pset tuples_only false -\pset expanded on -\d psql_serial_tab_id_seq -.LP -.DS C -Sequence "public.psql_serial_tab_id_seq" -.DE -.LP -.TS -center; -c s. -\fIRecord 1\fP -_ -.T& -c | l. -Type integer -Start 1 -Minimum 1 -Maximum 2147483647 -Increment 1 -Cycles? no -Cache 1 -.TE -.DS L -Owned by: public.psql_serial_tab.id -.DE -\pset tuples_only true -\df exp -.LP -.TS -center; -c l; -_ -Schema pg_catalog -Name exp -Result data type double precision -Argument data types double precision -Type func -_ -Schema pg_catalog -Name exp -Result data type numeric -Argument data types numeric -Type func -.TE -.DS L -.DE -\pset tuples_only false -prepare q as - select 'some\text' as "a\title", E' \n' as "junk", - ' ' as "empty", n as int - from generate_series(1,2) as n; -\pset expanded off -\pset border 0 -execute q; -.LP -.TS -center; -lllr. -\fIa\(rstitle\fP \fIjunk\fP \fIempty\fP \fIint\fP -_ -some\(rstext - 1 -some\(rstext - 2 -.TE -.DS L -(2 rows) -.DE -\pset border 1 -execute q; -.LP -.TS -center; -l | l | l | r. -\fIa\(rstitle\fP \fIjunk\fP \fIempty\fP \fIint\fP -_ -some\(rstext - 1 -some\(rstext - 2 -.TE -.DS L -(2 rows) -.DE -\pset border 2 -execute q; -.LP -.TS -center box; -l | l | l | r. -\fIa\(rstitle\fP \fIjunk\fP \fIempty\fP \fIint\fP -_ -some\(rstext - 1 -some\(rstext - 2 -.TE -.DS L -(2 rows) -.DE -\pset expanded on -\pset border 0 -execute q; -.LP -.TS -center; -c s. -\fIRecord 1\fP -.T& -c l. -a\(rstitle some\(rstext -junk - -empty -int 1 -.T& -c s. -\fIRecord 2\fP -.T& -c l. -a\(rstitle some\(rstext -junk - -empty -int 2 -.TE -.DS L -.DE -\pset border 1 -execute q; -.LP -.TS -center; -c s. -\fIRecord 1\fP -_ -.T& -c | l. -a\(rstitle some\(rstext -junk - -empty -int 1 -.T& -c s. -\fIRecord 2\fP -_ -.T& -c | l. -a\(rstitle some\(rstext -junk - -empty -int 2 -.TE -.DS L -.DE -\pset border 2 -execute q; -.LP -.TS -center box; -c s. -\fIRecord 1\fP -_ -.T& -c l. -a\(rstitle some\(rstext -junk - -empty -int 1 -_ -.T& -c s. -\fIRecord 2\fP -_ -.T& -c l. -a\(rstitle some\(rstext -junk - -empty -int 2 -.TE -.DS L -.DE -deallocate q; --- check ambiguous format requests -\pset format a -\pset: ambiguous abbreviation "a" matches both "aligned" and "asciidoc" -\pset format l --- clean up after output format tests -drop table psql_serial_tab; -\pset format aligned -\pset expanded off -\pset border 1 --- \echo and allied features -\echo this is a test -this is a test -\echo -n without newline -without newline\echo with -n newline -with -n newline -\echo '-n' with newline --n with newline -\set foo bar -\echo foo = :foo -foo = bar -\qecho this is a test -this is a test -\qecho foo = :foo -foo = bar -\warn this is a test -this is a test -\warn foo = :foo -foo = bar --- tests for \if ... \endif -\if true - select 'okay'; - ?column? ----------- - okay -(1 row) - - select 'still okay'; - ?column? 
------------- - still okay -(1 row) - -\else - not okay; - still not okay -\endif --- at this point query buffer should still have last valid line -\g - ?column? ------------- - still okay -(1 row) - --- \if should work okay on part of a query -select - \if true - 42 - \else - (bogus - \endif - forty_two; - forty_two ------------ - 42 -(1 row) - -select \if false \\ (bogus \else \\ 42 \endif \\ forty_two; - forty_two ------------ - 42 -(1 row) - --- test a large nested if using a variety of true-equivalents -\if true - \if 1 - \if yes - \if on - \echo 'all true' -all true - \else - \echo 'should not print #1-1' - \endif - \else - \echo 'should not print #1-2' - \endif - \else - \echo 'should not print #1-3' - \endif -\else - \echo 'should not print #1-4' -\endif --- test a variety of false-equivalents in an if/elif/else structure -\if false - \echo 'should not print #2-1' -\elif 0 - \echo 'should not print #2-2' -\elif no - \echo 'should not print #2-3' -\elif off - \echo 'should not print #2-4' -\else - \echo 'all false' -all false -\endif --- test true-false elif after initial true branch -\if true - \echo 'should print #2-5' -should print #2-5 -\elif true - \echo 'should not print #2-6' -\elif false - \echo 'should not print #2-7' -\else - \echo 'should not print #2-8' -\endif --- test simple true-then-else -\if true - \echo 'first thing true' -first thing true -\else - \echo 'should not print #3-1' -\endif --- test simple false-true-else -\if false - \echo 'should not print #4-1' -\elif true - \echo 'second thing true' -second thing true -\else - \echo 'should not print #5-1' -\endif --- invalid boolean expressions are false -\if invalid boolean expression -unrecognized value "invalid boolean expression" for "\if expression": Boolean expected - \echo 'will not print #6-1' -\else - \echo 'will print anyway #6-2' -will print anyway #6-2 -\endif --- test un-matched endif -\endif -\endif: no matching \if --- test un-matched else -\else -\else: no matching \if --- test un-matched elif -\elif -\elif: no matching \if --- test double-else error -\if true -\else -\else -\else: cannot occur after \else -\endif --- test elif out-of-order -\if false -\else -\elif -\elif: cannot occur after \else -\endif --- test if-endif matching in a false branch -\if false - \if false - \echo 'should not print #7-1' - \else - \echo 'should not print #7-2' - \endif - \echo 'should not print #7-3' -\else - \echo 'should print #7-4' -should print #7-4 -\endif --- show that vars and backticks are not expanded when ignoring extra args -\set foo bar -\echo :foo :'foo' :"foo" -bar 'bar' "bar" -\pset fieldsep | `nosuchcommand` :foo :'foo' :"foo" -\pset: extra argument "nosuchcommand" ignored -\pset: extra argument ":foo" ignored -\pset: extra argument ":'foo'" ignored -\pset: extra argument ":"foo"" ignored --- show that vars and backticks are not expanded and commands are ignored --- when in a false if-branch -\set try_to_quit '\\q' -\if false - :try_to_quit - \echo `nosuchcommand` :foo :'foo' :"foo" - \pset fieldsep | `nosuchcommand` :foo :'foo' :"foo" - \a - SELECT $1 \bind 1 \g - \bind_named stmt1 1 2 \g - \C arg1 - \c arg1 arg2 arg3 arg4 - \cd arg1 - \close stmt1 - \conninfo - \copy arg1 arg2 arg3 arg4 arg5 arg6 - \copyright - SELECT 1 as one, 2, 3 \crosstabview - \dt arg1 - \e arg1 arg2 - \ef whole_line - \ev whole_line - \echo arg1 arg2 arg3 arg4 arg5 - \echo arg1 - \encoding arg1 - \errverbose - \f arg1 - \g arg1 - \gx arg1 - \gexec - SELECT 1 AS one \gset - \h - \? 
- \html - \i arg1 - \ir arg1 - \l arg1 - \lo arg1 arg2 -invalid command \lo - \lo_list - \o arg1 - \p - SELECT 1 \parse - \password arg1 - \prompt arg1 arg2 - \pset arg1 arg2 - \q - \reset - \s arg1 - \set arg1 arg2 arg3 arg4 arg5 arg6 arg7 - \setenv arg1 arg2 - \sf whole_line - \sv whole_line - \t arg1 - \T arg1 - \timing arg1 - \unset arg1 - \w arg1 - \watch arg1 arg2 - \x arg1 - -- \else here is eaten as part of OT_FILEPIPE argument - \w |/no/such/file \else - -- \endif here is eaten as part of whole-line argument - \! whole_line \endif - \z -\else - \echo 'should print #8-1' -should print #8-1 -\endif --- :{?...} defined variable test -\set i 1 -\if :{?i} - \echo '#9-1 ok, variable i is defined' -#9-1 ok, variable i is defined -\else - \echo 'should not print #9-2' -\endif -\if :{?no_such_variable} - \echo 'should not print #10-1' -\else - \echo '#10-2 ok, variable no_such_variable is not defined' -#10-2 ok, variable no_such_variable is not defined -\endif -SELECT :{?i} AS i_is_defined; - i_is_defined --------------- - t -(1 row) - -SELECT NOT :{?no_such_var} AS no_such_var_is_not_defined; - no_such_var_is_not_defined ----------------------------- - t -(1 row) - --- SHOW_CONTEXT -\set SHOW_CONTEXT never -do $$ -begin - raise notice 'foo'; - raise exception 'bar'; -end $$; -NOTICE: foo -ERROR: bar -\set SHOW_CONTEXT errors -do $$ -begin - raise notice 'foo'; - raise exception 'bar'; -end $$; -NOTICE: foo -ERROR: bar -CONTEXT: PL/pgSQL function inline_code_block line 4 at RAISE -\set SHOW_CONTEXT always -do $$ -begin - raise notice 'foo'; - raise exception 'bar'; -end $$; -NOTICE: foo -CONTEXT: PL/pgSQL function inline_code_block line 3 at RAISE -ERROR: bar -CONTEXT: PL/pgSQL function inline_code_block line 4 at RAISE --- test printing and clearing the query buffer -SELECT 1; - ?column? ----------- - 1 -(1 row) - -\p -SELECT 1; -SELECT 2 \r -\p -SELECT 1; -SELECT 3 \p -SELECT 3 -UNION SELECT 4 \p -SELECT 3 -UNION SELECT 4 -UNION SELECT 5 -ORDER BY 1; - ?column? 
----------- - 3 - 4 - 5 -(3 rows) - -\r -\p -SELECT 3 -UNION SELECT 4 -UNION SELECT 5 -ORDER BY 1; --- tests for special result variables --- working query, 2 rows selected -SELECT 1 AS stuff UNION SELECT 2; - stuff -------- - 1 - 2 -(2 rows) - -\echo 'error:' :ERROR -error: false -\echo 'error code:' :SQLSTATE -error code: 00000 -\echo 'number of rows:' :ROW_COUNT -number of rows: 2 --- syntax error -SELECT 1 UNION; -ERROR: syntax error at or near ";" -LINE 1: SELECT 1 UNION; - ^ -\echo 'error:' :ERROR -error: true -\echo 'error code:' :SQLSTATE -error code: 42601 -\echo 'number of rows:' :ROW_COUNT -number of rows: 0 -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: syntax error at or near ";" -\echo 'last error code:' :LAST_ERROR_SQLSTATE -last error code: 42601 --- empty query -; -\echo 'error:' :ERROR -error: false -\echo 'error code:' :SQLSTATE -error code: 00000 -\echo 'number of rows:' :ROW_COUNT -number of rows: 0 --- must have kept previous values -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: syntax error at or near ";" -\echo 'last error code:' :LAST_ERROR_SQLSTATE -last error code: 42601 --- other query error -DROP TABLE this_table_does_not_exist; -ERROR: table "this_table_does_not_exist" does not exist -\echo 'error:' :ERROR -error: true -\echo 'error code:' :SQLSTATE -error code: 42P01 -\echo 'number of rows:' :ROW_COUNT -number of rows: 0 -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: table "this_table_does_not_exist" does not exist -\echo 'last error code:' :LAST_ERROR_SQLSTATE -last error code: 42P01 --- nondefault verbosity error settings (except verbose, which is too unstable) -\set VERBOSITY terse -SELECT 1 UNION; -ERROR: syntax error at or near ";" at character 15 -\echo 'error:' :ERROR -error: true -\echo 'error code:' :SQLSTATE -error code: 42601 -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: syntax error at or near ";" -\set VERBOSITY sqlstate -SELECT 1/0; -ERROR: 22012 -\echo 'error:' :ERROR -error: true -\echo 'error code:' :SQLSTATE -error code: 22012 -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: division by zero -\set VERBOSITY default --- working \gdesc -SELECT 3 AS three, 4 AS four \gdesc - Column | Type ---------+--------- - three | integer - four | integer -(2 rows) - -\echo 'error:' :ERROR -error: false -\echo 'error code:' :SQLSTATE -error code: 00000 -\echo 'number of rows:' :ROW_COUNT -number of rows: 2 --- \gdesc with an error -SELECT 4 AS \gdesc -ERROR: syntax error at end of input -LINE 1: SELECT 4 AS - ^ -\echo 'error:' :ERROR -error: true -\echo 'error code:' :SQLSTATE -error code: 42601 -\echo 'number of rows:' :ROW_COUNT -number of rows: 0 -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: syntax error at end of input -\echo 'last error code:' :LAST_ERROR_SQLSTATE -last error code: 42601 --- check row count for a query with chunked results -\set FETCH_COUNT 10 -select unique2 from tenk1 order by unique2 limit 19; - unique2 ---------- - 0 - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 - 11 - 12 - 13 - 14 - 15 - 16 - 17 - 18 -(19 rows) - -\echo 'error:' :ERROR -error: false -\echo 'error code:' :SQLSTATE -error code: 00000 -\echo 'number of rows:' :ROW_COUNT -number of rows: 19 --- chunked results with an error after the first chunk --- (we must disable parallel query here, else the behavior is timing-dependent) -set debug_parallel_query = off; -select 1/(15-unique2) from tenk1 order by unique2 limit 19; - ?column? 
----------- - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 -ERROR: division by zero -\echo 'error:' :ERROR -error: true -\echo 'error code:' :SQLSTATE -error code: 22012 -\echo 'number of rows:' :ROW_COUNT -number of rows: 0 -\echo 'last error message:' :LAST_ERROR_MESSAGE -last error message: division by zero -\echo 'last error code:' :LAST_ERROR_SQLSTATE -last error code: 22012 -reset debug_parallel_query; -\unset FETCH_COUNT -create schema testpart; -create role regress_partitioning_role; -alter schema testpart owner to regress_partitioning_role; -set role to regress_partitioning_role; --- run test inside own schema and hide other partitions -set search_path to testpart; -create table testtable_apple(logdate date); -create table testtable_orange(logdate date); -create index testtable_apple_index on testtable_apple(logdate); -create index testtable_orange_index on testtable_orange(logdate); -create table testpart_apple(logdate date) partition by range(logdate); -create table testpart_orange(logdate date) partition by range(logdate); -create index testpart_apple_index on testpart_apple(logdate); -create index testpart_orange_index on testpart_orange(logdate); --- only partition related object should be displayed -\dP test*apple* - List of partitioned relations - Schema | Name | Owner | Type | Parent name | Table -----------+----------------------+---------------------------+-------------------+-------------+---------------- - testpart | testpart_apple | regress_partitioning_role | partitioned table | | - testpart | testpart_apple_index | regress_partitioning_role | partitioned index | | testpart_apple -(2 rows) - -\dPt test*apple* - List of partitioned tables - Schema | Name | Owner | Parent name -----------+----------------+---------------------------+------------- - testpart | testpart_apple | regress_partitioning_role | -(1 row) - -\dPi test*apple* - List of partitioned indexes - Schema | Name | Owner | Parent name | Table -----------+----------------------+---------------------------+-------------+---------------- - testpart | testpart_apple_index | regress_partitioning_role | | testpart_apple -(1 row) - -drop table testtable_apple; -drop table testtable_orange; -drop table testpart_apple; -drop table testpart_orange; -create table parent_tab (id int) partition by range (id); -create index parent_index on parent_tab (id); -create table child_0_10 partition of parent_tab - for values from (0) to (10); -create table child_10_20 partition of parent_tab - for values from (10) to (20); -create table child_20_30 partition of parent_tab - for values from (20) to (30); -insert into parent_tab values (generate_series(0,29)); -create table child_30_40 partition of parent_tab -for values from (30) to (40) - partition by range(id); -create table child_30_35 partition of child_30_40 - for values from (30) to (35); -create table child_35_40 partition of child_30_40 - for values from (35) to (40); -insert into parent_tab values (generate_series(30,39)); -\dPt - List of partitioned tables - Schema | Name | Owner -----------+------------+--------------------------- - testpart | parent_tab | regress_partitioning_role -(1 row) - -\dPi - List of partitioned indexes - Schema | Name | Owner | Table -----------+--------------+---------------------------+------------ - testpart | parent_index | regress_partitioning_role | parent_tab -(1 row) - -\dP testpart.* - List of partitioned relations - Schema | Name | Owner | Type | Parent name | Table 
-----------+--------------------+---------------------------+-------------------+--------------+------------- - testpart | parent_tab | regress_partitioning_role | partitioned table | | - testpart | child_30_40 | regress_partitioning_role | partitioned table | parent_tab | - testpart | parent_index | regress_partitioning_role | partitioned index | | parent_tab - testpart | child_30_40_id_idx | regress_partitioning_role | partitioned index | parent_index | child_30_40 -(4 rows) - -\dP - List of partitioned relations - Schema | Name | Owner | Type | Table -----------+--------------+---------------------------+-------------------+------------ - testpart | parent_tab | regress_partitioning_role | partitioned table | - testpart | parent_index | regress_partitioning_role | partitioned index | parent_tab -(2 rows) - -\dPtn - List of partitioned tables - Schema | Name | Owner | Parent name -----------+-------------+---------------------------+------------- - testpart | parent_tab | regress_partitioning_role | - testpart | child_30_40 | regress_partitioning_role | parent_tab -(2 rows) - -\dPin - List of partitioned indexes - Schema | Name | Owner | Parent name | Table -----------+--------------------+---------------------------+--------------+------------- - testpart | parent_index | regress_partitioning_role | | parent_tab - testpart | child_30_40_id_idx | regress_partitioning_role | parent_index | child_30_40 -(2 rows) - -\dPn - List of partitioned relations - Schema | Name | Owner | Type | Parent name | Table -----------+--------------------+---------------------------+-------------------+--------------+------------- - testpart | parent_tab | regress_partitioning_role | partitioned table | | - testpart | child_30_40 | regress_partitioning_role | partitioned table | parent_tab | - testpart | parent_index | regress_partitioning_role | partitioned index | | parent_tab - testpart | child_30_40_id_idx | regress_partitioning_role | partitioned index | parent_index | child_30_40 -(4 rows) - -\dPn testpart.* - List of partitioned relations - Schema | Name | Owner | Type | Parent name | Table -----------+--------------------+---------------------------+-------------------+--------------+------------- - testpart | parent_tab | regress_partitioning_role | partitioned table | | - testpart | child_30_40 | regress_partitioning_role | partitioned table | parent_tab | - testpart | parent_index | regress_partitioning_role | partitioned index | | parent_tab - testpart | child_30_40_id_idx | regress_partitioning_role | partitioned index | parent_index | child_30_40 -(4 rows) - -drop table parent_tab cascade; -drop schema testpart; -set search_path to default; -set role to default; -drop role regress_partitioning_role; --- \d on toast table (use pg_statistic's toast table, which has a known name) -\d pg_toast.pg_toast_2619 -TOAST table "pg_toast.pg_toast_2619" - Column | Type -------------+--------- - chunk_id | oid - chunk_seq | integer - chunk_data | bytea -Owning table: "pg_catalog.pg_statistic" -Indexes: - "pg_toast_2619_index" PRIMARY KEY, btree (chunk_id, chunk_seq) - --- check printing info about access methods -\dA -List of access methods - Name | Type ---------+------- - brin | Index - btree | Index - gin | Index - gist | Index - hash | Index - heap | Table - heap2 | Table - spgist | Index -(8 rows) - -\dA * -List of access methods - Name | Type ---------+------- - brin | Index - btree | Index - gin | Index - gist | Index - hash | Index - heap | Table - heap2 | Table - spgist | Index -(8 rows) - -\dA h* 
-List of access methods - Name | Type --------+------- - hash | Index - heap | Table - heap2 | Table -(3 rows) - -\dA foo -List of access methods - Name | Type -------+------ -(0 rows) - -\dA foo bar -List of access methods - Name | Type -------+------ -(0 rows) - -\dA: extra argument "bar" ignored -\dA+ - List of access methods - Name | Type | Handler | Description ---------+-------+----------------------+---------------------------------------- - brin | Index | brinhandler | block range index (BRIN) access method - btree | Index | bthandler | b-tree index access method - gin | Index | ginhandler | GIN index access method - gist | Index | gisthandler | GiST index access method - hash | Index | hashhandler | hash index access method - heap | Table | heap_tableam_handler | heap table access method - heap2 | Table | heap_tableam_handler | - spgist | Index | spghandler | SP-GiST index access method -(8 rows) - -\dA+ * - List of access methods - Name | Type | Handler | Description ---------+-------+----------------------+---------------------------------------- - brin | Index | brinhandler | block range index (BRIN) access method - btree | Index | bthandler | b-tree index access method - gin | Index | ginhandler | GIN index access method - gist | Index | gisthandler | GiST index access method - hash | Index | hashhandler | hash index access method - heap | Table | heap_tableam_handler | heap table access method - heap2 | Table | heap_tableam_handler | - spgist | Index | spghandler | SP-GiST index access method -(8 rows) - -\dA+ h* - List of access methods - Name | Type | Handler | Description --------+-------+----------------------+-------------------------- - hash | Index | hashhandler | hash index access method - heap | Table | heap_tableam_handler | heap table access method - heap2 | Table | heap_tableam_handler | -(3 rows) - -\dA+ foo - List of access methods - Name | Type | Handler | Description -------+------+---------+------------- -(0 rows) - -\dAc brin pg*.oid* - List of operator classes - AM | Input type | Storage type | Operator class | Default? 
-------+------------+--------------+----------------------+---------- - brin | oid | | oid_bloom_ops | no - brin | oid | | oid_minmax_multi_ops | no - brin | oid | | oid_minmax_ops | yes -(3 rows) - -\dAf spgist - List of operator families - AM | Operator family | Applicable types ---------+-----------------+------------------ - spgist | box_ops | box - spgist | kd_point_ops | point - spgist | network_ops | inet - spgist | poly_ops | polygon - spgist | quad_point_ops | point - spgist | range_ops | anyrange - spgist | text_ops | text -(7 rows) - -\dAf btree int4 - List of operator families - AM | Operator family | Applicable types --------+-----------------+--------------------------- - btree | integer_ops | smallint, integer, bigint -(1 row) - -\dAo+ btree float_ops - List of operators of operator families - AM | Operator family | Operator | Strategy | Purpose | Sort opfamily --------+-----------------+---------------------------------------+----------+---------+--------------- - btree | float_ops | <(double precision,double precision) | 1 | search | - btree | float_ops | <=(double precision,double precision) | 2 | search | - btree | float_ops | =(double precision,double precision) | 3 | search | - btree | float_ops | >=(double precision,double precision) | 4 | search | - btree | float_ops | >(double precision,double precision) | 5 | search | - btree | float_ops | <(real,real) | 1 | search | - btree | float_ops | <=(real,real) | 2 | search | - btree | float_ops | =(real,real) | 3 | search | - btree | float_ops | >=(real,real) | 4 | search | - btree | float_ops | >(real,real) | 5 | search | - btree | float_ops | <(double precision,real) | 1 | search | - btree | float_ops | <=(double precision,real) | 2 | search | - btree | float_ops | =(double precision,real) | 3 | search | - btree | float_ops | >=(double precision,real) | 4 | search | - btree | float_ops | >(double precision,real) | 5 | search | - btree | float_ops | <(real,double precision) | 1 | search | - btree | float_ops | <=(real,double precision) | 2 | search | - btree | float_ops | =(real,double precision) | 3 | search | - btree | float_ops | >=(real,double precision) | 4 | search | - btree | float_ops | >(real,double precision) | 5 | search | -(20 rows) - -\dAo * pg_catalog.jsonb_path_ops - List of operators of operator families - AM | Operator family | Operator | Strategy | Purpose ------+-----------------+--------------------+----------+--------- - gin | jsonb_path_ops | @>(jsonb,jsonb) | 7 | search - gin | jsonb_path_ops | @?(jsonb,jsonpath) | 15 | search - gin | jsonb_path_ops | @@(jsonb,jsonpath) | 16 | search -(3 rows) - -\dAp+ btree float_ops - List of support functions of operator families - AM | Operator family | Registered left type | Registered right type | Number | Function --------+-----------------+----------------------+-----------------------+--------+------------------------------------------------------------------------------ - btree | float_ops | double precision | double precision | 1 | btfloat8cmp(double precision,double precision) - btree | float_ops | double precision | double precision | 2 | btfloat8sortsupport(internal) - btree | float_ops | double precision | double precision | 3 | in_range(double precision,double precision,double precision,boolean,boolean) - btree | float_ops | real | real | 1 | btfloat4cmp(real,real) - btree | float_ops | real | real | 2 | btfloat4sortsupport(internal) - btree | float_ops | double precision | real | 1 | btfloat84cmp(double precision,real) - btree | float_ops | real | 
double precision | 1 | btfloat48cmp(real,double precision) - btree | float_ops | real | double precision | 3 | in_range(real,real,double precision,boolean,boolean) -(8 rows) - -\dAp * pg_catalog.uuid_ops - List of support functions of operator families - AM | Operator family | Registered left type | Registered right type | Number | Function --------+-----------------+----------------------+-----------------------+--------+-------------------- - btree | uuid_ops | uuid | uuid | 1 | uuid_cmp - btree | uuid_ops | uuid | uuid | 2 | uuid_sortsupport - btree | uuid_ops | uuid | uuid | 4 | btequalimage - hash | uuid_ops | uuid | uuid | 1 | uuid_hash - hash | uuid_ops | uuid | uuid | 2 | uuid_hash_extended -(5 rows) - --- check \dconfig -set work_mem = 10240; -\dconfig work_mem -List of configuration parameters - Parameter | Value ------------+------- - work_mem | 10MB -(1 row) - -\dconfig+ work* - List of configuration parameters - Parameter | Value | Type | Context | Access privileges ------------+-------+---------+---------+------------------- - work_mem | 10MB | integer | user | -(1 row) - -reset work_mem; --- check \df, \do with argument specifications -\df *sqrt - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+--------------+------------------+---------------------+------ - pg_catalog | dsqrt | double precision | double precision | func - pg_catalog | numeric_sqrt | numeric | numeric | func - pg_catalog | sqrt | double precision | double precision | func - pg_catalog | sqrt | numeric | numeric | func -(4 rows) - -\df *sqrt num* - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+--------------+------------------+---------------------+------ - pg_catalog | numeric_sqrt | numeric | numeric | func - pg_catalog | sqrt | numeric | numeric | func -(2 rows) - -\df int*pl - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+-------------+------------------+---------------------+------ - pg_catalog | int24pl | integer | smallint, integer | func - pg_catalog | int28pl | bigint | smallint, bigint | func - pg_catalog | int2pl | smallint | smallint, smallint | func - pg_catalog | int42pl | integer | integer, smallint | func - pg_catalog | int48pl | bigint | integer, bigint | func - pg_catalog | int4pl | integer | integer, integer | func - pg_catalog | int82pl | bigint | bigint, smallint | func - pg_catalog | int84pl | bigint | bigint, integer | func - pg_catalog | int8pl | bigint | bigint, bigint | func - pg_catalog | interval_pl | interval | interval, interval | func -(10 rows) - -\df int*pl int4 - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+---------+------------------+---------------------+------ - pg_catalog | int42pl | integer | integer, smallint | func - pg_catalog | int48pl | bigint | integer, bigint | func - pg_catalog | int4pl | integer | integer, integer | func -(3 rows) - -\df int*pl * pg_catalog.int8 - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+---------+------------------+---------------------+------ - pg_catalog | int28pl | bigint | smallint, bigint | func - pg_catalog | int48pl | bigint | integer, bigint | func - pg_catalog | int8pl | bigint | bigint, bigint | func -(3 rows) - -\df acl* aclitem[] - List of functions - Schema | Name | Result data type | Argument data types | Type 
-------------+-------------+------------------+----------------------------------------------------------------------------------------------------+------ - pg_catalog | aclcontains | boolean | aclitem[], aclitem | func - pg_catalog | aclexplode | SETOF record | acl aclitem[], OUT grantor oid, OUT grantee oid, OUT privilege_type text, OUT is_grantable boolean | func - pg_catalog | aclinsert | aclitem[] | aclitem[], aclitem | func - pg_catalog | aclremove | aclitem[] | aclitem[], aclitem | func -(4 rows) - -\df has_database_privilege oid text - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+------------------------+------------------+---------------------+------ - pg_catalog | has_database_privilege | boolean | oid, text | func - pg_catalog | has_database_privilege | boolean | oid, text, text | func -(2 rows) - -\df has_database_privilege oid text - - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+------------------------+------------------+---------------------+------ - pg_catalog | has_database_privilege | boolean | oid, text | func -(1 row) - -\dfa bit* small* - List of functions - Schema | Name | Result data type | Argument data types | Type -------------+---------+------------------+---------------------+------ - pg_catalog | bit_and | smallint | smallint | agg - pg_catalog | bit_or | smallint | smallint | agg - pg_catalog | bit_xor | smallint | smallint | agg -(3 rows) - -\df *._pg_expandarray - List of functions - Schema | Name | Result data type | Argument data types | Type ---------------------+-----------------+------------------+-------------------------------------------+------ - information_schema | _pg_expandarray | SETOF record | anyarray, OUT x anyelement, OUT n integer | func -(1 row) - -\do - pg_catalog.int4 - List of operators - Schema | Name | Left arg type | Right arg type | Result type | Description -------------+------+---------------+----------------+-------------+------------- - pg_catalog | - | | integer | integer | negate -(1 row) - -\do && anyarray * - List of operators - Schema | Name | Left arg type | Right arg type | Result type | Description -------------+------+---------------+----------------+-------------+------------- - pg_catalog | && | anyarray | anyarray | boolean | overlaps -(1 row) - --- check \df+ --- we have to use functions with a predictable owner name, so make a role -create role regress_psql_user superuser; -begin; -set session authorization regress_psql_user; -create function psql_df_internal (float8) - returns float8 - language internal immutable parallel safe strict - as 'dsin'; -create function psql_df_sql (x integer) - returns integer - security definer - begin atomic select x + 1; end; -create function psql_df_plpgsql () - returns void - language plpgsql - as $$ begin return; end; $$; -comment on function psql_df_plpgsql () is 'some comment'; -\df+ psql_df_* - List of functions - Schema | Name | Result data type | Argument data types | Type | Volatility | Parallel | Owner | Security | Access privileges | Language | Internal name | Description ---------+------------------+------------------+---------------------+------+------------+----------+-------------------+----------+-------------------+----------+---------------+-------------- - public | psql_df_internal | double precision | double precision | func | immutable | safe | regress_psql_user | invoker | | internal | dsin | - public | psql_df_plpgsql | void | | func | volatile | unsafe | 
regress_psql_user | invoker | | plpgsql | | some comment - public | psql_df_sql | integer | x integer | func | volatile | unsafe | regress_psql_user | definer | | sql | | -(3 rows) - -rollback; -drop role regress_psql_user; --- check \sf -\sf information_schema._pg_index_position -CREATE OR REPLACE FUNCTION information_schema._pg_index_position(oid, smallint) - RETURNS integer - LANGUAGE sql - STABLE STRICT -BEGIN ATOMIC - SELECT (ss.a).n AS n - FROM ( SELECT information_schema._pg_expandarray(pg_index.indkey) AS a - FROM pg_index - WHERE (pg_index.indexrelid = $1)) ss - WHERE ((ss.a).x = $2); -END -\sf+ information_schema._pg_index_position - CREATE OR REPLACE FUNCTION information_schema._pg_index_position(oid, smallint) - RETURNS integer - LANGUAGE sql - STABLE STRICT -1 BEGIN ATOMIC -2 SELECT (ss.a).n AS n -3 FROM ( SELECT information_schema._pg_expandarray(pg_index.indkey) AS a -4 FROM pg_index -5 WHERE (pg_index.indexrelid = $1)) ss -6 WHERE ((ss.a).x = $2); -7 END -\sf+ interval_pl_time - CREATE OR REPLACE FUNCTION pg_catalog.interval_pl_time(interval, time without time zone) - RETURNS time without time zone - LANGUAGE sql - IMMUTABLE PARALLEL SAFE STRICT COST 1 -1 RETURN ($2 + $1) -\sf ts_debug(text); -CREATE OR REPLACE FUNCTION pg_catalog.ts_debug(document text, OUT alias text, OUT description text, OUT token text, OUT dictionaries regdictionary[], OUT dictionary regdictionary, OUT lexemes text[]) - RETURNS SETOF record - LANGUAGE sql - STABLE PARALLEL SAFE STRICT -BEGIN ATOMIC - SELECT ts_debug.alias, - ts_debug.description, - ts_debug.token, - ts_debug.dictionaries, - ts_debug.dictionary, - ts_debug.lexemes - FROM ts_debug(get_current_ts_config(), ts_debug.document) ts_debug(alias, description, token, dictionaries, dictionary, lexemes); -END -\sf+ ts_debug(text) - CREATE OR REPLACE FUNCTION pg_catalog.ts_debug(document text, OUT alias text, OUT description text, OUT token text, OUT dictionaries regdictionary[], OUT dictionary regdictionary, OUT lexemes text[]) - RETURNS SETOF record - LANGUAGE sql - STABLE PARALLEL SAFE STRICT -1 BEGIN ATOMIC -2 SELECT ts_debug.alias, -3 ts_debug.description, -4 ts_debug.token, -5 ts_debug.dictionaries, -6 ts_debug.dictionary, -7 ts_debug.lexemes -8 FROM ts_debug(get_current_ts_config(), ts_debug.document) ts_debug(alias, description, token, dictionaries, dictionary, lexemes); -9 END --- AUTOCOMMIT -CREATE TABLE ac_test (a int); -\set AUTOCOMMIT off -INSERT INTO ac_test VALUES (1); -COMMIT; -SELECT * FROM ac_test; - a ---- - 1 -(1 row) - -COMMIT; -INSERT INTO ac_test VALUES (2); -ROLLBACK; -SELECT * FROM ac_test; - a ---- - 1 -(1 row) - -COMMIT; -BEGIN; -INSERT INTO ac_test VALUES (3); -COMMIT; -SELECT * FROM ac_test; - a ---- - 1 - 3 -(2 rows) - -COMMIT; -BEGIN; -INSERT INTO ac_test VALUES (4); -ROLLBACK; -SELECT * FROM ac_test; - a ---- - 1 - 3 -(2 rows) - -COMMIT; -\set AUTOCOMMIT on -DROP TABLE ac_test; -SELECT * FROM ac_test; -- should be gone now -ERROR: relation "ac_test" does not exist -LINE 1: SELECT * FROM ac_test; - ^ --- ON_ERROR_ROLLBACK -\set ON_ERROR_ROLLBACK on -CREATE TABLE oer_test (a int); -BEGIN; -INSERT INTO oer_test VALUES (1); -INSERT INTO oer_test VALUES ('foo'); -ERROR: invalid input syntax for type integer: "foo" -LINE 1: INSERT INTO oer_test VALUES ('foo'); - ^ -INSERT INTO oer_test VALUES (3); -COMMIT; -SELECT * FROM oer_test; - a ---- - 1 - 3 -(2 rows) - -BEGIN; -INSERT INTO oer_test VALUES (4); -ROLLBACK; -SELECT * FROM oer_test; - a ---- - 1 - 3 -(2 rows) - -BEGIN; -INSERT INTO oer_test VALUES (5); -COMMIT AND 
CHAIN; -INSERT INTO oer_test VALUES (6); -COMMIT; -SELECT * FROM oer_test; - a ---- - 1 - 3 - 5 - 6 -(4 rows) - -DROP TABLE oer_test; -\set ON_ERROR_ROLLBACK off --- ECHO errors -\set ECHO errors -ERROR: relation "notexists" does not exist -LINE 1: SELECT * FROM notexists; - ^ -STATEMENT: SELECT * FROM notexists; --- --- combined queries --- -CREATE FUNCTION warn(msg TEXT) RETURNS BOOLEAN LANGUAGE plpgsql -AS $$ - BEGIN RAISE NOTICE 'warn %', msg ; RETURN TRUE ; END -$$; --- show both -SELECT 1 AS one \; SELECT warn('1.5') \; SELECT 2 AS two ; -NOTICE: warn 1.5 -CONTEXT: PL/pgSQL function warn(text) line 2 at RAISE - one ------ - 1 -(1 row) - - warn ------- - t -(1 row) - - two ------ - 2 -(1 row) - --- \gset applies to last query only -SELECT 3 AS three \; SELECT warn('3.5') \; SELECT 4 AS four \gset -NOTICE: warn 3.5 -CONTEXT: PL/pgSQL function warn(text) line 2 at RAISE - three -------- - 3 -(1 row) - - warn ------- - t -(1 row) - -\echo :three :four -:three 4 --- syntax error stops all processing -SELECT 5 \; SELECT 6 + \; SELECT warn('6.5') \; SELECT 7 ; -ERROR: syntax error at or near ";" -LINE 1: SELECT 5 ; SELECT 6 + ; SELECT warn('6.5') ; SELECT 7 ; - ^ --- with aborted transaction, stop on first error -BEGIN \; SELECT 8 AS eight \; SELECT 9/0 AS nine \; ROLLBACK \; SELECT 10 AS ten ; - eight -------- - 8 -(1 row) - -ERROR: division by zero --- close previously aborted transaction -ROLLBACK; --- miscellaneous SQL commands --- (non SELECT output is sent to stderr, thus is not shown in expected results) -SELECT 'ok' AS "begin" \; -CREATE TABLE psql_comics(s TEXT) \; -INSERT INTO psql_comics VALUES ('Calvin'), ('hobbes') \; -COPY psql_comics FROM STDIN \; -UPDATE psql_comics SET s = 'Hobbes' WHERE s = 'hobbes' \; -DELETE FROM psql_comics WHERE s = 'Moe' \; -COPY psql_comics TO STDOUT \; -TRUNCATE psql_comics \; -DROP TABLE psql_comics \; -SELECT 'ok' AS "done" ; - begin -------- - ok -(1 row) - -Calvin -Susie -Hobbes - done ------- - ok -(1 row) - -\set SHOW_ALL_RESULTS off -SELECT 1 AS one \; SELECT warn('1.5') \; SELECT 2 AS two ; -NOTICE: warn 1.5 -CONTEXT: PL/pgSQL function warn(text) line 2 at RAISE - two ------ - 2 -(1 row) - -\set SHOW_ALL_RESULTS on -DROP FUNCTION warn(TEXT); --- --- \g with file --- -\getenv abs_builddir PG_ABS_BUILDDIR -\set g_out_file :abs_builddir '/results/psql-output1' -CREATE TEMPORARY TABLE reload_output( - lineno int NOT NULL GENERATED ALWAYS AS IDENTITY, - line text -); -SELECT 1 AS a \g :g_out_file -COPY reload_output(line) FROM :'g_out_file'; -SELECT 2 AS b\; SELECT 3 AS c\; SELECT 4 AS d \g :g_out_file -COPY reload_output(line) FROM :'g_out_file'; -COPY (SELECT 'foo') TO STDOUT \; COPY (SELECT 'bar') TO STDOUT \g :g_out_file -COPY reload_output(line) FROM :'g_out_file'; -SELECT line FROM reload_output ORDER BY lineno; - line ---------- - a - --- - 1 - (1 row) - - b - --- - 2 - (1 row) - - c - --- - 3 - (1 row) - - d - --- - 4 - (1 row) - - foo - bar -(22 rows) - -TRUNCATE TABLE reload_output; --- --- \o with file --- -\set o_out_file :abs_builddir '/results/psql-output2' -\o :o_out_file -SELECT max(unique1) FROM onek; -SELECT 1 AS a\; SELECT 2 AS b\; SELECT 3 AS c; --- COPY TO file --- The data goes to :g_out_file and the status to :o_out_file -\set QUIET false -COPY (SELECT unique1 FROM onek ORDER BY unique1 LIMIT 10) TO :'g_out_file'; --- DML command status -UPDATE onek SET unique1 = unique1 WHERE false; -\set QUIET true -\o --- Check the contents of the files generated. 
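
For context on the two output-redirection mechanisms being exercised above: \o redirects all subsequent query output to a file until it is reset with a bare \o, while \g redirects only the result of the query it terminates. A minimal sketch, assuming a writable path /tmp/psql_demo.txt (the path is illustrative, not part of the test):

-- everything from here until the bare \o goes to the file
\o /tmp/psql_demo.txt
SELECT 1 AS a;
\o
-- \g redirects only the result of the query it terminates
SELECT 2 AS b \g /tmp/psql_demo.txt

The COPY ... FROM commands that follow reload those files into the reload_output table, which is how the test verifies what was routed where.
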
-COPY reload_output(line) FROM :'g_out_file'; -SELECT line FROM reload_output ORDER BY lineno; - line ------- - 0 - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 -(10 rows) - -TRUNCATE TABLE reload_output; -COPY reload_output(line) FROM :'o_out_file'; -SELECT line FROM reload_output ORDER BY lineno; - line ----------- - max - ----- - 999 - (1 row) - - a - --- - 1 - (1 row) - - b - --- - 2 - (1 row) - - c - --- - 3 - (1 row) - - COPY 10 - UPDATE 0 -(22 rows) - -TRUNCATE TABLE reload_output; --- Multiple COPY TO STDOUT with output file -\o :o_out_file --- The data goes to :o_out_file with no status generated. -COPY (SELECT 'foo1') TO STDOUT \; COPY (SELECT 'bar1') TO STDOUT; --- Combination of \o and \g file with multiple COPY queries. -COPY (SELECT 'foo2') TO STDOUT \; COPY (SELECT 'bar2') TO STDOUT \g :g_out_file -\o --- Check the contents of the files generated. -COPY reload_output(line) FROM :'g_out_file'; -SELECT line FROM reload_output ORDER BY lineno; - line ------- - foo2 - bar2 -(2 rows) - -TRUNCATE TABLE reload_output; -COPY reload_output(line) FROM :'o_out_file'; -SELECT line FROM reload_output ORDER BY lineno; - line ------- - foo1 - bar1 -(2 rows) - -DROP TABLE reload_output; --- --- AUTOCOMMIT and combined queries --- -\set AUTOCOMMIT off -\echo '# AUTOCOMMIT:' :AUTOCOMMIT -# AUTOCOMMIT: off --- BEGIN is now implicit -CREATE TABLE foo(s TEXT) \; -ROLLBACK; -CREATE TABLE foo(s TEXT) \; -INSERT INTO foo(s) VALUES ('hello'), ('world') \; -COMMIT; -DROP TABLE foo \; -ROLLBACK; --- table foo is still there -SELECT * FROM foo ORDER BY 1 \; -DROP TABLE foo \; -COMMIT; - s -------- - hello - world -(2 rows) - -\set AUTOCOMMIT on -\echo '# AUTOCOMMIT:' :AUTOCOMMIT -# AUTOCOMMIT: on --- BEGIN now explicit for multi-statement transactions -BEGIN \; -CREATE TABLE foo(s TEXT) \; -INSERT INTO foo(s) VALUES ('hello'), ('world') \; -COMMIT; -BEGIN \; -DROP TABLE foo \; -ROLLBACK \; --- implicit transactions -SELECT * FROM foo ORDER BY 1 \; -DROP TABLE foo; - s -------- - hello - world -(2 rows) - --- --- test ON_ERROR_ROLLBACK and combined queries --- -CREATE FUNCTION psql_error(msg TEXT) RETURNS BOOLEAN AS $$ - BEGIN - RAISE EXCEPTION 'error %', msg; - END; -$$ LANGUAGE plpgsql; -\set ON_ERROR_ROLLBACK on -\echo '# ON_ERROR_ROLLBACK:' :ON_ERROR_ROLLBACK -# ON_ERROR_ROLLBACK: on -\echo '# AUTOCOMMIT:' :AUTOCOMMIT -# AUTOCOMMIT: on -BEGIN; -CREATE TABLE bla(s NO_SUCH_TYPE); -- fails -ERROR: type "no_such_type" does not exist -LINE 1: CREATE TABLE bla(s NO_SUCH_TYPE); - ^ -CREATE TABLE bla(s TEXT); -- succeeds -SELECT psql_error('oops!'); -- fails -ERROR: error oops! -CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE -INSERT INTO bla VALUES ('Calvin'), ('Hobbes'); -COMMIT; -SELECT * FROM bla ORDER BY 1; - s --------- - Calvin - Hobbes -(2 rows) - -BEGIN; -INSERT INTO bla VALUES ('Susie'); -- succeeds --- now with combined queries -INSERT INTO bla VALUES ('Rosalyn') \; -- will rollback -SELECT 'before error' AS show \; -- will show nevertheless! - SELECT psql_error('boum!') \; -- failure - SELECT 'after error' AS noshow; -- hidden by preceding error - show --------------- - before error -(1 row) - -ERROR: error boum! -CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE -INSERT INTO bla(s) VALUES ('Moe') \; -- will rollback - SELECT psql_error('bam!'); -ERROR: error bam! 
-CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE -INSERT INTO bla VALUES ('Miss Wormwood'); -- succeeds -COMMIT; -SELECT * FROM bla ORDER BY 1; - s ---------------- - Calvin - Hobbes - Miss Wormwood - Susie -(4 rows) - --- some with autocommit off -\set AUTOCOMMIT off -\echo '# AUTOCOMMIT:' :AUTOCOMMIT -# AUTOCOMMIT: off --- implicit BEGIN -INSERT INTO bla VALUES ('Dad'); -- succeeds -SELECT psql_error('bad!'); -- implicit partial rollback -ERROR: error bad! -CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE -INSERT INTO bla VALUES ('Mum') \; -- will rollback -SELECT COUNT(*) AS "#mum" -FROM bla WHERE s = 'Mum' \; -- but be counted here -SELECT psql_error('bad!'); -- implicit partial rollback - #mum ------- - 1 -(1 row) - -ERROR: error bad! -CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE -COMMIT; -SELECT COUNT(*) AS "#mum" -FROM bla WHERE s = 'Mum' \; -- no mum here -SELECT * FROM bla ORDER BY 1; - #mum ------- - 0 -(1 row) - - s ---------------- - Calvin - Dad - Hobbes - Miss Wormwood - Susie -(5 rows) - -COMMIT; --- reset all -\set AUTOCOMMIT on -\set ON_ERROR_ROLLBACK off -\echo '# final ON_ERROR_ROLLBACK:' :ON_ERROR_ROLLBACK -# final ON_ERROR_ROLLBACK: off -DROP TABLE bla; -DROP FUNCTION psql_error; --- check describing invalid multipart names -\dA regression.heap -improper qualified name (too many dotted names): regression.heap -\dA nonesuch.heap -improper qualified name (too many dotted names): nonesuch.heap -\dt host.regression.pg_catalog.pg_class -improper qualified name (too many dotted names): host.regression.pg_catalog.pg_class -\dt |.pg_catalog.pg_class -cross-database references are not implemented: |.pg_catalog.pg_class -\dt nonesuch.pg_catalog.pg_class -cross-database references are not implemented: nonesuch.pg_catalog.pg_class -\da host.regression.pg_catalog.sum -improper qualified name (too many dotted names): host.regression.pg_catalog.sum -\da +.pg_catalog.sum -cross-database references are not implemented: +.pg_catalog.sum -\da nonesuch.pg_catalog.sum -cross-database references are not implemented: nonesuch.pg_catalog.sum -\dAc nonesuch.brin -improper qualified name (too many dotted names): nonesuch.brin -\dAc regression.brin -improper qualified name (too many dotted names): regression.brin -\dAf nonesuch.brin -improper qualified name (too many dotted names): nonesuch.brin -\dAf regression.brin -improper qualified name (too many dotted names): regression.brin -\dAo nonesuch.brin -improper qualified name (too many dotted names): nonesuch.brin -\dAo regression.brin -improper qualified name (too many dotted names): regression.brin -\dAp nonesuch.brin -improper qualified name (too many dotted names): nonesuch.brin -\dAp regression.brin -improper qualified name (too many dotted names): regression.brin -\db nonesuch.pg_default -improper qualified name (too many dotted names): nonesuch.pg_default -\db regression.pg_default -improper qualified name (too many dotted names): regression.pg_default -\dc host.regression.public.conversion -improper qualified name (too many dotted names): host.regression.public.conversion -\dc (.public.conversion -cross-database references are not implemented: (.public.conversion -\dc nonesuch.public.conversion -cross-database references are not implemented: nonesuch.public.conversion -\dC host.regression.pg_catalog.int8 -improper qualified name (too many dotted names): host.regression.pg_catalog.int8 -\dC ).pg_catalog.int8 -cross-database references are not implemented: ).pg_catalog.int8 -\dC 
nonesuch.pg_catalog.int8 -cross-database references are not implemented: nonesuch.pg_catalog.int8 -\dd host.regression.pg_catalog.pg_class -improper qualified name (too many dotted names): host.regression.pg_catalog.pg_class -\dd [.pg_catalog.pg_class -cross-database references are not implemented: [.pg_catalog.pg_class -\dd nonesuch.pg_catalog.pg_class -cross-database references are not implemented: nonesuch.pg_catalog.pg_class -\dD host.regression.public.gtestdomain1 -improper qualified name (too many dotted names): host.regression.public.gtestdomain1 -\dD ].public.gtestdomain1 -cross-database references are not implemented: ].public.gtestdomain1 -\dD nonesuch.public.gtestdomain1 -cross-database references are not implemented: nonesuch.public.gtestdomain1 -\ddp host.regression.pg_catalog.pg_class -improper qualified name (too many dotted names): host.regression.pg_catalog.pg_class -\ddp {.pg_catalog.pg_class -cross-database references are not implemented: {.pg_catalog.pg_class -\ddp nonesuch.pg_catalog.pg_class -cross-database references are not implemented: nonesuch.pg_catalog.pg_class -\dE host.regression.public.ft -improper qualified name (too many dotted names): host.regression.public.ft -\dE }.public.ft -cross-database references are not implemented: }.public.ft -\dE nonesuch.public.ft -cross-database references are not implemented: nonesuch.public.ft -\di host.regression.public.tenk1_hundred -improper qualified name (too many dotted names): host.regression.public.tenk1_hundred -\di ..public.tenk1_hundred -improper qualified name (too many dotted names): ..public.tenk1_hundred -\di nonesuch.public.tenk1_hundred -cross-database references are not implemented: nonesuch.public.tenk1_hundred -\dm host.regression.public.mvtest_bb -improper qualified name (too many dotted names): host.regression.public.mvtest_bb -\dm ^.public.mvtest_bb -cross-database references are not implemented: ^.public.mvtest_bb -\dm nonesuch.public.mvtest_bb -cross-database references are not implemented: nonesuch.public.mvtest_bb -\ds host.regression.public.check_seq -improper qualified name (too many dotted names): host.regression.public.check_seq -\ds regression|mydb.public.check_seq -cross-database references are not implemented: regression|mydb.public.check_seq -\ds nonesuch.public.check_seq -cross-database references are not implemented: nonesuch.public.check_seq -\dt host.regression.public.b_star -improper qualified name (too many dotted names): host.regression.public.b_star -\dt regres+ion.public.b_star -cross-database references are not implemented: regres+ion.public.b_star -\dt nonesuch.public.b_star -cross-database references are not implemented: nonesuch.public.b_star -\dv host.regression.public.shoe -improper qualified name (too many dotted names): host.regression.public.shoe -\dv regress(ion).public.shoe -cross-database references are not implemented: regress(ion).public.shoe -\dv nonesuch.public.shoe -cross-database references are not implemented: nonesuch.public.shoe -\des nonesuch.server -improper qualified name (too many dotted names): nonesuch.server -\des regression.server -improper qualified name (too many dotted names): regression.server -\des nonesuch.server -improper qualified name (too many dotted names): nonesuch.server -\des regression.server -improper qualified name (too many dotted names): regression.server -\des nonesuch.username -improper qualified name (too many dotted names): nonesuch.username -\des regression.username -improper qualified name (too many dotted names): 
regression.username -\dew nonesuch.fdw -improper qualified name (too many dotted names): nonesuch.fdw -\dew regression.fdw -improper qualified name (too many dotted names): regression.fdw -\df host.regression.public.namelen -improper qualified name (too many dotted names): host.regression.public.namelen -\df regres[qrstuv]ion.public.namelen -cross-database references are not implemented: regres[qrstuv]ion.public.namelen -\df nonesuch.public.namelen -cross-database references are not implemented: nonesuch.public.namelen -\dF host.regression.pg_catalog.arabic -improper qualified name (too many dotted names): host.regression.pg_catalog.arabic -\dF regres{1,2}ion.pg_catalog.arabic -cross-database references are not implemented: regres{1,2}ion.pg_catalog.arabic -\dF nonesuch.pg_catalog.arabic -cross-database references are not implemented: nonesuch.pg_catalog.arabic -\dFd host.regression.pg_catalog.arabic_stem -improper qualified name (too many dotted names): host.regression.pg_catalog.arabic_stem -\dFd regres?ion.pg_catalog.arabic_stem -cross-database references are not implemented: regres?ion.pg_catalog.arabic_stem -\dFd nonesuch.pg_catalog.arabic_stem -cross-database references are not implemented: nonesuch.pg_catalog.arabic_stem -\dFp host.regression.pg_catalog.default -improper qualified name (too many dotted names): host.regression.pg_catalog.default -\dFp ^regression.pg_catalog.default -cross-database references are not implemented: ^regression.pg_catalog.default -\dFp nonesuch.pg_catalog.default -cross-database references are not implemented: nonesuch.pg_catalog.default -\dFt host.regression.pg_catalog.ispell -improper qualified name (too many dotted names): host.regression.pg_catalog.ispell -\dFt regression$.pg_catalog.ispell -cross-database references are not implemented: regression$.pg_catalog.ispell -\dFt nonesuch.pg_catalog.ispell -cross-database references are not implemented: nonesuch.pg_catalog.ispell -\dg nonesuch.pg_database_owner -improper qualified name (too many dotted names): nonesuch.pg_database_owner -\dg regression.pg_database_owner -improper qualified name (too many dotted names): regression.pg_database_owner -\dL host.regression.plpgsql -improper qualified name (too many dotted names): host.regression.plpgsql -\dL *.plpgsql -cross-database references are not implemented: *.plpgsql -\dL nonesuch.plpgsql -cross-database references are not implemented: nonesuch.plpgsql -\dn host.regression.public -improper qualified name (too many dotted names): host.regression.public -\dn """".public -cross-database references are not implemented: """".public -\dn nonesuch.public -cross-database references are not implemented: nonesuch.public -\do host.regression.public.!=- -improper qualified name (too many dotted names): host.regression.public.!=- -\do "regression|mydb".public.!=- -cross-database references are not implemented: "regression|mydb".public.!=- -\do nonesuch.public.!=- -cross-database references are not implemented: nonesuch.public.!=- -\dO host.regression.pg_catalog.POSIX -improper qualified name (too many dotted names): host.regression.pg_catalog.POSIX -\dO .pg_catalog.POSIX -cross-database references are not implemented: .pg_catalog.POSIX -\dO nonesuch.pg_catalog.POSIX -cross-database references are not implemented: nonesuch.pg_catalog.POSIX -\dp host.regression.public.a_star -improper qualified name (too many dotted names): host.regression.public.a_star -\dp "regres+ion".public.a_star -cross-database references are not implemented: "regres+ion".public.a_star -\dp 
nonesuch.public.a_star -cross-database references are not implemented: nonesuch.public.a_star -\dP host.regression.public.mlparted -improper qualified name (too many dotted names): host.regression.public.mlparted -\dP "regres(sion)".public.mlparted -cross-database references are not implemented: "regres(sion)".public.mlparted -\dP nonesuch.public.mlparted -cross-database references are not implemented: nonesuch.public.mlparted -\drds nonesuch.lc_messages -improper qualified name (too many dotted names): nonesuch.lc_messages -\drds regression.lc_messages -improper qualified name (too many dotted names): regression.lc_messages -\dRp public.mypub -improper qualified name (too many dotted names): public.mypub -\dRp regression.mypub -improper qualified name (too many dotted names): regression.mypub -\dRs public.mysub -improper qualified name (too many dotted names): public.mysub -\dRs regression.mysub -improper qualified name (too many dotted names): regression.mysub -\dT host.regression.public.widget -improper qualified name (too many dotted names): host.regression.public.widget -\dT "regression{1,2}".public.widget -cross-database references are not implemented: "regression{1,2}".public.widget -\dT nonesuch.public.widget -cross-database references are not implemented: nonesuch.public.widget -\dx regression.plpgsql -improper qualified name (too many dotted names): regression.plpgsql -\dx nonesuch.plpgsql -improper qualified name (too many dotted names): nonesuch.plpgsql -\dX host.regression.public.func_deps_stat -improper qualified name (too many dotted names): host.regression.public.func_deps_stat -\dX "^regression$".public.func_deps_stat -cross-database references are not implemented: "^regression$".public.func_deps_stat -\dX nonesuch.public.func_deps_stat -cross-database references are not implemented: nonesuch.public.func_deps_stat -\dy regression.myevt -improper qualified name (too many dotted names): regression.myevt -\dy nonesuch.myevt -improper qualified name (too many dotted names): nonesuch.myevt --- check that dots within quoted name segments are not counted -\dA "no.such.access.method" -List of access methods - Name | Type -------+------ -(0 rows) - -\dt "no.such.table.relation" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\da "no.such.aggregate.function" - List of aggregate functions - Schema | Name | Result data type | Argument data types | Description ---------+------+------------------+---------------------+------------- -(0 rows) - -\dAc "no.such.operator.class" - List of operator classes - AM | Input type | Storage type | Operator class | Default? 
-----+------------+--------------+----------------+---------- -(0 rows) - -\dAf "no.such.operator.family" - List of operator families - AM | Operator family | Applicable types -----+-----------------+------------------ -(0 rows) - -\dAo "no.such.operator.of.operator.family" - List of operators of operator families - AM | Operator family | Operator | Strategy | Purpose -----+-----------------+----------+----------+--------- -(0 rows) - -\dAp "no.such.operator.support.function.of.operator.family" - List of support functions of operator families - AM | Operator family | Registered left type | Registered right type | Number | Function -----+-----------------+----------------------+-----------------------+--------+---------- -(0 rows) - -\db "no.such.tablespace" - List of tablespaces - Name | Owner | Location -------+-------+---------- -(0 rows) - -\dc "no.such.conversion" - List of conversions - Schema | Name | Source | Destination | Default? ---------+------+--------+-------------+---------- -(0 rows) - -\dC "no.such.cast" - List of casts - Source type | Target type | Function | Implicit? --------------+-------------+----------+----------- -(0 rows) - -\dd "no.such.object.description" - Object descriptions - Schema | Name | Object | Description ---------+------+--------+------------- -(0 rows) - -\dD "no.such.domain" - List of domains - Schema | Name | Type | Collation | Nullable | Default | Check ---------+------+------+-----------+----------+---------+------- -(0 rows) - -\ddp "no.such.default.access.privilege" - Default access privileges - Owner | Schema | Type | Access privileges --------+--------+------+------------------- -(0 rows) - -\di "no.such.index.relation" - List of relations - Schema | Name | Type | Owner | Table ---------+------+------+-------+------- -(0 rows) - -\dm "no.such.materialized.view" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\ds "no.such.relation" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\dt "no.such.relation" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\dv "no.such.relation" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\des "no.such.foreign.server" - List of foreign servers - Name | Owner | Foreign-data wrapper -------+-------+---------------------- -(0 rows) - -\dew "no.such.foreign.data.wrapper" - List of foreign-data wrappers - Name | Owner | Handler | Validator -------+-------+---------+----------- -(0 rows) - -\df "no.such.function" - List of functions - Schema | Name | Result data type | Argument data types | Type ---------+------+------------------+---------------------+------ -(0 rows) - -\dF "no.such.text.search.configuration" -List of text search configurations - Schema | Name | Description ---------+------+------------- -(0 rows) - -\dFd "no.such.text.search.dictionary" -List of text search dictionaries - Schema | Name | Description ---------+------+------------- -(0 rows) - -\dFp "no.such.text.search.parser" - List of text search parsers - Schema | Name | Description ---------+------+------------- -(0 rows) - -\dFt "no.such.text.search.template" -List of text search templates - Schema | Name | Description ---------+------+------------- -(0 rows) - -\dg "no.such.role" - List of roles - Role name | Attributes ------------+------------ - -\dL "no.such.language" - List of languages - Name | Owner | Trusted | Description 
-------+-------+---------+------------- -(0 rows) - -\dn "no.such.schema" -List of schemas - Name | Owner -------+------- -(0 rows) - -\do "no.such.operator" - List of operators - Schema | Name | Left arg type | Right arg type | Result type | Description ---------+------+---------------+----------------+-------------+------------- -(0 rows) - -\dO "no.such.collation" - List of collations - Schema | Name | Provider | Collate | Ctype | Locale | ICU Rules | Deterministic? ---------+------+----------+---------+-------+--------+-----------+---------------- -(0 rows) - -\dp "no.such.access.privilege" - Access privileges - Schema | Name | Type | Access privileges | Column privileges | Policies ---------+------+------+-------------------+-------------------+---------- -(0 rows) - -\dP "no.such.partitioned.relation" - List of partitioned relations - Schema | Name | Owner | Type | Parent name | Table ---------+------+-------+------+-------------+------- -(0 rows) - -\drds "no.such.setting" - List of settings - Role | Database | Settings -------+----------+---------- -(0 rows) - -\dRp "no.such.publication" - List of publications - Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root -------+-------+------------+---------+---------+---------+-----------+-------------------+---------- -(0 rows) - -\dRs "no.such.subscription" - List of subscriptions - Name | Owner | Enabled | Publication -------+-------+---------+------------- -(0 rows) - -\dT "no.such.data.type" - List of data types - Schema | Name | Description ---------+------+------------- -(0 rows) - -\dx "no.such.installed.extension" - List of installed extensions - Name | Version | Schema | Description -------+---------+--------+------------- -(0 rows) - -\dX "no.such.extended.statistics" - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+------+------------+-----------+--------------+----- -(0 rows) - -\dy "no.such.event.trigger" - List of event triggers - Name | Event | Owner | Enabled | Function | Tags -------+-------+-------+---------+----------+------ -(0 rows) - --- again, but with dotted schema qualifications. 
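
All of these error messages follow from one parsing rule: psql splits a name pattern on dots that appear outside double quotes, so a quoted segment such as "no.such.table.relation" counts as a single name, as the zero-row listings above confirm. Depending on how many qualifier levels a given object type accepts, a surplus leading part is either rejected outright (improper qualified name) or treated as a database name, which must then match the current database (cross-database references are not implemented). A sketch of the two failure modes, with hypothetical names:

-- three dotted parts whose first is not the current database:
\dt otherdb.myschema.mytable
-- more than three dotted parts:
\dt a.b.c.d
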
-\dA "no.such.schema"."no.such.access.method" -improper qualified name (too many dotted names): "no.such.schema"."no.such.access.method" -\dt "no.such.schema"."no.such.table.relation" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\da "no.such.schema"."no.such.aggregate.function" - List of aggregate functions - Schema | Name | Result data type | Argument data types | Description ---------+------+------------------+---------------------+------------- -(0 rows) - -\dAc "no.such.schema"."no.such.operator.class" -improper qualified name (too many dotted names): "no.such.schema"."no.such.operator.class" -\dAf "no.such.schema"."no.such.operator.family" -improper qualified name (too many dotted names): "no.such.schema"."no.such.operator.family" -\dAo "no.such.schema"."no.such.operator.of.operator.family" -improper qualified name (too many dotted names): "no.such.schema"."no.such.operator.of.operator.family" -\dAp "no.such.schema"."no.such.operator.support.function.of.operator.family" -improper qualified name (too many dotted names): "no.such.schema"."no.such.operator.support.function.of.operator.family" -\db "no.such.schema"."no.such.tablespace" -improper qualified name (too many dotted names): "no.such.schema"."no.such.tablespace" -\dc "no.such.schema"."no.such.conversion" - List of conversions - Schema | Name | Source | Destination | Default? ---------+------+--------+-------------+---------- -(0 rows) - -\dC "no.such.schema"."no.such.cast" - List of casts - Source type | Target type | Function | Implicit? --------------+-------------+----------+----------- -(0 rows) - -\dd "no.such.schema"."no.such.object.description" - Object descriptions - Schema | Name | Object | Description ---------+------+--------+------------- -(0 rows) - -\dD "no.such.schema"."no.such.domain" - List of domains - Schema | Name | Type | Collation | Nullable | Default | Check ---------+------+------+-----------+----------+---------+------- -(0 rows) - -\ddp "no.such.schema"."no.such.default.access.privilege" - Default access privileges - Owner | Schema | Type | Access privileges --------+--------+------+------------------- -(0 rows) - -\di "no.such.schema"."no.such.index.relation" - List of relations - Schema | Name | Type | Owner | Table ---------+------+------+-------+------- -(0 rows) - -\dm "no.such.schema"."no.such.materialized.view" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\ds "no.such.schema"."no.such.relation" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\dt "no.such.schema"."no.such.relation" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\dv "no.such.schema"."no.such.relation" - List of relations - Schema | Name | Type | Owner ---------+------+------+------- -(0 rows) - -\des "no.such.schema"."no.such.foreign.server" -improper qualified name (too many dotted names): "no.such.schema"."no.such.foreign.server" -\dew "no.such.schema"."no.such.foreign.data.wrapper" -improper qualified name (too many dotted names): "no.such.schema"."no.such.foreign.data.wrapper" -\df "no.such.schema"."no.such.function" - List of functions - Schema | Name | Result data type | Argument data types | Type ---------+------+------------------+---------------------+------ -(0 rows) - -\dF "no.such.schema"."no.such.text.search.configuration" -List of text search configurations - Schema | Name | Description ---------+------+------------- -(0 
rows)
-
-\dFd "no.such.schema"."no.such.text.search.dictionary"
-List of text search dictionaries
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dFp "no.such.schema"."no.such.text.search.parser"
- List of text search parsers
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dFt "no.such.schema"."no.such.text.search.template"
-List of text search templates
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dg "no.such.schema"."no.such.role"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.role"
-\dL "no.such.schema"."no.such.language"
-cross-database references are not implemented: "no.such.schema"."no.such.language"
-\do "no.such.schema"."no.such.operator"
- List of operators
- Schema | Name | Left arg type | Right arg type | Result type | Description
---------+------+---------------+----------------+-------------+-------------
-(0 rows)
-
-\dO "no.such.schema"."no.such.collation"
- List of collations
- Schema | Name | Provider | Collate | Ctype | Locale | ICU Rules | Deterministic?
---------+------+----------+---------+-------+--------+-----------+----------------
-(0 rows)
-
-\dp "no.such.schema"."no.such.access.privilege"
- Access privileges
- Schema | Name | Type | Access privileges | Column privileges | Policies
---------+------+------+-------------------+-------------------+----------
-(0 rows)
-
-\dP "no.such.schema"."no.such.partitioned.relation"
- List of partitioned relations
- Schema | Name | Owner | Type | Parent name | Table
---------+------+-------+------+-------------+-------
-(0 rows)
-
-\drds "no.such.schema"."no.such.setting"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.setting"
-\dRp "no.such.schema"."no.such.publication"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.publication"
-\dRs "no.such.schema"."no.such.subscription"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.subscription"
-\dT "no.such.schema"."no.such.data.type"
- List of data types
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dx "no.such.schema"."no.such.installed.extension"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.installed.extension"
-\dX "no.such.schema"."no.such.extended.statistics"
- List of extended statistics
- Schema | Name | Definition | Ndistinct | Dependencies | MCV
---------+------+------------+-----------+--------------+-----
-(0 rows)
-
-\dy "no.such.schema"."no.such.event.trigger"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.event.trigger"
--- again, but with current database and dotted schema qualifications.
-\dt regression."no.such.schema"."no.such.table.relation"
- List of relations
- Schema | Name | Type | Owner
---------+------+------+-------
-(0 rows)
-
-\da regression."no.such.schema"."no.such.aggregate.function"
- List of aggregate functions
- Schema | Name | Result data type | Argument data types | Description
---------+------+------------------+---------------------+-------------
-(0 rows)
-
-\dc regression."no.such.schema"."no.such.conversion"
- List of conversions
- Schema | Name | Source | Destination | Default?
---------+------+--------+-------------+----------
-(0 rows)
-
-\dC regression."no.such.schema"."no.such.cast"
- List of casts
- Source type | Target type | Function | Implicit?
--------------+-------------+----------+-----------
-(0 rows)
-
-\dd regression."no.such.schema"."no.such.object.description"
- Object descriptions
- Schema | Name | Object | Description
---------+------+--------+-------------
-(0 rows)
-
-\dD regression."no.such.schema"."no.such.domain"
- List of domains
- Schema | Name | Type | Collation | Nullable | Default | Check
---------+------+------+-----------+----------+---------+-------
-(0 rows)
-
-\di regression."no.such.schema"."no.such.index.relation"
- List of relations
- Schema | Name | Type | Owner | Table
---------+------+------+-------+-------
-(0 rows)
-
-\dm regression."no.such.schema"."no.such.materialized.view"
- List of relations
- Schema | Name | Type | Owner
---------+------+------+-------
-(0 rows)
-
-\ds regression."no.such.schema"."no.such.relation"
- List of relations
- Schema | Name | Type | Owner
---------+------+------+-------
-(0 rows)
-
-\dt regression."no.such.schema"."no.such.relation"
- List of relations
- Schema | Name | Type | Owner
---------+------+------+-------
-(0 rows)
-
-\dv regression."no.such.schema"."no.such.relation"
- List of relations
- Schema | Name | Type | Owner
---------+------+------+-------
-(0 rows)
-
-\df regression."no.such.schema"."no.such.function"
- List of functions
- Schema | Name | Result data type | Argument data types | Type
---------+------+------------------+---------------------+------
-(0 rows)
-
-\dF regression."no.such.schema"."no.such.text.search.configuration"
-List of text search configurations
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dFd regression."no.such.schema"."no.such.text.search.dictionary"
-List of text search dictionaries
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dFp regression."no.such.schema"."no.such.text.search.parser"
- List of text search parsers
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dFt regression."no.such.schema"."no.such.text.search.template"
-List of text search templates
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\do regression."no.such.schema"."no.such.operator"
- List of operators
- Schema | Name | Left arg type | Right arg type | Result type | Description
---------+------+---------------+----------------+-------------+-------------
-(0 rows)
-
-\dO regression."no.such.schema"."no.such.collation"
- List of collations
- Schema | Name | Provider | Collate | Ctype | Locale | ICU Rules | Deterministic?
---------+------+----------+---------+-------+--------+-----------+----------------
-(0 rows)
-
-\dp regression."no.such.schema"."no.such.access.privilege"
- Access privileges
- Schema | Name | Type | Access privileges | Column privileges | Policies
---------+------+------+-------------------+-------------------+----------
-(0 rows)
-
-\dP regression."no.such.schema"."no.such.partitioned.relation"
- List of partitioned relations
- Schema | Name | Owner | Type | Parent name | Table
---------+------+-------+------+-------------+-------
-(0 rows)
-
-\dT regression."no.such.schema"."no.such.data.type"
- List of data types
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dX regression."no.such.schema"."no.such.extended.statistics"
- List of extended statistics
- Schema | Name | Definition | Ndistinct | Dependencies | MCV
---------+------+------------+-----------+--------------+-----
-(0 rows)
-
--- again, but with dotted database and dotted schema qualifications.
-\dt "no.such.database"."no.such.schema"."no.such.table.relation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.table.relation"
-\da "no.such.database"."no.such.schema"."no.such.aggregate.function"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.aggregate.function"
-\dc "no.such.database"."no.such.schema"."no.such.conversion"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.conversion"
-\dC "no.such.database"."no.such.schema"."no.such.cast"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.cast"
-\dd "no.such.database"."no.such.schema"."no.such.object.description"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.object.description"
-\dD "no.such.database"."no.such.schema"."no.such.domain"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.domain"
-\ddp "no.such.database"."no.such.schema"."no.such.default.access.privilege"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.default.access.privilege"
-\di "no.such.database"."no.such.schema"."no.such.index.relation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.index.relation"
-\dm "no.such.database"."no.such.schema"."no.such.materialized.view"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.materialized.view"
-\ds "no.such.database"."no.such.schema"."no.such.relation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.relation"
-\dt "no.such.database"."no.such.schema"."no.such.relation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.relation"
-\dv "no.such.database"."no.such.schema"."no.such.relation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.relation"
-\df "no.such.database"."no.such.schema"."no.such.function"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.function"
-\dF "no.such.database"."no.such.schema"."no.such.text.search.configuration"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.text.search.configuration"
-\dFd "no.such.database"."no.such.schema"."no.such.text.search.dictionary"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.text.search.dictionary"
-\dFp "no.such.database"."no.such.schema"."no.such.text.search.parser"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.text.search.parser"
-\dFt "no.such.database"."no.such.schema"."no.such.text.search.template"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.text.search.template"
-\do "no.such.database"."no.such.schema"."no.such.operator"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.operator"
-\dO "no.such.database"."no.such.schema"."no.such.collation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.collation"
-\dp "no.such.database"."no.such.schema"."no.such.access.privilege"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.access.privilege"
-\dP "no.such.database"."no.such.schema"."no.such.partitioned.relation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.partitioned.relation"
-\dT "no.such.database"."no.such.schema"."no.such.data.type"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.data.type"
-\dX "no.such.database"."no.such.schema"."no.such.extended.statistics"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.extended.statistics"
--- check \drg and \du
-CREATE ROLE regress_du_role0;
-CREATE ROLE regress_du_role1;
-CREATE ROLE regress_du_role2;
-CREATE ROLE regress_du_admin;
-GRANT regress_du_role0 TO regress_du_admin WITH ADMIN TRUE;
-GRANT regress_du_role1 TO regress_du_admin WITH ADMIN TRUE;
-GRANT regress_du_role2 TO regress_du_admin WITH ADMIN TRUE;
-GRANT regress_du_role0 TO regress_du_role1 WITH ADMIN TRUE, INHERIT TRUE, SET TRUE GRANTED BY regress_du_admin;
-GRANT regress_du_role0 TO regress_du_role2 WITH ADMIN TRUE, INHERIT FALSE, SET FALSE GRANTED BY regress_du_admin;
-GRANT regress_du_role1 TO regress_du_role2 WITH ADMIN TRUE , INHERIT FALSE, SET TRUE GRANTED BY regress_du_admin;
-GRANT regress_du_role0 TO regress_du_role1 WITH ADMIN FALSE, INHERIT TRUE, SET FALSE GRANTED BY regress_du_role1;
-GRANT regress_du_role0 TO regress_du_role2 WITH ADMIN FALSE, INHERIT TRUE , SET TRUE GRANTED BY regress_du_role1;
-GRANT regress_du_role0 TO regress_du_role1 WITH ADMIN FALSE, INHERIT FALSE, SET TRUE GRANTED BY regress_du_role2;
-GRANT regress_du_role0 TO regress_du_role2 WITH ADMIN FALSE, INHERIT FALSE, SET FALSE GRANTED BY regress_du_role2;
-\drg regress_du_role*
- List of role grants
- Role name | Member of | Options | Grantor
-------------------+------------------+---------------------+------------------
- regress_du_role1 | regress_du_role0 | ADMIN, INHERIT, SET | regress_du_admin
- regress_du_role1 | regress_du_role0 | INHERIT | regress_du_role1
- regress_du_role1 | regress_du_role0 | SET | regress_du_role2
- regress_du_role2 | regress_du_role0 | ADMIN | regress_du_admin
- regress_du_role2 | regress_du_role0 | INHERIT, SET | regress_du_role1
- regress_du_role2 | regress_du_role0 | | regress_du_role2
- regress_du_role2 | regress_du_role1 | ADMIN, SET | regress_du_admin
-(7 rows)
-
-\du regress_du_role*
- List of roles
- Role name | Attributes
-------------------+--------------
- regress_du_role0 | Cannot login
- regress_du_role1 | Cannot login
- regress_du_role2 | Cannot login
-
-DROP ROLE regress_du_role0;
-DROP ROLE regress_du_role1;
-DROP ROLE regress_du_role2;
-DROP ROLE regress_du_admin;
--- Test display of empty privileges.
-BEGIN;
--- Create an owner for tested objects because output contains owner name.
-CREATE ROLE regress_zeropriv_owner;
-SET LOCAL ROLE regress_zeropriv_owner;
-CREATE DOMAIN regress_zeropriv_domain AS int;
-REVOKE ALL ON DOMAIN regress_zeropriv_domain FROM CURRENT_USER, PUBLIC;
-\dD+ regress_zeropriv_domain
- List of domains
- Schema | Name | Type | Collation | Nullable | Default | Check | Access privileges | Description
---------+-------------------------+---------+-----------+----------+---------+-------+-------------------+-------------
- public | regress_zeropriv_domain | integer | | | | | (none) |
-(1 row)
-
-CREATE PROCEDURE regress_zeropriv_proc() LANGUAGE sql AS '';
-REVOKE ALL ON PROCEDURE regress_zeropriv_proc() FROM CURRENT_USER, PUBLIC;
-\df+ regress_zeropriv_proc
- List of functions
- Schema | Name | Result data type | Argument data types | Type | Volatility | Parallel | Owner | Security | Access privileges | Language | Internal name | Description
---------+-----------------------+------------------+---------------------+------+------------+----------+------------------------+----------+-------------------+----------+---------------+-------------
- public | regress_zeropriv_proc | | | proc | volatile | unsafe | regress_zeropriv_owner | invoker | (none) | sql | |
-(1 row)
-
-CREATE TABLE regress_zeropriv_tbl (a int);
-REVOKE ALL ON TABLE regress_zeropriv_tbl FROM CURRENT_USER;
-\dp regress_zeropriv_tbl
- Access privileges
- Schema | Name | Type | Access privileges | Column privileges | Policies
---------+----------------------+-------+-------------------+-------------------+----------
- public | regress_zeropriv_tbl | table | (none) | |
-(1 row)
-
-CREATE TYPE regress_zeropriv_type AS (a int);
-REVOKE ALL ON TYPE regress_zeropriv_type FROM CURRENT_USER, PUBLIC;
-\dT+ regress_zeropriv_type
- List of data types
- Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description
---------+-----------------------+-----------------------+-------+----------+------------------------+-------------------+-------------
- public | regress_zeropriv_type | regress_zeropriv_type | tuple | | regress_zeropriv_owner | (none) |
-(1 row)
-
-ROLLBACK;
--- Test display of default privileges with \pset null.
-CREATE TABLE defprivs (a int);
-\pset null '(default)'
-\z defprivs
- Access privileges
- Schema | Name | Type | Access privileges | Column privileges | Policies
---------+----------+-------+-------------------+-------------------+----------
- public | defprivs | table | (default) | |
-(1 row)
-
-\pset null ''
-DROP TABLE defprivs;
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
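The psql hunk above is the same pattern as the earlier files: the server was already gone, so the results file is truncated and the diff simply deletes all of the expected output that was never produced. For anyone who wants to poke at the role-grant display this part of the test exercises without running the whole suite, a minimal standalone sketch against a scratch database could look like the following; the demo_* role names are made up for illustration, not the regress_du_* ones from the test, and it assumes psql 16 or later (for \drg) in a superuser session:

-- illustrative sketch only, not part of the regression suite
CREATE ROLE demo_admin;
CREATE ROLE demo_member;
-- each grant separately records its ADMIN/INHERIT/SET options and its grantor
GRANT demo_admin TO demo_member WITH ADMIN TRUE, INHERIT FALSE, SET TRUE;
-- \drg prints one row per (member, role, grantor) with the option list,
-- which is the shape of the "List of role grants" table deleted above
\drg demo_member
DROP ROLE demo_member;
DROP ROLE demo_admin;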
diff -U3 /Users/admin/pgsql/src/test/regress/expected/psql_crosstab.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/psql_crosstab.out
--- /Users/admin/pgsql/src/test/regress/expected/psql_crosstab.out	2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/psql_crosstab.out	2024-12-13 13:20:10
@@ -1,216 +1,2 @@
---
--- \crosstabview
---
-CREATE TABLE ctv_data (v, h, c, i, d) AS
-VALUES
- ('v1','h2','foo', 3, '2015-04-01'::date),
- ('v2','h1','bar', 3, '2015-01-02'),
- ('v1','h0','baz', NULL, '2015-07-12'),
- ('v0','h4','qux', 4, '2015-07-15'),
- ('v0','h4','dbl', -3, '2014-12-15'),
- ('v0',NULL,'qux', 5, '2014-07-15'),
- ('v1','h2','quux',7, '2015-04-04');
--- make plans more stable
-ANALYZE ctv_data;
--- running \crosstabview after query uses query in buffer
-SELECT v, EXTRACT(year FROM d), count(*)
- FROM ctv_data
- GROUP BY 1, 2
- ORDER BY 1, 2;
- v | extract | count
-----+---------+-------
- v0 | 2014 | 2
- v0 | 2015 | 1
- v1 | 2015 | 3
- v2 | 2015 | 1
-(4 rows)
-
--- basic usage with 3 columns
- \crosstabview
- v | 2014 | 2015
-----+------+------
- v0 | 2 | 1
- v1 | | 3
- v2 | | 1
-(3 rows)
-
--- ordered months in horizontal header, quoted column name
-SELECT v, to_char(d, 'Mon') AS "month name", EXTRACT(month FROM d) AS num,
- count(*) FROM ctv_data GROUP BY 1,2,3 ORDER BY 1
- \crosstabview v "month name" 4 num
- v | Jan | Apr | Jul | Dec
-----+-----+-----+-----+-----
- v0 | | | 2 | 1
- v1 | | 2 | 1 |
- v2 | 1 | | |
-(3 rows)
-
--- ordered months in vertical header, ordered years in horizontal header
-SELECT EXTRACT(year FROM d) AS year, to_char(d,'Mon') AS """month"" name",
- EXTRACT(month FROM d) AS month,
- format('sum=%s avg=%s', sum(i), avg(i)::numeric(2,1))
- FROM ctv_data
- GROUP BY EXTRACT(year FROM d), to_char(d,'Mon'), EXTRACT(month FROM d)
-ORDER BY month
-\crosstabview """month"" name" year format year
- "month" name | 2014 | 2015
---------------+-----------------+----------------
- Jan | | sum=3 avg=3.0
- Apr | | sum=10 avg=5.0
- Jul | sum=5 avg=5.0 | sum=4 avg=4.0
- Dec | sum=-3 avg=-3.0 |
-(4 rows)
-
--- combine contents vertically into the same cell (V/H duplicates)
-SELECT v, h, string_agg(c, E'\n') FROM ctv_data GROUP BY v, h ORDER BY 1,2,3
- \crosstabview 1 2 3
- v | h4 | | h0 | h2 | h1
-----+-----+-----+-----+------+-----
- v0 | qux+| qux | | |
- | dbl | | | |
- v1 | | | baz | foo +|
- | | | | quux |
- v2 | | | | | bar
-(3 rows)
-
--- horizontal ASC order from window function
-SELECT v,h, string_agg(c, E'\n') AS c, row_number() OVER(ORDER BY h) AS r
-FROM ctv_data GROUP BY v, h ORDER BY 1,3,2
- \crosstabview v h c r
- v | h0 | h1 | h2 | h4 |
-----+-----+-----+------+-----+-----
- v0 | | | | qux+| qux
- | | | | dbl |
- v1 | baz | | foo +| |
- | | | quux | |
- v2 | | bar | | |
-(3 rows)
-
--- horizontal DESC order from window function
-SELECT v, h, string_agg(c, E'\n') AS c, row_number() OVER(ORDER BY h DESC) AS r
-FROM ctv_data GROUP BY v, h ORDER BY 1,3,2
- \crosstabview v h c r
- v | | h4 | h2 | h1 | h0
-----+-----+-----+------+-----+-----
- v0 | qux | qux+| | |
- | | dbl | | |
- v1 | | | foo +| | baz
- | | | quux | |
- v2 | | | | bar |
-(3 rows)
-
--- horizontal ASC order from window function, NULLs pushed rightmost
-SELECT v,h, string_agg(c, E'\n') AS c, row_number() OVER(ORDER BY h NULLS LAST) AS r
-FROM ctv_data GROUP BY v, h ORDER BY 1,3,2
- \crosstabview v h c r
- v | h0 | h1 | h2 | h4 |
-----+-----+-----+------+-----+-----
- v0 | | | | qux+| qux
- | | | | dbl |
- v1 | baz | | foo +| |
- | | | quux | |
- v2 | | bar | | |
-(3 rows)
-
--- only null, no column name, 2 columns: error
-SELECT null,null \crosstabview
-\crosstabview: query must return at least three columns
--- only null, no column name, 3 columns: works
-SELECT null,null,null \crosstabview
- ?column? |
-----------+--
- |
-(1 row)
-
--- null display
-\pset null '#null#'
-SELECT v,h, string_agg(i::text, E'\n') AS i FROM ctv_data
-GROUP BY v, h ORDER BY h,v
- \crosstabview v h i
- v | h0 | h1 | h2 | h4 | #null#
-----+--------+----+----+----+--------
- v1 | #null# | | 3 +| |
- | | | 7 | |
- v2 | | 3 | | |
- v0 | | | | 4 +| 5
- | | | | -3 |
-(3 rows)
-
-\pset null ''
--- refer to columns by position
-SELECT v,h,string_agg(i::text, E'\n'), string_agg(c, E'\n')
-FROM ctv_data GROUP BY v, h ORDER BY h,v
- \crosstabview 2 1 4
- h | v1 | v2 | v0
-----+------+-----+-----
- h0 | baz | |
- h1 | | bar |
- h2 | foo +| |
- | quux | |
- h4 | | | qux+
- | | | dbl
- | | | qux
-(5 rows)
-
--- refer to columns by positions and names mixed
-SELECT v,h, string_agg(i::text, E'\n') AS i, string_agg(c, E'\n') AS c
-FROM ctv_data GROUP BY v, h ORDER BY h,v
- \crosstabview 1 "h" 4
- v | h0 | h1 | h2 | h4 |
-----+-----+-----+------+-----+-----
- v1 | baz | | foo +| |
- | | | quux | |
- v2 | | bar | | |
- v0 | | | | qux+| qux
- | | | | dbl |
-(3 rows)
-
--- refer to columns by quoted names, check downcasing of unquoted name
-SELECT 1 as "22", 2 as b, 3 as "Foo"
- \crosstabview "22" B "Foo"
- 22 | 2
-----+---
- 1 | 3
-(1 row)
-
--- error: bad column name
-SELECT v,h,c,i FROM ctv_data
- \crosstabview v h j
-\crosstabview: column name not found: "j"
--- error: need to quote name
-SELECT 1 as "22", 2 as b, 3 as "Foo"
- \crosstabview 1 2 Foo
-\crosstabview: column name not found: "foo"
--- error: need to not quote name
-SELECT 1 as "22", 2 as b, 3 as "Foo"
- \crosstabview 1 "B" "Foo"
-\crosstabview: column name not found: "B"
--- error: bad column number
-SELECT v,h,i,c FROM ctv_data
- \crosstabview 2 1 5
-\crosstabview: column number 5 is out of range 1..4
--- error: same H and V columns
-SELECT v,h,i,c FROM ctv_data
- \crosstabview 2 h 4
-\crosstabview: vertical and horizontal headers must be different columns
--- error: too many columns
-SELECT a,a,1 FROM generate_series(1,3000) AS a
- \crosstabview
-\crosstabview: maximum number of columns (1600) exceeded
--- error: only one column
-SELECT 1 \crosstabview
-\crosstabview: query must return at least three columns
-DROP TABLE ctv_data;
--- check error reporting (bug #14476)
-CREATE TABLE ctv_data (x int, y int, v text);
-INSERT INTO ctv_data SELECT 1, x, '*' || x FROM generate_series(1,10) x;
-SELECT * FROM ctv_data \crosstabview
- x | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10
----+----+----+----+----+----+----+----+----+----+-----
- 1 | *1 | *2 | *3 | *4 | *5 | *6 | *7 | *8 | *9 | *10
-(1 row)
-
-INSERT INTO ctv_data VALUES (1, 10, '*'); -- duplicate data to cause error
-SELECT * FROM ctv_data \crosstabview
-\crosstabview: query result contains multiple data values for row "1", column "10"
-DROP TABLE ctv_data;
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
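For context on what was lost here: \crosstabview is a purely client-side pivot, so none of these tests ever reached the (dead) server in a meaningful way; psql takes an ordinary result set and redisplays it with one column as the vertical header, one as the horizontal header, and one as the cell content. A minimal sketch of the same shape as the deleted tests, with made-up table and column names rather than the test's ctv_data:

-- illustrative only; the regression test drives this with its own ctv_data table
CREATE TABLE sales (region text, yr int, amount int);
INSERT INTO sales VALUES
 ('north', 2014, 10), ('north', 2015, 20), ('south', 2015, 5);
-- first argument is the row header, second the column header, third the cell value
SELECT region, yr, sum(amount) FROM sales GROUP BY 1, 2 ORDER BY 1, 2
\crosstabview region yr sum
DROP TABLE sales;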
diff -U3 /Users/admin/pgsql/src/test/regress/expected/amutils.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/amutils.out --- /Users/admin/pgsql/src/test/regress/expected/amutils.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/amutils.out 2024-12-13 13:20:10 @@ -1,254 +1,2 @@ --- --- Test index AM property-reporting functions --- -select prop, - pg_indexam_has_property(a.oid, prop) as "AM", - pg_index_has_property('onek_hundred'::regclass, prop) as "Index", - pg_index_column_has_property('onek_hundred'::regclass, 1, prop) as "Column" - from pg_am a, - unnest(array['asc', 'desc', 'nulls_first', 'nulls_last', - 'orderable', 'distance_orderable', 'returnable', - 'search_array', 'search_nulls', - 'clusterable', 'index_scan', 'bitmap_scan', - 'backward_scan', - 'can_order', 'can_unique', 'can_multi_col', - 'can_exclude', 'can_include', - 'bogus']::text[]) - with ordinality as u(prop,ord) - where a.amname = 'btree' - order by ord; - prop | AM | Index | Column ---------------------+----+-------+-------- - asc | | | t - desc | | | f - nulls_first | | | f - nulls_last | | | t - orderable | | | t - distance_orderable | | | f - returnable | | | t - search_array | | | t - search_nulls | | | t - clusterable | | t | - index_scan | | t | - bitmap_scan | | t | - backward_scan | | t | - can_order | t | | - can_unique | t | | - can_multi_col | t | | - can_exclude | t | | - can_include | t | | - bogus | | | -(19 rows) - -select prop, - pg_indexam_has_property(a.oid, prop) as "AM", - pg_index_has_property('gcircleind'::regclass, prop) as "Index", - pg_index_column_has_property('gcircleind'::regclass, 1, prop) as "Column" - from pg_am a, - unnest(array['asc', 'desc', 'nulls_first', 'nulls_last', - 'orderable', 'distance_orderable', 'returnable', - 'search_array', 'search_nulls', - 'clusterable', 'index_scan', 'bitmap_scan', - 'backward_scan', - 'can_order', 'can_unique', 'can_multi_col', - 'can_exclude', 'can_include', - 'bogus']::text[]) - with ordinality as u(prop,ord) - where a.amname = 'gist' - order by ord; - prop | AM | Index | Column ---------------------+----+-------+-------- - asc | | | f - desc | | | f - nulls_first | | | f - nulls_last | | | f - orderable | | | f - distance_orderable | | | t - returnable | | | f - search_array | | | f - search_nulls | | | t - clusterable | | t | - index_scan | | t | - bitmap_scan | | t | - backward_scan | | f | - can_order | f | | - can_unique | f | | - can_multi_col | t | | - can_exclude | t | | - can_include | t | | - bogus | | | -(19 rows) - -select prop, - pg_index_column_has_property('onek_hundred'::regclass, 1, prop) as btree, - pg_index_column_has_property('hash_i4_index'::regclass, 1, prop) as hash, - pg_index_column_has_property('gcircleind'::regclass, 1, prop) as gist, - pg_index_column_has_property('sp_radix_ind'::regclass, 1, prop) as spgist_radix, - pg_index_column_has_property('sp_quad_ind'::regclass, 1, prop) as spgist_quad, - pg_index_column_has_property('botharrayidx'::regclass, 1, prop) as gin, - pg_index_column_has_property('brinidx'::regclass, 1, prop) as brin - from unnest(array['asc', 'desc', 'nulls_first', 'nulls_last', - 'orderable', 'distance_orderable', 'returnable', - 'search_array', 'search_nulls', - 'bogus']::text[]) - with ordinality as u(prop,ord) - order by ord; - prop | btree | hash | gist | spgist_radix | spgist_quad | gin | brin ---------------------+-------+------+------+--------------+-------------+-----+------ - asc | t | f | f | f | f | f | f - desc 
| f | f | f | f | f | f | f - nulls_first | f | f | f | f | f | f | f - nulls_last | t | f | f | f | f | f | f - orderable | t | f | f | f | f | f | f - distance_orderable | f | f | t | f | t | f | f - returnable | t | f | f | t | t | f | f - search_array | t | f | f | f | f | f | f - search_nulls | t | f | t | t | t | f | t - bogus | | | | | | | -(10 rows) - -select prop, - pg_index_has_property('onek_hundred'::regclass, prop) as btree, - pg_index_has_property('hash_i4_index'::regclass, prop) as hash, - pg_index_has_property('gcircleind'::regclass, prop) as gist, - pg_index_has_property('sp_radix_ind'::regclass, prop) as spgist, - pg_index_has_property('botharrayidx'::regclass, prop) as gin, - pg_index_has_property('brinidx'::regclass, prop) as brin - from unnest(array['clusterable', 'index_scan', 'bitmap_scan', - 'backward_scan', - 'bogus']::text[]) - with ordinality as u(prop,ord) - order by ord; - prop | btree | hash | gist | spgist | gin | brin ----------------+-------+------+------+--------+-----+------ - clusterable | t | f | t | f | f | f - index_scan | t | t | t | t | f | f - bitmap_scan | t | t | t | t | t | t - backward_scan | t | t | f | f | f | f - bogus | | | | | | -(5 rows) - -select amname, prop, pg_indexam_has_property(a.oid, prop) as p - from pg_am a, - unnest(array['can_order', 'can_unique', 'can_multi_col', - 'can_exclude', 'can_include', 'bogus']::text[]) - with ordinality as u(prop,ord) - where amtype = 'i' - order by amname, ord; - amname | prop | p ---------+---------------+--- - brin | can_order | f - brin | can_unique | f - brin | can_multi_col | t - brin | can_exclude | f - brin | can_include | f - brin | bogus | - btree | can_order | t - btree | can_unique | t - btree | can_multi_col | t - btree | can_exclude | t - btree | can_include | t - btree | bogus | - gin | can_order | f - gin | can_unique | f - gin | can_multi_col | t - gin | can_exclude | f - gin | can_include | f - gin | bogus | - gist | can_order | f - gist | can_unique | f - gist | can_multi_col | t - gist | can_exclude | t - gist | can_include | t - gist | bogus | - hash | can_order | f - hash | can_unique | f - hash | can_multi_col | f - hash | can_exclude | t - hash | can_include | f - hash | bogus | - spgist | can_order | f - spgist | can_unique | f - spgist | can_multi_col | f - spgist | can_exclude | t - spgist | can_include | t - spgist | bogus | -(36 rows) - --- --- additional checks for pg_index_column_has_property --- -CREATE TEMP TABLE foo (f1 int, f2 int, f3 int, f4 int); -CREATE INDEX fooindex ON foo (f1 desc, f2 asc, f3 nulls first, f4 nulls last); -select col, prop, pg_index_column_has_property(o, col, prop) - from (values ('fooindex'::regclass)) v1(o), - (values (1,'orderable'),(2,'asc'),(3,'desc'), - (4,'nulls_first'),(5,'nulls_last'), - (6, 'bogus')) v2(idx,prop), - generate_series(1,4) col - order by col, idx; - col | prop | pg_index_column_has_property ------+-------------+------------------------------ - 1 | orderable | t - 1 | asc | f - 1 | desc | t - 1 | nulls_first | t - 1 | nulls_last | f - 1 | bogus | - 2 | orderable | t - 2 | asc | t - 2 | desc | f - 2 | nulls_first | f - 2 | nulls_last | t - 2 | bogus | - 3 | orderable | t - 3 | asc | t - 3 | desc | f - 3 | nulls_first | t - 3 | nulls_last | f - 3 | bogus | - 4 | orderable | t - 4 | asc | t - 4 | desc | f - 4 | nulls_first | f - 4 | nulls_last | t - 4 | bogus | -(24 rows) - -CREATE INDEX foocover ON foo (f1) INCLUDE (f2,f3); -select col, prop, pg_index_column_has_property(o, col, prop) - from (values 
('foocover'::regclass)) v1(o), - (values (1,'orderable'),(2,'asc'),(3,'desc'), - (4,'nulls_first'),(5,'nulls_last'), - (6,'distance_orderable'),(7,'returnable'), - (8, 'bogus')) v2(idx,prop), - generate_series(1,3) col - order by col, idx; - col | prop | pg_index_column_has_property ------+--------------------+------------------------------ - 1 | orderable | t - 1 | asc | t - 1 | desc | f - 1 | nulls_first | f - 1 | nulls_last | t - 1 | distance_orderable | f - 1 | returnable | t - 1 | bogus | - 2 | orderable | f - 2 | asc | - 2 | desc | - 2 | nulls_first | - 2 | nulls_last | - 2 | distance_orderable | f - 2 | returnable | t - 2 | bogus | - 3 | orderable | f - 3 | asc | - 3 | desc | - 3 | nulls_first | - 3 | nulls_last | - 3 | distance_orderable | f - 3 | returnable | t - 3 | bogus | -(24 rows) - +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/stats_ext.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/stats_ext.out --- /Users/admin/pgsql/src/test/regress/expected/stats_ext.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/stats_ext.out 2024-12-13 13:20:10 @@ -1,3347 +1,2 @@ --- Generic extended statistics support --- --- Note: tables for which we check estimated row counts should be created --- with autovacuum_enabled = off, so that we don't have unstable results --- from auto-analyze happening when we didn't expect it. --- --- check the number of estimated/actual rows in the top node -create function check_estimated_rows(text) returns table (estimated int, actual int) -language plpgsql as -$$ -declare - ln text; - tmp text[]; - first_row bool := true; -begin - for ln in - execute format('explain analyze %s', $1) - loop - if first_row then - first_row := false; - tmp := regexp_match(ln, 'rows=(\d*) .* rows=(\d*)'); - return query select tmp[1]::int, tmp[2]::int; - end if; - end loop; -end; -$$; --- Verify failures -CREATE TABLE ext_stats_test (x text, y int, z int); -CREATE STATISTICS tst; -ERROR: syntax error at or near ";" -LINE 1: CREATE STATISTICS tst; - ^ -CREATE STATISTICS tst ON a, b; -ERROR: syntax error at or near ";" -LINE 1: CREATE STATISTICS tst ON a, b; - ^ -CREATE STATISTICS tst FROM sometab; -ERROR: syntax error at or near "FROM" -LINE 1: CREATE STATISTICS tst FROM sometab; - ^ -CREATE STATISTICS tst ON a, b FROM nonexistent; -ERROR: relation "nonexistent" does not exist -CREATE STATISTICS tst ON a, b FROM ext_stats_test; -ERROR: column "a" does not exist -CREATE STATISTICS tst ON x, x, y FROM ext_stats_test; -ERROR: duplicate column name in statistics definition -CREATE STATISTICS tst ON x, x, y, x, x, y, x, x, y FROM ext_stats_test; -ERROR: cannot have more than 8 columns in statistics -CREATE STATISTICS tst ON x, x, y, x, x, (x || 'x'), (y + 1), (x || 'x'), (x || 'x'), (y + 1) FROM ext_stats_test; -ERROR: cannot have more than 8 columns in statistics -CREATE STATISTICS tst ON (x || 'x'), (x || 'x'), (y + 1), (x || 'x'), (x || 'x'), (y + 1), (x || 'x'), (x || 'x'), (y + 1) FROM ext_stats_test; -ERROR: cannot have more than 8 columns in statistics -CREATE STATISTICS tst ON (x || 'x'), (x || 'x'), y FROM ext_stats_test; -ERROR: duplicate expression in statistics definition -CREATE STATISTICS tst (unrecognized) ON x, y FROM ext_stats_test; -ERROR: unrecognized 
statistics kind "unrecognized" --- incorrect expressions -CREATE STATISTICS tst ON (y) FROM ext_stats_test; -- single column reference -ERROR: extended statistics require at least 2 columns -CREATE STATISTICS tst ON y + z FROM ext_stats_test; -- missing parentheses -ERROR: syntax error at or near "+" -LINE 1: CREATE STATISTICS tst ON y + z FROM ext_stats_test; - ^ -CREATE STATISTICS tst ON (x, y) FROM ext_stats_test; -- tuple expression -ERROR: syntax error at or near "," -LINE 1: CREATE STATISTICS tst ON (x, y) FROM ext_stats_test; - ^ -DROP TABLE ext_stats_test; --- Ensure stats are dropped sanely, and test IF NOT EXISTS while at it -CREATE TABLE ab1 (a INTEGER, b INTEGER, c INTEGER); -CREATE STATISTICS IF NOT EXISTS ab1_a_b_stats ON a, b FROM ab1; -COMMENT ON STATISTICS ab1_a_b_stats IS 'new comment'; -CREATE ROLE regress_stats_ext; -SET SESSION AUTHORIZATION regress_stats_ext; -COMMENT ON STATISTICS ab1_a_b_stats IS 'changed comment'; -ERROR: must be owner of statistics object ab1_a_b_stats -DROP STATISTICS ab1_a_b_stats; -ERROR: must be owner of statistics object ab1_a_b_stats -ALTER STATISTICS ab1_a_b_stats RENAME TO ab1_a_b_stats_new; -ERROR: must be owner of statistics object ab1_a_b_stats -RESET SESSION AUTHORIZATION; -DROP ROLE regress_stats_ext; -CREATE STATISTICS IF NOT EXISTS ab1_a_b_stats ON a, b FROM ab1; -NOTICE: statistics object "ab1_a_b_stats" already exists, skipping -DROP STATISTICS ab1_a_b_stats; -CREATE SCHEMA regress_schema_2; -CREATE STATISTICS regress_schema_2.ab1_a_b_stats ON a, b FROM ab1; --- Let's also verify the pg_get_statisticsobjdef output looks sane. -SELECT pg_get_statisticsobjdef(oid) FROM pg_statistic_ext WHERE stxname = 'ab1_a_b_stats'; - pg_get_statisticsobjdef -------------------------------------------------------------------- - CREATE STATISTICS regress_schema_2.ab1_a_b_stats ON a, b FROM ab1 -(1 row) - -DROP STATISTICS regress_schema_2.ab1_a_b_stats; --- Ensure statistics are dropped when columns are -CREATE STATISTICS ab1_b_c_stats ON b, c FROM ab1; -CREATE STATISTICS ab1_a_b_c_stats ON a, b, c FROM ab1; -CREATE STATISTICS ab1_b_a_stats ON b, a FROM ab1; -ALTER TABLE ab1 DROP COLUMN a; -\d ab1 - Table "public.ab1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - b | integer | | | - c | integer | | | -Statistics objects: - "public.ab1_b_c_stats" ON b, c FROM ab1 - --- Ensure statistics are dropped when table is -SELECT stxname FROM pg_statistic_ext WHERE stxname LIKE 'ab1%'; - stxname ---------------- - ab1_b_c_stats -(1 row) - -DROP TABLE ab1; -SELECT stxname FROM pg_statistic_ext WHERE stxname LIKE 'ab1%'; - stxname ---------- -(0 rows) - --- Ensure things work sanely with SET STATISTICS 0 -CREATE TABLE ab1 (a INTEGER, b INTEGER); -ALTER TABLE ab1 ALTER a SET STATISTICS 0; -INSERT INTO ab1 SELECT a, a%23 FROM generate_series(1, 1000) a; -CREATE STATISTICS ab1_a_b_stats ON a, b FROM ab1; -ANALYZE ab1; -WARNING: statistics object "public.ab1_a_b_stats" could not be computed for relation "public.ab1" -ALTER TABLE ab1 ALTER a SET STATISTICS -1; --- setting statistics target 0 skips the statistics, without printing any message, so check catalog -ALTER STATISTICS ab1_a_b_stats SET STATISTICS 0; -\d ab1 - Table "public.ab1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Statistics objects: - "public.ab1_a_b_stats" ON a, b FROM ab1; STATISTICS 0 - -ANALYZE ab1; -SELECT stxname, stxdndistinct, 
stxddependencies, stxdmcv, stxdinherit - FROM pg_statistic_ext s LEFT JOIN pg_statistic_ext_data d ON (d.stxoid = s.oid) - WHERE s.stxname = 'ab1_a_b_stats'; - stxname | stxdndistinct | stxddependencies | stxdmcv | stxdinherit ----------------+---------------+------------------+---------+------------- - ab1_a_b_stats | | | | -(1 row) - -ALTER STATISTICS ab1_a_b_stats SET STATISTICS -1; -\d+ ab1 - Table "public.ab1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - a | integer | | | | plain | | - b | integer | | | | plain | | -Statistics objects: - "public.ab1_a_b_stats" ON a, b FROM ab1 - --- partial analyze doesn't build stats either -ANALYZE ab1 (a); -WARNING: statistics object "public.ab1_a_b_stats" could not be computed for relation "public.ab1" -ANALYZE ab1; -DROP TABLE ab1; -ALTER STATISTICS ab1_a_b_stats SET STATISTICS 0; -ERROR: statistics object "ab1_a_b_stats" does not exist -ALTER STATISTICS IF EXISTS ab1_a_b_stats SET STATISTICS 0; -NOTICE: statistics object "ab1_a_b_stats" does not exist, skipping --- Ensure we can build statistics for tables with inheritance. -CREATE TABLE ab1 (a INTEGER, b INTEGER); -CREATE TABLE ab1c () INHERITS (ab1); -INSERT INTO ab1 VALUES (1,1); -CREATE STATISTICS ab1_a_b_stats ON a, b FROM ab1; -ANALYZE ab1; -DROP TABLE ab1 CASCADE; -NOTICE: drop cascades to table ab1c --- Tests for stats with inheritance -CREATE TABLE stxdinh(a int, b int); -CREATE TABLE stxdinh1() INHERITS(stxdinh); -CREATE TABLE stxdinh2() INHERITS(stxdinh); -INSERT INTO stxdinh SELECT mod(a,50), mod(a,100) FROM generate_series(0, 1999) a; -INSERT INTO stxdinh1 SELECT mod(a,100), mod(a,100) FROM generate_series(0, 999) a; -INSERT INTO stxdinh2 SELECT mod(a,100), mod(a,100) FROM generate_series(0, 999) a; -VACUUM ANALYZE stxdinh, stxdinh1, stxdinh2; --- Ensure non-inherited stats are not applied to inherited query --- Without stats object, it looks like this -SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* GROUP BY 1, 2'); - estimated | actual ------------+-------- - 400 | 150 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* WHERE a = 0 AND b = 0'); - estimated | actual ------------+-------- - 3 | 40 -(1 row) - -CREATE STATISTICS stxdinh ON a, b FROM stxdinh; -VACUUM ANALYZE stxdinh, stxdinh1, stxdinh2; --- See if the extended stats affect the estimates -SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* GROUP BY 1, 2'); - estimated | actual ------------+-------- - 150 | 150 -(1 row) - --- Dependencies are applied at individual relations (within append), so --- this estimate changes a bit because we improve estimates for the parent -SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* WHERE a = 0 AND b = 0'); - estimated | actual ------------+-------- - 22 | 40 -(1 row) - --- Ensure correct (non-inherited) stats are applied to inherited query -SELECT * FROM check_estimated_rows('SELECT a, b FROM ONLY stxdinh GROUP BY 1, 2'); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT a, b FROM ONLY stxdinh WHERE a = 0 AND b = 0'); - estimated | actual ------------+-------- - 20 | 20 -(1 row) - -DROP TABLE stxdinh, stxdinh1, stxdinh2; --- Ensure inherited stats ARE applied to inherited query in partitioned table -CREATE TABLE stxdinp(i int, a int, b int) PARTITION BY RANGE (i); -CREATE TABLE stxdinp1 PARTITION OF stxdinp FOR VALUES FROM (1) TO 
(100); -INSERT INTO stxdinp SELECT 1, a/100, a/100 FROM generate_series(1, 999) a; -CREATE STATISTICS stxdinp ON (a + 1), a, b FROM stxdinp; -VACUUM ANALYZE stxdinp; -- partitions are processed recursively -SELECT 1 FROM pg_statistic_ext WHERE stxrelid = 'stxdinp'::regclass; - ?column? ----------- - 1 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinp GROUP BY 1, 2'); - estimated | actual ------------+-------- - 10 | 10 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT a + 1, b FROM ONLY stxdinp GROUP BY 1, 2'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -DROP TABLE stxdinp; --- basic test for statistics on expressions -CREATE TABLE ab1 (a INTEGER, b INTEGER, c TIMESTAMP, d TIMESTAMPTZ); --- expression stats may be built on a single expression column -CREATE STATISTICS ab1_exprstat_1 ON (a+b) FROM ab1; --- with a single expression, we only enable expression statistics -CREATE STATISTICS ab1_exprstat_2 ON (a+b) FROM ab1; -SELECT stxkind FROM pg_statistic_ext WHERE stxname = 'ab1_exprstat_2'; - stxkind ---------- - {e} -(1 row) - --- adding anything to the expression builds all statistics kinds -CREATE STATISTICS ab1_exprstat_3 ON (a+b), a FROM ab1; -SELECT stxkind FROM pg_statistic_ext WHERE stxname = 'ab1_exprstat_3'; - stxkind ------------ - {d,f,m,e} -(1 row) - --- date_trunc on timestamptz is not immutable, but that should not matter -CREATE STATISTICS ab1_exprstat_4 ON date_trunc('day', d) FROM ab1; --- date_trunc on timestamp is immutable -CREATE STATISTICS ab1_exprstat_5 ON date_trunc('day', c) FROM ab1; --- check use of a boolean-returning expression -CREATE STATISTICS ab1_exprstat_6 ON - (case a when 1 then true else false end), b FROM ab1; --- insert some data and run analyze, to test that these cases build properly -INSERT INTO ab1 -SELECT x / 10, x / 3, - '2020-10-01'::timestamp + x * interval '1 day', - '2020-10-01'::timestamptz + x * interval '1 day' -FROM generate_series(1, 100) x; -ANALYZE ab1; --- apply some stats -SELECT * FROM check_estimated_rows('SELECT * FROM ab1 WHERE (case a when 1 then true else false end) AND b=2'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -DROP TABLE ab1; --- Verify supported object types for extended statistics -CREATE schema tststats; -CREATE TABLE tststats.t (a int, b int, c text); -CREATE INDEX ti ON tststats.t (a, b); -CREATE SEQUENCE tststats.s; -CREATE VIEW tststats.v AS SELECT * FROM tststats.t; -CREATE MATERIALIZED VIEW tststats.mv AS SELECT * FROM tststats.t; -CREATE TYPE tststats.ty AS (a int, b int, c text); -CREATE FOREIGN DATA WRAPPER extstats_dummy_fdw; -CREATE SERVER extstats_dummy_srv FOREIGN DATA WRAPPER extstats_dummy_fdw; -CREATE FOREIGN TABLE tststats.f (a int, b int, c text) SERVER extstats_dummy_srv; -CREATE TABLE tststats.pt (a int, b int, c text) PARTITION BY RANGE (a, b); -CREATE TABLE tststats.pt1 PARTITION OF tststats.pt FOR VALUES FROM (-10, -10) TO (10, 10); -CREATE STATISTICS tststats.s1 ON a, b FROM tststats.t; -CREATE STATISTICS tststats.s2 ON a, b FROM tststats.ti; -ERROR: cannot define statistics for relation "ti" -DETAIL: This operation is not supported for indexes. -CREATE STATISTICS tststats.s3 ON a, b FROM tststats.s; -ERROR: cannot define statistics for relation "s" -DETAIL: This operation is not supported for sequences. -CREATE STATISTICS tststats.s4 ON a, b FROM tststats.v; -ERROR: cannot define statistics for relation "v" -DETAIL: This operation is not supported for views. 
-CREATE STATISTICS tststats.s5 ON a, b FROM tststats.mv; -CREATE STATISTICS tststats.s6 ON a, b FROM tststats.ty; -ERROR: cannot define statistics for relation "ty" -DETAIL: This operation is not supported for composite types. -CREATE STATISTICS tststats.s7 ON a, b FROM tststats.f; -CREATE STATISTICS tststats.s8 ON a, b FROM tststats.pt; -CREATE STATISTICS tststats.s9 ON a, b FROM tststats.pt1; -DO $$ -DECLARE - relname text := reltoastrelid::regclass FROM pg_class WHERE oid = 'tststats.t'::regclass; -BEGIN - EXECUTE 'CREATE STATISTICS tststats.s10 ON a, b FROM ' || relname; -EXCEPTION WHEN wrong_object_type THEN - RAISE NOTICE 'stats on toast table not created'; -END; -$$; -NOTICE: stats on toast table not created -DROP SCHEMA tststats CASCADE; -NOTICE: drop cascades to 7 other objects -DETAIL: drop cascades to table tststats.t -drop cascades to sequence tststats.s -drop cascades to view tststats.v -drop cascades to materialized view tststats.mv -drop cascades to type tststats.ty -drop cascades to foreign table tststats.f -drop cascades to table tststats.pt -DROP FOREIGN DATA WRAPPER extstats_dummy_fdw CASCADE; -NOTICE: drop cascades to server extstats_dummy_srv --- n-distinct tests -CREATE TABLE ndistinct ( - filler1 TEXT, - filler2 NUMERIC, - a INT, - b INT, - filler3 DATE, - c INT, - d INT -) -WITH (autovacuum_enabled = off); --- over-estimates when using only per-column statistics -INSERT INTO ndistinct (a, b, c, filler1) - SELECT i/100, i/100, i/100, (i/100) || ' dollars and zero cents' - FROM generate_series(1,1000) s(i); -ANALYZE ndistinct; --- Group Aggregate, due to over-estimate of the number of groups -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 100 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c'); - estimated | actual ------------+-------- - 100 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c'); - estimated | actual ------------+-------- - 100 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d'); - estimated | actual ------------+-------- - 200 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d'); - estimated | actual ------------+-------- - 200 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (a+1)'); - estimated | actual ------------+-------- - 100 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)'); - estimated | actual ------------+-------- - 100 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)'); - estimated | actual ------------+-------- - 100 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)'); - estimated | actual ------------+-------- - 100 | 11 -(1 row) - --- correct command -CREATE STATISTICS s10 ON a, b, c FROM ndistinct; -ANALYZE ndistinct; -SELECT s.stxkind, d.stxdndistinct - FROM pg_statistic_ext s, pg_statistic_ext_data d - WHERE s.stxrelid = 'ndistinct'::regclass - AND d.stxoid = s.oid; - stxkind | stxdndistinct ----------+----------------------------------------------------- - {d,f,m} | {"3, 4": 11, "3, 6": 11, "4, 6": 11, "3, 4, 6": 11} -(1 row) - --- minor improvement, make sure the ctid does not break the matching -SELECT * FROM 
check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY ctid, a, b'); - estimated | actual ------------+-------- - 1000 | 1000 -(1 row) - --- Hash Aggregate, thanks to estimates improved by the statistic -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 11 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c'); - estimated | actual ------------+-------- - 11 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c'); - estimated | actual ------------+-------- - 11 | 11 -(1 row) - --- partial improvement (match on attributes) -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (a+1)'); - estimated | actual ------------+-------- - 11 | 11 -(1 row) - --- expressions - no improvement -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)'); - estimated | actual ------------+-------- - 11 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)'); - estimated | actual ------------+-------- - 11 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)'); - estimated | actual ------------+-------- - 11 | 11 -(1 row) - --- last two plans keep using Group Aggregate, because 'd' is not covered --- by the statistic and while it's NULL-only we assume 200 values for it -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d'); - estimated | actual ------------+-------- - 200 | 11 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d'); - estimated | actual ------------+-------- - 200 | 11 -(1 row) - -TRUNCATE TABLE ndistinct; --- under-estimates when using only per-column statistics -INSERT INTO ndistinct (a, b, c, filler1) - SELECT mod(i,13), mod(i,17), mod(i,19), - mod(i,23) || ' dollars and zero cents' - FROM generate_series(1,1000) s(i); -ANALYZE ndistinct; -SELECT s.stxkind, d.stxdndistinct - FROM pg_statistic_ext s, pg_statistic_ext_data d - WHERE s.stxrelid = 'ndistinct'::regclass - AND d.stxoid = s.oid; - stxkind | stxdndistinct ----------+---------------------------------------------------------- - {d,f,m} | {"3, 4": 221, "3, 6": 247, "4, 6": 323, "3, 4, 6": 1000} -(1 row) - --- correct estimates -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 221 | 221 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c'); - estimated | actual ------------+-------- - 1000 | 1000 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d'); - estimated | actual ------------+-------- - 1000 | 1000 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d'); - estimated | actual ------------+-------- - 323 | 323 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, d'); - estimated | actual ------------+-------- - 200 | 13 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (a+1)'); - estimated | actual ------------+-------- - 221 | 221 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)'); - estimated | actual ------------+-------- - 221 | 221 -(1 row) - -SELECT 
* FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)'); - estimated | actual ------------+-------- - 1000 | 1000 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)'); - estimated | actual ------------+-------- - 221 | 221 -(1 row) - -DROP STATISTICS s10; -SELECT s.stxkind, d.stxdndistinct - FROM pg_statistic_ext s, pg_statistic_ext_data d - WHERE s.stxrelid = 'ndistinct'::regclass - AND d.stxoid = s.oid; - stxkind | stxdndistinct ----------+--------------- -(0 rows) - --- dropping the statistics results in under-estimates -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 100 | 221 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c'); - estimated | actual ------------+-------- - 100 | 1000 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d'); - estimated | actual ------------+-------- - 200 | 1000 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d'); - estimated | actual ------------+-------- - 200 | 323 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, d'); - estimated | actual ------------+-------- - 200 | 13 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (a+1)'); - estimated | actual ------------+-------- - 100 | 221 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)'); - estimated | actual ------------+-------- - 100 | 221 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)'); - estimated | actual ------------+-------- - 100 | 1000 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)'); - estimated | actual ------------+-------- - 100 | 221 -(1 row) - --- ndistinct estimates with statistics on expressions -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)'); - estimated | actual ------------+-------- - 100 | 221 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)'); - estimated | actual ------------+-------- - 100 | 1000 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)'); - estimated | actual ------------+-------- - 100 | 221 -(1 row) - -CREATE STATISTICS s10 (ndistinct) ON (a+1), (b+100), (2*c) FROM ndistinct; -ANALYZE ndistinct; -SELECT s.stxkind, d.stxdndistinct - FROM pg_statistic_ext s, pg_statistic_ext_data d - WHERE s.stxrelid = 'ndistinct'::regclass - AND d.stxoid = s.oid; - stxkind | stxdndistinct ----------+------------------------------------------------------------------- - {d,e} | {"-1, -2": 221, "-1, -3": 247, "-2, -3": 323, "-1, -2, -3": 1000} -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)'); - estimated | actual ------------+-------- - 221 | 221 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)'); - estimated | actual ------------+-------- - 1000 | 1000 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)'); - estimated | actual ------------+-------- - 221 | 221 -(1 row) - -DROP STATISTICS s10; --- a 
mix of attributes and expressions -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 100 | 221 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (2*c)'); - estimated | actual ------------+-------- - 100 | 247 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (2*c)'); - estimated | actual ------------+-------- - 100 | 1000 -(1 row) - -CREATE STATISTICS s10 (ndistinct) ON a, b, (2*c) FROM ndistinct; -ANALYZE ndistinct; -SELECT s.stxkind, d.stxdndistinct - FROM pg_statistic_ext s, pg_statistic_ext_data d - WHERE s.stxrelid = 'ndistinct'::regclass - AND d.stxoid = s.oid; - stxkind | stxdndistinct ----------+------------------------------------------------------------- - {d,e} | {"3, 4": 221, "3, -1": 247, "4, -1": 323, "3, 4, -1": 1000} -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 221 | 221 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (2*c)'); - estimated | actual ------------+-------- - 247 | 247 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (2*c)'); - estimated | actual ------------+-------- - 1000 | 1000 -(1 row) - -DROP STATISTICS s10; --- combination of multiple ndistinct statistics, with/without expressions -TRUNCATE ndistinct; --- two mostly independent groups of columns -INSERT INTO ndistinct (a, b, c, d) - SELECT mod(i,3), mod(i,9), mod(i,5), mod(i,20) - FROM generate_series(1,1000) s(i); -ANALYZE ndistinct; -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 27 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)'); - estimated | actual ------------+-------- - 27 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b'); - estimated | actual ------------+-------- - 27 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)'); - estimated | actual ------------+-------- - 27 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c'); - estimated | actual ------------+-------- - 100 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)'); - estimated | actual ------------+-------- - 100 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)'); - estimated | actual ------------+-------- - 100 | 180 -(1 row) - --- basic statistics on both attributes (no expressions) -CREATE STATISTICS s11 (ndistinct) ON a, b FROM ndistinct; -CREATE STATISTICS s12 (ndistinct) ON c, d FROM ndistinct; -ANALYZE ndistinct; -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)'); - estimated | actual 
------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c'); - estimated | actual ------------+-------- - 45 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)'); - estimated | actual ------------+-------- - 45 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)'); - estimated | actual ------------+-------- - 100 | 180 -(1 row) - --- replace the second statistics by statistics on expressions -DROP STATISTICS s12; -CREATE STATISTICS s12 (ndistinct) ON (c * 10), (d - 1) FROM ndistinct; -ANALYZE ndistinct; -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c'); - estimated | actual ------------+-------- - 45 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)'); - estimated | actual ------------+-------- - 45 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)'); - estimated | actual ------------+-------- - 100 | 180 -(1 row) - --- replace the second statistics by statistics on both attributes and expressions -DROP STATISTICS s12; -CREATE STATISTICS s12 (ndistinct) ON c, d, (c * 10), (d - 1) FROM ndistinct; -ANALYZE ndistinct; -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c'); - estimated | actual ------------+-------- - 45 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)'); - estimated | actual ------------+-------- - 45 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)'); - estimated | actual ------------+-------- - 100 | 180 -(1 row) - --- replace the other statistics by statistics on both attributes and expressions -DROP STATISTICS s11; -CREATE STATISTICS s11 (ndistinct) ON a, b, (a*5), (b+1) FROM ndistinct; -ANALYZE ndistinct; -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)'); - estimated | actual 
------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c'); - estimated | actual ------------+-------- - 45 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)'); - estimated | actual ------------+-------- - 45 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)'); - estimated | actual ------------+-------- - 100 | 180 -(1 row) - --- replace statistics by somewhat overlapping ones (this expected to get worse estimate --- because the first statistics shall be applied to 3 columns, and the second one can't --- be really applied) -DROP STATISTICS s11; -DROP STATISTICS s12; -CREATE STATISTICS s11 (ndistinct) ON a, b, (a*5), (b+1) FROM ndistinct; -CREATE STATISTICS s12 (ndistinct) ON a, (b+1), (c * 10) FROM ndistinct; -ANALYZE ndistinct; -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)'); - estimated | actual ------------+-------- - 9 | 9 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c'); - estimated | actual ------------+-------- - 45 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)'); - estimated | actual ------------+-------- - 100 | 45 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)'); - estimated | actual ------------+-------- - 100 | 180 -(1 row) - -DROP STATISTICS s11; -DROP STATISTICS s12; --- functional dependencies tests -CREATE TABLE functional_dependencies ( - filler1 TEXT, - filler2 NUMERIC, - a INT, - b TEXT, - filler3 DATE, - c INT, - d TEXT -) -WITH (autovacuum_enabled = off); -CREATE INDEX fdeps_ab_idx ON functional_dependencies (a, b); -CREATE INDEX fdeps_abc_idx ON functional_dependencies (a, b, c); --- random data (no functional dependencies) -INSERT INTO functional_dependencies (a, b, c, filler1) - SELECT mod(i, 5), mod(i, 7), mod(i, 11), i FROM generate_series(1,1000) s(i); -ANALYZE functional_dependencies; -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'''); - estimated | actual ------------+-------- - 29 | 29 -(1 row) - -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1'); - estimated | actual ------------+-------- - 3 | 3 -(1 row) - --- create statistics -CREATE STATISTICS func_deps_stat (dependencies) ON a, b, c FROM functional_dependencies; -ANALYZE functional_dependencies; -SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'''); - estimated | actual ------------+-------- 
-        29 |     29
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
-         3 |      3
-(1 row)
-
--- a => b, a => c, b => c
-TRUNCATE functional_dependencies;
-DROP STATISTICS func_deps_stat;
--- now do the same thing, but with expressions
-INSERT INTO functional_dependencies (a, b, c, filler1)
-  SELECT i, i, i, i FROM generate_series(1,5000) s(i);
-ANALYZE functional_dependencies;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE mod(a, 11) = 1 AND mod(b::int, 13) = 1');
- estimated | actual
------------+--------
-         1 |     35
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE mod(a, 11) = 1 AND mod(b::int, 13) = 1 AND mod(c, 7) = 1');
- estimated | actual
------------+--------
-         1 |      5
-(1 row)
-
--- create statistics
-CREATE STATISTICS func_deps_stat (dependencies) ON (mod(a,11)), (mod(b::int, 13)), (mod(c, 7)) FROM functional_dependencies;
-ANALYZE functional_dependencies;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE mod(a, 11) = 1 AND mod(b::int, 13) = 1');
- estimated | actual
------------+--------
-        35 |     35
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE mod(a, 11) = 1 AND mod(b::int, 13) = 1 AND mod(c, 7) = 1');
- estimated | actual
------------+--------
-         5 |      5
-(1 row)
-
--- a => b, a => c, b => c
-TRUNCATE functional_dependencies;
-DROP STATISTICS func_deps_stat;
-INSERT INTO functional_dependencies (a, b, c, filler1)
-  SELECT mod(i,100), mod(i,50), mod(i,25), i FROM generate_series(1,5000) s(i);
-ANALYZE functional_dependencies;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
--- IN
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ''1''');
- estimated | actual
------------+--------
-         2 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b IN (''1'', ''2'')');
- estimated | actual
------------+--------
-         4 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b IN (''1'', ''2'')');
- estimated | actual
------------+--------
-         8 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b = ''1''');
- estimated | actual
------------+--------
-         4 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 26, 51, 76) AND b IN (''1'', ''26'') AND c = 1');
- estimated | actual
------------+--------
-         1 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 26, 51, 76) AND b IN (''1'', ''26'') AND c IN (1)');
- estimated | actual
------------+--------
-         1 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 26, 27, 51, 52, 76, 77) AND b IN (''1'', ''2'', ''26'', ''27'') AND c IN (1, 2)');
- estimated | actual
------------+--------
-         3 |    400
-(1 row)
-
--- OR clauses referencing the same attribute
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 51) AND b = ''1''');
- estimated | actual
------------+--------
-         2 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 51) AND (b = ''1'' OR b = ''2'')');
- estimated | actual
------------+--------
-         4 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 2 OR a = 51 OR a = 52) AND (b = ''1'' OR b = ''2'')');
- estimated | actual
------------+--------
-         8 |    200
-(1 row)
-
--- OR clauses referencing different attributes
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR b = ''1'') AND b = ''1''');
- estimated | actual
------------+--------
-         3 |    100
-(1 row)
-
--- ANY
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 51]) AND b = ''1''');
- estimated | actual
------------+--------
-         2 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 51]) AND b = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-         4 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-         8 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 26, 51, 76]) AND b = ANY (ARRAY[''1'', ''26'']) AND c = 1');
- estimated | actual
------------+--------
-         1 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 26, 51, 76]) AND b = ANY (ARRAY[''1'', ''26'']) AND c = ANY (ARRAY[1])');
- estimated | actual
------------+--------
-         1 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 2, 26, 27, 51, 52, 76, 77]) AND b = ANY (ARRAY[''1'', ''2'', ''26'', ''27'']) AND c = ANY (ARRAY[1, 2])');
- estimated | actual
------------+--------
-         3 |    400
-(1 row)
-
--- ANY with inequalities should not benefit from functional dependencies
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a < ANY (ARRAY[1, 51]) AND b > ''1''');
- estimated | actual
------------+--------
-      2472 |   2400
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a >= ANY (ARRAY[1, 51]) AND b <= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-      1441 |   1250
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a <= ANY (ARRAY[1, 2, 51, 52]) AND b >= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-      3909 |   2550
-(1 row)
-
--- ALL (should not benefit from functional dependencies)
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ALL (ARRAY[''1''])');
- estimated | actual
------------+--------
-         2 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-         1 |      0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-         1 |      0
-(1 row)
-
--- create statistics
-CREATE STATISTICS func_deps_stat (dependencies) ON a, b, c FROM functional_dependencies;
-ANALYZE functional_dependencies;
--- print the detected dependencies
-SELECT dependencies FROM pg_stats_ext WHERE statistics_name = 'func_deps_stat';
-                                                dependencies
-------------------------------------------------------------------------------------------------------------
- {"3 => 4": 1.000000, "3 => 6": 1.000000, "4 => 6": 1.000000, "3, 4 => 6": 1.000000, "3, 6 => 4": 1.000000}
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
--- IN
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ''1''');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b IN (''1'', ''2'')');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b IN (''1'', ''2'')');
- estimated | actual
------------+--------
-       200 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b = ''1''');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 26, 51, 76) AND b IN (''1'', ''26'') AND c = 1');
- estimated | actual
------------+--------
-       200 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 26, 51, 76) AND b IN (''1'', ''26'') AND c IN (1)');
- estimated | actual
------------+--------
-       200 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 26, 27, 51, 52, 76, 77) AND b IN (''1'', ''2'', ''26'', ''27'') AND c IN (1, 2)');
- estimated | actual
------------+--------
-       400 |    400
-(1 row)
-
--- OR clauses referencing the same attribute
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 51) AND b = ''1''');
- estimated | actual
------------+--------
-        99 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 51) AND (b = ''1'' OR b = ''2'')');
- estimated | actual
------------+--------
-        99 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 2 OR a = 51 OR a = 52) AND (b = ''1'' OR b = ''2'')');
- estimated | actual
------------+--------
-       197 |    200
-(1 row)
-
--- OR clauses referencing different attributes are incompatible
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR b = ''1'') AND b = ''1''');
- estimated | actual
------------+--------
-         3 |    100
-(1 row)
-
--- ANY
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 51]) AND b = ''1''');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 51]) AND b = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-       200 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 26, 51, 76]) AND b = ANY (ARRAY[''1'', ''26'']) AND c = 1');
- estimated | actual
------------+--------
-       200 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 26, 51, 76]) AND b = ANY (ARRAY[''1'', ''26'']) AND c = ANY (ARRAY[1])');
- estimated | actual
------------+--------
-       200 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 2, 26, 27, 51, 52, 76, 77]) AND b = ANY (ARRAY[''1'', ''2'', ''26'', ''27'']) AND c = ANY (ARRAY[1, 2])');
- estimated | actual
------------+--------
-       400 |    400
-(1 row)
-
--- ANY with inequalities should not benefit from functional dependencies
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a < ANY (ARRAY[1, 51]) AND b > ''1''');
- estimated | actual
------------+--------
-      2472 |   2400
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a >= ANY (ARRAY[1, 51]) AND b <= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-      1441 |   1250
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a <= ANY (ARRAY[1, 2, 51, 52]) AND b >= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-      3909 |   2550
-(1 row)
-
--- ALL (should not benefit from functional dependencies)
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ALL (ARRAY[''1''])');
- estimated | actual
------------+--------
-         2 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-         1 |      0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-         1 |      0
-(1 row)
-
--- changing the type of column c causes all its stats to be dropped, reverting
--- to default estimates without any statistics, i.e. 0.5% selectivity for each
--- condition
-ALTER TABLE functional_dependencies ALTER COLUMN c TYPE numeric;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-ANALYZE functional_dependencies;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-DROP STATISTICS func_deps_stat;
--- now try functional dependencies with expressions
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1''');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1'' AND (c + 1) = 2');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
--- IN
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ''1''');
- estimated | actual
------------+--------
-         1 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) IN (''1'', ''2'')');
- estimated | actual
------------+--------
-         1 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) IN (''1'', ''2'')');
- estimated | actual
------------+--------
-         1 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) = ''1''');
- estimated | actual
------------+--------
-         1 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 52, 102, 152) AND upper(b) IN (''1'', ''26'') AND (c + 1) = 2');
- estimated | actual
------------+--------
-         1 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 52, 102, 152) AND upper(b) IN (''1'', ''26'') AND (c + 1) IN (2)');
- estimated | actual
------------+--------
-         1 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 52, 54, 102, 104, 152, 154) AND upper(b) IN (''1'', ''2'', ''26'', ''27'') AND (c + 1) IN (2, 3)');
- estimated | actual
------------+--------
-         1 |    400
-(1 row)
-
--- OR clauses referencing the same attribute
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 102) AND upper(b) = ''1''');
- estimated | actual
------------+--------
-         1 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 102) AND (upper(b) = ''1'' OR upper(b) = ''2'')');
- estimated | actual
------------+--------
-         1 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 4 OR (a * 2) = 102 OR (a * 2) = 104) AND (upper(b) = ''1'' OR upper(b) = ''2'')');
- estimated | actual
------------+--------
-         1 |    200
-(1 row)
-
--- OR clauses referencing different attributes
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR upper(b) = ''1'') AND upper(b) = ''1''');
- estimated | actual
------------+--------
-         1 |    100
-(1 row)
-
--- ANY
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 102]) AND upper(b) = ''1''');
- estimated | actual
------------+--------
-         1 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 102]) AND upper(b) = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-         1 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 4, 102, 104]) AND upper(b) = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-         1 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 52, 102, 152]) AND upper(b) = ANY (ARRAY[''1'', ''26'']) AND (c + 1) = 2');
- estimated | actual
------------+--------
-         1 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 52, 102, 152]) AND upper(b) = ANY (ARRAY[''1'', ''26'']) AND (c + 1) = ANY (ARRAY[2])');
- estimated | actual
------------+--------
-         1 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 4, 52, 54, 102, 104, 152, 154]) AND upper(b) = ANY (ARRAY[''1'', ''2'', ''26'', ''27'']) AND (c + 1) = ANY (ARRAY[2, 3])');
- estimated | actual
------------+--------
-         1 |    400
-(1 row)
-
--- ANY with inequalities should not benefit from functional dependencies
--- the estimates however improve thanks to having expression statistics
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) < ANY (ARRAY[2, 102]) AND upper(b) > ''1''');
- estimated | actual
------------+--------
-       926 |   2400
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) >= ANY (ARRAY[2, 102]) AND upper(b) <= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-      1543 |   1250
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) <= ANY (ARRAY[2, 4, 102, 104]) AND upper(b) >= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-      2229 |   2550
-(1 row)
-
--- ALL (should not benefit from functional dependencies)
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ALL (ARRAY[''1''])');
- estimated | actual
------------+--------
-         1 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-         1 |      0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-         1 |      0
-(1 row)
-
--- create statistics on expressions
-CREATE STATISTICS func_deps_stat (dependencies) ON (a * 2), upper(b), (c + 1) FROM functional_dependencies;
-ANALYZE functional_dependencies;
--- print the detected dependencies
-SELECT dependencies FROM pg_stats_ext WHERE statistics_name = 'func_deps_stat';
-                                                      dependencies
------------------------------------------------------------------------------------------------------------------------
- {"-1 => -2": 1.000000, "-1 => -3": 1.000000, "-2 => -3": 1.000000, "-1, -2 => -3": 1.000000, "-1, -3 => -2": 1.000000}
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1''');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1'' AND (c + 1) = 2');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
--- IN
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ''1''');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) IN (''1'', ''2'')');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) IN (''1'', ''2'')');
- estimated | actual
------------+--------
-       200 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) = ''1''');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 52, 102, 152) AND upper(b) IN (''1'', ''26'') AND (c + 1) = 2');
- estimated | actual
------------+--------
-       200 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 52, 102, 152) AND upper(b) IN (''1'', ''26'') AND (c + 1) IN (2)');
- estimated | actual
------------+--------
-       200 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 52, 54, 102, 104, 152, 154) AND upper(b) IN (''1'', ''2'', ''26'', ''27'') AND (c + 1) IN (2, 3)');
- estimated | actual
------------+--------
-       400 |    400
-(1 row)
-
--- OR clauses referencing the same attribute
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 102) AND upper(b) = ''1''');
- estimated | actual
------------+--------
-        99 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 102) AND (upper(b) = ''1'' OR upper(b) = ''2'')');
- estimated | actual
------------+--------
-        99 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 4 OR (a * 2) = 102 OR (a * 2) = 104) AND (upper(b) = ''1'' OR upper(b) = ''2'')');
- estimated | actual
------------+--------
-       197 |    200
-(1 row)
-
--- OR clauses referencing different attributes
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR upper(b) = ''1'') AND upper(b) = ''1''');
- estimated | actual
------------+--------
-         3 |    100
-(1 row)
-
--- ANY
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 102]) AND upper(b) = ''1''');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 102]) AND upper(b) = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 4, 102, 104]) AND upper(b) = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-       200 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 52, 102, 152]) AND upper(b) = ANY (ARRAY[''1'', ''26'']) AND (c + 1) = 2');
- estimated | actual
------------+--------
-       200 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 52, 102, 152]) AND upper(b) = ANY (ARRAY[''1'', ''26'']) AND (c + 1) = ANY (ARRAY[2])');
- estimated | actual
------------+--------
-       200 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 4, 52, 54, 102, 104, 152, 154]) AND upper(b) = ANY (ARRAY[''1'', ''2'', ''26'', ''27'']) AND (c + 1) = ANY (ARRAY[2, 3])');
- estimated | actual
------------+--------
-       400 |    400
-(1 row)
-
--- ANY with inequalities should not benefit from functional dependencies
--- the estimates however improve thanks to having expression statistics
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) < ANY (ARRAY[2, 102]) AND upper(b) > ''1''');
- estimated | actual
------------+--------
-      2472 |   2400
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) >= ANY (ARRAY[2, 102]) AND upper(b) <= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-      1441 |   1250
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) <= ANY (ARRAY[2, 4, 102, 104]) AND upper(b) >= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-      3909 |   2550
-(1 row)
-
--- ALL (should not benefit from functional dependencies)
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ALL (ARRAY[''1''])');
- estimated | actual
------------+--------
-         2 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-         1 |      0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-         1 |      0
-(1 row)
-
--- check the ability to use multiple functional dependencies
-CREATE TABLE functional_dependencies_multi (
-    a INTEGER,
-    b INTEGER,
-    c INTEGER,
-    d INTEGER
-)
-WITH (autovacuum_enabled = off);
-INSERT INTO functional_dependencies_multi (a, b, c, d)
-  SELECT
-    mod(i,7),
-    mod(i,7),
-    mod(i,11),
-    mod(i,11)
-  FROM generate_series(1,5000) s(i);
-ANALYZE functional_dependencies_multi;
--- estimates without any functional dependencies
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE a = 0 AND b = 0');
- estimated | actual
------------+--------
-       102 |    714
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE 0 = a AND 0 = b');
- estimated | actual
------------+--------
-       102 |    714
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE c = 0 AND d = 0');
- estimated | actual
------------+--------
-        41 |    454
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE a = 0 AND b = 0 AND c = 0 AND d = 0');
- estimated | actual
------------+--------
-         1 |     64
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE 0 = a AND b = 0 AND 0 = c AND d = 0');
- estimated | actual
------------+--------
-         1 |     64
-(1 row)
-
--- create separate functional dependencies
-CREATE STATISTICS functional_dependencies_multi_1 (dependencies) ON a, b FROM functional_dependencies_multi;
-CREATE STATISTICS functional_dependencies_multi_2 (dependencies) ON c, d FROM functional_dependencies_multi;
-ANALYZE functional_dependencies_multi;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE a = 0 AND b = 0');
- estimated | actual
------------+--------
-       714 |    714
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE 0 = a AND 0 = b');
- estimated | actual
------------+--------
-       714 |    714
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE c = 0 AND d = 0');
- estimated | actual
------------+--------
-       454 |    454
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE a = 0 AND b = 0 AND c = 0 AND d = 0');
- estimated | actual
------------+--------
-        65 |     64
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE 0 = a AND b = 0 AND 0 = c AND d = 0');
- estimated | actual
------------+--------
-        65 |     64
-(1 row)
-
-DROP TABLE functional_dependencies_multi;
--- MCV lists
-CREATE TABLE mcv_lists (
-    filler1 TEXT,
-    filler2 NUMERIC,
-    a INT,
-    b VARCHAR,
-    filler3 DATE,
-    c INT,
-    d TEXT,
-    ia INT[]
-)
-WITH (autovacuum_enabled = off);
--- random data (no MCV list)
-INSERT INTO mcv_lists (a, b, c, filler1)
-  SELECT mod(i,37), mod(i,41), mod(i,43), mod(i,47) FROM generate_series(1,5000) s(i);
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
-         3 |      4
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
-         1 |      1
-(1 row)
-
--- create statistics
-CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c FROM mcv_lists;
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
-         3 |      4
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
-         1 |      1
-(1 row)
-
-TRUNCATE mcv_lists;
-DROP STATISTICS mcv_lists_stats;
--- random data (no MCV list), but with expression
-INSERT INTO mcv_lists (a, b, c, filler1)
-  SELECT i, i, i, i FROM generate_series(1,1000) s(i);
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 AND mod(b::int,11) = 1');
- estimated | actual
------------+--------
-         1 |     13
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 AND mod(b::int,11) = 1 AND mod(c,13) = 1');
- estimated | actual
------------+--------
-         1 |      1
-(1 row)
-
--- create statistics
-CREATE STATISTICS mcv_lists_stats (mcv) ON (mod(a,7)), (mod(b::int,11)), (mod(c,13)) FROM mcv_lists;
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 AND mod(b::int,11) = 1');
- estimated | actual
------------+--------
-        13 |     13
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 AND mod(b::int,11) = 1 AND mod(c,13) = 1');
- estimated | actual
------------+--------
-         1 |      1
-(1 row)
-
--- 100 distinct combinations, all in the MCV list
-TRUNCATE mcv_lists;
-DROP STATISTICS mcv_lists_stats;
-INSERT INTO mcv_lists (a, b, c, ia, filler1)
-  SELECT mod(i,100), mod(i,50), mod(i,25), array[mod(i,25)], i
-  FROM generate_series(1,5000) s(i);
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = a AND ''1'' = b');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 1 AND b < ''1''');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > a AND ''1'' > b');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= 0 AND b <= ''0''');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 0 >= a AND ''0'' >= b');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 5 AND b < ''1'' AND c < 5');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 5 AND ''1'' > b AND 5 > c');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= 4 AND b <= ''0'' AND c <= 4');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 4 >= a AND ''0'' >= b AND 4 >= c');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1');
- estimated | actual
------------+--------
-       343 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1 OR d IS NOT NULL');
- estimated | actual
------------+--------
-       343 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (1, 2, 51, 52) AND b IN ( ''1'', ''2'')');
- estimated | actual
------------+--------
-         8 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (1, 2, 51, 52, NULL) AND b IN ( ''1'', ''2'', NULL)');
- estimated | actual
------------+--------
-         8 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-         8 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[NULL, 1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2'', NULL])');
- estimated | actual
------------+--------
-         8 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= ANY (ARRAY[1, 2, 3]) AND b IN (''1'', ''2'', ''3'')');
- estimated | actual
------------+--------
-        26 |    150
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= ANY (ARRAY[1, NULL, 2, 3]) AND b IN (''1'', ''2'', NULL, ''3'')');
- estimated | actual
------------+--------
-        26 |    150
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND c > ANY (ARRAY[1, 2, 3])');
- estimated | actual
------------+--------
-        10 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND c > ANY (ARRAY[1, 2, 3, NULL])');
- estimated | actual
------------+--------
-        10 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', ''3'') AND c > ANY (ARRAY[1, 2, 3])');
- estimated | actual
------------+--------
-         1 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', NULL, ''3'') AND c > ANY (ARRAY[1, 2, NULL, 3])');
- estimated | actual
------------+--------
-         1 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[4,5]) AND 4 = ANY(ia)');
- estimated | actual
------------+--------
-         4 |     50
-(1 row)
-
--- create statistics
-CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c, ia FROM mcv_lists;
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = a AND ''1'' = b');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 1 AND b < ''1''');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > a AND ''1'' > b');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= 0 AND b <= ''0''');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 0 >= a AND ''0'' >= b');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 5 AND b < ''1'' AND c < 5');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 5 AND ''1'' > b AND 5 > c');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= 4 AND b <= ''0'' AND c <= 4');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 4 >= a AND ''0'' >= b AND 4 >= c');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1');
- estimated | actual
------------+--------
-       200 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1 OR d IS NOT NULL');
- estimated | actual
------------+--------
-       200 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1 OR d IS NOT NULL');
- estimated | actual
------------+--------
-       200 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (1, 2, 51, 52) AND b IN ( ''1'', ''2'')');
- estimated | actual
------------+--------
-       200 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (1, 2, 51, 52, NULL) AND b IN ( ''1'', ''2'', NULL)');
- estimated | actual
------------+--------
-       200 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
-       200 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[NULL, 1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2'', NULL])');
- estimated | actual
------------+--------
-       200 |    200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= ANY (ARRAY[1, 2, 3]) AND b IN (''1'', ''2'', ''3'')');
- estimated | actual
------------+--------
-       150 |    150
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= ANY (ARRAY[1, NULL, 2, 3]) AND b IN (''1'', ''2'', NULL, ''3'')');
- estimated | actual
------------+--------
-       150 |    150
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND c > ANY (ARRAY[1, 2, 3])');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND c > ANY (ARRAY[1, 2, 3, NULL])');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', ''3'') AND c > ANY (ARRAY[1, 2, 3])');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', NULL, ''3'') AND c > ANY (ARRAY[1, 2, NULL, 3])');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[4,5]) AND 4 = ANY(ia)');
- estimated | actual
------------+--------
-         4 |     50
-(1 row)
-
--- check change of unrelated column type does not reset the MCV statistics
-ALTER TABLE mcv_lists ALTER COLUMN d TYPE VARCHAR(64);
-SELECT d.stxdmcv IS NOT NULL
-  FROM pg_statistic_ext s, pg_statistic_ext_data d
-  WHERE s.stxname = 'mcv_lists_stats'
-    AND d.stxoid = s.oid;
- ?column?
-----------
- t
-(1 row)
-
--- check change of column type resets the MCV statistics
-ALTER TABLE mcv_lists ALTER COLUMN c TYPE numeric;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
--- 100 distinct combinations, all in the MCV list, but with expressions
-TRUNCATE mcv_lists;
-DROP STATISTICS mcv_lists_stats;
-INSERT INTO mcv_lists (a, b, c, filler1)
-  SELECT i, i, i, i FROM generate_series(1,1000) s(i);
-ANALYZE mcv_lists;
--- without any stats on the expressions, we have to use default selectivities, which
--- is why the estimates here are different from the pre-computed case above
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = mod(a,20) AND 1 = mod(b::int,10)');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < 1 AND mod(b::int,10) < 1');
- estimated | actual
------------+--------
-       111 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > mod(a,20) AND 1 > mod(b::int,10)');
- estimated | actual
------------+--------
-       111 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1 AND mod(c,5) = 1');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 OR mod(b::int,10) = 1 OR mod(c,25) = 1 OR d IS NOT NULL');
- estimated | actual
------------+--------
-        15 |    120
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) IN (1, 2, 51, 52, NULL) AND mod(b::int,10) IN ( 1, 2, NULL)');
- estimated | actual
------------+--------
-         1 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = ANY (ARRAY[1, 2, 51, 52]) AND mod(b::int,10) = ANY (ARRAY[1, 2])');
- estimated | actual
------------+--------
-         1 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) <= ANY (ARRAY[1, NULL, 2, 3]) AND mod(b::int,10) IN (1, 2, NULL, 3)');
- estimated | actual
------------+--------
-        11 |    150
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < ALL (ARRAY[4, 5]) AND mod(b::int,10) IN (1, 2, 3) AND mod(c,5) > ANY (ARRAY[1, 2, 3])');
- estimated | actual
------------+--------
-         1 |    100
-(1 row)
-
--- create statistics with expressions only (we create three separate stats, in order not to build more complex extended stats)
-CREATE STATISTICS mcv_lists_stats_1 ON (mod(a,20)) FROM mcv_lists;
-CREATE STATISTICS mcv_lists_stats_2 ON (mod(b::int,10)) FROM mcv_lists;
-CREATE STATISTICS mcv_lists_stats_3 ON (mod(c,5)) FROM mcv_lists;
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1');
- estimated | actual
------------+--------
-         5 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = mod(a,20) AND 1 = mod(b::int,10)');
- estimated | actual
------------+--------
-         5 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < 1 AND mod(b::int,10) < 1');
- estimated | actual
------------+--------
-         5 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > mod(a,20) AND 1 > mod(b::int,10)');
- estimated | actual
------------+--------
-         5 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1 AND mod(c,5) = 1');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 OR mod(b::int,10) = 1 OR mod(c,25) = 1 OR d IS NOT NULL');
- estimated | actual
------------+--------
-       149 |    120
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) IN (1, 2, 51, 52, NULL) AND mod(b::int,10) IN ( 1, 2, NULL)');
- estimated | actual
------------+--------
-        20 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = ANY (ARRAY[1, 2, 51, 52]) AND mod(b::int,10) = ANY (ARRAY[1, 2])');
- estimated | actual
------------+--------
-        20 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) <= ANY (ARRAY[1, NULL, 2, 3]) AND mod(b::int,10) IN (1, 2, NULL, 3)');
- estimated | actual
------------+--------
-       116 |    150
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < ALL (ARRAY[4, 5]) AND mod(b::int,10) IN (1, 2, 3) AND mod(c,5) > ANY (ARRAY[1, 2, 3])');
- estimated | actual
------------+--------
-        12 |    100
-(1 row)
-
-DROP STATISTICS mcv_lists_stats_1;
-DROP STATISTICS mcv_lists_stats_2;
-DROP STATISTICS mcv_lists_stats_3;
--- create statistics with both MCV and expressions
-CREATE STATISTICS mcv_lists_stats (mcv) ON (mod(a,20)), (mod(b::int,10)), (mod(c,5)) FROM mcv_lists;
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = mod(a,20) AND 1 = mod(b::int,10)');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < 1 AND mod(b::int,10) < 1');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > mod(a,20) AND 1 > mod(b::int,10)');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1 AND mod(c,5) = 1');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 OR mod(b::int,10) = 1 OR mod(c,25) = 1 OR d IS NOT NULL');
- estimated | actual
------------+--------
-       105 |    120
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) IN (1, 2, 51, 52, NULL) AND mod(b::int,10) IN ( 1, 2, NULL)');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = ANY (ARRAY[1, 2, 51, 52]) AND mod(b::int,10) = ANY (ARRAY[1, 2])');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) <= ANY (ARRAY[1, NULL, 2, 3]) AND mod(b::int,10) IN (1, 2, NULL, 3)');
- estimated | actual
------------+--------
-       150 |    150
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < ALL (ARRAY[4, 5]) AND mod(b::int,10) IN (1, 2, 3) AND mod(c,5) > ANY (ARRAY[1, 2, 3])');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
--- we can't use the statistic for OR clauses that are not fully covered (missing 'd' attribute)
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 OR mod(b::int,10) = 1 OR mod(c,5) = 1 OR d IS NOT NULL');
- estimated | actual
------------+--------
-       200 |    200
-(1 row)
-
--- 100 distinct combinations with NULL values, all in the MCV list
-TRUNCATE mcv_lists;
-DROP STATISTICS mcv_lists_stats;
-INSERT INTO mcv_lists (a, b, c, filler1)
-  SELECT
-    (CASE WHEN mod(i,100) = 1 THEN NULL ELSE mod(i,100) END),
-    (CASE WHEN mod(i,50) = 1 THEN NULL ELSE mod(i,50) END),
-    (CASE WHEN mod(i,25) = 1 THEN NULL ELSE mod(i,25) END),
-    i
-  FROM generate_series(1,5000) s(i);
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NULL');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NULL AND c IS NULL');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NOT NULL');
- estimated | actual
------------+--------
-        49 |      0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NOT NULL AND b IS NULL AND c IS NOT NULL');
- estimated | actual
------------+--------
-        95 |      0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (0, 1) AND b IN (''0'', ''1'')');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
--- create statistics
-CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c FROM mcv_lists;
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NULL');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NULL AND c IS NULL');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NOT NULL');
- estimated | actual
------------+--------
-         1 |      0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NOT NULL AND b IS NULL AND c IS NOT NULL');
- estimated | actual
------------+--------
-         1 |      0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (0, 1) AND b IN (''0'', ''1'')');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
--- test pg_mcv_list_items with a very simple (single item) MCV list
-TRUNCATE mcv_lists;
-INSERT INTO mcv_lists (a, b, c) SELECT 1, 2, 3 FROM generate_series(1,1000) s(i);
-ANALYZE mcv_lists;
-SELECT m.*
-  FROM pg_statistic_ext s, pg_statistic_ext_data d,
-       pg_mcv_list_items(d.stxdmcv) m
-  WHERE s.stxname = 'mcv_lists_stats'
-    AND d.stxoid = s.oid;
- index | values  |  nulls  | frequency | base_frequency
--------+---------+---------+-----------+----------------
-     0 | {1,2,3} | {f,f,f} |         1 |              1
-(1 row)
-
--- 2 distinct combinations with NULL values, all in the MCV list
-TRUNCATE mcv_lists;
-DROP STATISTICS mcv_lists_stats;
-INSERT INTO mcv_lists (a, b, c, d)
-  SELECT
-    NULL, -- always NULL
-    (CASE WHEN mod(i,2) = 0 THEN NULL ELSE 'x' END),
-    (CASE WHEN mod(i,2) = 0 THEN NULL ELSE 0 END),
-    (CASE WHEN mod(i,2) = 0 THEN NULL ELSE 'x' END)
-  FROM generate_series(1,5000) s(i);
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE b = ''x'' OR d = ''x''');
- estimated | actual
------------+--------
-      3750 |   2500
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''x'' OR d = ''x''');
- estimated | actual
------------+--------
-      3750 |   2500
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND (b = ''x'' OR d = ''x'')');
- estimated | actual
------------+--------
-      3750 |   2500
-(1 row)
-
--- create statistics
-CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, d FROM mcv_lists;
-ANALYZE mcv_lists;
--- test pg_mcv_list_items with MCV list containing variable-length data and NULLs
-SELECT m.*
-  FROM pg_statistic_ext s, pg_statistic_ext_data d,
-       pg_mcv_list_items(d.stxdmcv) m
-  WHERE s.stxname = 'mcv_lists_stats'
-    AND d.stxoid = s.oid;
- index |      values      |  nulls  | frequency | base_frequency
--------+------------------+---------+-----------+----------------
-     0 | {NULL,x,x}       | {t,f,f} |       0.5 |           0.25
-     1 | {NULL,NULL,NULL} | {t,t,t} |       0.5 |           0.25
-(2 rows)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE b = ''x'' OR d = ''x''');
- estimated | actual
------------+--------
-      2500 |   2500
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''x'' OR d = ''x''');
- estimated | actual
------------+--------
-      2500 |   2500
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND (b = ''x'' OR d = ''x'')');
- estimated | actual
------------+--------
-      2500 |   2500
-(1 row)
-
--- mcv with pass-by-ref fixlen types, e.g. uuid
-CREATE TABLE mcv_lists_uuid (
-    a UUID,
-    b UUID,
-    c UUID
-)
-WITH (autovacuum_enabled = off);
-INSERT INTO mcv_lists_uuid (a, b, c)
-  SELECT
-    fipshash(mod(i,100)::text)::uuid,
-    fipshash(mod(i,50)::text)::uuid,
-    fipshash(mod(i,25)::text)::uuid
-  FROM generate_series(1,5000) s(i);
-ANALYZE mcv_lists_uuid;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f''');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND c = ''e7f6c011-776e-8db7-cd33-0b54174fd76f''');
- estimated | actual
------------+--------
-         1 |     50
-(1 row)
-
-CREATE STATISTICS mcv_lists_uuid_stats (mcv) ON a, b, c
-  FROM mcv_lists_uuid;
-ANALYZE mcv_lists_uuid;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f''');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND c = ''e7f6c011-776e-8db7-cd33-0b54174fd76f''');
- estimated | actual
------------+--------
-        50 |     50
-(1 row)
-
-DROP TABLE mcv_lists_uuid;
--- mcv with arrays
-CREATE TABLE mcv_lists_arrays (
-    a TEXT[],
-    b NUMERIC[],
-    c INT[]
-)
-WITH (autovacuum_enabled = off);
-INSERT INTO mcv_lists_arrays (a, b, c)
-  SELECT
-    ARRAY[fipshash((i/100)::text), fipshash((i/100-1)::text), fipshash((i/100+1)::text)],
-    ARRAY[(i/100-1)::numeric/1000, (i/100)::numeric/1000, (i/100+1)::numeric/1000],
-    ARRAY[(i/100-1), i/100, (i/100+1)]
-  FROM generate_series(1,5000) s(i);
-CREATE STATISTICS mcv_lists_arrays_stats (mcv) ON a, b, c
-  FROM mcv_lists_arrays;
-ANALYZE mcv_lists_arrays;
--- mcv with bool
-CREATE TABLE mcv_lists_bool (
-    a BOOL,
-    b BOOL,
-    c BOOL
-)
-WITH (autovacuum_enabled = off);
-INSERT INTO mcv_lists_bool (a, b, c)
-  SELECT
-    (mod(i,2) = 0), (mod(i,4) = 0), (mod(i,8) = 0)
-  FROM generate_series(1,10000) s(i);
-ANALYZE mcv_lists_bool;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE a AND b AND c');
- estimated | actual
------------+--------
-       156 |   1250
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND b AND c');
- estimated | actual
------------+--------
-       156 |      0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND NOT b AND c');
- estimated | actual
------------+--------
-       469 |      0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND b AND NOT c');
- estimated | actual
------------+--------
-      1094 |      0
-(1 row)
-
-CREATE STATISTICS mcv_lists_bool_stats (mcv) ON a, b, c
-  FROM mcv_lists_bool;
-ANALYZE mcv_lists_bool;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE a AND b AND c');
- estimated | actual
------------+--------
-      1250 |   1250
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND b AND c');
- estimated | actual
------------+--------
-         1 |      0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND NOT b AND c');
- estimated | actual
------------+--------
-         1 |      0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND b AND NOT c');
- estimated | actual
------------+--------
-         1 |      0
-(1 row)
-
--- mcv covering just a small fraction of data
-CREATE TABLE mcv_lists_partial (
-    a INT,
-    b INT,
-    c INT
-);
--- 10 frequent groups, each with 100 elements
-INSERT INTO mcv_lists_partial (a, b, c)
-  SELECT
-    mod(i,10),
-    mod(i,10),
-    mod(i,10)
-  FROM generate_series(0,999) s(i);
--- 100 groups that will make it to the MCV list (includes the 10 frequent ones)
-INSERT INTO mcv_lists_partial (a, b, c)
-  SELECT
-    i,
-    i,
-    i
-  FROM generate_series(0,99) s(i);
--- 4000 groups in total, most of which won't make it (just a single item)
-INSERT INTO mcv_lists_partial (a, b, c)
-  SELECT
-    i,
-    i,
-    i
-  FROM generate_series(0,3999) s(i);
-ANALYZE mcv_lists_partial;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 AND b = 0 AND c = 0');
- estimated | actual
------------+--------
-         1 |    102
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 OR b = 0 OR c = 0');
- estimated | actual
------------+--------
-       300 |    102
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 10 AND b = 10 AND c = 10');
- estimated | actual
------------+--------
-         1 |      2
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 10 OR b = 10 OR c = 10');
- estimated | actual
------------+--------
-         6 |      2
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 AND b = 0 AND c = 10');
- estimated | actual
------------+--------
-         1 |      0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 OR b = 0 OR c = 10');
- estimated | actual
------------+--------
-       204 |    104
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE (a = 0 AND b = 0 AND c = 0) OR (a = 1 AND b = 1 AND c = 1) OR (a = 2 AND b = 2 AND c = 2)');
- estimated | actual
------------+--------
-         1 |    306
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE (a = 0 AND b = 0) OR (a = 0 AND c = 0) OR (b = 0 AND c = 0)');
- estimated | actual
------------+--------
-         6 |    102
-(1 row)
-
-CREATE STATISTICS mcv_lists_partial_stats (mcv) ON a, b, c
-  FROM mcv_lists_partial;
-ANALYZE mcv_lists_partial;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 AND b = 0 AND c = 0');
- estimated | actual
------------+--------
-       102 |    102
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 OR b = 0 OR c = 0');
- estimated | actual
------------+--------
-        96 |    102
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 10 AND b = 10 AND c = 10');
- estimated | actual
------------+--------
-         2 |      2
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 10 OR b = 10 OR c = 10');
- estimated | actual
------------+--------
-         2 |      2
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 AND b = 0 AND c = 10');
- estimated | actual
------------+--------
-         1 |      0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 OR b = 0 OR c = 10');
- estimated | actual
------------+--------
-       102 |    104
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE (a = 0 AND b = 0 AND c = 0) OR (a = 1 AND b = 1 AND c = 1) OR (a = 2 AND b = 2 AND c = 2)');
- estimated | actual
------------+--------
-       306 |    306
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE (a = 0 AND b = 0) OR (a = 0 AND c = 0) OR (b = 0 AND c = 0)');
- estimated | actual
------------+--------
-       108 |    102
-(1 row)
-
-DROP TABLE mcv_lists_partial;
--- check the ability to use multiple MCV lists
-CREATE TABLE mcv_lists_multi (
-    a INTEGER,
-    b INTEGER,
-    c INTEGER,
-    d INTEGER
-)
-WITH (autovacuum_enabled = off);
-INSERT INTO mcv_lists_multi (a, b, c, d)
-  SELECT
-    mod(i,5),
-    mod(i,5),
-    mod(i,7),
-    mod(i,7)
-  FROM generate_series(1,5000) s(i);
-ANALYZE mcv_lists_multi;
--- estimates without any mcv statistics
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 AND b = 0');
- estimated | actual
------------+--------
-       200 |   1000
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE c = 0 AND d = 0');
- estimated | actual
------------+--------
-       102 |    714
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE b = 0 AND c = 0');
- estimated | actual
------------+--------
-       143 |    142
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE b = 0 OR c = 0');
- estimated | actual
------------+--------
-      1571 |   1572
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 AND b = 0 AND c = 0 AND d = 0');
- estimated | actual
------------+--------
-         4 |    142
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE (a = 0 AND b = 0) OR (c = 0 AND d = 0)');
- estimated | actual
------------+--------
-       298 |   1572
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 OR b = 0 OR c = 0 OR d = 0');
- estimated | actual
------------+--------
-      2649 |   1572
-(1 row)
-
--- create separate MCV statistics
-CREATE STATISTICS mcv_lists_multi_1 (mcv) ON a, b FROM mcv_lists_multi;
-CREATE STATISTICS mcv_lists_multi_2 (mcv) ON c, d FROM mcv_lists_multi;
-ANALYZE mcv_lists_multi;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 AND b = 0');
- estimated | actual
------------+--------
-      1000 |   1000
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE c = 0 AND d = 0');
- estimated | actual
------------+--------
-       714 |    714
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE b = 0 AND c = 0');
- estimated | actual
------------+--------
-       143 |    142
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE b = 0 OR c = 0');
- estimated | actual
------------+--------
-      1571 |   1572
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 AND b = 0 AND c = 0 AND d = 0');
- estimated | actual
------------+--------
-       143 |    142
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE (a = 0 AND b = 0) OR (c = 0 AND d = 0)');
- estimated | actual
------------+--------
-      1571 |   1572
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 OR b = 0 OR c = 0 OR d = 0');
- estimated | actual
------------+--------
-      1571 |   1572
-(1 row)
-
-DROP TABLE mcv_lists_multi;
--- statistics on integer expressions
-CREATE TABLE expr_stats (a int, b int, c int);
-INSERT INTO expr_stats SELECT mod(i,10), mod(i,10), mod(i,10) FROM generate_series(1,1000) s(i);
-ANALYZE expr_stats;
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE (2*a) = 0 AND (3*b) = 0');
- estimated | actual
------------+--------
-         1 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE (a+b) = 0 AND (a-b) = 0');
- estimated | actual
------------+--------
-         1 |    100
-(1 row)
-
-CREATE STATISTICS expr_stats_1 (mcv) ON (a+b), (a-b), (2*a), (3*b) FROM expr_stats;
-ANALYZE expr_stats;
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE (2*a) = 0 AND (3*b) = 0');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE (a+b) = 0 AND (a-b) = 0');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
-DROP STATISTICS expr_stats_1;
-DROP TABLE expr_stats;
--- statistics on a mix of columns and expressions
-CREATE TABLE expr_stats (a int, b int, c int);
-INSERT INTO expr_stats SELECT mod(i,10), mod(i,10), mod(i,10) FROM generate_series(1,1000) s(i);
-ANALYZE expr_stats;
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (2*a) = 0 AND (3*b) = 0');
- estimated | actual
------------+--------
-         1 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 3 AND b = 3 AND (a-b) = 0');
- estimated | actual
------------+--------
-         1 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND b = 1 AND (a-b) = 0');
- estimated | actual
------------+--------
-         1 |      0
-(1 row)
-
-CREATE STATISTICS expr_stats_1 (mcv) ON a, b, (2*a), (3*b), (a+b), (a-b) FROM expr_stats;
-ANALYZE expr_stats;
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (2*a) = 0 AND (3*b) = 0');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 3 AND b = 3 AND (a-b) = 0');
- estimated | actual
------------+--------
-       100 |    100
-(1 row)
-
-SELECT * FROM
check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND b = 1 AND (a-b) = 0'); - estimated | actual ------------+-------- - 1 | 0 -(1 row) - -DROP TABLE expr_stats; --- statistics on expressions with different data types -CREATE TABLE expr_stats (a int, b name, c text); -INSERT INTO expr_stats SELECT mod(i,10), fipshash(mod(i,10)::text), fipshash(mod(i,10)::text) FROM generate_series(1,1000) s(i); -ANALYZE expr_stats; -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (b || c) <= ''z'' AND (c || b) >= ''0'''); - estimated | actual ------------+-------- - 11 | 100 -(1 row) - -CREATE STATISTICS expr_stats_1 (mcv) ON a, b, (b || c), (c || b) FROM expr_stats; -ANALYZE expr_stats; -SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (b || c) <= ''z'' AND (c || b) >= ''0'''); - estimated | actual ------------+-------- - 100 | 100 -(1 row) - -DROP TABLE expr_stats; --- test handling of a mix of compatible and incompatible expressions -CREATE TABLE expr_stats_incompatible_test ( - c0 double precision, - c1 boolean NOT NULL -); -CREATE STATISTICS expr_stat_comp_1 ON c0, c1 FROM expr_stats_incompatible_test; -INSERT INTO expr_stats_incompatible_test VALUES (1234,false), (5678,true); -ANALYZE expr_stats_incompatible_test; -SELECT c0 FROM ONLY expr_stats_incompatible_test WHERE -( - upper('x') LIKE ('x'||('[0,1]'::int4range)) - AND - (c0 IN (0, 1) OR c1) -); - c0 ----- -(0 rows) - -DROP TABLE expr_stats_incompatible_test; --- Permission tests. Users should not be able to see specific data values in --- the extended statistics, if they lack permission to see those values in --- the underlying table. --- --- Currently this is only relevant for MCV stats. -CREATE SCHEMA tststats; -CREATE TABLE tststats.priv_test_tbl ( - a int, - b int -); -INSERT INTO tststats.priv_test_tbl - SELECT mod(i,5), mod(i,10) FROM generate_series(1,100) s(i); -CREATE STATISTICS tststats.priv_test_stats (mcv) ON a, b - FROM tststats.priv_test_tbl; -ANALYZE tststats.priv_test_tbl; --- Check printing info about extended statistics by \dX -create table stts_t1 (a int, b int); -create statistics (ndistinct) on a, b from stts_t1; -create statistics (ndistinct, dependencies) on a, b from stts_t1; -create statistics (ndistinct, dependencies, mcv) on a, b from stts_t1; -create table stts_t2 (a int, b int, c int); -create statistics on b, c from stts_t2; -create table stts_t3 (col1 int, col2 int, col3 int); -create statistics stts_hoge on col1, col2, col3 from stts_t3; -create schema stts_s1; -create schema stts_s2; -create statistics stts_s1.stts_foo on col1, col2 from stts_t3; -create statistics stts_s2.stts_yama (dependencies, mcv) on col1, col3 from stts_t3; -insert into stts_t1 select i,i from generate_series(1,100) i; -analyze stts_t1; -set search_path to public, stts_s1, stts_s2, tststats; -\dX - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV -----------+------------------------+------------------------------------------------------------------+-----------+--------------+--------- - public | func_deps_stat | (a * 2), upper(b), (c + 1::numeric) FROM functional_dependencies | | defined | - public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined - public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined - public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined - public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined - public | stts_t1_a_b_stat | a, b FROM 
stts_t1 | defined | | - public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | - public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined - public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined - stts_s1 | stts_foo | col1, col2 FROM stts_t3 | defined | defined | defined - stts_s2 | stts_yama | col1, col3 FROM stts_t3 | | defined | defined - tststats | priv_test_stats | a, b FROM priv_test_tbl | | | defined -(12 rows) - -\dX stts_t* - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+-------------------+-------------------+-----------+--------------+--------- - public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | | - public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | - public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined - public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined -(4 rows) - -\dX *stts_hoge - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+-----------+-------------------------------+-----------+--------------+--------- - public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined -(1 row) - -\dX+ - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV -----------+------------------------+------------------------------------------------------------------+-----------+--------------+--------- - public | func_deps_stat | (a * 2), upper(b), (c + 1::numeric) FROM functional_dependencies | | defined | - public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined - public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined - public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined - public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined - public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | | - public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | - public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined - public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined - stts_s1 | stts_foo | col1, col2 FROM stts_t3 | defined | defined | defined - stts_s2 | stts_yama | col1, col3 FROM stts_t3 | | defined | defined - tststats | priv_test_stats | a, b FROM priv_test_tbl | | | defined -(12 rows) - -\dX+ stts_t* - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+-------------------+-------------------+-----------+--------------+--------- - public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | | - public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | - public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined - public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined -(4 rows) - -\dX+ *stts_hoge - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+-----------+-------------------------------+-----------+--------------+--------- - public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined -(1 row) - -\dX+ stts_s2.stts_yama - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ----------+-----------+-------------------------+-----------+--------------+--------- - stts_s2 | stts_yama | col1, col3 FROM stts_t3 | | defined | defined -(1 row) - -create statistics (mcv) ON a, b, (a+b), (a-b) 
FROM stts_t1; -create statistics (mcv) ON a, b, (a+b), (a-b) FROM stts_t1; -create statistics (mcv) ON (a+b), (a-b) FROM stts_t1; -\dX stts_t*expr* - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+-----------------------------+-------------------------------------+-----------+--------------+--------- - public | stts_t1_a_b_expr_expr_stat | a, b, (a + b), (a - b) FROM stts_t1 | | | defined - public | stts_t1_a_b_expr_expr_stat1 | a, b, (a + b), (a - b) FROM stts_t1 | | | defined - public | stts_t1_expr_expr_stat | (a + b), (a - b) FROM stts_t1 | | | defined -(3 rows) - -drop statistics stts_t1_a_b_expr_expr_stat; -drop statistics stts_t1_a_b_expr_expr_stat1; -drop statistics stts_t1_expr_expr_stat; -set search_path to public, stts_s1; -\dX - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ----------+------------------------+------------------------------------------------------------------+-----------+--------------+--------- - public | func_deps_stat | (a * 2), upper(b), (c + 1::numeric) FROM functional_dependencies | | defined | - public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined - public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined - public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined - public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined - public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | | - public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | - public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined - public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined - stts_s1 | stts_foo | col1, col2 FROM stts_t3 | defined | defined | defined -(10 rows) - -create role regress_stats_ext nosuperuser; -set role regress_stats_ext; -\dX - List of extended statistics - Schema | Name | Definition | Ndistinct | Dependencies | MCV ---------+------------------------+------------------------------------------------------------------+-----------+--------------+--------- - public | func_deps_stat | (a * 2), upper(b), (c + 1::numeric) FROM functional_dependencies | | defined | - public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined - public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined - public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined - public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined - public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | | - public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | - public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined - public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined -(9 rows) - -reset role; -drop table stts_t1, stts_t2, stts_t3; -drop schema stts_s1, stts_s2 cascade; -drop user regress_stats_ext; -reset search_path; --- User with no access -CREATE USER regress_stats_user1; -GRANT USAGE ON SCHEMA tststats TO regress_stats_user1; -SET SESSION AUTHORIZATION regress_stats_user1; -SELECT * FROM tststats.priv_test_tbl; -- Permission denied -ERROR: permission denied for table priv_test_tbl --- Check individual columns if we don't have table privilege -SELECT * FROM tststats.priv_test_tbl - WHERE a = 1 and tststats.priv_test_tbl.* > (1, 1) is not null; -ERROR: permission denied for table priv_test_tbl --- Attempt to gain access using a leaky operator -CREATE FUNCTION 
op_leak(int, int) RETURNS bool - AS 'BEGIN RAISE NOTICE ''op_leak => %, %'', $1, $2; RETURN $1 < $2; END' - LANGUAGE plpgsql; -CREATE OPERATOR <<< (procedure = op_leak, leftarg = int, rightarg = int, - restrict = scalarltsel); -SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied -ERROR: permission denied for table priv_test_tbl -SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0; -ERROR: permission denied for table priv_test_tbl -DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied -ERROR: permission denied for table priv_test_tbl --- Grant access via a security barrier view, but hide all data -RESET SESSION AUTHORIZATION; -CREATE VIEW tststats.priv_test_view WITH (security_barrier=true) - AS SELECT * FROM tststats.priv_test_tbl WHERE false; -GRANT SELECT, DELETE ON tststats.priv_test_view TO regress_stats_user1; --- Should now have access via the view, but see nothing and leak nothing -SET SESSION AUTHORIZATION regress_stats_user1; -SELECT * FROM tststats.priv_test_view WHERE a <<< 0 AND b <<< 0; -- Should not leak - a | b ----+--- -(0 rows) - -SELECT * FROM tststats.priv_test_view WHERE a <<< 0 OR b <<< 0; -- Should not leak - a | b ----+--- -(0 rows) - -DELETE FROM tststats.priv_test_view WHERE a <<< 0 AND b <<< 0; -- Should not leak --- Grant table access, but hide all data with RLS -RESET SESSION AUTHORIZATION; -ALTER TABLE tststats.priv_test_tbl ENABLE ROW LEVEL SECURITY; -GRANT SELECT, DELETE ON tststats.priv_test_tbl TO regress_stats_user1; --- Should now have direct table access, but see nothing and leak nothing -SET SESSION AUTHORIZATION regress_stats_user1; -SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak - a | b ----+--- -(0 rows) - -SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0; - a | b ----+--- -(0 rows) - -DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak --- privilege checks for pg_stats_ext and pg_stats_ext_exprs -RESET SESSION AUTHORIZATION; -CREATE TABLE stats_ext_tbl (id INT PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY, col TEXT); -INSERT INTO stats_ext_tbl (col) VALUES ('secret'), ('secret'), ('very secret'); -CREATE STATISTICS s_col ON id, col FROM stats_ext_tbl; -CREATE STATISTICS s_expr ON mod(id, 2), lower(col) FROM stats_ext_tbl; -ANALYZE stats_ext_tbl; --- unprivileged role should not have access -SET SESSION AUTHORIZATION regress_stats_user1; -SELECT statistics_name, most_common_vals FROM pg_stats_ext x - WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*); - statistics_name | most_common_vals ------------------+------------------ -(0 rows) - -SELECT statistics_name, most_common_vals FROM pg_stats_ext_exprs x - WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*); - statistics_name | most_common_vals ------------------+------------------ -(0 rows) - --- give unprivileged role ownership of table -RESET SESSION AUTHORIZATION; -ALTER TABLE stats_ext_tbl OWNER TO regress_stats_user1; --- unprivileged role should now have access -SET SESSION AUTHORIZATION regress_stats_user1; -SELECT statistics_name, most_common_vals FROM pg_stats_ext x - WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*); - statistics_name | most_common_vals ------------------+------------------------------------------- - s_col | {{1,secret},{2,secret},{3,"very secret"}} - s_expr | {{0,secret},{1,secret},{1,"very secret"}} -(2 rows) - -SELECT statistics_name, most_common_vals FROM pg_stats_ext_exprs x - WHERE tablename = 'stats_ext_tbl' ORDER BY 
ROW(x.*); - statistics_name | most_common_vals ------------------+------------------ - s_expr | {secret} - s_expr | {1} -(2 rows) - --- Tidy up -DROP OPERATOR <<< (int, int); -DROP FUNCTION op_leak(int, int); -RESET SESSION AUTHORIZATION; -DROP TABLE stats_ext_tbl; -DROP SCHEMA tststats CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table tststats.priv_test_tbl -drop cascades to view tststats.priv_test_view -DROP USER regress_stats_user1; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/collate.linux.utf8_1.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/collate.linux.utf8.out --- /Users/admin/pgsql/src/test/regress/expected/collate.linux.utf8_1.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/collate.linux.utf8.out 2024-12-13 13:20:10 @@ -1,11 +1,2 @@ -/* - * This test is for Linux/glibc systems and assumes that a full set of - * locales is installed. It must be run in a database with UTF-8 encoding, - * because other encodings don't support all the characters used. - */ -SELECT getdatabaseencoding() <> 'UTF8' OR - (SELECT count(*) FROM pg_collation WHERE collname IN ('de_DE', 'en_US', 'sv_SE', 'tr_TR') AND collencoding = pg_char_to_encoding('UTF8')) <> 4 OR - version() !~ 'linux-gnu' - AS skip_test \gset -\if :skip_test -\quit +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/collate.windows.win1252_1.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/collate.windows.win1252.out --- /Users/admin/pgsql/src/test/regress/expected/collate.windows.win1252_1.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/collate.windows.win1252.out 2024-12-13 13:20:10 @@ -1,13 +1,2 @@ -/* - * This test is meant to run on Windows systems that has successfully - * run pg_import_system_collations(). Also, the database must have - * WIN1252 encoding, because of the locales' own encodings. Because - * of this, some test are lost from UTF-8 version, such as Turkish - * dotted and undotted 'i'. - */ -SELECT getdatabaseencoding() <> 'WIN1252' OR - (SELECT count(*) FROM pg_collation WHERE collname IN ('de_DE', 'en_US', 'sv_SE') AND collencoding = pg_char_to_encoding('WIN1252')) <> 3 OR - (version() !~ 'Visual C\+\+' AND version() !~ 'mingw32' AND version() !~ 'windows') - AS skip_test \gset -\if :skip_test -\quit +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? 
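A note on the stats_ext.out hunk above before moving on: like the two collate hunks after it, it shows the entire expected file removed and replaced by a psql connection failure, meaning the script never got to run against a live server. The expected content it quotes revolves around one idea: without extended statistics the planner multiplies per-column selectivities as if the columns were independent, so correlated columns (each mcv_lists_* table above derives all its columns from the same generate_series counter) yield estimates that are off by orders of magnitude; after CREATE STATISTICS ... (mcv) plus ANALYZE, the multivariate MCV list supplies joint frequencies and "estimated" converges on "actual". Every probe goes through the suite's check_estimated_rows() helper, which is defined earlier in stats_ext.sql, outside this excerpt. A minimal sketch of such a helper (the suite's real definition differs in details) could look like:

    -- Sketch only: the regression suite defines its own check_estimated_rows().
    CREATE OR REPLACE FUNCTION check_estimated_rows(query text)
    RETURNS TABLE (estimated int, actual int)
    LANGUAGE plpgsql AS
    $$
    DECLARE
        ln text;
    BEGIN
        -- Run the query under EXPLAIN ANALYZE and read the planner's row
        -- estimate and the executor's actual row count off the top plan node.
        FOR ln IN EXECUTE
            'EXPLAIN (ANALYZE, TIMING OFF, SUMMARY OFF) ' || query
        LOOP
            IF ln ~ 'rows=\d+.*actual' THEN
                estimated := (regexp_match(ln, 'rows=(\d+)'))[1]::int;
                actual    := (regexp_match(ln, 'actual.*rows=(\d+)'))[1]::int;
                RETURN NEXT;
                EXIT;   -- the first matching line is the top plan node
            END IF;
        END LOOP;
    END;
    $$;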
diff -U3 /Users/admin/pgsql/src/test/regress/expected/select_parallel.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/select_parallel.out --- /Users/admin/pgsql/src/test/regress/expected/select_parallel.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/select_parallel.out 2024-12-13 13:20:11 @@ -1,1436 +1,2 @@ --- --- PARALLEL --- --- Save parallel worker stats, used for comparison at the end -select pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -select parallel_workers_to_launch as parallel_workers_to_launch_before, - parallel_workers_launched as parallel_workers_launched_before - from pg_stat_database - where datname = current_database() \gset -create function sp_parallel_restricted(int) returns int as - $$begin return $1; end$$ language plpgsql parallel restricted; -begin; --- encourage use of parallel plans -set parallel_setup_cost=0; -set parallel_tuple_cost=0; -set min_parallel_table_scan_size=0; -set max_parallel_workers_per_gather=4; --- Parallel Append with partial-subplans -explain (costs off) - select round(avg(aa)), sum(aa) from a_star; - QUERY PLAN --------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 3 - -> Partial Aggregate - -> Parallel Append - -> Parallel Seq Scan on d_star a_star_4 - -> Parallel Seq Scan on f_star a_star_6 - -> Parallel Seq Scan on e_star a_star_5 - -> Parallel Seq Scan on b_star a_star_2 - -> Parallel Seq Scan on c_star a_star_3 - -> Parallel Seq Scan on a_star a_star_1 -(11 rows) - -select round(avg(aa)), sum(aa) from a_star a1; - round | sum --------+----- - 14 | 355 -(1 row) - --- Parallel Append with both partial and non-partial subplans -alter table c_star set (parallel_workers = 0); -alter table d_star set (parallel_workers = 0); -explain (costs off) - select round(avg(aa)), sum(aa) from a_star; - QUERY PLAN --------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 3 - -> Partial Aggregate - -> Parallel Append - -> Seq Scan on d_star a_star_4 - -> Seq Scan on c_star a_star_3 - -> Parallel Seq Scan on f_star a_star_6 - -> Parallel Seq Scan on e_star a_star_5 - -> Parallel Seq Scan on b_star a_star_2 - -> Parallel Seq Scan on a_star a_star_1 -(11 rows) - -select round(avg(aa)), sum(aa) from a_star a2; - round | sum --------+----- - 14 | 355 -(1 row) - --- Parallel Append with only non-partial subplans -alter table a_star set (parallel_workers = 0); -alter table b_star set (parallel_workers = 0); -alter table e_star set (parallel_workers = 0); -alter table f_star set (parallel_workers = 0); -explain (costs off) - select round(avg(aa)), sum(aa) from a_star; - QUERY PLAN ------------------------------------------------------ - Finalize Aggregate - -> Gather - Workers Planned: 3 - -> Partial Aggregate - -> Parallel Append - -> Seq Scan on d_star a_star_4 - -> Seq Scan on f_star a_star_6 - -> Seq Scan on e_star a_star_5 - -> Seq Scan on b_star a_star_2 - -> Seq Scan on c_star a_star_3 - -> Seq Scan on a_star a_star_1 -(11 rows) - -select round(avg(aa)), sum(aa) from a_star a3; - round | sum --------+----- - 14 | 355 -(1 row) - --- Disable Parallel Append -alter table a_star reset (parallel_workers); -alter table b_star reset (parallel_workers); -alter table c_star reset (parallel_workers); -alter table d_star reset (parallel_workers); -alter table e_star reset (parallel_workers); -alter table f_star reset 
(parallel_workers); -set enable_parallel_append to off; -explain (costs off) - select round(avg(aa)), sum(aa) from a_star; - QUERY PLAN --------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 1 - -> Partial Aggregate - -> Append - -> Parallel Seq Scan on a_star a_star_1 - -> Parallel Seq Scan on b_star a_star_2 - -> Parallel Seq Scan on c_star a_star_3 - -> Parallel Seq Scan on d_star a_star_4 - -> Parallel Seq Scan on e_star a_star_5 - -> Parallel Seq Scan on f_star a_star_6 -(11 rows) - -select round(avg(aa)), sum(aa) from a_star a4; - round | sum --------+----- - 14 | 355 -(1 row) - -reset enable_parallel_append; --- Parallel Append that runs serially -create function sp_test_func() returns setof text as -$$ select 'foo'::varchar union all select 'bar'::varchar $$ -language sql stable; -select sp_test_func() order by 1; - sp_test_func --------------- - bar - foo -(2 rows) - --- Parallel Append is not to be used when the subpath depends on the outer param -create table part_pa_test(a int, b int) partition by range(a); -create table part_pa_test_p1 partition of part_pa_test for values from (minvalue) to (0); -create table part_pa_test_p2 partition of part_pa_test for values from (0) to (maxvalue); -explain (costs off) - select (select max((select pa1.b from part_pa_test pa1 where pa1.a = pa2.a))) - from part_pa_test pa2; - QUERY PLAN --------------------------------------------------------------- - Aggregate - -> Gather - Workers Planned: 3 - -> Parallel Append - -> Parallel Seq Scan on part_pa_test_p1 pa2_1 - -> Parallel Seq Scan on part_pa_test_p2 pa2_2 - SubPlan 2 - -> Result - SubPlan 1 - -> Append - -> Seq Scan on part_pa_test_p1 pa1_1 - Filter: (a = pa2.a) - -> Seq Scan on part_pa_test_p2 pa1_2 - Filter: (a = pa2.a) -(14 rows) - -drop table part_pa_test; --- test with leader participation disabled -set parallel_leader_participation = off; -explain (costs off) - select count(*) from tenk1 where stringu1 = 'GRAAAA'; - QUERY PLAN ---------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Seq Scan on tenk1 - Filter: (stringu1 = 'GRAAAA'::name) -(6 rows) - -select count(*) from tenk1 where stringu1 = 'GRAAAA'; - count -------- - 15 -(1 row) - --- test with leader participation disabled, but no workers available (so --- the leader will have to run the plan despite the setting) -set max_parallel_workers = 0; -explain (costs off) - select count(*) from tenk1 where stringu1 = 'GRAAAA'; - QUERY PLAN ---------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Seq Scan on tenk1 - Filter: (stringu1 = 'GRAAAA'::name) -(6 rows) - -select count(*) from tenk1 where stringu1 = 'GRAAAA'; - count -------- - 15 -(1 row) - -reset max_parallel_workers; -reset parallel_leader_participation; --- test that parallel_restricted function doesn't run in worker -alter table tenk1 set (parallel_workers = 4); -explain (verbose, costs off) -select sp_parallel_restricted(unique1) from tenk1 - where stringu1 = 'GRAAAA' order by 1; - QUERY PLAN ---------------------------------------------------------- - Sort - Output: (sp_parallel_restricted(unique1)) - Sort Key: (sp_parallel_restricted(tenk1.unique1)) - -> Gather - Output: sp_parallel_restricted(unique1) - Workers Planned: 4 - -> Parallel Seq Scan on public.tenk1 - Output: unique1 - Filter: (tenk1.stringu1 = 'GRAAAA'::name) -(9 
rows) - --- test parallel plan when group by expression is in target list. -explain (costs off) - select length(stringu1) from tenk1 group by length(stringu1); - QUERY PLAN ---------------------------------------------------- - Finalize HashAggregate - Group Key: (length((stringu1)::text)) - -> Gather - Workers Planned: 4 - -> Partial HashAggregate - Group Key: length((stringu1)::text) - -> Parallel Seq Scan on tenk1 -(7 rows) - -select length(stringu1) from tenk1 group by length(stringu1); - length --------- - 6 -(1 row) - -explain (costs off) - select stringu1, count(*) from tenk1 group by stringu1 order by stringu1; - QUERY PLAN ----------------------------------------------------- - Sort - Sort Key: stringu1 - -> Finalize HashAggregate - Group Key: stringu1 - -> Gather - Workers Planned: 4 - -> Partial HashAggregate - Group Key: stringu1 - -> Parallel Seq Scan on tenk1 -(9 rows) - --- test that parallel plan for aggregates is not selected when --- target list contains parallel restricted clause. -explain (costs off) - select sum(sp_parallel_restricted(unique1)) from tenk1 - group by(sp_parallel_restricted(unique1)); - QUERY PLAN -------------------------------------------------------------------- - HashAggregate - Group Key: sp_parallel_restricted(unique1) - -> Gather - Workers Planned: 4 - -> Parallel Index Only Scan using tenk1_unique1 on tenk1 -(5 rows) - --- test prepared statement -prepare tenk1_count(integer) As select count((unique1)) from tenk1 where hundred > $1; -explain (costs off) execute tenk1_count(1); - QUERY PLAN ----------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Seq Scan on tenk1 - Filter: (hundred > 1) -(6 rows) - -execute tenk1_count(1); - count -------- - 9800 -(1 row) - -deallocate tenk1_count; --- test parallel plans for queries containing un-correlated subplans. -alter table tenk2 set (parallel_workers = 0); -explain (costs off) - select count(*) from tenk1 where (two, four) not in - (select hundred, thousand from tenk2 where thousand > 100); - QUERY PLAN ----------------------------------------------------------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Seq Scan on tenk1 - Filter: (NOT (ANY ((two = (hashed SubPlan 1).col1) AND (four = (hashed SubPlan 1).col2)))) - SubPlan 1 - -> Seq Scan on tenk2 - Filter: (thousand > 100) -(9 rows) - -select count(*) from tenk1 where (two, four) not in - (select hundred, thousand from tenk2 where thousand > 100); - count -------- - 10000 -(1 row) - --- this is not parallel-safe due to use of random() within SubLink's testexpr: -explain (costs off) - select * from tenk1 where (unique1 + random())::integer not in - (select ten from tenk2); - QUERY PLAN -------------------------------------------------------------------------------------------------------- - Seq Scan on tenk1 - Filter: (NOT (ANY ((((unique1)::double precision + random()))::integer = (hashed SubPlan 1).col1))) - SubPlan 1 - -> Seq Scan on tenk2 -(4 rows) - -alter table tenk2 reset (parallel_workers); --- test parallel plan for a query containing initplan. 
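Every plan in this select_parallel.out hunk depends on the cost settings established at its top (parallel_setup_cost, parallel_tuple_cost and min_parallel_table_scan_size zeroed, max_parallel_workers_per_gather raised to 4); without them, tables as small as tenk1 and tenk2 would rarely get parallel plans at all. To reproduce any of these plans interactively, the same knobs can be set locally, for example:

    BEGIN;
    SET LOCAL parallel_setup_cost = 0;
    SET LOCAL parallel_tuple_cost = 0;
    SET LOCAL min_parallel_table_scan_size = 0;
    SET LOCAL max_parallel_workers_per_gather = 4;
    EXPLAIN (COSTS OFF)
        SELECT count(*) FROM tenk1 WHERE stringu1 = 'GRAAAA';
    ROLLBACK;

The initplan test that follows builds on this: the uncorrelated subquery (SELECT max(tenk2.unique1) FROM tenk2) becomes an InitPlan that is itself parallelized under its own Gather, and its result is then fed as a filter into a parallel scan of tenk1.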
-set enable_indexscan = off; -set enable_indexonlyscan = off; -set enable_bitmapscan = off; -alter table tenk2 set (parallel_workers = 2); -explain (costs off) - select count(*) from tenk1 - where tenk1.unique1 = (Select max(tenk2.unique1) from tenk2); - QUERY PLAN ------------------------------------------------------- - Aggregate - InitPlan 1 - -> Finalize Aggregate - -> Gather - Workers Planned: 2 - -> Partial Aggregate - -> Parallel Seq Scan on tenk2 - -> Gather - Workers Planned: 4 - -> Parallel Seq Scan on tenk1 - Filter: (unique1 = (InitPlan 1).col1) -(11 rows) - -select count(*) from tenk1 - where tenk1.unique1 = (Select max(tenk2.unique1) from tenk2); - count -------- - 1 -(1 row) - -reset enable_indexscan; -reset enable_indexonlyscan; -reset enable_bitmapscan; -alter table tenk2 reset (parallel_workers); --- test parallel index scans. -set enable_seqscan to off; -set enable_bitmapscan to off; -set random_page_cost = 2; -explain (costs off) - select count((unique1)) from tenk1 where hundred > 1; - QUERY PLAN --------------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Index Scan using tenk1_hundred on tenk1 - Index Cond: (hundred > 1) -(6 rows) - -select count((unique1)) from tenk1 where hundred > 1; - count -------- - 9800 -(1 row) - --- Parallel ScalarArrayOp index scan -explain (costs off) - select count((unique1)) from tenk1 - where hundred = any ((select array_agg(i) from generate_series(1, 100, 15) i)::int[]); - QUERY PLAN ---------------------------------------------------------------------- - Finalize Aggregate - InitPlan 1 - -> Aggregate - -> Function Scan on generate_series i - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Index Scan using tenk1_hundred on tenk1 - Index Cond: (hundred = ANY ((InitPlan 1).col1)) -(9 rows) - -select count((unique1)) from tenk1 -where hundred = any ((select array_agg(i) from generate_series(1, 100, 15) i)::int[]); - count -------- - 700 -(1 row) - --- test parallel index-only scans. 
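One thing worth remembering when reading the index-only-scan plans below: an Index Only Scan, parallel or not, only skips heap fetches for pages the visibility map marks all-visible, which the suite's setup arranges by vacuuming the regression tables. A self-contained reproduction outside the suite therefore needs an explicit VACUUM (a sketch using a scratch table, not part of the test; the exact plan chosen still depends on costs):

    CREATE TABLE ios_demo AS
        SELECT g AS thousand FROM generate_series(1, 100000) g;
    CREATE INDEX ON ios_demo (thousand);
    VACUUM ANALYZE ios_demo;           -- set visibility-map bits
    SET max_parallel_workers_per_gather = 4;
    SET min_parallel_table_scan_size = 0;
    SET min_parallel_index_scan_size = 0;
    SET enable_seqscan = off;          -- steer to the index, as the test does
    EXPLAIN (COSTS OFF) SELECT count(*) FROM ios_demo WHERE thousand > 95;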
-explain (costs off) - select count(*) from tenk1 where thousand > 95; - QUERY PLAN --------------------------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Index Only Scan using tenk1_thous_tenthous on tenk1 - Index Cond: (thousand > 95) -(6 rows) - -select count(*) from tenk1 where thousand > 95; - count -------- - 9040 -(1 row) - --- test rescan cases too -set enable_material = false; -explain (costs off) -select * from - (select count(unique1) from tenk1 where hundred > 10) ss - right join (values (1),(2),(3)) v(x) on true; - QUERY PLAN --------------------------------------------------------------------------- - Nested Loop Left Join - -> Values Scan on "*VALUES*" - -> Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Index Scan using tenk1_hundred on tenk1 - Index Cond: (hundred > 10) -(8 rows) - -select * from - (select count(unique1) from tenk1 where hundred > 10) ss - right join (values (1),(2),(3)) v(x) on true; - count | x --------+--- - 8900 | 1 - 8900 | 2 - 8900 | 3 -(3 rows) - -explain (costs off) -select * from - (select count(*) from tenk1 where thousand > 99) ss - right join (values (1),(2),(3)) v(x) on true; - QUERY PLAN --------------------------------------------------------------------------------------- - Nested Loop Left Join - -> Values Scan on "*VALUES*" - -> Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Index Only Scan using tenk1_thous_tenthous on tenk1 - Index Cond: (thousand > 99) -(8 rows) - -select * from - (select count(*) from tenk1 where thousand > 99) ss - right join (values (1),(2),(3)) v(x) on true; - count | x --------+--- - 9000 | 1 - 9000 | 2 - 9000 | 3 -(3 rows) - --- test rescans for a Limit node with a parallel node beneath it. -reset enable_seqscan; -set enable_indexonlyscan to off; -set enable_indexscan to off; -alter table tenk1 set (parallel_workers = 0); -alter table tenk2 set (parallel_workers = 1); -explain (costs off) -select count(*) from tenk1 - left join (select tenk2.unique1 from tenk2 order by 1 limit 1000) ss - on tenk1.unique1 < ss.unique1 + 1 - where tenk1.unique1 < 2; - QUERY PLAN ------------------------------------------------------------- - Aggregate - -> Nested Loop Left Join - Join Filter: (tenk1.unique1 < (tenk2.unique1 + 1)) - -> Seq Scan on tenk1 - Filter: (unique1 < 2) - -> Limit - -> Gather Merge - Workers Planned: 1 - -> Sort - Sort Key: tenk2.unique1 - -> Parallel Seq Scan on tenk2 -(11 rows) - -select count(*) from tenk1 - left join (select tenk2.unique1 from tenk2 order by 1 limit 1000) ss - on tenk1.unique1 < ss.unique1 + 1 - where tenk1.unique1 < 2; - count -------- - 1999 -(1 row) - ---reset the value of workers for each table as it was before this test. -alter table tenk1 set (parallel_workers = 4); -alter table tenk2 reset (parallel_workers); -reset enable_material; -reset enable_bitmapscan; -reset enable_indexonlyscan; -reset enable_indexscan; --- test parallel bitmap heap scan. 
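In the parallel bitmap heap scan tested below, the TID bitmap itself is built by a single process (the Bitmap Index Scan node sits under the parallel node but is not itself parallel); the workers then divide up the heap pages the bitmap selects. The hunk also shrinks work_mem to 64kB so the bitmap overflows into lossy, page-level entries, which EXPLAIN ANALYZE reports per node:

    -- Forcing lossy bitmap pages the same way the test does
    -- (assumes bmscantest, created inside the hunk below, still exists):
    SET work_mem = '64kB';
    EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF)
        SELECT count(*) FROM bmscantest WHERE a > 1;
    RESET work_mem;
    -- look for "Heap Blocks: exact=... lossy=..." in the output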
-set enable_seqscan to off; -set enable_indexscan to off; -set enable_hashjoin to off; -set enable_mergejoin to off; -set enable_material to off; --- test prefetching, if the platform allows it -DO $$ -BEGIN - SET effective_io_concurrency = 50; -EXCEPTION WHEN invalid_parameter_value THEN -END $$; -set work_mem='64kB'; --set small work mem to force lossy pages -explain (costs off) - select count(*) from tenk1, tenk2 where tenk1.hundred > 1 and tenk2.thousand=0; - QUERY PLAN ------------------------------------------------------------- - Aggregate - -> Nested Loop - -> Gather - Workers Planned: 4 - -> Parallel Seq Scan on tenk2 - Disabled: true - Filter: (thousand = 0) - -> Gather - Workers Planned: 4 - -> Parallel Bitmap Heap Scan on tenk1 - Recheck Cond: (hundred > 1) - -> Bitmap Index Scan on tenk1_hundred - Index Cond: (hundred > 1) -(13 rows) - -select count(*) from tenk1, tenk2 where tenk1.hundred > 1 and tenk2.thousand=0; - count -------- - 98000 -(1 row) - -create table bmscantest (a int, t text); -insert into bmscantest select r, 'fooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo' FROM generate_series(1,100000) r; -create index i_bmtest ON bmscantest(a); -select count(*) from bmscantest where a>1; - count -------- - 99999 -(1 row) - --- test accumulation of stats for parallel nodes -reset enable_seqscan; -alter table tenk2 set (parallel_workers = 0); -explain (analyze, timing off, summary off, costs off, buffers off) - select count(*) from tenk1, tenk2 where tenk1.hundred > 1 - and tenk2.thousand=0; - QUERY PLAN --------------------------------------------------------------------------- - Aggregate (actual rows=1 loops=1) - -> Nested Loop (actual rows=98000 loops=1) - -> Seq Scan on tenk2 (actual rows=10 loops=1) - Filter: (thousand = 0) - Rows Removed by Filter: 9990 - -> Gather (actual rows=9800 loops=10) - Workers Planned: 4 - Workers Launched: 4 - -> Parallel Seq Scan on tenk1 (actual rows=1960 loops=50) - Filter: (hundred > 1) - Rows Removed by Filter: 40 -(11 rows) - -alter table tenk2 reset (parallel_workers); -reset work_mem; -create function explain_parallel_sort_stats() returns setof text -language plpgsql as -$$ -declare ln text; -begin - for ln in - explain (analyze, timing off, summary off, costs off, buffers off) - select * from - (select ten from tenk1 where ten < 100 order by ten) ss - right join (values (1),(2),(3)) v(x) on true - loop - ln := regexp_replace(ln, 'Memory: \S*', 'Memory: xxx'); - return next ln; - end loop; -end; -$$; -select * from explain_parallel_sort_stats(); - explain_parallel_sort_stats --------------------------------------------------------------------------- - Nested Loop Left Join (actual rows=30000 loops=1) - -> Values Scan on "*VALUES*" (actual rows=3 loops=1) - -> Gather Merge (actual rows=10000 loops=3) - Workers Planned: 4 - Workers Launched: 4 - -> Sort (actual rows=2000 loops=15) - Sort Key: tenk1.ten - Sort Method: quicksort Memory: xxx - Worker 0: Sort Method: quicksort Memory: xxx - Worker 1: Sort Method: quicksort Memory: xxx - Worker 2: Sort Method: quicksort Memory: xxx - Worker 3: Sort Method: quicksort Memory: xxx - -> Parallel Seq Scan on tenk1 (actual rows=2000 loops=15) - Filter: (ten < 100) -(14 rows) - -reset enable_indexscan; -reset enable_hashjoin; -reset enable_mergejoin; -reset enable_material; -reset effective_io_concurrency; -drop table bmscantest; -drop function explain_parallel_sort_stats(); --- test parallel merge join path. 
-set enable_hashjoin to off; -set enable_nestloop to off; -explain (costs off) - select count(*) from tenk1, tenk2 where tenk1.unique1 = tenk2.unique1; - QUERY PLAN -------------------------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Merge Join - Merge Cond: (tenk1.unique1 = tenk2.unique1) - -> Parallel Index Only Scan using tenk1_unique1 on tenk1 - -> Index Only Scan using tenk2_unique1 on tenk2 -(8 rows) - -select count(*) from tenk1, tenk2 where tenk1.unique1 = tenk2.unique1; - count -------- - 10000 -(1 row) - -reset enable_hashjoin; -reset enable_nestloop; --- test parallel nestloop join path with materialization of the inner path -alter table tenk2 set (parallel_workers = 0); -explain (costs off) -select * from tenk1 t1, tenk2 t2 where t1.two > t2.two; - QUERY PLAN -------------------------------------------- - Gather - Workers Planned: 4 - -> Nested Loop - Join Filter: (t1.two > t2.two) - -> Parallel Seq Scan on tenk1 t1 - -> Materialize - -> Seq Scan on tenk2 t2 -(7 rows) - --- test that parallel nestloop join is not generated if the inner path is --- not parallel-safe -explain (costs off) -select * from tenk1 t1 - left join lateral - (select t1.unique1 as x, * from tenk2 t2 order by 1) t2 - on true -where t1.two > t2.two; - QUERY PLAN -------------------------------------------- - Nested Loop - -> Gather - Workers Planned: 4 - -> Parallel Seq Scan on tenk1 t1 - -> Subquery Scan on t2 - Filter: (t1.two > t2.two) - -> Seq Scan on tenk2 t2_1 -(7 rows) - -alter table tenk2 reset (parallel_workers); --- test gather merge -set enable_hashagg = false; -explain (costs off) - select count(*) from tenk1 group by twenty; - QUERY PLAN ----------------------------------------------------- - Finalize GroupAggregate - Group Key: twenty - -> Gather Merge - Workers Planned: 4 - -> Partial GroupAggregate - Group Key: twenty - -> Sort - Sort Key: twenty - -> Parallel Seq Scan on tenk1 -(9 rows) - -select count(*) from tenk1 group by twenty; - count -------- - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 -(20 rows) - ---test expressions in targetlist are pushed down for gather merge -create function sp_simple_func(var1 integer) returns integer -as $$ -begin - return var1 + 10; -end; -$$ language plpgsql PARALLEL SAFE; -explain (costs off, verbose) - select ten, sp_simple_func(ten) from tenk1 where ten < 100 order by ten; - QUERY PLAN ------------------------------------------------------ - Gather Merge - Output: ten, (sp_simple_func(ten)) - Workers Planned: 4 - -> Result - Output: ten, sp_simple_func(ten) - -> Sort - Output: ten - Sort Key: tenk1.ten - -> Parallel Seq Scan on public.tenk1 - Output: ten - Filter: (tenk1.ten < 100) -(11 rows) - -drop function sp_simple_func(integer); --- test handling of SRFs in targetlist (bug in 10.0) -explain (costs off) - select count(*), generate_series(1,2) from tenk1 group by twenty; - QUERY PLAN ----------------------------------------------------------- - ProjectSet - -> Finalize GroupAggregate - Group Key: twenty - -> Gather Merge - Workers Planned: 4 - -> Partial GroupAggregate - Group Key: twenty - -> Sort - Sort Key: twenty - -> Parallel Seq Scan on tenk1 -(10 rows) - -select count(*), generate_series(1,2) from tenk1 group by twenty; - count | generate_series --------+----------------- - 500 | 1 - 500 | 2 - 500 | 1 - 500 | 2 - 500 | 1 - 500 | 2 - 500 | 1 - 500 | 2 - 500 | 1 - 500 | 2 - 500 | 
1 - 500 | 2 - 500 | 1 - 500 | 2 - 500 | 1 - 500 | 2 - 500 | 1 - 500 | 2 - 500 | 1 - 500 | 2 - 500 | 1 - 500 | 2 - 500 | 1 - 500 | 2 - 500 | 1 - 500 | 2 - 500 | 1 - 500 | 2 - 500 | 1 - 500 | 2 - 500 | 1 - 500 | 2 - 500 | 1 - 500 | 2 - 500 | 1 - 500 | 2 - 500 | 1 - 500 | 2 - 500 | 1 - 500 | 2 -(40 rows) - --- test gather merge with parallel leader participation disabled -set parallel_leader_participation = off; -explain (costs off) - select count(*) from tenk1 group by twenty; - QUERY PLAN ----------------------------------------------------- - Finalize GroupAggregate - Group Key: twenty - -> Gather Merge - Workers Planned: 4 - -> Partial GroupAggregate - Group Key: twenty - -> Sort - Sort Key: twenty - -> Parallel Seq Scan on tenk1 -(9 rows) - -select count(*) from tenk1 group by twenty; - count -------- - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 - 500 -(20 rows) - -reset parallel_leader_participation; ---test rescan behavior of gather merge -set enable_material = false; -explain (costs off) -select * from - (select string4, count(unique2) - from tenk1 group by string4 order by string4) ss - right join (values (1),(2),(3)) v(x) on true; - QUERY PLAN ----------------------------------------------------------- - Nested Loop Left Join - -> Values Scan on "*VALUES*" - -> Finalize GroupAggregate - Group Key: tenk1.string4 - -> Gather Merge - Workers Planned: 4 - -> Partial GroupAggregate - Group Key: tenk1.string4 - -> Sort - Sort Key: tenk1.string4 - -> Parallel Seq Scan on tenk1 -(11 rows) - -select * from - (select string4, count(unique2) - from tenk1 group by string4 order by string4) ss - right join (values (1),(2),(3)) v(x) on true; - string4 | count | x ----------+-------+--- - AAAAxx | 2500 | 1 - HHHHxx | 2500 | 1 - OOOOxx | 2500 | 1 - VVVVxx | 2500 | 1 - AAAAxx | 2500 | 2 - HHHHxx | 2500 | 2 - OOOOxx | 2500 | 2 - VVVVxx | 2500 | 2 - AAAAxx | 2500 | 3 - HHHHxx | 2500 | 3 - OOOOxx | 2500 | 3 - VVVVxx | 2500 | 3 -(12 rows) - -reset enable_material; -reset enable_hashagg; --- check parallelized int8 aggregate (bug #14897) -explain (costs off) -select avg(unique1::int8) from tenk1; - QUERY PLAN -------------------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 4 - -> Partial Aggregate - -> Parallel Index Only Scan using tenk1_unique1 on tenk1 -(5 rows) - -select avg(unique1::int8) from tenk1; - avg ------------------------ - 4999.5000000000000000 -(1 row) - --- gather merge test with a LIMIT -explain (costs off) - select fivethous from tenk1 order by fivethous limit 4; - QUERY PLAN ----------------------------------------------- - Limit - -> Gather Merge - Workers Planned: 4 - -> Sort - Sort Key: fivethous - -> Parallel Seq Scan on tenk1 -(6 rows) - -select fivethous from tenk1 order by fivethous limit 4; - fivethous ------------ - 0 - 0 - 1 - 1 -(4 rows) - --- gather merge test with 0 worker -set max_parallel_workers = 0; -explain (costs off) - select string4 from tenk1 order by string4 limit 5; - QUERY PLAN ----------------------------------------------- - Limit - -> Gather Merge - Workers Planned: 4 - -> Sort - Sort Key: string4 - -> Parallel Seq Scan on tenk1 -(6 rows) - -select string4 from tenk1 order by string4 limit 5; - string4 ---------- - AAAAxx - AAAAxx - AAAAxx - AAAAxx - AAAAxx -(5 rows) - --- gather merge test with 0 workers, with parallel leader --- participation disabled (the leader will have to run the plan --- despite the setting) -set 
parallel_leader_participation = off; -explain (costs off) - select string4 from tenk1 order by string4 limit 5; - QUERY PLAN ----------------------------------------------- - Limit - -> Gather Merge - Workers Planned: 4 - -> Sort - Sort Key: string4 - -> Parallel Seq Scan on tenk1 -(6 rows) - -select string4 from tenk1 order by string4 limit 5; - string4 ---------- - AAAAxx - AAAAxx - AAAAxx - AAAAxx - AAAAxx -(5 rows) - -reset parallel_leader_participation; -reset max_parallel_workers; -create function parallel_safe_volatile(a int) returns int as - $$ begin return a; end; $$ parallel safe volatile language plpgsql; --- Test gather merge atop of a sort of a partial path -explain (costs off) -select * from tenk1 where four = 2 -order by four, hundred, parallel_safe_volatile(thousand); - QUERY PLAN ---------------------------------------------------------------- - Gather Merge - Workers Planned: 4 - -> Sort - Sort Key: hundred, (parallel_safe_volatile(thousand)) - -> Parallel Seq Scan on tenk1 - Filter: (four = 2) -(6 rows) - --- Test gather merge atop of an incremental sort a of partial path -set min_parallel_index_scan_size = 0; -set enable_seqscan = off; -explain (costs off) -select * from tenk1 where four = 2 -order by four, hundred, parallel_safe_volatile(thousand); - QUERY PLAN ---------------------------------------------------------------- - Gather Merge - Workers Planned: 4 - -> Incremental Sort - Sort Key: hundred, (parallel_safe_volatile(thousand)) - Presorted Key: hundred - -> Parallel Index Scan using tenk1_hundred on tenk1 - Filter: (four = 2) -(7 rows) - -reset min_parallel_index_scan_size; -reset enable_seqscan; --- Test GROUP BY with a gather merge path atop of a sort of a partial path -explain (costs off) -select count(*) from tenk1 -group by twenty, parallel_safe_volatile(two); - QUERY PLAN --------------------------------------------------------------------- - Finalize GroupAggregate - Group Key: twenty, (parallel_safe_volatile(two)) - -> Gather Merge - Workers Planned: 4 - -> Sort - Sort Key: twenty, (parallel_safe_volatile(two)) - -> Partial HashAggregate - Group Key: twenty, parallel_safe_volatile(two) - -> Parallel Seq Scan on tenk1 -(9 rows) - -drop function parallel_safe_volatile(int); -SAVEPOINT settings; -SET LOCAL debug_parallel_query = 1; -explain (costs off) - select stringu1::int2 from tenk1 where unique1 = 1; - QUERY PLAN ------------------------------------------------ - Gather - Workers Planned: 1 - Single Copy: true - -> Index Scan using tenk1_unique1 on tenk1 - Index Cond: (unique1 = 1) -(5 rows) - -ROLLBACK TO SAVEPOINT settings; --- exercise record typmod remapping between backends -CREATE FUNCTION make_record(n int) - RETURNS RECORD LANGUAGE plpgsql PARALLEL SAFE AS -$$ -BEGIN - RETURN CASE n - WHEN 1 THEN ROW(1) - WHEN 2 THEN ROW(1, 2) - WHEN 3 THEN ROW(1, 2, 3) - WHEN 4 THEN ROW(1, 2, 3, 4) - ELSE ROW(1, 2, 3, 4, 5) - END; -END; -$$; -SAVEPOINT settings; -SET LOCAL debug_parallel_query = 1; -SELECT make_record(x) FROM (SELECT generate_series(1, 5) x) ss ORDER BY x; - make_record -------------- - (1) - (1,2) - (1,2,3) - (1,2,3,4) - (1,2,3,4,5) -(5 rows) - -ROLLBACK TO SAVEPOINT settings; -DROP function make_record(n int); --- test the sanity of parallel query after the active role is dropped. 
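debug_parallel_query, used above via SET LOCAL inside a savepoint, forces a Gather node on top of any plan that is merely parallel-safe, whether or not parallelism would pay off; the "Single Copy: true" plans above are its signature, with one worker running the whole plan while the leader only forwards tuples. That is what makes it useful for the next test: with the GUC on, even a trivial query must pass through a worker, so it exercises worker startup under an odd condition (the role that was active in the session has since been dropped). A minimal way to see the effect, with the exact plan shape still depending on costs:

    SET debug_parallel_query = on;    -- also accepts off and regress
    EXPLAIN (COSTS OFF) SELECT count(*) FROM tenk1;
    RESET debug_parallel_query;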
-drop role if exists regress_parallel_worker; -NOTICE: role "regress_parallel_worker" does not exist, skipping -create role regress_parallel_worker; -set role regress_parallel_worker; -reset session authorization; -drop role regress_parallel_worker; -set debug_parallel_query = 1; -select count(*) from tenk1; - count -------- - 10000 -(1 row) - -reset debug_parallel_query; -reset role; --- Window function calculation can't be pushed to workers. -explain (costs off, verbose) - select count(*) from tenk1 a where (unique1, two) in - (select unique1, row_number() over() from tenk1 b); - QUERY PLAN ----------------------------------------------------------------------------------------- - Aggregate - Output: count(*) - -> Hash Right Semi Join - Hash Cond: ((b.unique1 = a.unique1) AND ((row_number() OVER (?)) = a.two)) - -> WindowAgg - Output: b.unique1, row_number() OVER (?) - -> Gather - Output: b.unique1 - Workers Planned: 4 - -> Parallel Index Only Scan using tenk1_unique1 on public.tenk1 b - Output: b.unique1 - -> Hash - Output: a.unique1, a.two - -> Gather - Output: a.unique1, a.two - Workers Planned: 4 - -> Parallel Seq Scan on public.tenk1 a - Output: a.unique1, a.two -(18 rows) - --- LIMIT/OFFSET within sub-selects can't be pushed to workers. -explain (costs off) - select * from tenk1 a where two in - (select two from tenk1 b where stringu1 like '%AAAA' limit 3); - QUERY PLAN ---------------------------------------------------------------- - Hash Semi Join - Hash Cond: (a.two = b.two) - -> Gather - Workers Planned: 4 - -> Parallel Seq Scan on tenk1 a - -> Hash - -> Limit - -> Gather - Workers Planned: 4 - -> Parallel Seq Scan on tenk1 b - Filter: (stringu1 ~~ '%AAAA'::text) -(11 rows) - --- to increase the parallel query test coverage -SAVEPOINT settings; -SET LOCAL debug_parallel_query = 1; -EXPLAIN (analyze, timing off, summary off, costs off, buffers off) SELECT * FROM tenk1; - QUERY PLAN -------------------------------------------------------------- - Gather (actual rows=10000 loops=1) - Workers Planned: 4 - Workers Launched: 4 - -> Parallel Seq Scan on tenk1 (actual rows=2000 loops=5) -(4 rows) - -ROLLBACK TO SAVEPOINT settings; --- provoke error in worker --- (make the error message long enough to require multiple bufferloads) -SAVEPOINT settings; -SET LOCAL debug_parallel_query = 1; -select (stringu1 || repeat('abcd', 5000))::int2 from tenk1 where unique1 = 1; -ERROR: invalid input syntax for type smallint: 
"BAAAAAabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd
abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabc
dabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab
cdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcda
bcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd
abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd" -CONTEXT: parallel worker -ROLLBACK TO SAVEPOINT settings; --- test interaction with set-returning functions -SAVEPOINT settings; --- multiple subqueries under a single Gather node --- must set parallel_setup_cost > 0 to discourage multiple Gather nodes -SET LOCAL parallel_setup_cost = 10; -EXPLAIN (COSTS OFF) -SELECT unique1 FROM tenk1 WHERE fivethous = tenthous + 1 -UNION ALL -SELECT unique1 FROM tenk1 WHERE fivethous = tenthous + 1; - QUERY PLAN ----------------------------------------------------- - Gather - Workers Planned: 4 - -> Parallel Append - -> Parallel Seq Scan on tenk1 - Filter: (fivethous = (tenthous + 1)) - -> Parallel Seq Scan on tenk1 tenk1_1 - Filter: (fivethous = (tenthous + 1)) -(7 rows) - -ROLLBACK TO SAVEPOINT settings; --- can't use multiple subqueries under a single Gather node due to initPlans -EXPLAIN (COSTS OFF) -SELECT unique1 FROM tenk1 WHERE fivethous = - (SELECT unique1 FROM tenk1 WHERE fivethous = 1 LIMIT 1) -UNION ALL -SELECT unique1 FROM tenk1 WHERE fivethous = - (SELECT unique2 FROM tenk1 WHERE fivethous = 1 LIMIT 1) -ORDER BY 1; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: tenk1.unique1 - -> Append - -> Gather - Workers Planned: 4 - InitPlan 1 - -> Limit - -> Gather - Workers Planned: 4 - -> Parallel Seq 
Scan on tenk1 tenk1_2 - Filter: (fivethous = 1) - -> Parallel Seq Scan on tenk1 - Filter: (fivethous = (InitPlan 1).col1) - -> Gather - Workers Planned: 4 - InitPlan 2 - -> Limit - -> Gather - Workers Planned: 4 - -> Parallel Seq Scan on tenk1 tenk1_3 - Filter: (fivethous = 1) - -> Parallel Seq Scan on tenk1 tenk1_1 - Filter: (fivethous = (InitPlan 2).col1) -(23 rows) - --- test interaction with SRFs -SELECT * FROM information_schema.foreign_data_wrapper_options -ORDER BY 1, 2, 3; - foreign_data_wrapper_catalog | foreign_data_wrapper_name | option_name | option_value -------------------------------+---------------------------+-------------+-------------- -(0 rows) - -EXPLAIN (VERBOSE, COSTS OFF) -SELECT generate_series(1, two), array(select generate_series(1, two)) - FROM tenk1 ORDER BY tenthous; - QUERY PLAN ---------------------------------------------------------------------------- - ProjectSet - Output: generate_series(1, tenk1.two), ARRAY(SubPlan 1), tenk1.tenthous - -> Gather Merge - Output: tenk1.two, tenk1.tenthous - Workers Planned: 4 - -> Result - Output: tenk1.two, tenk1.tenthous - -> Sort - Output: tenk1.tenthous, tenk1.two - Sort Key: tenk1.tenthous - -> Parallel Seq Scan on public.tenk1 - Output: tenk1.tenthous, tenk1.two - SubPlan 1 - -> ProjectSet - Output: generate_series(1, tenk1.two) - -> Result -(16 rows) - --- must disallow pushing sort below gather when pathkey contains an SRF -EXPLAIN (VERBOSE, COSTS OFF) -SELECT unnest(ARRAY[]::integer[]) + 1 AS pathkey - FROM tenk1 t1 JOIN tenk1 t2 ON TRUE - ORDER BY pathkey; - QUERY PLAN ------------------------------------------------------------------------------------------------------ - Sort - Output: (((unnest('{}'::integer[])) + 1)) - Sort Key: (((unnest('{}'::integer[])) + 1)) - -> Result - Output: ((unnest('{}'::integer[])) + 1) - -> ProjectSet - Output: unnest('{}'::integer[]) - -> Nested Loop - -> Gather - Workers Planned: 4 - -> Parallel Index Only Scan using tenk1_hundred on public.tenk1 t1 - -> Materialize - -> Gather - Workers Planned: 4 - -> Parallel Index Only Scan using tenk1_hundred on public.tenk1 t2 -(15 rows) - --- test passing expanded-value representations to workers -CREATE FUNCTION make_some_array(int,int) returns int[] as -$$declare x int[]; - begin - x[1] := $1; - x[2] := $2; - return x; - end$$ language plpgsql parallel safe; -CREATE TABLE fooarr(f1 text, f2 int[], f3 text); -INSERT INTO fooarr VALUES('1', ARRAY[1,2], 'one'); -PREPARE pstmt(text, int[]) AS SELECT * FROM fooarr WHERE f1 = $1 AND f2 = $2; -EXPLAIN (COSTS OFF) EXECUTE pstmt('1', make_some_array(1,2)); - QUERY PLAN ------------------------------------------------------------------- - Gather - Workers Planned: 3 - -> Parallel Seq Scan on fooarr - Filter: ((f1 = '1'::text) AND (f2 = '{1,2}'::integer[])) -(4 rows) - -EXECUTE pstmt('1', make_some_array(1,2)); - f1 | f2 | f3 -----+-------+----- - 1 | {1,2} | one -(1 row) - -DEALLOCATE pstmt; --- test interaction between subquery and partial_paths -CREATE VIEW tenk1_vw_sec WITH (security_barrier) AS SELECT * FROM tenk1; -EXPLAIN (COSTS OFF) -SELECT 1 FROM tenk1_vw_sec - WHERE (SELECT sum(f1) FROM int4_tbl WHERE f1 < unique1) < 100; - QUERY PLAN -------------------------------------------------------------------- - Subquery Scan on tenk1_vw_sec - Filter: ((SubPlan 1) < 100) - -> Gather - Workers Planned: 4 - -> Parallel Index Only Scan using tenk1_unique1 on tenk1 - SubPlan 1 - -> Aggregate - -> Seq Scan on int4_tbl - Filter: (f1 < tenk1_vw_sec.unique1) -(9 rows) - -rollback; --- test that a 
newly-created session role propagates to workers. -begin; -create role regress_parallel_worker; -set session authorization regress_parallel_worker; -select current_setting('session_authorization'); - current_setting -------------------------- - regress_parallel_worker -(1 row) - -set debug_parallel_query = 1; -select current_setting('session_authorization'); - current_setting -------------------------- - regress_parallel_worker -(1 row) - -rollback; --- test that function option SET ROLE works in parallel workers. -create role regress_parallel_worker; -create function set_and_report_role() returns text as - $$ select current_setting('role') $$ language sql parallel safe - set role = regress_parallel_worker; -create function set_role_and_error(int) returns int as - $$ select 1 / $1 $$ language sql parallel safe - set role = regress_parallel_worker; -set debug_parallel_query = 0; -select set_and_report_role(); - set_and_report_role -------------------------- - regress_parallel_worker -(1 row) - -select set_role_and_error(0); -ERROR: division by zero -CONTEXT: SQL function "set_role_and_error" statement 1 -set debug_parallel_query = 1; -select set_and_report_role(); - set_and_report_role -------------------------- - regress_parallel_worker -(1 row) - -select set_role_and_error(0); -ERROR: division by zero -CONTEXT: SQL function "set_role_and_error" statement 1 -parallel worker -reset debug_parallel_query; -drop function set_and_report_role(); -drop function set_role_and_error(int); -drop role regress_parallel_worker; --- don't freeze in ParallelFinish while holding an LWLock -BEGIN; -CREATE FUNCTION my_cmp (int4, int4) -RETURNS int LANGUAGE sql AS -$$ - SELECT - CASE WHEN $1 < $2 THEN -1 - WHEN $1 > $2 THEN 1 - ELSE 0 - END; -$$; -CREATE TABLE parallel_hang (i int4); -INSERT INTO parallel_hang - (SELECT * FROM generate_series(1, 400) gs); -CREATE OPERATOR CLASS int4_custom_ops FOR TYPE int4 USING btree AS - OPERATOR 1 < (int4, int4), OPERATOR 2 <= (int4, int4), - OPERATOR 3 = (int4, int4), OPERATOR 4 >= (int4, int4), - OPERATOR 5 > (int4, int4), FUNCTION 1 my_cmp(int4, int4); -CREATE UNIQUE INDEX parallel_hang_idx - ON parallel_hang - USING btree (i int4_custom_ops); -SET debug_parallel_query = on; -DELETE FROM parallel_hang WHERE 380 <= i AND i <= 420; -ROLLBACK; --- Check parallel worker stats -select pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -select parallel_workers_to_launch > :'parallel_workers_to_launch_before' AS wrk_to_launch, - parallel_workers_launched > :'parallel_workers_launched_before' AS wrk_launched - from pg_stat_database - where datname = current_database(); - wrk_to_launch | wrk_launched ----------------+-------------- - t | t -(1 row) - +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? 
diff -U3 /Users/admin/pgsql/src/test/regress/expected/write_parallel.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/write_parallel.out
--- /Users/admin/pgsql/src/test/regress/expected/write_parallel.out	2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/write_parallel.out	2024-12-13 13:20:11
@@ -1,80 +1,2 @@
---
--- PARALLEL
---
-begin;
--- encourage use of parallel plans
-set parallel_setup_cost=0;
-set parallel_tuple_cost=0;
-set min_parallel_table_scan_size=0;
-set max_parallel_workers_per_gather=4;
---
--- Test write operations that has an underlying query that is eligible
--- for parallel plans
---
-explain (costs off) create table parallel_write as
-    select length(stringu1) from tenk1 group by length(stringu1);
-                     QUERY PLAN                     
-----------------------------------------------------
- Finalize HashAggregate
-   Group Key: (length((stringu1)::text))
-   ->  Gather
-         Workers Planned: 4
-         ->  Partial HashAggregate
-               Group Key: length((stringu1)::text)
-               ->  Parallel Seq Scan on tenk1
-(7 rows)
-
-create table parallel_write as
-    select length(stringu1) from tenk1 group by length(stringu1);
-drop table parallel_write;
-explain (costs off) select length(stringu1) into parallel_write
-    from tenk1 group by length(stringu1);
-                     QUERY PLAN                     
-----------------------------------------------------
- Finalize HashAggregate
-   Group Key: (length((stringu1)::text))
-   ->  Gather
-         Workers Planned: 4
-         ->  Partial HashAggregate
-               Group Key: length((stringu1)::text)
-               ->  Parallel Seq Scan on tenk1
-(7 rows)
-
-select length(stringu1) into parallel_write
-    from tenk1 group by length(stringu1);
-drop table parallel_write;
-explain (costs off) create materialized view parallel_mat_view as
-    select length(stringu1) from tenk1 group by length(stringu1);
-                     QUERY PLAN                     
-----------------------------------------------------
- Finalize HashAggregate
-   Group Key: (length((stringu1)::text))
-   ->  Gather
-         Workers Planned: 4
-         ->  Partial HashAggregate
-               Group Key: length((stringu1)::text)
-               ->  Parallel Seq Scan on tenk1
-(7 rows)
-
-create materialized view parallel_mat_view as
-    select length(stringu1) from tenk1 group by length(stringu1);
-create unique index on parallel_mat_view(length);
-refresh materialized view parallel_mat_view;
-refresh materialized view concurrently parallel_mat_view;
-drop materialized view parallel_mat_view;
-prepare prep_stmt as select length(stringu1) from tenk1 group by length(stringu1);
-explain (costs off) create table parallel_write as execute prep_stmt;
-                     QUERY PLAN                     
-----------------------------------------------------
- Finalize HashAggregate
-   Group Key: (length((stringu1)::text))
-   ->  Gather
-         Workers Planned: 4
-         ->  Partial HashAggregate
-               Group Key: length((stringu1)::text)
-               ->  Parallel Seq Scan on tenk1
-(7 rows)
-
-create table parallel_write as execute prep_stmt;
-drop table parallel_write;
-rollback;
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+	Is the server running locally and accepting connections on that socket?
diff -U3 /Users/admin/pgsql/src/test/regress/expected/vacuum_parallel.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/vacuum_parallel.out
--- /Users/admin/pgsql/src/test/regress/expected/vacuum_parallel.out	2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/vacuum_parallel.out	2024-12-13 13:20:11
@@ -1,49 +1,2 @@
-SET max_parallel_maintenance_workers TO 4;
-SET min_parallel_index_scan_size TO '128kB';
--- Bug #17245: Make sure that we don't totally fail to VACUUM individual indexes that
--- happen to be below min_parallel_index_scan_size during parallel VACUUM:
-CREATE TABLE parallel_vacuum_table (a int) WITH (autovacuum_enabled = off);
-INSERT INTO parallel_vacuum_table SELECT i from generate_series(1, 10000) i;
--- Parallel VACUUM will never be used unless there are at least two indexes
--- that exceed min_parallel_index_scan_size. Create two such indexes, and
--- a third index that is smaller than min_parallel_index_scan_size.
-CREATE INDEX regular_sized_index ON parallel_vacuum_table(a);
-CREATE INDEX typically_sized_index ON parallel_vacuum_table(a);
--- Note: vacuum_in_leader_small_index can apply deduplication, making it ~3x
--- smaller than the other indexes
-CREATE INDEX vacuum_in_leader_small_index ON parallel_vacuum_table((1));
--- Verify (as best we can) that the cost model for parallel VACUUM
--- will make our VACUUM run in parallel, while always leaving it up to the
--- parallel leader to handle the vacuum_in_leader_small_index index:
-SELECT EXISTS (
-SELECT 1
-FROM pg_class
-WHERE oid = 'vacuum_in_leader_small_index'::regclass AND
-  pg_relation_size(oid) <
-  pg_size_bytes(current_setting('min_parallel_index_scan_size'))
-) as leader_will_handle_small_index;
- leader_will_handle_small_index 
---------------------------------
- t
-(1 row)
-
-SELECT count(*) as trigger_parallel_vacuum_nindexes
-FROM pg_class
-WHERE oid in ('regular_sized_index'::regclass, 'typically_sized_index'::regclass) AND
-  pg_relation_size(oid) >=
-  pg_size_bytes(current_setting('min_parallel_index_scan_size'));
- trigger_parallel_vacuum_nindexes 
-----------------------------------
-                                2
-(1 row)
-
--- Parallel VACUUM with B-Tree page deletions, ambulkdelete calls:
-DELETE FROM parallel_vacuum_table;
-VACUUM (PARALLEL 4, INDEX_CLEANUP ON) parallel_vacuum_table;
--- Since vacuum_in_leader_small_index uses deduplication, we expect an
--- assertion failure with bug #17245 (in the absence of bugfix):
-INSERT INTO parallel_vacuum_table SELECT i FROM generate_series(1, 10000) i;
-RESET max_parallel_maintenance_workers;
-RESET min_parallel_index_scan_size;
--- Deliberately don't drop table, to get further coverage from tools like
--- pg_amcheck in some testing scenarios
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+	Is the server running locally and accepting connections on that socket?
diff -U3 /Users/admin/pgsql/src/test/regress/expected/publication.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/publication.out --- /Users/admin/pgsql/src/test/regress/expected/publication.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/publication.out 2024-12-13 13:20:11 @@ -1,1879 +1,2 @@ --- --- PUBLICATION --- -CREATE ROLE regress_publication_user LOGIN SUPERUSER; -CREATE ROLE regress_publication_user2; -CREATE ROLE regress_publication_user_dummy LOGIN NOSUPERUSER; -SET SESSION AUTHORIZATION 'regress_publication_user'; --- suppress warning that depends on wal_level -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_default; -RESET client_min_messages; -COMMENT ON PUBLICATION testpub_default IS 'test publication'; -SELECT obj_description(p.oid, 'pg_publication') FROM pg_publication p; - obj_description ------------------- - test publication -(1 row) - -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpib_ins_trunct WITH (publish = insert); -RESET client_min_messages; -ALTER PUBLICATION testpub_default SET (publish = update); --- error cases -CREATE PUBLICATION testpub_xxx WITH (foo); -ERROR: unrecognized publication parameter: "foo" -CREATE PUBLICATION testpub_xxx WITH (publish = 'cluster, vacuum'); -ERROR: unrecognized value for publication option "publish": "cluster" -CREATE PUBLICATION testpub_xxx WITH (publish_via_partition_root = 'true', publish_via_partition_root = '0'); -ERROR: conflicting or redundant options -LINE 1: ...ub_xxx WITH (publish_via_partition_root = 'true', publish_vi... - ^ -CREATE PUBLICATION testpub_xxx WITH (publish_generated_columns = 'true', publish_generated_columns = '0'); -ERROR: conflicting or redundant options -LINE 1: ...pub_xxx WITH (publish_generated_columns = 'true', publish_ge... 
- ^ -CREATE PUBLICATION testpub_xxx WITH (publish_generated_columns = 'foo'); -ERROR: publish_generated_columns requires a Boolean value -\dRp - List of publications - Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------+--------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - testpib_ins_trunct | regress_publication_user | f | t | f | f | f | f | f - testpub_default | regress_publication_user | f | f | t | f | f | f | f -(2 rows) - -ALTER PUBLICATION testpub_default SET (publish = 'insert, update, delete'); -\dRp - List of publications - Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------+--------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - testpib_ins_trunct | regress_publication_user | f | t | f | f | f | f | f - testpub_default | regress_publication_user | f | t | t | t | f | f | f -(2 rows) - ---- adding tables -CREATE SCHEMA pub_test; -CREATE TABLE testpub_tbl1 (id serial primary key, data text); -CREATE TABLE pub_test.testpub_nopk (foo int, bar int); -CREATE VIEW testpub_view AS SELECT 1; -CREATE TABLE testpub_parted (a int) PARTITION BY LIST (a); -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_foralltables FOR ALL TABLES WITH (publish = 'insert'); -RESET client_min_messages; -ALTER PUBLICATION testpub_foralltables SET (publish = 'insert, update'); -CREATE TABLE testpub_tbl2 (id serial primary key, data text); --- fail - can't add to for all tables publication -ALTER PUBLICATION testpub_foralltables ADD TABLE testpub_tbl2; -ERROR: publication "testpub_foralltables" is defined as FOR ALL TABLES -DETAIL: Tables cannot be added to or dropped from FOR ALL TABLES publications. --- fail - can't drop from all tables publication -ALTER PUBLICATION testpub_foralltables DROP TABLE testpub_tbl2; -ERROR: publication "testpub_foralltables" is defined as FOR ALL TABLES -DETAIL: Tables cannot be added to or dropped from FOR ALL TABLES publications. --- fail - can't add to for all tables publication -ALTER PUBLICATION testpub_foralltables SET TABLE pub_test.testpub_nopk; -ERROR: publication "testpub_foralltables" is defined as FOR ALL TABLES -DETAIL: Tables cannot be added to or dropped from FOR ALL TABLES publications. --- fail - can't add schema to 'FOR ALL TABLES' publication -ALTER PUBLICATION testpub_foralltables ADD TABLES IN SCHEMA pub_test; -ERROR: publication "testpub_foralltables" is defined as FOR ALL TABLES -DETAIL: Schemas cannot be added to or dropped from FOR ALL TABLES publications. --- fail - can't drop schema from 'FOR ALL TABLES' publication -ALTER PUBLICATION testpub_foralltables DROP TABLES IN SCHEMA pub_test; -ERROR: publication "testpub_foralltables" is defined as FOR ALL TABLES -DETAIL: Schemas cannot be added to or dropped from FOR ALL TABLES publications. --- fail - can't set schema to 'FOR ALL TABLES' publication -ALTER PUBLICATION testpub_foralltables SET TABLES IN SCHEMA pub_test; -ERROR: publication "testpub_foralltables" is defined as FOR ALL TABLES -DETAIL: Schemas cannot be added to or dropped from FOR ALL TABLES publications. 
-SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_fortable FOR TABLE testpub_tbl1; -RESET client_min_messages; --- should be able to add schema to 'FOR TABLE' publication -ALTER PUBLICATION testpub_fortable ADD TABLES IN SCHEMA pub_test; -\dRp+ testpub_fortable - Publication testpub_fortable - Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - regress_publication_user | f | t | t | t | t | f | f -Tables: - "public.testpub_tbl1" -Tables from schemas: - "pub_test" - --- should be able to drop schema from 'FOR TABLE' publication -ALTER PUBLICATION testpub_fortable DROP TABLES IN SCHEMA pub_test; -\dRp+ testpub_fortable - Publication testpub_fortable - Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - regress_publication_user | f | t | t | t | t | f | f -Tables: - "public.testpub_tbl1" - --- should be able to set schema to 'FOR TABLE' publication -ALTER PUBLICATION testpub_fortable SET TABLES IN SCHEMA pub_test; -\dRp+ testpub_fortable - Publication testpub_fortable - Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - regress_publication_user | f | t | t | t | t | f | f -Tables from schemas: - "pub_test" - -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_forschema FOR TABLES IN SCHEMA pub_test; --- should be able to create publication with schema and table of the same --- schema -CREATE PUBLICATION testpub_for_tbl_schema FOR TABLES IN SCHEMA pub_test, TABLE pub_test.testpub_nopk; -RESET client_min_messages; -\dRp+ testpub_for_tbl_schema - Publication testpub_for_tbl_schema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - regress_publication_user | f | t | t | t | t | f | f -Tables: - "pub_test.testpub_nopk" -Tables from schemas: - "pub_test" - --- weird parser corner case -CREATE PUBLICATION testpub_parsertst FOR TABLE pub_test.testpub_nopk, CURRENT_SCHEMA; -ERROR: invalid table name -LINE 1: ...estpub_parsertst FOR TABLE pub_test.testpub_nopk, CURRENT_SC... 
- ^ -CREATE PUBLICATION testpub_parsertst FOR TABLES IN SCHEMA foo, test.foo; -ERROR: invalid schema name -LINE 1: ...CATION testpub_parsertst FOR TABLES IN SCHEMA foo, test.foo; - ^ --- should be able to add a table of the same schema to the schema publication -ALTER PUBLICATION testpub_forschema ADD TABLE pub_test.testpub_nopk; -\dRp+ testpub_forschema - Publication testpub_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - regress_publication_user | f | t | t | t | t | f | f -Tables: - "pub_test.testpub_nopk" -Tables from schemas: - "pub_test" - --- should be able to drop the table -ALTER PUBLICATION testpub_forschema DROP TABLE pub_test.testpub_nopk; -\dRp+ testpub_forschema - Publication testpub_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - regress_publication_user | f | t | t | t | t | f | f -Tables from schemas: - "pub_test" - --- fail - can't drop a table from the schema publication which isn't in the --- publication -ALTER PUBLICATION testpub_forschema DROP TABLE pub_test.testpub_nopk; -ERROR: relation "testpub_nopk" is not part of the publication --- should be able to set table to schema publication -ALTER PUBLICATION testpub_forschema SET TABLE pub_test.testpub_nopk; -\dRp+ testpub_forschema - Publication testpub_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - regress_publication_user | f | t | t | t | t | f | f -Tables: - "pub_test.testpub_nopk" - -SELECT pubname, puballtables FROM pg_publication WHERE pubname = 'testpub_foralltables'; - pubname | puballtables -----------------------+-------------- - testpub_foralltables | t -(1 row) - -\d+ testpub_tbl2 - Table "public.testpub_tbl2" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+------------------------------------------+----------+--------------+------------- - id | integer | | not null | nextval('testpub_tbl2_id_seq'::regclass) | plain | | - data | text | | | | extended | | -Indexes: - "testpub_tbl2_pkey" PRIMARY KEY, btree (id) -Publications: - "testpub_foralltables" -Not-null constraints: - "testpub_tbl2_id_not_null" NOT NULL "id" - -\dRp+ testpub_foralltables - Publication testpub_foralltables - Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - regress_publication_user | t | t | t | f | f | f | f -(1 row) - -DROP TABLE testpub_tbl2; -DROP PUBLICATION testpub_foralltables, testpub_fortable, testpub_forschema, testpub_for_tbl_schema; -CREATE TABLE testpub_tbl3 (a int); -CREATE TABLE testpub_tbl3a (b text) INHERITS (testpub_tbl3); -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub3 FOR TABLE testpub_tbl3; -CREATE PUBLICATION testpub4 FOR TABLE ONLY testpub_tbl3; -RESET client_min_messages; -\dRp+ testpub3 - Publication testpub3 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root 
---------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - regress_publication_user | f | t | t | t | t | f | f -Tables: - "public.testpub_tbl3" - "public.testpub_tbl3a" - -\dRp+ testpub4 - Publication testpub4 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - regress_publication_user | f | t | t | t | t | f | f -Tables: - "public.testpub_tbl3" - -DROP TABLE testpub_tbl3, testpub_tbl3a; -DROP PUBLICATION testpub3, testpub4; --- Tests for partitioned tables -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_forparted; -CREATE PUBLICATION testpub_forparted1; -RESET client_min_messages; -CREATE TABLE testpub_parted1 (LIKE testpub_parted); -CREATE TABLE testpub_parted2 (LIKE testpub_parted); -ALTER PUBLICATION testpub_forparted1 SET (publish='insert'); -ALTER TABLE testpub_parted ATTACH PARTITION testpub_parted1 FOR VALUES IN (1); -ALTER TABLE testpub_parted ATTACH PARTITION testpub_parted2 FOR VALUES IN (2); --- works despite missing REPLICA IDENTITY, because updates are not replicated -UPDATE testpub_parted1 SET a = 1; --- only parent is listed as being in publication, not the partition -ALTER PUBLICATION testpub_forparted ADD TABLE testpub_parted; -\dRp+ testpub_forparted - Publication testpub_forparted - Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - regress_publication_user | f | t | t | t | t | f | f -Tables: - "public.testpub_parted" - --- works despite missing REPLICA IDENTITY, because no actual update happened -UPDATE testpub_parted SET a = 1 WHERE false; --- should now fail, because parent's publication replicates updates -UPDATE testpub_parted1 SET a = 1; -ERROR: cannot update table "testpub_parted1" because it does not have a replica identity and publishes updates -HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. -ALTER TABLE testpub_parted DETACH PARTITION testpub_parted1; --- works again, because parent's publication is no longer considered -UPDATE testpub_parted1 SET a = 1; -ALTER PUBLICATION testpub_forparted SET (publish_via_partition_root = true); -\dRp+ testpub_forparted - Publication testpub_forparted - Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - regress_publication_user | f | t | t | t | t | f | t -Tables: - "public.testpub_parted" - --- still fail, because parent's publication replicates updates -UPDATE testpub_parted2 SET a = 2; -ERROR: cannot update table "testpub_parted2" because it does not have a replica identity and publishes updates -HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. 
-ALTER PUBLICATION testpub_forparted DROP TABLE testpub_parted; --- works again, because update is no longer replicated -UPDATE testpub_parted2 SET a = 2; -DROP TABLE testpub_parted1, testpub_parted2; -DROP PUBLICATION testpub_forparted, testpub_forparted1; --- Tests for row filters -CREATE TABLE testpub_rf_tbl1 (a integer, b text); -CREATE TABLE testpub_rf_tbl2 (c text, d integer); -CREATE TABLE testpub_rf_tbl3 (e integer); -CREATE TABLE testpub_rf_tbl4 (g text); -CREATE TABLE testpub_rf_tbl5 (a xml); -CREATE SCHEMA testpub_rf_schema1; -CREATE TABLE testpub_rf_schema1.testpub_rf_tbl5 (h integer); -CREATE SCHEMA testpub_rf_schema2; -CREATE TABLE testpub_rf_schema2.testpub_rf_tbl6 (i integer); -SET client_min_messages = 'ERROR'; --- Firstly, test using the option publish='insert' because the row filter --- validation of referenced columns is less strict than for delete/update. -CREATE PUBLICATION testpub5 FOR TABLE testpub_rf_tbl1, testpub_rf_tbl2 WHERE (c <> 'test' AND d < 5) WITH (publish = 'insert'); -RESET client_min_messages; -\dRp+ testpub5 - Publication testpub5 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - regress_publication_user | f | t | f | f | f | f | f -Tables: - "public.testpub_rf_tbl1" - "public.testpub_rf_tbl2" WHERE ((c <> 'test'::text) AND (d < 5)) - -\d testpub_rf_tbl3 - Table "public.testpub_rf_tbl3" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - e | integer | | | - -ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl3 WHERE (e > 1000 AND e < 2000); -\dRp+ testpub5 - Publication testpub5 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - regress_publication_user | f | t | f | f | f | f | f -Tables: - "public.testpub_rf_tbl1" - "public.testpub_rf_tbl2" WHERE ((c <> 'test'::text) AND (d < 5)) - "public.testpub_rf_tbl3" WHERE ((e > 1000) AND (e < 2000)) - -\d testpub_rf_tbl3 - Table "public.testpub_rf_tbl3" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - e | integer | | | -Publications: - "testpub5" WHERE ((e > 1000) AND (e < 2000)) - -ALTER PUBLICATION testpub5 DROP TABLE testpub_rf_tbl2; -\dRp+ testpub5 - Publication testpub5 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - regress_publication_user | f | t | f | f | f | f | f -Tables: - "public.testpub_rf_tbl1" - "public.testpub_rf_tbl3" WHERE ((e > 1000) AND (e < 2000)) - --- remove testpub_rf_tbl1 and add testpub_rf_tbl3 again (another WHERE expression) -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e > 300 AND e < 500); -\dRp+ testpub5 - Publication testpub5 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - regress_publication_user | f | t | f | f | f | f | f -Tables: - "public.testpub_rf_tbl3" WHERE ((e > 300) AND (e < 500)) - -\d testpub_rf_tbl3 - Table "public.testpub_rf_tbl3" - Column | Type | Collation | Nullable | Default 
---------+---------+-----------+----------+--------- - e | integer | | | -Publications: - "testpub5" WHERE ((e > 300) AND (e < 500)) - --- test \d (now it displays filter information) -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_rf_yes FOR TABLE testpub_rf_tbl1 WHERE (a > 1) WITH (publish = 'insert'); -CREATE PUBLICATION testpub_rf_no FOR TABLE testpub_rf_tbl1; -RESET client_min_messages; -\d testpub_rf_tbl1 - Table "public.testpub_rf_tbl1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | text | | | -Publications: - "testpub_rf_no" - "testpub_rf_yes" WHERE (a > 1) - -DROP PUBLICATION testpub_rf_yes, testpub_rf_no; --- some more syntax tests to exercise other parser pathways -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_syntax1 FOR TABLE testpub_rf_tbl1, ONLY testpub_rf_tbl3 WHERE (e < 999) WITH (publish = 'insert'); -RESET client_min_messages; -\dRp+ testpub_syntax1 - Publication testpub_syntax1 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - regress_publication_user | f | t | f | f | f | f | f -Tables: - "public.testpub_rf_tbl1" - "public.testpub_rf_tbl3" WHERE (e < 999) - -DROP PUBLICATION testpub_syntax1; -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_syntax2 FOR TABLE testpub_rf_tbl1, testpub_rf_schema1.testpub_rf_tbl5 WHERE (h < 999) WITH (publish = 'insert'); -RESET client_min_messages; -\dRp+ testpub_syntax2 - Publication testpub_syntax2 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - regress_publication_user | f | t | f | f | f | f | f -Tables: - "public.testpub_rf_tbl1" - "testpub_rf_schema1.testpub_rf_tbl5" WHERE (h < 999) - -DROP PUBLICATION testpub_syntax2; --- fail - schemas don't allow WHERE clause -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_syntax3 FOR TABLES IN SCHEMA testpub_rf_schema1 WHERE (a = 123); -ERROR: syntax error at or near "WHERE" -LINE 1: ...b_syntax3 FOR TABLES IN SCHEMA testpub_rf_schema1 WHERE (a =... - ^ -CREATE PUBLICATION testpub_syntax3 FOR TABLES IN SCHEMA testpub_rf_schema1, testpub_rf_schema1 WHERE (a = 123); -ERROR: WHERE clause not allowed for schema -LINE 1: ..._syntax3 FOR TABLES IN SCHEMA testpub_rf_schema1, testpub_rf... 
- ^ -RESET client_min_messages; --- fail - duplicate tables are not allowed if that table has any WHERE clause -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub_dups FOR TABLE testpub_rf_tbl1 WHERE (a = 1), testpub_rf_tbl1 WITH (publish = 'insert'); -ERROR: conflicting or redundant WHERE clauses for table "testpub_rf_tbl1" -CREATE PUBLICATION testpub_dups FOR TABLE testpub_rf_tbl1, testpub_rf_tbl1 WHERE (a = 2) WITH (publish = 'insert'); -ERROR: conflicting or redundant WHERE clauses for table "testpub_rf_tbl1" -RESET client_min_messages; --- fail - publication WHERE clause must be boolean -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (1234); -ERROR: argument of PUBLICATION WHERE must be type boolean, not type integer -LINE 1: ...PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (1234); - ^ --- fail - aggregate functions not allowed in WHERE clause -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e < AVG(e)); -ERROR: aggregate functions are not allowed in WHERE -LINE 1: ...ATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e < AVG(e)); - ^ --- fail - user-defined operators are not allowed -CREATE FUNCTION testpub_rf_func1(integer, integer) RETURNS boolean AS $$ SELECT hashint4($1) > $2 $$ LANGUAGE SQL; -CREATE OPERATOR =#> (PROCEDURE = testpub_rf_func1, LEFTARG = integer, RIGHTARG = integer); -CREATE PUBLICATION testpub6 FOR TABLE testpub_rf_tbl3 WHERE (e =#> 27); -ERROR: invalid publication WHERE expression -LINE 1: ...ICATION testpub6 FOR TABLE testpub_rf_tbl3 WHERE (e =#> 27); - ^ -DETAIL: User-defined operators are not allowed. --- fail - user-defined functions are not allowed -CREATE FUNCTION testpub_rf_func2() RETURNS integer AS $$ BEGIN RETURN 123; END; $$ LANGUAGE plpgsql; -ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a >= testpub_rf_func2()); -ERROR: invalid publication WHERE expression -LINE 1: ...ON testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a >= testpub_rf... - ^ -DETAIL: User-defined or built-in mutable functions are not allowed. --- fail - non-immutable functions are not allowed. random() is volatile. -ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a < random()); -ERROR: invalid publication WHERE expression -LINE 1: ...ION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a < random()); - ^ -DETAIL: User-defined or built-in mutable functions are not allowed. --- fail - user-defined collations are not allowed -CREATE COLLATION user_collation FROM "C"; -ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (b < '2' COLLATE user_collation); -ERROR: invalid publication WHERE expression -LINE 1: ...ICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (b < '2' CO... - ^ -DETAIL: User-defined collations are not allowed. 
--- ok - NULLIF is allowed -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (NULLIF(1,2) = a); --- ok - built-in operators are allowed -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (a IS NULL); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE ((a > 5) IS FALSE); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (a IS DISTINCT FROM 5); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE ((a, a + 1) < (2, 3)); --- ok - built-in type coercions between two binary compatible datatypes are allowed -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (b::varchar < '2'); --- ok - immutable built-in functions are allowed -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl4 WHERE (length(g) < 6); --- fail - user-defined types are not allowed -CREATE TYPE rf_bug_status AS ENUM ('new', 'open', 'closed'); -CREATE TABLE rf_bug (id serial, description text, status rf_bug_status); -CREATE PUBLICATION testpub6 FOR TABLE rf_bug WHERE (status = 'open') WITH (publish = 'insert'); -ERROR: invalid publication WHERE expression -LINE 1: ...EATE PUBLICATION testpub6 FOR TABLE rf_bug WHERE (status = '... - ^ -DETAIL: User-defined types are not allowed. -DROP TABLE rf_bug; -DROP TYPE rf_bug_status; --- fail - row filter expression is not simple -CREATE PUBLICATION testpub6 FOR TABLE testpub_rf_tbl1 WHERE (a IN (SELECT generate_series(1,5))); -ERROR: invalid publication WHERE expression -LINE 1: ...ICATION testpub6 FOR TABLE testpub_rf_tbl1 WHERE (a IN (SELE... - ^ -DETAIL: Only columns, constants, built-in operators, built-in data types, built-in collations, and immutable built-in functions are allowed. --- fail - system columns are not allowed -CREATE PUBLICATION testpub6 FOR TABLE testpub_rf_tbl1 WHERE ('(0,1)'::tid = ctid); -ERROR: invalid publication WHERE expression -LINE 1: ...tpub6 FOR TABLE testpub_rf_tbl1 WHERE ('(0,1)'::tid = ctid); - ^ -DETAIL: System columns are not allowed. 
--- ok - conditional expressions are allowed -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl5 WHERE (a IS DOCUMENT); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl5 WHERE (xmlexists('//foo[text() = ''bar'']' PASSING BY VALUE a)); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (NULLIF(1, 2) = a); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (CASE a WHEN 5 THEN true ELSE false END); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (COALESCE(b, 'foo') = 'foo'); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (GREATEST(a, 10) > 10); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (a IN (2, 4, 6)); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (ARRAY[a] <@ ARRAY[2, 4, 6]); -ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (ROW(a, 2) IS NULL); --- fail - WHERE not allowed in DROP -ALTER PUBLICATION testpub5 DROP TABLE testpub_rf_tbl1 WHERE (e < 27); -ERROR: cannot use a WHERE clause when removing a table from a publication --- fail - cannot ALTER SET table which is a member of a pre-existing schema -SET client_min_messages = 'ERROR'; -CREATE PUBLICATION testpub6 FOR TABLES IN SCHEMA testpub_rf_schema2; --- should be able to set publication with schema and table of the same schema -ALTER PUBLICATION testpub6 SET TABLES IN SCHEMA testpub_rf_schema2, TABLE testpub_rf_schema2.testpub_rf_tbl6 WHERE (i < 99); -RESET client_min_messages; -\dRp+ testpub6 - Publication testpub6 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root ---------------------------+------------+---------+---------+---------+-----------+-------------------+---------- - regress_publication_user | f | t | t | t | t | f | f -Tables: - "testpub_rf_schema2.testpub_rf_tbl6" WHERE (i < 99) -Tables from schemas: - "testpub_rf_schema2" - -DROP TABLE testpub_rf_tbl1; -DROP TABLE testpub_rf_tbl2; -DROP TABLE testpub_rf_tbl3; -DROP TABLE testpub_rf_tbl4; -DROP TABLE testpub_rf_tbl5; -DROP TABLE testpub_rf_schema1.testpub_rf_tbl5; -DROP TABLE testpub_rf_schema2.testpub_rf_tbl6; -DROP SCHEMA testpub_rf_schema1; -DROP SCHEMA testpub_rf_schema2; -DROP PUBLICATION testpub5; -DROP PUBLICATION testpub6; -DROP OPERATOR =#>(integer, integer); -DROP FUNCTION testpub_rf_func1(integer, integer); -DROP FUNCTION testpub_rf_func2(); -DROP COLLATION user_collation; --- ====================================================== --- More row filter tests for validating column references -CREATE TABLE rf_tbl_abcd_nopk(a int, b int, c int, d int); -CREATE TABLE rf_tbl_abcd_pk(a int, b int, c int, d int, PRIMARY KEY(a,b)); -CREATE TABLE rf_tbl_abcd_part_pk (a int PRIMARY KEY, b int) PARTITION by RANGE (a); -CREATE TABLE rf_tbl_abcd_part_pk_1 (b int, a int PRIMARY KEY); -ALTER TABLE rf_tbl_abcd_part_pk ATTACH PARTITION rf_tbl_abcd_part_pk_1 FOR VALUES FROM (1) TO (10); --- Case 1. REPLICA IDENTITY DEFAULT (means use primary key or nothing) --- 1a. REPLICA IDENTITY is DEFAULT and table has a PK. 
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub6 FOR TABLE rf_tbl_abcd_pk WHERE (a > 99);
-RESET client_min_messages;
--- ok - "a" is a PK col
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (b > 99);
--- ok - "b" is a PK col
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99);
--- fail - "c" is not part of the PK
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_pk"
-DETAIL:  Column used in the publication WHERE expression is not part of the replica identity.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (d > 99);
--- fail - "d" is not part of the PK
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_pk"
-DETAIL:  Column used in the publication WHERE expression is not part of the replica identity.
--- 1b. REPLICA IDENTITY is DEFAULT and table has no PK
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99);
--- fail - "a" is not part of REPLICA IDENTITY
-UPDATE rf_tbl_abcd_nopk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_nopk"
-DETAIL:  Column used in the publication WHERE expression is not part of the replica identity.
--- Case 2. REPLICA IDENTITY FULL
-ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY FULL;
-ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY FULL;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99);
--- ok - "c" is in REPLICA IDENTITY now even though not in PK
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99);
--- ok - "a" is in REPLICA IDENTITY now
-UPDATE rf_tbl_abcd_nopk SET a = 1;
--- Case 3. REPLICA IDENTITY NOTHING
-ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY NOTHING;
-ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY NOTHING;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (a > 99);
--- fail - "a" is in PK but it is not part of REPLICA IDENTITY NOTHING
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_pk"
-DETAIL:  Column used in the publication WHERE expression is not part of the replica identity.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99);
--- fail - "c" is not in PK and not in REPLICA IDENTITY NOTHING
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_pk"
-DETAIL:  Column used in the publication WHERE expression is not part of the replica identity.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99);
--- fail - "a" is not in REPLICA IDENTITY NOTHING
-UPDATE rf_tbl_abcd_nopk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_nopk"
-DETAIL:  Column used in the publication WHERE expression is not part of the replica identity.
--- Case 4. REPLICA IDENTITY INDEX
-ALTER TABLE rf_tbl_abcd_pk ALTER COLUMN c SET NOT NULL;
-CREATE UNIQUE INDEX idx_abcd_pk_c ON rf_tbl_abcd_pk(c);
-ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY USING INDEX idx_abcd_pk_c;
-ALTER TABLE rf_tbl_abcd_nopk ALTER COLUMN c SET NOT NULL;
-CREATE UNIQUE INDEX idx_abcd_nopk_c ON rf_tbl_abcd_nopk(c);
-ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY USING INDEX idx_abcd_nopk_c;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (a > 99);
--- fail - "a" is in PK but it is not part of REPLICA IDENTITY INDEX
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_pk"
-DETAIL:  Column used in the publication WHERE expression is not part of the replica identity.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99);
--- ok - "c" is not in PK but it is part of REPLICA IDENTITY INDEX
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99);
--- fail - "a" is not in REPLICA IDENTITY INDEX
-UPDATE rf_tbl_abcd_nopk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_nopk"
-DETAIL:  Column used in the publication WHERE expression is not part of the replica identity.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (c > 99);
--- ok - "c" is part of REPLICA IDENTITY INDEX
-UPDATE rf_tbl_abcd_nopk SET a = 1;
--- Tests for partitioned table
--- set PUBLISH_VIA_PARTITION_ROOT to false and test row filter for partitioned
--- table
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0);
--- fail - cannot use row filter for partitioned table
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk WHERE (a > 99);
-ERROR:  cannot use publication WHERE clause for relation "rf_tbl_abcd_part_pk"
-DETAIL:  WHERE clause cannot be used for a partitioned table when publish_via_partition_root is false.
--- ok - can use row filter for partition
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk_1 WHERE (a > 99);
--- ok - "a" is a PK col
-UPDATE rf_tbl_abcd_part_pk SET a = 1;
--- set PUBLISH_VIA_PARTITION_ROOT to true and test row filter for partitioned
--- table
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=1);
--- ok - can use row filter for partitioned table
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk WHERE (a > 99);
--- ok - "a" is a PK col
-UPDATE rf_tbl_abcd_part_pk SET a = 1;
--- fail - cannot set PUBLISH_VIA_PARTITION_ROOT to false if any row filter is
--- used for partitioned table
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0);
-ERROR:  cannot set parameter "publish_via_partition_root" to false for publication "testpub6"
-DETAIL:  The publication contains a WHERE clause for partitioned table "rf_tbl_abcd_part_pk", which is not allowed when "publish_via_partition_root" is false.
--- remove partitioned table's row filter
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk;
--- ok - we don't have row filter for partitioned table.
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0);
--- Now change the root filter to use a column "b"
--- (which is not in the replica identity)
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk_1 WHERE (b > 99);
--- ok - we don't have row filter for partitioned table.
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0);
--- fail - "b" is not in REPLICA IDENTITY INDEX
-UPDATE rf_tbl_abcd_part_pk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_part_pk_1"
-DETAIL:  Column used in the publication WHERE expression is not part of the replica identity.
--- set PUBLISH_VIA_PARTITION_ROOT to true
--- can use row filter for partitioned table
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=1);
--- ok - can use row filter for partitioned table
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk WHERE (b > 99);
--- fail - "b" is not in REPLICA IDENTITY INDEX
-UPDATE rf_tbl_abcd_part_pk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_part_pk_1"
-DETAIL:  Column used in the publication WHERE expression is not part of the replica identity.
-DROP PUBLICATION testpub6;
-DROP TABLE rf_tbl_abcd_pk;
-DROP TABLE rf_tbl_abcd_nopk;
-DROP TABLE rf_tbl_abcd_part_pk;
--- ======================================================
--- ======================================================
--- Tests with generated column
-SET client_min_messages = 'ERROR';
-CREATE TABLE testpub_gencol (a INT, b INT GENERATED ALWAYS AS (a + 1) STORED NOT NULL);
-CREATE UNIQUE INDEX testpub_gencol_idx ON testpub_gencol (b);
-ALTER TABLE testpub_gencol REPLICA IDENTITY USING index testpub_gencol_idx;
--- error - generated column "b" must be published explicitly as it is
--- part of the REPLICA IDENTITY index.
-CREATE PUBLICATION pub_gencol FOR TABLE testpub_gencol;
-UPDATE testpub_gencol SET a = 100 WHERE a = 1;
-ERROR:  cannot update table "testpub_gencol"
-DETAIL:  Replica identity must not contain unpublished generated columns.
--- error - generated column "b" must be published explicitly as it is
--- part of the REPLICA IDENTITY.
-ALTER TABLE testpub_gencol REPLICA IDENTITY FULL;
-UPDATE testpub_gencol SET a = 100 WHERE a = 1;
-ERROR:  cannot update table "testpub_gencol"
-DETAIL:  Replica identity must not contain unpublished generated columns.
-DROP PUBLICATION pub_gencol;
--- ok - generated column "b" is published explicitly
-CREATE PUBLICATION pub_gencol FOR TABLE testpub_gencol with (publish_generated_columns = true);
-UPDATE testpub_gencol SET a = 100 WHERE a = 1;
-DROP PUBLICATION pub_gencol;
-DROP TABLE testpub_gencol;
-RESET client_min_messages;
--- ======================================================
--- fail - duplicate tables are not allowed if that table has any column lists
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_dups FOR TABLE testpub_tbl1 (a), testpub_tbl1 WITH (publish = 'insert');
-ERROR:  conflicting or redundant column lists for table "testpub_tbl1"
-CREATE PUBLICATION testpub_dups FOR TABLE testpub_tbl1, testpub_tbl1 (a) WITH (publish = 'insert');
-ERROR:  conflicting or redundant column lists for table "testpub_tbl1"
-RESET client_min_messages;
--- test for column lists
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_fortable FOR TABLE testpub_tbl1;
-CREATE PUBLICATION testpub_fortable_insert WITH (publish = 'insert');
-RESET client_min_messages;
-CREATE TABLE testpub_tbl5 (a int PRIMARY KEY, b text, c text,
-    d int generated always as (a + length(b)) stored);
--- error: column "x" does not exist
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, x);
-ERROR:  column "x" of relation "testpub_tbl5" does not exist
--- error: replica identity "a" not included in the column list
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (b, c);
-UPDATE testpub_tbl5 SET a = 1;
-ERROR:  cannot update table "testpub_tbl5"
-DETAIL:  Column list used by the publication does not cover the replica identity.
-ALTER PUBLICATION testpub_fortable DROP TABLE testpub_tbl5;
--- error: system attributes "ctid" not allowed in column list
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, ctid);
-ERROR:  cannot use system column "ctid" in publication column list
-ALTER PUBLICATION testpub_fortable SET TABLE testpub_tbl1 (id, ctid);
-ERROR:  cannot use system column "ctid" in publication column list
--- error: duplicates not allowed in column list
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, a);
-ERROR:  duplicate column "a" in publication column list
-ALTER PUBLICATION testpub_fortable SET TABLE testpub_tbl5 (a, a);
-ERROR:  duplicate column "a" in publication column list
--- ok
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, c);
-ALTER TABLE testpub_tbl5 DROP COLUMN c; -- no dice
-ERROR:  cannot drop column c of table testpub_tbl5 because other objects depend on it
-DETAIL:  publication of table testpub_tbl5 in publication testpub_fortable depends on column c of table testpub_tbl5
-HINT:  Use DROP ... CASCADE to drop the dependent objects too.
--- ok: for insert-only publication, any column list is acceptable
-ALTER PUBLICATION testpub_fortable_insert ADD TABLE testpub_tbl5 (b, c);
-/* not all replica identities are good enough */
-CREATE UNIQUE INDEX testpub_tbl5_b_key ON testpub_tbl5 (b, c);
-ALTER TABLE testpub_tbl5 ALTER b SET NOT NULL, ALTER c SET NOT NULL;
-ALTER TABLE testpub_tbl5 REPLICA IDENTITY USING INDEX testpub_tbl5_b_key;
--- error: replica identity (b,c) is not covered by column list (a, c)
-UPDATE testpub_tbl5 SET a = 1;
-ERROR:  cannot update table "testpub_tbl5"
-DETAIL:  Column list used by the publication does not cover the replica identity.
-ALTER PUBLICATION testpub_fortable DROP TABLE testpub_tbl5;
--- ok: generated column "d" can be in the list too
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, d);
-ALTER PUBLICATION testpub_fortable DROP TABLE testpub_tbl5;
--- error: change the replica identity to "b", and column list to (a, c)
--- then update fails, because (a, c) does not cover replica identity
-ALTER TABLE testpub_tbl5 REPLICA IDENTITY USING INDEX testpub_tbl5_b_key;
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, c);
-UPDATE testpub_tbl5 SET a = 1;
-ERROR:  cannot update table "testpub_tbl5"
-DETAIL:  Column list used by the publication does not cover the replica identity.
-/* But if upd/del are not published, it works OK */
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_table_ins WITH (publish = 'insert, truncate');
-RESET client_min_messages;
-ALTER PUBLICATION testpub_table_ins ADD TABLE testpub_tbl5 (a); -- ok
-\dRp+ testpub_table_ins
-                                        Publication testpub_table_ins
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | f       | f       | t         | f                 | f
-Tables:
-    "public.testpub_tbl5" (a)
-
--- error: cannot work with deferrable primary keys
-CREATE TABLE testpub_tbl5d (a int PRIMARY KEY DEFERRABLE);
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5d;
-UPDATE testpub_tbl5d SET a = 1;
-ERROR:  cannot update table "testpub_tbl5d" because it does not have a replica identity and publishes updates
-HINT:  To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-/* but works fine with FULL replica identity */
-ALTER TABLE testpub_tbl5d REPLICA IDENTITY FULL;
-UPDATE testpub_tbl5d SET a = 1;
-DROP TABLE testpub_tbl5d;
--- tests with REPLICA IDENTITY FULL
-CREATE TABLE testpub_tbl6 (a int, b text, c text);
-ALTER TABLE testpub_tbl6 REPLICA IDENTITY FULL;
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl6 (a, b, c);
-UPDATE testpub_tbl6 SET a = 1;
-ERROR:  cannot update table "testpub_tbl6"
-DETAIL:  Column list used by the publication does not cover the replica identity.
-ALTER PUBLICATION testpub_fortable DROP TABLE testpub_tbl6;
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl6; -- ok
-UPDATE testpub_tbl6 SET a = 1;
--- make sure changing the column list is propagated to the catalog
-CREATE TABLE testpub_tbl7 (a int primary key, b text, c text);
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl7 (a, b);
-\d+ testpub_tbl7
-                                Table "public.testpub_tbl7"
- Column |  Type   | Collation | Nullable | Default | Storage  | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- a      | integer |           | not null |         | plain    |              |
- b      | text    |           |          |         | extended |              |
- c      | text    |           |          |         | extended |              |
-Indexes:
-    "testpub_tbl7_pkey" PRIMARY KEY, btree (a)
-Publications:
-    "testpub_fortable" (a, b)
-Not-null constraints:
-    "testpub_tbl7_a_not_null" NOT NULL "a"
-
--- ok: the column list is the same, we should skip this table (or at least not fail)
-ALTER PUBLICATION testpub_fortable SET TABLE testpub_tbl7 (a, b);
-\d+ testpub_tbl7
-                                Table "public.testpub_tbl7"
- Column |  Type   | Collation | Nullable | Default | Storage  | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- a      | integer |           | not null |         | plain    |              |
- b      | text    |           |          |         | extended |              |
- c      | text    |           |          |         | extended |              |
-Indexes:
-    "testpub_tbl7_pkey" PRIMARY KEY, btree (a)
-Publications:
-    "testpub_fortable" (a, b)
-Not-null constraints:
-    "testpub_tbl7_a_not_null" NOT NULL "a"
-
--- ok: the column list changes, make sure the catalog gets updated
-ALTER PUBLICATION testpub_fortable SET TABLE testpub_tbl7 (a, c);
-\d+ testpub_tbl7
-                                Table "public.testpub_tbl7"
- Column |  Type   | Collation | Nullable | Default | Storage  | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- a      | integer |           | not null |         | plain    |              |
- b      | text    |           |          |         | extended |              |
- c      | text    |           |          |         | extended |              |
-Indexes:
-    "testpub_tbl7_pkey" PRIMARY KEY, btree (a)
-Publications:
-    "testpub_fortable" (a, c)
-Not-null constraints:
-    "testpub_tbl7_a_not_null" NOT NULL "a"
-
--- column list for partitioned tables has to cover replica identities for
--- all child relations
-CREATE TABLE testpub_tbl8 (a int, b text, c text) PARTITION BY HASH (a);
--- first partition has replica identity "a"
-CREATE TABLE testpub_tbl8_0 PARTITION OF testpub_tbl8 FOR VALUES WITH (modulus 2, remainder 0);
-ALTER TABLE testpub_tbl8_0 ADD PRIMARY KEY (a);
-ALTER TABLE testpub_tbl8_0 REPLICA IDENTITY USING INDEX testpub_tbl8_0_pkey;
--- second partition has replica identity "b"
-CREATE TABLE testpub_tbl8_1 PARTITION OF testpub_tbl8 FOR VALUES WITH (modulus 2, remainder 1);
-ALTER TABLE testpub_tbl8_1 ADD PRIMARY KEY (b);
-ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY USING INDEX testpub_tbl8_1_pkey;
--- ok: column list covers both "a" and "b"
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_col_list FOR TABLE testpub_tbl8 (a, b) WITH (publish_via_partition_root = 'true');
-RESET client_min_messages;
--- ok: the same thing, but try plain ADD TABLE
-ALTER PUBLICATION testpub_col_list DROP TABLE testpub_tbl8;
-ALTER PUBLICATION testpub_col_list ADD TABLE testpub_tbl8 (a, b);
-UPDATE testpub_tbl8 SET a = 1;
--- failure: column list does not cover replica identity for the second partition
-ALTER PUBLICATION testpub_col_list DROP TABLE testpub_tbl8;
-ALTER PUBLICATION testpub_col_list ADD TABLE testpub_tbl8 (a, c);
-UPDATE testpub_tbl8 SET a = 1;
-ERROR:  cannot update table "testpub_tbl8_1"
-DETAIL:  Column list used by the publication does not cover the replica identity.
-ALTER PUBLICATION testpub_col_list DROP TABLE testpub_tbl8;
--- failure: one of the partitions has REPLICA IDENTITY FULL
-ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY FULL;
-ALTER PUBLICATION testpub_col_list ADD TABLE testpub_tbl8 (a, c);
-UPDATE testpub_tbl8 SET a = 1;
-ERROR:  cannot update table "testpub_tbl8_1"
-DETAIL:  Column list used by the publication does not cover the replica identity.
-ALTER PUBLICATION testpub_col_list DROP TABLE testpub_tbl8;
--- add table and then try changing replica identity
-ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY USING INDEX testpub_tbl8_1_pkey;
-ALTER PUBLICATION testpub_col_list ADD TABLE testpub_tbl8 (a, b);
--- failure: replica identity full can't be used with a column list
-ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY FULL;
-UPDATE testpub_tbl8 SET a = 1;
-ERROR:  cannot update table "testpub_tbl8_1"
-DETAIL:  Column list used by the publication does not cover the replica identity.
--- failure: replica identity has to be covered by the column list
-ALTER TABLE testpub_tbl8_1 DROP CONSTRAINT testpub_tbl8_1_pkey;
-ALTER TABLE testpub_tbl8_1 ADD PRIMARY KEY (c);
-ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY USING INDEX testpub_tbl8_1_pkey;
-UPDATE testpub_tbl8 SET a = 1;
-ERROR:  cannot update table "testpub_tbl8_1"
-DETAIL:  Column list used by the publication does not cover the replica identity.
-DROP TABLE testpub_tbl8;
--- column list for partitioned tables has to cover replica identities for
--- all child relations
-CREATE TABLE testpub_tbl8 (a int, b text, c text) PARTITION BY HASH (a);
-ALTER PUBLICATION testpub_col_list ADD TABLE testpub_tbl8 (a, b);
--- first partition has replica identity "a"
-CREATE TABLE testpub_tbl8_0 (a int, b text, c text);
-ALTER TABLE testpub_tbl8_0 ADD PRIMARY KEY (a);
-ALTER TABLE testpub_tbl8_0 REPLICA IDENTITY USING INDEX testpub_tbl8_0_pkey;
--- second partition has replica identity "c"
-CREATE TABLE testpub_tbl8_1 (a int, b text, c text);
-ALTER TABLE testpub_tbl8_1 ADD PRIMARY KEY (c);
-ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY USING INDEX testpub_tbl8_1_pkey;
--- ok: attaching first partition works, because (a) is in column list
-ALTER TABLE testpub_tbl8 ATTACH PARTITION testpub_tbl8_0 FOR VALUES WITH (modulus 2, remainder 0);
--- failure: second partition has replica identity (c), which is not in column list
-ALTER TABLE testpub_tbl8 ATTACH PARTITION testpub_tbl8_1 FOR VALUES WITH (modulus 2, remainder 1);
-UPDATE testpub_tbl8 SET a = 1;
-ERROR:  cannot update table "testpub_tbl8_1"
-DETAIL:  Column list used by the publication does not cover the replica identity.
--- failure: changing replica identity to FULL for partition fails, because
--- of the column list on the parent
-ALTER TABLE testpub_tbl8_0 REPLICA IDENTITY FULL;
-UPDATE testpub_tbl8 SET a = 1;
-ERROR:  cannot update table "testpub_tbl8_0"
-DETAIL:  Column list used by the publication does not cover the replica identity.
--- test that using column list for table is disallowed if any schemas are
--- part of the publication
-SET client_min_messages = 'ERROR';
--- failure - cannot use column list and schema together
-CREATE PUBLICATION testpub_tbl9 FOR TABLES IN SCHEMA public, TABLE public.testpub_tbl7(a);
-ERROR:  cannot use column list for relation "public.testpub_tbl7" in publication "testpub_tbl9"
-DETAIL:  Column lists cannot be specified in publications containing FOR TABLES IN SCHEMA elements.
--- ok - only publish schema
-CREATE PUBLICATION testpub_tbl9 FOR TABLES IN SCHEMA public;
--- failure - add a table with column list when there is already a schema in the
--- publication
-ALTER PUBLICATION testpub_tbl9 ADD TABLE public.testpub_tbl7(a);
-ERROR:  cannot use column list for relation "public.testpub_tbl7" in publication "testpub_tbl9"
-DETAIL:  Column lists cannot be specified in publications containing FOR TABLES IN SCHEMA elements.
--- ok - only publish table with column list
-ALTER PUBLICATION testpub_tbl9 SET TABLE public.testpub_tbl7(a);
--- failure - specify a schema when there is already a column list in the
--- publication
-ALTER PUBLICATION testpub_tbl9 ADD TABLES IN SCHEMA public;
-ERROR:  cannot add schema to publication "testpub_tbl9"
-DETAIL:  Schemas cannot be added if any tables that specify a column list are already part of the publication.
--- failure - cannot SET column list and schema together
-ALTER PUBLICATION testpub_tbl9 SET TABLES IN SCHEMA public, TABLE public.testpub_tbl7(a);
-ERROR:  cannot use column list for relation "public.testpub_tbl7" in publication "testpub_tbl9"
-DETAIL:  Column lists cannot be specified in publications containing FOR TABLES IN SCHEMA elements.
--- ok - drop table
-ALTER PUBLICATION testpub_tbl9 DROP TABLE public.testpub_tbl7;
--- failure - cannot ADD column list and schema together
-ALTER PUBLICATION testpub_tbl9 ADD TABLES IN SCHEMA public, TABLE public.testpub_tbl7(a);
-ERROR:  cannot use column list for relation "public.testpub_tbl7" in publication "testpub_tbl9"
-DETAIL:  Column lists cannot be specified in publications containing FOR TABLES IN SCHEMA elements.
-RESET client_min_messages;
-DROP TABLE testpub_tbl5, testpub_tbl6, testpub_tbl7, testpub_tbl8, testpub_tbl8_1;
-DROP PUBLICATION testpub_table_ins, testpub_fortable, testpub_fortable_insert, testpub_col_list, testpub_tbl9;
--- ======================================================
--- Test combination of column list and row filter
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_both_filters;
-RESET client_min_messages;
-CREATE TABLE testpub_tbl_both_filters (a int, b int, c int, PRIMARY KEY (a,c));
-ALTER TABLE testpub_tbl_both_filters REPLICA IDENTITY USING INDEX testpub_tbl_both_filters_pkey;
-ALTER PUBLICATION testpub_both_filters ADD TABLE testpub_tbl_both_filters (a,c) WHERE (c != 1);
-\dRp+ testpub_both_filters
-                                      Publication testpub_both_filters
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables:
-    "public.testpub_tbl_both_filters" (a, c) WHERE (c <> 1)
-
-\d+ testpub_tbl_both_filters
-                        Table "public.testpub_tbl_both_filters"
- Column |  Type   | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+---------+--------------+-------------
- a      | integer |           | not null |         | plain   |              |
- b      | integer |           |          |         | plain   |              |
- c      | integer |           | not null |         | plain   |              |
-Indexes:
-    "testpub_tbl_both_filters_pkey" PRIMARY KEY, btree (a, c) REPLICA IDENTITY
-Publications:
-    "testpub_both_filters" (a, c) WHERE (c <> 1)
-Not-null constraints:
-    "testpub_tbl_both_filters_a_not_null" NOT NULL "a"
-    "testpub_tbl_both_filters_c_not_null" NOT NULL "c"
-
-DROP TABLE testpub_tbl_both_filters;
-DROP PUBLICATION testpub_both_filters;
--- ======================================================
--- More column list tests for validating column references
-CREATE TABLE rf_tbl_abcd_nopk(a int, b int, c int, d int);
-CREATE TABLE rf_tbl_abcd_pk(a int, b int, c int, d int, PRIMARY KEY(a,b));
-CREATE TABLE rf_tbl_abcd_part_pk (a int PRIMARY KEY, b int) PARTITION by RANGE (a);
-CREATE TABLE rf_tbl_abcd_part_pk_1 (b int, a int PRIMARY KEY);
-ALTER TABLE rf_tbl_abcd_part_pk ATTACH PARTITION rf_tbl_abcd_part_pk_1 FOR VALUES FROM (1) TO (10);
--- Case 1. REPLICA IDENTITY DEFAULT (means use primary key or nothing)
--- 1a. REPLICA IDENTITY is DEFAULT and table has a PK.
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub6 FOR TABLE rf_tbl_abcd_pk (a, b);
-RESET client_min_messages;
--- ok - (a,b) covers all PK cols
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a, b, c);
--- ok - (a,b,c) covers all PK cols
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a);
--- fail - "b" is missing from the column list
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_pk"
-DETAIL:  Column list used by the publication does not cover the replica identity.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (b);
--- fail - "a" is missing from the column list
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_pk"
-DETAIL:  Column list used by the publication does not cover the replica identity.
--- 1b. REPLICA IDENTITY is DEFAULT and table has no PK
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk (a);
--- ok - there's no replica identity, so any column list works
--- note: it fails anyway, just a bit later because UPDATE requires RI
-UPDATE rf_tbl_abcd_nopk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_nopk" because it does not have a replica identity and publishes updates
-HINT:  To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
--- Case 2. REPLICA IDENTITY FULL
-ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY FULL;
-ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY FULL;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (c);
--- fail - with REPLICA IDENTITY FULL no column list is allowed
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_pk"
-DETAIL:  Column list used by the publication does not cover the replica identity.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk (a, b, c, d);
--- fail - with REPLICA IDENTITY FULL no column list is allowed
-UPDATE rf_tbl_abcd_nopk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_nopk"
-DETAIL:  Column list used by the publication does not cover the replica identity.
--- Case 3. REPLICA IDENTITY NOTHING
-ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY NOTHING;
-ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY NOTHING;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a);
--- ok - REPLICA IDENTITY NOTHING means all column lists are valid
--- it still fails later because without RI we can't replicate updates
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_pk" because it does not have a replica identity and publishes updates
-HINT:  To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a, b, c, d);
--- ok - REPLICA IDENTITY NOTHING means all column lists are valid
--- it still fails later because without RI we can't replicate updates
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_pk" because it does not have a replica identity and publishes updates
-HINT:  To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk (d);
--- ok - REPLICA IDENTITY NOTHING means all column lists are valid
--- it still fails later because without RI we can't replicate updates
-UPDATE rf_tbl_abcd_nopk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_nopk" because it does not have a replica identity and publishes updates
-HINT:  To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
--- Case 4. REPLICA IDENTITY INDEX
-ALTER TABLE rf_tbl_abcd_pk ALTER COLUMN c SET NOT NULL;
-CREATE UNIQUE INDEX idx_abcd_pk_c ON rf_tbl_abcd_pk(c);
-ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY USING INDEX idx_abcd_pk_c;
-ALTER TABLE rf_tbl_abcd_nopk ALTER COLUMN c SET NOT NULL;
-CREATE UNIQUE INDEX idx_abcd_nopk_c ON rf_tbl_abcd_nopk(c);
-ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY USING INDEX idx_abcd_nopk_c;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a);
--- fail - column list "a" does not cover the REPLICA IDENTITY INDEX on "c"
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_pk"
-DETAIL:  Column list used by the publication does not cover the replica identity.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (c);
--- ok - column list "c" does cover the REPLICA IDENTITY INDEX on "c"
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk (a);
--- fail - column list "a" does not cover the REPLICA IDENTITY INDEX on "c"
-UPDATE rf_tbl_abcd_nopk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_nopk"
-DETAIL:  Column list used by the publication does not cover the replica identity.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk (c);
--- ok - column list "c" does cover the REPLICA IDENTITY INDEX on "c"
-UPDATE rf_tbl_abcd_nopk SET a = 1;
--- Tests for partitioned table
--- set PUBLISH_VIA_PARTITION_ROOT to false and test column list for partitioned
--- table
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0);
--- fail - cannot use column list for partitioned table
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk (a);
-ERROR:  cannot use column list for relation "public.rf_tbl_abcd_part_pk" in publication "testpub6"
-DETAIL:  Column lists cannot be specified for partitioned tables when publish_via_partition_root is false.
--- ok - can use column list for partition
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk_1 (a);
--- ok - "a" is a PK col
-UPDATE rf_tbl_abcd_part_pk SET a = 1;
--- set PUBLISH_VIA_PARTITION_ROOT to true and test column list for partitioned
--- table
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=1);
--- ok - can use column list for partitioned table
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk (a);
--- ok - "a" is a PK col
-UPDATE rf_tbl_abcd_part_pk SET a = 1;
--- fail - cannot set PUBLISH_VIA_PARTITION_ROOT to false if any column list is
--- used for partitioned table
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0);
-ERROR:  cannot set parameter "publish_via_partition_root" to false for publication "testpub6"
-DETAIL:  The publication contains a column list for partitioned table "rf_tbl_abcd_part_pk", which is not allowed when "publish_via_partition_root" is false.
--- remove partitioned table's column list
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk;
--- ok - we don't have column list for partitioned table.
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0);
--- Now change the root column list to use a column "b"
--- (which is not in the replica identity)
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk_1 (b);
--- ok - we don't have column list for partitioned table.
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0);
--- fail - "b" is not in REPLICA IDENTITY INDEX
-UPDATE rf_tbl_abcd_part_pk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_part_pk_1"
-DETAIL:  Column list used by the publication does not cover the replica identity.
--- set PUBLISH_VIA_PARTITION_ROOT to true
--- can use column list for partitioned table
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=1);
--- ok - can use column list for partitioned table
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk (b);
--- fail - "b" is not in REPLICA IDENTITY INDEX
-UPDATE rf_tbl_abcd_part_pk SET a = 1;
-ERROR:  cannot update table "rf_tbl_abcd_part_pk_1"
-DETAIL:  Column list used by the publication does not cover the replica identity.
-DROP PUBLICATION testpub6;
-DROP TABLE rf_tbl_abcd_pk;
-DROP TABLE rf_tbl_abcd_nopk;
-DROP TABLE rf_tbl_abcd_part_pk;
--- ======================================================
--- Test cache invalidation FOR ALL TABLES publication
-SET client_min_messages = 'ERROR';
-CREATE TABLE testpub_tbl4(a int);
-INSERT INTO testpub_tbl4 values(1);
-UPDATE testpub_tbl4 set a = 2;
-CREATE PUBLICATION testpub_foralltables FOR ALL TABLES;
-RESET client_min_messages;
--- fail missing REPLICA IDENTITY
-UPDATE testpub_tbl4 set a = 3;
-ERROR:  cannot update table "testpub_tbl4" because it does not have a replica identity and publishes updates
-HINT:  To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-DROP PUBLICATION testpub_foralltables;
--- should pass after dropping the publication
-UPDATE testpub_tbl4 set a = 3;
-DROP TABLE testpub_tbl4;
--- fail - view
-CREATE PUBLICATION testpub_fortbl FOR TABLE testpub_view;
-ERROR:  cannot add relation "testpub_view" to publication
-DETAIL:  This operation is not supported for views.
-CREATE TEMPORARY TABLE testpub_temptbl(a int);
--- fail - temporary table
-CREATE PUBLICATION testpub_fortemptbl FOR TABLE testpub_temptbl;
-ERROR:  cannot add relation "testpub_temptbl" to publication
-DETAIL:  This operation is not supported for temporary tables.
-DROP TABLE testpub_temptbl;
-CREATE UNLOGGED TABLE testpub_unloggedtbl(a int);
--- fail - unlogged table
-CREATE PUBLICATION testpub_forunloggedtbl FOR TABLE testpub_unloggedtbl;
-ERROR:  cannot add relation "testpub_unloggedtbl" to publication
-DETAIL:  This operation is not supported for unlogged tables.
-DROP TABLE testpub_unloggedtbl;
--- fail - system table
-CREATE PUBLICATION testpub_forsystemtbl FOR TABLE pg_publication;
-ERROR:  cannot add relation "pg_publication" to publication
-DETAIL:  This operation is not supported for system tables.
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_fortbl FOR TABLE testpub_tbl1, pub_test.testpub_nopk;
-RESET client_min_messages;
--- fail - already added
-ALTER PUBLICATION testpub_fortbl ADD TABLE testpub_tbl1;
-ERROR:  relation "testpub_tbl1" is already member of publication "testpub_fortbl"
--- fail - already added
-CREATE PUBLICATION testpub_fortbl FOR TABLE testpub_tbl1;
-ERROR:  publication "testpub_fortbl" already exists
-\dRp+ testpub_fortbl
-                                         Publication testpub_fortbl
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables:
-    "pub_test.testpub_nopk"
-    "public.testpub_tbl1"
-
--- fail - view
-ALTER PUBLICATION testpub_default ADD TABLE testpub_view;
-ERROR:  cannot add relation "testpub_view" to publication
-DETAIL:  This operation is not supported for views.
-ALTER PUBLICATION testpub_default ADD TABLE testpub_tbl1;
-ALTER PUBLICATION testpub_default SET TABLE testpub_tbl1;
-ALTER PUBLICATION testpub_default ADD TABLE pub_test.testpub_nopk;
-ALTER PUBLICATION testpib_ins_trunct ADD TABLE pub_test.testpub_nopk, testpub_tbl1;
-\d+ pub_test.testpub_nopk
-                            Table "pub_test.testpub_nopk"
- Column |  Type   | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+---------+--------------+-------------
- foo    | integer |           |          |         | plain   |              |
- bar    | integer |           |          |         | plain   |              |
-Publications:
-    "testpib_ins_trunct"
-    "testpub_default"
-    "testpub_fortbl"
-
-\d+ testpub_tbl1
-                                                Table "public.testpub_tbl1"
- Column |  Type   | Collation | Nullable |                 Default                  | Storage  | Stats target | Description
---------+---------+-----------+----------+------------------------------------------+----------+--------------+-------------
- id     | integer |           | not null | nextval('testpub_tbl1_id_seq'::regclass) | plain    |              |
- data   | text    |           |          |                                          | extended |              |
-Indexes:
-    "testpub_tbl1_pkey" PRIMARY KEY, btree (id)
-Publications:
-    "testpib_ins_trunct"
-    "testpub_default"
-    "testpub_fortbl"
-Not-null constraints:
-    "testpub_tbl1_id_not_null" NOT NULL "id"
-
-\dRp+ testpub_default
-                                        Publication testpub_default
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | f         | f                 | f
-Tables:
-    "pub_test.testpub_nopk"
-    "public.testpub_tbl1"
-
-ALTER PUBLICATION testpub_default DROP TABLE testpub_tbl1, pub_test.testpub_nopk;
--- fail - nonexistent
-ALTER PUBLICATION testpub_default DROP TABLE pub_test.testpub_nopk;
-ERROR:  relation "testpub_nopk" is not part of the publication
-\d+ testpub_tbl1
-                                                Table "public.testpub_tbl1"
- Column |  Type   | Collation | Nullable |                 Default                  | Storage  | Stats target | Description
---------+---------+-----------+----------+------------------------------------------+----------+--------------+-------------
- id     | integer |           | not null | nextval('testpub_tbl1_id_seq'::regclass) | plain    |              |
- data   | text    |           |          |                                          | extended |              |
-Indexes:
-    "testpub_tbl1_pkey" PRIMARY KEY, btree (id)
-Publications:
-    "testpib_ins_trunct"
-    "testpub_fortbl"
-Not-null constraints:
-    "testpub_tbl1_id_not_null" NOT NULL "id"
-
--- verify relation cache invalidation when a primary key is added using
--- an existing index
-CREATE TABLE pub_test.testpub_addpk (id int not null, data int);
-ALTER PUBLICATION testpub_default ADD TABLE pub_test.testpub_addpk;
-INSERT INTO pub_test.testpub_addpk VALUES(1, 11);
-CREATE UNIQUE INDEX testpub_addpk_id_idx ON pub_test.testpub_addpk(id);
--- fail:
-UPDATE pub_test.testpub_addpk SET id = 2;
-ERROR:  cannot update table "testpub_addpk" because it does not have a replica identity and publishes updates
-HINT:  To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-ALTER TABLE pub_test.testpub_addpk ADD PRIMARY KEY USING INDEX testpub_addpk_id_idx;
--- now it should work:
-UPDATE pub_test.testpub_addpk SET id = 2;
-DROP TABLE pub_test.testpub_addpk;
--- permissions
-SET ROLE regress_publication_user2;
-CREATE PUBLICATION testpub2; -- fail
-ERROR:  permission denied for database regression
-SET ROLE regress_publication_user;
-GRANT CREATE ON DATABASE regression TO regress_publication_user2;
-SET ROLE regress_publication_user2;
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub2; -- ok
-CREATE PUBLICATION testpub3 FOR TABLES IN SCHEMA pub_test; -- fail
-ERROR:  must be superuser to create FOR TABLES IN SCHEMA publication
-CREATE PUBLICATION testpub3; -- ok
-RESET client_min_messages;
-ALTER PUBLICATION testpub2 ADD TABLE testpub_tbl1; -- fail
-ERROR:  must be owner of table testpub_tbl1
-ALTER PUBLICATION testpub3 ADD TABLES IN SCHEMA pub_test; -- fail
-ERROR:  must be superuser to add or set schemas
-SET ROLE regress_publication_user;
-GRANT regress_publication_user TO regress_publication_user2;
-SET ROLE regress_publication_user2;
-ALTER PUBLICATION testpub2 ADD TABLE testpub_tbl1; -- ok
-DROP PUBLICATION testpub2;
-DROP PUBLICATION testpub3;
-SET ROLE regress_publication_user;
-CREATE ROLE regress_publication_user3;
-GRANT regress_publication_user2 TO regress_publication_user3;
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub4 FOR TABLES IN SCHEMA pub_test;
-RESET client_min_messages;
-ALTER PUBLICATION testpub4 OWNER TO regress_publication_user3;
-SET ROLE regress_publication_user3;
--- fail - new owner must be superuser
-ALTER PUBLICATION testpub4 owner to regress_publication_user2; -- fail
-ERROR:  permission denied to change owner of publication "testpub4"
-HINT:  The owner of a FOR TABLES IN SCHEMA publication must be a superuser.
-ALTER PUBLICATION testpub4 owner to regress_publication_user; -- ok
-SET ROLE regress_publication_user;
-DROP PUBLICATION testpub4;
-DROP ROLE regress_publication_user3;
-REVOKE CREATE ON DATABASE regression FROM regress_publication_user2;
-DROP TABLE testpub_parted;
-DROP TABLE testpub_tbl1;
-\dRp+ testpub_default
-                                        Publication testpub_default
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | f         | f                 | f
-(1 row)
-
--- fail - must be owner of publication
-SET ROLE regress_publication_user_dummy;
-ALTER PUBLICATION testpub_default RENAME TO testpub_dummy;
-ERROR:  must be owner of publication testpub_default
-RESET ROLE;
-ALTER PUBLICATION testpub_default RENAME TO testpub_foo;
-\dRp testpub_foo
-                                                  List of publications
-    Name     |          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
--------------+--------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- testpub_foo | regress_publication_user | f          | t       | t       | t       | f         | f                 | f
-(1 row)
-
--- rename back to keep the rest simple
-ALTER PUBLICATION testpub_foo RENAME TO testpub_default;
-ALTER PUBLICATION testpub_default OWNER TO regress_publication_user2;
-\dRp testpub_default
-                                                    List of publications
-      Name       |           Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
------------------+---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- testpub_default | regress_publication_user2 | f          | t       | t       | t       | f         | f                 | f
-(1 row)
-
--- adding schemas and tables
-CREATE SCHEMA pub_test1;
-CREATE SCHEMA pub_test2;
-CREATE SCHEMA pub_test3;
-CREATE SCHEMA "CURRENT_SCHEMA";
-CREATE TABLE pub_test1.tbl (id int, data text);
-CREATE TABLE pub_test1.tbl1 (id serial primary key, data text);
-CREATE TABLE pub_test2.tbl1 (id serial primary key, data text);
-CREATE TABLE "CURRENT_SCHEMA"."CURRENT_SCHEMA"(id int);
--- suppress warning that depends on wal_level
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub1_forschema FOR TABLES IN SCHEMA pub_test1;
-\dRp+ testpub1_forschema
-                                       Publication testpub1_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables from schemas:
-    "pub_test1"
-
-CREATE PUBLICATION testpub2_forschema FOR TABLES IN SCHEMA pub_test1, pub_test2, pub_test3;
-\dRp+ testpub2_forschema
-                                       Publication testpub2_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables from schemas:
-    "pub_test1"
-    "pub_test2"
-    "pub_test3"
-
--- check create publication on CURRENT_SCHEMA
-CREATE PUBLICATION testpub3_forschema FOR TABLES IN SCHEMA CURRENT_SCHEMA;
-CREATE PUBLICATION testpub4_forschema FOR TABLES IN SCHEMA "CURRENT_SCHEMA";
-CREATE PUBLICATION testpub5_forschema FOR TABLES IN SCHEMA CURRENT_SCHEMA, "CURRENT_SCHEMA";
-CREATE PUBLICATION testpub6_forschema FOR TABLES IN SCHEMA "CURRENT_SCHEMA", CURRENT_SCHEMA;
-CREATE PUBLICATION testpub_fortable FOR TABLE "CURRENT_SCHEMA"."CURRENT_SCHEMA";
-RESET client_min_messages;
-\dRp+ testpub3_forschema
-                                       Publication testpub3_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables from schemas:
-    "public"
-
-\dRp+ testpub4_forschema
-                                       Publication testpub4_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables from schemas:
-    "CURRENT_SCHEMA"
-
-\dRp+ testpub5_forschema
-                                       Publication testpub5_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables from schemas:
-    "CURRENT_SCHEMA"
-    "public"
-
-\dRp+ testpub6_forschema
-                                       Publication testpub6_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables from schemas:
-    "CURRENT_SCHEMA"
-    "public"
-
-\dRp+ testpub_fortable
-                                        Publication testpub_fortable
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables:
-    "CURRENT_SCHEMA.CURRENT_SCHEMA"
-
--- check create publication on CURRENT_SCHEMA where search_path is not set
-SET SEARCH_PATH='';
-CREATE PUBLICATION testpub_forschema FOR TABLES IN SCHEMA CURRENT_SCHEMA;
-ERROR:  no schema has been selected for CURRENT_SCHEMA
-RESET SEARCH_PATH;
--- check create publication on CURRENT_SCHEMA where TABLE/TABLES in SCHEMA
--- is not specified
-CREATE PUBLICATION testpub_forschema1 FOR CURRENT_SCHEMA;
-ERROR:  invalid publication object list
-LINE 1: CREATE PUBLICATION testpub_forschema1 FOR CURRENT_SCHEMA;
-                                                  ^
-DETAIL:  One of TABLE or TABLES IN SCHEMA must be specified before a standalone table or schema name.
--- check create publication on CURRENT_SCHEMA along with FOR TABLE
-CREATE PUBLICATION testpub_forschema1 FOR TABLE CURRENT_SCHEMA;
-ERROR:  syntax error at or near "CURRENT_SCHEMA"
-LINE 1: CREATE PUBLICATION testpub_forschema1 FOR TABLE CURRENT_SCHE...
-                                                        ^
--- check create publication on a schema that does not exist
-CREATE PUBLICATION testpub_forschema FOR TABLES IN SCHEMA non_existent_schema;
-ERROR:  schema "non_existent_schema" does not exist
--- check create publication on a system schema
-CREATE PUBLICATION testpub_forschema FOR TABLES IN SCHEMA pg_catalog;
-ERROR:  cannot add schema "pg_catalog" to publication
-DETAIL:  This operation is not supported for system schemas.
--- check create publication on an object which is not a schema
-CREATE PUBLICATION testpub1_forschema1 FOR TABLES IN SCHEMA testpub_view;
-ERROR:  schema "testpub_view" does not exist
--- dropping the schema should reflect the change in publication
-DROP SCHEMA pub_test3;
-\dRp+ testpub2_forschema
-                                       Publication testpub2_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables from schemas:
-    "pub_test1"
-    "pub_test2"
-
--- renaming the schema should reflect the change in publication
-ALTER SCHEMA pub_test1 RENAME to pub_test1_renamed;
-\dRp+ testpub2_forschema
-                                       Publication testpub2_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables from schemas:
-    "pub_test1_renamed"
-    "pub_test2"
-
-ALTER SCHEMA pub_test1_renamed RENAME to pub_test1;
-\dRp+ testpub2_forschema
-                                       Publication testpub2_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables from schemas:
-    "pub_test1"
-    "pub_test2"
-
--- alter publication add schema
-ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA pub_test2;
-\dRp+ testpub1_forschema
-                                       Publication testpub1_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables from schemas:
-    "pub_test1"
-    "pub_test2"
-
--- add non-existent schema
-ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA non_existent_schema;
-ERROR:  schema "non_existent_schema" does not exist
-\dRp+ testpub1_forschema
-                                       Publication testpub1_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables from schemas:
-    "pub_test1"
-    "pub_test2"
-
--- add a schema which is already added to the publication
-ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA pub_test1;
-ERROR:  schema "pub_test1" is already member of publication "testpub1_forschema"
-\dRp+ testpub1_forschema
-                                       Publication testpub1_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables from schemas:
-    "pub_test1"
-    "pub_test2"
-
--- alter publication drop schema
-ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test2;
-\dRp+ testpub1_forschema
-                                       Publication testpub1_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables from schemas:
-    "pub_test1"
-
--- drop schema that is not present in the publication
-ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test2;
-ERROR:  tables from schema "pub_test2" are not part of the publication
-\dRp+ testpub1_forschema
-                                       Publication testpub1_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables from schemas:
-    "pub_test1"
-
--- drop a schema that does not exist in the system
-ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA non_existent_schema;
-ERROR:  schema "non_existent_schema" does not exist
-\dRp+ testpub1_forschema
-                                       Publication testpub1_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables from schemas:
-    "pub_test1"
-
--- drop all schemas
-ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test1;
-\dRp+ testpub1_forschema
-                                       Publication testpub1_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-(1 row)
-
--- alter publication set multiple schemas
-ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA pub_test1, pub_test2;
-\dRp+ testpub1_forschema
-                                       Publication testpub1_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables from schemas:
-    "pub_test1"
-    "pub_test2"
-
--- alter publication set non-existent schema
-ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA non_existent_schema;
-ERROR:  schema "non_existent_schema" does not exist
-\dRp+ testpub1_forschema
-                                       Publication testpub1_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables from schemas:
-    "pub_test1"
-    "pub_test2"
-
--- alter publication set with duplicate schemas should set the schemas after
--- removing the duplicate schemas
-ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA pub_test1, pub_test1;
-\dRp+ testpub1_forschema
-                                       Publication testpub1_forschema
-          Owner           | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f          | t       | t       | t       | t         | f                 | f
-Tables from schemas:
-    "pub_test1"
-
--- Verify that it fails to add a schema with a column specification
-ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA foo (a, b);
-ERROR:  syntax error at or near "("
-LINE 1: ...LICATION testpub1_forschema ADD TABLES IN SCHEMA foo (a, b);
-                                                                ^
-ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA foo, bar (a, b);
-ERROR:  column specification not allowed for schema
-LINE 1: ...TION testpub1_forschema ADD TABLES IN SCHEMA foo, bar (a, b)...
-                                                                 ^
--- cleanup pub_test1 schema for invalidation tests
-ALTER PUBLICATION testpub2_forschema DROP TABLES IN SCHEMA pub_test1;
-DROP PUBLICATION testpub3_forschema, testpub4_forschema, testpub5_forschema, testpub6_forschema, testpub_fortable;
-DROP SCHEMA "CURRENT_SCHEMA" CASCADE;
-NOTICE:  drop cascades to table "CURRENT_SCHEMA"."CURRENT_SCHEMA"
--- verify relation cache invalidations through update statement for the
--- default REPLICA IDENTITY on the relation; if the schema is part of the
--- publication then the update will fail because the relation's relreplident
--- option will be set; if the schema is not part of the publication then the
--- update will be successful.
-INSERT INTO pub_test1.tbl VALUES(1, 'test');
--- fail
-UPDATE pub_test1.tbl SET id = 2;
-ERROR:  cannot update table "tbl" because it does not have a replica identity and publishes updates
-HINT:  To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test1;
--- success
-UPDATE pub_test1.tbl SET id = 2;
-ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA pub_test1;
--- fail
-UPDATE pub_test1.tbl SET id = 2;
-ERROR:  cannot update table "tbl" because it does not have a replica identity and publishes updates
-HINT:  To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
--- verify invalidation of partition table having parent and child tables in
--- different schema
-CREATE SCHEMA pub_testpart1;
-CREATE SCHEMA pub_testpart2;
-CREATE TABLE pub_testpart1.parent1 (a int) partition by list (a);
-CREATE TABLE pub_testpart2.child_parent1 partition of pub_testpart1.parent1 for values in (1);
-INSERT INTO pub_testpart2.child_parent1 values(1);
-UPDATE pub_testpart2.child_parent1 set a = 1;
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpubpart_forschema FOR TABLES IN SCHEMA pub_testpart1;
-RESET client_min_messages;
--- fail
-UPDATE pub_testpart1.parent1 set a = 1;
-ERROR:  cannot update table "child_parent1" because it does not have a replica identity and publishes updates
-HINT:  To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-UPDATE pub_testpart2.child_parent1 set a = 1;
-ERROR:  cannot update table "child_parent1" because it does not have a replica identity and publishes updates
-HINT:  To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-DROP PUBLICATION testpubpart_forschema;
--- verify invalidation of partition tables for schema publication that has
--- parent and child tables of different partition hierarchies
-CREATE TABLE pub_testpart2.parent2 (a int) partition by list (a);
-CREATE TABLE pub_testpart1.child_parent2 partition of pub_testpart2.parent2 for values in (1);
-INSERT INTO pub_testpart1.child_parent2 values(1);
-UPDATE pub_testpart1.child_parent2 set a = 1;
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpubpart_forschema FOR TABLES IN SCHEMA pub_testpart2;
-RESET client_min_messages;
--- fail
-UPDATE pub_testpart2.child_parent1 set a = 1;
-ERROR:  cannot update table "child_parent1" because it does not have a replica identity and publishes updates
-HINT:  To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-UPDATE pub_testpart2.parent2 set a = 1;
-ERROR:  cannot update table "child_parent2" because it does not have a replica identity and publishes updates
-HINT:  To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-UPDATE pub_testpart1.child_parent2 set a = 1;
-ERROR: cannot update table "child_parent2" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
--- alter publication set 'TABLES IN SCHEMA' on an empty publication.
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub3_forschema;
-RESET client_min_messages;
-\dRp+ testpub3_forschema
- Publication testpub3_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-(1 row)
-
-ALTER PUBLICATION testpub3_forschema SET TABLES IN SCHEMA pub_test1;
-\dRp+ testpub3_forschema
- Publication testpub3_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "pub_test1"
-
--- create publication including both 'FOR TABLE' and 'FOR TABLES IN SCHEMA'
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_forschema_fortable FOR TABLES IN SCHEMA pub_test1, TABLE pub_test2.tbl1;
-CREATE PUBLICATION testpub_fortable_forschema FOR TABLE pub_test2.tbl1, TABLES IN SCHEMA pub_test1;
-RESET client_min_messages;
-\dRp+ testpub_forschema_fortable
- Publication testpub_forschema_fortable
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "pub_test2.tbl1"
-Tables from schemas:
- "pub_test1"
-
-\dRp+ testpub_fortable_forschema
- Publication testpub_fortable_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "pub_test2.tbl1"
-Tables from schemas:
- "pub_test1"
-
--- fail specifying table without any of 'FOR TABLES IN SCHEMA' or
---'FOR TABLE' or 'FOR ALL TABLES'
-CREATE PUBLICATION testpub_error FOR pub_test2.tbl1;
-ERROR: invalid publication object list
-LINE 1: CREATE PUBLICATION testpub_error FOR pub_test2.tbl1;
- ^
-DETAIL: One of TABLE or TABLES IN SCHEMA must be specified before a standalone table or schema name.
-DROP VIEW testpub_view;
-DROP PUBLICATION testpub_default;
-DROP PUBLICATION testpib_ins_trunct;
-DROP PUBLICATION testpub_fortbl;
-DROP PUBLICATION testpub1_forschema;
-DROP PUBLICATION testpub2_forschema;
-DROP PUBLICATION testpub3_forschema;
-DROP PUBLICATION testpub_forschema_fortable;
-DROP PUBLICATION testpub_fortable_forschema;
-DROP PUBLICATION testpubpart_forschema;
-DROP SCHEMA pub_test CASCADE;
-NOTICE: drop cascades to table pub_test.testpub_nopk
-DROP SCHEMA pub_test1 CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to table pub_test1.tbl
-drop cascades to table pub_test1.tbl1
-DROP SCHEMA pub_test2 CASCADE;
-NOTICE: drop cascades to table pub_test2.tbl1
-DROP SCHEMA pub_testpart1 CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to table pub_testpart1.parent1
-drop cascades to table pub_testpart1.child_parent2
-DROP SCHEMA pub_testpart2 CASCADE;
-NOTICE: drop cascades to table pub_testpart2.parent2
--- Test the list of partitions published with or without
--- 'PUBLISH_VIA_PARTITION_ROOT' parameter
-SET client_min_messages = 'ERROR';
-CREATE SCHEMA sch1;
-CREATE SCHEMA sch2;
-CREATE TABLE sch1.tbl1 (a int) PARTITION BY RANGE(a);
-CREATE TABLE sch2.tbl1_part1 PARTITION OF sch1.tbl1 FOR VALUES FROM (1) to (10);
--- Schema publication that does not include the schema that has the parent table
-CREATE PUBLICATION pub FOR TABLES IN SCHEMA sch2 WITH (PUBLISH_VIA_PARTITION_ROOT=1);
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+------------+----------+-----------
- pub | sch2 | tbl1_part1 | {a} |
-(1 row)
-
-DROP PUBLICATION pub;
--- Table publication that does not include the parent table
-CREATE PUBLICATION pub FOR TABLE sch2.tbl1_part1 WITH (PUBLISH_VIA_PARTITION_ROOT=1);
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+------------+----------+-----------
- pub | sch2 | tbl1_part1 | {a} |
-(1 row)
-
--- Table publication that includes both the parent table and the child table
-ALTER PUBLICATION pub ADD TABLE sch1.tbl1;
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+-----------+----------+-----------
- pub | sch1 | tbl1 | {a} |
-(1 row)
-
-DROP PUBLICATION pub;
--- Schema publication that does not include the schema that has the parent table
-CREATE PUBLICATION pub FOR TABLES IN SCHEMA sch2 WITH (PUBLISH_VIA_PARTITION_ROOT=0);
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+------------+----------+-----------
- pub | sch2 | tbl1_part1 | {a} |
-(1 row)
-
-DROP PUBLICATION pub;
--- Table publication that does not include the parent table
-CREATE PUBLICATION pub FOR TABLE sch2.tbl1_part1 WITH (PUBLISH_VIA_PARTITION_ROOT=0);
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+------------+----------+-----------
- pub | sch2 | tbl1_part1 | {a} |
-(1 row)
-
--- Table publication that includes both the parent table and the child table
-ALTER PUBLICATION pub ADD TABLE sch1.tbl1;
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+------------+----------+-----------
- pub | sch2 | tbl1_part1 | {a} |
-(1 row)
-
-DROP PUBLICATION pub;
-DROP TABLE sch2.tbl1_part1;
-DROP TABLE sch1.tbl1;
-CREATE TABLE sch1.tbl1 (a int) PARTITION BY RANGE(a);
-CREATE TABLE sch1.tbl1_part1 PARTITION OF sch1.tbl1 FOR VALUES FROM (1) to (10);
-CREATE TABLE sch1.tbl1_part2 PARTITION OF sch1.tbl1 FOR VALUES FROM (10) to (20);
-CREATE TABLE sch1.tbl1_part3 (a int) PARTITION BY RANGE(a);
-ALTER TABLE sch1.tbl1 ATTACH PARTITION sch1.tbl1_part3 FOR VALUES FROM (20) to (30);
-CREATE PUBLICATION pub FOR TABLES IN SCHEMA sch1 WITH (PUBLISH_VIA_PARTITION_ROOT=1);
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+-----------+----------+-----------
- pub | sch1 | tbl1 | {a} |
-(1 row)
-
-RESET client_min_messages;
-DROP PUBLICATION pub;
-DROP TABLE sch1.tbl1;
-DROP SCHEMA sch1 cascade;
-DROP SCHEMA sch2 cascade;
--- ======================================================
--- Test the publication 'publish_generated_columns' parameter enabled or disabled
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION pub1 FOR ALL TABLES WITH (publish_generated_columns=1);
-\dRp+ pub1
- Publication pub1
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | t | t | t | t | t | t | f
-(1 row)
-
-CREATE PUBLICATION pub2 FOR ALL TABLES WITH (publish_generated_columns=0);
-\dRp+ pub2
- Publication pub2
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | t | t | t | t | t | f | f
-(1 row)
-
-DROP PUBLICATION pub1;
-DROP PUBLICATION pub2;
--- Test the 'publish_generated_columns' parameter enabled or disabled for
--- different scenarios with/without generated columns in column lists.
-CREATE TABLE gencols (a int, gen1 int GENERATED ALWAYS AS (a * 2) STORED);
--- Generated columns in column list, when 'publish_generated_columns'=false
-CREATE PUBLICATION pub1 FOR table gencols(a, gen1) WITH (publish_generated_columns=false);
-\dRp+ pub1
- Publication pub1
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "public.gencols" (a, gen1)
-
--- Generated columns in column list, when 'publish_generated_columns'=true
-CREATE PUBLICATION pub2 FOR table gencols(a, gen1) WITH (publish_generated_columns=true);
-\dRp+ pub2
- Publication pub2
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | t | f
-Tables:
- "public.gencols" (a, gen1)
-
--- Generated columns in column list, then set 'publication_generate_columns'=false
-ALTER PUBLICATION pub2 SET (publish_generated_columns = false);
-\dRp+ pub2
- Publication pub2
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "public.gencols" (a, gen1)
-
--- Remove generated columns from column list, when 'publish_generated_columns'=false
-ALTER PUBLICATION pub2 SET TABLE gencols(a);
-\dRp+ pub2
- Publication pub2
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "public.gencols" (a)
-
--- Add generated columns in column list, when 'publish_generated_columns'=false
-ALTER PUBLICATION pub2 SET TABLE gencols(a, gen1);
-\dRp+ pub2
- Publication pub2
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "public.gencols" (a, gen1)
-
-DROP PUBLICATION pub1;
-DROP PUBLICATION pub2;
-DROP TABLE gencols;
-RESET client_min_messages;
-RESET SESSION AUTHORIZATION;
-DROP ROLE regress_publication_user, regress_publication_user2;
-DROP ROLE regress_publication_user_dummy;
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /Users/admin/pgsql/src/test/regress/expected/subscription.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/subscription.out
--- /Users/admin/pgsql/src/test/regress/expected/subscription.out 2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/subscription.out 2024-12-13 13:20:11
@@ -1,482 +1,2 @@
---
--- SUBSCRIPTION
---
-CREATE ROLE regress_subscription_user LOGIN SUPERUSER;
-CREATE ROLE regress_subscription_user2;
-CREATE ROLE regress_subscription_user3 IN ROLE pg_create_subscription;
-CREATE ROLE regress_subscription_user_dummy LOGIN NOSUPERUSER;
-SET SESSION AUTHORIZATION 'regress_subscription_user';
--- fail - no publications
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'foo';
-ERROR: syntax error at or near ";"
-LINE 1: CREATE SUBSCRIPTION regress_testsub CONNECTION 'foo';
- ^
--- fail - no connection
-CREATE SUBSCRIPTION regress_testsub PUBLICATION foo;
-ERROR: syntax error at or near "PUBLICATION"
-LINE 1: CREATE SUBSCRIPTION regress_testsub PUBLICATION foo;
- ^
--- fail - cannot do CREATE SUBSCRIPTION CREATE SLOT inside transaction block
-BEGIN;
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'testconn' PUBLICATION testpub WITH (create_slot);
-ERROR: CREATE SUBSCRIPTION ... WITH (create_slot = true) cannot run inside a transaction block
-COMMIT;
--- fail - invalid connection string
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'testconn' PUBLICATION testpub;
-ERROR: invalid connection string syntax: missing "=" after "testconn" in connection info string
-
--- fail - duplicate publications
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION foo, testpub, foo WITH (connect = false);
-ERROR: publication name "foo" used more than once
--- ok
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-COMMENT ON SUBSCRIPTION regress_testsub IS 'test subscription';
-SELECT obj_description(s.oid, 'pg_subscription') FROM pg_subscription s;
- obj_description
-------------------
- test subscription
-(1 row)
-
--- Check if the subscription stats are created and stats_reset is updated
--- by pg_stat_reset_subscription_stats().
-SELECT subname, stats_reset IS NULL stats_reset_is_null FROM pg_stat_subscription_stats WHERE subname = 'regress_testsub';
- subname | stats_reset_is_null
------------------+---------------------
- regress_testsub | t
-(1 row)
-
-SELECT pg_stat_reset_subscription_stats(oid) FROM pg_subscription WHERE subname = 'regress_testsub';
- pg_stat_reset_subscription_stats
----------------------------------
- 
-(1 row)
-
-SELECT subname, stats_reset IS NULL stats_reset_is_null FROM pg_stat_subscription_stats WHERE subname = 'regress_testsub';
- subname | stats_reset_is_null
------------------+---------------------
- regress_testsub | f
-(1 row)
-
--- Reset the stats again and check if the new reset_stats is updated.
-SELECT stats_reset as prev_stats_reset FROM pg_stat_subscription_stats WHERE subname = 'regress_testsub' \gset
-SELECT pg_stat_reset_subscription_stats(oid) FROM pg_subscription WHERE subname = 'regress_testsub';
- pg_stat_reset_subscription_stats
----------------------------------
- 
-(1 row)
-
-SELECT :'prev_stats_reset' < stats_reset FROM pg_stat_subscription_stats WHERE subname = 'regress_testsub';
- ?column?
----------
- t
-(1 row)
-
--- fail - name already exists
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false);
-ERROR: subscription "regress_testsub" already exists
--- fail - must be superuser
-SET SESSION AUTHORIZATION 'regress_subscription_user2';
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION foo WITH (connect = false);
-ERROR: permission denied to create subscription
-DETAIL: Only roles with privileges of the "pg_create_subscription" role may create subscriptions.
-SET SESSION AUTHORIZATION 'regress_subscription_user';
--- fail - invalid option combinations
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, copy_data = true);
-ERROR: connect = false and copy_data = true are mutually exclusive options
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, enabled = true);
-ERROR: connect = false and enabled = true are mutually exclusive options
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, create_slot = true);
-ERROR: connect = false and create_slot = true are mutually exclusive options
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, enabled = true);
-ERROR: slot_name = NONE and enabled = true are mutually exclusive options
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, enabled = false, create_slot = true);
-ERROR: slot_name = NONE and create_slot = true are mutually exclusive options
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE);
-ERROR: subscription with slot_name = NONE must also set enabled = false
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, enabled = false);
-ERROR: subscription with slot_name = NONE must also set create_slot = false
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, create_slot = false);
-ERROR: subscription with slot_name = NONE must also set enabled = false
--- ok - with slot_name = NONE
-CREATE SUBSCRIPTION regress_testsub3 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, connect = false);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
--- fail
-ALTER SUBSCRIPTION regress_testsub3 ENABLE;
-ERROR: cannot enable subscription that does not have a slot name
-ALTER SUBSCRIPTION regress_testsub3 REFRESH PUBLICATION;
-ERROR: ALTER SUBSCRIPTION ... REFRESH is not allowed for disabled subscriptions
--- fail - origin must be either none or any
-CREATE SUBSCRIPTION regress_testsub4 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, connect = false, origin = foo);
-ERROR: unrecognized origin value: "foo"
--- now it works
-CREATE SUBSCRIPTION regress_testsub4 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, connect = false, origin = none);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-\dRs+ regress_testsub4
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
-------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub4 | regress_subscription_user | f | {testpub} | f | parallel | d | f | none | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub4 SET (origin = any);
-\dRs+ regress_testsub4
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
-------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub4 | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-DROP SUBSCRIPTION regress_testsub3;
-DROP SUBSCRIPTION regress_testsub4;
--- fail, connection string does not parse
-CREATE SUBSCRIPTION regress_testsub5 CONNECTION 'i_dont_exist=param' PUBLICATION testpub;
-ERROR: invalid connection string syntax: invalid connection option "i_dont_exist"
-
--- fail, connection string parses, but doesn't work (and does so without
--- connecting, so this is reliable and safe)
-CREATE SUBSCRIPTION regress_testsub5 CONNECTION 'port=-1' PUBLICATION testpub;
-ERROR: subscription "regress_testsub5" could not connect to the publisher: invalid port number: "-1"
--- fail - invalid connection string during ALTER
-ALTER SUBSCRIPTION regress_testsub CONNECTION 'foobar';
-ERROR: invalid connection string syntax: missing "=" after "foobar" in connection info string
-
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET PUBLICATION testpub2, testpub3 WITH (refresh = false);
-ALTER SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist2';
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = 'newname');
-ALTER SUBSCRIPTION regress_testsub SET (password_required = false);
-ALTER SUBSCRIPTION regress_testsub SET (run_as_owner = true);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | f | t | f | off | dbname=regress_doesnotexist2 | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (password_required = true);
-ALTER SUBSCRIPTION regress_testsub SET (run_as_owner = false);
--- fail
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = '');
-ERROR: replication slot name "" is too short
--- fail
-ALTER SUBSCRIPTION regress_doesnotexist CONNECTION 'dbname=regress_doesnotexist2';
-ERROR: subscription "regress_doesnotexist" does not exist
-ALTER SUBSCRIPTION regress_testsub SET (create_slot = false);
-ERROR: unrecognized subscription parameter: "create_slot"
--- ok
-ALTER SUBSCRIPTION regress_testsub SKIP (lsn = '0/12345');
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist2 | 0/12345
-(1 row)
-
--- ok - with lsn = NONE
-ALTER SUBSCRIPTION regress_testsub SKIP (lsn = NONE);
--- fail
-ALTER SUBSCRIPTION regress_testsub SKIP (lsn = '0/0');
-ERROR: invalid WAL location (LSN): 0/0
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist2 | 0/0
-(1 row)
-
-BEGIN;
-ALTER SUBSCRIPTION regress_testsub ENABLE;
-\dRs
- List of subscriptions
- Name | Owner | Enabled | Publication
------------------+---------------------------+---------+---------------------
- regress_testsub | regress_subscription_user | t | {testpub2,testpub3}
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub DISABLE;
-\dRs
- List of subscriptions
- Name | Owner | Enabled | Publication
------------------+---------------------------+---------+---------------------
- regress_testsub | regress_subscription_user | f | {testpub2,testpub3}
-(1 row)
-
-COMMIT;
--- fail - must be owner of subscription
-SET ROLE regress_subscription_user_dummy;
-ALTER SUBSCRIPTION regress_testsub RENAME TO regress_testsub_dummy;
-ERROR: must be owner of subscription regress_testsub
-RESET ROLE;
-ALTER SUBSCRIPTION regress_testsub RENAME TO regress_testsub_foo;
-ALTER SUBSCRIPTION regress_testsub_foo SET (synchronous_commit = local);
-ALTER SUBSCRIPTION regress_testsub_foo SET (synchronous_commit = foobar);
-ERROR: invalid value for parameter "synchronous_commit": "foobar"
-HINT: Available values: local, remote_write, remote_apply, on, off.
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
----------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------------+----------
- regress_testsub_foo | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | t | f | f | local | dbname=regress_doesnotexist2 | 0/0
-(1 row)
-
--- rename back to keep the rest simple
-ALTER SUBSCRIPTION regress_testsub_foo RENAME TO regress_testsub;
--- ok, we're a superuser
-ALTER SUBSCRIPTION regress_testsub OWNER TO regress_subscription_user2;
--- fail - cannot do DROP SUBSCRIPTION inside transaction block with slot name
-BEGIN;
-DROP SUBSCRIPTION regress_testsub;
-ERROR: DROP SUBSCRIPTION cannot run inside a transaction block
-COMMIT;
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
--- now it works
-BEGIN;
-DROP SUBSCRIPTION regress_testsub;
-COMMIT;
-DROP SUBSCRIPTION IF EXISTS regress_testsub;
-NOTICE: subscription "regress_testsub" does not exist, skipping
-DROP SUBSCRIPTION regress_testsub; -- fail
-ERROR: subscription "regress_testsub" does not exist
--- fail - binary must be boolean
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, binary = foo);
-ERROR: binary requires a Boolean value
--- now it works
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, binary = true);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | t | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (binary = false);
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-DROP SUBSCRIPTION regress_testsub;
--- fail - streaming must be boolean or 'parallel'
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, streaming = foo);
-ERROR: streaming requires a Boolean value or "parallel"
--- now it works
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, streaming = true);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | on | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (streaming = parallel);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (streaming = false);
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
--- fail - publication already exists
-ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub WITH (refresh = false);
-ERROR: publication "testpub" is already in subscription "regress_testsub"
--- fail - publication used more than once
-ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub1, testpub1 WITH (refresh = false);
-ERROR: publication name "testpub1" used more than once
--- ok - add two publications into subscription
-ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub1, testpub2 WITH (refresh = false);
--- fail - publications already exist
-ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub1, testpub2 WITH (refresh = false);
-ERROR: publication "testpub1" is already in subscription "regress_testsub"
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-----------------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub,testpub1,testpub2} | f | off | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
--- fail - publication used more than once
-ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub1, testpub1 WITH (refresh = false);
-ERROR: publication name "testpub1" used more than once
--- fail - all publications are deleted
-ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub, testpub1, testpub2 WITH (refresh = false);
-ERROR: cannot drop all the publications from a subscription
--- fail - publication does not exist in subscription
-ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub3 WITH (refresh = false);
-ERROR: publication "testpub3" is not in subscription "regress_testsub"
--- ok - delete publications
-ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub1, testpub2 WITH (refresh = false);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-DROP SUBSCRIPTION regress_testsub;
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION mypub
- WITH (connect = false, create_slot = false, copy_data = false);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-ALTER SUBSCRIPTION regress_testsub ENABLE;
--- fail - ALTER SUBSCRIPTION with refresh is not allowed in a transaction
--- block or function
-BEGIN;
-ALTER SUBSCRIPTION regress_testsub SET PUBLICATION mypub WITH (refresh = true);
-ERROR: ALTER SUBSCRIPTION with refresh cannot run inside a transaction block
-END;
-BEGIN;
-ALTER SUBSCRIPTION regress_testsub REFRESH PUBLICATION;
-ERROR: ALTER SUBSCRIPTION ... REFRESH cannot run inside a transaction block
-END;
-CREATE FUNCTION func() RETURNS VOID AS
-$$ ALTER SUBSCRIPTION regress_testsub SET PUBLICATION mypub WITH (refresh = true) $$ LANGUAGE SQL;
-SELECT func();
-ERROR: ALTER SUBSCRIPTION with refresh cannot be executed from a function
-CONTEXT: SQL function "func" statement 1
-ALTER SUBSCRIPTION regress_testsub DISABLE;
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-DROP SUBSCRIPTION regress_testsub;
-DROP FUNCTION func;
--- fail - two_phase must be boolean
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, two_phase = foo);
-ERROR: two_phase requires a Boolean value
--- now it works
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, two_phase = true);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | p | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
--- we can alter streaming when two_phase enabled
-ALTER SUBSCRIPTION regress_testsub SET (streaming = true);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | on | p | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-DROP SUBSCRIPTION regress_testsub;
--- two_phase and streaming are compatible.
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, streaming = true, two_phase = true);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | on | p | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-DROP SUBSCRIPTION regress_testsub;
--- fail - disable_on_error must be boolean
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, disable_on_error = foo);
-ERROR: disable_on_error requires a Boolean value
--- now it works
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, disable_on_error = false);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (disable_on_error = true);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | t | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-DROP SUBSCRIPTION regress_testsub;
--- let's do some tests with pg_create_subscription rather than superuser
-SET SESSION AUTHORIZATION regress_subscription_user3;
--- fail, not enough privileges
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false);
-ERROR: permission denied for database regression
--- fail, must specify password
-RESET SESSION AUTHORIZATION;
-GRANT CREATE ON DATABASE REGRESSION TO regress_subscription_user3;
-SET SESSION AUTHORIZATION regress_subscription_user3;
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false);
-ERROR: password is required
-DETAIL: Non-superusers must provide a password in the connection string.
--- fail, can't set password_required=false
-RESET SESSION AUTHORIZATION;
-GRANT CREATE ON DATABASE REGRESSION TO regress_subscription_user3;
-SET SESSION AUTHORIZATION regress_subscription_user3;
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, password_required = false);
-ERROR: password_required=false is superuser-only
-HINT: Subscriptions with the password_required option set to false may only be created or modified by the superuser.
--- ok
-RESET SESSION AUTHORIZATION;
-GRANT CREATE ON DATABASE REGRESSION TO regress_subscription_user3;
-SET SESSION AUTHORIZATION regress_subscription_user3;
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist password=regress_fakepassword' PUBLICATION testpub WITH (connect = false);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
--- we cannot give the subscription away to some random user
-ALTER SUBSCRIPTION regress_testsub OWNER TO regress_subscription_user;
-ERROR: must be able to SET ROLE "regress_subscription_user"
--- but we can rename the subscription we just created
-ALTER SUBSCRIPTION regress_testsub RENAME TO regress_testsub2;
--- ok, even after losing pg_create_subscription we can still rename it
-RESET SESSION AUTHORIZATION;
-REVOKE pg_create_subscription FROM regress_subscription_user3;
-SET SESSION AUTHORIZATION regress_subscription_user3;
-ALTER SUBSCRIPTION regress_testsub2 RENAME TO regress_testsub;
--- fail, after losing CREATE on the database we can't rename it any more
-RESET SESSION AUTHORIZATION;
-REVOKE CREATE ON DATABASE REGRESSION FROM regress_subscription_user3;
-SET SESSION AUTHORIZATION regress_subscription_user3;
-ALTER SUBSCRIPTION regress_testsub RENAME TO regress_testsub2;
-ERROR: permission denied for database regression
--- fail - cannot do ALTER SUBSCRIPTION SET (failover) inside transaction block
-BEGIN;
-ALTER SUBSCRIPTION regress_testsub SET (failover);
-ERROR: ALTER SUBSCRIPTION ... SET (failover) cannot run inside a transaction block
-COMMIT;
--- ok, owning it is enough for this stuff
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-DROP SUBSCRIPTION regress_testsub;
-RESET SESSION AUTHORIZATION;
-DROP ROLE regress_subscription_user;
-DROP ROLE regress_subscription_user2;
-DROP ROLE regress_subscription_user3;
-DROP ROLE regress_subscription_user_dummy;
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /Users/admin/pgsql/src/test/regress/expected/select_views.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/select_views.out
--- /Users/admin/pgsql/src/test/regress/expected/select_views.out 2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/select_views.out 2024-12-13 13:20:11
@@ -1,1552 +1,2 @@
---
--- SELECT_VIEWS
--- test the views defined in CREATE_VIEWS
---
-SELECT * FROM street;
- name | thepath | cname
-------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------
- Access Rd 25 | [(-121.9283,37.894),(-121.9283,37.9)] | Oakland
- Ada St | [(-122.2487,37.398),(-122.2496,37.401)] | Lafayette
- Agua Fria Creek | [(-121.9254,37.922),(-121.9281,37.889)] | Oakland
- Allen Ct | [(-122.0131,37.602),(-122.0117,37.597)] | Berkeley
- Alvarado Niles Road | [(-122.0325,37.903),(-122.0316,37.9)] | Berkeley
- Andrea Cir | [(-121.733218,37.88641),(-121.733286,37.90617)] | Oakland
- Apricot Lane | [(-121.9471,37.401),(-121.9456,37.392)] | Oakland
- Apricot Lane | [(-121.9471,37.401),(-121.9456,37.392)] | Oakland
- Arden Road | [(-122.0978,37.177),(-122.1,37.177)] | Oakland
- Arizona St | [(-122.0381,37.901),(-122.0367,37.898)] | Berkeley
- Arlington Dr | [(-121.8802,37.408),(-121.8807,37.394)] | Oakland
- Arlington Dr | [(-121.8802,37.408),(-121.8807,37.394)] | Oakland
- Arlington Road | [(-121.7957,37.898),(-121.7956,37.906)] | Oakland
- Arroyo Las Positas | [(-121.7973,37.997),(-121.7957,37.005)] | Oakland
- Arroyo Las Positas | [(-121.7973,37.997),(-121.7957,37.005)] | Oakland
- Arroyo Seco | [(-121.7073,37.766),(-121.6997,37.729)] | Oakland
- Ash St | [(-122.0408,37.31),(-122.04,37.292)] | Oakland
- Avenue 134th | [(-122.1823,37.002),(-122.1851,37.992)] | Oakland
- Avenue 134th | [(-122.1823,37.002),(-122.1851,37.992)] | Berkeley
- Avenue 140th | [(-122.1656,37.003),(-122.1691,37.988)] | Oakland
- Avenue 140th | [(-122.1656,37.003),(-122.1691,37.988)] | Berkeley
- Avenue D | [(-122.298,37.848),(-122.3024,37.849)] | Berkeley
- B St | [(-122.1749,37.451),(-122.1743,37.443)] | Oakland
- Bancroft Ave | [(-122.15714,37.4242),(-122.156,37.409)] | Oakland
- Bancroft Ave | [(-122.1643,37.523),(-122.1631,37.508),(-122.1621,37.493)] | Oakland
- Birch St | [(-122.1617,37.425),(-122.1614,37.417)] | Oakland
- Birch St | [(-122.1673,37.509),(-122.1661,37.492)] | Oakland
- Blacow Road | [(-122.0179,37.469),(-122.0167,37.465)] | Oakland
- Bridgepointe Dr | [(-122.0514,37.305),(-122.0509,37.299)] | Oakland
- Broadmore Ave | [(-122.095,37.522),(-122.0936,37.497)] | Oakland
- Broadway | [(-122.2409,37.586),(-122.2395,37.601)] | Berkeley
- Buckingham Blvd | [(-122.2231,37.59),(-122.2214,37.606)] | Berkeley
- Butterfield Dr | [(-122.0838,37.002),(-122.0834,37.987)] | Oakland
- Butterfield Dr | [(-122.0838,37.002),(-122.0834,37.987)] | Oakland
- Butterfield Dr | [(-122.0838,37.002),(-122.0834,37.987)] | Berkeley
- C St | [(-122.1768,37.46),(-122.1749,37.435)] | Oakland
- Calaveras Creek | [(-121.8203,37.035),(-121.8207,37.931)] | Oakland
- Calaveras Creek | [(-121.8203,37.035),(-121.8207,37.931)] | Oakland
- California St | [(-122.2032,37.005),(-122.2016,37.996)] | Berkeley
- California St | [(-122.2032,37.005),(-122.2016,37.996)] | Lafayette
- Cameron Ave | [(-122.1316,37.502),(-122.1327,37.481)] | Oakland
- Campus Dr | [(-122.1704,37.905),(-122.1678,37.868),(-122.1671,37.865)] | Berkeley
- Capricorn Ave | [(-122.2176,37.404),(-122.2164,37.384)] | Lafayette
- Carson St | [(-122.1846,37.9),(-122.1843,37.901)] | Berkeley
- Cedar Blvd | [(-122.0282,37.446),(-122.0265,37.43)] | Oakland
- Cedar St | [(-122.3011,37.737),(-122.2999,37.739)] | Berkeley
- Celia St | [(-122.0611,37.3),(-122.0616,37.299)] | Oakland
- Central Ave | [(-122.2343,37.602),(-122.2331,37.595)] | Berkeley
- Chambers Dr | [(-122.2004,37.352),(-122.1972,37.368)] | Lafayette
- Chambers Lane | [(-122.2001,37.359),(-122.1975,37.371)] | Lafayette
- Champion St | [(-122.214,37.991),(-122.2147,37.002)] | Berkeley
- Champion St | [(-122.214,37.991),(-122.2147,37.002)] | Lafayette
- Chapman Dr | [(-122.0421,37.504),(-122.0414,37.498)] | Oakland
- Charles St | [(-122.0255,37.505),(-122.0252,37.499)] | Oakland
- Cherry St | [(-122.0437,37.42),(-122.0434,37.413)] | Oakland
- Claremont Pl | [(-122.0542,37.995),(-122.0542,37.008)] | Oakland
- Claremont Pl | [(-122.0542,37.995),(-122.0542,37.008)] | Oakland
- Claremont Pl | [(-122.0542,37.995),(-122.0542,37.008)] | Berkeley
- Coliseum Way | [(-122.2001,37.47),(-122.1978,37.516)] | Oakland
- Coliseum Way | [(-122.2113,37.626),(-122.2085,37.592),(-122.2063,37.568)] | Berkeley
- Coolidge Ave | [(-122.2007,37.058),(-122.1992,37.06)] | Lafayette
- Cornell Ave | [(-122.2956,37.925),(-122.2949,37.906),(-122.2939,37.875)] | Berkeley
- Corriea Way | [(-121.9501,37.402),(-121.9505,37.398)] | Oakland
- Corriea Way | [(-121.9501,37.402),(-121.9505,37.398)] | Oakland
- Cowing Road | [(-122.0002,37.934),(-121.9772,37.782)] | Oakland
- Creston Road | [(-122.2639,37.002),(-122.2613,37.986),(-122.2602,37.978),(-122.2598,37.973)] | Berkeley
- Creston Road | [(-122.2639,37.002),(-122.2613,37.986),(-122.2602,37.978),(-122.2598,37.973)] | Lafayette
- Crow Canyon Creek | [(-122.043,37.905),(-122.0368,37.71)] | Berkeley
- Crystaline Dr | [(-121.925856,37),(-121.925869,37.00527)] | Oakland
- Cull Canyon Road | [(-122.0536,37.435),(-122.0499,37.315)] | Oakland
- Cull Creek | [(-122.0624,37.875),(-122.0582,37.527)] | Berkeley
- D St | [(-122.1811,37.505),(-122.1805,37.497)] | Oakland
- Decoto Road | [(-122.0159,37.006),(-122.016,37.002),(-122.0164,37.993)] | Oakland
- Decoto Road | [(-122.0159,37.006),(-122.016,37.002),(-122.0164,37.993)] | Oakland
- Decoto Road | [(-122.0159,37.006),(-122.016,37.002),(-122.0164,37.993)] | Berkeley
- Deering St | [(-122.2146,37.904),(-122.2126,37.897)] | Berkeley
- Dimond Ave | [(-122.2167,37.994),(-122.2162,37.006)] | Berkeley
- Dimond Ave | [(-122.2167,37.994),(-122.2162,37.006)] | Lafayette
- Donna Way | [(-122.1333,37.606),(-122.1316,37.599)] | Berkeley
- Driftwood Dr | [(-122.0109,37.482),(-122.0113,37.477)] | Oakland
- Driscoll Road | [(-121.9482,37.403),(-121.948451,37.39995)] | Oakland
- Driscoll Road | [(-121.9482,37.403),(-121.948451,37.39995)] | Oakland
- E St | [(-122.1832,37.505),(-122.1826,37.498),(-122.182,37.49)] | Oakland
- Eden Ave | [(-122.1143,37.505),(-122.1142,37.491)] | Oakland
- Eden Creek | [(-122.022037,37.00675),(-122.0221,37.998)] | Oakland
- Eden Creek | [(-122.022037,37.00675),(-122.0221,37.998)] | Oakland
- Eden Creek | [(-122.022037,37.00675),(-122.0221,37.998)] | Berkeley
- Edgewater Dr | [(-122.201,37.379),(-122.2042,37.41)] | Lafayette
- Enos Way | [(-121.7677,37.896),(-121.7673,37.91)] | Oakland
- Euclid Ave | [(-122.2671,37.009),(-122.2666,37.987)] | Berkeley
- Euclid Ave | [(-122.2671,37.009),(-122.2666,37.987)] | Lafayette
- Fairview Ave | [(-121.999,37.428),(-121.9863,37.351)] | Oakland
- Fairview Ave | [(-121.999,37.428),(-121.9863,37.351)] | Oakland
- Foothill Blvd | [(-122.2414,37.9),(-122.2403,37.893)] | Berkeley
- Fountain St | [(-122.2306,37.593),(-122.2293,37.605)] | Berkeley
- Gading Road | [(-122.0801,37.343),(-122.08,37.336)] | Oakland
- Grizzly Peak Blvd | [(-122.2213,37.638),(-122.2127,37.581)] | Berkeley
- Grove Way | [(-122.0643,37.884),(-122.062679,37.89162),(-122.061796,37.89578),(-122.0609,37.9)] | Berkeley
- Harris Road | [(-122.0659,37.372),(-122.0675,37.363)] | Oakland
- Heartwood Dr | [(-122.2006,37.341),(-122.1992,37.338)] | Lafayette
- Hegenberger Exwy | [(-122.1946,37.52),(-122.1947,37.497)] | Oakland
- Herrier St | [(-122.1943,37.006),(-122.1936,37.998)] | Oakland
- Herrier St | [(-122.1943,37.006),(-122.1936,37.998)] | Berkeley
- Hesperian Blvd | [(-122.097,37.333),(-122.0956,37.31),(-122.0946,37.293)] | Oakland
- Hesperian Blvd | [(-122.097,37.333),(-122.0956,37.31),(-122.0946,37.293)] | Oakland
- Hesperian Blvd | [(-122.1132,37.6),(-122.1123,37.586)] | Berkeley
- Hollis St | [(-122.2885,37.397),(-122.289,37.414)] | Lafayette
- I- 580 | [(-121.727,37.074),(-121.7229,37.093),(-121.722301,37.09522),(-121.721001,37.10005),(-121.7194,37.106),(-121.7188,37.109),(-121.7168,37.12),(-121.7163,37.123),(-121.7145,37.127),(-121.7096,37.148),(-121.707731,37.1568),(-121.7058,37.166),(-121.7055,37.168),(-121.7044,37.174),(-121.7038,37.172),(-121.7037,37.172),(-121.7027,37.175),(-121.7001,37.181),(-121.6957,37.191),(-121.6948,37.192),(-121.6897,37.204),(-121.6697,37.185)] | Oakland
- I- 580 | [(-121.9322,37.989),(-121.9243,37.006),(-121.9217,37.014)] | Oakland
- I- 580 | [(-121.9322,37.989),(-121.9243,37.006),(-121.9217,37.014)] | Oakland
- I- 580 | [(-122.018,37.019),(-122.0009,37.032),(-121.9787,37.983),(-121.958,37.984),(-121.9571,37.986)] | Oakland
- I- 580 | [(-122.018,37.019),(-122.0009,37.032),(-121.9787,37.983),(-121.958,37.984),(-121.9571,37.986)] | Oakland
- I- 580 | [(-122.1108,37.023),(-122.1101,37.02),(-122.108103,37.00764),(-122.108,37.007),(-122.1069,37.998),(-122.1064,37.994),(-122.1053,37.982),(-122.1048,37.977),(-122.1032,37.958),(-122.1026,37.953),(-122.1013,37.938),(-122.0989,37.911),(-122.0984,37.91),(-122.098,37.908)] | Oakland
- I- 580 | [(-122.1108,37.023),(-122.1101,37.02),(-122.108103,37.00764),(-122.108,37.007),(-122.1069,37.998),(-122.1064,37.994),(-122.1053,37.982),(-122.1048,37.977),(-122.1032,37.958),(-122.1026,37.953),(-122.1013,37.938),(-122.0989,37.911),(-122.0984,37.91),(-122.098,37.908)] | Berkeley
- I- 580 | [(-122.1543,37.703),(-122.1535,37.694),(-122.1512,37.655),(-122.1475,37.603),(-122.1468,37.583),(-122.1472,37.569),(-122.149044,37.54874),(-122.1493,37.546),(-122.1501,37.532),(-122.1506,37.509),(-122.1495,37.482),(-122.1487,37.467),(-122.1477,37.447),(-122.1414,37.383),(-122.1404,37.376),(-122.1398,37.372),(-122.139,37.356),(-122.1388,37.353),(-122.1385,37.34),(-122.1382,37.33),(-122.1378,37.316)] | Oakland
- I- 580 | [(-122.1543,37.703),(-122.1535,37.694),(-122.1512,37.655),(-122.1475,37.603),(-122.1468,37.583),(-122.1472,37.569),(-122.149044,37.54874),(-122.1493,37.546),(-122.1501,37.532),(-122.1506,37.509),(-122.1495,37.482),(-122.1487,37.467),(-122.1477,37.447),(-122.1414,37.383),(-122.1404,37.376),(-122.1398,37.372),(-122.139,37.356),(-122.1388,37.353),(-122.1385,37.34),(-122.1382,37.33),(-122.1378,37.316)] | Berkeley
- I- 580 | [(-122.2197,37.99),(-122.22,37.99),(-122.222092,37.99523),(-122.2232,37.998),(-122.224146,37.99963),(-122.2261,37.003),(-122.2278,37.007),(-122.2302,37.026),(-122.2323,37.043),(-122.2344,37.059),(-122.235405,37.06427),(-122.2365,37.07)] | Berkeley
- I- 580 | [(-122.2197,37.99),(-122.22,37.99),(-122.222092,37.99523),(-122.2232,37.998),(-122.224146,37.99963),(-122.2261,37.003),(-122.2278,37.007),(-122.2302,37.026),(-122.2323,37.043),(-122.2344,37.059),(-122.235405,37.06427),(-122.2365,37.07)] | Lafayette
- I- 580 Ramp | [(-121.8521,37.011),(-121.8479,37.999),(-121.8476,37.999),(-121.8456,37.01),(-121.8455,37.011)] | Oakland
- I- 580 Ramp | [(-121.8521,37.011),(-121.8479,37.999),(-121.8476,37.999),(-121.8456,37.01),(-121.8455,37.011)] | Oakland
- I- 580 Ramp | [(-121.8743,37.014),(-121.8722,37.999),(-121.8714,37.999)] | Oakland
- I- 580 Ramp | [(-121.8743,37.014),(-121.8722,37.999),(-121.8714,37.999)] | Oakland
- I- 580 Ramp | [(-121.9043,37.998),(-121.9036,37.013),(-121.902632,37.0174),(-121.9025,37.018)] | Oakland
- I- 580 Ramp | [(-121.9043,37.998),(-121.9036,37.013),(-121.902632,37.0174),(-121.9025,37.018)] | Oakland
- I- 580 Ramp | [(-121.9368,37.986),(-121.936483,37.98832),(-121.9353,37.997),(-121.93504,37.00035),(-121.9346,37.006),(-121.933764,37.00031),(-121.9333,37.997),(-121.9322,37.989)] | Oakland
- I- 580 Ramp | [(-121.9368,37.986),(-121.936483,37.98832),(-121.9353,37.997),(-121.93504,37.00035),(-121.9346,37.006),(-121.933764,37.00031),(-121.9333,37.997),(-121.9322,37.989)] | Oakland
- I- 580 Ramp | [(-122.093241,37.90351),(-122.09364,37.89634),(-122.093788,37.89212)] | Berkeley
- I- 580 Ramp | [(-122.0934,37.896),(-122.09257,37.89961),(-122.0911,37.906)] | Berkeley
- I- 580 Ramp | [(-122.0941,37.897),(-122.0943,37.902)] | Berkeley
- I- 580 Ramp | [(-122.096,37.888),(-122.0962,37.891),(-122.0964,37.9)] | Berkeley
- I- 580 Ramp | [(-122.101,37.898),(-122.1005,37.902),(-122.0989,37.911)] | Berkeley
- I- 580 Ramp | [(-122.1086,37.003),(-122.1068,37.993),(-122.1066,37.992),(-122.1053,37.982)] | Oakland
- I- 580 Ramp | [(-122.1086,37.003),(-122.1068,37.993),(-122.1066,37.992),(-122.1053,37.982)] | Berkeley
- I- 580 Ramp | [(-122.1414,37.383),(-122.1407,37.376),(-122.1403,37.372),(-122.139,37.356)] | Oakland
- I- 580/I-680 Ramp | ((-121.9207,37.988),(-121.9192,37.016)) | Oakland
- I- 580/I-680 Ramp | ((-121.9207,37.988),(-121.9192,37.016)) | Oakland
- I- 680 | ((-121.939,37.15),(-121.9387,37.145),(-121.9373,37.125),(-121.934242,37.07643),(-121.933886,37.0709),(-121.9337,37.068),(-121.933122,37.06139),(-121.932736,37.05698),(-121.93222,37.05108),(-121.931844,37.04678),(-121.930113,37.027),(-121.926829,37),(-121.9265,37.998),(-121.9217,37.96),(-121.9203,37.949),(-121.9184,37.934)) | Oakland
((-121.939,37.15),(-121.9387,37.145),(-121.9373,37.125),(-121.934242,37.07643),(-121.933886,37.0709),(-121.9337,37.068),(-121.933122,37.06139),(-121.932736,37.05698),(-121.93222,37.05108),(-121.931844,37.04678),(-121.930113,37.027),(-121.926829,37),(-121.9265,37.998),(-121.9217,37.96),(-121.9203,37.949),(-121.9184,37.934)) | Oakland - I- 680 | ((-121.939,37.15),(-121.9387,37.145),(-121.9373,37.125),(-121.934242,37.07643),(-121.933886,37.0709),(-121.9337,37.068),(-121.933122,37.06139),(-121.932736,37.05698),(-121.93222,37.05108),(-121.931844,37.04678),(-121.930113,37.027),(-121.926829,37),(-121.9265,37.998),(-121.9217,37.96),(-121.9203,37.949),(-121.9184,37.934)) | Oakland - I- 680 | [(-121.9101,37.715),(-121.911269,37.74682),(-121.9119,37.764),(-121.9124,37.776),(-121.9174,37.905),(-121.9194,37.957),(-121.9207,37.988)] | Oakland - I- 680 | [(-121.9184,37.934),(-121.917,37.913),(-121.9122,37.83),(-121.9052,37.702)] | Oakland - I- 680 Ramp | [(-121.8833,37.376),(-121.8833,37.392),(-121.883,37.4),(-121.8835,37.402),(-121.8852,37.422)] | Oakland - I- 680 Ramp | [(-121.8833,37.376),(-121.8833,37.392),(-121.883,37.4),(-121.8835,37.402),(-121.8852,37.422)] | Oakland - I- 680 Ramp | [(-121.92,37.438),(-121.9218,37.424),(-121.9238,37.408),(-121.9252,37.392)] | Oakland - I- 680 Ramp | [(-121.92,37.438),(-121.9218,37.424),(-121.9238,37.408),(-121.9252,37.392)] | Oakland - I- 680 Ramp | [(-121.9238,37.402),(-121.9234,37.395),(-121.923,37.399)] | Oakland - I- 680 Ramp | [(-121.9238,37.402),(-121.9234,37.395),(-121.923,37.399)] | Oakland - I- 80 | ((-122.2937,37.277),(-122.3016,37.262)) | Lafayette - I- 80 | ((-122.2962,37.273),(-122.3004,37.264)) | Lafayette - I- 80 Ramp | [(-122.2962,37.413),(-122.2959,37.382),(-122.2951,37.372)] | Lafayette - I- 880 | ((-121.9669,37.075),(-121.9663,37.071),(-121.9656,37.065),(-121.9618,37.037),(-121.95689,37),(-121.948,37.933)) | Oakland - I- 880 | ((-121.9669,37.075),(-121.9663,37.071),(-121.9656,37.065),(-121.9618,37.037),(-121.95689,37),(-121.948,37.933)) | Oakland - I- 880 | [(-121.948,37.933),(-121.9471,37.925),(-121.9467,37.923),(-121.946,37.918),(-121.9452,37.912),(-121.937,37.852)] | Oakland - I- 880 | [(-122.0219,37.466),(-122.0205,37.447),(-122.020331,37.44447),(-122.020008,37.43962),(-122.0195,37.432),(-122.0193,37.429),(-122.0164,37.393),(-122.010219,37.34771),(-122.0041,37.313)] | Oakland - I- 880 | [(-122.0375,37.632),(-122.0359,37.619),(-122.0358,37.616),(-122.034514,37.60409),(-122.031876,37.57965),(-122.031193,37.57332),(-122.03016,37.56375),(-122.02943,37.55698),(-122.028689,37.54929),(-122.027833,37.53908),(-122.025979,37.51698),(-122.0238,37.491)] | Oakland - I- 880 | [(-122.0375,37.632),(-122.0359,37.619),(-122.0358,37.616),(-122.034514,37.60409),(-122.031876,37.57965),(-122.031193,37.57332),(-122.03016,37.56375),(-122.02943,37.55698),(-122.028689,37.54929),(-122.027833,37.53908),(-122.025979,37.51698),(-122.0238,37.491)] | Berkeley - I- 880 | [(-122.0612,37.003),(-122.0604,37.991),(-122.0596,37.982),(-122.0585,37.967),(-122.0583,37.961),(-122.0553,37.918),(-122.053635,37.89475),(-122.050759,37.8546),(-122.05,37.844),(-122.0485,37.817),(-122.0483,37.813),(-122.0482,37.811)] | Oakland - I- 880 | [(-122.0612,37.003),(-122.0604,37.991),(-122.0596,37.982),(-122.0585,37.967),(-122.0583,37.961),(-122.0553,37.918),(-122.053635,37.89475),(-122.050759,37.8546),(-122.05,37.844),(-122.0485,37.817),(-122.0483,37.813),(-122.0482,37.811)] | Oakland - I- 880 | 
[(-122.0612,37.003),(-122.0604,37.991),(-122.0596,37.982),(-122.0585,37.967),(-122.0583,37.961),(-122.0553,37.918),(-122.053635,37.89475),(-122.050759,37.8546),(-122.05,37.844),(-122.0485,37.817),(-122.0483,37.813),(-122.0482,37.811)] | Berkeley - I- 880 | [(-122.0831,37.312),(-122.0819,37.296),(-122.081,37.285),(-122.0786,37.248),(-122.078,37.24),(-122.077642,37.23496),(-122.076983,37.22567),(-122.076599,37.22026),(-122.076229,37.21505),(-122.0758,37.209)] | Oakland - I- 880 | [(-122.0978,37.528),(-122.096,37.496),(-122.0931,37.453),(-122.09277,37.4496),(-122.090189,37.41442),(-122.0896,37.405),(-122.085,37.34)] | Oakland - I- 880 | [(-122.1365,37.902),(-122.1358,37.898),(-122.1333,37.881),(-122.1323,37.874),(-122.1311,37.866),(-122.1308,37.865),(-122.1307,37.864),(-122.1289,37.851),(-122.1277,37.843),(-122.1264,37.834),(-122.1231,37.812),(-122.1165,37.766),(-122.1104,37.72),(-122.109695,37.71094),(-122.109,37.702),(-122.108312,37.69168),(-122.1076,37.681)] | Berkeley - I- 880 | [(-122.1755,37.185),(-122.1747,37.178),(-122.1742,37.173),(-122.1692,37.126),(-122.167792,37.11594),(-122.16757,37.11435),(-122.1671,37.111),(-122.1655,37.1),(-122.165169,37.09811),(-122.1641,37.092),(-122.1596,37.061),(-122.158381,37.05275),(-122.155991,37.03657),(-122.1531,37.017),(-122.1478,37.98),(-122.1407,37.932),(-122.1394,37.924),(-122.1389,37.92),(-122.1376,37.91)] | Oakland - I- 880 | [(-122.1755,37.185),(-122.1747,37.178),(-122.1742,37.173),(-122.1692,37.126),(-122.167792,37.11594),(-122.16757,37.11435),(-122.1671,37.111),(-122.1655,37.1),(-122.165169,37.09811),(-122.1641,37.092),(-122.1596,37.061),(-122.158381,37.05275),(-122.155991,37.03657),(-122.1531,37.017),(-122.1478,37.98),(-122.1407,37.932),(-122.1394,37.924),(-122.1389,37.92),(-122.1376,37.91)] | Berkeley - I- 880 | [(-122.2214,37.711),(-122.2202,37.699),(-122.2199,37.695),(-122.219,37.682),(-122.2184,37.672),(-122.2173,37.652),(-122.2159,37.638),(-122.2144,37.616),(-122.2138,37.612),(-122.2135,37.609),(-122.212,37.592),(-122.2116,37.586),(-122.2111,37.581)] | Berkeley - I- 880 | [(-122.2707,37.975),(-122.2693,37.972),(-122.2681,37.966),(-122.267,37.962),(-122.2659,37.957),(-122.2648,37.952),(-122.2636,37.946),(-122.2625,37.935),(-122.2617,37.927),(-122.2607,37.921),(-122.2593,37.916),(-122.258,37.911),(-122.2536,37.898),(-122.2432,37.858),(-122.2408,37.845),(-122.2386,37.827),(-122.2374,37.811)] | Berkeley - I- 880 Ramp | [(-122.0019,37.301),(-122.002,37.293)] | Oakland - I- 880 Ramp | [(-122.0041,37.313),(-122.0018,37.315),(-122.0007,37.315),(-122.0005,37.313),(-122.0002,37.308),(-121.9995,37.289)] | Oakland - I- 880 Ramp | [(-122.0041,37.313),(-122.0038,37.308),(-122.0039,37.284),(-122.0013,37.287),(-121.9995,37.289)] | Oakland - I- 880 Ramp | [(-122.0236,37.488),(-122.0231,37.458),(-122.0227,37.458),(-122.0223,37.452),(-122.0205,37.447)] | Oakland - I- 880 Ramp | [(-122.0238,37.491),(-122.0215,37.483),(-122.0211,37.477),(-122.0205,37.447)] | Oakland - I- 880 Ramp | [(-122.059,37.982),(-122.0577,37.984),(-122.0612,37.003)] | Oakland - I- 880 Ramp | [(-122.059,37.982),(-122.0577,37.984),(-122.0612,37.003)] | Oakland - I- 880 Ramp | [(-122.059,37.982),(-122.0577,37.984),(-122.0612,37.003)] | Berkeley - I- 880 Ramp | [(-122.0618,37.011),(-122.0631,37.982),(-122.0585,37.967)] | Oakland - I- 880 Ramp | [(-122.0618,37.011),(-122.0631,37.982),(-122.0585,37.967)] | Oakland - I- 880 Ramp | [(-122.0618,37.011),(-122.0631,37.982),(-122.0585,37.967)] | Berkeley - I- 880 Ramp | [(-122.085,37.34),(-122.0801,37.316),(-122.081,37.285)] | Oakland - I- 880 
Ramp | [(-122.085,37.34),(-122.0801,37.316),(-122.081,37.285)] | Oakland - I- 880 Ramp | [(-122.085,37.34),(-122.0866,37.316),(-122.0819,37.296)] | Oakland - I- 880 Ramp | [(-122.085,37.34),(-122.0866,37.316),(-122.0819,37.296)] | Oakland - I- 880 Ramp | [(-122.1029,37.61),(-122.1013,37.587),(-122.0999,37.569)] | Berkeley - I- 880 Ramp | [(-122.1379,37.891),(-122.1383,37.897),(-122.1377,37.902)] | Berkeley - I- 880 Ramp | [(-122.1379,37.931),(-122.137597,37.92736),(-122.1374,37.925),(-122.1373,37.924),(-122.1369,37.914),(-122.1358,37.905),(-122.1365,37.908),(-122.1358,37.898)] | Berkeley - I- 880 Ramp | [(-122.2536,37.898),(-122.254,37.902)] | Berkeley - I- 880 Ramp | [(-122.2771,37.002),(-122.278,37)] | Lafayette - Indian Way | [(-122.2066,37.398),(-122.2045,37.411)] | Lafayette - Jackson St | [(-122.0845,37.6),(-122.0842,37.606)] | Berkeley - Johnson Dr | [(-121.9145,37.901),(-121.915,37.877)] | Oakland - Joyce St | [(-122.0792,37.604),(-122.0774,37.581)] | Berkeley - Juniper St | [(-121.7823,37.897),(-121.7815,37.9)] | Oakland - Kaiser Dr | [(-122.067163,37.47821),(-122.060402,37.51961)] | Oakland - Keeler Ave | [(-122.2578,37.906),(-122.2579,37.899)] | Berkeley - Kildare Road | [(-122.0968,37.016),(-122.0959,37)] | Oakland - La Playa Dr | [(-122.1039,37.545),(-122.101,37.493)] | Oakland - Laguna Ave | [(-122.2099,37.989),(-122.2089,37)] | Berkeley - Laguna Ave | [(-122.2099,37.989),(-122.2089,37)] | Lafayette - Lakehurst Cir | [(-122.284729,37.89025),(-122.286096,37.90364)] | Berkeley - Lakeshore Ave | [(-122.2586,37.99),(-122.2556,37.006)] | Berkeley - Lakeshore Ave | [(-122.2586,37.99),(-122.2556,37.006)] | Lafayette - Las Positas Road | [(-121.764488,37.99199),(-121.75569,37.02022)] | Oakland - Las Positas Road | [(-121.764488,37.99199),(-121.75569,37.02022)] | Oakland - Linden St | [(-122.2867,37.998),(-122.2864,37.008)] | Berkeley - Linden St | [(-122.2867,37.998),(-122.2864,37.008)] | Lafayette - Livermore Ave | [(-121.7687,37.448),(-121.769,37.375)] | Oakland - Livermore Ave | [(-121.7687,37.448),(-121.769,37.375)] | Oakland - Livermore Ave | [(-121.772719,37.99085),(-121.7728,37.001)] | Oakland - Livermore Ave | [(-121.772719,37.99085),(-121.7728,37.001)] | Oakland - Locust St | [(-122.1606,37.007),(-122.1593,37.987)] | Oakland - Locust St | [(-122.1606,37.007),(-122.1593,37.987)] | Berkeley - Logan Ct | [(-122.0053,37.492),(-122.0061,37.484)] | Oakland - Magnolia St | [(-122.0971,37.5),(-122.0962,37.484)] | Oakland - Mandalay Road | [(-122.2322,37.397),(-122.2321,37.403)] | Lafayette - Marin Ave | [(-122.2741,37.894),(-122.272,37.901)] | Berkeley - Martin Luther King Jr Way | [(-122.2712,37.608),(-122.2711,37.599)] | Berkeley - Mattos Dr | [(-122.0005,37.502),(-122.000898,37.49683)] | Oakland - Maubert Ave | [(-122.1114,37.009),(-122.1096,37.995)] | Oakland - Maubert Ave | [(-122.1114,37.009),(-122.1096,37.995)] | Berkeley - McClure Ave | [(-122.1431,37.001),(-122.1436,37.998)] | Oakland - McClure Ave | [(-122.1431,37.001),(-122.1436,37.998)] | Berkeley - Medlar Dr | [(-122.0627,37.378),(-122.0625,37.375)] | Oakland - Mildred Ct | [(-122.0002,37.388),(-121.9998,37.386)] | Oakland - Miller Road | [(-122.0902,37.645),(-122.0865,37.545)] | Berkeley - Miramar Ave | [(-122.1009,37.025),(-122.099089,37.03209)] | Oakland - Mission Blvd | [(-121.918886,37),(-121.9194,37.976),(-121.9198,37.975)] | Oakland - Mission Blvd | [(-121.918886,37),(-121.9194,37.976),(-121.9198,37.975)] | Oakland - Mission Blvd | [(-122.0006,37.896),(-121.9989,37.88)] | Oakland - Mission Blvd | 
[(-122.0006,37.896),(-121.9989,37.88)] | Berkeley - Moores Ave | [(-122.0087,37.301),(-122.0094,37.292)] | Oakland - National Ave | [(-122.1192,37.5),(-122.1281,37.489)] | Oakland - Navajo Ct | [(-121.8779,37.901),(-121.8783,37.9)] | Oakland - Newark Blvd | [(-122.0352,37.438),(-122.0341,37.423)] | Oakland - Oakland Inner Harbor | [(-122.2625,37.913),(-122.260016,37.89484)] | Berkeley - Oakridge Road | [(-121.8316,37.049),(-121.828382,37)] | Oakland - Oneil Ave | [(-122.076754,37.62476),(-122.0745,37.595)] | Berkeley - Parkridge Dr | [(-122.1438,37.884),(-122.1428,37.9)] | Berkeley - Parkside Dr | [(-122.0475,37.603),(-122.0443,37.596)] | Berkeley - Paseo Padre Pkwy | [(-121.9143,37.005),(-121.913522,37)] | Oakland - Paseo Padre Pkwy | [(-122.0021,37.639),(-121.996,37.628)] | Oakland - Paseo Padre Pkwy | [(-122.0021,37.639),(-121.996,37.628)] | Berkeley - Pearl St | [(-122.2383,37.594),(-122.2366,37.615)] | Berkeley - Periwinkle Road | [(-122.0451,37.301),(-122.044758,37.29844)] | Oakland - Pimlico Dr | [(-121.8616,37.998),(-121.8618,37.008)] | Oakland - Pimlico Dr | [(-121.8616,37.998),(-121.8618,37.008)] | Oakland - Portsmouth Ave | [(-122.1064,37.315),(-122.1064,37.308)] | Oakland - Proctor Ave | [(-122.2267,37.406),(-122.2251,37.386)] | Lafayette - Railroad Ave | [(-122.0245,37.013),(-122.0234,37.003),(-122.0223,37.993)] | Oakland - Railroad Ave | [(-122.0245,37.013),(-122.0234,37.003),(-122.0223,37.993)] | Oakland - Railroad Ave | [(-122.0245,37.013),(-122.0234,37.003),(-122.0223,37.993)] | Berkeley - Ranspot Dr | [(-122.0972,37.999),(-122.0959,37)] | Oakland - Ranspot Dr | [(-122.0972,37.999),(-122.0959,37)] | Oakland - Ranspot Dr | [(-122.0972,37.999),(-122.0959,37)] | Berkeley - Redding St | [(-122.1978,37.901),(-122.1975,37.895)] | Berkeley - Redwood Road | [(-122.1493,37.98),(-122.1437,37.001)] | Oakland - Redwood Road | [(-122.1493,37.98),(-122.1437,37.001)] | Berkeley - Roca Dr | [(-122.0335,37.609),(-122.0314,37.599)] | Berkeley - Rosedale Ct | [(-121.9232,37.9),(-121.924,37.897)] | Oakland - Sacramento St | [(-122.2799,37.606),(-122.2797,37.597)] | Berkeley - Saddle Brook Dr | [(-122.1478,37.909),(-122.1454,37.904),(-122.1451,37.888)] | Berkeley - Saginaw Ct | [(-121.8803,37.898),(-121.8806,37.901)] | Oakland - San Andreas Dr | [(-122.0609,37.9),(-122.0614,37.895)] | Berkeley - Santa Maria Ave | [(-122.0773,37),(-122.0773,37.98)] | Oakland - Santa Maria Ave | [(-122.0773,37),(-122.0773,37.98)] | Oakland - Santa Maria Ave | [(-122.0773,37),(-122.0773,37.98)] | Berkeley - Shattuck Ave | [(-122.2686,37.904),(-122.2686,37.897)] | Berkeley - Sheridan Road | [(-122.2279,37.425),(-122.2253,37.411),(-122.2223,37.377)] | Lafayette - Shoreline Dr | [(-122.2657,37.603),(-122.2648,37.6)] | Berkeley - Skyline Blvd | [(-122.1738,37.01),(-122.1714,37.996)] | Oakland - Skyline Blvd | [(-122.1738,37.01),(-122.1714,37.996)] | Berkeley - Skyline Dr | [(-122.0277,37.5),(-122.0284,37.498)] | Oakland - Skywest Dr | [(-122.1161,37.62),(-122.1123,37.586)] | Berkeley - Southern Pacific Railroad | [(-122.3002,37.674),(-122.2999,37.661)] | Berkeley - Sp Railroad | [(-121.893564,37.99009),(-121.897,37.016)] | Oakland - Sp Railroad | [(-121.893564,37.99009),(-121.897,37.016)] | Oakland - Sp Railroad | [(-121.9565,37.898),(-121.9562,37.9)] | Oakland - Sp Railroad | [(-122.0734,37.001),(-122.0734,37.997)] | Oakland - Sp Railroad | [(-122.0734,37.001),(-122.0734,37.997)] | Oakland - Sp Railroad | [(-122.0734,37.001),(-122.0734,37.997)] | Berkeley - Sp Railroad | 
[(-122.0914,37.601),(-122.087,37.56),(-122.086408,37.5551)] | Berkeley - Sp Railroad | [(-122.137792,37.003),(-122.1365,37.992),(-122.131257,37.94612)] | Oakland - Sp Railroad | [(-122.137792,37.003),(-122.1365,37.992),(-122.131257,37.94612)] | Berkeley - Sp Railroad | [(-122.1947,37.497),(-122.193328,37.4848)] | Oakland - Stanton Ave | [(-122.100392,37.0697),(-122.099513,37.06052)] | Oakland - State Hwy 123 | [(-122.3004,37.986),(-122.2998,37.969),(-122.2995,37.962),(-122.2992,37.952),(-122.299,37.942),(-122.2987,37.935),(-122.2984,37.924),(-122.2982,37.92),(-122.2976,37.904),(-122.297,37.88),(-122.2966,37.869),(-122.2959,37.848),(-122.2961,37.843)] | Berkeley - State Hwy 13 | [(-122.1797,37.943),(-122.179871,37.91849),(-122.18,37.9),(-122.179023,37.86615),(-122.1787,37.862),(-122.1781,37.851),(-122.1777,37.845),(-122.1773,37.839),(-122.177,37.833)] | Berkeley - State Hwy 13 | [(-122.2049,37.2),(-122.20328,37.17975),(-122.1989,37.125),(-122.198078,37.11641),(-122.1975,37.11)] | Lafayette - State Hwy 13 Ramp | [(-122.2244,37.427),(-122.223,37.414),(-122.2214,37.396),(-122.2213,37.388)] | Lafayette - State Hwy 238 | ((-122.098,37.908),(-122.0983,37.907),(-122.099,37.905),(-122.101,37.898),(-122.101535,37.89711),(-122.103173,37.89438),(-122.1046,37.892),(-122.106,37.89)) | Berkeley - State Hwy 238 Ramp | [(-122.1288,37.9),(-122.1293,37.895),(-122.1296,37.906)] | Berkeley - State Hwy 24 | [(-122.2674,37.246),(-122.2673,37.248),(-122.267,37.261),(-122.2668,37.271),(-122.2663,37.298),(-122.2659,37.315),(-122.2655,37.336),(-122.265007,37.35882),(-122.264443,37.37286),(-122.2641,37.381),(-122.2638,37.388),(-122.2631,37.396),(-122.2617,37.405),(-122.2615,37.407),(-122.2605,37.412)] | Lafayette - State Hwy 84 | [(-121.9565,37.898),(-121.956589,37.89911),(-121.9569,37.903),(-121.956,37.91),(-121.9553,37.919)] | Oakland - State Hwy 84 | [(-122.0671,37.426),(-122.07,37.402),(-122.074,37.37),(-122.0773,37.338)] | Oakland - State Hwy 92 | [(-122.1085,37.326),(-122.1095,37.322),(-122.1111,37.316),(-122.1119,37.313),(-122.1125,37.311),(-122.1131,37.308),(-122.1167,37.292),(-122.1187,37.285),(-122.12,37.28)] | Oakland - State Hwy 92 Ramp | [(-122.1086,37.321),(-122.1089,37.315),(-122.1111,37.316)] | Oakland - Stuart St | [(-122.2518,37.6),(-122.2507,37.601),(-122.2491,37.606)] | Berkeley - Sunol Ridge Trl | [(-121.9419,37.455),(-121.9345,37.38)] | Oakland - Sunol Ridge Trl | [(-121.9419,37.455),(-121.9345,37.38)] | Oakland - Tassajara Creek | [(-121.87866,37.98898),(-121.8782,37.015)] | Oakland - Tassajara Creek | [(-121.87866,37.98898),(-121.8782,37.015)] | Oakland - Taurus Ave | [(-122.2159,37.416),(-122.2128,37.389)] | Lafayette - Tennyson Road | [(-122.0891,37.317),(-122.0927,37.317)] | Oakland - Thackeray Ave | [(-122.072,37.305),(-122.0715,37.298)] | Oakland - Theresa Way | [(-121.7289,37.906),(-121.728,37.899)] | Oakland - Tissiack Way | [(-121.920364,37),(-121.9208,37.995)] | Oakland - Tissiack Way | [(-121.920364,37),(-121.9208,37.995)] | Oakland - Tupelo Ter | [(-122.059087,37.6113),(-122.057021,37.59942)] | Berkeley - Vallecitos Road | [(-121.8699,37.916),(-121.8703,37.891)] | Oakland - Warm Springs Blvd | [(-121.933956,37),(-121.9343,37.97)] | Oakland - Warm Springs Blvd | [(-121.933956,37),(-121.9343,37.97)] | Oakland - Welch Creek Road | [(-121.7695,37.386),(-121.7737,37.413)] | Oakland - Welch Creek Road | [(-121.7695,37.386),(-121.7737,37.413)] | Oakland - West Loop Road | [(-122.0576,37.604),(-122.0602,37.586)] | Berkeley - Western Pacific Railroad Spur | 
[(-122.0394,37.018),(-122.0394,37.961)] | Oakland - Western Pacific Railroad Spur | [(-122.0394,37.018),(-122.0394,37.961)] | Oakland - Western Pacific Railroad Spur | [(-122.0394,37.018),(-122.0394,37.961)] | Berkeley - Whitlock Creek | [(-121.74683,37.91276),(-121.733107,37)] | Oakland - Whitlock Creek | [(-121.74683,37.91276),(-121.733107,37)] | Oakland - Willimet Way | [(-122.0964,37.517),(-122.0949,37.493)] | Oakland - Wisconsin St | [(-122.1994,37.017),(-122.1975,37.998),(-122.1971,37.994)] | Oakland - Wisconsin St | [(-122.1994,37.017),(-122.1975,37.998),(-122.1971,37.994)] | Berkeley - Wp Railroad | [(-122.254,37.902),(-122.2506,37.891)] | Berkeley - 100th Ave | [(-122.1657,37.429),(-122.1647,37.432)] | Oakland - 107th Ave | [(-122.1555,37.403),(-122.1531,37.41)] | Oakland - 14th St | [(-122.299,37.147),(-122.3,37.148)] | Lafayette - 19th Ave | [(-122.2366,37.897),(-122.2359,37.905)] | Berkeley - 1st St | [(-121.75508,37.89294),(-121.753581,37.90031)] | Oakland - 5th St | [(-122.278,37),(-122.2792,37.005),(-122.2803,37.009)] | Lafayette - 5th St | [(-122.296,37.615),(-122.2953,37.598)] | Berkeley - 82nd Ave | [(-122.1695,37.596),(-122.1681,37.603)] | Berkeley - 85th Ave | [(-122.1877,37.466),(-122.186,37.476)] | Oakland - 89th Ave | [(-122.1822,37.459),(-122.1803,37.471)] | Oakland - 98th Ave | [(-122.1568,37.498),(-122.1558,37.502)] | Oakland - 98th Ave | [(-122.1693,37.438),(-122.1682,37.444)] | Oakland - 98th Ave | [(-122.2001,37.258),(-122.1974,37.27)] | Lafayette -(333 rows) - -SELECT name, #thepath FROM iexit ORDER BY name COLLATE "C", 2; - name | ?column? -------------------------------------+---------- - I- 580 | 2 - I- 580 | 2 - I- 580 | 2 - I- 580 | 2 - I- 580 | 2 - I- 580 | 2 - I- 580 | 2 - I- 580 | 2 - I- 580 | 2 - I- 580 | 2 - I- 580 | 2 - I- 580 | 3 - I- 580 | 3 - I- 580 | 3 - I- 580 | 3 - I- 580 | 3 - I- 580 | 3 - I- 580 | 3 - I- 580 | 3 - I- 580 | 3 - I- 580 | 3 - I- 580 | 3 - I- 580 | 3 - I- 580 | 3 - I- 580 | 3 - I- 580 | 3 - I- 580 | 3 - I- 580 | 3 - I- 580 | 3 - I- 580 | 4 - I- 580 | 4 - I- 580 | 4 - I- 580 | 4 - I- 580 | 5 - I- 580 | 5 - I- 580 | 5 - I- 580 | 5 - I- 580 | 5 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 6 - I- 580 | 7 - I- 580 | 7 - I- 580 | 7 - I- 580 | 7 - I- 580 | 7 - I- 580 | 7 - I- 580 | 7 - I- 580 | 8 - I- 580 | 8 - I- 580 | 8 - I- 580 | 8 - I- 580 | 8 - I- 580 | 8 - I- 580 | 8 - I- 580 | 8 - I- 580 | 8 - I- 580 | 9 - I- 580 | 9 - I- 580 | 9 - I- 580 | 9 - I- 580 | 9 - I- 580 | 12 - I- 580 | 12 - I- 580 | 12 - I- 580 | 12 - I- 580 | 12 - I- 580 | 12 - I- 580 | 12 - I- 580 | 12 - I- 580 | 12 - I- 580 | 12 - I- 580 | 13 - I- 580 | 13 - I- 580 | 13 - I- 580 | 13 - I- 580 | 13 - I- 580 | 13 - I- 580 | 14 - I- 580 | 14 - I- 580 | 14 - I- 580 | 14 - I- 580 | 14 - I- 580 | 14 - I- 580 | 14 - I- 580 | 14 - I- 580 | 18 - I- 580 | 18 - I- 580 | 18 - I- 580 | 18 - I- 580 | 18 - I- 580 | 18 - I- 580 | 21 - I- 580 | 21 - I- 580 | 21 - I- 580 | 21 - I- 580 | 21 - I- 580 | 21 - I- 580 | 21 - I- 580 | 21 - I- 580 | 21 - I- 580 | 21 - I- 580 | 22 - I- 580 | 22 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - 
I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 2 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 3 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 
Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 4 - I- 580 Ramp | 5 - I- 580 Ramp | 5 - I- 580 Ramp | 5 - I- 580 Ramp | 5 - I- 580 Ramp | 5 - I- 580 Ramp | 5 - I- 580 Ramp | 5 - I- 580 Ramp | 5 - I- 580 Ramp | 5 - I- 580 Ramp | 5 - I- 580 Ramp | 5 - I- 580 Ramp | 5 - I- 580 Ramp | 5 - I- 580 Ramp | 5 - I- 580 Ramp | 6 - I- 580 Ramp | 6 - I- 580 Ramp | 6 - I- 580 Ramp | 7 - I- 580 Ramp | 8 - I- 580 Ramp | 8 - I- 580 Ramp | 8 - I- 580 Ramp | 8 - I- 580 Ramp | 8 - I- 580 Ramp | 8 - I- 580/I-680 Ramp | 2 - I- 580/I-680 Ramp | 2 - I- 580/I-680 Ramp | 2 - I- 580/I-680 Ramp | 2 - I- 580/I-680 Ramp | 2 - I- 580/I-680 Ramp | 2 - I- 580/I-680 Ramp | 4 - I- 580/I-680 Ramp | 4 - I- 580/I-680 Ramp | 4 - I- 580/I-680 Ramp | 4 - I- 580/I-680 Ramp | 5 - I- 580/I-680 Ramp | 6 - I- 580/I-680 Ramp | 6 - I- 580/I-680 Ramp | 6 - I- 680 | 2 - I- 680 | 2 - I- 680 | 2 - I- 680 | 2 - I- 680 | 2 - I- 680 | 2 - I- 680 | 2 - I- 680 | 3 - I- 680 | 3 - I- 680 | 3 - I- 680 | 4 - I- 680 | 4 - I- 680 | 4 - I- 680 | 5 - I- 680 | 5 - I- 680 | 5 - I- 680 | 7 - I- 680 | 7 - I- 680 | 7 - I- 680 | 7 - I- 680 | 8 - I- 680 | 8 - I- 680 | 8 - I- 680 | 8 - I- 680 | 10 - I- 680 | 10 - I- 680 | 10 - I- 680 | 10 - I- 680 | 10 - I- 680 | 10 - I- 680 | 10 - I- 680 | 16 - I- 680 | 16 - I- 680 | 16 - I- 680 | 16 - I- 680 | 16 - I- 680 | 16 - I- 680 | 16 - I- 680 | 16 - I- 680 Ramp | 2 - I- 680 Ramp | 2 - I- 680 Ramp | 2 - I- 680 Ramp | 2 - I- 680 Ramp | 2 - I- 680 Ramp | 2 - I- 680 Ramp | 2 - I- 680 Ramp | 2 - I- 680 Ramp | 2 - I- 680 Ramp | 2 - I- 680 Ramp | 2 - I- 680 Ramp | 2 - I- 680 Ramp | 2 - I- 680 Ramp | 2 - I- 680 Ramp | 2 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 3 - I- 680 Ramp | 4 - I- 680 Ramp | 4 - I- 680 Ramp | 4 - I- 680 Ramp | 5 - I- 680 Ramp | 5 - I- 680 Ramp | 5 - I- 680 Ramp | 5 - I- 680 Ramp | 5 - I- 680 Ramp | 5 - I- 680 Ramp | 6 - I- 680 Ramp | 6 - I- 680 Ramp | 6 - I- 680 Ramp | 6 - I- 680 Ramp | 7 - I- 680 Ramp | 7 - I- 680 Ramp | 7 - I- 680 Ramp | 7 - I- 680 Ramp | 8 - I- 680 Ramp | 8 - I- 680 Ramp | 8 - I- 680 Ramp | 8 - I- 80 | 2 - I- 80 | 2 - I- 80 | 2 - I- 80 | 2 - I- 80 | 2 - I- 80 | 2 - I- 80 | 2 - I- 80 | 2 - I- 80 | 2 - I- 80 | 2 - I- 80 | 2 - I- 80 | 2 - I- 80 | 2 - I- 80 | 2 - I- 80 | 3 - I- 80 | 3 - I- 80 | 3 - I- 80 | 4 - I- 80 | 4 - I- 80 | 4 - I- 80 | 4 - I- 80 | 4 - I- 80 | 5 - I- 80 | 5 - I- 80 | 5 - I- 80 | 5 - I- 80 | 5 - I- 80 | 5 - I- 80 | 5 - I- 80 | 5 - I- 80 | 5 - I- 80 | 11 - I- 80 | 11 - I- 80 | 11 - I- 80 | 11 - I- 80 Ramp | 2 - I- 80 Ramp | 2 - I- 80 Ramp | 2 - I- 80 Ramp | 2 - I- 80 Ramp | 2 - I- 80 Ramp | 2 - I- 80 Ramp | 2 - I- 80 Ramp | 2 - I- 80 Ramp | 2 - I- 80 Ramp | 2 - I- 80 Ramp | 2 - I- 80 Ramp | 2 - I- 80 Ramp | 2 - I- 80 Ramp | 2 - I- 80 Ramp | 2 - I- 80 Ramp | 2 - I- 80 Ramp | 2 - I- 80 Ramp | 2 - I- 80 Ramp | 2 - I- 80 Ramp | 3 - I- 80 Ramp | 3 - I- 80 Ramp | 3 - I- 80 Ramp | 3 - I- 80 Ramp | 3 - I- 80 Ramp | 3 - I- 80 Ramp | 3 - I- 80 Ramp | 3 - I- 
80 Ramp | 3 - I- 80 Ramp | 4 - I- 80 Ramp | 4 - I- 80 Ramp | 4 - I- 80 Ramp | 4 - I- 80 Ramp | 5 - I- 80 Ramp | 5 - I- 80 Ramp | 5 - I- 80 Ramp | 5 - I- 80 Ramp | 5 - I- 80 Ramp | 5 - I- 80 Ramp | 5 - I- 80 Ramp | 7 - I- 80 Ramp | 7 - I- 80 Ramp | 7 - I- 80 Ramp | 7 - I- 880 | 2 - I- 880 | 2 - I- 880 | 2 - I- 880 | 2 - I- 880 | 2 - I- 880 | 5 - I- 880 | 5 - I- 880 | 5 - I- 880 | 5 - I- 880 | 5 - I- 880 | 5 - I- 880 | 6 - I- 880 | 6 - I- 880 | 6 - I- 880 | 6 - I- 880 | 6 - I- 880 | 6 - I- 880 | 6 - I- 880 | 6 - I- 880 | 6 - I- 880 | 6 - I- 880 | 6 - I- 880 | 6 - I- 880 | 6 - I- 880 | 6 - I- 880 | 7 - I- 880 | 7 - I- 880 | 7 - I- 880 | 7 - I- 880 | 7 - I- 880 | 7 - I- 880 | 7 - I- 880 | 9 - I- 880 | 9 - I- 880 | 9 - I- 880 | 9 - I- 880 | 9 - I- 880 | 9 - I- 880 | 9 - I- 880 | 10 - I- 880 | 10 - I- 880 | 10 - I- 880 | 10 - I- 880 | 10 - I- 880 | 10 - I- 880 | 10 - I- 880 | 10 - I- 880 | 10 - I- 880 | 10 - I- 880 | 10 - I- 880 | 10 - I- 880 | 12 - I- 880 | 12 - I- 880 | 12 - I- 880 | 12 - I- 880 | 12 - I- 880 | 12 - I- 880 | 12 - I- 880 | 12 - I- 880 | 12 - I- 880 | 12 - I- 880 | 12 - I- 880 | 13 - I- 880 | 13 - I- 880 | 13 - I- 880 | 13 - I- 880 | 13 - I- 880 | 13 - I- 880 | 13 - I- 880 | 13 - I- 880 | 13 - I- 880 | 13 - I- 880 | 13 - I- 880 | 13 - I- 880 | 14 - I- 880 | 14 - I- 880 | 14 - I- 880 | 14 - I- 880 | 14 - I- 880 | 14 - I- 880 | 17 - I- 880 | 17 - I- 880 | 17 - I- 880 | 17 - I- 880 | 17 - I- 880 | 17 - I- 880 | 17 - I- 880 | 17 - I- 880 | 17 - I- 880 | 17 - I- 880 | 17 - I- 880 | 17 - I- 880 | 17 - I- 880 | 17 - I- 880 | 17 - I- 880 | 17 - I- 880 | 17 - I- 880 | 17 - I- 880 | 17 - I- 880 | 17 - I- 880 | 17 - I- 880 | 19 - I- 880 | 19 - I- 880 | 19 - I- 880 | 19 - I- 880 | 19 - I- 880 | 19 - I- 880 | 19 - I- 880 | 19 - I- 880 | 19 - I- 880 | 19 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 2 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 
Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 3 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 4 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 5 - I- 880 Ramp | 6 - I- 880 Ramp | 6 - I- 880 Ramp | 6 - I- 880 Ramp | 6 - I- 880 Ramp | 6 - I- 880 Ramp | 6 - I- 880 Ramp | 6 - I- 880 Ramp | 6 - I- 880 Ramp | 6 - I- 880 Ramp | 6 - I- 880 Ramp | 6 - I- 880 Ramp | 6 - I- 880 Ramp | 6 - I- 880 Ramp | 6 - I- 880 Ramp | 6 - I- 880 Ramp | 6 - I- 880 Ramp | 8 - I- 880 Ramp | 8 - I- 880 Ramp | 8 - I- 980 | 2 - I- 980 | 2 - I- 980 | 2 - I- 980 | 2 - I- 980 | 2 - I- 980 | 2 - I- 980 | 2 - I- 980 | 2 - I- 980 | 3 - I- 980 | 3 - I- 980 | 3 - I- 980 | 3 - I- 980 | 3 - I- 980 | 3 - I- 980 | 3 - I- 980 | 3 - I- 980 | 3 - I- 980 | 4 - I- 980 | 4 - I- 980 | 5 - I- 980 | 5 - I- 980 | 7 - I- 980 | 7 - I- 980 | 7 - I- 980 | 7 - I- 980 | 12 - I- 980 Ramp | 3 - I- 980 Ramp | 3 - I- 980 Ramp | 3 - I- 980 Ramp | 7 -(896 rows) - -SELECT * FROM toyemp WHERE name = 'sharon'; - name | age | location | annualsal ---------+-----+----------+----------- - sharon | 25 | (15,12) | 12000 -(1 row) - --- --- Test for Leaky view scenario --- -CREATE ROLE regress_alice; -CREATE FUNCTION f_leak (text) - RETURNS bool LANGUAGE 'plpgsql' COST 0.0000001 - AS 'BEGIN RAISE NOTICE ''f_leak => %'', $1; RETURN true; END'; -CREATE TABLE customer ( - cid int primary key, - name text not null, - tel text, - passwd text -); -CREATE TABLE credit_card ( - cid int references customer(cid), - cnum text, - climit int -); -CREATE TABLE credit_usage ( - cid int references customer(cid), - ymd date, - usage int -); -INSERT INTO customer - VALUES (101, 'regress_alice', '+81-12-3456-7890', 'passwd123'), - (102, 'regress_bob', '+01-234-567-8901', 'beafsteak'), - (103, 'regress_eve', '+49-8765-43210', 'hamburger'); -INSERT INTO credit_card - VALUES (101, '1111-2222-3333-4444', 4000), - (102, '5555-6666-7777-8888', 3000), - (103, '9801-2345-6789-0123', 2000); -INSERT INTO credit_usage - VALUES (101, '2011-09-15', 120), - (101, '2011-10-05', 90), - (101, '2011-10-18', 110), - (101, '2011-10-21', 200), - (101, '2011-11-10', 80), - (102, '2011-09-22', 300), - (102, '2011-10-12', 120), - (102, '2011-10-28', 200), - (103, '2011-10-15', 480); -CREATE VIEW my_property_normal AS - SELECT * FROM customer WHERE name = current_user; -CREATE VIEW my_property_secure WITH (security_barrier) AS - SELECT * FROM customer WHERE name = current_user; -CREATE VIEW 
-CREATE VIEW my_credit_card_normal AS
-    SELECT * FROM customer l NATURAL JOIN credit_card r
-    WHERE l.name = current_user;
-CREATE VIEW my_credit_card_secure WITH (security_barrier) AS
-    SELECT * FROM customer l NATURAL JOIN credit_card r
-    WHERE l.name = current_user;
-CREATE VIEW my_credit_card_usage_normal AS
-    SELECT * FROM my_credit_card_secure l NATURAL JOIN credit_usage r;
-CREATE VIEW my_credit_card_usage_secure WITH (security_barrier) AS
-    SELECT * FROM my_credit_card_secure l NATURAL JOIN credit_usage r;
-GRANT SELECT ON my_property_normal TO public;
-GRANT SELECT ON my_property_secure TO public;
-GRANT SELECT ON my_credit_card_normal TO public;
-GRANT SELECT ON my_credit_card_secure TO public;
-GRANT SELECT ON my_credit_card_usage_normal TO public;
-GRANT SELECT ON my_credit_card_usage_secure TO public;
---
--- Run leaky view scenarios
---
-SET SESSION AUTHORIZATION regress_alice;
---
--- scenario: if a qualifier with tiny-cost is given, it shall be launched
---           prior to the security policy of the view.
---
-SELECT * FROM my_property_normal WHERE f_leak(passwd);
-NOTICE:  f_leak => passwd123
-NOTICE:  f_leak => beafsteak
-NOTICE:  f_leak => hamburger
- cid | name | tel | passwd 
------+---------------+------------------+-----------
- 101 | regress_alice | +81-12-3456-7890 | passwd123
-(1 row)
-
-EXPLAIN (COSTS OFF) SELECT * FROM my_property_normal WHERE f_leak(passwd);
- QUERY PLAN 
------------------------------------------------------
- Seq Scan on customer
-   Filter: (f_leak(passwd) AND (name = CURRENT_USER))
-(2 rows)
-
-SELECT * FROM my_property_secure WHERE f_leak(passwd);
-NOTICE:  f_leak => passwd123
- cid | name | tel | passwd 
------+---------------+------------------+-----------
- 101 | regress_alice | +81-12-3456-7890 | passwd123
-(1 row)
-
-EXPLAIN (COSTS OFF) SELECT * FROM my_property_secure WHERE f_leak(passwd);
- QUERY PLAN 
---------------------------------------------
- Subquery Scan on my_property_secure
-   Filter: f_leak(my_property_secure.passwd)
-   ->  Seq Scan on customer
-         Filter: (name = CURRENT_USER)
-(4 rows)
-
---
--- scenario: qualifiers can be pushed down if they contain leaky functions,
---           provided they aren't passed data from inside the view.
---
-SELECT * FROM my_property_normal v
-    WHERE f_leak('passwd') AND f_leak(passwd);
-NOTICE:  f_leak => passwd
-NOTICE:  f_leak => passwd123
-NOTICE:  f_leak => passwd
-NOTICE:  f_leak => beafsteak
-NOTICE:  f_leak => passwd
-NOTICE:  f_leak => hamburger
- cid | name | tel | passwd 
------+---------------+------------------+-----------
- 101 | regress_alice | +81-12-3456-7890 | passwd123
-(1 row)
-
-EXPLAIN (COSTS OFF) SELECT * FROM my_property_normal v
-    WHERE f_leak('passwd') AND f_leak(passwd);
- QUERY PLAN 
---------------------------------------------------------------------------------
- Seq Scan on customer
-   Filter: (f_leak('passwd'::text) AND f_leak(passwd) AND (name = CURRENT_USER))
-(2 rows)
-
-SELECT * FROM my_property_secure v
-    WHERE f_leak('passwd') AND f_leak(passwd);
-NOTICE:  f_leak => passwd
-NOTICE:  f_leak => passwd123
-NOTICE:  f_leak => passwd
-NOTICE:  f_leak => passwd
- cid | name | tel | passwd 
------+---------------+------------------+-----------
- 101 | regress_alice | +81-12-3456-7890 | passwd123
-(1 row)
-
-EXPLAIN (COSTS OFF) SELECT * FROM my_property_secure v
-    WHERE f_leak('passwd') AND f_leak(passwd);
- QUERY PLAN 
-------------------------------------------------------------------
- Subquery Scan on v
-   Filter: f_leak(v.passwd)
-   ->  Seq Scan on customer
-         Filter: (f_leak('passwd'::text) AND (name = CURRENT_USER))
-(4 rows)
-
---
--- scenario: if a qualifier references only one-side of a particular join-
---           tree, it shall be distributed to the most deep scan plan as
---           possible as we can.
---
-SELECT * FROM my_credit_card_normal WHERE f_leak(cnum);
-NOTICE:  f_leak => 1111-2222-3333-4444
-NOTICE:  f_leak => 5555-6666-7777-8888
-NOTICE:  f_leak => 9801-2345-6789-0123
- cid | name | tel | passwd | cnum | climit 
------+---------------+------------------+-----------+---------------------+--------
- 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000
-(1 row)
-
-EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_normal WHERE f_leak(cnum);
- QUERY PLAN 
---------------------------------------------
- Hash Join
-   Hash Cond: (r.cid = l.cid)
-   ->  Seq Scan on credit_card r
-         Filter: f_leak(cnum)
-   ->  Hash
-         ->  Seq Scan on customer l
-               Filter: (name = CURRENT_USER)
-(7 rows)
-
-SELECT * FROM my_credit_card_secure WHERE f_leak(cnum);
-NOTICE:  f_leak => 1111-2222-3333-4444
- cid | name | tel | passwd | cnum | climit 
------+---------------+------------------+-----------+---------------------+--------
- 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000
-(1 row)
-
-EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_secure WHERE f_leak(cnum);
- QUERY PLAN 
---------------------------------------------------
- Subquery Scan on my_credit_card_secure
-   Filter: f_leak(my_credit_card_secure.cnum)
-   ->  Hash Join
-         Hash Cond: (r.cid = l.cid)
-         ->  Seq Scan on credit_card r
-         ->  Hash
-               ->  Seq Scan on customer l
-                     Filter: (name = CURRENT_USER)
-(8 rows)
-
---
--- scenario: an external qualifier can be pushed-down by in-front-of the
---           views with "security_barrier" attribute, except for operators
---           implemented with leakproof functions.
---
-SELECT * FROM my_credit_card_usage_normal
-    WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01';
-NOTICE:  f_leak => 1111-2222-3333-4444
- cid | name | tel | passwd | cnum | climit | ymd | usage 
------+---------------+------------------+-----------+---------------------+--------+------------+-------
- 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000 | 10-05-2011 | 90
- 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000 | 10-18-2011 | 110
- 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000 | 10-21-2011 | 200
-(3 rows)
-
-EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_usage_normal
-    WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01';
- QUERY PLAN 
------------------------------------------------------------------------------
- Nested Loop
-   Join Filter: (l.cid = r.cid)
-   ->  Seq Scan on credit_usage r
-         Filter: ((ymd >= '10-01-2011'::date) AND (ymd < '11-01-2011'::date))
-   ->  Materialize
-         ->  Subquery Scan on l
-               Filter: f_leak(l.cnum)
-               ->  Hash Join
-                     Hash Cond: (r_1.cid = l_1.cid)
-                     ->  Seq Scan on credit_card r_1
-                     ->  Hash
-                           ->  Seq Scan on customer l_1
-                                 Filter: (name = CURRENT_USER)
-(13 rows)
-
-SELECT * FROM my_credit_card_usage_secure
-    WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01';
-NOTICE:  f_leak => 1111-2222-3333-4444
-NOTICE:  f_leak => 1111-2222-3333-4444
-NOTICE:  f_leak => 1111-2222-3333-4444
- cid | name | tel | passwd | cnum | climit | ymd | usage 
------+---------------+------------------+-----------+---------------------+--------+------------+-------
- 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000 | 10-05-2011 | 90
- 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000 | 10-18-2011 | 110
- 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000 | 10-21-2011 | 200
-(3 rows)
-
-EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_usage_secure
-    WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01';
- QUERY PLAN 
------------------------------------------------------------------------------------
- Subquery Scan on my_credit_card_usage_secure
-   Filter: f_leak(my_credit_card_usage_secure.cnum)
-   ->  Nested Loop
-         Join Filter: (l.cid = r.cid)
-         ->  Seq Scan on credit_usage r
-               Filter: ((ymd >= '10-01-2011'::date) AND (ymd < '11-01-2011'::date))
-         ->  Materialize
-               ->  Hash Join
-                     Hash Cond: (r_1.cid = l.cid)
-                     ->  Seq Scan on credit_card r_1
-                     ->  Hash
-                           ->  Seq Scan on customer l
-                                 Filter: (name = CURRENT_USER)
-(13 rows)
-
---
--- Test for the case when security_barrier gets changed between rewriter
--- and planner stage.
---
-PREPARE p1 AS SELECT * FROM my_property_normal WHERE f_leak(passwd);
-PREPARE p2 AS SELECT * FROM my_property_secure WHERE f_leak(passwd);
-EXECUTE p1;
-NOTICE:  f_leak => passwd123
-NOTICE:  f_leak => beafsteak
-NOTICE:  f_leak => hamburger
- cid | name | tel | passwd 
------+---------------+------------------+-----------
- 101 | regress_alice | +81-12-3456-7890 | passwd123
-(1 row)
-
-EXECUTE p2;
-NOTICE:  f_leak => passwd123
- cid | name | tel | passwd 
------+---------------+------------------+-----------
- 101 | regress_alice | +81-12-3456-7890 | passwd123
-(1 row)
-
-RESET SESSION AUTHORIZATION;
-ALTER VIEW my_property_normal SET (security_barrier=true);
-ALTER VIEW my_property_secure SET (security_barrier=false);
-SET SESSION AUTHORIZATION regress_alice;
-EXECUTE p1;		-- To be perform as a view with security-barrier
-NOTICE:  f_leak => passwd123
- cid | name | tel | passwd 
------+---------------+------------------+-----------
- 101 | regress_alice | +81-12-3456-7890 | passwd123
-(1 row)
-
-EXECUTE p2;		-- To be perform as a view without security-barrier
-NOTICE:  f_leak => passwd123
-NOTICE:  f_leak => beafsteak
-NOTICE:  f_leak => hamburger
- cid | name | tel | passwd 
------+---------------+------------------+-----------
- 101 | regress_alice | +81-12-3456-7890 | passwd123
-(1 row)
-
--- Cleanup.
-RESET SESSION AUTHORIZATION;
-DROP ROLE regress_alice;
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+	Is the server running locally and accepting connections on that socket?
diff -U3 /Users/admin/pgsql/src/test/regress/expected/portals_p2.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/portals_p2.out
--- /Users/admin/pgsql/src/test/regress/expected/portals_p2.out	2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/portals_p2.out	2024-12-13 13:20:11
@@ -1,122 +1,2 @@
---
--- PORTALS_P2
---
-BEGIN;
-DECLARE foo13 CURSOR FOR
-    SELECT * FROM onek WHERE unique1 = 50;
-DECLARE foo14 CURSOR FOR
-    SELECT * FROM onek WHERE unique1 = 51;
-DECLARE foo15 CURSOR FOR
-    SELECT * FROM onek WHERE unique1 = 52;
-DECLARE foo16 CURSOR FOR
-    SELECT * FROM onek WHERE unique1 = 53;
-DECLARE foo17 CURSOR FOR
-    SELECT * FROM onek WHERE unique1 = 54;
-DECLARE foo18 CURSOR FOR
-    SELECT * FROM onek WHERE unique1 = 55;
-DECLARE foo19 CURSOR FOR
-    SELECT * FROM onek WHERE unique1 = 56;
-DECLARE foo20 CURSOR FOR
-    SELECT * FROM onek WHERE unique1 = 57;
-DECLARE foo21 CURSOR FOR
-    SELECT * FROM onek WHERE unique1 = 58;
-DECLARE foo22 CURSOR FOR
-    SELECT * FROM onek WHERE unique1 = 59;
-DECLARE foo23 CURSOR FOR
-    SELECT * FROM onek WHERE unique1 = 60;
-DECLARE foo24 CURSOR FOR
-    SELECT * FROM onek2 WHERE unique1 = 50;
-DECLARE foo25 CURSOR FOR
-    SELECT * FROM onek2 WHERE unique1 = 60;
-FETCH all in foo13;
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
- 50 | 253 | 0 | 2 | 0 | 10 | 0 | 50 | 50 | 50 | 50 | 0 | 1 | YBAAAA | TJAAAA | HHHHxx
-(1 row)
-
-FETCH all in foo14;
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- - 51 | 76 | 1 | 3 | 1 | 11 | 1 | 51 | 51 | 51 | 51 | 2 | 3 | ZBAAAA | YCAAAA | AAAAxx -(1 row) - -FETCH all in foo15; - unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 ----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- - 52 | 985 | 0 | 0 | 2 | 12 | 2 | 52 | 52 | 52 | 52 | 4 | 5 | ACAAAA | XLBAAA | HHHHxx -(1 row) - -FETCH all in foo16; - unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 ----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- - 53 | 196 | 1 | 1 | 3 | 13 | 3 | 53 | 53 | 53 | 53 | 6 | 7 | BCAAAA | OHAAAA | AAAAxx -(1 row) - -FETCH all in foo17; - unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 ----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- - 54 | 356 | 0 | 2 | 4 | 14 | 4 | 54 | 54 | 54 | 54 | 8 | 9 | CCAAAA | SNAAAA | AAAAxx -(1 row) - -FETCH all in foo18; - unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 ----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- - 55 | 627 | 1 | 3 | 5 | 15 | 5 | 55 | 55 | 55 | 55 | 10 | 11 | DCAAAA | DYAAAA | VVVVxx -(1 row) - -FETCH all in foo19; - unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 ----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- - 56 | 54 | 0 | 0 | 6 | 16 | 6 | 56 | 56 | 56 | 56 | 12 | 13 | ECAAAA | CCAAAA | OOOOxx -(1 row) - -FETCH all in foo20; - unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 ----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- - 57 | 942 | 1 | 1 | 7 | 17 | 7 | 57 | 57 | 57 | 57 | 14 | 15 | FCAAAA | GKBAAA | OOOOxx -(1 row) - -FETCH all in foo21; - unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 ----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- - 58 | 114 | 0 | 2 | 8 | 18 | 8 | 58 | 58 | 58 | 58 | 16 | 17 | GCAAAA | KEAAAA | OOOOxx -(1 row) - -FETCH all in foo22; - unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 ----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- - 59 | 593 | 1 | 3 | 
9 | 19 | 9 | 59 | 59 | 59 | 59 | 18 | 19 | HCAAAA | VWAAAA | HHHHxx -(1 row) - -FETCH all in foo23; - unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 ----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- - 60 | 483 | 0 | 0 | 0 | 0 | 0 | 60 | 60 | 60 | 60 | 0 | 1 | ICAAAA | PSAAAA | VVVVxx -(1 row) - -FETCH all in foo24; - unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 ----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- - 50 | 253 | 0 | 2 | 0 | 10 | 0 | 50 | 50 | 50 | 50 | 0 | 1 | YBAAAA | TJAAAA | HHHHxx -(1 row) - -FETCH all in foo25; - unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 ----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- - 60 | 483 | 0 | 0 | 0 | 0 | 0 | 60 | 60 | 60 | 60 | 0 | 1 | ICAAAA | PSAAAA | VVVVxx -(1 row) - -CLOSE foo13; -CLOSE foo14; -CLOSE foo15; -CLOSE foo16; -CLOSE foo17; -CLOSE foo18; -CLOSE foo19; -CLOSE foo20; -CLOSE foo21; -CLOSE foo22; -CLOSE foo23; -CLOSE foo24; -CLOSE foo25; -END; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/foreign_key.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/foreign_key.out --- /Users/admin/pgsql/src/test/regress/expected/foreign_key.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/foreign_key.out 2024-12-13 13:20:11 @@ -1,3047 +1,2 @@ --- --- FOREIGN KEY --- --- MATCH FULL --- --- First test, check and cascade --- -CREATE TABLE PKTABLE ( ptest1 int PRIMARY KEY, ptest2 text ); -CREATE TABLE FKTABLE ( ftest1 int REFERENCES PKTABLE MATCH FULL ON DELETE CASCADE ON UPDATE CASCADE, ftest2 int ); --- Insert test data into PKTABLE -INSERT INTO PKTABLE VALUES (1, 'Test1'); -INSERT INTO PKTABLE VALUES (2, 'Test2'); -INSERT INTO PKTABLE VALUES (3, 'Test3'); -INSERT INTO PKTABLE VALUES (4, 'Test4'); -INSERT INTO PKTABLE VALUES (5, 'Test5'); --- Insert successful rows into FK TABLE -INSERT INTO FKTABLE VALUES (1, 2); -INSERT INTO FKTABLE VALUES (2, 3); -INSERT INTO FKTABLE VALUES (3, 4); -INSERT INTO FKTABLE VALUES (NULL, 1); --- Insert a failed row into FK TABLE -INSERT INTO FKTABLE VALUES (100, 2); -ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey" -DETAIL: Key (ftest1)=(100) is not present in table "pktable". 
--- Check FKTABLE -SELECT * FROM FKTABLE; - ftest1 | ftest2 ---------+-------- - 1 | 2 - 2 | 3 - 3 | 4 - | 1 -(4 rows) - --- Delete a row from PK TABLE -DELETE FROM PKTABLE WHERE ptest1=1; --- Check FKTABLE for removal of matched row -SELECT * FROM FKTABLE; - ftest1 | ftest2 ---------+-------- - 2 | 3 - 3 | 4 - | 1 -(3 rows) - --- Update a row from PK TABLE -UPDATE PKTABLE SET ptest1=1 WHERE ptest1=2; --- Check FKTABLE for update of matched row -SELECT * FROM FKTABLE; - ftest1 | ftest2 ---------+-------- - 3 | 4 - | 1 - 1 | 3 -(3 rows) - -DROP TABLE FKTABLE; -DROP TABLE PKTABLE; --- --- check set NULL and table constraint on multiple columns --- -CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 text, PRIMARY KEY(ptest1, ptest2) ); -CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, CONSTRAINT constrname FOREIGN KEY(ftest1, ftest2) - REFERENCES PKTABLE MATCH FULL ON DELETE SET NULL ON UPDATE SET NULL); --- Test comments -COMMENT ON CONSTRAINT constrname_wrong ON FKTABLE IS 'fk constraint comment'; -ERROR: constraint "constrname_wrong" for table "fktable" does not exist -COMMENT ON CONSTRAINT constrname ON FKTABLE IS 'fk constraint comment'; -COMMENT ON CONSTRAINT constrname ON FKTABLE IS NULL; --- Insert test data into PKTABLE -INSERT INTO PKTABLE VALUES (1, 2, 'Test1'); -INSERT INTO PKTABLE VALUES (1, 3, 'Test1-2'); -INSERT INTO PKTABLE VALUES (2, 4, 'Test2'); -INSERT INTO PKTABLE VALUES (3, 6, 'Test3'); -INSERT INTO PKTABLE VALUES (4, 8, 'Test4'); -INSERT INTO PKTABLE VALUES (5, 10, 'Test5'); --- Insert successful rows into FK TABLE -INSERT INTO FKTABLE VALUES (1, 2, 4); -INSERT INTO FKTABLE VALUES (1, 3, 5); -INSERT INTO FKTABLE VALUES (2, 4, 8); -INSERT INTO FKTABLE VALUES (3, 6, 12); -INSERT INTO FKTABLE VALUES (NULL, NULL, 0); --- Insert failed rows into FK TABLE -INSERT INTO FKTABLE VALUES (100, 2, 4); -ERROR: insert or update on table "fktable" violates foreign key constraint "constrname" -DETAIL: Key (ftest1, ftest2)=(100, 2) is not present in table "pktable". -INSERT INTO FKTABLE VALUES (2, 2, 4); -ERROR: insert or update on table "fktable" violates foreign key constraint "constrname" -DETAIL: Key (ftest1, ftest2)=(2, 2) is not present in table "pktable". -INSERT INTO FKTABLE VALUES (NULL, 2, 4); -ERROR: insert or update on table "fktable" violates foreign key constraint "constrname" -DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. -INSERT INTO FKTABLE VALUES (1, NULL, 4); -ERROR: insert or update on table "fktable" violates foreign key constraint "constrname" -DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. 
--- Check FKTABLE
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3 
---------+--------+--------
-      1 |      2 |      4
-      1 |      3 |      5
-      2 |      4 |      8
-      3 |      6 |     12
-        |        |      0
-(5 rows)
-
--- Delete a row from PK TABLE
-DELETE FROM PKTABLE WHERE ptest1=1 and ptest2=2;
--- Check FKTABLE for removal of matched row
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3 
---------+--------+--------
-      1 |      3 |      5
-      2 |      4 |      8
-      3 |      6 |     12
-        |        |      0
-        |        |      4
-(5 rows)
-
--- Delete another row from PK TABLE
-DELETE FROM PKTABLE WHERE ptest1=5 and ptest2=10;
--- Check FKTABLE (should be no change)
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3 
---------+--------+--------
-      1 |      3 |      5
-      2 |      4 |      8
-      3 |      6 |     12
-        |        |      0
-        |        |      4
-(5 rows)
-
--- Update a row from PK TABLE
-UPDATE PKTABLE SET ptest1=1 WHERE ptest1=2;
--- Check FKTABLE for update of matched row
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3 
---------+--------+--------
-      1 |      3 |      5
-      3 |      6 |     12
-        |        |      0
-        |        |      4
-        |        |      8
-(5 rows)
-
--- Check update with part of key null
-UPDATE FKTABLE SET ftest1 = NULL WHERE ftest1 = 1;
-ERROR:  insert or update on table "fktable" violates foreign key constraint "constrname"
-DETAIL:  MATCH FULL does not allow mixing of null and nonnull key values.
--- Check update with old and new key values equal
-UPDATE FKTABLE SET ftest1 = 1 WHERE ftest1 = 1;
--- Try altering the column type where foreign keys are involved
-ALTER TABLE PKTABLE ALTER COLUMN ptest1 TYPE bigint;
-ALTER TABLE FKTABLE ALTER COLUMN ftest1 TYPE bigint;
-SELECT * FROM PKTABLE;
- ptest1 | ptest2 | ptest3  
---------+--------+---------
-      1 |      3 | Test1-2
-      3 |      6 | Test3
-      4 |      8 | Test4
-      1 |      4 | Test2
-(4 rows)
-
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3 
---------+--------+--------
-      3 |      6 |     12
-        |        |      0
-        |        |      4
-        |        |      8
-      1 |      3 |      5
-(5 rows)
-
-DROP TABLE PKTABLE CASCADE;
-NOTICE:  drop cascades to constraint constrname on table fktable
-DROP TABLE FKTABLE;
---
--- check set default and table constraint on multiple columns
---
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 text, PRIMARY KEY(ptest1, ptest2) );
-CREATE TABLE FKTABLE ( ftest1 int DEFAULT -1, ftest2 int DEFAULT -2, ftest3 int, CONSTRAINT constrname2 FOREIGN KEY(ftest1, ftest2)
-                       REFERENCES PKTABLE MATCH FULL ON DELETE SET DEFAULT ON UPDATE SET DEFAULT);
--- Insert a value in PKTABLE for default
-INSERT INTO PKTABLE VALUES (-1, -2, 'The Default!');
--- Insert test data into PKTABLE
-INSERT INTO PKTABLE VALUES (1, 2, 'Test1');
-INSERT INTO PKTABLE VALUES (1, 3, 'Test1-2');
-INSERT INTO PKTABLE VALUES (2, 4, 'Test2');
-INSERT INTO PKTABLE VALUES (3, 6, 'Test3');
-INSERT INTO PKTABLE VALUES (4, 8, 'Test4');
-INSERT INTO PKTABLE VALUES (5, 10, 'Test5');
--- Insert successful rows into FK TABLE
-INSERT INTO FKTABLE VALUES (1, 2, 4);
-INSERT INTO FKTABLE VALUES (1, 3, 5);
-INSERT INTO FKTABLE VALUES (2, 4, 8);
-INSERT INTO FKTABLE VALUES (3, 6, 12);
-INSERT INTO FKTABLE VALUES (NULL, NULL, 0);
--- Insert failing rows into FK TABLE
-INSERT INTO FKTABLE VALUES (100, 2, 4);
-ERROR:  insert or update on table "fktable" violates foreign key constraint "constrname2"
-DETAIL:  Key (ftest1, ftest2)=(100, 2) is not present in table "pktable".
-INSERT INTO FKTABLE VALUES (2, 2, 4);
-ERROR:  insert or update on table "fktable" violates foreign key constraint "constrname2"
-DETAIL:  Key (ftest1, ftest2)=(2, 2) is not present in table "pktable".
-INSERT INTO FKTABLE VALUES (NULL, 2, 4);
-ERROR:  insert or update on table "fktable" violates foreign key constraint "constrname2"
-DETAIL:  MATCH FULL does not allow mixing of null and nonnull key values.
-INSERT INTO FKTABLE VALUES (1, NULL, 4);
-ERROR:  insert or update on table "fktable" violates foreign key constraint "constrname2"
-DETAIL:  MATCH FULL does not allow mixing of null and nonnull key values.
--- Check FKTABLE
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3 
---------+--------+--------
-      1 |      2 |      4
-      1 |      3 |      5
-      2 |      4 |      8
-      3 |      6 |     12
-        |        |      0
-(5 rows)
-
--- Delete a row from PK TABLE
-DELETE FROM PKTABLE WHERE ptest1=1 and ptest2=2;
--- Check FKTABLE for removal of matched row
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3 
---------+--------+--------
-      1 |      3 |      5
-      2 |      4 |      8
-      3 |      6 |     12
-        |        |      0
-     -1 |     -2 |      4
-(5 rows)
-
--- Delete another row from PK TABLE
-DELETE FROM PKTABLE WHERE ptest1=5 and ptest2=10;
--- Check FKTABLE (should be no change)
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3 
---------+--------+--------
-      1 |      3 |      5
-      2 |      4 |      8
-      3 |      6 |     12
-        |        |      0
-     -1 |     -2 |      4
-(5 rows)
-
--- Update a row from PK TABLE
-UPDATE PKTABLE SET ptest1=1 WHERE ptest1=2;
--- Check FKTABLE for update of matched row
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3 
---------+--------+--------
-      1 |      3 |      5
-      3 |      6 |     12
-        |        |      0
-     -1 |     -2 |      4
-     -1 |     -2 |      8
-(5 rows)
-
--- this should fail for lack of CASCADE
-DROP TABLE PKTABLE;
-ERROR:  cannot drop table pktable because other objects depend on it
-DETAIL:  constraint constrname2 on table fktable depends on table pktable
-HINT:  Use DROP ... CASCADE to drop the dependent objects too.
-DROP TABLE PKTABLE CASCADE;
-NOTICE:  drop cascades to constraint constrname2 on table fktable
-DROP TABLE FKTABLE;
---
--- First test, check with no on delete or on update
---
-CREATE TABLE PKTABLE ( ptest1 int PRIMARY KEY, ptest2 text );
-CREATE TABLE FKTABLE ( ftest1 int REFERENCES PKTABLE MATCH FULL, ftest2 int );
--- Insert test data into PKTABLE
-INSERT INTO PKTABLE VALUES (1, 'Test1');
-INSERT INTO PKTABLE VALUES (2, 'Test2');
-INSERT INTO PKTABLE VALUES (3, 'Test3');
-INSERT INTO PKTABLE VALUES (4, 'Test4');
-INSERT INTO PKTABLE VALUES (5, 'Test5');
--- Insert successful rows into FK TABLE
-INSERT INTO FKTABLE VALUES (1, 2);
-INSERT INTO FKTABLE VALUES (2, 3);
-INSERT INTO FKTABLE VALUES (3, 4);
-INSERT INTO FKTABLE VALUES (NULL, 1);
--- Insert a failing row into FK TABLE
-INSERT INTO FKTABLE VALUES (100, 2);
-ERROR:  insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL:  Key (ftest1)=(100) is not present in table "pktable".
--- Check FKTABLE
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 
---------+--------
-      1 |      2
-      2 |      3
-      3 |      4
-        |      1
-(4 rows)
-
--- Check PKTABLE
-SELECT * FROM PKTABLE;
- ptest1 | ptest2 
---------+--------
-      1 | Test1
-      2 | Test2
-      3 | Test3
-      4 | Test4
-      5 | Test5
-(5 rows)
-
--- Delete a row from PK TABLE (should fail)
-DELETE FROM PKTABLE WHERE ptest1=1;
-ERROR:  update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_fkey" on table "fktable"
-DETAIL:  Key (ptest1)=(1) is still referenced from table "fktable".
--- Delete a row from PK TABLE (should succeed)
-DELETE FROM PKTABLE WHERE ptest1=5;
--- Check PKTABLE for deletes
-SELECT * FROM PKTABLE;
- ptest1 | ptest2 
---------+--------
-      1 | Test1
-      2 | Test2
-      3 | Test3
-      4 | Test4
-(4 rows)
-
--- Update a row from PK TABLE (should fail)
-UPDATE PKTABLE SET ptest1=0 WHERE ptest1=2;
-ERROR:  update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_fkey" on table "fktable"
-DETAIL:  Key (ptest1)=(2) is still referenced from table "fktable".
--- Update a row from PK TABLE (should succeed)
-UPDATE PKTABLE SET ptest1=0 WHERE ptest1=4;
--- Check PKTABLE for updates
-SELECT * FROM PKTABLE;
- ptest1 | ptest2 
---------+--------
-      1 | Test1
-      2 | Test2
-      3 | Test3
-      0 | Test4
-(4 rows)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
---
--- Check initial check upon ALTER TABLE
---
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, PRIMARY KEY(ptest1, ptest2) );
-CREATE TABLE FKTABLE ( ftest1 int, ftest2 int );
-INSERT INTO PKTABLE VALUES (1, 2);
-INSERT INTO FKTABLE VALUES (1, NULL);
-ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2) REFERENCES PKTABLE MATCH FULL;
-ERROR:  insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_ftest2_fkey"
-DETAIL:  MATCH FULL does not allow mixing of null and nonnull key values.
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- MATCH SIMPLE
--- Base test restricting update/delete
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) );
-CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3
-                       FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE);
--- Insert Primary Key values
-INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
-INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2');
-INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3');
-INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4');
--- Insert Foreign Key values
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
-INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2);
-INSERT INTO FKTABLE VALUES (2, NULL, 3, 3);
-INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4);
-INSERT INTO FKTABLE VALUES (NULL, 3, 4, 5);
--- Insert failing values
-INSERT INTO FKTABLE VALUES (1, 2, 7, 6);
-ERROR:  insert or update on table "fktable" violates foreign key constraint "constrname3"
-DETAIL:  Key (ftest1, ftest2, ftest3)=(1, 2, 7) is not present in table "pktable".
--- Show FKTABLE
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-      1 |      2 |      3 |      1
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-(5 rows)
-
--- Try to update something that should fail
-UPDATE PKTABLE set ptest2=5 where ptest2=2;
-ERROR:  update or delete on table "pktable" violates foreign key constraint "constrname3" on table "fktable"
-DETAIL:  Key (ptest1, ptest2, ptest3)=(1, 2, 3) is still referenced from table "fktable".
--- Try to update something that should succeed
-UPDATE PKTABLE set ptest1=1 WHERE ptest2=3;
--- Try to delete something that should fail
-DELETE FROM PKTABLE where ptest1=1 and ptest2=2 and ptest3=3;
-ERROR:  update or delete on table "pktable" violates foreign key constraint "constrname3" on table "fktable"
-DETAIL:  Key (ptest1, ptest2, ptest3)=(1, 2, 3) is still referenced from table "fktable".
--- Try to delete something that should work
-DELETE FROM PKTABLE where ptest1=2;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      1 |      2 |      3 | test1
-      1 |      3 |      3 | test2
-      1 |      3 |      4 | test3
-(3 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-      1 |      2 |      3 |      1
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-(5 rows)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- restrict with null values
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, UNIQUE(ptest1, ptest2, ptest3) );
-CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3
-                       FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE (ptest1, ptest2, ptest3));
-INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
-INSERT INTO PKTABLE VALUES (1, 3, NULL, 'test2');
-INSERT INTO PKTABLE VALUES (2, NULL, 4, 'test3');
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
-DELETE FROM PKTABLE WHERE ptest1 = 2;
-SELECT * FROM PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      1 |      2 |      3 | test1
-      1 |      3 |        | test2
-(2 rows)
-
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-      1 |      2 |      3 |      1
-(1 row)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- cascade update/delete
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) );
-CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3
-                       FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE
-                       ON DELETE CASCADE ON UPDATE CASCADE);
--- Insert Primary Key values
-INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
-INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2');
-INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3');
-INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4');
--- Insert Foreign Key values
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
-INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2);
-INSERT INTO FKTABLE VALUES (2, NULL, 3, 3);
-INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4);
-INSERT INTO FKTABLE VALUES (NULL, 3, 4, 5);
--- Insert failing values
-INSERT INTO FKTABLE VALUES (1, 2, 7, 6);
-ERROR:  insert or update on table "fktable" violates foreign key constraint "constrname3"
-DETAIL:  Key (ftest1, ftest2, ftest3)=(1, 2, 7) is not present in table "pktable".
--- Show FKTABLE
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-      1 |      2 |      3 |      1
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-(5 rows)
-
--- Try to update something that will cascade
-UPDATE PKTABLE set ptest2=5 where ptest2=2;
--- Try to update something that should not cascade
-UPDATE PKTABLE set ptest1=1 WHERE ptest2=3;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      2 |      4 |      5 | test4
-      1 |      5 |      3 | test1
-      1 |      3 |      3 | test2
-      1 |      3 |      4 | test3
-(4 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-      1 |      5 |      3 |      1
-(5 rows)
-
--- Try to delete something that should cascade
-DELETE FROM PKTABLE where ptest1=1 and ptest2=5 and ptest3=3;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      2 |      4 |      5 | test4
-      1 |      3 |      3 | test2
-      1 |      3 |      4 | test3
-(3 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-(4 rows)
-
--- Try to delete something that should not have a cascade
-DELETE FROM PKTABLE where ptest1=2;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      1 |      3 |      3 | test2
-      1 |      3 |      4 | test3
-(2 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-(4 rows)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- set null update / set default delete
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) );
-CREATE TABLE FKTABLE ( ftest1 int DEFAULT 0, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3
-                       FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE
-                       ON DELETE SET DEFAULT ON UPDATE SET NULL);
--- Insert Primary Key values
-INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
-INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2');
-INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3');
-INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4');
--- Insert Foreign Key values
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
-INSERT INTO FKTABLE VALUES (2, 3, 4, 1);
-INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2);
-INSERT INTO FKTABLE VALUES (2, NULL, 3, 3);
-INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4);
-INSERT INTO FKTABLE VALUES (NULL, 3, 4, 5);
--- Insert failing values
-INSERT INTO FKTABLE VALUES (1, 2, 7, 6);
-ERROR:  insert or update on table "fktable" violates foreign key constraint "constrname3"
-DETAIL:  Key (ftest1, ftest2, ftest3)=(1, 2, 7) is not present in table "pktable".
--- Show FKTABLE
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-      1 |      2 |      3 |      1
-      2 |      3 |      4 |      1
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-(6 rows)
-
--- Try to update something that will set null
-UPDATE PKTABLE set ptest2=5 where ptest2=2;
--- Try to update something that should not set null
-UPDATE PKTABLE set ptest2=2 WHERE ptest2=3 and ptest1=1;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      2 |      3 |      4 | test3
-      2 |      4 |      5 | test4
-      1 |      5 |      3 | test1
-      1 |      2 |      3 | test2
-(4 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-      2 |      3 |      4 |      1
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-        |        |        |      1
-(6 rows)
-
--- Try to delete something that should set default
-DELETE FROM PKTABLE where ptest1=2 and ptest2=3 and ptest3=4;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      2 |      4 |      5 | test4
-      1 |      5 |      3 | test1
-      1 |      2 |      3 | test2
-(3 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-        |        |        |      1
-      0 |        |        |      1
-(6 rows)
-
--- Try to delete something that should not set default
-DELETE FROM PKTABLE where ptest2=5;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      2 |      4 |      5 | test4
-      1 |      2 |      3 | test2
-(2 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-        |        |        |      1
-      0 |        |        |      1
-(6 rows)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- set default update / set null delete
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) );
-CREATE TABLE FKTABLE ( ftest1 int DEFAULT 0, ftest2 int DEFAULT -1, ftest3 int DEFAULT -2, ftest4 int, CONSTRAINT constrname3
-                       FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE
-                       ON DELETE SET NULL ON UPDATE SET DEFAULT);
--- Insert Primary Key values
-INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
-INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2');
-INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3');
-INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4');
-INSERT INTO PKTABLE VALUES (2, -1, 5, 'test5');
--- Insert Foreign Key values
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
-INSERT INTO FKTABLE VALUES (2, 3, 4, 1);
-INSERT INTO FKTABLE VALUES (2, 4, 5, 1);
-INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2);
-INSERT INTO FKTABLE VALUES (2, NULL, 3, 3);
-INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4);
-INSERT INTO FKTABLE VALUES (NULL, 3, 4, 5);
--- Insert failing values
-INSERT INTO FKTABLE VALUES (1, 2, 7, 6);
-ERROR:  insert or update on table "fktable" violates foreign key constraint "constrname3"
-DETAIL:  Key (ftest1, ftest2, ftest3)=(1, 2, 7) is not present in table "pktable".
--- Show FKTABLE
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-      1 |      2 |      3 |      1
-      2 |      3 |      4 |      1
-      2 |      4 |      5 |      1
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-(7 rows)
-
--- Try to update something that will fail
-UPDATE PKTABLE set ptest2=5 where ptest2=2;
-ERROR:  insert or update on table "fktable" violates foreign key constraint "constrname3"
-DETAIL:  Key (ftest1, ftest2, ftest3)=(0, -1, -2) is not present in table "pktable".
--- Try to update something that will set default
-UPDATE PKTABLE set ptest1=0, ptest2=-1, ptest3=-2 where ptest2=2;
-UPDATE PKTABLE set ptest2=10 where ptest2=4;
--- Try to update something that should not set default
-UPDATE PKTABLE set ptest2=2 WHERE ptest2=3 and ptest1=1;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      2 |      3 |      4 | test3
-      2 |     -1 |      5 | test5
-      0 |     -1 |     -2 | test1
-      2 |     10 |      5 | test4
-      1 |      2 |      3 | test2
-(5 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-      2 |      3 |      4 |      1
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-      0 |     -1 |     -2 |      1
-      0 |     -1 |     -2 |      1
-(7 rows)
-
--- Try to delete something that should set null
-DELETE FROM PKTABLE where ptest1=2 and ptest2=3 and ptest3=4;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      2 |     -1 |      5 | test5
-      0 |     -1 |     -2 | test1
-      2 |     10 |      5 | test4
-      1 |      2 |      3 | test2
-(4 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-      0 |     -1 |     -2 |      1
-      0 |     -1 |     -2 |      1
-        |        |        |      1
-(7 rows)
-
--- Try to delete something that should not set null
-DELETE FROM PKTABLE where ptest2=-1 and ptest3=5;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4 
---------+--------+--------+--------
-      0 |     -1 |     -2 | test1
-      2 |     10 |      5 | test4
-      1 |      2 |      3 | test2
-(3 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4 
---------+--------+--------+--------
-        |      2 |      3 |      2
-      2 |        |      3 |      3
-        |      2 |      7 |      4
-        |      3 |      4 |      5
-      0 |     -1 |     -2 |      1
-      0 |     -1 |     -2 |      1
-        |        |        |      1
-(7 rows)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- Test for ON DELETE SET NULL/DEFAULT (column_list);
-CREATE TABLE PKTABLE (tid int, id int, PRIMARY KEY (tid, id));
-CREATE TABLE FKTABLE (tid int, id int, foo int, FOREIGN KEY (tid, id) REFERENCES PKTABLE ON DELETE SET NULL (bar));
-ERROR:  column "bar" referenced in foreign key constraint does not exist
-CREATE TABLE FKTABLE (tid int, id int, foo int, FOREIGN KEY (tid, id) REFERENCES PKTABLE ON DELETE SET NULL (foo));
-ERROR:  column "foo" referenced in ON DELETE SET action must be part of foreign key
-CREATE TABLE FKTABLE (tid int, id int, foo int, FOREIGN KEY (tid, foo) REFERENCES PKTABLE ON UPDATE SET NULL (foo));
-ERROR:  a column list with SET NULL is only supported for ON DELETE actions
-LINE 1: ...oo int, FOREIGN KEY (tid, foo) REFERENCES PKTABLE ON UPDATE ...
-                                                             ^
-CREATE TABLE FKTABLE (
-  tid int, id int,
-  fk_id_del_set_null int,
-  fk_id_del_set_default int DEFAULT 0,
-  FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES PKTABLE ON DELETE SET NULL (fk_id_del_set_null),
-  FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES PKTABLE ON DELETE SET DEFAULT (fk_id_del_set_default)
-);
-SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conrelid = 'fktable'::regclass::oid ORDER BY oid;
-                                                pg_get_constraintdef                                                 
----------------------------------------------------------------------------------------------------------------------
- FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES pktable(tid, id) ON DELETE SET NULL (fk_id_del_set_null)
- FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES pktable(tid, id) ON DELETE SET DEFAULT (fk_id_del_set_default)
-(2 rows)
-
-INSERT INTO PKTABLE VALUES (1, 0), (1, 1), (1, 2);
-INSERT INTO FKTABLE VALUES
-  (1, 1, 1, NULL),
-  (1, 2, NULL, 2);
-DELETE FROM PKTABLE WHERE id = 1 OR id = 2;
-SELECT * FROM FKTABLE ORDER BY id;
- tid | id | fk_id_del_set_null | fk_id_del_set_default 
------+----+--------------------+-----------------------
-   1 |  1 |                    |                      
-   1 |  2 |                    |                     0
-(2 rows)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- Test some invalid FK definitions
-CREATE TABLE PKTABLE (ptest1 int PRIMARY KEY, someoid oid);
-CREATE TABLE FKTABLE_FAIL1 ( ftest1 int, CONSTRAINT fkfail1 FOREIGN KEY (ftest2) REFERENCES PKTABLE);
-ERROR:  column "ftest2" referenced in foreign key constraint does not exist
-CREATE TABLE FKTABLE_FAIL2 ( ftest1 int, CONSTRAINT fkfail1 FOREIGN KEY (ftest1) REFERENCES PKTABLE(ptest2));
-ERROR:  column "ptest2" referenced in foreign key constraint does not exist
-CREATE TABLE FKTABLE_FAIL3 ( ftest1 int, CONSTRAINT fkfail1 FOREIGN KEY (tableoid) REFERENCES PKTABLE(someoid));
-ERROR:  system columns cannot be used in foreign keys
-CREATE TABLE FKTABLE_FAIL4 ( ftest1 oid, CONSTRAINT fkfail1 FOREIGN KEY (ftest1) REFERENCES PKTABLE(tableoid));
-ERROR:  system columns cannot be used in foreign keys
-DROP TABLE PKTABLE;
--- Test a referencing column list smaller than the referenced constraint's
-CREATE TABLE PKTABLE (ptest1 int, ptest2 int, UNIQUE(ptest1, ptest2));
-CREATE TABLE FKTABLE_FAIL1 (ftest1 int REFERENCES pktable(ptest1));
-ERROR:  there is no unique constraint matching given keys for referenced table "pktable"
-DROP TABLE FKTABLE_FAIL1;
-ERROR:  table "fktable_fail1" does not exist
-DROP TABLE PKTABLE;
---
--- Tests for mismatched types
---
--- Basic one column, two table setup
-CREATE TABLE PKTABLE (ptest1 int PRIMARY KEY);
-INSERT INTO PKTABLE VALUES(42);
--- This next should fail, because int=inet does not exist
-CREATE TABLE FKTABLE (ftest1 inet REFERENCES pktable);
-ERROR:  foreign key constraint "fktable_ftest1_fkey" cannot be implemented
-DETAIL:  Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: inet and integer.
--- This should also fail for the same reason, but here we
--- give the column name
-CREATE TABLE FKTABLE (ftest1 inet REFERENCES pktable(ptest1));
-ERROR:  foreign key constraint "fktable_ftest1_fkey" cannot be implemented
-DETAIL:  Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: inet and integer.
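Whether a cross-type FK/PK pairing like the ones failing above is accepted comes down to the btree operator family behind the referenced table's unique index: the pairing works only if that family contains an equality operator for the two types. A way to inspect this from the system catalogs (an illustrative query, not part of the test):

  SELECT am.amname, opf.opfname,
         amop.amoplefttype::regtype  AS referencing_side,
         amop.amoprighttype::regtype AS referenced_side
    FROM pg_amop amop
    JOIN pg_opfamily opf ON opf.oid = amop.amopfamily
    JOIN pg_am am ON am.oid = opf.opfmethod
   WHERE opf.opfname = 'integer_ops'
     AND am.amname = 'btree'
     AND amop.amopstrategy = 3;  -- btree strategy number 3 is equality

For integer_ops this lists every combination of smallint, integer and bigint, which is why the int8-referencing-int case just below succeeds, while no btree operator family pairs inet with integer.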
--- This should succeed, even though they are different types,
--- because int=int8 exists and is a member of the integer opfamily
-CREATE TABLE FKTABLE (ftest1 int8 REFERENCES pktable);
--- Check it actually works
-INSERT INTO FKTABLE VALUES(42); -- should succeed
-INSERT INTO FKTABLE VALUES(43); -- should fail
-ERROR:  insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL:  Key (ftest1)=(43) is not present in table "pktable".
-UPDATE FKTABLE SET ftest1 = ftest1; -- should succeed
-UPDATE FKTABLE SET ftest1 = ftest1 + 1; -- should fail
-ERROR:  insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL:  Key (ftest1)=(43) is not present in table "pktable".
-DROP TABLE FKTABLE;
--- This should fail, because we'd have to cast numeric to int which is
--- not an implicit coercion (or use numeric=numeric, but that's not part
--- of the integer opfamily)
-CREATE TABLE FKTABLE (ftest1 numeric REFERENCES pktable);
-ERROR:  foreign key constraint "fktable_ftest1_fkey" cannot be implemented
-DETAIL:  Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: numeric and integer.
-DROP TABLE PKTABLE;
--- On the other hand, this should work because int implicitly promotes to
--- numeric, and we allow promotion on the FK side
-CREATE TABLE PKTABLE (ptest1 numeric PRIMARY KEY);
-INSERT INTO PKTABLE VALUES(42);
-CREATE TABLE FKTABLE (ftest1 int REFERENCES pktable);
--- Check it actually works
-INSERT INTO FKTABLE VALUES(42); -- should succeed
-INSERT INTO FKTABLE VALUES(43); -- should fail
-ERROR:  insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL:  Key (ftest1)=(43) is not present in table "pktable".
-UPDATE FKTABLE SET ftest1 = ftest1; -- should succeed
-UPDATE FKTABLE SET ftest1 = ftest1 + 1; -- should fail
-ERROR:  insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL:  Key (ftest1)=(43) is not present in table "pktable".
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- Two columns, two tables
-CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, PRIMARY KEY(ptest1, ptest2));
--- This should fail, because we just chose really odd types
-CREATE TABLE FKTABLE (ftest1 cidr, ftest2 timestamp, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable);
-ERROR:  foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL:  Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: cidr and integer.
--- Again, so should this...
-CREATE TABLE FKTABLE (ftest1 cidr, ftest2 timestamp, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable(ptest1, ptest2));
-ERROR:  foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL:  Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: cidr and integer.
--- This fails because we mixed up the column ordering
-CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest2, ftest1) REFERENCES pktable);
-ERROR:  foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented
-DETAIL:  Key columns "ftest2" of the referencing table and "ptest1" of the referenced table are of incompatible types: inet and integer.
--- As does this...
-CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest2, ftest1) REFERENCES pktable(ptest1, ptest2));
-ERROR:  foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented
-DETAIL:  Key columns "ftest2" of the referencing table and "ptest1" of the referenced table are of incompatible types: inet and integer.
--- And again..
-CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable(ptest2, ptest1));
-ERROR:  foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL:  Key columns "ftest1" of the referencing table and "ptest2" of the referenced table are of incompatible types: integer and inet.
--- This works...
-CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest2, ftest1) REFERENCES pktable(ptest2, ptest1));
-DROP TABLE FKTABLE;
--- As does this
-CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable(ptest1, ptest2));
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- Two columns, same table
--- Make sure this still works...
-CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3,
-ptest4) REFERENCES pktable(ptest1, ptest2));
-DROP TABLE PKTABLE;
--- And this,
-CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3,
-ptest4) REFERENCES pktable);
-DROP TABLE PKTABLE;
--- This shouldn't (mixed up columns)
-CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3,
-ptest4) REFERENCES pktable(ptest2, ptest1));
-ERROR:  foreign key constraint "pktable_ptest3_ptest4_fkey" cannot be implemented
-DETAIL:  Key columns "ptest3" of the referencing table and "ptest2" of the referenced table are of incompatible types: integer and inet.
--- Nor should this... (same reason: we have 4,3 referencing 1,2, which mismatches types)
-CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest4,
-ptest3) REFERENCES pktable(ptest1, ptest2));
-ERROR:  foreign key constraint "pktable_ptest4_ptest3_fkey" cannot be implemented
-DETAIL:  Key columns "ptest4" of the referencing table and "ptest1" of the referenced table are of incompatible types: inet and integer.
--- Not this one either... Same as the last one, except we didn't define the columns being referenced.
-CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest4,
-ptest3) REFERENCES pktable);
-ERROR:  foreign key constraint "pktable_ptest4_ptest3_fkey" cannot be implemented
-DETAIL:  Key columns "ptest4" of the referencing table and "ptest1" of the referenced table are of incompatible types: inet and integer.
---
--- Now some cases with inheritance
--- Basic 2 table case: 1 column of matching types.
-create table pktable_base (base1 int not null);
-create table pktable (ptest1 int, primary key(base1), unique(base1, ptest1)) inherits (pktable_base);
-create table fktable (ftest1 int references pktable(base1));
--- now some ins, upd, del
-insert into pktable(base1) values (1);
-insert into pktable(base1) values (2);
--- let's insert a non-existent fktable value
-insert into fktable(ftest1) values (3);
-ERROR:  insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL:  Key (ftest1)=(3) is not present in table "pktable".
--- let's make a valid row for that
-insert into pktable(base1) values (3);
-insert into fktable(ftest1) values (3);
--- let's try removing a row that should fail from pktable
-delete from pktable where base1>2;
-ERROR:  update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_fkey" on table "fktable"
-DETAIL:  Key (base1)=(3) is still referenced from table "fktable".
--- okay, let's try updating all of the base1 values to *4
--- which should fail.
-update pktable set base1=base1*4;
-ERROR:  update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_fkey" on table "fktable"
-DETAIL:  Key (base1)=(3) is still referenced from table "fktable".
--- okay, let's try an update that should work.
-update pktable set base1=base1*4 where base1<3;
--- and a delete that should work
-delete from pktable where base1>3;
--- cleanup
-drop table fktable;
-delete from pktable;
--- Now 2 columns 2 tables, matching types
-create table fktable (ftest1 int, ftest2 int, foreign key(ftest1, ftest2) references pktable(base1, ptest1));
--- now some ins, upd, del
-insert into pktable(base1, ptest1) values (1, 1);
-insert into pktable(base1, ptest1) values (2, 2);
--- let's insert a non-existent fktable value
-insert into fktable(ftest1, ftest2) values (3, 1);
-ERROR:  insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_ftest2_fkey"
-DETAIL:  Key (ftest1, ftest2)=(3, 1) is not present in table "pktable".
--- let's make a valid row for that
-insert into pktable(base1,ptest1) values (3, 1);
-insert into fktable(ftest1, ftest2) values (3, 1);
--- let's try removing a row that should fail from pktable
-delete from pktable where base1>2;
-ERROR:  update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_ftest2_fkey" on table "fktable"
-DETAIL:  Key (base1, ptest1)=(3, 1) is still referenced from table "fktable".
--- okay, let's try updating all of the base1 values to *4
--- which should fail.
-update pktable set base1=base1*4;
-ERROR:  update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_ftest2_fkey" on table "fktable"
-DETAIL:  Key (base1, ptest1)=(3, 1) is still referenced from table "fktable".
--- okay, let's try an update that should work.
-update pktable set base1=base1*4 where base1<3;
--- and a delete that should work
-delete from pktable where base1>3;
--- cleanup
-drop table fktable;
-drop table pktable;
-drop table pktable_base;
--- Now we'll do one all in 1 table with 2 columns of matching types
-create table pktable_base(base1 int not null, base2 int);
-create table pktable(ptest1 int, ptest2 int, primary key(base1, ptest1), foreign key(base2, ptest2) references
-                                             pktable(base1, ptest1)) inherits (pktable_base);
-insert into pktable (base1, ptest1, base2, ptest2) values (1, 1, 1, 1);
-insert into pktable (base1, ptest1, base2, ptest2) values (2, 1, 1, 1);
-insert into pktable (base1, ptest1, base2, ptest2) values (2, 2, 2, 1);
-insert into pktable (base1, ptest1, base2, ptest2) values (1, 3, 2, 2);
--- fails (3,2) isn't in base1, ptest1
-insert into pktable (base1, ptest1, base2, ptest2) values (2, 3, 3, 2);
-ERROR:  insert or update on table "pktable" violates foreign key constraint "pktable_base2_ptest2_fkey"
-DETAIL:  Key (base2, ptest2)=(3, 2) is not present in table "pktable".
--- fails (2,2) is being referenced
-delete from pktable where base1=2;
-ERROR:  update or delete on table "pktable" violates foreign key constraint "pktable_base2_ptest2_fkey" on table "pktable"
-DETAIL:  Key (base1, ptest1)=(2, 2) is still referenced from table "pktable".
--- fails (1,1) is being referenced (twice)
-update pktable set base1=3 where base1=1;
-ERROR:  update or delete on table "pktable" violates foreign key constraint "pktable_base2_ptest2_fkey" on table "pktable"
-DETAIL:  Key (base1, ptest1)=(1, 1) is still referenced from table "pktable".
--- this sequence of two deletes will work, since after the first there will be no (2,*) references
-delete from pktable where base2=2;
-delete from pktable where base1=2;
-drop table pktable;
-drop table pktable_base;
--- 2 columns (2 tables), mismatched types
-create table pktable_base(base1 int not null);
-create table pktable(ptest1 inet, primary key(base1, ptest1)) inherits (pktable_base);
--- just generally bad types (with and without column references on the referenced table)
-create table fktable(ftest1 cidr, ftest2 int[], foreign key (ftest1, ftest2) references pktable);
-ERROR:  foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL:  Key columns "ftest1" of the referencing table and "base1" of the referenced table are of incompatible types: cidr and integer.
-create table fktable(ftest1 cidr, ftest2 int[], foreign key (ftest1, ftest2) references pktable(base1, ptest1));
-ERROR:  foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL:  Key columns "ftest1" of the referencing table and "base1" of the referenced table are of incompatible types: cidr and integer.
--- let's mix up which columns reference which
-create table fktable(ftest1 int, ftest2 inet, foreign key(ftest2, ftest1) references pktable);
-ERROR:  foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented
-DETAIL:  Key columns "ftest2" of the referencing table and "base1" of the referenced table are of incompatible types: inet and integer.
-create table fktable(ftest1 int, ftest2 inet, foreign key(ftest2, ftest1) references pktable(base1, ptest1));
-ERROR:  foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented
-DETAIL:  Key columns "ftest2" of the referencing table and "base1" of the referenced table are of incompatible types: inet and integer.
-create table fktable(ftest1 int, ftest2 inet, foreign key(ftest1, ftest2) references pktable(ptest1, base1));
-ERROR:  foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL:  Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: integer and inet.
-drop table pktable;
-drop table pktable_base;
--- 2 columns (1 table), mismatched types
-create table pktable_base(base1 int not null, base2 int);
-create table pktable(ptest1 inet, ptest2 inet[], primary key(base1, ptest1), foreign key(base2, ptest2) references
-                                             pktable(base1, ptest1)) inherits (pktable_base);
-ERROR:  foreign key constraint "pktable_base2_ptest2_fkey" cannot be implemented
-DETAIL:  Key columns "ptest2" of the referencing table and "ptest1" of the referenced table are of incompatible types: inet[] and inet.
-create table pktable(ptest1 inet, ptest2 inet, primary key(base1, ptest1), foreign key(base2, ptest2) references
-                                             pktable(ptest1, base1)) inherits (pktable_base);
-ERROR:  foreign key constraint "pktable_base2_ptest2_fkey" cannot be implemented
-DETAIL:  Key columns "base2" of the referencing table and "ptest1" of the referenced table are of incompatible types: integer and inet.
-create table pktable(ptest1 inet, ptest2 inet, primary key(base1, ptest1), foreign key(ptest2, base2) references
-                                             pktable(base1, ptest1)) inherits (pktable_base);
-ERROR:  foreign key constraint "pktable_ptest2_base2_fkey" cannot be implemented
-DETAIL:  Key columns "ptest2" of the referencing table and "base1" of the referenced table are of incompatible types: inet and integer.
-create table pktable(ptest1 inet, ptest2 inet, primary key(base1, ptest1), foreign key(ptest2, base2) references
-                                             pktable(base1, ptest1)) inherits (pktable_base);
-ERROR:  foreign key constraint "pktable_ptest2_base2_fkey" cannot be implemented
-DETAIL:  Key columns "ptest2" of the referencing table and "base1" of the referenced table are of incompatible types: inet and integer.
-drop table pktable;
-ERROR:  table "pktable" does not exist
-drop table pktable_base;
---
--- Deferrable constraints
---
--- deferrable, explicitly deferred
-CREATE TABLE pktable (
-  id    INT4 PRIMARY KEY,
-  other INT4
-);
-CREATE TABLE fktable (
-  id    INT4 PRIMARY KEY,
-  fk    INT4 REFERENCES pktable DEFERRABLE
-);
--- default to immediate: should fail
-INSERT INTO fktable VALUES (5, 10);
-ERROR:  insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL:  Key (fk)=(10) is not present in table "pktable".
--- explicitly defer the constraint
-BEGIN;
-SET CONSTRAINTS ALL DEFERRED;
-INSERT INTO fktable VALUES (10, 15);
-INSERT INTO pktable VALUES (15, 0); -- make the FK insert valid
-COMMIT;
-DROP TABLE fktable, pktable;
--- deferrable, initially deferred
-CREATE TABLE pktable (
-  id    INT4 PRIMARY KEY,
-  other INT4
-);
-CREATE TABLE fktable (
-  id    INT4 PRIMARY KEY,
-  fk    INT4 REFERENCES pktable DEFERRABLE INITIALLY DEFERRED
-);
--- default to deferred, should succeed
-BEGIN;
-INSERT INTO fktable VALUES (100, 200);
-INSERT INTO pktable VALUES (200, 500); -- make the FK insert valid
-COMMIT;
--- default to deferred, explicitly make immediate
-BEGIN;
-SET CONSTRAINTS ALL IMMEDIATE;
--- should fail
-INSERT INTO fktable VALUES (500, 1000);
-ERROR:  insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL:  Key (fk)=(1000) is not present in table "pktable".
-COMMIT;
-DROP TABLE fktable, pktable;
--- tricky behavior: according to SQL99, if a deferred constraint is set
--- to 'immediate' mode, it should be checked for validity *immediately*,
--- not when the current transaction commits (i.e. the mode change applies
--- retroactively)
-CREATE TABLE pktable (
-  id    INT4 PRIMARY KEY,
-  other INT4
-);
-CREATE TABLE fktable (
-  id    INT4 PRIMARY KEY,
-  fk    INT4 REFERENCES pktable DEFERRABLE
-);
-BEGIN;
-SET CONSTRAINTS ALL DEFERRED;
--- should succeed, for now
-INSERT INTO fktable VALUES (1000, 2000);
--- should cause transaction abort, due to preceding error
-SET CONSTRAINTS ALL IMMEDIATE;
-ERROR:  insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL:  Key (fk)=(2000) is not present in table "pktable".
-INSERT INTO pktable VALUES (2000, 3); -- too late
-ERROR:  current transaction is aborted, commands ignored until end of transaction block
-COMMIT;
-DROP TABLE fktable, pktable;
--- deferrable, initially deferred
-CREATE TABLE pktable (
-  id    INT4 PRIMARY KEY,
-  other INT4
-);
-CREATE TABLE fktable (
-  id    INT4 PRIMARY KEY,
-  fk    INT4 REFERENCES pktable DEFERRABLE INITIALLY DEFERRED
-);
-BEGIN;
--- no error here
-INSERT INTO fktable VALUES (100, 200);
--- error here on commit
-COMMIT;
-ERROR:  insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL:  Key (fk)=(200) is not present in table "pktable".
-DROP TABLE pktable, fktable;
--- test notice about expensive referential integrity checks,
--- where the index cannot be used because of type incompatibilities.
-CREATE TEMP TABLE pktable (
-  id1   INT4 PRIMARY KEY,
-  id2   VARCHAR(4) UNIQUE,
-  id3   REAL UNIQUE,
-  UNIQUE(id1, id2, id3)
-);
-CREATE TEMP TABLE fktable (
-  x1    INT4 REFERENCES pktable(id1),
-  x2    VARCHAR(4) REFERENCES pktable(id2),
-  x3    REAL REFERENCES pktable(id3),
-  x4    TEXT,
-  x5    INT2
-);
--- check individual constraints with alter table.
--- should fail
--- varchar does not promote to real
-ALTER TABLE fktable ADD CONSTRAINT fk_2_3
-FOREIGN KEY (x2) REFERENCES pktable(id3);
-ERROR:  foreign key constraint "fk_2_3" cannot be implemented
-DETAIL:  Key columns "x2" of the referencing table and "id3" of the referenced table are of incompatible types: character varying and real.
--- nor to int4
-ALTER TABLE fktable ADD CONSTRAINT fk_2_1
-FOREIGN KEY (x2) REFERENCES pktable(id1);
-ERROR:  foreign key constraint "fk_2_1" cannot be implemented
-DETAIL:  Key columns "x2" of the referencing table and "id1" of the referenced table are of incompatible types: character varying and integer.
--- real does not promote to int4
-ALTER TABLE fktable ADD CONSTRAINT fk_3_1
-FOREIGN KEY (x3) REFERENCES pktable(id1);
-ERROR:  foreign key constraint "fk_3_1" cannot be implemented
-DETAIL:  Key columns "x3" of the referencing table and "id1" of the referenced table are of incompatible types: real and integer.
--- int4 does not promote to text
-ALTER TABLE fktable ADD CONSTRAINT fk_1_2
-FOREIGN KEY (x1) REFERENCES pktable(id2);
-ERROR:  foreign key constraint "fk_1_2" cannot be implemented
-DETAIL:  Key columns "x1" of the referencing table and "id2" of the referenced table are of incompatible types: integer and character varying.
--- should succeed
--- int4 promotes to real
-ALTER TABLE fktable ADD CONSTRAINT fk_1_3
-FOREIGN KEY (x1) REFERENCES pktable(id3);
--- text is compatible with varchar
-ALTER TABLE fktable ADD CONSTRAINT fk_4_2
-FOREIGN KEY (x4) REFERENCES pktable(id2);
--- int2 is part of integer opfamily as of 8.0
-ALTER TABLE fktable ADD CONSTRAINT fk_5_1
-FOREIGN KEY (x5) REFERENCES pktable(id1);
--- check multikey cases, especially out-of-order column lists
--- these should work
-ALTER TABLE fktable ADD CONSTRAINT fk_123_123
-FOREIGN KEY (x1,x2,x3) REFERENCES pktable(id1,id2,id3);
-ALTER TABLE fktable ADD CONSTRAINT fk_213_213
-FOREIGN KEY (x2,x1,x3) REFERENCES pktable(id2,id1,id3);
-ALTER TABLE fktable ADD CONSTRAINT fk_253_213
-FOREIGN KEY (x2,x5,x3) REFERENCES pktable(id2,id1,id3);
--- these should fail
-ALTER TABLE fktable ADD CONSTRAINT fk_123_231
-FOREIGN KEY (x1,x2,x3) REFERENCES pktable(id2,id3,id1);
-ERROR:  foreign key constraint "fk_123_231" cannot be implemented
-DETAIL:  Key columns "x1" of the referencing table and "id2" of the referenced table are of incompatible types: integer and character varying.
-ALTER TABLE fktable ADD CONSTRAINT fk_241_132
-FOREIGN KEY (x2,x4,x1) REFERENCES pktable(id1,id3,id2);
-ERROR:  foreign key constraint "fk_241_132" cannot be implemented
-DETAIL:  Key columns "x2" of the referencing table and "id1" of the referenced table are of incompatible types: character varying and integer.
-DROP TABLE pktable, fktable;
--- test a tricky case: we can elide firing the FK check trigger during
--- an UPDATE if the UPDATE did not change the foreign key
--- field. However, we can't do this if our transaction was the one that
--- created the updated row and the trigger is deferred, since our UPDATE
--- will have invalidated the original newly-inserted tuple, and therefore
--- cause the on-INSERT RI trigger not to be fired.
-CREATE TEMP TABLE pktable (
-  id int primary key,
-  other int
-);
-CREATE TEMP TABLE fktable (
-  id int primary key,
-  fk int references pktable deferrable initially deferred
-);
-INSERT INTO pktable VALUES (5, 10);
-BEGIN;
--- doesn't match PK, but no error yet
-INSERT INTO fktable VALUES (0, 20);
--- don't change FK
-UPDATE fktable SET id = id + 1;
--- should catch error from initial INSERT
-COMMIT;
-ERROR:  insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL:  Key (fk)=(20) is not present in table "pktable".
--- check same case when insert is in a different subtransaction than update
-BEGIN;
--- doesn't match PK, but no error yet
-INSERT INTO fktable VALUES (0, 20);
--- UPDATE will be in a subxact
-SAVEPOINT savept1;
--- don't change FK
-UPDATE fktable SET id = id + 1;
--- should catch error from initial INSERT
-COMMIT;
-ERROR:  insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL:  Key (fk)=(20) is not present in table "pktable".
-BEGIN;
--- INSERT will be in a subxact
-SAVEPOINT savept1;
--- doesn't match PK, but no error yet
-INSERT INTO fktable VALUES (0, 20);
-RELEASE SAVEPOINT savept1;
--- don't change FK
-UPDATE fktable SET id = id + 1;
--- should catch error from initial INSERT
-COMMIT;
-ERROR:  insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL:  Key (fk)=(20) is not present in table "pktable".
-BEGIN;
--- doesn't match PK, but no error yet
-INSERT INTO fktable VALUES (0, 20);
--- UPDATE will be in a subxact
-SAVEPOINT savept1;
--- don't change FK
-UPDATE fktable SET id = id + 1;
--- Roll back the UPDATE
-ROLLBACK TO savept1;
--- should catch error from initial INSERT
-COMMIT;
-ERROR:  insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL:  Key (fk)=(20) is not present in table "pktable".
---
--- check ALTER CONSTRAINT
---
-INSERT INTO fktable VALUES (1, 5);
-ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey DEFERRABLE INITIALLY IMMEDIATE;
-BEGIN;
--- doesn't match FK, should throw error now
-UPDATE pktable SET id = 10 WHERE id = 5;
-ERROR:  update or delete on table "pktable" violates foreign key constraint "fktable_fk_fkey" on table "fktable"
-DETAIL:  Key (id)=(5) is still referenced from table "fktable".
-COMMIT;
-BEGIN;
--- doesn't match PK, should throw error now
-INSERT INTO fktable VALUES (0, 20);
-ERROR:  insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL:  Key (fk)=(20) is not present in table "pktable".
-COMMIT;
--- try additional syntax
-ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NOT DEFERRABLE;
--- illegal option
-ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NOT DEFERRABLE INITIALLY DEFERRED;
-ERROR:  constraint declared INITIALLY DEFERRED must be DEFERRABLE
-LINE 1: ...e ALTER CONSTRAINT fktable_fk_fkey NOT DEFERRABLE INITIALLY ...
-                                                             ^
--- test order of firing of FK triggers when several RI-induced changes need to
--- be made to the same row.  This was broken by subtransaction-related
--- changes in 8.0.
-CREATE TEMP TABLE users (
-  id INT PRIMARY KEY,
-  name VARCHAR NOT NULL
-);
-INSERT INTO users VALUES (1, 'Jozko');
-INSERT INTO users VALUES (2, 'Ferko');
-INSERT INTO users VALUES (3, 'Samko');
-CREATE TEMP TABLE tasks (
-  id INT PRIMARY KEY,
-  owner INT REFERENCES users ON UPDATE CASCADE ON DELETE SET NULL,
-  worker INT REFERENCES users ON UPDATE CASCADE ON DELETE SET NULL,
-  checked_by INT REFERENCES users ON UPDATE CASCADE ON DELETE SET NULL
-);
-INSERT INTO tasks VALUES (1,1,NULL,NULL);
-INSERT INTO tasks VALUES (2,2,2,NULL);
-INSERT INTO tasks VALUES (3,3,3,3);
-SELECT * FROM tasks;
- id | owner | worker | checked_by 
-----+-------+--------+------------
-  1 |     1 |        |           
-  2 |     2 |      2 |           
-  3 |     3 |      3 |          3
-(3 rows)
-
-UPDATE users SET id = 4 WHERE id = 3;
-SELECT * FROM tasks;
- id | owner | worker | checked_by 
-----+-------+--------+------------
-  1 |     1 |        |           
-  2 |     2 |      2 |           
-  3 |     4 |      4 |          4
-(3 rows)
-
-DELETE FROM users WHERE id = 4;
-SELECT * FROM tasks;
- id | owner | worker | checked_by 
-----+-------+--------+------------
-  1 |     1 |        |           
-  2 |     2 |      2 |           
-  3 |       |        |           
-(3 rows)
-
--- could fail with only 2 changes to make, if row was already updated
-BEGIN;
-UPDATE tasks set id=id WHERE id=2;
-SELECT * FROM tasks;
- id | owner | worker | checked_by 
-----+-------+--------+------------
-  1 |     1 |        |           
-  3 |       |        |           
-  2 |     2 |      2 |           
-(3 rows)
-
-DELETE FROM users WHERE id = 2;
-SELECT * FROM tasks;
- id | owner | worker | checked_by 
-----+-------+--------+------------
-  1 |     1 |        |           
-  3 |       |        |           
-  2 |       |        |           
-(3 rows)
-
-COMMIT;
---
--- Test self-referential FK with CASCADE (bug #6268)
---
-create temp table selfref (
-  a int primary key,
-  b int,
-  foreign key (b) references selfref (a)
-    on update cascade on delete cascade
-);
-insert into selfref (a, b)
-values
-  (0, 0),
-  (1, 1);
-begin;
-    update selfref set a = 123 where a = 0;
-    select a, b from selfref;
-  a  |  b  
------+-----
-   1 |   1
- 123 | 123
-(2 rows)
-
-    update selfref set a = 456 where a = 123;
-    select a, b from selfref;
-  a  |  b  
------+-----
-   1 |   1
- 456 | 456
-(2 rows)
-
-commit;
---
--- Test that SET DEFAULT actions recognize updates to default values
---
-create temp table defp (f1 int primary key);
-create temp table defc (f1 int default 0
-  references defp on delete set default);
-insert into defp values (0), (1), (2);
-insert into defc values (2);
-select * from defc;
- f1 
-----
-  2
-(1 row)
-
-delete from defp where f1 = 2;
-select * from defc;
- f1 
-----
-  0
-(1 row)
-
-delete from defp where f1 = 0; -- fail
-ERROR:  update or delete on table "defp" violates foreign key constraint "defc_f1_fkey" on table "defc"
-DETAIL:  Key (f1)=(0) is still referenced from table "defc".
-alter table defc alter column f1 set default 1;
-delete from defp where f1 = 0;
-select * from defc;
- f1 
-----
-  1
-(1 row)
-
-delete from defp where f1 = 1; -- fail
-ERROR:  update or delete on table "defp" violates foreign key constraint "defc_f1_fkey" on table "defc"
-DETAIL:  Key (f1)=(1) is still referenced from table "defc".
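The hunk below contrasts NO ACTION with RESTRICT. The practical difference is timing: a RESTRICT check fires immediately and can never be deferred, while a NO ACTION check runs at the end of the statement, or at commit if the constraint is deferred, so a violation that has been repaired in the meantime is forgiven. A sketch of the deferred variant, which the tests below do not cover (the table names p and c are illustrative):

  CREATE TEMP TABLE p (f1 int PRIMARY KEY);
  CREATE TEMP TABLE c (f1 int REFERENCES p
      ON DELETE NO ACTION DEFERRABLE INITIALLY DEFERRED);
  INSERT INTO p VALUES (1);
  INSERT INTO c VALUES (1);
  BEGIN;
  DELETE FROM p WHERE f1 = 1;  -- no error yet: the check is deferred
  INSERT INTO p VALUES (1);    -- repair the violation before commit
  COMMIT;                      -- succeeds; with RESTRICT the DELETE itself
                               -- would have errored on the spot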
--- --- Test the difference between NO ACTION and RESTRICT --- -create temp table pp (f1 int primary key); -create temp table cc (f1 int references pp on update no action on delete no action); -insert into pp values(12); -insert into pp values(11); -update pp set f1=f1+1; -insert into cc values(13); -update pp set f1=f1+1; -update pp set f1=f1+1; -- fail -ERROR: update or delete on table "pp" violates foreign key constraint "cc_f1_fkey" on table "cc" -DETAIL: Key (f1)=(13) is still referenced from table "cc". -delete from pp where f1 = 13; -- fail -ERROR: update or delete on table "pp" violates foreign key constraint "cc_f1_fkey" on table "cc" -DETAIL: Key (f1)=(13) is still referenced from table "cc". -drop table pp, cc; -create temp table pp (f1 int primary key); -create temp table cc (f1 int references pp on update restrict on delete restrict); -insert into pp values(12); -insert into pp values(11); -update pp set f1=f1+1; -insert into cc values(13); -update pp set f1=f1+1; -- fail -ERROR: update or delete on table "pp" violates RESTRICT setting of foreign key constraint "cc_f1_fkey" on table "cc" -DETAIL: Key (f1)=(13) is referenced from table "cc". -delete from pp where f1 = 13; -- fail -ERROR: update or delete on table "pp" violates RESTRICT setting of foreign key constraint "cc_f1_fkey" on table "cc" -DETAIL: Key (f1)=(13) is referenced from table "cc". -drop table pp, cc; --- --- Test interaction of foreign-key optimization with rules (bug #14219) --- -create temp table t1 (a integer primary key, b text); -create temp table t2 (a integer primary key, b integer references t1); -create rule r1 as on delete to t1 do delete from t2 where t2.b = old.a; -explain (costs off) delete from t1 where a = 1; - QUERY PLAN --------------------------------------------- - Delete on t2 - -> Nested Loop - -> Index Scan using t1_pkey on t1 - Index Cond: (a = 1) - -> Seq Scan on t2 - Filter: (b = 1) - - Delete on t1 - -> Index Scan using t1_pkey on t1 - Index Cond: (a = 1) -(10 rows) - -delete from t1 where a = 1; --- Test a primary key with attributes located in later attnum positions --- compared to the fk attributes. -create table pktable2 (a int, b int, c int, d int, e int, primary key (d, e)); -create table fktable2 (d int, e int, foreign key (d, e) references pktable2); -insert into pktable2 values (1, 2, 3, 4, 5); -insert into fktable2 values (4, 5); -delete from pktable2; -ERROR: update or delete on table "pktable2" violates foreign key constraint "fktable2_d_e_fkey" on table "fktable2" -DETAIL: Key (d, e)=(4, 5) is still referenced from table "fktable2". -update pktable2 set d = 5; -ERROR: update or delete on table "pktable2" violates foreign key constraint "fktable2_d_e_fkey" on table "fktable2" -DETAIL: Key (d, e)=(4, 5) is still referenced from table "fktable2". 
-drop table pktable2, fktable2; --- Test truncation of long foreign key names -create table pktable1 (a int primary key); -create table pktable2 (a int, b int, primary key (a, b)); -create table fktable2 ( - a int, - b int, - very_very_long_column_name_to_exceed_63_characters int, - foreign key (very_very_long_column_name_to_exceed_63_characters) references pktable1, - foreign key (a, very_very_long_column_name_to_exceed_63_characters) references pktable2, - foreign key (a, very_very_long_column_name_to_exceed_63_characters) references pktable2 -); -select conname from pg_constraint where conrelid = 'fktable2'::regclass order by conname; - conname ------------------------------------------------------------------ - fktable2_a_very_very_long_column_name_to_exceed_63_charac_fkey1 - fktable2_a_very_very_long_column_name_to_exceed_63_charact_fkey - fktable2_very_very_long_column_name_to_exceed_63_character_fkey -(3 rows) - -drop table pktable1, pktable2, fktable2; --- --- Test deferred FK check on a tuple deleted by a rolled-back subtransaction --- -create table pktable2(f1 int primary key); -create table fktable2(f1 int references pktable2 deferrable initially deferred); -insert into pktable2 values(1); -begin; -insert into fktable2 values(1); -savepoint x; -delete from fktable2; -rollback to x; -commit; -begin; -insert into fktable2 values(2); -savepoint x; -delete from fktable2; -rollback to x; -commit; -- fail -ERROR: insert or update on table "fktable2" violates foreign key constraint "fktable2_f1_fkey" -DETAIL: Key (f1)=(2) is not present in table "pktable2". --- --- Test that we prevent dropping FK constraint with pending trigger events --- -begin; -insert into fktable2 values(2); -alter table fktable2 drop constraint fktable2_f1_fkey; -ERROR: cannot ALTER TABLE "fktable2" because it has pending trigger events -commit; -begin; -delete from pktable2 where f1 = 1; -alter table fktable2 drop constraint fktable2_f1_fkey; -ERROR: cannot ALTER TABLE "pktable2" because it has pending trigger events -commit; -drop table pktable2, fktable2; --- --- Test keys that "look" different but compare as equal --- -create table pktable2 (a float8, b float8, primary key (a, b)); -create table fktable2 (x float8, y float8, foreign key (x, y) references pktable2 (a, b) on update cascade); -insert into pktable2 values ('-0', '-0'); -insert into fktable2 values ('-0', '-0'); -select * from pktable2; - a | b -----+---- - -0 | -0 -(1 row) - -select * from fktable2; - x | y -----+---- - -0 | -0 -(1 row) - -update pktable2 set a = '0' where a = '-0'; -select * from pktable2; - a | b ----+---- - 0 | -0 -(1 row) - --- should have updated fktable2.x -select * from fktable2; - x | y ----+---- - 0 | -0 -(1 row) - -drop table pktable2, fktable2; --- --- Foreign keys and partitioned tables --- --- Creation of a partitioned hierarchy with irregular definitions -CREATE TABLE fk_notpartitioned_pk (fdrop1 int, a int, fdrop2 int, b int, - PRIMARY KEY (a, b)); -ALTER TABLE fk_notpartitioned_pk DROP COLUMN fdrop1, DROP COLUMN fdrop2; -CREATE TABLE fk_partitioned_fk (b int, fdrop1 int, a int) PARTITION BY RANGE (a, b); -ALTER TABLE fk_partitioned_fk DROP COLUMN fdrop1; -CREATE TABLE fk_partitioned_fk_1 (fdrop1 int, fdrop2 int, a int, fdrop3 int, b int); -ALTER TABLE fk_partitioned_fk_1 DROP COLUMN fdrop1, DROP COLUMN fdrop2, DROP COLUMN fdrop3; -ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_1 FOR VALUES FROM (0,0) TO (1000,1000); -ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) REFERENCES 
fk_notpartitioned_pk; -CREATE TABLE fk_partitioned_fk_2 (b int, fdrop1 int, fdrop2 int, a int); -ALTER TABLE fk_partitioned_fk_2 DROP COLUMN fdrop1, DROP COLUMN fdrop2; -ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES FROM (1000,1000) TO (2000,2000); -CREATE TABLE fk_partitioned_fk_3 (fdrop1 int, fdrop2 int, fdrop3 int, fdrop4 int, b int, a int) - PARTITION BY HASH (a); -ALTER TABLE fk_partitioned_fk_3 DROP COLUMN fdrop1, DROP COLUMN fdrop2, - DROP COLUMN fdrop3, DROP COLUMN fdrop4; -CREATE TABLE fk_partitioned_fk_3_0 PARTITION OF fk_partitioned_fk_3 FOR VALUES WITH (MODULUS 5, REMAINDER 0); -CREATE TABLE fk_partitioned_fk_3_1 PARTITION OF fk_partitioned_fk_3 FOR VALUES WITH (MODULUS 5, REMAINDER 1); -ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_3 - FOR VALUES FROM (2000,2000) TO (3000,3000); --- Creating a foreign key with ONLY on a partitioned table referencing --- a non-partitioned table fails. -ALTER TABLE ONLY fk_partitioned_fk ADD FOREIGN KEY (a, b) - REFERENCES fk_notpartitioned_pk; -ERROR: cannot use ONLY for foreign key on partitioned table "fk_partitioned_fk" referencing relation "fk_notpartitioned_pk" --- Adding a NOT VALID foreign key on a partitioned table referencing --- a non-partitioned table fails. -ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) - REFERENCES fk_notpartitioned_pk NOT VALID; -ERROR: cannot add NOT VALID foreign key on partitioned table "fk_partitioned_fk" referencing relation "fk_notpartitioned_pk" -DETAIL: This feature is not yet supported on partitioned tables. --- these inserts, targeting both the partition directly as well as the --- partitioned table, should all fail -INSERT INTO fk_partitioned_fk (a,b) VALUES (500, 501); -ERROR: insert or update on table "fk_partitioned_fk_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" -DETAIL: Key (a, b)=(500, 501) is not present in table "fk_notpartitioned_pk". -INSERT INTO fk_partitioned_fk_1 (a,b) VALUES (500, 501); -ERROR: insert or update on table "fk_partitioned_fk_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" -DETAIL: Key (a, b)=(500, 501) is not present in table "fk_notpartitioned_pk". -INSERT INTO fk_partitioned_fk (a,b) VALUES (1500, 1501); -ERROR: insert or update on table "fk_partitioned_fk_2" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" -DETAIL: Key (a, b)=(1500, 1501) is not present in table "fk_notpartitioned_pk". -INSERT INTO fk_partitioned_fk_2 (a,b) VALUES (1500, 1501); -ERROR: insert or update on table "fk_partitioned_fk_2" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" -DETAIL: Key (a, b)=(1500, 1501) is not present in table "fk_notpartitioned_pk". -INSERT INTO fk_partitioned_fk (a,b) VALUES (2500, 2502); -ERROR: insert or update on table "fk_partitioned_fk_3_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" -DETAIL: Key (a, b)=(2500, 2502) is not present in table "fk_notpartitioned_pk". -INSERT INTO fk_partitioned_fk_3 (a,b) VALUES (2500, 2502); -ERROR: insert or update on table "fk_partitioned_fk_3_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" -DETAIL: Key (a, b)=(2500, 2502) is not present in table "fk_notpartitioned_pk". -INSERT INTO fk_partitioned_fk (a,b) VALUES (2501, 2503); -ERROR: insert or update on table "fk_partitioned_fk_3_0" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" -DETAIL: Key (a, b)=(2501, 2503) is not present in table "fk_notpartitioned_pk". 
-INSERT INTO fk_partitioned_fk_3 (a,b) VALUES (2501, 2503);
-ERROR: insert or update on table "fk_partitioned_fk_3_0" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(2501, 2503) is not present in table "fk_notpartitioned_pk".
--- but if we insert the values that make them valid, then they work
-INSERT INTO fk_notpartitioned_pk VALUES (500, 501), (1500, 1501),
- (2500, 2502), (2501, 2503);
-INSERT INTO fk_partitioned_fk (a,b) VALUES (500, 501);
-INSERT INTO fk_partitioned_fk (a,b) VALUES (1500, 1501);
-INSERT INTO fk_partitioned_fk (a,b) VALUES (2500, 2502);
-INSERT INTO fk_partitioned_fk (a,b) VALUES (2501, 2503);
--- this update fails because there is no referenced row
-UPDATE fk_partitioned_fk SET a = a + 1 WHERE a = 2501;
-ERROR: insert or update on table "fk_partitioned_fk_3_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(2502, 2503) is not present in table "fk_notpartitioned_pk".
--- but we can fix it thusly:
-INSERT INTO fk_notpartitioned_pk (a,b) VALUES (2502, 2503);
-UPDATE fk_partitioned_fk SET a = a + 1 WHERE a = 2501;
--- these updates would leave lingering rows in the referencing table; disallow
-UPDATE fk_notpartitioned_pk SET b = 502 WHERE a = 500;
-ERROR: update or delete on table "fk_notpartitioned_pk" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" on table "fk_partitioned_fk"
-DETAIL: Key (a, b)=(500, 501) is still referenced from table "fk_partitioned_fk".
-UPDATE fk_notpartitioned_pk SET b = 1502 WHERE a = 1500;
-ERROR: update or delete on table "fk_notpartitioned_pk" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" on table "fk_partitioned_fk"
-DETAIL: Key (a, b)=(1500, 1501) is still referenced from table "fk_partitioned_fk".
-UPDATE fk_notpartitioned_pk SET b = 2504 WHERE a = 2500;
-ERROR: update or delete on table "fk_notpartitioned_pk" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" on table "fk_partitioned_fk"
-DETAIL: Key (a, b)=(2500, 2502) is still referenced from table "fk_partitioned_fk".
--- check psql behavior
-\d fk_notpartitioned_pk
- Table "public.fk_notpartitioned_pk"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | not null |
- b | integer | | not null |
-Indexes:
- "fk_notpartitioned_pk_pkey" PRIMARY KEY, btree (a, b)
-Referenced by:
- TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b)
-
-ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_b_fkey;
--- done.
-DROP TABLE fk_notpartitioned_pk, fk_partitioned_fk;
--- Altering a type referenced by a foreign key needs to drop/recreate the FK.
--- Ensure that works.
-CREATE TABLE fk_notpartitioned_pk (a INT, PRIMARY KEY(a), CHECK (a > 0));
-CREATE TABLE fk_partitioned_fk (a INT REFERENCES fk_notpartitioned_pk(a) PRIMARY KEY) PARTITION BY RANGE(a);
-CREATE TABLE fk_partitioned_fk_1 PARTITION OF fk_partitioned_fk FOR VALUES FROM (MINVALUE) TO (MAXVALUE);
-INSERT INTO fk_notpartitioned_pk VALUES (1);
-INSERT INTO fk_partitioned_fk VALUES (1);
-ALTER TABLE fk_notpartitioned_pk ALTER COLUMN a TYPE bigint;
-DELETE FROM fk_notpartitioned_pk WHERE a = 1;
-ERROR: update or delete on table "fk_notpartitioned_pk" violates foreign key constraint "fk_partitioned_fk_a_fkey" on table "fk_partitioned_fk"
-DETAIL: Key (a)=(1) is still referenced from table "fk_partitioned_fk".
-DROP TABLE fk_notpartitioned_pk, fk_partitioned_fk;
--- Test some other exotic foreign key features: MATCH SIMPLE, ON UPDATE/DELETE
--- actions
-CREATE TABLE fk_notpartitioned_pk (a int, b int, primary key (a, b));
-CREATE TABLE fk_partitioned_fk (a int default 2501, b int default 142857) PARTITION BY LIST (a);
-CREATE TABLE fk_partitioned_fk_1 PARTITION OF fk_partitioned_fk FOR VALUES IN (NULL,500,501,502);
-ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b)
- REFERENCES fk_notpartitioned_pk MATCH SIMPLE
- ON DELETE SET NULL ON UPDATE SET NULL;
-CREATE TABLE fk_partitioned_fk_2 PARTITION OF fk_partitioned_fk FOR VALUES IN (1500,1502);
-CREATE TABLE fk_partitioned_fk_3 (a int, b int);
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_3 FOR VALUES IN (2500,2501,2502,2503);
--- this insert fails
-INSERT INTO fk_partitioned_fk (a, b) VALUES (2502, 2503);
-ERROR: insert or update on table "fk_partitioned_fk_3" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(2502, 2503) is not present in table "fk_notpartitioned_pk".
-INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, 2503);
-ERROR: insert or update on table "fk_partitioned_fk_3" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(2502, 2503) is not present in table "fk_notpartitioned_pk".
--- but since the FK is MATCH SIMPLE, this one doesn't
-INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, NULL);
--- now create the referenced row ...
---- and now the same insert works
-INSERT INTO fk_notpartitioned_pk VALUES (2502, 2503);
-INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, 2503);
--- this always works
-INSERT INTO fk_partitioned_fk (a,b) VALUES (NULL, NULL);
--- MATCH FULL
-INSERT INTO fk_notpartitioned_pk VALUES (1, 2);
-CREATE TABLE fk_partitioned_fk_full (x int, y int) PARTITION BY RANGE (x);
-CREATE TABLE fk_partitioned_fk_full_1 PARTITION OF fk_partitioned_fk_full DEFAULT;
-INSERT INTO fk_partitioned_fk_full VALUES (1, NULL);
-ALTER TABLE fk_partitioned_fk_full ADD FOREIGN KEY (x, y) REFERENCES fk_notpartitioned_pk MATCH FULL; -- fails
-ERROR: insert or update on table "fk_partitioned_fk_full_1" violates foreign key constraint "fk_partitioned_fk_full_x_y_fkey"
-DETAIL: MATCH FULL does not allow mixing of null and nonnull key values.
-TRUNCATE fk_partitioned_fk_full;
-ALTER TABLE fk_partitioned_fk_full ADD FOREIGN KEY (x, y) REFERENCES fk_notpartitioned_pk MATCH FULL;
-INSERT INTO fk_partitioned_fk_full VALUES (1, NULL); -- fails
-ERROR: insert or update on table "fk_partitioned_fk_full_1" violates foreign key constraint "fk_partitioned_fk_full_x_y_fkey"
-DETAIL: MATCH FULL does not allow mixing of null and nonnull key values.
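The semantics being exercised: under the default MATCH SIMPLE, a row whose key contains any NULL passes the FK check, while MATCH FULL rejects a mix of NULL and non-NULL key columns (all-NULL keys are exempt under both). A minimal sketch, with illustrative names:

CREATE TABLE m_pk (a int, b int, PRIMARY KEY (a, b));
CREATE TABLE m_simple (a int, b int,
  FOREIGN KEY (a, b) REFERENCES m_pk MATCH SIMPLE);
CREATE TABLE m_full (a int, b int,
  FOREIGN KEY (a, b) REFERENCES m_pk MATCH FULL);
INSERT INTO m_simple VALUES (1, NULL);   -- ok: SIMPLE skips keys containing a NULL
INSERT INTO m_full VALUES (NULL, NULL);  -- ok: all-NULL keys are not checked
INSERT INTO m_full VALUES (1, NULL);     -- fails: FULL forbids mixed NULLs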
-DROP TABLE fk_partitioned_fk_full;
--- ON UPDATE SET NULL
-SELECT tableoid::regclass, a, b FROM fk_partitioned_fk WHERE b IS NULL ORDER BY a;
- tableoid | a | b
----------------------+------+---
- fk_partitioned_fk_3 | 2502 |
- fk_partitioned_fk_1 | |
-(2 rows)
-
-UPDATE fk_notpartitioned_pk SET a = a + 1 WHERE a = 2502;
-SELECT tableoid::regclass, a, b FROM fk_partitioned_fk WHERE b IS NULL ORDER BY a;
- tableoid | a | b
----------------------+------+---
- fk_partitioned_fk_3 | 2502 |
- fk_partitioned_fk_1 | |
- fk_partitioned_fk_1 | |
-(3 rows)
-
--- ON DELETE SET NULL
-INSERT INTO fk_partitioned_fk VALUES (2503, 2503);
-SELECT count(*) FROM fk_partitioned_fk WHERE a IS NULL;
- count
--------
- 2
-(1 row)
-
-DELETE FROM fk_notpartitioned_pk;
-SELECT count(*) FROM fk_partitioned_fk WHERE a IS NULL;
- count
--------
- 3
-(1 row)
-
--- ON UPDATE/DELETE SET DEFAULT
-ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_b_fkey;
-ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b)
- REFERENCES fk_notpartitioned_pk
- ON DELETE SET DEFAULT ON UPDATE SET DEFAULT;
-INSERT INTO fk_notpartitioned_pk VALUES (2502, 2503);
-INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, 2503);
--- this fails, because the defaults for the referencing table are not present
--- in the referenced table:
-UPDATE fk_notpartitioned_pk SET a = 1500 WHERE a = 2502;
-ERROR: insert or update on table "fk_partitioned_fk_3" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(2501, 142857) is not present in table "fk_notpartitioned_pk".
--- but inserting the row we can make it work:
-INSERT INTO fk_notpartitioned_pk VALUES (2501, 142857);
-UPDATE fk_notpartitioned_pk SET a = 1500 WHERE a = 2502;
-SELECT * FROM fk_partitioned_fk WHERE b = 142857;
- a | b
-------+--------
- 2501 | 142857
-(1 row)
-
--- ON DELETE SET NULL column_list
-ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_b_fkey;
-ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b)
- REFERENCES fk_notpartitioned_pk
- ON DELETE SET NULL (a);
-BEGIN;
-DELETE FROM fk_notpartitioned_pk WHERE b = 142857;
-SELECT * FROM fk_partitioned_fk WHERE a IS NOT NULL OR b IS NOT NULL ORDER BY a NULLS LAST;
- a | b
-------+--------
- 2502 |
- | 142857
-(2 rows)
-
-ROLLBACK;
--- ON DELETE SET DEFAULT column_list
-ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_b_fkey;
-ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b)
- REFERENCES fk_notpartitioned_pk
- ON DELETE SET DEFAULT (a);
-BEGIN;
-DELETE FROM fk_partitioned_fk;
-DELETE FROM fk_notpartitioned_pk;
-INSERT INTO fk_notpartitioned_pk VALUES (500, 100000), (2501, 100000);
-INSERT INTO fk_partitioned_fk VALUES (500, 100000);
-DELETE FROM fk_notpartitioned_pk WHERE a = 500;
-SELECT * FROM fk_partitioned_fk ORDER BY a;
- a | b
-------+--------
- 2501 | 100000
-(1 row)
-
-ROLLBACK;
--- ON UPDATE/DELETE CASCADE
-ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_b_fkey;
-ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b)
- REFERENCES fk_notpartitioned_pk
- ON DELETE CASCADE ON UPDATE CASCADE;
-UPDATE fk_notpartitioned_pk SET a = 2502 WHERE a = 2501;
-SELECT * FROM fk_partitioned_fk WHERE b = 142857;
- a | b
-------+--------
- 2502 | 142857
-(1 row)
-
--- Now you see it ...
-SELECT * FROM fk_partitioned_fk WHERE b = 142857;
- a | b
-------+--------
- 2502 | 142857
-(1 row)
-
-DELETE FROM fk_notpartitioned_pk WHERE b = 142857;
--- now you don't.
-SELECT * FROM fk_partitioned_fk WHERE a = 142857;
- a | b
----+---
-(0 rows)
-
--- verify that DROP works
-DROP TABLE fk_partitioned_fk_2;
--- Test behavior of the constraint together with attaching and detaching
--- partitions.
-CREATE TABLE fk_partitioned_fk_2 PARTITION OF fk_partitioned_fk FOR VALUES IN (1500,1502);
-ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_2;
-BEGIN;
-DROP TABLE fk_partitioned_fk;
--- constraint should still be there
-\d fk_partitioned_fk_2;
- Table "public.fk_partitioned_fk_2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | | 2501
- b | integer | | | 142857
-Foreign-key constraints:
- "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE
-
-ROLLBACK;
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES IN (1500,1502);
-DROP TABLE fk_partitioned_fk_2;
-CREATE TABLE fk_partitioned_fk_2 (b int, c text, a int,
- FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk ON UPDATE CASCADE ON DELETE CASCADE);
-ALTER TABLE fk_partitioned_fk_2 DROP COLUMN c;
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES IN (1500,1502);
--- should have only one constraint
-\d fk_partitioned_fk_2
- Table "public.fk_partitioned_fk_2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- b | integer | | |
- a | integer | | |
-Partition of: fk_partitioned_fk FOR VALUES IN (1500, 1502)
-Foreign-key constraints:
- TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE
-
-DROP TABLE fk_partitioned_fk_2;
-CREATE TABLE fk_partitioned_fk_4 (a int, b int, FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE) PARTITION BY RANGE (b, a);
-CREATE TABLE fk_partitioned_fk_4_1 PARTITION OF fk_partitioned_fk_4 FOR VALUES FROM (1,1) TO (100,100);
-CREATE TABLE fk_partitioned_fk_4_2 (a int, b int, FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE SET NULL);
-ALTER TABLE fk_partitioned_fk_4 ATTACH PARTITION fk_partitioned_fk_4_2 FOR VALUES FROM (100,100) TO (1000,1000);
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_4 FOR VALUES IN (3500,3502);
-ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_4;
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_4 FOR VALUES IN (3500,3502);
--- should only have one constraint
-\d fk_partitioned_fk_4
- Partitioned table "public.fk_partitioned_fk_4"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
-Partition of: fk_partitioned_fk FOR VALUES IN (3500, 3502)
-Partition key: RANGE (b, a)
-Foreign-key constraints:
- TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE
-Number of partitions: 2 (Use \d+ to list them.)
-
-\d fk_partitioned_fk_4_1
- Table "public.fk_partitioned_fk_4_1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
-Partition of: fk_partitioned_fk_4 FOR VALUES FROM (1, 1) TO (100, 100)
-Foreign-key constraints:
- TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE
-
--- this one has an FK with mismatched properties
-\d fk_partitioned_fk_4_2
- Table "public.fk_partitioned_fk_4_2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
-Partition of: fk_partitioned_fk_4 FOR VALUES FROM (100, 100) TO (1000, 1000)
-Foreign-key constraints:
- "fk_partitioned_fk_4_2_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE SET NULL
- TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE
-
-CREATE TABLE fk_partitioned_fk_5 (a int, b int,
- FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE,
- FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) MATCH FULL ON UPDATE CASCADE ON DELETE CASCADE)
- PARTITION BY RANGE (a);
-CREATE TABLE fk_partitioned_fk_5_1 (a int, b int, FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk);
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_5 FOR VALUES IN (4500);
-ALTER TABLE fk_partitioned_fk_5 ATTACH PARTITION fk_partitioned_fk_5_1 FOR VALUES FROM (0) TO (10);
-ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_5;
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_5 FOR VALUES IN (4500);
--- this one has two constraints, similar but not quite the one in the parent,
--- so it gets a new one
-\d fk_partitioned_fk_5
- Partitioned table "public.fk_partitioned_fk_5"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
-Partition of: fk_partitioned_fk FOR VALUES IN (4500)
-Partition key: RANGE (a)
-Foreign-key constraints:
- "fk_partitioned_fk_5_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE
- "fk_partitioned_fk_5_a_b_fkey1" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) MATCH FULL ON UPDATE CASCADE ON DELETE CASCADE
- TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE
-Number of partitions: 1 (Use \d+ to list them.)
-
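What ATTACH is doing in these \d outputs: if the child already carries an FK whose definition matches the parent's exactly (columns, referenced table, MATCH option, actions, deferrability), it is absorbed as a child of the parent's constraint rather than duplicated; a similar-but-different FK, like the ON UPDATE SET NULL one on fk_partitioned_fk_4_2, stays as an independent constraint alongside the inherited one. A sketch of the matching case, with illustrative names:

CREATE TABLE ab_pk (a int, b int, PRIMARY KEY (a, b));
CREATE TABLE ab_fk (a int, b int,
  FOREIGN KEY (a, b) REFERENCES ab_pk) PARTITION BY LIST (a);
CREATE TABLE ab_fk_1 (a int, b int,
  FOREIGN KEY (a, b) REFERENCES ab_pk);  -- same definition as the parent's
ALTER TABLE ab_fk ATTACH PARTITION ab_fk_1 FOR VALUES IN (1);
-- \d ab_fk_1 should now show a single inherited constraint, not two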
--- verify that it works to reattach a child with multiple candidate
--- constraints
-ALTER TABLE fk_partitioned_fk_5 DETACH PARTITION fk_partitioned_fk_5_1;
-ALTER TABLE fk_partitioned_fk_5 ATTACH PARTITION fk_partitioned_fk_5_1 FOR VALUES FROM (0) TO (10);
-\d fk_partitioned_fk_5_1
- Table "public.fk_partitioned_fk_5_1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
-Partition of: fk_partitioned_fk_5 FOR VALUES FROM (0) TO (10)
-Foreign-key constraints:
- "fk_partitioned_fk_5_1_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b)
- TABLE "fk_partitioned_fk_5" CONSTRAINT "fk_partitioned_fk_5_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE
- TABLE "fk_partitioned_fk_5" CONSTRAINT "fk_partitioned_fk_5_a_b_fkey1" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) MATCH FULL ON UPDATE CASCADE ON DELETE CASCADE
- TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE
-
--- verify that attaching a table checks that the existing data satisfies the
--- constraint
-CREATE TABLE fk_partitioned_fk_2 (a int, b int) PARTITION BY RANGE (b);
-CREATE TABLE fk_partitioned_fk_2_1 PARTITION OF fk_partitioned_fk_2 FOR VALUES FROM (0) TO (1000);
-CREATE TABLE fk_partitioned_fk_2_2 PARTITION OF fk_partitioned_fk_2 FOR VALUES FROM (1000) TO (2000);
-INSERT INTO fk_partitioned_fk_2 VALUES (1600, 601), (1600, 1601);
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2
- FOR VALUES IN (1600);
-ERROR: insert or update on table "fk_partitioned_fk_2_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(1600, 601) is not present in table "fk_notpartitioned_pk".
-INSERT INTO fk_notpartitioned_pk VALUES (1600, 601), (1600, 1601);
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2
- FOR VALUES IN (1600);
--- leave these tables around intentionally
--- Verify that attaching a table that's referenced by an existing FK
--- in the parent throws an error
-CREATE TABLE fk_partitioned_pk_6 (a int PRIMARY KEY);
-CREATE TABLE fk_partitioned_fk_6 (a int REFERENCES fk_partitioned_pk_6) PARTITION BY LIST (a);
-ALTER TABLE fk_partitioned_fk_6 ATTACH PARTITION fk_partitioned_pk_6 FOR VALUES IN (1);
-ERROR: cannot attach table "fk_partitioned_pk_6" as a partition because it is referenced by foreign key "fk_partitioned_fk_6_a_fkey"
-DROP TABLE fk_partitioned_pk_6, fk_partitioned_fk_6;
--- This case is similar to above, but the referenced relation is one level
--- lower in the hierarchy. This one fails in a different way than the above,
--- because we don't bother to protect against this case explicitly. If the
--- current error stops happening, we'll need to add a better protection.
-CREATE TABLE fk_partitioned_pk_6 (a int PRIMARY KEY) PARTITION BY list (a);
-CREATE TABLE fk_partitioned_pk_61 PARTITION OF fk_partitioned_pk_6 FOR VALUES IN (1);
-CREATE TABLE fk_partitioned_fk_6 (a int REFERENCES fk_partitioned_pk_61) PARTITION BY LIST (a);
-ALTER TABLE fk_partitioned_fk_6 ATTACH PARTITION fk_partitioned_pk_6 FOR VALUES IN (1);
-ERROR: cannot ALTER TABLE "fk_partitioned_pk_61" because it is being used by active queries in this session
-DROP TABLE fk_partitioned_pk_6, fk_partitioned_fk_6;
--- test the case when the referenced table is owned by a different user
-create role regress_other_partitioned_fk_owner;
-grant references on fk_notpartitioned_pk to regress_other_partitioned_fk_owner;
-set role regress_other_partitioned_fk_owner;
-create table other_partitioned_fk(a int, b int) partition by list (a);
-create table other_partitioned_fk_1 partition of other_partitioned_fk
- for values in (2048);
-insert into other_partitioned_fk
- select 2048, x from generate_series(1,10) x;
--- this should fail
-alter table other_partitioned_fk add foreign key (a, b)
- references fk_notpartitioned_pk(a, b);
-ERROR: insert or update on table "other_partitioned_fk_1" violates foreign key constraint "other_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(2048, 1) is not present in table "fk_notpartitioned_pk".
--- add the missing keys and retry
-reset role;
-insert into fk_notpartitioned_pk (a, b)
- select 2048, x from generate_series(1,10) x;
-set role regress_other_partitioned_fk_owner;
-alter table other_partitioned_fk add foreign key (a, b)
- references fk_notpartitioned_pk(a, b);
--- clean up
-drop table other_partitioned_fk;
-reset role;
-revoke all on fk_notpartitioned_pk from regress_other_partitioned_fk_owner;
-drop role regress_other_partitioned_fk_owner;
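The privilege point in the sequence above: the FK validation scan runs with the table owner's rights, so the owner needs the REFERENCES privilege on the referenced table, and the constraint is accepted only once the existing data passes. A compressed sketch of the same dance (role and table names illustrative; assumes a superuser session and a schema the role can create in):

CREATE TABLE priv_pk (a int PRIMARY KEY);
CREATE ROLE priv_owner;
GRANT REFERENCES ON priv_pk TO priv_owner;
SET ROLE priv_owner;
CREATE TABLE priv_fk (a int);
ALTER TABLE priv_fk ADD FOREIGN KEY (a) REFERENCES priv_pk;  -- allowed by the grant
RESET ROLE;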
---
--- Test self-referencing foreign key with partition.
--- This should create only one fk constraint per partition
---
-CREATE TABLE parted_self_fk (
- id bigint NOT NULL PRIMARY KEY,
- id_abc bigint,
- FOREIGN KEY (id_abc) REFERENCES parted_self_fk(id)
-)
-PARTITION BY RANGE (id);
-CREATE TABLE part1_self_fk (
- id bigint NOT NULL PRIMARY KEY,
- id_abc bigint
-);
-ALTER TABLE parted_self_fk ATTACH PARTITION part1_self_fk FOR VALUES FROM (0) TO (10);
-CREATE TABLE part2_self_fk PARTITION OF parted_self_fk FOR VALUES FROM (10) TO (20);
-CREATE TABLE part3_self_fk ( -- a partitioned partition
- id bigint NOT NULL PRIMARY KEY,
- id_abc bigint
-) PARTITION BY RANGE (id);
-CREATE TABLE part32_self_fk PARTITION OF part3_self_fk FOR VALUES FROM (20) TO (30);
-ALTER TABLE parted_self_fk ATTACH PARTITION part3_self_fk FOR VALUES FROM (20) TO (40);
-CREATE TABLE part33_self_fk (
- id bigint NOT NULL PRIMARY KEY,
- id_abc bigint
-);
-ALTER TABLE part3_self_fk ATTACH PARTITION part33_self_fk FOR VALUES FROM (30) TO (40);
-SELECT cr.relname, co.conname, co.contype, co.convalidated,
- p.conname AS conparent, p.convalidated, cf.relname AS foreignrel
-FROM pg_constraint co
-JOIN pg_class cr ON cr.oid = co.conrelid
-LEFT JOIN pg_class cf ON cf.oid = co.confrelid
-LEFT JOIN pg_constraint p ON p.oid = co.conparentid
-WHERE cr.oid IN (SELECT relid FROM pg_partition_tree('parted_self_fk'))
-ORDER BY co.contype, cr.relname, co.conname, p.conname;
- relname | conname | contype | convalidated | conparent | convalidated | foreignrel
-----------------+----------------------------+---------+--------------+----------------------------+--------------+----------------
- part1_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk
- part2_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk
- part32_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk
- part33_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk
- part3_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk
- parted_self_fk | parted_self_fk_id_abc_fkey | f | t | | | parted_self_fk
- part1_self_fk | part1_self_fk_id_not_null | n | t | | |
- part2_self_fk | parted_self_fk_id_not_null | n | t | | |
- part32_self_fk | part3_self_fk_id_not_null | n | t | | |
- part33_self_fk | part33_self_fk_id_not_null | n | t | | |
- part3_self_fk | part3_self_fk_id_not_null | n | t | | |
- parted_self_fk | parted_self_fk_id_not_null | n | t | | |
- part1_self_fk | part1_self_fk_pkey | p | t | parted_self_fk_pkey | t |
- part2_self_fk | part2_self_fk_pkey | p | t | parted_self_fk_pkey | t |
- part32_self_fk | part32_self_fk_pkey | p | t | part3_self_fk_pkey | t |
- part33_self_fk | part33_self_fk_pkey | p | t | part3_self_fk_pkey | t |
- part3_self_fk | part3_self_fk_pkey | p | t | parted_self_fk_pkey | t |
- parted_self_fk | parted_self_fk_pkey | p | t | | |
-(18 rows)
-
--- detach and re-attach multiple times just to ensure everything is kosher
-ALTER TABLE parted_self_fk DETACH PARTITION part2_self_fk;
-ALTER TABLE parted_self_fk ATTACH PARTITION part2_self_fk FOR VALUES FROM (10) TO (20);
-ALTER TABLE parted_self_fk DETACH PARTITION part2_self_fk;
-ALTER TABLE parted_self_fk ATTACH PARTITION part2_self_fk FOR VALUES FROM (10) TO (20);
-SELECT cr.relname, co.conname, co.contype, co.convalidated,
- p.conname AS conparent, p.convalidated, cf.relname AS foreignrel
-FROM pg_constraint co
-JOIN pg_class cr ON cr.oid = co.conrelid
-LEFT JOIN pg_class cf ON cf.oid = co.confrelid
-LEFT JOIN pg_constraint p ON p.oid = co.conparentid
-WHERE cr.oid IN (SELECT relid FROM pg_partition_tree('parted_self_fk'))
-ORDER BY co.contype, cr.relname, co.conname, p.conname;
- relname | conname | contype | convalidated | conparent | convalidated | foreignrel
-----------------+----------------------------+---------+--------------+----------------------------+--------------+----------------
- part1_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk
- part2_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk
- part32_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk
- part33_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk
- part3_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk
- parted_self_fk | parted_self_fk_id_abc_fkey | f | t | | | parted_self_fk
- part1_self_fk | part1_self_fk_id_not_null | n | t | | |
- part2_self_fk | parted_self_fk_id_not_null | n | t | | |
- part32_self_fk | part3_self_fk_id_not_null | n | t | | |
- part33_self_fk | part33_self_fk_id_not_null | n | t | | |
- part3_self_fk | part3_self_fk_id_not_null | n | t | | |
- parted_self_fk | parted_self_fk_id_not_null | n | t | | |
- part1_self_fk | part1_self_fk_pkey | p | t | parted_self_fk_pkey | t |
- part2_self_fk | part2_self_fk_pkey | p | t | parted_self_fk_pkey | t |
- part32_self_fk | part32_self_fk_pkey | p | t | part3_self_fk_pkey | t |
- part33_self_fk | part33_self_fk_pkey | p | t | part3_self_fk_pkey | t |
- part3_self_fk | part3_self_fk_pkey | p | t | parted_self_fk_pkey | t |
- parted_self_fk | parted_self_fk_pkey | p | t | | |
-(18 rows)
-
--- Leave this table around, for pg_upgrade/pg_dump tests
--- Test creating a constraint at the parent that already exists in partitions.
--- There should be no duplicated constraints, and attempts to drop the
--- constraint in partitions should raise appropriate errors.
-create schema fkpart0
- create table pkey (a int primary key)
- create table fk_part (a int) partition by list (a)
- create table fk_part_1 partition of fk_part
- (foreign key (a) references fkpart0.pkey) for values in (1)
- create table fk_part_23 partition of fk_part
- (foreign key (a) references fkpart0.pkey) for values in (2, 3)
- partition by list (a)
- create table fk_part_23_2 partition of fk_part_23 for values in (2);
-alter table fkpart0.fk_part add foreign key (a) references fkpart0.pkey;
-\d fkpart0.fk_part_1 \\ -- should have only one FK
- Table "fkpart0.fk_part_1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-Partition of: fkpart0.fk_part FOR VALUES IN (1)
-Foreign-key constraints:
- TABLE "fkpart0.fk_part" CONSTRAINT "fk_part_a_fkey" FOREIGN KEY (a) REFERENCES fkpart0.pkey(a)
-
-alter table fkpart0.fk_part_1 drop constraint fk_part_1_a_fkey;
-ERROR: cannot drop inherited constraint "fk_part_1_a_fkey" of relation "fk_part_1"
-\d fkpart0.fk_part_23 \\ -- should have only one FK
- Partitioned table "fkpart0.fk_part_23"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-Partition of: fkpart0.fk_part FOR VALUES IN (2, 3)
-Partition key: LIST (a)
-Foreign-key constraints:
- TABLE "fkpart0.fk_part" CONSTRAINT "fk_part_a_fkey" FOREIGN KEY (a) REFERENCES fkpart0.pkey(a)
-Number of partitions: 1 (Use \d+ to list them.)
-
-\d fkpart0.fk_part_23_2 \\ -- should have only one FK
- Table "fkpart0.fk_part_23_2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-Partition of: fkpart0.fk_part_23 FOR VALUES IN (2)
-Foreign-key constraints:
- TABLE "fkpart0.fk_part" CONSTRAINT "fk_part_a_fkey" FOREIGN KEY (a) REFERENCES fkpart0.pkey(a)
-
-alter table fkpart0.fk_part_23 drop constraint fk_part_23_a_fkey;
-ERROR: cannot drop inherited constraint "fk_part_23_a_fkey" of relation "fk_part_23"
-alter table fkpart0.fk_part_23_2 drop constraint fk_part_23_a_fkey;
-ERROR: cannot drop inherited constraint "fk_part_23_a_fkey" of relation "fk_part_23_2"
-create table fkpart0.fk_part_4 partition of fkpart0.fk_part for values in (4);
-\d fkpart0.fk_part_4
- Table "fkpart0.fk_part_4"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-Partition of: fkpart0.fk_part FOR VALUES IN (4)
-Foreign-key constraints:
- TABLE "fkpart0.fk_part" CONSTRAINT "fk_part_a_fkey" FOREIGN KEY (a) REFERENCES fkpart0.pkey(a)
-
-alter table fkpart0.fk_part_4 drop constraint fk_part_a_fkey;
-ERROR: cannot drop inherited constraint "fk_part_a_fkey" of relation "fk_part_4"
-create table fkpart0.fk_part_56 partition of fkpart0.fk_part
- for values in (5,6) partition by list (a);
-create table fkpart0.fk_part_56_5 partition of fkpart0.fk_part_56
- for values in (5);
-\d fkpart0.fk_part_56
- Partitioned table "fkpart0.fk_part_56"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-Partition of: fkpart0.fk_part FOR VALUES IN (5, 6)
-Partition key: LIST (a)
-Foreign-key constraints:
- TABLE "fkpart0.fk_part" CONSTRAINT "fk_part_a_fkey" FOREIGN KEY (a) REFERENCES fkpart0.pkey(a)
-Number of partitions: 1 (Use \d+ to list them.)
-
-alter table fkpart0.fk_part_56 drop constraint fk_part_a_fkey;
-ERROR: cannot drop inherited constraint "fk_part_a_fkey" of relation "fk_part_56"
-alter table fkpart0.fk_part_56_5 drop constraint fk_part_a_fkey;
-ERROR: cannot drop inherited constraint "fk_part_a_fkey" of relation "fk_part_56_5"
--- verify that attaching and detaching partitions maintains the right set of
--- triggers
-create schema fkpart1
- create table pkey (a int primary key)
- create table fk_part (a int) partition by list (a)
- create table fk_part_1 partition of fk_part for values in (1) partition by list (a)
- create table fk_part_1_1 partition of fk_part_1 for values in (1);
-alter table fkpart1.fk_part add foreign key (a) references fkpart1.pkey;
-insert into fkpart1.fk_part values (1); -- should fail
-ERROR: insert or update on table "fk_part_1_1" violates foreign key constraint "fk_part_a_fkey"
-DETAIL: Key (a)=(1) is not present in table "pkey".
-insert into fkpart1.pkey values (1);
-insert into fkpart1.fk_part values (1);
-delete from fkpart1.pkey where a = 1; -- should fail
-ERROR: update or delete on table "pkey" violates foreign key constraint "fk_part_a_fkey" on table "fk_part"
-DETAIL: Key (a)=(1) is still referenced from table "fk_part".
-alter table fkpart1.fk_part detach partition fkpart1.fk_part_1;
-create table fkpart1.fk_part_1_2 partition of fkpart1.fk_part_1 for values in (2);
-insert into fkpart1.fk_part_1 values (2); -- should fail
-ERROR: insert or update on table "fk_part_1_2" violates foreign key constraint "fk_part_a_fkey"
-DETAIL: Key (a)=(2) is not present in table "pkey".
-delete from fkpart1.pkey where a = 1;
-ERROR: update or delete on table "pkey" violates foreign key constraint "fk_part_a_fkey" on table "fk_part_1"
-DETAIL: Key (a)=(1) is still referenced from table "fk_part_1".
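The detach behavior being verified: the partition leaves the tree with a full working copy of the FK (constraint plus RI triggers), so enforcement continues on both sides even after DETACH. A sketch, with illustrative names:

CREATE TABLE det_pk (a int PRIMARY KEY);
CREATE TABLE det_fk (a int REFERENCES det_pk) PARTITION BY LIST (a);
CREATE TABLE det_fk_1 PARTITION OF det_fk FOR VALUES IN (1);
ALTER TABLE det_fk DETACH PARTITION det_fk_1;
INSERT INTO det_fk_1 VALUES (1);  -- still fails: the detached copy enforces on its own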
--- verify that attaching and detaching partitions manipulates the inheritance
--- properties of their FK constraints correctly
-create schema fkpart2
- create table pkey (a int primary key)
- create table fk_part (a int, constraint fkey foreign key (a) references fkpart2.pkey) partition by list (a)
- create table fk_part_1 partition of fkpart2.fk_part for values in (1) partition by list (a)
- create table fk_part_1_1 (a int, constraint my_fkey foreign key (a) references fkpart2.pkey);
-alter table fkpart2.fk_part_1 attach partition fkpart2.fk_part_1_1 for values in (1);
-alter table fkpart2.fk_part_1 drop constraint fkey; -- should fail
-ERROR: cannot drop inherited constraint "fkey" of relation "fk_part_1"
-alter table fkpart2.fk_part_1_1 drop constraint my_fkey; -- should fail
-ERROR: cannot drop inherited constraint "my_fkey" of relation "fk_part_1_1"
-alter table fkpart2.fk_part detach partition fkpart2.fk_part_1;
-alter table fkpart2.fk_part_1 drop constraint fkey; -- ok
-alter table fkpart2.fk_part_1_1 drop constraint my_fkey; -- doesn't exist
-ERROR: constraint "my_fkey" of relation "fk_part_1_1" does not exist
--- verify constraint deferrability
-create schema fkpart3
- create table pkey (a int primary key)
- create table fk_part (a int, constraint fkey foreign key (a) references fkpart3.pkey deferrable initially immediate) partition by list (a)
- create table fk_part_1 partition of fkpart3.fk_part for values in (1) partition by list (a)
- create table fk_part_1_1 partition of fkpart3.fk_part_1 for values in (1)
- create table fk_part_2 partition of fkpart3.fk_part for values in (2);
-begin;
-set constraints fkpart3.fkey deferred;
-insert into fkpart3.fk_part values (1);
-insert into fkpart3.pkey values (1);
-commit;
-begin;
-set constraints fkpart3.fkey deferred;
-delete from fkpart3.pkey;
-delete from fkpart3.fk_part;
-commit;
-drop schema fkpart0, fkpart1, fkpart2, fkpart3 cascade;
-NOTICE: drop cascades to 10 other objects
-DETAIL: drop cascades to table fkpart3.pkey
-drop cascades to table fkpart3.fk_part
-drop cascades to table fkpart2.pkey
-drop cascades to table fkpart2.fk_part
-drop cascades to table fkpart2.fk_part_1
-drop cascades to table fkpart1.pkey
-drop cascades to table fkpart1.fk_part
-drop cascades to table fkpart1.fk_part_1
-drop cascades to table fkpart0.pkey
-drop cascades to table fkpart0.fk_part
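The fkpart3 block shows that SET CONSTRAINTS by name reaches the clones on every partition: with the check deferred, either ordering (referencing row before referenced row, or deleting in the "wrong" order) commits cleanly. The same pattern outside a schema, with illustrative names:

CREATE TABLE d_pk (a int PRIMARY KEY);
CREATE TABLE d_fk (a int,
  CONSTRAINT d_fkey FOREIGN KEY (a) REFERENCES d_pk
    DEFERRABLE INITIALLY IMMEDIATE) PARTITION BY LIST (a);
CREATE TABLE d_fk_1 PARTITION OF d_fk FOR VALUES IN (1);
BEGIN;
SET CONSTRAINTS d_fkey DEFERRED;
INSERT INTO d_fk VALUES (1);  -- not checked yet
INSERT INTO d_pk VALUES (1);
COMMIT;                       -- the check runs here and passes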
--- Test a partitioned table as referenced table.
--- Verify basic functionality with a regular partition creation and a partition
--- with a different column layout, as well as partitions added (created and
--- attached) after creating the foreign key.
-CREATE SCHEMA fkpart3;
-SET search_path TO fkpart3;
-CREATE TABLE pk (a int PRIMARY KEY) PARTITION BY RANGE (a);
-CREATE TABLE pk1 PARTITION OF pk FOR VALUES FROM (0) TO (1000);
-CREATE TABLE pk2 (b int, a int);
-ALTER TABLE pk2 DROP COLUMN b;
-ALTER TABLE pk2 ALTER a SET NOT NULL;
-ALTER TABLE pk ATTACH PARTITION pk2 FOR VALUES FROM (1000) TO (2000);
-CREATE TABLE fk (a int) PARTITION BY RANGE (a);
-CREATE TABLE fk1 PARTITION OF fk FOR VALUES FROM (0) TO (750);
-ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk;
-CREATE TABLE fk2 (b int, a int) ;
-ALTER TABLE fk2 DROP COLUMN b;
-ALTER TABLE fk ATTACH PARTITION fk2 FOR VALUES FROM (750) TO (3500);
-CREATE TABLE pk3 PARTITION OF pk FOR VALUES FROM (2000) TO (3000);
-CREATE TABLE pk4 (LIKE pk);
-ALTER TABLE pk ATTACH PARTITION pk4 FOR VALUES FROM (3000) TO (4000);
-CREATE TABLE pk5 (c int, b int, a int NOT NULL) PARTITION BY RANGE (a);
-ALTER TABLE pk5 DROP COLUMN b, DROP COLUMN c;
-CREATE TABLE pk51 PARTITION OF pk5 FOR VALUES FROM (4000) TO (4500);
-CREATE TABLE pk52 PARTITION OF pk5 FOR VALUES FROM (4500) TO (5000);
-ALTER TABLE pk ATTACH PARTITION pk5 FOR VALUES FROM (4000) TO (5000);
-CREATE TABLE fk3 PARTITION OF fk FOR VALUES FROM (3500) TO (5000);
--- these should fail: referenced value not present
-INSERT into fk VALUES (1);
-ERROR: insert or update on table "fk1" violates foreign key constraint "fk_a_fkey"
-DETAIL: Key (a)=(1) is not present in table "pk".
-INSERT into fk VALUES (1000);
-ERROR: insert or update on table "fk2" violates foreign key constraint "fk_a_fkey"
-DETAIL: Key (a)=(1000) is not present in table "pk".
-INSERT into fk VALUES (2000);
-ERROR: insert or update on table "fk2" violates foreign key constraint "fk_a_fkey"
-DETAIL: Key (a)=(2000) is not present in table "pk".
-INSERT into fk VALUES (3000);
-ERROR: insert or update on table "fk2" violates foreign key constraint "fk_a_fkey"
-DETAIL: Key (a)=(3000) is not present in table "pk".
-INSERT into fk VALUES (4000);
-ERROR: insert or update on table "fk3" violates foreign key constraint "fk_a_fkey"
-DETAIL: Key (a)=(4000) is not present in table "pk".
-INSERT into fk VALUES (4500);
-ERROR: insert or update on table "fk3" violates foreign key constraint "fk_a_fkey"
-DETAIL: Key (a)=(4500) is not present in table "pk".
--- insert into the referenced table, now they should work
-INSERT into pk VALUES (1), (1000), (2000), (3000), (4000), (4500);
-INSERT into fk VALUES (1), (1000), (2000), (3000), (4000), (4500);
--- should fail: referencing value present
-DELETE FROM pk WHERE a = 1;
-ERROR: update or delete on table "pk1" violates foreign key constraint "fk_a_fkey1" on table "fk"
-DETAIL: Key (a)=(1) is still referenced from table "fk".
-DELETE FROM pk WHERE a = 1000;
-ERROR: update or delete on table "pk2" violates foreign key constraint "fk_a_fkey2" on table "fk"
-DETAIL: Key (a)=(1000) is still referenced from table "fk".
-DELETE FROM pk WHERE a = 2000;
-ERROR: update or delete on table "pk3" violates foreign key constraint "fk_a_fkey3" on table "fk"
-DETAIL: Key (a)=(2000) is still referenced from table "fk".
-DELETE FROM pk WHERE a = 3000;
-ERROR: update or delete on table "pk4" violates foreign key constraint "fk_a_fkey4" on table "fk"
-DETAIL: Key (a)=(3000) is still referenced from table "fk".
-DELETE FROM pk WHERE a = 4000;
-ERROR: update or delete on table "pk51" violates foreign key constraint "fk_a_fkey6" on table "fk"
-DETAIL: Key (a)=(4000) is still referenced from table "fk".
-DELETE FROM pk WHERE a = 4500;
-ERROR: update or delete on table "pk52" violates foreign key constraint "fk_a_fkey7" on table "fk"
-DETAIL: Key (a)=(4500) is still referenced from table "fk".
-UPDATE pk SET a = 2 WHERE a = 1;
-ERROR: update or delete on table "pk1" violates foreign key constraint "fk_a_fkey1" on table "fk"
-DETAIL: Key (a)=(1) is still referenced from table "fk".
-UPDATE pk SET a = 1002 WHERE a = 1000;
-ERROR: update or delete on table "pk2" violates foreign key constraint "fk_a_fkey2" on table "fk"
-DETAIL: Key (a)=(1000) is still referenced from table "fk".
-UPDATE pk SET a = 2002 WHERE a = 2000;
-ERROR: update or delete on table "pk3" violates foreign key constraint "fk_a_fkey3" on table "fk"
-DETAIL: Key (a)=(2000) is still referenced from table "fk".
-UPDATE pk SET a = 3002 WHERE a = 3000;
-ERROR: update or delete on table "pk4" violates foreign key constraint "fk_a_fkey4" on table "fk"
-DETAIL: Key (a)=(3000) is still referenced from table "fk".
-UPDATE pk SET a = 4002 WHERE a = 4000;
-ERROR: update or delete on table "pk51" violates foreign key constraint "fk_a_fkey6" on table "fk"
-DETAIL: Key (a)=(4000) is still referenced from table "fk".
-UPDATE pk SET a = 4502 WHERE a = 4500;
-ERROR: update or delete on table "pk52" violates foreign key constraint "fk_a_fkey7" on table "fk"
-DETAIL: Key (a)=(4500) is still referenced from table "fk".
--- now they should work
-DELETE FROM fk;
-UPDATE pk SET a = 2 WHERE a = 1;
-DELETE FROM pk WHERE a = 2;
-UPDATE pk SET a = 1002 WHERE a = 1000;
-DELETE FROM pk WHERE a = 1002;
-UPDATE pk SET a = 2002 WHERE a = 2000;
-DELETE FROM pk WHERE a = 2002;
-UPDATE pk SET a = 3002 WHERE a = 3000;
-DELETE FROM pk WHERE a = 3002;
-UPDATE pk SET a = 4002 WHERE a = 4000;
-DELETE FROM pk WHERE a = 4002;
-UPDATE pk SET a = 4502 WHERE a = 4500;
-DELETE FROM pk WHERE a = 4502;
-CREATE SCHEMA fkpart4;
-SET search_path TO fkpart4;
--- dropping/detaching PARTITIONs is prevented if that would break
--- a foreign key's existing data
-CREATE TABLE droppk (a int PRIMARY KEY) PARTITION BY RANGE (a);
-CREATE TABLE droppk1 PARTITION OF droppk FOR VALUES FROM (0) TO (1000);
-CREATE TABLE droppk_d PARTITION OF droppk DEFAULT;
-CREATE TABLE droppk2 PARTITION OF droppk FOR VALUES FROM (1000) TO (2000)
- PARTITION BY RANGE (a);
-CREATE TABLE droppk21 PARTITION OF droppk2 FOR VALUES FROM (1000) TO (1400);
-CREATE TABLE droppk2_d PARTITION OF droppk2 DEFAULT;
-INSERT into droppk VALUES (1), (1000), (1500), (2000);
-CREATE TABLE dropfk (a int REFERENCES droppk);
-INSERT into dropfk VALUES (1), (1000), (1500), (2000);
--- these should all fail
-ALTER TABLE droppk DETACH PARTITION droppk_d;
-ERROR: removing partition "droppk_d" violates foreign key constraint "dropfk_a_fkey5"
-DETAIL: Key (a)=(2000) is still referenced from table "dropfk".
-ALTER TABLE droppk2 DETACH PARTITION droppk2_d;
-ERROR: removing partition "droppk2_d" violates foreign key constraint "dropfk_a_fkey4"
-DETAIL: Key (a)=(1500) is still referenced from table "dropfk".
-ALTER TABLE droppk DETACH PARTITION droppk1;
-ERROR: removing partition "droppk1" violates foreign key constraint "dropfk_a_fkey1"
-DETAIL: Key (a)=(1) is still referenced from table "dropfk".
-ALTER TABLE droppk DETACH PARTITION droppk2;
-ERROR: removing partition "droppk2" violates foreign key constraint "dropfk_a_fkey2"
-DETAIL: Key (a)=(1000) is still referenced from table "dropfk".
-ALTER TABLE droppk2 DETACH PARTITION droppk21;
-ERROR: removing partition "droppk21" violates foreign key constraint "dropfk_a_fkey3"
-DETAIL: Key (a)=(1000) is still referenced from table "dropfk".
--- dropping partitions is disallowed
-DROP TABLE droppk_d;
-ERROR: cannot drop table droppk_d because other objects depend on it
-DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk_d
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP TABLE droppk2_d;
-ERROR: cannot drop table droppk2_d because other objects depend on it
-DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk2_d
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP TABLE droppk1;
-ERROR: cannot drop table droppk1 because other objects depend on it
-DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk1
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP TABLE droppk2;
-ERROR: cannot drop table droppk2 because other objects depend on it
-DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk2
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP TABLE droppk21;
-ERROR: cannot drop table droppk21 because other objects depend on it
-DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk21
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DELETE FROM dropfk;
--- dropping partitions is disallowed, even when no referencing values
-DROP TABLE droppk_d;
-ERROR: cannot drop table droppk_d because other objects depend on it
-DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk_d
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP TABLE droppk2_d;
-ERROR: cannot drop table droppk2_d because other objects depend on it
-DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk2_d
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP TABLE droppk1;
-ERROR: cannot drop table droppk1 because other objects depend on it
-DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk1
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
--- but DETACH is allowed, and DROP afterwards works
-ALTER TABLE droppk2 DETACH PARTITION droppk21;
-DROP TABLE droppk2;
-ERROR: cannot drop table droppk2 because other objects depend on it
-DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk2
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
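The dependency the HINTs point at is recorded against each table in the referenced partition tree, so no member can be dropped directly whether or not referencing rows exist; only a detached table leaves the constraint's reach. A sketch of the path that should work (illustrative names; behavior as suggested by the test above, so treat it as an assumption):

CREATE TABLE dp_pk (a int PRIMARY KEY) PARTITION BY RANGE (a);
CREATE TABLE dp_pk1 PARTITION OF dp_pk FOR VALUES FROM (0) TO (10);
CREATE TABLE dp_fk (a int REFERENCES dp_pk);
DROP TABLE dp_pk1;                          -- fails: dp_fk's constraint depends on it
ALTER TABLE dp_pk DETACH PARTITION dp_pk1;  -- ok while no referencing rows exist
DROP TABLE dp_pk1;                          -- ok: no longer in the referenced tree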
--- Verify that initial constraint creation and cloning behave correctly
-CREATE SCHEMA fkpart5;
-SET search_path TO fkpart5;
-CREATE TABLE pk (a int PRIMARY KEY) PARTITION BY LIST (a);
-CREATE TABLE pk1 PARTITION OF pk FOR VALUES IN (1) PARTITION BY LIST (a);
-CREATE TABLE pk11 PARTITION OF pk1 FOR VALUES IN (1);
-CREATE TABLE fk (a int) PARTITION BY LIST (a);
-CREATE TABLE fk1 PARTITION OF fk FOR VALUES IN (1) PARTITION BY LIST (a);
-CREATE TABLE fk11 PARTITION OF fk1 FOR VALUES IN (1);
-ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk;
-CREATE TABLE pk2 PARTITION OF pk FOR VALUES IN (2);
-CREATE TABLE pk3 (a int NOT NULL) PARTITION BY LIST (a);
-CREATE TABLE pk31 PARTITION OF pk3 FOR VALUES IN (31);
-CREATE TABLE pk32 (b int, a int NOT NULL);
-ALTER TABLE pk32 DROP COLUMN b;
-ALTER TABLE pk3 ATTACH PARTITION pk32 FOR VALUES IN (32);
-ALTER TABLE pk ATTACH PARTITION pk3 FOR VALUES IN (31, 32);
-CREATE TABLE fk2 PARTITION OF fk FOR VALUES IN (2);
-CREATE TABLE fk3 (b int, a int);
-ALTER TABLE fk3 DROP COLUMN b;
-ALTER TABLE fk ATTACH PARTITION fk3 FOR VALUES IN (3);
-SELECT pg_describe_object('pg_constraint'::regclass, oid, 0), confrelid::regclass,
- CASE WHEN conparentid <> 0 THEN pg_describe_object('pg_constraint'::regclass, conparentid, 0) ELSE 'TOP' END
-FROM pg_catalog.pg_constraint
-WHERE conrelid IN (SELECT relid FROM pg_partition_tree('fk'))
-ORDER BY conrelid::regclass::text, conname;
- pg_describe_object | confrelid | case
-------------------------------------+-----------+-----------------------------------
- constraint fk_a_fkey on table fk | pk | TOP
- constraint fk_a_fkey1 on table fk | pk1 | constraint fk_a_fkey on table fk
- constraint fk_a_fkey2 on table fk | pk11 | constraint fk_a_fkey1 on table fk
- constraint fk_a_fkey3 on table fk | pk2 | constraint fk_a_fkey on table fk
- constraint fk_a_fkey4 on table fk | pk3 | constraint fk_a_fkey on table fk
- constraint fk_a_fkey5 on table fk | pk31 | constraint fk_a_fkey4 on table fk
- constraint fk_a_fkey6 on table fk | pk32 | constraint fk_a_fkey4 on table fk
- constraint fk_a_fkey on table fk1 | pk | constraint fk_a_fkey on table fk
- constraint fk_a_fkey on table fk11 | pk | constraint fk_a_fkey on table fk1
- constraint fk_a_fkey on table fk2 | pk | constraint fk_a_fkey on table fk
- constraint fk_a_fkey on table fk3 | pk | constraint fk_a_fkey on table fk
-(11 rows)
-
-CREATE TABLE fk4 (LIKE fk);
-INSERT INTO fk4 VALUES (50);
-ALTER TABLE fk ATTACH PARTITION fk4 FOR VALUES IN (50);
-ERROR: insert or update on table "fk4" violates foreign key constraint "fk_a_fkey"
-DETAIL: Key (a)=(50) is not present in table "pk".
--- Verify constraint deferrability
-CREATE SCHEMA fkpart9;
-SET search_path TO fkpart9;
-CREATE TABLE pk (a int PRIMARY KEY) PARTITION BY LIST (a);
-CREATE TABLE pk1 PARTITION OF pk FOR VALUES IN (1, 2) PARTITION BY LIST (a);
-CREATE TABLE pk11 PARTITION OF pk1 FOR VALUES IN (1);
-CREATE TABLE pk3 PARTITION OF pk FOR VALUES IN (3);
-CREATE TABLE fk (a int REFERENCES pk DEFERRABLE INITIALLY IMMEDIATE);
-INSERT INTO fk VALUES (1); -- should fail
-ERROR: insert or update on table "fk" violates foreign key constraint "fk_a_fkey"
-DETAIL: Key (a)=(1) is not present in table "pk".
-BEGIN;
-SET CONSTRAINTS fk_a_fkey DEFERRED;
-INSERT INTO fk VALUES (1);
-COMMIT; -- should fail
-ERROR: insert or update on table "fk" violates foreign key constraint "fk_a_fkey"
-DETAIL: Key (a)=(1) is not present in table "pk".
-BEGIN;
-SET CONSTRAINTS fk_a_fkey DEFERRED;
-INSERT INTO fk VALUES (1);
-INSERT INTO pk VALUES (1);
-COMMIT; -- OK
-BEGIN;
-SET CONSTRAINTS fk_a_fkey DEFERRED;
-DELETE FROM pk WHERE a = 1;
-DELETE FROM fk WHERE a = 1;
-COMMIT; -- OK
--- Verify constraint deferrability when changed by ALTER
--- Partitioned table at referencing end
-CREATE TABLE pt(f1 int, f2 int, f3 int, PRIMARY KEY(f1,f2));
-CREATE TABLE ref(f1 int, f2 int, f3 int)
- PARTITION BY list(f1);
-CREATE TABLE ref1 PARTITION OF ref FOR VALUES IN (1);
-CREATE TABLE ref2 PARTITION OF ref FOR VALUES in (2);
-ALTER TABLE ref ADD FOREIGN KEY(f1,f2) REFERENCES pt;
-ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey
- DEFERRABLE INITIALLY DEFERRED;
-INSERT INTO pt VALUES(1,2,3);
-INSERT INTO ref VALUES(1,2,3);
-BEGIN;
-DELETE FROM pt;
-DELETE FROM ref;
-ABORT;
-DROP TABLE pt, ref;
--- Multi-level partitioning at referencing end
-CREATE TABLE pt(f1 int, f2 int, f3 int, PRIMARY KEY(f1,f2));
-CREATE TABLE ref(f1 int, f2 int, f3 int)
- PARTITION BY list(f1);
-CREATE TABLE ref1_2 PARTITION OF ref FOR VALUES IN (1, 2) PARTITION BY list (f2);
-CREATE TABLE ref1 PARTITION OF ref1_2 FOR VALUES IN (1);
-CREATE TABLE ref2 PARTITION OF ref1_2 FOR VALUES IN (2) PARTITION BY list (f2);
-CREATE TABLE ref22 PARTITION OF ref2 FOR VALUES IN (2);
-ALTER TABLE ref ADD FOREIGN KEY(f1,f2) REFERENCES pt;
-INSERT INTO pt VALUES(1,2,3);
-INSERT INTO ref VALUES(1,2,3);
-ALTER TABLE ref22 ALTER CONSTRAINT ref_f1_f2_fkey
- DEFERRABLE INITIALLY IMMEDIATE; -- fails
-ERROR: cannot alter constraint "ref_f1_f2_fkey" on relation "ref22"
-DETAIL: Constraint "ref_f1_f2_fkey" is derived from constraint "ref_f1_f2_fkey" of relation "ref".
-HINT: You may alter the constraint it derives from instead.
-ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey
- DEFERRABLE INITIALLY DEFERRED;
-BEGIN;
-DELETE FROM pt;
-DELETE FROM ref;
-ABORT;
-DROP TABLE pt, ref;
--- Partitioned table at referenced end
-CREATE TABLE pt(f1 int, f2 int, f3 int, PRIMARY KEY(f1,f2))
- PARTITION BY LIST(f1);
-CREATE TABLE pt1 PARTITION OF pt FOR VALUES IN (1);
-CREATE TABLE pt2 PARTITION OF pt FOR VALUES IN (2);
-CREATE TABLE ref(f1 int, f2 int, f3 int);
-ALTER TABLE ref ADD FOREIGN KEY(f1,f2) REFERENCES pt;
-ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey
- DEFERRABLE INITIALLY DEFERRED;
-INSERT INTO pt VALUES(1,2,3);
-INSERT INTO ref VALUES(1,2,3);
-BEGIN;
-DELETE FROM pt;
-DELETE FROM ref;
-ABORT;
-DROP TABLE pt, ref;
--- Multi-level partitioning at referenced end
-CREATE TABLE pt(f1 int, f2 int, f3 int, PRIMARY KEY(f1,f2))
- PARTITION BY LIST(f1);
-CREATE TABLE pt1_2 PARTITION OF pt FOR VALUES IN (1, 2) PARTITION BY LIST (f1);
-CREATE TABLE pt1 PARTITION OF pt1_2 FOR VALUES IN (1);
-CREATE TABLE pt2 PARTITION OF pt1_2 FOR VALUES IN (2);
-CREATE TABLE ref(f1 int, f2 int, f3 int);
-ALTER TABLE ref ADD FOREIGN KEY(f1,f2) REFERENCES pt;
-ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey1
- DEFERRABLE INITIALLY DEFERRED; -- fails
-ERROR: cannot alter constraint "ref_f1_f2_fkey1" on relation "ref"
-DETAIL: Constraint "ref_f1_f2_fkey1" is derived from constraint "ref_f1_f2_fkey" of relation "ref".
-HINT: You may alter the constraint it derives from instead.
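The rule enforced here: deferrability belongs to the constraint tree as a whole, so ALTER CONSTRAINT must target the topmost (non-derived) constraint and the change propagates to the clones; touching a clone directly is rejected with the HINT above. A sketch with illustrative names (the default constraint name ac_fk_a_fkey follows the usual table_columns_fkey pattern):

CREATE TABLE ac_pk (a int PRIMARY KEY);
CREATE TABLE ac_fk (a int) PARTITION BY LIST (a);
CREATE TABLE ac_fk_1 PARTITION OF ac_fk FOR VALUES IN (1);
ALTER TABLE ac_fk ADD FOREIGN KEY (a) REFERENCES ac_pk;
-- allowed at the root; the per-partition clones follow along
ALTER TABLE ac_fk ALTER CONSTRAINT ac_fk_a_fkey DEFERRABLE INITIALLY DEFERRED;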
-ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey - DEFERRABLE INITIALLY DEFERRED; -INSERT INTO pt VALUES(1,2,3); -INSERT INTO ref VALUES(1,2,3); -BEGIN; -DELETE FROM pt; -DELETE FROM ref; -ABORT; -DROP TABLE pt, ref; -DROP SCHEMA fkpart9 CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table pk -drop cascades to table fk --- Verify ON UPDATE/DELETE behavior -CREATE SCHEMA fkpart6; -SET search_path TO fkpart6; -CREATE TABLE pk (a int PRIMARY KEY) PARTITION BY RANGE (a); -CREATE TABLE pk1 PARTITION OF pk FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a); -CREATE TABLE pk11 PARTITION OF pk1 FOR VALUES FROM (1) TO (50); -CREATE TABLE pk12 PARTITION OF pk1 FOR VALUES FROM (50) TO (100); -CREATE TABLE fk (a int) PARTITION BY RANGE (a); -CREATE TABLE fk1 PARTITION OF fk FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a); -CREATE TABLE fk11 PARTITION OF fk1 FOR VALUES FROM (1) TO (10); -CREATE TABLE fk12 PARTITION OF fk1 FOR VALUES FROM (10) TO (100); -ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk ON UPDATE CASCADE ON DELETE CASCADE; -CREATE TABLE fk_d PARTITION OF fk DEFAULT; -INSERT INTO pk VALUES (1); -INSERT INTO fk VALUES (1); -UPDATE pk SET a = 20; -SELECT tableoid::regclass, * FROM fk; - tableoid | a -----------+---- - fk12 | 20 -(1 row) - -DELETE FROM pk WHERE a = 20; -SELECT tableoid::regclass, * FROM fk; - tableoid | a -----------+--- -(0 rows) - -DROP TABLE fk; -TRUNCATE TABLE pk; -INSERT INTO pk VALUES (20), (50); -CREATE TABLE fk (a int) PARTITION BY RANGE (a); -CREATE TABLE fk1 PARTITION OF fk FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a); -CREATE TABLE fk11 PARTITION OF fk1 FOR VALUES FROM (1) TO (10); -CREATE TABLE fk12 PARTITION OF fk1 FOR VALUES FROM (10) TO (100); -ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk ON UPDATE SET NULL ON DELETE SET NULL; -CREATE TABLE fk_d PARTITION OF fk DEFAULT; -INSERT INTO fk VALUES (20), (50); -UPDATE pk SET a = 21 WHERE a = 20; -DELETE FROM pk WHERE a = 50; -SELECT tableoid::regclass, * FROM fk; - tableoid | a -----------+--- - fk_d | - fk_d | -(2 rows) - -DROP TABLE fk; -TRUNCATE TABLE pk; -INSERT INTO pk VALUES (20), (30), (50); -CREATE TABLE fk (id int, a int DEFAULT 50) PARTITION BY RANGE (a); -CREATE TABLE fk1 PARTITION OF fk FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a); -CREATE TABLE fk11 PARTITION OF fk1 FOR VALUES FROM (1) TO (10); -CREATE TABLE fk12 PARTITION OF fk1 FOR VALUES FROM (10) TO (100); -ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk ON UPDATE SET DEFAULT ON DELETE SET DEFAULT; -CREATE TABLE fk_d PARTITION OF fk DEFAULT; -INSERT INTO fk VALUES (1, 20), (2, 30); -DELETE FROM pk WHERE a = 20 RETURNING *; - a ----- - 20 -(1 row) - -UPDATE pk SET a = 90 WHERE a = 30 RETURNING *; - a ----- - 90 -(1 row) - -SELECT tableoid::regclass, * FROM fk; - tableoid | id | a -----------+----+---- - fk12 | 1 | 50 - fk12 | 2 | 50 -(2 rows) - -DROP TABLE fk; -TRUNCATE TABLE pk; -INSERT INTO pk VALUES (20), (30); -CREATE TABLE fk (a int DEFAULT 50) PARTITION BY RANGE (a); -CREATE TABLE fk1 PARTITION OF fk FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a); -CREATE TABLE fk11 PARTITION OF fk1 FOR VALUES FROM (1) TO (10); -CREATE TABLE fk12 PARTITION OF fk1 FOR VALUES FROM (10) TO (100); -ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk ON UPDATE RESTRICT ON DELETE RESTRICT; -CREATE TABLE fk_d PARTITION OF fk DEFAULT; -INSERT INTO fk VALUES (20), (30); -DELETE FROM pk WHERE a = 20; -ERROR: update or delete on table "pk11" violates RESTRICT setting of foreign key constraint "fk_a_fkey2" on table 
"fk" -DETAIL: Key (a)=(20) is referenced from table "fk". -UPDATE pk SET a = 90 WHERE a = 30; -ERROR: update or delete on table "pk" violates RESTRICT setting of foreign key constraint "fk_a_fkey" on table "fk" -DETAIL: Key (a)=(30) is referenced from table "fk". -SELECT tableoid::regclass, * FROM fk; - tableoid | a -----------+---- - fk12 | 20 - fk12 | 30 -(2 rows) - -DROP TABLE fk; --- test for reported bug: relispartition not set --- https://postgr.es/m/CA+HiwqHMsRtRYRWYTWavKJ8x14AFsv7bmAV46mYwnfD3vy8goQ@mail.gmail.com -CREATE SCHEMA fkpart7 - CREATE TABLE pkpart (a int) PARTITION BY LIST (a) - CREATE TABLE pkpart1 PARTITION OF pkpart FOR VALUES IN (1); -ALTER TABLE fkpart7.pkpart1 ADD PRIMARY KEY (a); -ALTER TABLE fkpart7.pkpart ADD PRIMARY KEY (a); -CREATE TABLE fkpart7.fk (a int REFERENCES fkpart7.pkpart); -DROP SCHEMA fkpart7 CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table fkpart7.pkpart -drop cascades to table fkpart7.fk --- ensure we check partitions are "not used" when dropping constraints -CREATE SCHEMA fkpart8 - CREATE TABLE tbl1(f1 int PRIMARY KEY) - CREATE TABLE tbl2(f1 int REFERENCES tbl1 DEFERRABLE INITIALLY DEFERRED) PARTITION BY RANGE(f1) - CREATE TABLE tbl2_p1 PARTITION OF tbl2 FOR VALUES FROM (minvalue) TO (maxvalue); -INSERT INTO fkpart8.tbl1 VALUES(1); -BEGIN; -INSERT INTO fkpart8.tbl2 VALUES(1); -ALTER TABLE fkpart8.tbl2 DROP CONSTRAINT tbl2_f1_fkey; -ERROR: cannot ALTER TABLE "tbl2_p1" because it has pending trigger events -COMMIT; -DROP SCHEMA fkpart8 CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table fkpart8.tbl1 -drop cascades to table fkpart8.tbl2 --- ensure FK referencing a multi-level partitioned table are --- enforce reference to sub-children. -CREATE SCHEMA fkpart9 - CREATE TABLE pk (a INT PRIMARY KEY) PARTITION BY RANGE (a) - CREATE TABLE fk ( - fk_a INT REFERENCES pk(a) ON DELETE CASCADE - ) - CREATE TABLE pk1 PARTITION OF pk FOR VALUES FROM (30) TO (50) PARTITION BY RANGE (a) - CREATE TABLE pk11 PARTITION OF pk1 FOR VALUES FROM (30) TO (40); -INSERT INTO fkpart9.pk VALUES (35); -INSERT INTO fkpart9.fk VALUES (35); -DELETE FROM fkpart9.pk WHERE a=35; -SELECT * FROM fkpart9.pk; - a ---- -(0 rows) - -SELECT * FROM fkpart9.fk; - fk_a ------- -(0 rows) - -DROP SCHEMA fkpart9 CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table fkpart9.pk -drop cascades to table fkpart9.fk --- test that ri_Check_Pk_Match() scans the correct partition for a deferred --- ON DELETE/UPDATE NO ACTION constraint -CREATE SCHEMA fkpart10 - CREATE TABLE tbl1(f1 int PRIMARY KEY) PARTITION BY RANGE(f1) - CREATE TABLE tbl1_p1 PARTITION OF tbl1 FOR VALUES FROM (minvalue) TO (1) - CREATE TABLE tbl1_p2 PARTITION OF tbl1 FOR VALUES FROM (1) TO (maxvalue) - CREATE TABLE tbl2(f1 int REFERENCES tbl1 DEFERRABLE INITIALLY DEFERRED) - CREATE TABLE tbl3(f1 int PRIMARY KEY) PARTITION BY RANGE(f1) - CREATE TABLE tbl3_p1 PARTITION OF tbl3 FOR VALUES FROM (minvalue) TO (1) - CREATE TABLE tbl3_p2 PARTITION OF tbl3 FOR VALUES FROM (1) TO (maxvalue) - CREATE TABLE tbl4(f1 int REFERENCES tbl3 DEFERRABLE INITIALLY DEFERRED); -INSERT INTO fkpart10.tbl1 VALUES (0), (1); -INSERT INTO fkpart10.tbl2 VALUES (0), (1); -INSERT INTO fkpart10.tbl3 VALUES (-2), (-1), (0); -INSERT INTO fkpart10.tbl4 VALUES (-2), (-1); -BEGIN; -DELETE FROM fkpart10.tbl1 WHERE f1 = 0; -UPDATE fkpart10.tbl1 SET f1 = 2 WHERE f1 = 1; -INSERT INTO fkpart10.tbl1 VALUES (0), (1); -COMMIT; --- test that cross-partition updates correctly enforces the 
foreign key --- restriction (specifically testing INITIALLY DEFERRED) -BEGIN; -UPDATE fkpart10.tbl1 SET f1 = 3 WHERE f1 = 0; -UPDATE fkpart10.tbl3 SET f1 = f1 * -1; -INSERT INTO fkpart10.tbl1 VALUES (4); -COMMIT; -ERROR: update or delete on table "tbl1" violates foreign key constraint "tbl2_f1_fkey" on table "tbl2" -DETAIL: Key (f1)=(0) is still referenced from table "tbl2". -BEGIN; -UPDATE fkpart10.tbl3 SET f1 = f1 * -1; -UPDATE fkpart10.tbl3 SET f1 = f1 + 3; -UPDATE fkpart10.tbl1 SET f1 = 3 WHERE f1 = 0; -INSERT INTO fkpart10.tbl1 VALUES (0); -COMMIT; -ERROR: update or delete on table "tbl3" violates foreign key constraint "tbl4_f1_fkey" on table "tbl4" -DETAIL: Key (f1)=(-2) is still referenced from table "tbl4". -BEGIN; -UPDATE fkpart10.tbl3 SET f1 = f1 * -1; -UPDATE fkpart10.tbl1 SET f1 = 3 WHERE f1 = 0; -INSERT INTO fkpart10.tbl1 VALUES (0); -INSERT INTO fkpart10.tbl3 VALUES (-2), (-1); -COMMIT; --- test where the updated table now has both an IMMEDIATE and a DEFERRED --- constraint pointing into it -CREATE TABLE fkpart10.tbl5(f1 int REFERENCES fkpart10.tbl3); -INSERT INTO fkpart10.tbl5 VALUES (-2), (-1); -BEGIN; -UPDATE fkpart10.tbl3 SET f1 = f1 * -3; -ERROR: update or delete on table "tbl3" violates foreign key constraint "tbl5_f1_fkey" on table "tbl5" -DETAIL: Key (f1)=(-2) is still referenced from table "tbl5". -COMMIT; --- Now test where the row referenced from the table with an IMMEDIATE --- constraint stays in place, while those referenced from the table with a --- DEFERRED constraint don't. -DELETE FROM fkpart10.tbl5; -INSERT INTO fkpart10.tbl5 VALUES (0); -BEGIN; -UPDATE fkpart10.tbl3 SET f1 = f1 * -3; -COMMIT; -ERROR: update or delete on table "tbl3" violates foreign key constraint "tbl4_f1_fkey" on table "tbl4" -DETAIL: Key (f1)=(-2) is still referenced from table "tbl4". 
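-- Note (editorial, not part of the test output): the failures above hinge on
-- constraint timing. A minimal, self-contained sketch, using hypothetical
-- demo_* names, of why a DEFERRED foreign key only complains at COMMIT
-- rather than at the offending statement:
--
--   CREATE TABLE demo_pk (a int PRIMARY KEY);
--   CREATE TABLE demo_fk (a int REFERENCES demo_pk
--                           DEFERRABLE INITIALLY DEFERRED);
--   INSERT INTO demo_pk VALUES (1);
--   INSERT INTO demo_fk VALUES (1);
--   BEGIN;
--   DELETE FROM demo_pk;  -- accepted for now; the check is deferred
--   COMMIT;               -- fails here: key 1 is still referenced
--
-- With a non-deferrable constraint, the DELETE itself would fail instead,
-- which is the IMMEDIATE-vs-DEFERRED contrast the tbl5 tests above exercise.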
-DROP SCHEMA fkpart10 CASCADE; -NOTICE: drop cascades to 5 other objects -DETAIL: drop cascades to table fkpart10.tbl1 -drop cascades to table fkpart10.tbl2 -drop cascades to table fkpart10.tbl3 -drop cascades to table fkpart10.tbl4 -drop cascades to table fkpart10.tbl5 --- verify foreign keys are enforced during cross-partition updates, --- especially on the PK side -CREATE SCHEMA fkpart11 - CREATE TABLE pk (a INT PRIMARY KEY, b text) PARTITION BY LIST (a) - CREATE TABLE fk ( - a INT, - CONSTRAINT fkey FOREIGN KEY (a) REFERENCES pk(a) ON UPDATE CASCADE ON DELETE CASCADE - ) - CREATE TABLE fk_parted ( - a INT PRIMARY KEY, - CONSTRAINT fkey FOREIGN KEY (a) REFERENCES pk(a) ON UPDATE CASCADE ON DELETE CASCADE - ) PARTITION BY LIST (a) - CREATE TABLE fk_another ( - a INT, - CONSTRAINT fkey FOREIGN KEY (a) REFERENCES fk_parted (a) ON UPDATE CASCADE ON DELETE CASCADE - ) - CREATE TABLE pk1 PARTITION OF pk FOR VALUES IN (1, 2) PARTITION BY LIST (a) - CREATE TABLE pk2 PARTITION OF pk FOR VALUES IN (3) - CREATE TABLE pk3 PARTITION OF pk FOR VALUES IN (4) - CREATE TABLE fk1 PARTITION OF fk_parted FOR VALUES IN (1, 2) - CREATE TABLE fk2 PARTITION OF fk_parted FOR VALUES IN (3) - CREATE TABLE fk3 PARTITION OF fk_parted FOR VALUES IN (4); -CREATE TABLE fkpart11.pk11 (b text, a int NOT NULL); -ALTER TABLE fkpart11.pk1 ATTACH PARTITION fkpart11.pk11 FOR VALUES IN (1); -CREATE TABLE fkpart11.pk12 (b text, c int, a int NOT NULL); -ALTER TABLE fkpart11.pk12 DROP c; -ALTER TABLE fkpart11.pk1 ATTACH PARTITION fkpart11.pk12 FOR VALUES IN (2); -INSERT INTO fkpart11.pk VALUES (1, 'xxx'), (3, 'yyy'); -INSERT INTO fkpart11.fk VALUES (1), (3); -INSERT INTO fkpart11.fk_parted VALUES (1), (3); -INSERT INTO fkpart11.fk_another VALUES (1), (3); --- moves 2 rows from one leaf partition to another, with both updates being --- cascaded to fk and fk_parted. Updates of fk_parted, of which one is --- cross-partition (3 -> 4), are further cascaded to fk_another. -UPDATE fkpart11.pk SET a = a + 1 RETURNING tableoid::pg_catalog.regclass, *; - tableoid | a | b ----------------+---+----- - fkpart11.pk12 | 2 | xxx - fkpart11.pk3 | 4 | yyy -(2 rows) - -SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk; - tableoid | a --------------+--- - fkpart11.fk | 2 - fkpart11.fk | 4 -(2 rows) - -SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk_parted; - tableoid | a ---------------+--- - fkpart11.fk1 | 2 - fkpart11.fk3 | 4 -(2 rows) - -SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk_another; - tableoid | a ----------------------+--- - fkpart11.fk_another | 2 - fkpart11.fk_another | 4 -(2 rows) - --- let's try with the foreign key pointing at tables in the partition tree --- that are not the same as the query's target table --- 1. foreign key pointing into a non-root ancestor --- --- A cross-partition update on the root table will fail, because we currently --- can't enforce the foreign keys pointing into a non-leaf partition -ALTER TABLE fkpart11.fk DROP CONSTRAINT fkey; -DELETE FROM fkpart11.fk WHERE a = 4; -ALTER TABLE fkpart11.fk ADD CONSTRAINT fkey FOREIGN KEY (a) REFERENCES fkpart11.pk1 (a) ON UPDATE CASCADE ON DELETE CASCADE; -UPDATE fkpart11.pk SET a = a - 1; -ERROR: cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key -DETAIL: A foreign key points to ancestor "pk1" but not the root ancestor "pk". -HINT: Consider defining the foreign key on table "pk". 
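-- Note (editorial, not part of the test output): a condensed sketch of the
-- limitation just demonstrated, with hypothetical names (root, mid, leaf1,
-- leaf2, r), mirroring the fkpart11 sequence above:
--
--   CREATE TABLE root (a int PRIMARY KEY) PARTITION BY RANGE (a);
--   CREATE TABLE mid PARTITION OF root
--     FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a);
--   CREATE TABLE leaf1 PARTITION OF mid FOR VALUES FROM (1) TO (50);
--   CREATE TABLE leaf2 PARTITION OF mid FOR VALUES FROM (50) TO (100);
--   CREATE TABLE r (a int REFERENCES mid ON UPDATE CASCADE);
--   INSERT INTO root VALUES (10);
--   INSERT INTO r VALUES (10);
--   UPDATE root SET a = 60;  -- rejected: the source partition's non-root
--                            -- ancestor "mid" is directly referenced
--
-- Updating the referenced ancestor itself is fine, as the next statement in
-- the test shows.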
--- it's okay though if the non-leaf partition is updated directly -UPDATE fkpart11.pk1 SET a = a - 1; -SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.pk; - tableoid | a | b ----------------+---+----- - fkpart11.pk11 | 1 | xxx - fkpart11.pk3 | 4 | yyy -(2 rows) - -SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk; - tableoid | a --------------+--- - fkpart11.fk | 1 -(1 row) - -SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk_parted; - tableoid | a ---------------+--- - fkpart11.fk1 | 1 - fkpart11.fk3 | 4 -(2 rows) - -SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk_another; - tableoid | a ----------------------+--- - fkpart11.fk_another | 4 - fkpart11.fk_another | 1 -(2 rows) - --- 2. foreign key pointing into a single leaf partition --- --- A cross-partition update that deletes from the pointed-to leaf partition --- is allowed to succeed -ALTER TABLE fkpart11.fk DROP CONSTRAINT fkey; -ALTER TABLE fkpart11.fk ADD CONSTRAINT fkey FOREIGN KEY (a) REFERENCES fkpart11.pk11 (a) ON UPDATE CASCADE ON DELETE CASCADE; --- will delete (1) from pk11, which is cascaded to fk -UPDATE fkpart11.pk SET a = a + 1 WHERE a = 1; -SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk; - tableoid | a -----------+--- -(0 rows) - -DROP TABLE fkpart11.fk; --- check that regular and deferrable AR triggers on the PK tables --- still work as expected -CREATE FUNCTION fkpart11.print_row () RETURNS TRIGGER LANGUAGE plpgsql AS $$ - BEGIN - RAISE NOTICE 'TABLE: %, OP: %, OLD: %, NEW: %', TG_RELNAME, TG_OP, OLD, NEW; - RETURN NULL; - END; -$$; -CREATE TRIGGER trig_upd_pk AFTER UPDATE ON fkpart11.pk FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); -CREATE TRIGGER trig_del_pk AFTER DELETE ON fkpart11.pk FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); -CREATE TRIGGER trig_ins_pk AFTER INSERT ON fkpart11.pk FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); -CREATE CONSTRAINT TRIGGER trig_upd_fk_parted AFTER UPDATE ON fkpart11.fk_parted INITIALLY DEFERRED FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); -CREATE CONSTRAINT TRIGGER trig_del_fk_parted AFTER DELETE ON fkpart11.fk_parted INITIALLY DEFERRED FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); -CREATE CONSTRAINT TRIGGER trig_ins_fk_parted AFTER INSERT ON fkpart11.fk_parted INITIALLY DEFERRED FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); -UPDATE fkpart11.pk SET a = 3 WHERE a = 4; -NOTICE: TABLE: pk3, OP: DELETE, OLD: (4,yyy), NEW: -NOTICE: TABLE: pk2, OP: INSERT, OLD: , NEW: (3,yyy) -NOTICE: TABLE: fk3, OP: DELETE, OLD: (4), NEW: -NOTICE: TABLE: fk2, OP: INSERT, OLD: , NEW: (3) -UPDATE fkpart11.pk SET a = 1 WHERE a = 2; -NOTICE: TABLE: pk12, OP: DELETE, OLD: (xxx,2), NEW: -NOTICE: TABLE: pk11, OP: INSERT, OLD: , NEW: (xxx,1) -NOTICE: TABLE: fk1, OP: UPDATE, OLD: (2), NEW: (1) -DROP SCHEMA fkpart11 CASCADE; -NOTICE: drop cascades to 4 other objects -DETAIL: drop cascades to table fkpart11.pk -drop cascades to table fkpart11.fk_parted -drop cascades to table fkpart11.fk_another -drop cascades to function fkpart11.print_row() --- When a table is attached as partition to a partitioned table that has --- a foreign key to another partitioned table, it acquires a clone of the --- FK. Upon detach, this clone is not removed, but instead becomes an --- independent FK. If it then attaches to the partitioned table again, --- the FK from the parent "takes over" ownership of the independent FK rather --- than creating a separate one. 
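-- Note (editorial, not part of the test output): the lifecycle described in
-- the comment above, in outline, with hypothetical demo_* names:
--
--   CREATE TABLE demo_pk (a int PRIMARY KEY);
--   CREATE TABLE demo_p (a int REFERENCES demo_pk) PARTITION BY LIST (a);
--   CREATE TABLE demo_p1 (a int);
--   ALTER TABLE demo_p ATTACH PARTITION demo_p1 FOR VALUES IN (1);
--       -- demo_p1 acquires a clone of demo_p's foreign key
--   ALTER TABLE demo_p DETACH PARTITION demo_p1;
--       -- the clone survives as an independent FK on demo_p1
--   ALTER TABLE demo_p ATTACH PARTITION demo_p1 FOR VALUES IN (1);
--       -- the parent's FK takes the independent FK over; no duplicate
--
-- The fkpart12 block below exercises exactly this, plus the enforcement
-- checks in the detached state.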
-CREATE SCHEMA fkpart12 - CREATE TABLE fk_p ( id int, jd int, PRIMARY KEY(id, jd)) PARTITION BY list (id) - CREATE TABLE fk_p_1 PARTITION OF fk_p FOR VALUES IN (1) PARTITION BY list (jd) - CREATE TABLE fk_p_1_1 PARTITION OF fk_p_1 FOR VALUES IN (1) - CREATE TABLE fk_p_1_2 (x int, y int, jd int NOT NULL, id int NOT NULL) - CREATE TABLE fk_p_2 PARTITION OF fk_p FOR VALUES IN (2) PARTITION BY list (jd) - CREATE TABLE fk_p_2_1 PARTITION OF fk_p_2 FOR VALUES IN (1) - CREATE TABLE fk_p_2_2 PARTITION OF fk_p_2 FOR VALUES IN (2) - CREATE TABLE fk_r_1 ( p_jd int NOT NULL, x int, id int PRIMARY KEY, p_id int NOT NULL) - CREATE TABLE fk_r_2 ( id int PRIMARY KEY, p_id int NOT NULL, p_jd int NOT NULL) PARTITION BY list (id) - CREATE TABLE fk_r_2_1 PARTITION OF fk_r_2 FOR VALUES IN (2, 1) - CREATE TABLE fk_r ( id int PRIMARY KEY, p_id int NOT NULL, p_jd int NOT NULL, - FOREIGN KEY (p_id, p_jd) REFERENCES fk_p (id, jd) - ) PARTITION BY list (id); -SET search_path TO fkpart12; -ALTER TABLE fk_p_1_2 DROP COLUMN x, DROP COLUMN y; -ALTER TABLE fk_p_1 ATTACH PARTITION fk_p_1_2 FOR VALUES IN (2); -ALTER TABLE fk_r_1 DROP COLUMN x; -INSERT INTO fk_p VALUES (1, 1); -ALTER TABLE fk_r ATTACH PARTITION fk_r_1 FOR VALUES IN (1); -ALTER TABLE fk_r ATTACH PARTITION fk_r_2 FOR VALUES IN (2); -\d fk_r_2 - Partitioned table "fkpart12.fk_r_2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - id | integer | | not null | - p_id | integer | | not null | - p_jd | integer | | not null | -Partition of: fk_r FOR VALUES IN (2) -Partition key: LIST (id) -Indexes: - "fk_r_2_pkey" PRIMARY KEY, btree (id) -Foreign-key constraints: - TABLE "fk_r" CONSTRAINT "fk_r_p_id_p_jd_fkey" FOREIGN KEY (p_id, p_jd) REFERENCES fk_p(id, jd) -Number of partitions: 1 (Use \d+ to list them.) - -INSERT INTO fk_r VALUES (1, 1, 1); -INSERT INTO fk_r VALUES (2, 2, 1); -ERROR: insert or update on table "fk_r_2_1" violates foreign key constraint "fk_r_p_id_p_jd_fkey" -DETAIL: Key (p_id, p_jd)=(2, 1) is not present in table "fk_p". -ALTER TABLE fk_r DETACH PARTITION fk_r_1; -ALTER TABLE fk_r DETACH PARTITION fk_r_2; -\d fk_r_2 - Partitioned table "fkpart12.fk_r_2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - id | integer | | not null | - p_id | integer | | not null | - p_jd | integer | | not null | -Partition key: LIST (id) -Indexes: - "fk_r_2_pkey" PRIMARY KEY, btree (id) -Foreign-key constraints: - "fk_r_p_id_p_jd_fkey" FOREIGN KEY (p_id, p_jd) REFERENCES fk_p(id, jd) -Number of partitions: 1 (Use \d+ to list them.) - -INSERT INTO fk_r_1 (id, p_id, p_jd) VALUES (2, 1, 2); -- should fail -ERROR: insert or update on table "fk_r_1" violates foreign key constraint "fk_r_p_id_p_jd_fkey" -DETAIL: Key (p_id, p_jd)=(1, 2) is not present in table "fk_p". -DELETE FROM fk_p; -- should fail -ERROR: update or delete on table "fk_p_1_1" violates foreign key constraint "fk_r_1_p_id_p_jd_fkey1" on table "fk_r_1" -DETAIL: Key (id, jd)=(1, 1) is still referenced from table "fk_r_1". 
-ALTER TABLE fk_r ATTACH PARTITION fk_r_1 FOR VALUES IN (1); -ALTER TABLE fk_r ATTACH PARTITION fk_r_2 FOR VALUES IN (2); -\d fk_r_2 - Partitioned table "fkpart12.fk_r_2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - id | integer | | not null | - p_id | integer | | not null | - p_jd | integer | | not null | -Partition of: fk_r FOR VALUES IN (2) -Partition key: LIST (id) -Indexes: - "fk_r_2_pkey" PRIMARY KEY, btree (id) -Foreign-key constraints: - TABLE "fk_r" CONSTRAINT "fk_r_p_id_p_jd_fkey" FOREIGN KEY (p_id, p_jd) REFERENCES fk_p(id, jd) -Number of partitions: 1 (Use \d+ to list them.) - -DELETE FROM fk_p; -- should fail -ERROR: update or delete on table "fk_p_1_1" violates foreign key constraint "fk_r_p_id_p_jd_fkey2" on table "fk_r" -DETAIL: Key (id, jd)=(1, 1) is still referenced from table "fk_r". --- these should all fail -ALTER TABLE fk_r_1 DROP CONSTRAINT fk_r_p_id_p_jd_fkey; -ERROR: cannot drop inherited constraint "fk_r_p_id_p_jd_fkey" of relation "fk_r_1" -ALTER TABLE fk_r DROP CONSTRAINT fk_r_p_id_p_jd_fkey1; -ERROR: cannot drop inherited constraint "fk_r_p_id_p_jd_fkey1" of relation "fk_r" -ALTER TABLE fk_r_2 DROP CONSTRAINT fk_r_p_id_p_jd_fkey; -ERROR: cannot drop inherited constraint "fk_r_p_id_p_jd_fkey" of relation "fk_r_2" -SET client_min_messages TO warning; -DROP SCHEMA fkpart12 CASCADE; -RESET client_min_messages; -RESET search_path; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/cluster.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/cluster.out --- /Users/admin/pgsql/src/test/regress/expected/cluster.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/cluster.out 2024-12-13 13:20:11 @@ -1,675 +1,2 @@ --- --- CLUSTER --- -CREATE TABLE clstr_tst_s (rf_a SERIAL PRIMARY KEY, - b INT); -CREATE TABLE clstr_tst (a SERIAL PRIMARY KEY, - b INT, - c TEXT, - d TEXT, - CONSTRAINT clstr_tst_con FOREIGN KEY (b) REFERENCES clstr_tst_s); -CREATE INDEX clstr_tst_b ON clstr_tst (b); -CREATE INDEX clstr_tst_c ON clstr_tst (c); -CREATE INDEX clstr_tst_c_b ON clstr_tst (c,b); -CREATE INDEX clstr_tst_b_c ON clstr_tst (b,c); -INSERT INTO clstr_tst_s (b) VALUES (0); -INSERT INTO clstr_tst_s (b) SELECT b FROM clstr_tst_s; -INSERT INTO clstr_tst_s (b) SELECT b FROM clstr_tst_s; -INSERT INTO clstr_tst_s (b) SELECT b FROM clstr_tst_s; -INSERT INTO clstr_tst_s (b) SELECT b FROM clstr_tst_s; -INSERT INTO clstr_tst_s (b) SELECT b FROM clstr_tst_s; -CREATE TABLE clstr_tst_inh () INHERITS (clstr_tst); -INSERT INTO clstr_tst (b, c) VALUES (11, 'once'); -INSERT INTO clstr_tst (b, c) VALUES (10, 'diez'); -INSERT INTO clstr_tst (b, c) VALUES (31, 'treinta y uno'); -INSERT INTO clstr_tst (b, c) VALUES (22, 'veintidos'); -INSERT INTO clstr_tst (b, c) VALUES (3, 'tres'); -INSERT INTO clstr_tst (b, c) VALUES (20, 'veinte'); -INSERT INTO clstr_tst (b, c) VALUES (23, 'veintitres'); -INSERT INTO clstr_tst (b, c) VALUES (21, 'veintiuno'); -INSERT INTO clstr_tst (b, c) VALUES (4, 'cuatro'); -INSERT INTO clstr_tst (b, c) VALUES (14, 'catorce'); -INSERT INTO clstr_tst (b, c) VALUES (2, 'dos'); -INSERT INTO clstr_tst (b, c) VALUES (18, 'dieciocho'); -INSERT INTO clstr_tst (b, c) VALUES (27, 'veintisiete'); -INSERT INTO clstr_tst (b, c) VALUES (25, 
'veinticinco'); -INSERT INTO clstr_tst (b, c) VALUES (13, 'trece'); -INSERT INTO clstr_tst (b, c) VALUES (28, 'veintiocho'); -INSERT INTO clstr_tst (b, c) VALUES (32, 'treinta y dos'); -INSERT INTO clstr_tst (b, c) VALUES (5, 'cinco'); -INSERT INTO clstr_tst (b, c) VALUES (29, 'veintinueve'); -INSERT INTO clstr_tst (b, c) VALUES (1, 'uno'); -INSERT INTO clstr_tst (b, c) VALUES (24, 'veinticuatro'); -INSERT INTO clstr_tst (b, c) VALUES (30, 'treinta'); -INSERT INTO clstr_tst (b, c) VALUES (12, 'doce'); -INSERT INTO clstr_tst (b, c) VALUES (17, 'diecisiete'); -INSERT INTO clstr_tst (b, c) VALUES (9, 'nueve'); -INSERT INTO clstr_tst (b, c) VALUES (19, 'diecinueve'); -INSERT INTO clstr_tst (b, c) VALUES (26, 'veintiseis'); -INSERT INTO clstr_tst (b, c) VALUES (15, 'quince'); -INSERT INTO clstr_tst (b, c) VALUES (7, 'siete'); -INSERT INTO clstr_tst (b, c) VALUES (16, 'dieciseis'); -INSERT INTO clstr_tst (b, c) VALUES (8, 'ocho'); --- This entry is needed to test that TOASTED values are copied correctly. -INSERT INTO clstr_tst (b, c, d) VALUES (6, 'seis', repeat('xyzzy', 100000)); -CLUSTER clstr_tst_c ON clstr_tst; -SELECT a,b,c,substring(d for 30), length(d) from clstr_tst; - a | b | c | substring | length -----+----+---------------+--------------------------------+-------- - 10 | 14 | catorce | | - 18 | 5 | cinco | | - 9 | 4 | cuatro | | - 26 | 19 | diecinueve | | - 12 | 18 | dieciocho | | - 30 | 16 | dieciseis | | - 24 | 17 | diecisiete | | - 2 | 10 | diez | | - 23 | 12 | doce | | - 11 | 2 | dos | | - 25 | 9 | nueve | | - 31 | 8 | ocho | | - 1 | 11 | once | | - 28 | 15 | quince | | - 32 | 6 | seis | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzy | 500000 - 29 | 7 | siete | | - 15 | 13 | trece | | - 22 | 30 | treinta | | - 17 | 32 | treinta y dos | | - 3 | 31 | treinta y uno | | - 5 | 3 | tres | | - 20 | 1 | uno | | - 6 | 20 | veinte | | - 14 | 25 | veinticinco | | - 21 | 24 | veinticuatro | | - 4 | 22 | veintidos | | - 19 | 29 | veintinueve | | - 16 | 28 | veintiocho | | - 27 | 26 | veintiseis | | - 13 | 27 | veintisiete | | - 7 | 23 | veintitres | | - 8 | 21 | veintiuno | | -(32 rows) - -SELECT a,b,c,substring(d for 30), length(d) from clstr_tst ORDER BY a; - a | b | c | substring | length -----+----+---------------+--------------------------------+-------- - 1 | 11 | once | | - 2 | 10 | diez | | - 3 | 31 | treinta y uno | | - 4 | 22 | veintidos | | - 5 | 3 | tres | | - 6 | 20 | veinte | | - 7 | 23 | veintitres | | - 8 | 21 | veintiuno | | - 9 | 4 | cuatro | | - 10 | 14 | catorce | | - 11 | 2 | dos | | - 12 | 18 | dieciocho | | - 13 | 27 | veintisiete | | - 14 | 25 | veinticinco | | - 15 | 13 | trece | | - 16 | 28 | veintiocho | | - 17 | 32 | treinta y dos | | - 18 | 5 | cinco | | - 19 | 29 | veintinueve | | - 20 | 1 | uno | | - 21 | 24 | veinticuatro | | - 22 | 30 | treinta | | - 23 | 12 | doce | | - 24 | 17 | diecisiete | | - 25 | 9 | nueve | | - 26 | 19 | diecinueve | | - 27 | 26 | veintiseis | | - 28 | 15 | quince | | - 29 | 7 | siete | | - 30 | 16 | dieciseis | | - 31 | 8 | ocho | | - 32 | 6 | seis | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzy | 500000 -(32 rows) - -SELECT a,b,c,substring(d for 30), length(d) from clstr_tst ORDER BY b; - a | b | c | substring | length -----+----+---------------+--------------------------------+-------- - 20 | 1 | uno | | - 11 | 2 | dos | | - 5 | 3 | tres | | - 9 | 4 | cuatro | | - 18 | 5 | cinco | | - 32 | 6 | seis | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzy | 500000 - 29 | 7 | siete | | - 31 | 8 | ocho | | - 25 | 9 | nueve | | - 2 | 10 | diez | | - 1 | 11 | once | | - 23 | 12 | doce | | - 15 
| 13 | trece | | - 10 | 14 | catorce | | - 28 | 15 | quince | | - 30 | 16 | dieciseis | | - 24 | 17 | diecisiete | | - 12 | 18 | dieciocho | | - 26 | 19 | diecinueve | | - 6 | 20 | veinte | | - 8 | 21 | veintiuno | | - 4 | 22 | veintidos | | - 7 | 23 | veintitres | | - 21 | 24 | veinticuatro | | - 14 | 25 | veinticinco | | - 27 | 26 | veintiseis | | - 13 | 27 | veintisiete | | - 16 | 28 | veintiocho | | - 19 | 29 | veintinueve | | - 22 | 30 | treinta | | - 3 | 31 | treinta y uno | | - 17 | 32 | treinta y dos | | -(32 rows) - -SELECT a,b,c,substring(d for 30), length(d) from clstr_tst ORDER BY c; - a | b | c | substring | length -----+----+---------------+--------------------------------+-------- - 10 | 14 | catorce | | - 18 | 5 | cinco | | - 9 | 4 | cuatro | | - 26 | 19 | diecinueve | | - 12 | 18 | dieciocho | | - 30 | 16 | dieciseis | | - 24 | 17 | diecisiete | | - 2 | 10 | diez | | - 23 | 12 | doce | | - 11 | 2 | dos | | - 25 | 9 | nueve | | - 31 | 8 | ocho | | - 1 | 11 | once | | - 28 | 15 | quince | | - 32 | 6 | seis | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzy | 500000 - 29 | 7 | siete | | - 15 | 13 | trece | | - 22 | 30 | treinta | | - 17 | 32 | treinta y dos | | - 3 | 31 | treinta y uno | | - 5 | 3 | tres | | - 20 | 1 | uno | | - 6 | 20 | veinte | | - 14 | 25 | veinticinco | | - 21 | 24 | veinticuatro | | - 4 | 22 | veintidos | | - 19 | 29 | veintinueve | | - 16 | 28 | veintiocho | | - 27 | 26 | veintiseis | | - 13 | 27 | veintisiete | | - 7 | 23 | veintitres | | - 8 | 21 | veintiuno | | -(32 rows) - --- Verify that inheritance link still works -INSERT INTO clstr_tst_inh VALUES (0, 100, 'in child table'); -SELECT a,b,c,substring(d for 30), length(d) from clstr_tst; - a | b | c | substring | length -----+-----+----------------+--------------------------------+-------- - 10 | 14 | catorce | | - 18 | 5 | cinco | | - 9 | 4 | cuatro | | - 26 | 19 | diecinueve | | - 12 | 18 | dieciocho | | - 30 | 16 | dieciseis | | - 24 | 17 | diecisiete | | - 2 | 10 | diez | | - 23 | 12 | doce | | - 11 | 2 | dos | | - 25 | 9 | nueve | | - 31 | 8 | ocho | | - 1 | 11 | once | | - 28 | 15 | quince | | - 32 | 6 | seis | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzy | 500000 - 29 | 7 | siete | | - 15 | 13 | trece | | - 22 | 30 | treinta | | - 17 | 32 | treinta y dos | | - 3 | 31 | treinta y uno | | - 5 | 3 | tres | | - 20 | 1 | uno | | - 6 | 20 | veinte | | - 14 | 25 | veinticinco | | - 21 | 24 | veinticuatro | | - 4 | 22 | veintidos | | - 19 | 29 | veintinueve | | - 16 | 28 | veintiocho | | - 27 | 26 | veintiseis | | - 13 | 27 | veintisiete | | - 7 | 23 | veintitres | | - 8 | 21 | veintiuno | | - 0 | 100 | in child table | | -(33 rows) - --- Verify that foreign key link still works -INSERT INTO clstr_tst (b, c) VALUES (1111, 'this should fail'); -ERROR: insert or update on table "clstr_tst" violates foreign key constraint "clstr_tst_con" -DETAIL: Key (b)=(1111) is not present in table "clstr_tst_s". 
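-- Note (editorial, not part of the test output): this file mixes the two
-- accepted CLUSTER spellings; they are equivalent:
--
--   CLUSTER clstr_tst_c ON clstr_tst;      -- historical syntax: index ON table
--   CLUSTER clstr_tst USING clstr_tst_c;   -- current syntax: table USING index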
-SELECT conname FROM pg_constraint WHERE conrelid = 'clstr_tst'::regclass -ORDER BY 1; - conname ----------------------- - clstr_tst_a_not_null - clstr_tst_con - clstr_tst_pkey -(3 rows) - -SELECT relname, relkind, - EXISTS(SELECT 1 FROM pg_class WHERE oid = c.reltoastrelid) AS hastoast -FROM pg_class c WHERE relname LIKE 'clstr_tst%' ORDER BY relname; - relname | relkind | hastoast -----------------------+---------+---------- - clstr_tst | r | t - clstr_tst_a_seq | S | f - clstr_tst_b | i | f - clstr_tst_b_c | i | f - clstr_tst_c | i | f - clstr_tst_c_b | i | f - clstr_tst_inh | r | t - clstr_tst_pkey | i | f - clstr_tst_s | r | f - clstr_tst_s_pkey | i | f - clstr_tst_s_rf_a_seq | S | f -(11 rows) - --- Verify that indisclustered is correctly set -SELECT pg_class.relname FROM pg_index, pg_class, pg_class AS pg_class_2 -WHERE pg_class.oid=indexrelid - AND indrelid=pg_class_2.oid - AND pg_class_2.relname = 'clstr_tst' - AND indisclustered; - relname -------------- - clstr_tst_c -(1 row) - --- Try changing indisclustered -ALTER TABLE clstr_tst CLUSTER ON clstr_tst_b_c; -SELECT pg_class.relname FROM pg_index, pg_class, pg_class AS pg_class_2 -WHERE pg_class.oid=indexrelid - AND indrelid=pg_class_2.oid - AND pg_class_2.relname = 'clstr_tst' - AND indisclustered; - relname ---------------- - clstr_tst_b_c -(1 row) - --- Try turning off all clustering -ALTER TABLE clstr_tst SET WITHOUT CLUSTER; -SELECT pg_class.relname FROM pg_index, pg_class, pg_class AS pg_class_2 -WHERE pg_class.oid=indexrelid - AND indrelid=pg_class_2.oid - AND pg_class_2.relname = 'clstr_tst' - AND indisclustered; - relname ---------- -(0 rows) - --- Verify that toast tables are clusterable -CLUSTER pg_toast.pg_toast_826 USING pg_toast_826_index; --- Verify that clustering all tables does in fact cluster the right ones -CREATE USER regress_clstr_user; -CREATE TABLE clstr_1 (a INT PRIMARY KEY); -CREATE TABLE clstr_2 (a INT PRIMARY KEY); -CREATE TABLE clstr_3 (a INT PRIMARY KEY); -ALTER TABLE clstr_1 OWNER TO regress_clstr_user; -ALTER TABLE clstr_3 OWNER TO regress_clstr_user; -GRANT SELECT ON clstr_2 TO regress_clstr_user; -INSERT INTO clstr_1 VALUES (2); -INSERT INTO clstr_1 VALUES (1); -INSERT INTO clstr_2 VALUES (2); -INSERT INTO clstr_2 VALUES (1); -INSERT INTO clstr_3 VALUES (2); -INSERT INTO clstr_3 VALUES (1); --- "CLUSTER <tablename>" on a table that hasn't been clustered -CLUSTER clstr_2; -ERROR: there is no previously clustered index for table "clstr_2" -CLUSTER clstr_1_pkey ON clstr_1; -CLUSTER clstr_2 USING clstr_2_pkey; -SELECT * FROM clstr_1 UNION ALL - SELECT * FROM clstr_2 UNION ALL - SELECT * FROM clstr_3; - a ---- - 1 - 2 - 1 - 2 - 2 - 1 -(6 rows) - --- revert to the original state -DELETE FROM clstr_1; -DELETE FROM clstr_2; -DELETE FROM clstr_3; -INSERT INTO clstr_1 VALUES (2); -INSERT INTO clstr_1 VALUES (1); -INSERT INTO clstr_2 VALUES (2); -INSERT INTO clstr_2 VALUES (1); -INSERT INTO clstr_3 VALUES (2); -INSERT INTO clstr_3 VALUES (1); --- this user can only cluster clstr_1 and clstr_3, but the latter --- has not been clustered -SET SESSION AUTHORIZATION regress_clstr_user; -SET client_min_messages = ERROR; -- order of "skipping" warnings may vary -CLUSTER; -RESET client_min_messages; -SELECT * FROM clstr_1 UNION ALL - SELECT * FROM clstr_2 UNION ALL - SELECT * FROM clstr_3; - a ---- - 1 - 2 - 2 - 1 - 2 - 1 -(6 rows) - --- cluster a single table using the indisclustered bit previously set -DELETE FROM clstr_1; -INSERT INTO clstr_1 VALUES (2); -INSERT INTO clstr_1 VALUES (1); -CLUSTER clstr_1; -SELECT * FROM 
clstr_1; - a ---- - 1 - 2 -(2 rows) - --- Test MVCC-safety of cluster. There isn't much we can do to verify the --- results with a single backend... -CREATE TABLE clustertest (key int PRIMARY KEY); -INSERT INTO clustertest VALUES (10); -INSERT INTO clustertest VALUES (20); -INSERT INTO clustertest VALUES (30); -INSERT INTO clustertest VALUES (40); -INSERT INTO clustertest VALUES (50); --- Use a transaction so that updates are not committed when CLUSTER sees 'em -BEGIN; --- Test update where the old row version is found first in the scan -UPDATE clustertest SET key = 100 WHERE key = 10; --- Test update where the new row version is found first in the scan -UPDATE clustertest SET key = 35 WHERE key = 40; --- Test longer update chain -UPDATE clustertest SET key = 60 WHERE key = 50; -UPDATE clustertest SET key = 70 WHERE key = 60; -UPDATE clustertest SET key = 80 WHERE key = 70; -SELECT * FROM clustertest; - key ------ - 20 - 30 - 100 - 35 - 80 -(5 rows) - -CLUSTER clustertest_pkey ON clustertest; -SELECT * FROM clustertest; - key ------ - 20 - 30 - 35 - 80 - 100 -(5 rows) - -COMMIT; -SELECT * FROM clustertest; - key ------ - 20 - 30 - 35 - 80 - 100 -(5 rows) - --- check that temp tables can be clustered -create temp table clstr_temp (col1 int primary key, col2 text); -insert into clstr_temp values (2, 'two'), (1, 'one'); -cluster clstr_temp using clstr_temp_pkey; -select * from clstr_temp; - col1 | col2 -------+------ - 1 | one - 2 | two -(2 rows) - -drop table clstr_temp; -RESET SESSION AUTHORIZATION; --- check clustering an empty table -DROP TABLE clustertest; -CREATE TABLE clustertest (f1 int PRIMARY KEY); -CLUSTER clustertest USING clustertest_pkey; -CLUSTER clustertest; --- Check that partitioned tables can be clustered -CREATE TABLE clstrpart (a int) PARTITION BY RANGE (a); -CREATE TABLE clstrpart1 PARTITION OF clstrpart FOR VALUES FROM (1) TO (10) PARTITION BY RANGE (a); -CREATE TABLE clstrpart11 PARTITION OF clstrpart1 FOR VALUES FROM (1) TO (5); -CREATE TABLE clstrpart12 PARTITION OF clstrpart1 FOR VALUES FROM (5) TO (10) PARTITION BY RANGE (a); -CREATE TABLE clstrpart2 PARTITION OF clstrpart FOR VALUES FROM (10) TO (20); -CREATE TABLE clstrpart3 PARTITION OF clstrpart DEFAULT PARTITION BY RANGE (a); -CREATE TABLE clstrpart33 PARTITION OF clstrpart3 DEFAULT; -CREATE INDEX clstrpart_only_idx ON ONLY clstrpart (a); -CLUSTER clstrpart USING clstrpart_only_idx; -- fails -ERROR: cannot cluster on invalid index "clstrpart_only_idx" -DROP INDEX clstrpart_only_idx; -CREATE INDEX clstrpart_idx ON clstrpart (a); --- Check that clustering sets new relfilenodes: -CREATE TEMP TABLE old_cluster_info AS SELECT relname, level, relfilenode, relkind FROM pg_partition_tree('clstrpart'::regclass) AS tree JOIN pg_class c ON c.oid=tree.relid ; -CLUSTER clstrpart USING clstrpart_idx; -CREATE TEMP TABLE new_cluster_info AS SELECT relname, level, relfilenode, relkind FROM pg_partition_tree('clstrpart'::regclass) AS tree JOIN pg_class c ON c.oid=tree.relid ; -SELECT relname, old.level, old.relkind, old.relfilenode = new.relfilenode FROM old_cluster_info AS old JOIN new_cluster_info AS new USING (relname) ORDER BY relname COLLATE "C"; - relname | level | relkind | ?column? 
--------------+-------+---------+---------- - clstrpart | 0 | p | t - clstrpart1 | 1 | p | t - clstrpart11 | 2 | r | f - clstrpart12 | 2 | p | t - clstrpart2 | 1 | r | f - clstrpart3 | 1 | p | t - clstrpart33 | 2 | r | f -(7 rows) - --- Partitioned indexes aren't and can't be marked un/clustered: -\d clstrpart - Partitioned table "public.clstrpart" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | -Partition key: RANGE (a) -Indexes: - "clstrpart_idx" btree (a) -Number of partitions: 3 (Use \d+ to list them.) - -CLUSTER clstrpart; -ERROR: there is no previously clustered index for table "clstrpart" -ALTER TABLE clstrpart SET WITHOUT CLUSTER; -ERROR: cannot mark index clustered in partitioned table -ALTER TABLE clstrpart CLUSTER ON clstrpart_idx; -ERROR: cannot mark index clustered in partitioned table -DROP TABLE clstrpart; --- Ownership of partitions is checked -CREATE TABLE ptnowner(i int unique) PARTITION BY LIST (i); -CREATE INDEX ptnowner_i_idx ON ptnowner(i); -CREATE TABLE ptnowner1 PARTITION OF ptnowner FOR VALUES IN (1); -CREATE ROLE regress_ptnowner; -CREATE TABLE ptnowner2 PARTITION OF ptnowner FOR VALUES IN (2); -ALTER TABLE ptnowner1 OWNER TO regress_ptnowner; -SET SESSION AUTHORIZATION regress_ptnowner; -CLUSTER ptnowner USING ptnowner_i_idx; -ERROR: permission denied for table ptnowner -RESET SESSION AUTHORIZATION; -ALTER TABLE ptnowner OWNER TO regress_ptnowner; -CREATE TEMP TABLE ptnowner_oldnodes AS - SELECT oid, relname, relfilenode FROM pg_partition_tree('ptnowner') AS tree - JOIN pg_class AS c ON c.oid=tree.relid; -SET SESSION AUTHORIZATION regress_ptnowner; -CLUSTER ptnowner USING ptnowner_i_idx; -WARNING: permission denied to cluster "ptnowner2", skipping it -RESET SESSION AUTHORIZATION; -SELECT a.relname, a.relfilenode=b.relfilenode FROM pg_class a - JOIN ptnowner_oldnodes b USING (oid) ORDER BY a.relname COLLATE "C"; - relname | ?column? 
------------+---------- - ptnowner | t - ptnowner1 | f - ptnowner2 | t -(3 rows) - -DROP TABLE ptnowner; -DROP ROLE regress_ptnowner; --- Test CLUSTER with external tuplesorting -create table clstr_4 as select * from tenk1; -create index cluster_sort on clstr_4 (hundred, thousand, tenthous); --- ensure we don't use the index in CLUSTER nor the checking SELECTs -set enable_indexscan = off; --- Use external sort: -set maintenance_work_mem = '1MB'; -cluster clstr_4 using cluster_sort; -select * from -(select hundred, lag(hundred) over () as lhundred, - thousand, lag(thousand) over () as lthousand, - tenthous, lag(tenthous) over () as ltenthous from clstr_4) ss -where row(hundred, thousand, tenthous) <= row(lhundred, lthousand, ltenthous); - hundred | lhundred | thousand | lthousand | tenthous | ltenthous ----------+----------+----------+-----------+----------+----------- -(0 rows) - -reset enable_indexscan; -reset maintenance_work_mem; --- test CLUSTER on expression index -CREATE TABLE clstr_expression(id serial primary key, a int, b text COLLATE "C"); -INSERT INTO clstr_expression(a, b) SELECT g.i % 42, 'prefix'||g.i FROM generate_series(1, 133) g(i); -CREATE INDEX clstr_expression_minus_a ON clstr_expression ((-a), b); -CREATE INDEX clstr_expression_upper_b ON clstr_expression ((upper(b))); --- verify indexes work before cluster -BEGIN; -SET LOCAL enable_seqscan = false; -EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3'; - QUERY PLAN ---------------------------------------------------------------- - Index Scan using clstr_expression_upper_b on clstr_expression - Index Cond: (upper(b) = 'PREFIX3'::text) -(2 rows) - -SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3'; - id | a | b -----+---+--------- - 3 | 3 | prefix3 -(1 row) - -EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b; - QUERY PLAN ---------------------------------------------------------------- - Index Scan using clstr_expression_minus_a on clstr_expression - Index Cond: ((- a) = '-3'::integer) -(2 rows) - -SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b; - id | a | b ------+---+----------- - 129 | 3 | prefix129 - 3 | 3 | prefix3 - 45 | 3 | prefix45 - 87 | 3 | prefix87 -(4 rows) - -COMMIT; --- and after clustering on clstr_expression_minus_a -CLUSTER clstr_expression USING clstr_expression_minus_a; -WITH rows AS - (SELECT ctid, lag(a) OVER (ORDER BY ctid) AS la, a FROM clstr_expression) -SELECT * FROM rows WHERE la < a; - ctid | la | a -------+----+--- -(0 rows) - -BEGIN; -SET LOCAL enable_seqscan = false; -EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3'; - QUERY PLAN ---------------------------------------------------------------- - Index Scan using clstr_expression_upper_b on clstr_expression - Index Cond: (upper(b) = 'PREFIX3'::text) -(2 rows) - -SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3'; - id | a | b -----+---+--------- - 3 | 3 | prefix3 -(1 row) - -EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b; - QUERY PLAN ---------------------------------------------------------------- - Index Scan using clstr_expression_minus_a on clstr_expression - Index Cond: ((- a) = '-3'::integer) -(2 rows) - -SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b; - id | a | b ------+---+----------- - 129 | 3 | prefix129 - 3 | 3 | prefix3 - 45 | 3 | prefix45 - 87 | 3 | prefix87 -(4 rows) - -COMMIT; --- and after clustering on clstr_expression_upper_b -CLUSTER clstr_expression USING 
clstr_expression_upper_b; -WITH rows AS - (SELECT ctid, lag(b) OVER (ORDER BY ctid) AS lb, b FROM clstr_expression) -SELECT * FROM rows WHERE upper(lb) > upper(b); - ctid | lb | b -------+----+--- -(0 rows) - -BEGIN; -SET LOCAL enable_seqscan = false; -EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3'; - QUERY PLAN ---------------------------------------------------------------- - Index Scan using clstr_expression_upper_b on clstr_expression - Index Cond: (upper(b) = 'PREFIX3'::text) -(2 rows) - -SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3'; - id | a | b -----+---+--------- - 3 | 3 | prefix3 -(1 row) - -EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b; - QUERY PLAN ---------------------------------------------------------------- - Index Scan using clstr_expression_minus_a on clstr_expression - Index Cond: ((- a) = '-3'::integer) -(2 rows) - -SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b; - id | a | b ------+---+----------- - 129 | 3 | prefix129 - 3 | 3 | prefix3 - 45 | 3 | prefix45 - 87 | 3 | prefix87 -(4 rows) - -COMMIT; --- clean up -DROP TABLE clustertest; -DROP TABLE clstr_1; -DROP TABLE clstr_2; -DROP TABLE clstr_3; -DROP TABLE clstr_4; -DROP TABLE clstr_expression; -DROP USER regress_clstr_user; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/dependency.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/dependency.out --- /Users/admin/pgsql/src/test/regress/expected/dependency.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/dependency.out 2024-12-13 13:20:11 @@ -1,153 +1,2 @@ --- --- DEPENDENCIES --- -CREATE USER regress_dep_user; -CREATE USER regress_dep_user2; -CREATE USER regress_dep_user3; -CREATE GROUP regress_dep_group; -CREATE TABLE deptest (f1 serial primary key, f2 text); -GRANT SELECT ON TABLE deptest TO GROUP regress_dep_group; -GRANT ALL ON TABLE deptest TO regress_dep_user, regress_dep_user2; --- can't drop either one yet, because both still hold privileges somewhere -DROP USER regress_dep_user; -ERROR: role "regress_dep_user" cannot be dropped because some objects depend on it -DETAIL: privileges for table deptest -DROP GROUP regress_dep_group; -ERROR: role "regress_dep_group" cannot be dropped because some objects depend on it -DETAIL: privileges for table deptest --- if we revoke the privileges we can drop the group -REVOKE SELECT ON deptest FROM GROUP regress_dep_group; -DROP GROUP regress_dep_group; --- can't drop the user if we revoke the privileges partially -REVOKE SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, MAINTAIN ON deptest FROM regress_dep_user; -DROP USER regress_dep_user; -ERROR: role "regress_dep_user" cannot be dropped because some objects depend on it -DETAIL: privileges for table deptest --- now we are OK to drop the user -REVOKE TRIGGER ON deptest FROM regress_dep_user; -DROP USER regress_dep_user; --- we are OK too if we drop the privileges all at once -REVOKE ALL ON deptest FROM regress_dep_user2; -DROP USER regress_dep_user2; --- can't drop the owner of an object --- the error message detail here would include a pg_toast_nnn name that --- is not constant, so suppress it -\set VERBOSITY terse -ALTER TABLE deptest OWNER TO regress_dep_user3; -DROP USER 
regress_dep_user3; -ERROR: role "regress_dep_user3" cannot be dropped because some objects depend on it -\set VERBOSITY default --- if we drop the object, we can drop the user too -DROP TABLE deptest; -DROP USER regress_dep_user3; --- Test DROP OWNED -CREATE USER regress_dep_user0; -CREATE USER regress_dep_user1; -CREATE USER regress_dep_user2; -SET SESSION AUTHORIZATION regress_dep_user0; --- permission denied -DROP OWNED BY regress_dep_user1; -ERROR: permission denied to drop objects -DETAIL: Only roles with privileges of role "regress_dep_user1" may drop objects owned by it. -DROP OWNED BY regress_dep_user0, regress_dep_user2; -ERROR: permission denied to drop objects -DETAIL: Only roles with privileges of role "regress_dep_user2" may drop objects owned by it. -REASSIGN OWNED BY regress_dep_user0 TO regress_dep_user1; -ERROR: permission denied to reassign objects -DETAIL: Only roles with privileges of role "regress_dep_user1" may reassign objects to it. -REASSIGN OWNED BY regress_dep_user1 TO regress_dep_user0; -ERROR: permission denied to reassign objects -DETAIL: Only roles with privileges of role "regress_dep_user1" may reassign objects owned by it. --- this one is allowed -DROP OWNED BY regress_dep_user0; -CREATE TABLE deptest1 (f1 int unique); -GRANT ALL ON deptest1 TO regress_dep_user1 WITH GRANT OPTION; -SET SESSION AUTHORIZATION regress_dep_user1; -CREATE TABLE deptest (a serial primary key, b text); -GRANT ALL ON deptest1 TO regress_dep_user2; -RESET SESSION AUTHORIZATION; -\z deptest1 - Access privileges - Schema | Name | Type | Access privileges | Column privileges | Policies ---------+----------+-------+------------------------------------------------------+-------------------+---------- - public | deptest1 | table | regress_dep_user0=arwdDxtm/regress_dep_user0 +| | - | | | regress_dep_user1=a*r*w*d*D*x*t*m*/regress_dep_user0+| | - | | | regress_dep_user2=arwdDxtm/regress_dep_user1 | | -(1 row) - -DROP OWNED BY regress_dep_user1; --- all grants revoked -\z deptest1 - Access privileges - Schema | Name | Type | Access privileges | Column privileges | Policies ---------+----------+-------+----------------------------------------------+-------------------+---------- - public | deptest1 | table | regress_dep_user0=arwdDxtm/regress_dep_user0 | | -(1 row) - --- table was dropped -\d deptest --- Test REASSIGN OWNED -GRANT ALL ON deptest1 TO regress_dep_user1; -GRANT CREATE ON DATABASE regression TO regress_dep_user1; -SET SESSION AUTHORIZATION regress_dep_user1; -CREATE SCHEMA deptest; -CREATE TABLE deptest (a serial primary key, b text); -ALTER DEFAULT PRIVILEGES FOR ROLE regress_dep_user1 IN SCHEMA deptest - GRANT ALL ON TABLES TO regress_dep_user2; -CREATE FUNCTION deptest_func() RETURNS void LANGUAGE plpgsql - AS $$ BEGIN END; $$; -CREATE TYPE deptest_enum AS ENUM ('red'); -CREATE TYPE deptest_range AS RANGE (SUBTYPE = int4); -CREATE TABLE deptest2 (f1 int); --- make a serial column the hard way -CREATE SEQUENCE ss1; -ALTER TABLE deptest2 ALTER f1 SET DEFAULT nextval('ss1'); -ALTER SEQUENCE ss1 OWNED BY deptest2.f1; --- When reassigning ownership of a composite type, its pg_class entry --- should match -CREATE TYPE deptest_t AS (a int); -SELECT typowner = relowner -FROM pg_type JOIN pg_class c ON typrelid = c.oid WHERE typname = 'deptest_t'; - ?column? 
----------- - t -(1 row) - -RESET SESSION AUTHORIZATION; -REASSIGN OWNED BY regress_dep_user1 TO regress_dep_user2; -\dt deptest - List of relations - Schema | Name | Type | Owner ---------+---------+-------+------------------- - public | deptest | table | regress_dep_user2 -(1 row) - -SELECT typowner = relowner -FROM pg_type JOIN pg_class c ON typrelid = c.oid WHERE typname = 'deptest_t'; - ?column? ----------- - t -(1 row) - --- doesn't work: grant still exists -DROP USER regress_dep_user1; -ERROR: role "regress_dep_user1" cannot be dropped because some objects depend on it -DETAIL: privileges for database regression -privileges for table deptest1 -owner of default privileges on new relations belonging to role regress_dep_user1 in schema deptest -DROP OWNED BY regress_dep_user1; -DROP USER regress_dep_user1; -DROP USER regress_dep_user2; -ERROR: role "regress_dep_user2" cannot be dropped because some objects depend on it -DETAIL: owner of schema deptest -owner of sequence deptest_a_seq -owner of table deptest -owner of function deptest_func() -owner of type deptest_enum -owner of type deptest_range -owner of table deptest2 -owner of sequence ss1 -owner of type deptest_t -DROP OWNED BY regress_dep_user2, regress_dep_user0; -DROP USER regress_dep_user2; -DROP USER regress_dep_user0; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/guc.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/guc.out --- /Users/admin/pgsql/src/test/regress/expected/guc.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/guc.out 2024-12-13 13:20:11 @@ -1,915 +1,2 @@ --- pg_regress should ensure that this default value applies; however --- we can't rely on any specific default value of vacuum_cost_delay -SHOW datestyle; - DateStyle ---------------- - Postgres, MDY -(1 row) - --- Check output style of CamelCase enum options -SET intervalstyle to 'asd'; -ERROR: invalid value for parameter "IntervalStyle": "asd" -HINT: Available values: postgres, postgres_verbose, sql_standard, iso_8601. 
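-- Note (editorial, not part of the test output): the HINT above enumerates
-- the legal enum values; assigning one of them succeeds, e.g.:
--
--   SET IntervalStyle = 'iso_8601';
--   SELECT interval '1 day 2 hours';   -- renders as P1DT2H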
--- SET to some nondefault value -SET vacuum_cost_delay TO 40; -SET datestyle = 'ISO, YMD'; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - --- SET LOCAL has no effect outside of a transaction -SET LOCAL vacuum_cost_delay TO 50; -WARNING: SET LOCAL can only be used in transaction blocks -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SET LOCAL datestyle = 'SQL'; -WARNING: SET LOCAL can only be used in transaction blocks -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - --- SET LOCAL within a transaction that commits -BEGIN; -SET LOCAL vacuum_cost_delay TO 50; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 50ms -(1 row) - -SET LOCAL datestyle = 'SQL'; -SHOW datestyle; - DateStyle ------------ - SQL, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz -------------------------- - 08/13/2006 12:34:56 PDT -(1 row) - -COMMIT; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - --- SET should be reverted after ROLLBACK -BEGIN; -SET vacuum_cost_delay TO 60; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 60ms -(1 row) - -SET datestyle = 'German'; -SHOW datestyle; - DateStyle -------------- - German, DMY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz -------------------------- - 13.08.2006 12:34:56 PDT -(1 row) - -ROLLBACK; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - --- Some tests with subtransactions -BEGIN; -SET vacuum_cost_delay TO 70; -SET datestyle = 'MDY'; -SHOW datestyle; - DateStyle ------------ - ISO, MDY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - -SAVEPOINT first_sp; -SET vacuum_cost_delay TO 80.1; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 80100us -(1 row) - -SET datestyle = 'German, DMY'; -SHOW datestyle; - DateStyle -------------- - German, DMY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz -------------------------- - 13.08.2006 12:34:56 PDT -(1 row) - -ROLLBACK TO first_sp; -SHOW datestyle; - DateStyle ------------ - ISO, MDY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - -SAVEPOINT second_sp; -SET vacuum_cost_delay TO '900us'; -SET datestyle = 'SQL, YMD'; -SHOW datestyle; - DateStyle ------------ - SQL, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz -------------------------- - 08/13/2006 12:34:56 PDT -(1 row) - -SAVEPOINT third_sp; -SET vacuum_cost_delay TO 100; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 100ms -(1 row) - -SET datestyle = 'Postgres, MDY'; -SHOW datestyle; - DateStyle ---------------- - Postgres, MDY 
-(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------------- - Sun Aug 13 12:34:56 2006 PDT -(1 row) - -ROLLBACK TO third_sp; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 900us -(1 row) - -SHOW datestyle; - DateStyle ------------ - SQL, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz -------------------------- - 08/13/2006 12:34:56 PDT -(1 row) - -ROLLBACK TO second_sp; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 70ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, MDY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - -ROLLBACK; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - --- SET LOCAL with Savepoints -BEGIN; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - -SAVEPOINT sp; -SET LOCAL vacuum_cost_delay TO 30; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 30ms -(1 row) - -SET LOCAL datestyle = 'Postgres, MDY'; -SHOW datestyle; - DateStyle ---------------- - Postgres, MDY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------------- - Sun Aug 13 12:34:56 2006 PDT -(1 row) - -ROLLBACK TO sp; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - -ROLLBACK; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - --- SET LOCAL persists through RELEASE (which was not true in 8.0-8.2) -BEGIN; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - -SAVEPOINT sp; -SET LOCAL vacuum_cost_delay TO 30; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 30ms -(1 row) - -SET LOCAL datestyle = 'Postgres, MDY'; -SHOW datestyle; - DateStyle ---------------- - Postgres, MDY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------------- - Sun Aug 13 12:34:56 2006 PDT -(1 row) - -RELEASE SAVEPOINT sp; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 30ms -(1 row) - -SHOW datestyle; - DateStyle ---------------- - Postgres, MDY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------------- - Sun Aug 13 12:34:56 2006 PDT -(1 row) - -ROLLBACK; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 
12:34:56-07 -(1 row) - --- SET followed by SET LOCAL -BEGIN; -SET vacuum_cost_delay TO 40; -SET LOCAL vacuum_cost_delay TO 50; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 50ms -(1 row) - -SET datestyle = 'ISO, DMY'; -SET LOCAL datestyle = 'Postgres, MDY'; -SHOW datestyle; - DateStyle ---------------- - Postgres, MDY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------------- - Sun Aug 13 12:34:56 2006 PDT -(1 row) - -COMMIT; -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 40ms -(1 row) - -SHOW datestyle; - DateStyle ------------ - ISO, DMY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - --- --- Test RESET. We use datestyle because the reset value is forced by --- pg_regress, so it doesn't depend on the installation's configuration. --- -SET datestyle = iso, ymd; -SHOW datestyle; - DateStyle ------------ - ISO, YMD -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------- - 2006-08-13 12:34:56-07 -(1 row) - -RESET datestyle; -SHOW datestyle; - DateStyle ---------------- - Postgres, MDY -(1 row) - -SELECT '2006-08-13 12:34:56'::timestamptz; - timestamptz ------------------------------- - Sun Aug 13 12:34:56 2006 PDT -(1 row) - --- Test some simple error cases -SET seq_page_cost TO 'NaN'; -ERROR: invalid value for parameter "seq_page_cost": "NaN" -SET vacuum_cost_delay TO '10s'; -ERROR: 10000 ms is outside the valid range for parameter "vacuum_cost_delay" (0 ms .. 100 ms) -SET no_such_variable TO 42; -ERROR: unrecognized configuration parameter "no_such_variable" --- Test "custom" GUCs created on the fly (which aren't really an --- intended feature, but many people use them). -SHOW custom.my_guc; -- error, not known yet -ERROR: unrecognized configuration parameter "custom.my_guc" -SET custom.my_guc = 42; -SHOW custom.my_guc; - custom.my_guc ---------------- - 42 -(1 row) - -RESET custom.my_guc; -- this makes it go to empty, not become unknown again -SHOW custom.my_guc; - custom.my_guc ---------------- - -(1 row) - -SET custom.my.qualified.guc = 'foo'; -SHOW custom.my.qualified.guc; - custom.my.qualified.guc -------------------------- - foo -(1 row) - -SET custom."bad-guc" = 42; -- disallowed because -c cannot set this name -ERROR: invalid configuration parameter name "custom.bad-guc" -DETAIL: Custom parameter names must be two or more simple identifiers separated by dots. -SHOW custom."bad-guc"; -ERROR: unrecognized configuration parameter "custom.bad-guc" -SET special."weird name" = 'foo'; -- could be allowed, but we choose not to -ERROR: invalid configuration parameter name "special.weird name" -DETAIL: Custom parameter names must be two or more simple identifiers separated by dots. -SHOW special."weird name"; -ERROR: unrecognized configuration parameter "special.weird name" --- Check what happens when you try to set a "custom" GUC within the --- namespace of an extension. -SET plpgsql.extra_foo_warnings = true; -- allowed if plpgsql is not loaded yet -LOAD 'plpgsql'; -- this will throw a warning and delete the variable -WARNING: invalid configuration parameter name "plpgsql.extra_foo_warnings", removing it -DETAIL: "plpgsql" is now a reserved prefix. -SET plpgsql.extra_foo_warnings = true; -- now, it's an error -ERROR: invalid configuration parameter name "plpgsql.extra_foo_warnings" -DETAIL: "plpgsql" is a reserved prefix. 
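-- Note (editorial, not part of the test output): the naming rule behind the
-- errors above: a parameter set on the fly needs two or more simple
-- identifiers separated by dots, and a prefix claimed by a loaded extension
-- is reserved. A conforming example with a hypothetical prefix:
--
--   SET myapp.feature_flag = 'on';
--   SELECT current_setting('myapp.feature_flag');   -- returns 'on'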
-SHOW plpgsql.extra_foo_warnings; -ERROR: unrecognized configuration parameter "plpgsql.extra_foo_warnings" --- --- Test DISCARD TEMP --- -CREATE TEMP TABLE reset_test ( data text ) ON COMMIT DELETE ROWS; -SELECT relname FROM pg_class WHERE relname = 'reset_test'; - relname ------------- - reset_test -(1 row) - -DISCARD TEMP; -SELECT relname FROM pg_class WHERE relname = 'reset_test'; - relname ---------- -(0 rows) - --- --- Test DISCARD ALL --- --- do changes -DECLARE foo CURSOR WITH HOLD FOR SELECT 1; -PREPARE foo AS SELECT 1; -LISTEN foo_event; -SET vacuum_cost_delay = 13; -CREATE TEMP TABLE tmp_foo (data text) ON COMMIT DELETE ROWS; -CREATE ROLE regress_guc_user; -SET SESSION AUTHORIZATION regress_guc_user; --- look changes -SELECT pg_listening_channels(); - pg_listening_channels ------------------------ - foo_event -(1 row) - -SELECT name FROM pg_prepared_statements; - name ------- - foo -(1 row) - -SELECT name FROM pg_cursors; - name ------- - foo -(1 row) - -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 13ms -(1 row) - -SELECT relname from pg_class where relname = 'tmp_foo'; - relname ---------- - tmp_foo -(1 row) - -SELECT current_user = 'regress_guc_user'; - ?column? ----------- - t -(1 row) - --- discard everything -DISCARD ALL; --- look again -SELECT pg_listening_channels(); - pg_listening_channels ------------------------ -(0 rows) - -SELECT name FROM pg_prepared_statements; - name ------- -(0 rows) - -SELECT name FROM pg_cursors; - name ------- -(0 rows) - -SHOW vacuum_cost_delay; - vacuum_cost_delay -------------------- - 0 -(1 row) - -SELECT relname from pg_class where relname = 'tmp_foo'; - relname ---------- -(0 rows) - -SELECT current_user = 'regress_guc_user'; - ?column? ----------- - f -(1 row) - -DROP ROLE regress_guc_user; --- --- search_path should react to changes in pg_namespace --- -set search_path = foo, public, not_there_initially; -select current_schemas(false); - current_schemas ------------------ - {public} -(1 row) - -create schema not_there_initially; -select current_schemas(false); - current_schemas ------------------------------- - {public,not_there_initially} -(1 row) - -drop schema not_there_initially; -select current_schemas(false); - current_schemas ------------------ - {public} -(1 row) - -reset search_path; --- --- Tests for function-local GUC settings --- -set work_mem = '3MB'; -create function report_guc(text) returns text as -$$ select current_setting($1) $$ language sql -set work_mem = '1MB'; -select report_guc('work_mem'), current_setting('work_mem'); - report_guc | current_setting -------------+----------------- - 1MB | 3MB -(1 row) - -alter function report_guc(text) set work_mem = '2MB'; -select report_guc('work_mem'), current_setting('work_mem'); - report_guc | current_setting -------------+----------------- - 2MB | 3MB -(1 row) - -alter function report_guc(text) reset all; -select report_guc('work_mem'), current_setting('work_mem'); - report_guc | current_setting -------------+----------------- - 3MB | 3MB -(1 row) - --- SET LOCAL is restricted by a function SET option -create or replace function myfunc(int) returns text as $$ -begin - set local work_mem = '2MB'; - return current_setting('work_mem'); -end $$ -language plpgsql -set work_mem = '1MB'; -select myfunc(0), current_setting('work_mem'); - myfunc | current_setting ---------+----------------- - 2MB | 3MB -(1 row) - -alter function myfunc(int) reset all; -select myfunc(0), current_setting('work_mem'); - myfunc | current_setting ---------+----------------- - 2MB | 
2MB -(1 row) - -set work_mem = '3MB'; --- but SET isn't -create or replace function myfunc(int) returns text as $$ -begin - set work_mem = '2MB'; - return current_setting('work_mem'); -end $$ -language plpgsql -set work_mem = '1MB'; -select myfunc(0), current_setting('work_mem'); - myfunc | current_setting ---------+----------------- - 2MB | 2MB -(1 row) - -set work_mem = '3MB'; --- it should roll back on error, though -create or replace function myfunc(int) returns text as $$ -begin - set work_mem = '2MB'; - perform 1/$1; - return current_setting('work_mem'); -end $$ -language plpgsql -set work_mem = '1MB'; -select myfunc(0); -ERROR: division by zero -CONTEXT: SQL statement "SELECT 1/$1" -PL/pgSQL function myfunc(integer) line 4 at PERFORM -select current_setting('work_mem'); - current_setting ------------------ - 3MB -(1 row) - -select myfunc(1), current_setting('work_mem'); - myfunc | current_setting ---------+----------------- - 2MB | 2MB -(1 row) - --- check current_setting()'s behavior with invalid setting name -select current_setting('nosuch.setting'); -- FAIL -ERROR: unrecognized configuration parameter "nosuch.setting" -select current_setting('nosuch.setting', false); -- FAIL -ERROR: unrecognized configuration parameter "nosuch.setting" -select current_setting('nosuch.setting', true) is null; - ?column? ----------- - t -(1 row) - --- after this, all three cases should yield 'nada' -set nosuch.setting = 'nada'; -select current_setting('nosuch.setting'); - current_setting ------------------ - nada -(1 row) - -select current_setting('nosuch.setting', false); - current_setting ------------------ - nada -(1 row) - -select current_setting('nosuch.setting', true); - current_setting ------------------ - nada -(1 row) - --- Normally, CREATE FUNCTION should complain about invalid values in --- function SET options; but not if check_function_bodies is off, --- because that creates ordering hazards for pg_dump -create function func_with_bad_set() returns int as $$ select 1 $$ -language sql -set default_text_search_config = no_such_config; -NOTICE: text search configuration "no_such_config" does not exist -ERROR: invalid value for parameter "default_text_search_config": "no_such_config" -set check_function_bodies = off; -create function func_with_bad_set() returns int as $$ select 1 $$ -language sql -set default_text_search_config = no_such_config; -NOTICE: text search configuration "no_such_config" does not exist -select func_with_bad_set(); -ERROR: invalid value for parameter "default_text_search_config": "no_such_config" -reset check_function_bodies; -set default_with_oids to f; --- Should not allow to set it to true. -set default_with_oids to t; -ERROR: tables declared WITH OIDS are not supported --- Test that disabling track_activities disables query ID reporting in --- pg_stat_activity. 
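
A note on the function-SET-clause semantics pinned down above: a SET clause
saves and restores the parameter around each call; SET LOCAL inside such a
function is therefore confined to the call, while a plain SET in the body
overrides the clause and persists in the session afterwards, unless the
surrounding transaction aborts (the division-by-zero case shows even the
plain SET being undone then). A minimal self-contained sketch of the first
two rules, with an illustrative function name not taken from the test:

CREATE FUNCTION guc_scope_demo(use_local boolean) RETURNS text
LANGUAGE plpgsql
SET work_mem = '1MB'
AS $$
BEGIN
  IF use_local THEN
    SET LOCAL work_mem = '2MB';  -- confined: undone at function exit
  ELSE
    SET work_mem = '2MB';        -- overrides the SET clause: persists
  END IF;
  RETURN current_setting('work_mem');
END $$;

SET work_mem = '4MB';
SELECT guc_scope_demo(true), current_setting('work_mem');   -- 2MB | 4MB
SELECT guc_scope_demo(false), current_setting('work_mem');  -- 2MB | 2MB
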
-SET compute_query_id = on; -SET track_activities = on; -SELECT query_id IS NOT NULL AS qid_set FROM pg_stat_activity - WHERE pid = pg_backend_pid(); - qid_set ---------- - t -(1 row) - -SET track_activities = off; -SELECT query_id IS NOT NULL AS qid_set FROM pg_stat_activity - WHERE pid = pg_backend_pid(); - qid_set ---------- - f -(1 row) - -RESET track_activities; -RESET compute_query_id; --- Test GUC categories and flag patterns -SELECT pg_settings_get_flags(NULL); - pg_settings_get_flags ------------------------ - -(1 row) - -SELECT pg_settings_get_flags('does_not_exist'); - pg_settings_get_flags ------------------------ - -(1 row) - -CREATE TABLE tab_settings_flags AS SELECT name, category, - 'EXPLAIN' = ANY(flags) AS explain, - 'NO_RESET' = ANY(flags) AS no_reset, - 'NO_RESET_ALL' = ANY(flags) AS no_reset_all, - 'NOT_IN_SAMPLE' = ANY(flags) AS not_in_sample, - 'RUNTIME_COMPUTED' = ANY(flags) AS runtime_computed - FROM pg_show_all_settings() AS psas, - pg_settings_get_flags(psas.name) AS flags; --- Developer GUCs should be flagged with GUC_NOT_IN_SAMPLE: -SELECT name FROM tab_settings_flags - WHERE category = 'Developer Options' AND NOT not_in_sample - ORDER BY 1; - name ------- -(0 rows) - --- Most query-tuning GUCs are flagged as valid for EXPLAIN. --- default_statistics_target is an exception. -SELECT name FROM tab_settings_flags - WHERE category ~ '^Query Tuning' AND NOT explain - ORDER BY 1; - name ---------------------------- - default_statistics_target -(1 row) - --- Runtime-computed GUCs should be part of the preset category. -SELECT name FROM tab_settings_flags - WHERE NOT category = 'Preset Options' AND runtime_computed - ORDER BY 1; - name ------- -(0 rows) - --- Preset GUCs are flagged as NOT_IN_SAMPLE. -SELECT name FROM tab_settings_flags - WHERE category = 'Preset Options' AND NOT not_in_sample - ORDER BY 1; - name ------- -(0 rows) - --- NO_RESET implies NO_RESET_ALL. -SELECT name FROM tab_settings_flags - WHERE no_reset AND NOT no_reset_all - ORDER BY 1; - name ------- -(0 rows) - -DROP TABLE tab_settings_flags; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/bitmapops.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/bitmapops.out --- /Users/admin/pgsql/src/test/regress/expected/bitmapops.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/bitmapops.out 2024-12-13 13:20:11 @@ -1,38 +1,2 @@ --- Test bitmap AND and OR --- Generate enough data that we can test the lossy bitmaps. --- There's 55 tuples per page in the table. 53 is just --- below 55, so that an index scan with qual a = constant --- will return at least one hit per page. 59 is just above --- 55, so that an index scan with qual b = constant will return --- hits on most but not all pages. 53 and 59 are prime, so that --- there's a maximum number of a,b combinations in the table. 
--- That allows us to test all the different combinations of --- lossy and non-lossy pages with the minimum amount of data -CREATE TABLE bmscantest (a int, b int, t text); -INSERT INTO bmscantest - SELECT (r%53), (r%59), 'foooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo' - FROM generate_series(1,70000) r; -CREATE INDEX i_bmtest_a ON bmscantest(a); -CREATE INDEX i_bmtest_b ON bmscantest(b); --- We want to use bitmapscans. With default settings, the planner currently --- chooses a bitmap scan for the queries below anyway, but let's make sure. -set enable_indexscan=false; -set enable_seqscan=false; --- Lower work_mem to trigger use of lossy bitmaps -set work_mem = 64; --- Test bitmap-and. -SELECT count(*) FROM bmscantest WHERE a = 1 AND b = 1; - count -------- - 23 -(1 row) - --- Test bitmap-or. -SELECT count(*) FROM bmscantest WHERE a = 1 OR b = 1; - count -------- - 2485 -(1 row) - --- clean up -DROP TABLE bmscantest; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/combocid.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/combocid.out --- /Users/admin/pgsql/src/test/regress/expected/combocid.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/combocid.out 2024-12-13 13:20:11 @@ -1,169 +1,2 @@ --- --- Tests for some likely failure cases with combo cmin/cmax mechanism --- -CREATE TEMP TABLE combocidtest (foobar int); -BEGIN; --- a few dummy ops to push up the CommandId counter -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest VALUES (1); -INSERT INTO combocidtest VALUES (2); -SELECT ctid,cmin,* FROM combocidtest; - ctid | cmin | foobar --------+------+-------- - (0,1) | 10 | 1 - (0,2) | 11 | 2 -(2 rows) - -SAVEPOINT s1; -UPDATE combocidtest SET foobar = foobar + 10; --- here we should see only updated tuples -SELECT ctid,cmin,* FROM combocidtest; - ctid | cmin | foobar --------+------+-------- - (0,3) | 12 | 11 - (0,4) | 12 | 12 -(2 rows) - -ROLLBACK TO s1; --- now we should see old tuples, but with combo CIDs starting at 0 -SELECT ctid,cmin,* FROM combocidtest; - ctid | cmin | foobar --------+------+-------- - (0,1) | 0 | 1 - (0,2) | 1 | 2 -(2 rows) - -COMMIT; --- combo data is not there anymore, but should still see tuples -SELECT ctid,cmin,* FROM combocidtest; - ctid | cmin | foobar --------+------+-------- - (0,1) | 0 | 1 - (0,2) | 1 | 2 -(2 rows) - --- Test combo CIDs with portals -BEGIN; -INSERT INTO combocidtest VALUES (333); -DECLARE c CURSOR FOR SELECT ctid,cmin,* FROM combocidtest; -DELETE FROM combocidtest; -FETCH ALL FROM c; - ctid | cmin | foobar --------+------+-------- - (0,1) | 1 | 1 - (0,2) | 1 | 2 - (0,5) | 0 | 333 -(3 rows) - -ROLLBACK; -SELECT ctid,cmin,* FROM combocidtest; - ctid | cmin | foobar --------+------+-------- - (0,1) | 1 | 1 - (0,2) | 1 | 2 -(2 rows) - --- check behavior with locked tuples 
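
For background on what the combocid hunk is probing: cmin and cmax share a
single physical field in the heap tuple header, so when one transaction both
creates and expires the same row, the backend assigns a "combo" command ID
that maps to the real (cmin, cmax) pair in backend-local memory. That mapping
is why the cmin values above look shifted after ROLLBACK TO, and it vanishes
at transaction end. The simplest way to provoke a combo CID, using an
illustrative table name:

CREATE TEMP TABLE combo_demo (v int);
BEGIN;
INSERT INTO combo_demo VALUES (1);  -- tuple gets a plain cmin
UPDATE combo_demo SET v = v + 1;    -- same xact expires that tuple, which now
                                    -- needs cmin and cmax at once, so a combo
                                    -- CID is assigned to it
SELECT ctid, cmin, cmax, v FROM combo_demo;  -- shows only the new version; the
                                             -- expired one carries the combo CID
COMMIT;
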
-BEGIN; --- a few dummy ops to push up the CommandId counter -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest SELECT 1 LIMIT 0; -INSERT INTO combocidtest VALUES (444); -SELECT ctid,cmin,* FROM combocidtest; - ctid | cmin | foobar --------+------+-------- - (0,1) | 1 | 1 - (0,2) | 1 | 2 - (0,6) | 10 | 444 -(3 rows) - -SAVEPOINT s1; --- this doesn't affect cmin -SELECT ctid,cmin,* FROM combocidtest FOR UPDATE; - ctid | cmin | foobar --------+------+-------- - (0,1) | 1 | 1 - (0,2) | 1 | 2 - (0,6) | 10 | 444 -(3 rows) - -SELECT ctid,cmin,* FROM combocidtest; - ctid | cmin | foobar --------+------+-------- - (0,1) | 1 | 1 - (0,2) | 1 | 2 - (0,6) | 10 | 444 -(3 rows) - --- but this does -UPDATE combocidtest SET foobar = foobar + 10; -SELECT ctid,cmin,* FROM combocidtest; - ctid | cmin | foobar --------+------+-------- - (0,7) | 12 | 11 - (0,8) | 12 | 12 - (0,9) | 12 | 454 -(3 rows) - -ROLLBACK TO s1; -SELECT ctid,cmin,* FROM combocidtest; - ctid | cmin | foobar --------+------+-------- - (0,1) | 12 | 1 - (0,2) | 12 | 2 - (0,6) | 0 | 444 -(3 rows) - -COMMIT; -SELECT ctid,cmin,* FROM combocidtest; - ctid | cmin | foobar --------+------+-------- - (0,1) | 12 | 1 - (0,2) | 12 | 2 - (0,6) | 0 | 444 -(3 rows) - --- test for bug reported in --- CABRT9RC81YUf1=jsmWopcKJEro=VoeG2ou6sPwyOUTx_qteRsg@mail.gmail.com -CREATE TABLE IF NOT EXISTS testcase( - id int PRIMARY KEY, - balance numeric -); -INSERT INTO testcase VALUES (1, 0); -BEGIN; -SELECT * FROM testcase WHERE testcase.id = 1 FOR UPDATE; - id | balance -----+--------- - 1 | 0 -(1 row) - -UPDATE testcase SET balance = balance + 400 WHERE id=1; -SAVEPOINT subxact; -UPDATE testcase SET balance = balance - 100 WHERE id=1; -ROLLBACK TO SAVEPOINT subxact; --- should return one tuple -SELECT * FROM testcase WHERE id = 1 FOR UPDATE; - id | balance -----+--------- - 1 | 400 -(1 row) - -ROLLBACK; -DROP TABLE testcase; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/tsearch.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/tsearch.out --- /Users/admin/pgsql/src/test/regress/expected/tsearch.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/tsearch.out 2024-12-13 13:20:11 @@ -1,3014 +1,2 @@ --- directory paths are passed to us in environment variables -\getenv abs_srcdir PG_ABS_SRCDIR --- --- Sanity checks for text search catalogs --- --- NB: we assume the oidjoins test will have caught any dangling links, --- that is OID or REGPROC fields that are not zero and do not match some --- row in the linked-to table. However, if we want to enforce that a link --- field can't be 0, we have to check it here. 
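
Before the text-search material gets going, one storage detail behind the
"this doesn't affect cmin" comparison in the combocid hunk above: SELECT ...
FOR UPDATE only stamps the locker's transaction ID into the tuple's xmax,
flagged in the infomask as lock-only, and creates no new tuple version; an
UPDATE writes a fresh version at a new ctid. The difference shows up in the
system columns alone (table name illustrative):

CREATE TEMP TABLE lockvis_demo (v int);
INSERT INTO lockvis_demo VALUES (1);
BEGIN;
SELECT ctid, xmin, xmax FROM lockvis_demo;  -- xmax = 0: nothing holds the row
SELECT * FROM lockvis_demo FOR UPDATE;
SELECT ctid, xmin, xmax FROM lockvis_demo;  -- xmax = our XID (lock only),
                                            -- ctid unchanged
UPDATE lockvis_demo SET v = v + 1;
SELECT ctid, xmin, xmax FROM lockvis_demo;  -- new ctid: a real new version
COMMIT;
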
--- Find unexpected zero link entries -SELECT oid, prsname -FROM pg_ts_parser -WHERE prsnamespace = 0 OR prsstart = 0 OR prstoken = 0 OR prsend = 0 OR - -- prsheadline is optional - prslextype = 0; - oid | prsname ------+--------- -(0 rows) - -SELECT oid, dictname -FROM pg_ts_dict -WHERE dictnamespace = 0 OR dictowner = 0 OR dicttemplate = 0; - oid | dictname ------+---------- -(0 rows) - -SELECT oid, tmplname -FROM pg_ts_template -WHERE tmplnamespace = 0 OR tmpllexize = 0; -- tmplinit is optional - oid | tmplname ------+---------- -(0 rows) - -SELECT oid, cfgname -FROM pg_ts_config -WHERE cfgnamespace = 0 OR cfgowner = 0 OR cfgparser = 0; - oid | cfgname ------+--------- -(0 rows) - -SELECT mapcfg, maptokentype, mapseqno -FROM pg_ts_config_map -WHERE mapcfg = 0 OR mapdict = 0; - mapcfg | maptokentype | mapseqno ---------+--------------+---------- -(0 rows) - --- Look for pg_ts_config_map entries that aren't one of parser's token types -SELECT * FROM - ( SELECT oid AS cfgid, (ts_token_type(cfgparser)).tokid AS tokid - FROM pg_ts_config ) AS tt -RIGHT JOIN pg_ts_config_map AS m - ON (tt.cfgid=m.mapcfg AND tt.tokid=m.maptokentype) -WHERE - tt.cfgid IS NULL OR tt.tokid IS NULL; - cfgid | tokid | mapcfg | maptokentype | mapseqno | mapdict --------+-------+--------+--------------+----------+--------- -(0 rows) - --- Load some test data -CREATE TABLE test_tsvector( - t text, - a tsvector -); -\set filename :abs_srcdir '/data/tsearch.data' -COPY test_tsvector FROM :'filename'; -ANALYZE test_tsvector; --- test basic text search behavior without indexes, then with -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh'; - count -------- - 17 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt'; - count -------- - 98 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)'; - count -------- - 23 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)'; - count -------- - 39 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; - count -------- - 494 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}'); - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh'; - count -------- - 432 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A'; - count -------- - 56 -(1 row) - 
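
The unindexed query batch running through here exercises the full tsquery
operator set: & | !, prefix matching with :*, the phrase operators <-> and
<N> (exact distance), and weight filters such as wd:A. How they compose, on
throwaway values rather than the test data:

SELECT to_tsvector('simple', 'quick brown fox') @@ 'quick & fox';    -- t
SELECT to_tsvector('simple', 'quick brown fox') @@ 'quick <-> fox';  -- f: not adjacent
SELECT to_tsvector('simple', 'quick brown fox') @@ 'quick <2> fox';  -- t: exactly 2 apart
SELECT to_tsvector('simple', 'quick brown fox') @@ 'qu:*';           -- t: prefix match
SELECT 'fox:1A'::tsvector @@ 'fox:A'::tsquery;                       -- t: weight filter
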
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D'; - count -------- - 58 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A'; - count -------- - 452 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D'; - count -------- - 450 -(1 row) - -create index wowidx on test_tsvector using gist (a); -SET enable_seqscan=OFF; -SET enable_indexscan=ON; -SET enable_bitmapscan=OFF; -explain (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - QUERY PLAN -------------------------------------------------------- - Aggregate - -> Index Scan using wowidx on test_tsvector - Index Cond: (a @@ '''wr'' | ''qh'''::tsquery) -(3 rows) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh'; - count -------- - 17 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt'; - count -------- - 98 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)'; - count -------- - 23 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)'; - count -------- - 39 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; - count -------- - 494 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}'); - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh'; - count -------- - 432 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A'; - count -------- - 56 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D'; - count -------- - 58 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A'; - count -------- - 452 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D'; - count -------- - 450 -(1 row) - -SET enable_indexscan=OFF; -SET enable_bitmapscan=ON; -explain (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - QUERY PLAN -------------------------------------------------------------- - Aggregate - -> Bitmap Heap Scan on test_tsvector - Recheck Cond: (a @@ '''wr'' | ''qh'''::tsquery) - -> Bitmap Index Scan on wowidx - Index Cond: (a @@ '''wr'' | ''qh'''::tsquery) -(5 rows) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh'; - count -------- - 17 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt'; - count 
-------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt'; - count -------- - 98 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)'; - count -------- - 23 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)'; - count -------- - 39 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; - count -------- - 494 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}'); - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh'; - count -------- - 432 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A'; - count -------- - 56 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D'; - count -------- - 58 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A'; - count -------- - 452 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D'; - count -------- - 450 -(1 row) - --- Test siglen parameter of GiST tsvector_ops -CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(foo=1)); -ERROR: unrecognized parameter "foo" -CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(siglen=0)); -ERROR: value 0 out of bounds for option "siglen" -DETAIL: Valid values are between "1" and "2024". -CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(siglen=2048)); -ERROR: value 2048 out of bounds for option "siglen" -DETAIL: Valid values are between "1" and "2024". 
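
Context for the siglen error cases here: GiST tsvector_ops does not store
lexemes, it stores a fixed-size hashed signature per entry, and the siglen
storage parameter sets that signature's length in bytes; the documented
default is 124, and the maximum with the default page size is 2024, which is
exactly the bound these error messages report. Wider signatures trade index
size for fewer lossy matches that the heap recheck must discard. A sketch on
an illustrative table:

CREATE TABLE docs_demo (body tsvector);
CREATE INDEX docs_demo_gist ON docs_demo
    USING gist (body tsvector_ops (siglen = 256));  -- default would be 124
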
-CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(siglen=100,foo='bar')); -ERROR: unrecognized parameter "foo" -CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(siglen=100, siglen = 200)); -ERROR: parameter "siglen" specified more than once -CREATE INDEX wowidx2 ON test_tsvector USING gist (a tsvector_ops(siglen=1)); -\d test_tsvector - Table "public.test_tsvector" - Column | Type | Collation | Nullable | Default ---------+----------+-----------+----------+--------- - t | text | | | - a | tsvector | | | -Indexes: - "wowidx" gist (a) - "wowidx2" gist (a tsvector_ops (siglen='1')) - -DROP INDEX wowidx; -EXPLAIN (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - QUERY PLAN -------------------------------------------------------------- - Aggregate - -> Bitmap Heap Scan on test_tsvector - Recheck Cond: (a @@ '''wr'' | ''qh'''::tsquery) - -> Bitmap Index Scan on wowidx2 - Index Cond: (a @@ '''wr'' | ''qh'''::tsquery) -(5 rows) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh'; - count -------- - 17 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt'; - count -------- - 98 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)'; - count -------- - 23 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)'; - count -------- - 39 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; - count -------- - 494 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}'); - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh'; - count -------- - 432 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A'; - count -------- - 56 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D'; - count -------- - 58 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A'; - count -------- - 452 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D'; - count -------- - 450 -(1 row) - -DROP INDEX wowidx2; -CREATE INDEX wowidx ON test_tsvector USING gist (a tsvector_ops(siglen=484)); -\d test_tsvector - Table "public.test_tsvector" - Column | Type | Collation | Nullable | Default ---------+----------+-----------+----------+--------- - t | text | | | - a | tsvector | | | -Indexes: - "wowidx" gist (a 
tsvector_ops (siglen='484')) - -EXPLAIN (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - QUERY PLAN -------------------------------------------------------------- - Aggregate - -> Bitmap Heap Scan on test_tsvector - Recheck Cond: (a @@ '''wr'' | ''qh'''::tsquery) - -> Bitmap Index Scan on wowidx - Index Cond: (a @@ '''wr'' | ''qh'''::tsquery) -(5 rows) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh'; - count -------- - 17 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt'; - count -------- - 98 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)'; - count -------- - 23 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)'; - count -------- - 39 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; - count -------- - 494 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}'); - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh'; - count -------- - 432 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A'; - count -------- - 56 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D'; - count -------- - 58 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A'; - count -------- - 452 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D'; - count -------- - 450 -(1 row) - -RESET enable_seqscan; -RESET enable_indexscan; -RESET enable_bitmapscan; -DROP INDEX wowidx; -CREATE INDEX wowidx ON test_tsvector USING gin (a); -SET enable_seqscan=OFF; --- GIN only supports bitmapscan, so no need to test plain indexscan -explain (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - QUERY PLAN -------------------------------------------------------------- - Aggregate - -> Bitmap Heap Scan on test_tsvector - Recheck Cond: (a @@ '''wr'' | ''qh'''::tsquery) - -> Bitmap Index Scan on wowidx - Index Cond: (a @@ '''wr'' | ''qh'''::tsquery) -(5 rows) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh'; - count -------- - 17 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt'; - count -------- - 98 -(1 
row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)'; - count -------- - 23 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)'; - count -------- - 39 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; - count -------- - 494 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}'); - count -------- - 158 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh'; - count -------- - 432 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt'; - count -------- - 6 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)'; - count -------- - 508 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)'; - count -------- - 507 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A'; - count -------- - 56 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D'; - count -------- - 58 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A'; - count -------- - 452 -(1 row) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D'; - count -------- - 450 -(1 row) - --- Test optimization of non-empty GIN_SEARCH_MODE_ALL queries -EXPLAIN (COSTS OFF) -SELECT count(*) FROM test_tsvector WHERE a @@ '!qh'; - QUERY PLAN ------------------------------------------------------ - Aggregate - -> Bitmap Heap Scan on test_tsvector - Recheck Cond: (a @@ '!''qh'''::tsquery) - -> Bitmap Index Scan on wowidx - Index Cond: (a @@ '!''qh'''::tsquery) -(5 rows) - -SELECT count(*) FROM test_tsvector WHERE a @@ '!qh'; - count -------- - 410 -(1 row) - -EXPLAIN (COSTS OFF) -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr' AND a @@ '!qh'; - QUERY PLAN ------------------------------------------------------------------------------------- - Aggregate - -> Bitmap Heap Scan on test_tsvector - Recheck Cond: ((a @@ '''wr'''::tsquery) AND (a @@ '!''qh'''::tsquery)) - -> Bitmap Index Scan on wowidx - Index Cond: ((a @@ '''wr'''::tsquery) AND (a @@ '!''qh'''::tsquery)) -(5 rows) - -SELECT count(*) FROM test_tsvector WHERE a @@ 'wr' AND a @@ '!qh'; - count -------- - 60 -(1 row) - -RESET enable_seqscan; -INSERT INTO test_tsvector VALUES ('???', 'DFG:1A,2B,6C,10 FGH'); -SELECT * FROM ts_stat('SELECT a FROM test_tsvector') ORDER BY ndoc DESC, nentry DESC, word LIMIT 10; - word | ndoc | nentry -------+------+-------- - qq | 108 | 108 - qt | 102 | 102 - qe | 100 | 101 - qh | 98 | 99 - qw | 98 | 98 - qa | 97 | 97 - ql | 94 | 94 - qs | 94 | 94 - qr | 92 | 93 - qi | 92 | 92 -(10 rows) - -SELECT * FROM ts_stat('SELECT a FROM test_tsvector', 'AB') ORDER BY ndoc DESC, nentry DESC, word; - word | ndoc | nentry -------+------+-------- - DFG | 1 | 2 -(1 row) - ---dictionaries and to_tsvector -SELECT ts_lexize('english_stem', 'skies'); - 
ts_lexize ------------ - {sky} -(1 row) - -SELECT ts_lexize('english_stem', 'identity'); - ts_lexize ------------ - {ident} -(1 row) - -SELECT * FROM ts_token_type('default'); - tokid | alias | description --------+-----------------+------------------------------------------ - 1 | asciiword | Word, all ASCII - 2 | word | Word, all letters - 3 | numword | Word, letters and digits - 4 | email | Email address - 5 | url | URL - 6 | host | Host - 7 | sfloat | Scientific notation - 8 | version | Version number - 9 | hword_numpart | Hyphenated word part, letters and digits - 10 | hword_part | Hyphenated word part, all letters - 11 | hword_asciipart | Hyphenated word part, all ASCII - 12 | blank | Space symbols - 13 | tag | XML tag - 14 | protocol | Protocol head - 15 | numhword | Hyphenated word, letters and digits - 16 | asciihword | Hyphenated word, all ASCII - 17 | hword | Hyphenated word, all letters - 18 | url_path | URL path - 19 | file | File or path name - 20 | float | Decimal notation - 21 | int | Signed integer - 22 | uint | Unsigned integer - 23 | entity | XML entity -(23 rows) - -SELECT * FROM ts_parse('default', '345 qwe@efd.r '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/? ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005 teodor@stack.net teodor@123-stack.net 123_teodor@stack.net 123-teodor@stack.net qwe-wer asdf qwer jf sdjk ewr1> ewri2 -/usr/local/fff /awdf/dwqe/4325 rewt/ewr wefjn /wqe-324/ewr gist.h gist.h.c gist.c. readline 4.2 4.2. 4.2, readline-4.2 readline-4.2. 234 - wow < jqw <> qwerty'); - tokid | token --------+-------------------------------------- - 22 | 345 - 12 | - 1 | qwe - 12 | @ - 19 | efd.r - 12 | ' - 14 | http:// - 6 | www.com - 12 | / - 14 | http:// - 5 | aew.werc.ewr/?ad=qwe&dw - 6 | aew.werc.ewr - 18 | /?ad=qwe&dw - 12 | - 5 | 1aew.werc.ewr/?ad=qwe&dw - 6 | 1aew.werc.ewr - 18 | /?ad=qwe&dw - 12 | - 6 | 2aew.werc.ewr - 12 | - 14 | http:// - 5 | 3aew.werc.ewr/?ad=qwe&dw - 6 | 3aew.werc.ewr - 18 | /?ad=qwe&dw - 12 | - 14 | http:// - 6 | 4aew.werc.ewr - 12 | - 14 | http:// - 5 | 5aew.werc.ewr:8100/? - 6 | 5aew.werc.ewr:8100 - 18 | /? - 12 | - 1 | ad - 12 | = - 1 | qwe - 12 | & - 1 | dw - 12 | - 5 | 6aew.werc.ewr:8100/?ad=qwe&dw - 6 | 6aew.werc.ewr:8100 - 18 | /?ad=qwe&dw - 12 | - 5 | 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 - 6 | 7aew.werc.ewr:8100 - 18 | /?ad=qwe&dw=%20%32 - 12 | - 7 | +4.0e-10 - 12 | - 1 | qwe - 12 | - 1 | qwe - 12 | - 1 | qwqwe - 12 | - 20 | 234.435 - 12 | - 22 | 455 - 12 | - 20 | 5.005 - 12 | - 4 | teodor@stack.net - 12 | - 4 | teodor@123-stack.net - 12 | - 4 | 123_teodor@stack.net - 12 | - 4 | 123-teodor@stack.net - 12 | - 16 | qwe-wer - 11 | qwe - 12 | - - 11 | wer - 12 | - 1 | asdf - 12 | - 13 | - 1 | qwer - 12 | - 1 | jf - 12 | - 1 | sdjk - 12 | < - 1 | we - 12 | - 1 | hjwer - 12 | - 13 | - 12 | - 3 | ewr1 - 12 | > - 3 | ewri2 - 12 | - 13 | - 12 | + - | - 19 | /usr/local/fff - 12 | - 19 | /awdf/dwqe/4325 - 12 | - 19 | rewt/ewr - 12 | - 1 | wefjn - 12 | - 19 | /wqe-324/ewr - 12 | - 19 | gist.h - 12 | - 19 | gist.h.c - 12 | - 19 | gist.c - 12 | . - 1 | readline - 12 | - 20 | 4.2 - 12 | - 20 | 4.2 - 12 | . - 20 | 4.2 - 12 | , - 1 | readline - 20 | -4.2 - 12 | - 1 | readline - 20 | -4.2 - 12 | . 
- 22 | 234 - 12 | + - | - 12 | < - 1 | i - 12 | - 13 | - 12 | - 1 | wow - 12 | - 12 | < - 1 | jqw - 12 | - 12 | <> - 1 | qwerty -(139 rows) - -SELECT to_tsvector('english', '345 qwe@efd.r '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/? ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005 teodor@stack.net teodor@123-stack.net 123_teodor@stack.net 123-teodor@stack.net qwe-wer asdf qwer jf sdjk ewr1> ewri2 -/usr/local/fff /awdf/dwqe/4325 rewt/ewr wefjn /wqe-324/ewr gist.h gist.h.c gist.c. readline 4.2 4.2. 4.2, readline-4.2 readline-4.2. 234 - wow < jqw <> qwerty'); - to_tsvector ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - '+4.0e-10':28 '-4.2':63,65 '/?':18 '/?ad=qwe&dw':7,10,14,24 '/?ad=qwe&dw=%20%32':27 '/awdf/dwqe/4325':51 '/usr/local/fff':50 '/wqe-324/ewr':54 '123-teodor@stack.net':38 '123_teodor@stack.net':37 '1aew.werc.ewr':9 '1aew.werc.ewr/?ad=qwe&dw':8 '234':66 '234.435':32 '2aew.werc.ewr':11 '345':1 '3aew.werc.ewr':13 '3aew.werc.ewr/?ad=qwe&dw':12 '4.2':59,60,61 '455':33 '4aew.werc.ewr':15 '5.005':34 '5aew.werc.ewr:8100':17 '5aew.werc.ewr:8100/?':16 '6aew.werc.ewr:8100':23 '6aew.werc.ewr:8100/?ad=qwe&dw':22 '7aew.werc.ewr:8100':26 '7aew.werc.ewr:8100/?ad=qwe&dw=%20%32':25 'ad':19 'aew.werc.ewr':6 'aew.werc.ewr/?ad=qwe&dw':5 'asdf':42 'dw':21 'efd.r':3 'ewr1':48 'ewri2':49 'gist.c':57 'gist.h':55 'gist.h.c':56 'hjwer':47 'jf':44 'jqw':69 'qwe':2,20,29,30,40 'qwe-wer':39 'qwer':43 'qwerti':70 'qwqwe':31 'readlin':58,62,64 'rewt/ewr':52 'sdjk':45 'teodor@123-stack.net':36 'teodor@stack.net':35 'wefjn':53 'wer':41 'wow':68 'www.com':4 -(1 row) - -SELECT length(to_tsvector('english', '345 qwe@efd.r '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/? ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005 teodor@stack.net teodor@123-stack.net 123_teodor@stack.net 123-teodor@stack.net qwe-wer asdf qwer jf sdjk ewr1> ewri2 -/usr/local/fff /awdf/dwqe/4325 rewt/ewr wefjn /wqe-324/ewr gist.h gist.h.c gist.c. readline 4.2 4.2. 4.2, readline-4.2 readline-4.2. 
234 - wow < jqw <> qwerty')); - length --------- - 56 -(1 row) - --- ts_debug -SELECT * from ts_debug('english', 'abc&nm1;def©ghiõjkl'); - alias | description | token | dictionaries | dictionary | lexemes ------------+-----------------+----------------------------+----------------+--------------+--------- - tag | XML tag | | {} | | - asciiword | Word, all ASCII | abc | {english_stem} | english_stem | {abc} - entity | XML entity | &nm1; | {} | | - asciiword | Word, all ASCII | def | {english_stem} | english_stem | {def} - entity | XML entity | © | {} | | - asciiword | Word, all ASCII | ghi | {english_stem} | english_stem | {ghi} - entity | XML entity | õ | {} | | - asciiword | Word, all ASCII | jkl | {english_stem} | english_stem | {jkl} - tag | XML tag | | {} | | -(9 rows) - --- check parsing of URLs -SELECT * from ts_debug('english', 'http://www.harewoodsolutions.co.uk/press.aspx'); - alias | description | token | dictionaries | dictionary | lexemes -----------+---------------+----------------------------------------+--------------+------------+------------------------------------------ - protocol | Protocol head | http:// | {} | | - url | URL | www.harewoodsolutions.co.uk/press.aspx | {simple} | simple | {www.harewoodsolutions.co.uk/press.aspx} - host | Host | www.harewoodsolutions.co.uk | {simple} | simple | {www.harewoodsolutions.co.uk} - url_path | URL path | /press.aspx | {simple} | simple | {/press.aspx} - tag | XML tag | | {} | | -(5 rows) - -SELECT * from ts_debug('english', 'http://aew.wer0c.ewr/id?ad=qwe&dw'); - alias | description | token | dictionaries | dictionary | lexemes -----------+---------------+----------------------------+--------------+------------+------------------------------ - protocol | Protocol head | http:// | {} | | - url | URL | aew.wer0c.ewr/id?ad=qwe&dw | {simple} | simple | {aew.wer0c.ewr/id?ad=qwe&dw} - host | Host | aew.wer0c.ewr | {simple} | simple | {aew.wer0c.ewr} - url_path | URL path | /id?ad=qwe&dw | {simple} | simple | {/id?ad=qwe&dw} - tag | XML tag | | {} | | -(5 rows) - -SELECT * from ts_debug('english', 'http://5aew.werc.ewr:8100/?'); - alias | description | token | dictionaries | dictionary | lexemes -----------+---------------+----------------------+--------------+------------+------------------------ - protocol | Protocol head | http:// | {} | | - url | URL | 5aew.werc.ewr:8100/? | {simple} | simple | {5aew.werc.ewr:8100/?} - host | Host | 5aew.werc.ewr:8100 | {simple} | simple | {5aew.werc.ewr:8100} - url_path | URL path | /? 
| {simple} | simple | {/?} -(4 rows) - -SELECT * from ts_debug('english', '5aew.werc.ewr:8100/?xx'); - alias | description | token | dictionaries | dictionary | lexemes -----------+-------------+------------------------+--------------+------------+-------------------------- - url | URL | 5aew.werc.ewr:8100/?xx | {simple} | simple | {5aew.werc.ewr:8100/?xx} - host | Host | 5aew.werc.ewr:8100 | {simple} | simple | {5aew.werc.ewr:8100} - url_path | URL path | /?xx | {simple} | simple | {/?xx} -(3 rows) - -SELECT token, alias, - dictionaries, dictionaries is null as dnull, array_dims(dictionaries) as ddims, - lexemes, lexemes is null as lnull, array_dims(lexemes) as ldims -from ts_debug('english', 'a title'); - token | alias | dictionaries | dnull | ddims | lexemes | lnull | ldims --------+-----------+----------------+-------+-------+---------+-------+------- - a | asciiword | {english_stem} | f | [1:1] | {} | f | - | blank | {} | f | | | t | - title | asciiword | {english_stem} | f | [1:1] | {titl} | f | [1:1] -(3 rows) - --- to_tsquery -SELECT to_tsquery('english', 'qwe & sKies '); - to_tsquery ---------------- - 'qwe' & 'sky' -(1 row) - -SELECT to_tsquery('simple', 'qwe & sKies '); - to_tsquery ------------------ - 'qwe' & 'skies' -(1 row) - -SELECT to_tsquery('english', '''the wether'':dc & '' sKies '':BC '); - to_tsquery ------------------------- - 'wether':CD & 'sky':BC -(1 row) - -SELECT to_tsquery('english', 'asd&(and|fghj)'); - to_tsquery ----------------- - 'asd' & 'fghj' -(1 row) - -SELECT to_tsquery('english', '(asd&and)|fghj'); - to_tsquery ----------------- - 'asd' | 'fghj' -(1 row) - -SELECT to_tsquery('english', '(asd&!and)|fghj'); - to_tsquery ----------------- - 'asd' | 'fghj' -(1 row) - -SELECT to_tsquery('english', '(the|and&(i&1))&fghj'); - to_tsquery --------------- - '1' & 'fghj' -(1 row) - -SELECT plainto_tsquery('english', 'the and z 1))& fghj'); - plainto_tsquery --------------------- - 'z' & '1' & 'fghj' -(1 row) - -SELECT plainto_tsquery('english', 'foo bar') && plainto_tsquery('english', 'asd'); - ?column? ------------------------ - 'foo' & 'bar' & 'asd' -(1 row) - -SELECT plainto_tsquery('english', 'foo bar') || plainto_tsquery('english', 'asd fg'); - ?column? ------------------------------- - 'foo' & 'bar' | 'asd' & 'fg' -(1 row) - -SELECT plainto_tsquery('english', 'foo bar') || !!plainto_tsquery('english', 'asd fg'); - ?column? ------------------------------------ - 'foo' & 'bar' | !( 'asd' & 'fg' ) -(1 row) - -SELECT plainto_tsquery('english', 'foo bar') && 'asd | fg'; - ?column? 
----------------------------------- - 'foo' & 'bar' & ( 'asd' | 'fg' ) -(1 row) - --- Check stop word deletion, a and s are stop-words -SELECT to_tsquery('english', '!(a & !b) & c'); - to_tsquery -------------- - !!'b' & 'c' -(1 row) - -SELECT to_tsquery('english', '!(a & !b)'); - to_tsquery ------------- - !!'b' -(1 row) - -SELECT to_tsquery('english', '(1 <-> 2) <-> a'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', '(1 <-> a) <-> 2'); - to_tsquery -------------- - '1' <2> '2' -(1 row) - -SELECT to_tsquery('english', '(a <-> 1) <-> 2'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', 'a <-> (1 <-> 2)'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', '1 <-> (a <-> 2)'); - to_tsquery -------------- - '1' <2> '2' -(1 row) - -SELECT to_tsquery('english', '1 <-> (2 <-> a)'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', '(1 <-> 2) <3> a'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', '(1 <-> a) <3> 2'); - to_tsquery -------------- - '1' <4> '2' -(1 row) - -SELECT to_tsquery('english', '(a <-> 1) <3> 2'); - to_tsquery -------------- - '1' <3> '2' -(1 row) - -SELECT to_tsquery('english', 'a <3> (1 <-> 2)'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', '1 <3> (a <-> 2)'); - to_tsquery -------------- - '1' <4> '2' -(1 row) - -SELECT to_tsquery('english', '1 <3> (2 <-> a)'); - to_tsquery -------------- - '1' <3> '2' -(1 row) - -SELECT to_tsquery('english', '(1 <3> 2) <-> a'); - to_tsquery -------------- - '1' <3> '2' -(1 row) - -SELECT to_tsquery('english', '(1 <3> a) <-> 2'); - to_tsquery -------------- - '1' <4> '2' -(1 row) - -SELECT to_tsquery('english', '(a <3> 1) <-> 2'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', 'a <-> (1 <3> 2)'); - to_tsquery -------------- - '1' <3> '2' -(1 row) - -SELECT to_tsquery('english', '1 <-> (a <3> 2)'); - to_tsquery -------------- - '1' <4> '2' -(1 row) - -SELECT to_tsquery('english', '1 <-> (2 <3> a)'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', '((a <-> 1) <-> 2) <-> s'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', '(2 <-> (a <-> 1)) <-> s'); - to_tsquery -------------- - '2' <2> '1' -(1 row) - -SELECT to_tsquery('english', '((1 <-> a) <-> 2) <-> s'); - to_tsquery -------------- - '1' <2> '2' -(1 row) - -SELECT to_tsquery('english', '(2 <-> (1 <-> a)) <-> s'); - to_tsquery -------------- - '2' <-> '1' -(1 row) - -SELECT to_tsquery('english', 's <-> ((a <-> 1) <-> 2)'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', 's <-> (2 <-> (a <-> 1))'); - to_tsquery -------------- - '2' <2> '1' -(1 row) - -SELECT to_tsquery('english', 's <-> ((1 <-> a) <-> 2)'); - to_tsquery -------------- - '1' <2> '2' -(1 row) - -SELECT to_tsquery('english', 's <-> (2 <-> (1 <-> a))'); - to_tsquery -------------- - '2' <-> '1' -(1 row) - -SELECT to_tsquery('english', '((a <-> 1) <-> s) <-> 2'); - to_tsquery -------------- - '1' <2> '2' -(1 row) - -SELECT to_tsquery('english', '(s <-> (a <-> 1)) <-> 2'); - to_tsquery -------------- - '1' <-> '2' -(1 row) - -SELECT to_tsquery('english', '((1 <-> a) <-> s) <-> 2'); - to_tsquery -------------- - '1' <3> '2' -(1 row) - -SELECT to_tsquery('english', '(s <-> (1 <-> a)) <-> 2'); - to_tsquery -------------- - '1' <2> '2' -(1 row) - -SELECT to_tsquery('english', '2 <-> ((a 
<-> 1) <-> s)'); - to_tsquery -------------- - '2' <2> '1' -(1 row) - -SELECT to_tsquery('english', '2 <-> (s <-> (a <-> 1))'); - to_tsquery -------------- - '2' <3> '1' -(1 row) - -SELECT to_tsquery('english', '2 <-> ((1 <-> a) <-> s)'); - to_tsquery -------------- - '2' <-> '1' -(1 row) - -SELECT to_tsquery('english', '2 <-> (s <-> (1 <-> a))'); - to_tsquery -------------- - '2' <2> '1' -(1 row) - -SELECT to_tsquery('english', 'foo <-> (a <-> (the <-> bar))'); - to_tsquery ------------------ - 'foo' <3> 'bar' -(1 row) - -SELECT to_tsquery('english', '((foo <-> a) <-> the) <-> bar'); - to_tsquery ------------------ - 'foo' <3> 'bar' -(1 row) - -SELECT to_tsquery('english', 'foo <-> a <-> the <-> bar'); - to_tsquery ------------------ - 'foo' <3> 'bar' -(1 row) - -SELECT phraseto_tsquery('english', 'PostgreSQL can be extended by the user in many ways'); - phraseto_tsquery ------------------------------------------------------------ - 'postgresql' <3> 'extend' <3> 'user' <2> 'mani' <-> 'way' -(1 row) - -SELECT ts_rank_cd(to_tsvector('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -'), to_tsquery('english', 'paint&water')); - ts_rank_cd ------------- - 0.05 -(1 row) - -SELECT ts_rank_cd(to_tsvector('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -'), to_tsquery('english', 'breath&motion&water')); - ts_rank_cd -------------- - 0.008333334 -(1 row) - -SELECT ts_rank_cd(to_tsvector('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -'), to_tsquery('english', 'ocean')); - ts_rank_cd ------------- - 0.1 -(1 row) - -SELECT ts_rank_cd(to_tsvector('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -'), to_tsquery('english', 'painted <-> Ship')); - ts_rank_cd ------------- - 0.1 -(1 row) - -SELECT ts_rank_cd(strip(to_tsvector('both stripped')), - to_tsquery('both & stripped')); - ts_rank_cd ------------- - 0 -(1 row) - -SELECT ts_rank_cd(to_tsvector('unstripped') || strip(to_tsvector('stripped')), - to_tsquery('unstripped & stripped')); - ts_rank_cd ------------- - 0 -(1 row) - ---headline tests -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'paint&water')); - ts_headline ------------------------------------------ - painted Ocean. 
+ - Water, water, every where+ - And all the boards did shrink; + - Water, water, every -(1 row) - -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'breath&motion&water')); - ts_headline ----------------------------------- - breath nor motion,+ - As idle as a painted Ship + - Upon a painted Ocean. + - Water, water -(1 row) - -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'ocean')); - ts_headline ----------------------------------- - Ocean. + - Water, water, every where + - And all the boards did shrink;+ - Water, water, every where -(1 row) - -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'day & drink')); - ts_headline ------------------------------------- - day, + - We stuck, nor breath nor motion,+ - As idle as a painted Ship + - Upon a painted Ocean. + - Water, water, every where + - And all the boards did shrink; + - Water, water, every where, + - Nor any drop -(1 row) - -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'day | drink')); - ts_headline ------------------------------------------------------------ - Day after day, day after day,+ - We stuck, nor breath nor motion, + - As idle as a painted -(1 row) - -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'day | !drink')); - ts_headline ------------------------------------------------------------ - Day after day, day after day,+ - We stuck, nor breath nor motion, + - As idle as a painted -(1 row) - -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'painted <-> Ship & drink')); - ts_headline ----------------------------------- - painted Ship + - Upon a painted Ocean. + - Water, water, every where + - And all the boards did shrink;+ - Water, water, every where, + - Nor any drop to drink -(1 row) - -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. 
-Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'painted <-> Ship | drink')); - ts_headline ---------------------------------- - painted Ship + - Upon a painted Ocean. + - Water, water, every where + - And all the boards did shrink -(1 row) - -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'painted <-> Ship | !drink')); - ts_headline ---------------------------------- - painted Ship + - Upon a painted Ocean. + - Water, water, every where + - And all the boards did shrink -(1 row) - -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', phraseto_tsquery('english', 'painted Ocean')); - ts_headline ----------------------------------- - painted Ocean. + - Water, water, every where + - And all the boards did shrink;+ - Water, water, every -(1 row) - -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', phraseto_tsquery('english', 'idle as a painted Ship')); - ts_headline ---------------------------------------------- - idle as a painted Ship+ - Upon a painted Ocean. + - Water, water, every where + - And all the boards -(1 row) - -SELECT ts_headline('english', -'Lorem ipsum urna. Nullam nullam ullamcorper urna.', -to_tsquery('english','Lorem') && phraseto_tsquery('english','ullamcorper urna'), -'MaxWords=100, MinWords=1'); - ts_headline -------------------------------------------------------------------------------- - Lorem ipsum urna. Nullam nullam ullamcorper urna -(1 row) - -SELECT ts_headline('english', -'Lorem ipsum urna. Nullam nullam ullamcorper urna.', -phraseto_tsquery('english','ullamcorper urna'), -'MaxWords=100, MinWords=5'); - ts_headline -------------------------------------------------------------- - urna. Nullam nullam ullamcorper urna. -(1 row) - -SELECT ts_headline('english', ' - - - -Sea view wow foo bar qq -YES   -ff-bg - - -', -to_tsquery('english', 'sea&foo'), 'HighlightAll=true'); - ts_headline ------------------------------------------------------------------------------ - + - + - + - + - Sea view wow foo bar qq + - YES  + - ff-bg + - + - + - -(1 row) - -SELECT ts_headline('simple', '1 2 3 1 3'::text, '1 <-> 3', 'MaxWords=2, MinWords=1'); - ts_headline -------------------- - 1 3 -(1 row) - -SELECT ts_headline('simple', '1 2 3 1 3'::text, '1 & 3', 'MaxWords=4, MinWords=1'); - ts_headline ---------------------- - 1 2 3 -(1 row) - -SELECT ts_headline('simple', '1 2 3 1 3'::text, '1 <-> 3', 'MaxWords=4, MinWords=1'); - ts_headline -------------------- - 1 3 -(1 row) - ---Check if headline fragments work -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. 
-Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'ocean'), 'MaxFragments=1'); - ts_headline ------------------------------------- - after day, + - We stuck, nor breath nor motion,+ - As idle as a painted Ship + - Upon a painted Ocean. + - Water, water, every where + - And all the boards did shrink; + - Water, water, every where, + - Nor any drop -(1 row) - ---Check if more than one fragments are displayed -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'Coleridge & stuck'), 'MaxFragments=2'); - ts_headline ----------------------------------------------- - after day, day after day, + - We stuck, nor breath nor motion, + - As idle as a painted Ship + - Upon a painted Ocean. + - Water, water, every where + - And all the boards did shrink; + - Water, water, every where ... drop to drink.+ - S. T. Coleridge -(1 row) - ---Fragments when there all query words are not in the document -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'ocean & seahorse'), 'MaxFragments=1'); - ts_headline ------------------------------------- - + - Day after day, day after day, + - We stuck, nor breath nor motion,+ - As idle as -(1 row) - ---FragmentDelimiter option -SELECT ts_headline('english', ' -Day after day, day after day, - We stuck, nor breath nor motion, -As idle as a painted Ship - Upon a painted Ocean. -Water, water, every where - And all the boards did shrink; -Water, water, every where, - Nor any drop to drink. -S. T. Coleridge (1772-1834) -', to_tsquery('english', 'Coleridge & stuck'), 'MaxFragments=2,FragmentDelimiter=***'); - ts_headline --------------------------------------------- - after day, day after day, + - We stuck, nor breath nor motion, + - As idle as a painted Ship + - Upon a painted Ocean. + - Water, water, every where + - And all the boards did shrink; + - Water, water, every where***drop to drink.+ - S. T. Coleridge -(1 row) - ---Fragments with phrase search -SELECT ts_headline('english', -'Lorem ipsum urna. Nullam nullam ullamcorper urna.', -to_tsquery('english','Lorem') && phraseto_tsquery('english','ullamcorper urna'), -'MaxFragments=100, MaxWords=100, MinWords=1'); - ts_headline -------------------------------------------------------------------------------- - Lorem ipsum urna. 
Nullam nullam ullamcorper urna -(1 row) - --- Edge cases with empty query -SELECT ts_headline('english', -'', to_tsquery('english', '')); -NOTICE: text-search query doesn't contain lexemes: "" - ts_headline -------------- - -(1 row) - -SELECT ts_headline('english', -'foo bar', to_tsquery('english', '')); -NOTICE: text-search query doesn't contain lexemes: "" - ts_headline -------------- - foo bar -(1 row) - ---Rewrite sub system -CREATE TABLE test_tsquery (txtkeyword TEXT, txtsample TEXT); -\set ECHO none -ALTER TABLE test_tsquery ADD COLUMN keyword tsquery; -UPDATE test_tsquery SET keyword = to_tsquery('english', txtkeyword); -ALTER TABLE test_tsquery ADD COLUMN sample tsquery; -UPDATE test_tsquery SET sample = to_tsquery('english', txtsample::text); -SELECT COUNT(*) FROM test_tsquery WHERE keyword < 'new <-> york'; - count -------- - 2 -(1 row) - -SELECT COUNT(*) FROM test_tsquery WHERE keyword <= 'new <-> york'; - count -------- - 3 -(1 row) - -SELECT COUNT(*) FROM test_tsquery WHERE keyword = 'new <-> york'; - count -------- - 1 -(1 row) - -SELECT COUNT(*) FROM test_tsquery WHERE keyword >= 'new <-> york'; - count -------- - 4 -(1 row) - -SELECT COUNT(*) FROM test_tsquery WHERE keyword > 'new <-> york'; - count -------- - 3 -(1 row) - -CREATE UNIQUE INDEX bt_tsq ON test_tsquery (keyword); -SET enable_seqscan=OFF; -SELECT COUNT(*) FROM test_tsquery WHERE keyword < 'new <-> york'; - count -------- - 2 -(1 row) - -SELECT COUNT(*) FROM test_tsquery WHERE keyword <= 'new <-> york'; - count -------- - 3 -(1 row) - -SELECT COUNT(*) FROM test_tsquery WHERE keyword = 'new <-> york'; - count -------- - 1 -(1 row) - -SELECT COUNT(*) FROM test_tsquery WHERE keyword >= 'new <-> york'; - count -------- - 4 -(1 row) - -SELECT COUNT(*) FROM test_tsquery WHERE keyword > 'new <-> york'; - count -------- - 3 -(1 row) - -RESET enable_seqscan; -SELECT ts_rewrite('foo & bar & qq & new & york', 'new & york'::tsquery, 'big & apple | nyc | new & york & city'); - ts_rewrite ------------------------------------------------------------------------------- - 'foo' & 'bar' & 'qq' & ( 'city' & 'new' & 'york' | 'nyc' | 'big' & 'apple' ) -(1 row) - -SELECT ts_rewrite(ts_rewrite('new & !york ', 'york', '!jersey'), - 'jersey', 'mexico'); - ts_rewrite --------------------- - 'new' & !!'mexico' -(1 row) - -SELECT ts_rewrite('moscow', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite ---------------------- - 'moskva' | 'moscow' -(1 row) - -SELECT ts_rewrite('moscow & hotel', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite ------------------------------------ - 'hotel' & ( 'moskva' | 'moscow' ) -(1 row) - -SELECT ts_rewrite('bar & qq & foo & (new <-> york)', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite -------------------------------------------------------------------------------------- - 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' ) -(1 row) - -SELECT ts_rewrite( 'moscow', 'SELECT keyword, sample FROM test_tsquery'); - ts_rewrite ---------------------- - 'moskva' | 'moscow' -(1 row) - -SELECT ts_rewrite( 'moscow & hotel', 'SELECT keyword, sample FROM test_tsquery'); - ts_rewrite ------------------------------------ - 'hotel' & ( 'moskva' | 'moscow' ) -(1 row) - -SELECT ts_rewrite( 'bar & qq & foo & (new <-> york)', 'SELECT keyword, sample FROM test_tsquery'); - ts_rewrite -------------------------------------------------------------------------------------- - 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 
'york' ) -(1 row) - -SELECT ts_rewrite('1 & (2 <-> 3)', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite -------------- - '2' <-> '4' -(1 row) - -SELECT ts_rewrite('1 & (2 <2> 3)', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite -------------------- - '1' & '2' <2> '3' -(1 row) - -SELECT ts_rewrite('5 <-> (1 & (2 <-> 3))', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite -------------------------- - '5' <-> ( '2' <-> '4' ) -(1 row) - -SELECT ts_rewrite('5 <-> (6 | 8)', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite ------------------------ - '5' <-> ( '6' | '8' ) -(1 row) - --- Check empty substitution -SELECT ts_rewrite(to_tsquery('5 & (6 | 5)'), to_tsquery('5'), to_tsquery('')); -NOTICE: text-search query doesn't contain lexemes: "" - ts_rewrite ------------- - '6' -(1 row) - -SELECT ts_rewrite(to_tsquery('!5'), to_tsquery('5'), to_tsquery('')); -NOTICE: text-search query doesn't contain lexemes: "" - ts_rewrite ------------- - -(1 row) - -SELECT keyword FROM test_tsquery WHERE keyword @> 'new'; - keyword ------------------- - 'new' <-> 'york' -(1 row) - -SELECT keyword FROM test_tsquery WHERE keyword @> 'moscow'; - keyword ----------- - 'moscow' -(1 row) - -SELECT keyword FROM test_tsquery WHERE keyword <@ 'new'; - keyword ---------- -(0 rows) - -SELECT keyword FROM test_tsquery WHERE keyword <@ 'moscow'; - keyword ----------- - 'moscow' -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow') AS query; - ts_rewrite ---------------------- - 'moskva' | 'moscow' -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow & hotel') AS query; - ts_rewrite ------------------------------------ - 'hotel' & ( 'moskva' | 'moscow' ) -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'bar & qq & foo & (new <-> york)') AS query; - ts_rewrite -------------------------------------------------------------------------------------- - 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' ) -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow') AS query; - ts_rewrite ---------------------- - 'moskva' | 'moscow' -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow & hotel') AS query; - ts_rewrite ------------------------------------ - 'hotel' & ( 'moskva' | 'moscow' ) -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'bar & qq & foo & (new <-> york)') AS query; - ts_rewrite -------------------------------------------------------------------------------------- - 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' ) -(1 row) - -CREATE INDEX qq ON test_tsquery USING gist (keyword tsquery_ops); -SET enable_seqscan=OFF; -SELECT keyword FROM test_tsquery WHERE keyword @> 'new'; - keyword ------------------- - 'new' <-> 'york' -(1 row) - -SELECT keyword FROM test_tsquery WHERE keyword @> 'moscow'; - keyword ----------- - 'moscow' -(1 row) - -SELECT keyword FROM test_tsquery WHERE keyword <@ 'new'; - keyword ---------- -(0 rows) - -SELECT keyword FROM test_tsquery WHERE keyword <@ 'moscow'; - keyword ----------- - 'moscow' -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM 
to_tsquery('english', 'moscow') AS query; - ts_rewrite ---------------------- - 'moskva' | 'moscow' -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow & hotel') AS query; - ts_rewrite ------------------------------------ - 'hotel' & ( 'moskva' | 'moscow' ) -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'bar & qq & foo & (new <-> york)') AS query; - ts_rewrite -------------------------------------------------------------------------------------- - 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' ) -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow') AS query; - ts_rewrite ---------------------- - 'moskva' | 'moscow' -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow & hotel') AS query; - ts_rewrite ------------------------------------ - 'hotel' & ( 'moskva' | 'moscow' ) -(1 row) - -SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'bar & qq & foo & (new <-> york)') AS query; - ts_rewrite -------------------------------------------------------------------------------------- - 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' ) -(1 row) - -SELECT ts_rewrite(tsquery_phrase('foo', 'foo'), 'foo', 'bar | baz'); - ts_rewrite ------------------------------------------ - ( 'bar' | 'baz' ) <-> ( 'bar' | 'baz' ) -(1 row) - -SELECT to_tsvector('foo bar') @@ - ts_rewrite(tsquery_phrase('foo', 'foo'), 'foo', 'bar | baz'); - ?column? ----------- - f -(1 row) - -SELECT to_tsvector('bar baz') @@ - ts_rewrite(tsquery_phrase('foo', 'foo'), 'foo', 'bar | baz'); - ?column? 
----------- - t -(1 row) - -RESET enable_seqscan; ---test GUC -SET default_text_search_config=simple; -SELECT to_tsvector('SKIES My booKs'); - to_tsvector ----------------------------- - 'books':3 'my':2 'skies':1 -(1 row) - -SELECT plainto_tsquery('SKIES My booKs'); - plainto_tsquery --------------------------- - 'skies' & 'my' & 'books' -(1 row) - -SELECT to_tsquery('SKIES & My | booKs'); - to_tsquery --------------------------- - 'skies' & 'my' | 'books' -(1 row) - -SET default_text_search_config=english; -SELECT to_tsvector('SKIES My booKs'); - to_tsvector ------------------- - 'book':3 'sky':1 -(1 row) - -SELECT plainto_tsquery('SKIES My booKs'); - plainto_tsquery ------------------ - 'sky' & 'book' -(1 row) - -SELECT to_tsquery('SKIES & My | booKs'); - to_tsquery ----------------- - 'sky' | 'book' -(1 row) - ---trigger -CREATE TRIGGER tsvectorupdate -BEFORE UPDATE OR INSERT ON test_tsvector -FOR EACH ROW EXECUTE PROCEDURE tsvector_update_trigger(a, 'pg_catalog.english', t); -SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); - count -------- - 0 -(1 row) - -INSERT INTO test_tsvector (t) VALUES ('345 qwerty'); -SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); - count -------- - 1 -(1 row) - -UPDATE test_tsvector SET t = null WHERE t = '345 qwerty'; -SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); - count -------- - 0 -(1 row) - -INSERT INTO test_tsvector (t) VALUES ('345 qwerty'); -SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); - count -------- - 1 -(1 row) - --- Test inlining of immutable constant functions --- to_tsquery(text) is not immutable, so it won't be inlined -explain (costs off) -select * from test_tsquery, to_tsquery('new') q where txtsample @@ q; - QUERY PLAN ------------------------------------------------- - Nested Loop - Join Filter: (test_tsquery.txtsample @@ q.q) - -> Function Scan on to_tsquery q - -> Seq Scan on test_tsquery -(4 rows) - --- to_tsquery(regconfig, text) is an immutable function. --- That allows us to get rid of using function scan and join at all. 
-explain (costs off) -select * from test_tsquery, to_tsquery('english', 'new') q where txtsample @@ q; - QUERY PLAN ---------------------------------------------- - Seq Scan on test_tsquery - Filter: (txtsample @@ '''new'''::tsquery) -(2 rows) - --- test finding items in GIN's pending list -create temp table pendtest (ts tsvector); -create index pendtest_idx on pendtest using gin(ts); -insert into pendtest values (to_tsvector('Lore ipsam')); -insert into pendtest values (to_tsvector('Lore ipsum')); -select * from pendtest where 'ipsu:*'::tsquery @@ ts; - ts --------------------- - 'ipsum':2 'lore':1 -(1 row) - -select * from pendtest where 'ipsa:*'::tsquery @@ ts; - ts --------------------- - 'ipsam':2 'lore':1 -(1 row) - -select * from pendtest where 'ips:*'::tsquery @@ ts; - ts --------------------- - 'ipsam':2 'lore':1 - 'ipsum':2 'lore':1 -(2 rows) - -select * from pendtest where 'ipt:*'::tsquery @@ ts; - ts ----- -(0 rows) - -select * from pendtest where 'ipi:*'::tsquery @@ ts; - ts ----- -(0 rows) - ---check OP_PHRASE on index -create temp table phrase_index_test(fts tsvector); -insert into phrase_index_test values ('A fat cat has just eaten a rat.'); -insert into phrase_index_test values (to_tsvector('english', 'A fat cat has just eaten a rat.')); -create index phrase_index_test_idx on phrase_index_test using gin(fts); -set enable_seqscan = off; -select * from phrase_index_test where fts @@ phraseto_tsquery('english', 'fat cat'); - fts ------------------------------------ - 'cat':3 'eaten':6 'fat':2 'rat':8 -(1 row) - -set enable_seqscan = on; --- test websearch_to_tsquery function -select websearch_to_tsquery('simple', 'I have a fat:*ABCD cat'); - websearch_to_tsquery ---------------------------------------------- - 'i' & 'have' & 'a' & 'fat' & 'abcd' & 'cat' -(1 row) - -select websearch_to_tsquery('simple', 'orange:**AABBCCDD'); - websearch_to_tsquery ------------------------ - 'orange' & 'aabbccdd' -(1 row) - -select websearch_to_tsquery('simple', 'fat:A!cat:B|rat:C<'); - websearch_to_tsquery ------------------------------------------ - 'fat' & 'a' & 'cat' & 'b' & 'rat' & 'c' -(1 row) - -select websearch_to_tsquery('simple', 'fat:A : cat:B'); - websearch_to_tsquery ---------------------------- - 'fat' & 'a' & 'cat' & 'b' -(1 row) - -select websearch_to_tsquery('simple', 'fat*rat'); - websearch_to_tsquery ----------------------- - 'fat' <-> 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'fat-rat'); - websearch_to_tsquery -------------------------------- - 'fat-rat' <-> 'fat' <-> 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'fat_rat'); - websearch_to_tsquery ----------------------- - 'fat' <-> 'rat' -(1 row) - --- weights are completely ignored -select websearch_to_tsquery('simple', 'abc : def'); - websearch_to_tsquery ----------------------- - 'abc' & 'def' -(1 row) - -select websearch_to_tsquery('simple', 'abc:def'); - websearch_to_tsquery ----------------------- - 'abc' & 'def' -(1 row) - -select websearch_to_tsquery('simple', 'a:::b'); - websearch_to_tsquery ----------------------- - 'a' & 'b' -(1 row) - -select websearch_to_tsquery('simple', 'abc:d'); - websearch_to_tsquery ----------------------- - 'abc' & 'd' -(1 row) - -select websearch_to_tsquery('simple', ':'); -NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored - websearch_to_tsquery ----------------------- - -(1 row) - --- these operators are ignored -select websearch_to_tsquery('simple', 'abc & def'); - websearch_to_tsquery ----------------------- - 'abc' & 'def' 
-(1 row) - -select websearch_to_tsquery('simple', 'abc | def'); - websearch_to_tsquery ----------------------- - 'abc' & 'def' -(1 row) - -select websearch_to_tsquery('simple', 'abc <-> def'); - websearch_to_tsquery ----------------------- - 'abc' & 'def' -(1 row) - --- parens are ignored, too -select websearch_to_tsquery('simple', 'abc (pg or class)'); - websearch_to_tsquery ------------------------- - 'abc' & 'pg' | 'class' -(1 row) - -select websearch_to_tsquery('simple', '(foo bar) or (ding dong)'); - websearch_to_tsquery ---------------------------------- - 'foo' & 'bar' | 'ding' & 'dong' -(1 row) - --- NOT is ignored in quotes -select websearch_to_tsquery('english', 'My brand new smartphone'); - websearch_to_tsquery -------------------------------- - 'brand' & 'new' & 'smartphon' -(1 row) - -select websearch_to_tsquery('english', 'My brand "new smartphone"'); - websearch_to_tsquery ---------------------------------- - 'brand' & 'new' <-> 'smartphon' -(1 row) - -select websearch_to_tsquery('english', 'My brand "new -smartphone"'); - websearch_to_tsquery ---------------------------------- - 'brand' & 'new' <-> 'smartphon' -(1 row) - --- test OR operator -select websearch_to_tsquery('simple', 'cat or rat'); - websearch_to_tsquery ----------------------- - 'cat' | 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'cat OR rat'); - websearch_to_tsquery ----------------------- - 'cat' | 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'cat "OR" rat'); - websearch_to_tsquery ----------------------- - 'cat' & 'or' & 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'cat OR'); - websearch_to_tsquery ----------------------- - 'cat' & 'or' -(1 row) - -select websearch_to_tsquery('simple', 'OR rat'); - websearch_to_tsquery ----------------------- - 'or' & 'rat' -(1 row) - -select websearch_to_tsquery('simple', '"fat cat OR rat"'); - websearch_to_tsquery ------------------------------------- - 'fat' <-> 'cat' <-> 'or' <-> 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'fat (cat OR rat'); - websearch_to_tsquery ------------------------ - 'fat' & 'cat' | 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'or OR or'); - websearch_to_tsquery ----------------------- - 'or' | 'or' -(1 row) - --- OR is an operator here ... -select websearch_to_tsquery('simple', '"fat cat"or"fat rat"'); - websearch_to_tsquery ------------------------------------ - 'fat' <-> 'cat' | 'fat' <-> 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'fat or(rat'); - websearch_to_tsquery ----------------------- - 'fat' | 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'fat or)rat'); - websearch_to_tsquery ----------------------- - 'fat' | 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'fat or&rat'); - websearch_to_tsquery ----------------------- - 'fat' | 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'fat or|rat'); - websearch_to_tsquery ----------------------- - 'fat' | 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'fat or!rat'); - websearch_to_tsquery ----------------------- - 'fat' | 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'fat orrat'); - websearch_to_tsquery ----------------------- - 'fat' | 'rat' -(1 row) - -select websearch_to_tsquery('simple', 'fat or '); - websearch_to_tsquery ----------------------- - 'fat' & 'or' -(1 row) - --- ... 
but not here -select websearch_to_tsquery('simple', 'abc orange'); - websearch_to_tsquery ----------------------- - 'abc' & 'orange' -(1 row) - -select websearch_to_tsquery('simple', 'abc OR1234'); - websearch_to_tsquery ----------------------- - 'abc' & 'or1234' -(1 row) - -select websearch_to_tsquery('simple', 'abc or-abc'); - websearch_to_tsquery -------------------------------------- - 'abc' & 'or-abc' <-> 'or' <-> 'abc' -(1 row) - -select websearch_to_tsquery('simple', 'abc OR_abc'); - websearch_to_tsquery ------------------------- - 'abc' & 'or' <-> 'abc' -(1 row) - --- test quotes -select websearch_to_tsquery('english', '"pg_class pg'); - websearch_to_tsquery ---------------------------- - 'pg' <-> 'class' <-> 'pg' -(1 row) - -select websearch_to_tsquery('english', 'pg_class pg"'); - websearch_to_tsquery -------------------------- - 'pg' <-> 'class' & 'pg' -(1 row) - -select websearch_to_tsquery('english', '"pg_class pg"'); - websearch_to_tsquery ---------------------------- - 'pg' <-> 'class' <-> 'pg' -(1 row) - -select websearch_to_tsquery('english', '"pg_class : pg"'); - websearch_to_tsquery ---------------------------- - 'pg' <-> 'class' <-> 'pg' -(1 row) - -select websearch_to_tsquery('english', 'abc "pg_class pg"'); - websearch_to_tsquery ------------------------------------ - 'abc' & 'pg' <-> 'class' <-> 'pg' -(1 row) - -select websearch_to_tsquery('english', '"pg_class pg" def'); - websearch_to_tsquery ------------------------------------ - 'pg' <-> 'class' <-> 'pg' & 'def' -(1 row) - -select websearch_to_tsquery('english', 'abc "pg pg_class pg" def'); - websearch_to_tsquery ----------------------------------------------------- - 'abc' & 'pg' <-> 'pg' <-> 'class' <-> 'pg' & 'def' -(1 row) - -select websearch_to_tsquery('english', ' or "pg pg_class pg" or '); - websearch_to_tsquery ------------------------------------- - 'pg' <-> 'pg' <-> 'class' <-> 'pg' -(1 row) - -select websearch_to_tsquery('english', '""pg pg_class pg""'); - websearch_to_tsquery --------------------------------- - 'pg' & 'pg' <-> 'class' & 'pg' -(1 row) - -select websearch_to_tsquery('english', 'abc """"" def'); - websearch_to_tsquery ----------------------- - 'abc' & 'def' -(1 row) - -select websearch_to_tsquery('english', 'cat -"fat rat"'); - websearch_to_tsquery ------------------------------- - 'cat' & !( 'fat' <-> 'rat' ) -(1 row) - -select websearch_to_tsquery('english', 'cat -"fat rat" cheese'); - websearch_to_tsquery ----------------------------------------- - 'cat' & !( 'fat' <-> 'rat' ) & 'chees' -(1 row) - -select websearch_to_tsquery('english', 'abc "def -"'); - websearch_to_tsquery ----------------------- - 'abc' & 'def' -(1 row) - -select websearch_to_tsquery('english', 'abc "def :"'); - websearch_to_tsquery ----------------------- - 'abc' & 'def' -(1 row) - -select websearch_to_tsquery('english', '"A fat cat" has just eaten a -rat.'); - websearch_to_tsquery ------------------------------------- - 'fat' <-> 'cat' & 'eaten' & !'rat' -(1 row) - -select websearch_to_tsquery('english', '"A fat cat" has just eaten OR !rat.'); - websearch_to_tsquery ------------------------------------ - 'fat' <-> 'cat' & 'eaten' | 'rat' -(1 row) - -select websearch_to_tsquery('english', '"A fat cat" has just (+eaten OR -rat)'); - websearch_to_tsquery ------------------------------------- - 'fat' <-> 'cat' & 'eaten' | !'rat' -(1 row) - -select websearch_to_tsquery('english', 'this is ----fine'); - websearch_to_tsquery ----------------------- - !!!!'fine' -(1 row) - -select websearch_to_tsquery('english', '(()) 
)))) this ||| is && -fine, "dear friend" OR good'); - websearch_to_tsquery ----------------------------------------- - !'fine' & 'dear' <-> 'friend' | 'good' -(1 row) - -select websearch_to_tsquery('english', 'an old <-> cat " is fine &&& too'); - websearch_to_tsquery ------------------------- - 'old' & 'cat' & 'fine' -(1 row) - -select websearch_to_tsquery('english', '"A the" OR just on'); -NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored - websearch_to_tsquery ----------------------- - -(1 row) - -select websearch_to_tsquery('english', '"a fat cat" ate a rat'); - websearch_to_tsquery ---------------------------------- - 'fat' <-> 'cat' & 'ate' & 'rat' -(1 row) - -select to_tsvector('english', 'A fat cat ate a rat') @@ - websearch_to_tsquery('english', '"a fat cat" ate a rat'); - ?column? ----------- - t -(1 row) - -select to_tsvector('english', 'A fat grey cat ate a rat') @@ - websearch_to_tsquery('english', '"a fat cat" ate a rat'); - ?column? ----------- - f -(1 row) - --- cases handled by gettoken_tsvector() -select websearch_to_tsquery(''''); -NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored - websearch_to_tsquery ----------------------- - -(1 row) - -select websearch_to_tsquery('''abc''''def'''); - websearch_to_tsquery ----------------------- - 'abc' <-> 'def' -(1 row) - -select websearch_to_tsquery('\abc'); - websearch_to_tsquery ----------------------- - 'abc' -(1 row) - -select websearch_to_tsquery('\'); -NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored - websearch_to_tsquery ----------------------- - -(1 row) - +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? 
diff -U3 /Users/admin/pgsql/src/test/regress/expected/tsdicts.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/tsdicts.out --- /Users/admin/pgsql/src/test/regress/expected/tsdicts.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/tsdicts.out 2024-12-13 13:20:11 @@ -1,723 +1,2 @@ ---Test text search dictionaries and configurations --- Test ISpell dictionary with ispell affix file -CREATE TEXT SEARCH DICTIONARY ispell ( - Template=ispell, - DictFile=ispell_sample, - AffFile=ispell_sample -); -SELECT ts_lexize('ispell', 'skies'); - ts_lexize ------------ - {sky} -(1 row) - -SELECT ts_lexize('ispell', 'bookings'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('ispell', 'booking'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('ispell', 'foot'); - ts_lexize ------------ - {foot} -(1 row) - -SELECT ts_lexize('ispell', 'foots'); - ts_lexize ------------ - {foot} -(1 row) - -SELECT ts_lexize('ispell', 'rebookings'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('ispell', 'rebooking'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('ispell', 'rebook'); - ts_lexize ------------ - -(1 row) - -SELECT ts_lexize('ispell', 'unbookings'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('ispell', 'unbooking'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('ispell', 'unbook'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('ispell', 'footklubber'); - ts_lexize ----------------- - {foot,klubber} -(1 row) - -SELECT ts_lexize('ispell', 'footballklubber'); - ts_lexize ------------------------------------------------------- - {footballklubber,foot,ball,klubber,football,klubber} -(1 row) - -SELECT ts_lexize('ispell', 'ballyklubber'); - ts_lexize ----------------- - {ball,klubber} -(1 row) - -SELECT ts_lexize('ispell', 'footballyklubber'); - ts_lexize ---------------------- - {foot,ball,klubber} -(1 row) - --- Test ISpell dictionary with hunspell affix file -CREATE TEXT SEARCH DICTIONARY hunspell ( - Template=ispell, - DictFile=ispell_sample, - AffFile=hunspell_sample -); -SELECT ts_lexize('hunspell', 'skies'); - ts_lexize ------------ - {sky} -(1 row) - -SELECT ts_lexize('hunspell', 'bookings'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell', 'booking'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell', 'foot'); - ts_lexize ------------ - {foot} -(1 row) - -SELECT ts_lexize('hunspell', 'foots'); - ts_lexize ------------ - {foot} -(1 row) - -SELECT ts_lexize('hunspell', 'rebookings'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell', 'rebooking'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell', 'rebook'); - ts_lexize ------------ - -(1 row) - -SELECT ts_lexize('hunspell', 'unbookings'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('hunspell', 'unbooking'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('hunspell', 'unbook'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('hunspell', 'footklubber'); - ts_lexize ----------------- - {foot,klubber} -(1 row) - -SELECT ts_lexize('hunspell', 'footballklubber'); - ts_lexize ------------------------------------------------------- - {footballklubber,foot,ball,klubber,football,klubber} -(1 row) - -SELECT ts_lexize('hunspell', 
'ballyklubber'); - ts_lexize ----------------- - {ball,klubber} -(1 row) - -SELECT ts_lexize('hunspell', 'footballyklubber'); - ts_lexize ---------------------- - {foot,ball,klubber} -(1 row) - --- Test ISpell dictionary with hunspell affix file with FLAG long parameter -CREATE TEXT SEARCH DICTIONARY hunspell_long ( - Template=ispell, - DictFile=hunspell_sample_long, - AffFile=hunspell_sample_long -); -SELECT ts_lexize('hunspell_long', 'skies'); - ts_lexize ------------ - {sky} -(1 row) - -SELECT ts_lexize('hunspell_long', 'bookings'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell_long', 'booking'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell_long', 'foot'); - ts_lexize ------------ - {foot} -(1 row) - -SELECT ts_lexize('hunspell_long', 'foots'); - ts_lexize ------------ - {foot} -(1 row) - -SELECT ts_lexize('hunspell_long', 'rebookings'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell_long', 'rebooking'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell_long', 'rebook'); - ts_lexize ------------ - -(1 row) - -SELECT ts_lexize('hunspell_long', 'unbookings'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('hunspell_long', 'unbooking'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('hunspell_long', 'unbook'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('hunspell_long', 'booked'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('hunspell_long', 'footklubber'); - ts_lexize ----------------- - {foot,klubber} -(1 row) - -SELECT ts_lexize('hunspell_long', 'footballklubber'); - ts_lexize ------------------------------------------------------- - {footballklubber,foot,ball,klubber,football,klubber} -(1 row) - -SELECT ts_lexize('hunspell_long', 'ballyklubber'); - ts_lexize ----------------- - {ball,klubber} -(1 row) - -SELECT ts_lexize('hunspell_long', 'ballsklubber'); - ts_lexize ----------------- - {ball,klubber} -(1 row) - -SELECT ts_lexize('hunspell_long', 'footballyklubber'); - ts_lexize ---------------------- - {foot,ball,klubber} -(1 row) - -SELECT ts_lexize('hunspell_long', 'ex-machina'); - ts_lexize ---------------- - {ex-,machina} -(1 row) - --- Test ISpell dictionary with hunspell affix file with FLAG num parameter -CREATE TEXT SEARCH DICTIONARY hunspell_num ( - Template=ispell, - DictFile=hunspell_sample_num, - AffFile=hunspell_sample_num -); -SELECT ts_lexize('hunspell_num', 'skies'); - ts_lexize ------------ - {sky} -(1 row) - -SELECT ts_lexize('hunspell_num', 'sk'); - ts_lexize ------------ - {sky} -(1 row) - -SELECT ts_lexize('hunspell_num', 'bookings'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell_num', 'booking'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell_num', 'foot'); - ts_lexize ------------ - {foot} -(1 row) - -SELECT ts_lexize('hunspell_num', 'foots'); - ts_lexize ------------ - {foot} -(1 row) - -SELECT ts_lexize('hunspell_num', 'rebookings'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell_num', 'rebooking'); - ts_lexize ----------------- - {booking,book} -(1 row) - -SELECT ts_lexize('hunspell_num', 'rebook'); - ts_lexize ------------ - -(1 row) - -SELECT ts_lexize('hunspell_num', 'unbookings'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('hunspell_num', 'unbooking'); - ts_lexize ------------ - {book} -(1 row) - 
-SELECT ts_lexize('hunspell_num', 'unbook'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('hunspell_num', 'booked'); - ts_lexize ------------ - {book} -(1 row) - -SELECT ts_lexize('hunspell_num', 'footklubber'); - ts_lexize ----------------- - {foot,klubber} -(1 row) - -SELECT ts_lexize('hunspell_num', 'footballklubber'); - ts_lexize ------------------------------------------------------- - {footballklubber,foot,ball,klubber,football,klubber} -(1 row) - -SELECT ts_lexize('hunspell_num', 'ballyklubber'); - ts_lexize ----------------- - {ball,klubber} -(1 row) - -SELECT ts_lexize('hunspell_num', 'footballyklubber'); - ts_lexize ---------------------- - {foot,ball,klubber} -(1 row) - --- Test suitability of affix and dict files -CREATE TEXT SEARCH DICTIONARY hunspell_err ( - Template=ispell, - DictFile=ispell_sample, - AffFile=hunspell_sample_long -); -ERROR: invalid affix alias "GJUS" -CREATE TEXT SEARCH DICTIONARY hunspell_err ( - Template=ispell, - DictFile=ispell_sample, - AffFile=hunspell_sample_num -); -ERROR: invalid affix flag "SZ\" -CREATE TEXT SEARCH DICTIONARY hunspell_invalid_1 ( - Template=ispell, - DictFile=hunspell_sample_long, - AffFile=ispell_sample -); -CREATE TEXT SEARCH DICTIONARY hunspell_invalid_2 ( - Template=ispell, - DictFile=hunspell_sample_long, - AffFile=hunspell_sample_num -); -CREATE TEXT SEARCH DICTIONARY hunspell_invalid_3 ( - Template=ispell, - DictFile=hunspell_sample_num, - AffFile=ispell_sample -); -CREATE TEXT SEARCH DICTIONARY hunspell_err ( - Template=ispell, - DictFile=hunspell_sample_num, - AffFile=hunspell_sample_long -); -ERROR: invalid affix alias "302,301,202,303" --- Synonym dictionary -CREATE TEXT SEARCH DICTIONARY synonym ( - Template=synonym, - Synonyms=synonym_sample -); -SELECT ts_lexize('synonym', 'PoStGrEs'); - ts_lexize ------------ - {pgsql} -(1 row) - -SELECT ts_lexize('synonym', 'Gogle'); - ts_lexize ------------ - {googl} -(1 row) - -SELECT ts_lexize('synonym', 'indices'); - ts_lexize ------------ - {index} -(1 row) - --- test altering boolean parameters -SELECT dictinitoption FROM pg_ts_dict WHERE dictname = 'synonym'; - dictinitoption ------------------------------ - synonyms = 'synonym_sample' -(1 row) - -ALTER TEXT SEARCH DICTIONARY synonym (CaseSensitive = 1); -SELECT ts_lexize('synonym', 'PoStGrEs'); - ts_lexize ------------ - -(1 row) - -SELECT dictinitoption FROM pg_ts_dict WHERE dictname = 'synonym'; - dictinitoption ------------------------------------------------- - synonyms = 'synonym_sample', casesensitive = 1 -(1 row) - -ALTER TEXT SEARCH DICTIONARY synonym (CaseSensitive = 2); -- fail -ERROR: casesensitive requires a Boolean value -ALTER TEXT SEARCH DICTIONARY synonym (CaseSensitive = off); -SELECT ts_lexize('synonym', 'PoStGrEs'); - ts_lexize ------------ - {pgsql} -(1 row) - -SELECT dictinitoption FROM pg_ts_dict WHERE dictname = 'synonym'; - dictinitoption ----------------------------------------------------- - synonyms = 'synonym_sample', casesensitive = 'off' -(1 row) - --- Create and simple test thesaurus dictionary --- More tests in configuration checks because ts_lexize() --- cannot pass more than one word to thesaurus. 
-CREATE TEXT SEARCH DICTIONARY thesaurus ( - Template=thesaurus, - DictFile=thesaurus_sample, - Dictionary=english_stem -); -SELECT ts_lexize('thesaurus', 'one'); - ts_lexize ------------ - {1} -(1 row) - --- Test ispell dictionary in configuration -CREATE TEXT SEARCH CONFIGURATION ispell_tst ( - COPY=english -); -ALTER TEXT SEARCH CONFIGURATION ispell_tst ALTER MAPPING FOR - word, numword, asciiword, hword, numhword, asciihword, hword_part, hword_numpart, hword_asciipart - WITH ispell, english_stem; -SELECT to_tsvector('ispell_tst', 'Booking the skies after rebookings for footballklubber from a foot'); - to_tsvector ----------------------------------------------------------------------------------------------------- - 'ball':7 'book':1,5 'booking':1,5 'foot':7,10 'football':7 'footballklubber':7 'klubber':7 'sky':3 -(1 row) - -SELECT to_tsquery('ispell_tst', 'footballklubber'); - to_tsquery --------------------------------------------------------------------------- - 'footballklubber' | 'foot' & 'ball' & 'klubber' | 'football' & 'klubber' -(1 row) - -SELECT to_tsquery('ispell_tst', 'footballyklubber:b & rebookings:A & sky'); - to_tsquery ------------------------------------------------------------------------- - 'foot':B & 'ball':B & 'klubber':B & ( 'booking':A | 'book':A ) & 'sky' -(1 row) - --- Test ispell dictionary with hunspell affix in configuration -CREATE TEXT SEARCH CONFIGURATION hunspell_tst ( - COPY=ispell_tst -); -ALTER TEXT SEARCH CONFIGURATION hunspell_tst ALTER MAPPING - REPLACE ispell WITH hunspell; -SELECT to_tsvector('hunspell_tst', 'Booking the skies after rebookings for footballklubber from a foot'); - to_tsvector ----------------------------------------------------------------------------------------------------- - 'ball':7 'book':1,5 'booking':1,5 'foot':7,10 'football':7 'footballklubber':7 'klubber':7 'sky':3 -(1 row) - -SELECT to_tsquery('hunspell_tst', 'footballklubber'); - to_tsquery --------------------------------------------------------------------------- - 'footballklubber' | 'foot' & 'ball' & 'klubber' | 'football' & 'klubber' -(1 row) - -SELECT to_tsquery('hunspell_tst', 'footballyklubber:b & rebookings:A & sky'); - to_tsquery ------------------------------------------------------------------------- - 'foot':B & 'ball':B & 'klubber':B & ( 'booking':A | 'book':A ) & 'sky' -(1 row) - -SELECT to_tsquery('hunspell_tst', 'footballyklubber:b <-> sky'); - to_tsquery -------------------------------------------------- - ( 'foot':B & 'ball':B & 'klubber':B ) <-> 'sky' -(1 row) - -SELECT phraseto_tsquery('hunspell_tst', 'footballyklubber sky'); - phraseto_tsquery -------------------------------------------- - ( 'foot' & 'ball' & 'klubber' ) <-> 'sky' -(1 row) - --- Test ispell dictionary with hunspell affix with FLAG long in configuration -ALTER TEXT SEARCH CONFIGURATION hunspell_tst ALTER MAPPING - REPLACE hunspell WITH hunspell_long; -SELECT to_tsvector('hunspell_tst', 'Booking the skies after rebookings for footballklubber from a foot'); - to_tsvector ----------------------------------------------------------------------------------------------------- - 'ball':7 'book':1,5 'booking':1,5 'foot':7,10 'football':7 'footballklubber':7 'klubber':7 'sky':3 -(1 row) - -SELECT to_tsquery('hunspell_tst', 'footballklubber'); - to_tsquery --------------------------------------------------------------------------- - 'footballklubber' | 'foot' & 'ball' & 'klubber' | 'football' & 'klubber' -(1 row) - -SELECT to_tsquery('hunspell_tst', 'footballyklubber:b & rebookings:A & sky'); 
- to_tsquery ------------------------------------------------------------------------- - 'foot':B & 'ball':B & 'klubber':B & ( 'booking':A | 'book':A ) & 'sky' -(1 row) - --- Test ispell dictionary with hunspell affix with FLAG num in configuration -ALTER TEXT SEARCH CONFIGURATION hunspell_tst ALTER MAPPING - REPLACE hunspell_long WITH hunspell_num; -SELECT to_tsvector('hunspell_tst', 'Booking the skies after rebookings for footballklubber from a foot'); - to_tsvector ----------------------------------------------------------------------------------------------------- - 'ball':7 'book':1,5 'booking':1,5 'foot':7,10 'football':7 'footballklubber':7 'klubber':7 'sky':3 -(1 row) - -SELECT to_tsquery('hunspell_tst', 'footballklubber'); - to_tsquery --------------------------------------------------------------------------- - 'footballklubber' | 'foot' & 'ball' & 'klubber' | 'football' & 'klubber' -(1 row) - -SELECT to_tsquery('hunspell_tst', 'footballyklubber:b & rebookings:A & sky'); - to_tsquery ------------------------------------------------------------------------- - 'foot':B & 'ball':B & 'klubber':B & ( 'booking':A | 'book':A ) & 'sky' -(1 row) - --- Test synonym dictionary in configuration -CREATE TEXT SEARCH CONFIGURATION synonym_tst ( - COPY=english -); -ALTER TEXT SEARCH CONFIGURATION synonym_tst ALTER MAPPING FOR - asciiword, hword_asciipart, asciihword - WITH synonym, english_stem; -SELECT to_tsvector('synonym_tst', 'Postgresql is often called as postgres or pgsql and pronounced as postgre'); - to_tsvector ---------------------------------------------------- - 'call':4 'often':3 'pgsql':1,6,8,12 'pronounc':10 -(1 row) - -SELECT to_tsvector('synonym_tst', 'Most common mistake is to write Gogle instead of Google'); - to_tsvector ----------------------------------------------------------- - 'common':2 'googl':7,10 'instead':8 'mistak':3 'write':6 -(1 row) - -SELECT to_tsvector('synonym_tst', 'Indexes or indices - Which is right plural form of index?'); - to_tsvector ----------------------------------------------- - 'form':8 'index':1,3,10 'plural':7 'right':6 -(1 row) - -SELECT to_tsquery('synonym_tst', 'Index & indices'); - to_tsquery ---------------------- - 'index' & 'index':* -(1 row) - --- test thesaurus in configuration --- see thesaurus_sample.ths to understand 'odd' resulting tsvector -CREATE TEXT SEARCH CONFIGURATION thesaurus_tst ( - COPY=synonym_tst -); -ALTER TEXT SEARCH CONFIGURATION thesaurus_tst ALTER MAPPING FOR - asciiword, hword_asciipart, asciihword - WITH synonym, thesaurus, english_stem; -SELECT to_tsvector('thesaurus_tst', 'one postgres one two one two three one'); - to_tsvector ----------------------------------- - '1':1,5 '12':3 '123':4 'pgsql':2 -(1 row) - -SELECT to_tsvector('thesaurus_tst', 'Supernovae star is very new star and usually called supernovae (abbreviation SN)'); - to_tsvector --------------------------------------------------------------- - 'abbrevi':10 'call':8 'new':4 'sn':1,9,11 'star':5 'usual':7 -(1 row) - -SELECT to_tsvector('thesaurus_tst', 'Booking tickets is looking like a booking a tickets'); - to_tsvector -------------------------------------------------------- - 'card':3,10 'invit':2,9 'like':6 'look':5 'order':1,8 -(1 row) - --- invalid: non-lowercase quoted identifiers -CREATE TEXT SEARCH DICTIONARY tsdict_case -( - Template = ispell, - "DictFile" = ispell_sample, - "AffFile" = ispell_sample -); -ERROR: unrecognized Ispell parameter: "DictFile" --- Test grammar for configurations -CREATE TEXT SEARCH CONFIGURATION dummy_tst 
(COPY=english); --- Overridden mapping change with duplicated tokens. -ALTER TEXT SEARCH CONFIGURATION dummy_tst - ALTER MAPPING FOR word, word WITH ispell; --- Not a token supported by the configuration's parser, fails. -ALTER TEXT SEARCH CONFIGURATION dummy_tst - DROP MAPPING FOR not_a_token, not_a_token; -ERROR: token type "not_a_token" does not exist --- Not a token supported by the configuration's parser, fails even --- with IF EXISTS. -ALTER TEXT SEARCH CONFIGURATION dummy_tst - DROP MAPPING IF EXISTS FOR not_a_token, not_a_token; -ERROR: token type "not_a_token" does not exist --- Token supported by the configuration's parser, succeeds. -ALTER TEXT SEARCH CONFIGURATION dummy_tst - DROP MAPPING FOR word, word; --- No mapping for token supported by the configuration's parser, fails. -ALTER TEXT SEARCH CONFIGURATION dummy_tst - DROP MAPPING FOR word; -ERROR: mapping for token type "word" does not exist --- Token supported by the configuration's parser, cannot be found, --- succeeds with IF EXISTS. -ALTER TEXT SEARCH CONFIGURATION dummy_tst - DROP MAPPING IF EXISTS FOR word, word; -NOTICE: mapping for token type "word" does not exist, skipping --- Re-add mapping, with duplicated tokens supported by the parser. -ALTER TEXT SEARCH CONFIGURATION dummy_tst - ADD MAPPING FOR word, word WITH ispell; --- Not a token supported by the configuration's parser, fails. -ALTER TEXT SEARCH CONFIGURATION dummy_tst - ADD MAPPING FOR not_a_token WITH ispell; -ERROR: token type "not_a_token" does not exist -DROP TEXT SEARCH CONFIGURATION dummy_tst; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/foreign_data.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/foreign_data.out --- /Users/admin/pgsql/src/test/regress/expected/foreign_data.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/foreign_data.out 2024-12-13 13:20:11 @@ -1,2208 +1,2 @@ --- --- Test foreign-data wrapper and server management. --- --- directory paths and dlsuffix are passed to us in environment variables -\getenv libdir PG_LIBDIR -\getenv dlsuffix PG_DLSUFFIX -\set regresslib :libdir '/regress' :dlsuffix -CREATE FUNCTION test_fdw_handler() - RETURNS fdw_handler - AS :'regresslib', 'test_fdw_handler' - LANGUAGE C; --- Clean up in case a prior regression run failed --- Suppress NOTICE messages when roles don't exist -SET client_min_messages TO 'warning'; -DROP ROLE IF EXISTS regress_foreign_data_user, regress_test_role, regress_test_role2, regress_test_role_super, regress_test_indirect, regress_unprivileged_role; -RESET client_min_messages; -CREATE ROLE regress_foreign_data_user LOGIN SUPERUSER; -SET SESSION AUTHORIZATION 'regress_foreign_data_user'; -CREATE ROLE regress_test_role; -CREATE ROLE regress_test_role2; -CREATE ROLE regress_test_role_super SUPERUSER; -CREATE ROLE regress_test_indirect; -CREATE ROLE regress_unprivileged_role; -CREATE FOREIGN DATA WRAPPER dummy; -COMMENT ON FOREIGN DATA WRAPPER dummy IS 'useless'; -CREATE FOREIGN DATA WRAPPER postgresql VALIDATOR postgresql_fdw_validator; --- At this point we should have 2 built-in wrappers and no servers. 
-SELECT fdwname, fdwhandler::regproc, fdwvalidator::regproc, fdwoptions FROM pg_foreign_data_wrapper ORDER BY 1, 2, 3; - fdwname | fdwhandler | fdwvalidator | fdwoptions -------------+------------+--------------------------+------------ - dummy | - | - | - postgresql | - | postgresql_fdw_validator | -(2 rows) - -SELECT srvname, srvoptions FROM pg_foreign_server; - srvname | srvoptions ----------+------------ -(0 rows) - -SELECT * FROM pg_user_mapping; - oid | umuser | umserver | umoptions ------+--------+----------+----------- -(0 rows) - --- CREATE FOREIGN DATA WRAPPER -CREATE FOREIGN DATA WRAPPER foo VALIDATOR bar; -- ERROR -ERROR: function bar(text[], oid) does not exist -CREATE FOREIGN DATA WRAPPER foo; -\dew - List of foreign-data wrappers - Name | Owner | Handler | Validator -------------+---------------------------+---------+-------------------------- - dummy | regress_foreign_data_user | - | - - foo | regress_foreign_data_user | - | - - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator -(3 rows) - -CREATE FOREIGN DATA WRAPPER foo; -- duplicate -ERROR: foreign-data wrapper "foo" already exists -DROP FOREIGN DATA WRAPPER foo; -CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1'); -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+---------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_foreign_data_user | - | - | | (testing '1') | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -DROP FOREIGN DATA WRAPPER foo; -CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1', testing '2'); -- ERROR -ERROR: option "testing" provided more than once -CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1', another '2'); -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+----------------------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_foreign_data_user | - | - | | (testing '1', another '2') | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -DROP FOREIGN DATA WRAPPER foo; -SET ROLE regress_test_role; -CREATE FOREIGN DATA WRAPPER foo; -- ERROR -ERROR: permission denied to create foreign-data wrapper "foo" -HINT: Must be superuser to create a foreign-data wrapper. 
-RESET ROLE; -CREATE FOREIGN DATA WRAPPER foo VALIDATOR postgresql_fdw_validator; -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+-------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_foreign_data_user | - | postgresql_fdw_validator | | | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - --- HANDLER related checks -CREATE FUNCTION invalid_fdw_handler() RETURNS int LANGUAGE SQL AS 'SELECT 1;'; -CREATE FOREIGN DATA WRAPPER test_fdw HANDLER invalid_fdw_handler; -- ERROR -ERROR: function invalid_fdw_handler must return type fdw_handler -CREATE FOREIGN DATA WRAPPER test_fdw HANDLER test_fdw_handler HANDLER invalid_fdw_handler; -- ERROR -ERROR: conflicting or redundant options -LINE 1: ...GN DATA WRAPPER test_fdw HANDLER test_fdw_handler HANDLER in... - ^ -CREATE FOREIGN DATA WRAPPER test_fdw HANDLER test_fdw_handler; -DROP FOREIGN DATA WRAPPER test_fdw; --- ALTER FOREIGN DATA WRAPPER -ALTER FOREIGN DATA WRAPPER foo OPTIONS (nonexistent 'fdw'); -- ERROR -ERROR: invalid option "nonexistent" -HINT: There are no valid options in this context. -ALTER FOREIGN DATA WRAPPER foo; -- ERROR -ERROR: syntax error at or near ";" -LINE 1: ALTER FOREIGN DATA WRAPPER foo; - ^ -ALTER FOREIGN DATA WRAPPER foo VALIDATOR bar; -- ERROR -ERROR: function bar(text[], oid) does not exist -ALTER FOREIGN DATA WRAPPER foo NO VALIDATOR; -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+-------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_foreign_data_user | - | - | | | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -ALTER FOREIGN DATA WRAPPER foo OPTIONS (a '1', b '2'); -ALTER FOREIGN DATA WRAPPER foo OPTIONS (SET c '4'); -- ERROR -ERROR: option "c" not found -ALTER FOREIGN DATA WRAPPER foo OPTIONS (DROP c); -- ERROR -ERROR: option "c" not found -ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD x '1', DROP x); -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+----------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_foreign_data_user | - | - | | (a '1', b '2') | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -ALTER FOREIGN DATA WRAPPER foo OPTIONS (DROP a, SET b '3', ADD c '4'); -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+----------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_foreign_data_user | - | - | | (b '3', c '4') | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -ALTER FOREIGN DATA WRAPPER foo OPTIONS (a '2'); -ALTER FOREIGN DATA WRAPPER foo OPTIONS (b '4'); -- ERROR -ERROR: option "b" provided more than once -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | 
Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+-----------------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_foreign_data_user | - | - | | (b '3', c '4', a '2') | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -SET ROLE regress_test_role; -ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD d '5'); -- ERROR -ERROR: permission denied to alter foreign-data wrapper "foo" -HINT: Must be superuser to alter a foreign-data wrapper. -SET ROLE regress_test_role_super; -ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD d '5'); -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+------------------------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_foreign_data_user | - | - | | (b '3', c '4', a '2', d '5') | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -ALTER FOREIGN DATA WRAPPER foo OWNER TO regress_test_role; -- ERROR -ERROR: permission denied to change owner of foreign-data wrapper "foo" -HINT: The owner of a foreign-data wrapper must be a superuser. -ALTER FOREIGN DATA WRAPPER foo OWNER TO regress_test_role_super; -ALTER ROLE regress_test_role_super NOSUPERUSER; -SET ROLE regress_test_role_super; -ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD e '6'); -- ERROR -ERROR: permission denied to alter foreign-data wrapper "foo" -HINT: Must be superuser to alter a foreign-data wrapper. -RESET ROLE; -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+------------------------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_test_role_super | - | - | | (b '3', c '4', a '2', d '5') | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -ALTER FOREIGN DATA WRAPPER foo RENAME TO foo1; -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+------------------------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo1 | regress_test_role_super | - | - | | (b '3', c '4', a '2', d '5') | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -ALTER FOREIGN DATA WRAPPER foo1 RENAME TO foo; --- HANDLER related checks -ALTER FOREIGN DATA WRAPPER foo HANDLER invalid_fdw_handler; -- ERROR -ERROR: function invalid_fdw_handler must return type fdw_handler -ALTER FOREIGN DATA WRAPPER foo HANDLER test_fdw_handler HANDLER anything; -- ERROR -ERROR: conflicting or redundant options -LINE 1: ...FOREIGN DATA WRAPPER foo HANDLER test_fdw_handler HANDLER an... 
- ^ -ALTER FOREIGN DATA WRAPPER foo HANDLER test_fdw_handler; -WARNING: changing the foreign-data wrapper handler can change behavior of existing foreign tables -DROP FUNCTION invalid_fdw_handler(); --- DROP FOREIGN DATA WRAPPER -DROP FOREIGN DATA WRAPPER nonexistent; -- ERROR -ERROR: foreign-data wrapper "nonexistent" does not exist -DROP FOREIGN DATA WRAPPER IF EXISTS nonexistent; -NOTICE: foreign-data wrapper "nonexistent" does not exist, skipping -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+------------------+--------------------------+-------------------+------------------------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_test_role_super | test_fdw_handler | - | | (b '3', c '4', a '2', d '5') | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -DROP ROLE regress_test_role_super; -- ERROR -ERROR: role "regress_test_role_super" cannot be dropped because some objects depend on it -DETAIL: owner of foreign-data wrapper foo -SET ROLE regress_test_role_super; -DROP FOREIGN DATA WRAPPER foo; -RESET ROLE; -DROP ROLE regress_test_role_super; -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+-------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(2 rows) - -CREATE FOREIGN DATA WRAPPER foo; -CREATE SERVER s1 FOREIGN DATA WRAPPER foo; -COMMENT ON SERVER s1 IS 'foreign server'; -CREATE USER MAPPING FOR current_user SERVER s1; -CREATE USER MAPPING FOR current_user SERVER s1; -- ERROR -ERROR: user mapping for "regress_foreign_data_user" already exists for server "s1" -CREATE USER MAPPING IF NOT EXISTS FOR current_user SERVER s1; -- NOTICE -NOTICE: user mapping for "regress_foreign_data_user" already exists for server "s1", skipping -\dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+-------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_foreign_data_user | - | - | | | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | -(3 rows) - -\des+ - List of foreign servers - Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description -------+---------------------------+----------------------+-------------------+------+---------+-------------+---------------- - s1 | regress_foreign_data_user | foo | | | | | foreign server -(1 row) - -\deu+ - List of user mappings - Server | User name | FDW options ---------+---------------------------+------------- - s1 | regress_foreign_data_user | -(1 row) - -DROP FOREIGN DATA WRAPPER foo; -- ERROR -ERROR: cannot drop foreign-data wrapper foo because other objects depend on it -DETAIL: server s1 depends on foreign-data wrapper foo -user mapping for regress_foreign_data_user on server s1 depends on server s1 -HINT: Use DROP ... CASCADE to drop the dependent objects too. 
-SET ROLE regress_test_role;
-DROP FOREIGN DATA WRAPPER foo CASCADE; -- ERROR
-ERROR: must be owner of foreign-data wrapper foo
-RESET ROLE;
-DROP FOREIGN DATA WRAPPER foo CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to server s1
-drop cascades to user mapping for regress_foreign_data_user on server s1
-\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | FDW options | Description
-------------+---------------------------+---------+--------------------------+-------------------+-------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(2 rows)
-
-\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description
-------+-------+----------------------+-------------------+------+---------+-------------+-------------
-(0 rows)
-
-\deu+
- List of user mappings
- Server | User name | FDW options
---------+-----------+-------------
-(0 rows)
-
--- exercise CREATE SERVER
-CREATE SERVER s1 FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: foreign-data wrapper "foo" does not exist
-CREATE FOREIGN DATA WRAPPER foo OPTIONS ("test wrapper" 'true');
-CREATE SERVER s1 FOREIGN DATA WRAPPER foo;
-CREATE SERVER s1 FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: server "s1" already exists
-CREATE SERVER IF NOT EXISTS s1 FOREIGN DATA WRAPPER foo; -- No ERROR, just NOTICE
-NOTICE: server "s1" already exists, skipping
-CREATE SERVER s2 FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b');
-CREATE SERVER s3 TYPE 'oracle' FOREIGN DATA WRAPPER foo;
-CREATE SERVER s4 TYPE 'oracle' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b');
-CREATE SERVER s5 VERSION '15.0' FOREIGN DATA WRAPPER foo;
-CREATE SERVER s6 VERSION '16.0' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b');
-CREATE SERVER s7 TYPE 'oracle' VERSION '17.0' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b');
-CREATE SERVER s8 FOREIGN DATA WRAPPER postgresql OPTIONS (foo '1'); -- ERROR
-ERROR: invalid option "foo"
-CREATE SERVER s8 FOREIGN DATA WRAPPER postgresql OPTIONS (host 'localhost', dbname 's8db');
-\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description
-------+---------------------------+----------------------+-------------------+--------+---------+-----------------------------------+-------------
- s1 | regress_foreign_data_user | foo | | | | |
- s2 | regress_foreign_data_user | foo | | | | (host 'a', dbname 'b') |
- s3 | regress_foreign_data_user | foo | | oracle | | |
- s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') |
- s5 | regress_foreign_data_user | foo | | | 15.0 | |
- s6 | regress_foreign_data_user | foo | | | 16.0 | (host 'a', dbname 'b') |
- s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') |
- s8 | regress_foreign_data_user | postgresql | | | | (host 'localhost', dbname 's8db') |
-(8 rows)
-
-SET ROLE regress_test_role;
-CREATE SERVER t1 FOREIGN DATA WRAPPER foo; -- ERROR: no usage on FDW
-ERROR: permission denied for foreign-data wrapper foo
-RESET ROLE;
-GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role;
-SET ROLE regress_test_role;
-CREATE SERVER t1 FOREIGN DATA WRAPPER foo;
-RESET ROLE;
-\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description
-------+---------------------------+----------------------+-------------------+--------+---------+-----------------------------------+-------------
- s1 | regress_foreign_data_user | foo | | | | |
- s2 | regress_foreign_data_user | foo | | | | (host 'a', dbname 'b') |
- s3 | regress_foreign_data_user | foo | | oracle | | |
- s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') |
- s5 | regress_foreign_data_user | foo | | | 15.0 | |
- s6 | regress_foreign_data_user | foo | | | 16.0 | (host 'a', dbname 'b') |
- s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') |
- s8 | regress_foreign_data_user | postgresql | | | | (host 'localhost', dbname 's8db') |
- t1 | regress_test_role | foo | | | | |
-(9 rows)
-
-REVOKE USAGE ON FOREIGN DATA WRAPPER foo FROM regress_test_role;
-GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_indirect;
-SET ROLE regress_test_role;
-CREATE SERVER t2 FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: permission denied for foreign-data wrapper foo
-RESET ROLE;
-GRANT regress_test_indirect TO regress_test_role;
-SET ROLE regress_test_role;
-CREATE SERVER t2 FOREIGN DATA WRAPPER foo;
-\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description
-------+---------------------------+----------------------+-------------------+--------+---------+-----------------------------------+-------------
- s1 | regress_foreign_data_user | foo | | | | |
- s2 | regress_foreign_data_user | foo | | | | (host 'a', dbname 'b') |
- s3 | regress_foreign_data_user | foo | | oracle | | |
- s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') |
- s5 | regress_foreign_data_user | foo | | | 15.0 | |
- s6 | regress_foreign_data_user | foo | | | 16.0 | (host 'a', dbname 'b') |
- s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') |
- s8 | regress_foreign_data_user | postgresql | | | | (host 'localhost', dbname 's8db') |
- t1 | regress_test_role | foo | | | | |
- t2 | regress_test_role | foo | | | | |
-(10 rows)
-
-RESET ROLE;
-REVOKE regress_test_indirect FROM regress_test_role;
--- ALTER SERVER
-ALTER SERVER s0; -- ERROR
-ERROR: syntax error at or near ";"
-LINE 1: ALTER SERVER s0;
- ^
-ALTER SERVER s0 OPTIONS (a '1'); -- ERROR
-ERROR: server "s0" does not exist
-ALTER SERVER s1 VERSION '1.0' OPTIONS (servername 's1');
-ALTER SERVER s2 VERSION '1.1';
-ALTER SERVER s3 OPTIONS ("tns name" 'orcl', port '1521');
-GRANT USAGE ON FOREIGN SERVER s1 TO regress_test_role;
-GRANT USAGE ON FOREIGN SERVER s6 TO regress_test_role2 WITH GRANT OPTION;
-\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description
-------+---------------------------+----------------------+-------------------------------------------------------+--------+---------+-----------------------------------+-------------
- s1 | regress_foreign_data_user | foo | regress_foreign_data_user=U/regress_foreign_data_user+| | 1.0 | (servername 's1') |
- | | | regress_test_role=U/regress_foreign_data_user | | | |
- s2 | regress_foreign_data_user | foo | | | 1.1 | (host 'a', dbname 'b') |
- s3 | regress_foreign_data_user | foo | | oracle | | ("tns name" 'orcl', port '1521') |
- s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') |
- s5 | regress_foreign_data_user | foo | | | 15.0 | |
- s6 | regress_foreign_data_user | foo | regress_foreign_data_user=U/regress_foreign_data_user+| | 16.0 | (host 'a', dbname 'b') |
- | | | regress_test_role2=U*/regress_foreign_data_user | | | |
- s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') |
- s8 | regress_foreign_data_user | postgresql | | | | (host 'localhost', dbname 's8db') |
- t1 | regress_test_role | foo | | | | |
- t2 | regress_test_role | foo | | | | |
-(10 rows)
-
-SET ROLE regress_test_role;
-ALTER SERVER s1 VERSION '1.1'; -- ERROR
-ERROR: must be owner of foreign server s1
-ALTER SERVER s1 OWNER TO regress_test_role; -- ERROR
-ERROR: must be owner of foreign server s1
-RESET ROLE;
-ALTER SERVER s1 OWNER TO regress_test_role;
-GRANT regress_test_role2 TO regress_test_role;
-SET ROLE regress_test_role;
-ALTER SERVER s1 VERSION '1.1';
-ALTER SERVER s1 OWNER TO regress_test_role2; -- ERROR
-ERROR: permission denied for foreign-data wrapper foo
-RESET ROLE;
-ALTER SERVER s8 OPTIONS (foo '1'); -- ERROR option validation
-ERROR: invalid option "foo"
-ALTER SERVER s8 OPTIONS (connect_timeout '30', SET dbname 'db1', DROP host);
-SET ROLE regress_test_role;
-ALTER SERVER s1 OWNER TO regress_test_indirect; -- ERROR
-ERROR: must be able to SET ROLE "regress_test_indirect"
-RESET ROLE;
-GRANT regress_test_indirect TO regress_test_role;
-SET ROLE regress_test_role;
-ALTER SERVER s1 OWNER TO regress_test_indirect;
-RESET ROLE;
-GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_indirect;
-SET ROLE regress_test_role;
-ALTER SERVER s1 OWNER TO regress_test_indirect;
-RESET ROLE;
-DROP ROLE regress_test_indirect; -- ERROR
-ERROR: role "regress_test_indirect" cannot be dropped because some objects depend on it
-DETAIL: privileges for foreign-data wrapper foo
-owner of server s1
-\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description
-------+---------------------------+----------------------+-------------------------------------------------------+--------+---------+--------------------------------------+-------------
- s1 | regress_test_indirect | foo | regress_test_indirect=U/regress_test_indirect | | 1.1 | (servername 's1') |
- s2 | regress_foreign_data_user | foo | | | 1.1 | (host 'a', dbname 'b') |
- s3 | regress_foreign_data_user | foo | | oracle | | ("tns name" 'orcl', port '1521') |
- s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') |
- s5 | regress_foreign_data_user | foo | | | 15.0 | |
- s6 | regress_foreign_data_user | foo | regress_foreign_data_user=U/regress_foreign_data_user+| | 16.0 | (host 'a', dbname 'b') |
- | | | regress_test_role2=U*/regress_foreign_data_user | | | |
- s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') |
- s8 | regress_foreign_data_user | postgresql | | | | (dbname 'db1', connect_timeout '30') |
- t1 | regress_test_role | foo | | | | |
- t2 | regress_test_role | foo | | | | |
-(10 rows)
-
-ALTER SERVER s8 RENAME to s8new;
-\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description
--------+---------------------------+----------------------+-------------------------------------------------------+--------+---------+--------------------------------------+-------------
- s1 | regress_test_indirect | foo | regress_test_indirect=U/regress_test_indirect | | 1.1 | (servername 's1') |
- s2 | regress_foreign_data_user | foo | | | 1.1 | (host 'a', dbname 'b') |
- s3 | regress_foreign_data_user | foo | | oracle | | ("tns name" 'orcl', port '1521') |
- s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') |
- s5 | regress_foreign_data_user | foo | | | 15.0 | |
- s6 | regress_foreign_data_user | foo | regress_foreign_data_user=U/regress_foreign_data_user+| | 16.0 | (host 'a', dbname 'b') |
- | | | regress_test_role2=U*/regress_foreign_data_user | | | |
- s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') |
- s8new | regress_foreign_data_user | postgresql | | | | (dbname 'db1', connect_timeout '30') |
- t1 | regress_test_role | foo | | | | |
- t2 | regress_test_role | foo | | | | |
-(10 rows)
-
-ALTER SERVER s8new RENAME to s8;
--- DROP SERVER
-DROP SERVER nonexistent; -- ERROR
-ERROR: server "nonexistent" does not exist
-DROP SERVER IF EXISTS nonexistent;
-NOTICE: server "nonexistent" does not exist, skipping
-\des
- List of foreign servers
- Name | Owner | Foreign-data wrapper
-------+---------------------------+----------------------
- s1 | regress_test_indirect | foo
- s2 | regress_foreign_data_user | foo
- s3 | regress_foreign_data_user | foo
- s4 | regress_foreign_data_user | foo
- s5 | regress_foreign_data_user | foo
- s6 | regress_foreign_data_user | foo
- s7 | regress_foreign_data_user | foo
- s8 | regress_foreign_data_user | postgresql
- t1 | regress_test_role | foo
- t2 | regress_test_role | foo
-(10 rows)
-
-SET ROLE regress_test_role;
-DROP SERVER s2; -- ERROR
-ERROR: must be owner of foreign server s2
-DROP SERVER s1;
-RESET ROLE;
-\des
- List of foreign servers
- Name | Owner | Foreign-data wrapper
-------+---------------------------+----------------------
- s2 | regress_foreign_data_user | foo
- s3 | regress_foreign_data_user | foo
- s4 | regress_foreign_data_user | foo
- s5 | regress_foreign_data_user | foo
- s6 | regress_foreign_data_user | foo
- s7 | regress_foreign_data_user | foo
- s8 | regress_foreign_data_user | postgresql
- t1 | regress_test_role | foo
- t2 | regress_test_role | foo
-(9 rows)
-
-ALTER SERVER s2 OWNER TO regress_test_role;
-SET ROLE regress_test_role;
-DROP SERVER s2;
-RESET ROLE;
-\des
- List of foreign servers
- Name | Owner | Foreign-data wrapper
-------+---------------------------+----------------------
- s3 | regress_foreign_data_user | foo
- s4 | regress_foreign_data_user | foo
- s5 | regress_foreign_data_user | foo
- s6 | regress_foreign_data_user | foo
- s7 | regress_foreign_data_user | foo
- s8 | regress_foreign_data_user | postgresql
- t1 | regress_test_role | foo
- t2 | regress_test_role | foo
-(8 rows)
-
-CREATE USER MAPPING FOR current_user SERVER s3;
-\deu
- List of user mappings
- Server | User name
---------+---------------------------
- s3 | regress_foreign_data_user
-(1 row)
-
-DROP SERVER s3; -- ERROR
-ERROR: cannot drop server s3 because other objects depend on it
-DETAIL: user mapping for regress_foreign_data_user on server s3 depends on server s3
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP SERVER s3 CASCADE;
-NOTICE: drop cascades to user mapping for regress_foreign_data_user on server s3
-\des
- List of foreign servers
- Name | Owner | Foreign-data wrapper
-------+---------------------------+----------------------
- s4 | regress_foreign_data_user | foo
- s5 | regress_foreign_data_user | foo
- s6 | regress_foreign_data_user | foo
- s7 | regress_foreign_data_user | foo
- s8 | regress_foreign_data_user | postgresql
- t1 | regress_test_role | foo
- t2 | regress_test_role | foo
-(7 rows)
-
-\deu
-List of user mappings
- Server | User name
---------+-----------
-(0 rows)
-
--- CREATE USER MAPPING
-CREATE USER MAPPING FOR regress_test_missing_role SERVER s1; -- ERROR
-ERROR: role "regress_test_missing_role" does not exist
-CREATE USER MAPPING FOR current_user SERVER s1; -- ERROR
-ERROR: server "s1" does not exist
-CREATE USER MAPPING FOR current_user SERVER s4;
-CREATE USER MAPPING FOR user SERVER s4; -- ERROR duplicate
-ERROR: user mapping for "regress_foreign_data_user" already exists for server "s4"
-CREATE USER MAPPING FOR public SERVER s4 OPTIONS ("this mapping" 'is public');
-CREATE USER MAPPING FOR user SERVER s8 OPTIONS (username 'test', password 'secret'); -- ERROR
-ERROR: invalid option "username"
-HINT: Perhaps you meant the option "user".
-CREATE USER MAPPING FOR user SERVER s8 OPTIONS (user 'test', password 'secret');
-ALTER SERVER s5 OWNER TO regress_test_role;
-ALTER SERVER s6 OWNER TO regress_test_indirect;
-SET ROLE regress_test_role;
-CREATE USER MAPPING FOR current_user SERVER s5;
-CREATE USER MAPPING FOR current_user SERVER s6 OPTIONS (username 'test');
-CREATE USER MAPPING FOR current_user SERVER s7; -- ERROR
-ERROR: permission denied for foreign server s7
-CREATE USER MAPPING FOR public SERVER s8; -- ERROR
-ERROR: must be owner of foreign server s8
-RESET ROLE;
-ALTER SERVER t1 OWNER TO regress_test_indirect;
-SET ROLE regress_test_role;
-CREATE USER MAPPING FOR current_user SERVER t1 OPTIONS (username 'bob', password 'boo');
-CREATE USER MAPPING FOR public SERVER t1;
-RESET ROLE;
-\deu
- List of user mappings
- Server | User name
---------+---------------------------
- s4 | public
- s4 | regress_foreign_data_user
- s5 | regress_test_role
- s6 | regress_test_role
- s8 | regress_foreign_data_user
- t1 | public
- t1 | regress_test_role
-(7 rows)
-
--- ALTER USER MAPPING
-ALTER USER MAPPING FOR regress_test_missing_role SERVER s4 OPTIONS (gotcha 'true'); -- ERROR
-ERROR: role "regress_test_missing_role" does not exist
-ALTER USER MAPPING FOR user SERVER ss4 OPTIONS (gotcha 'true'); -- ERROR
-ERROR: server "ss4" does not exist
-ALTER USER MAPPING FOR public SERVER s5 OPTIONS (gotcha 'true'); -- ERROR
-ERROR: user mapping for "public" does not exist for server "s5"
-ALTER USER MAPPING FOR current_user SERVER s8 OPTIONS (username 'test'); -- ERROR
-ERROR: invalid option "username"
-HINT: Perhaps you meant the option "user".
-ALTER USER MAPPING FOR current_user SERVER s8 OPTIONS (DROP user, SET password 'public');
-SET ROLE regress_test_role;
-ALTER USER MAPPING FOR current_user SERVER s5 OPTIONS (ADD modified '1');
-ALTER USER MAPPING FOR public SERVER s4 OPTIONS (ADD modified '1'); -- ERROR
-ERROR: must be owner of foreign server s4
-ALTER USER MAPPING FOR public SERVER t1 OPTIONS (ADD modified '1');
-RESET ROLE;
-\deu+
- List of user mappings
- Server | User name | FDW options
---------+---------------------------+----------------------------------
- s4 | public | ("this mapping" 'is public')
- s4 | regress_foreign_data_user |
- s5 | regress_test_role | (modified '1')
- s6 | regress_test_role | (username 'test')
- s8 | regress_foreign_data_user | (password 'public')
- t1 | public | (modified '1')
- t1 | regress_test_role | (username 'bob', password 'boo')
-(7 rows)
-
--- DROP USER MAPPING
-DROP USER MAPPING FOR regress_test_missing_role SERVER s4; -- ERROR
-ERROR: role "regress_test_missing_role" does not exist
-DROP USER MAPPING FOR user SERVER ss4;
-ERROR: server "ss4" does not exist
-DROP USER MAPPING FOR public SERVER s7; -- ERROR
-ERROR: user mapping for "public" does not exist for server "s7"
-DROP USER MAPPING IF EXISTS FOR regress_test_missing_role SERVER s4;
-NOTICE: role "regress_test_missing_role" does not exist, skipping
-DROP USER MAPPING IF EXISTS FOR user SERVER ss4;
-NOTICE: server "ss4" does not exist, skipping
-DROP USER MAPPING IF EXISTS FOR public SERVER s7;
-NOTICE: user mapping for "public" does not exist for server "s7", skipping
-CREATE USER MAPPING FOR public SERVER s8;
-SET ROLE regress_test_role;
-DROP USER MAPPING FOR public SERVER s8; -- ERROR
-ERROR: must be owner of foreign server s8
-RESET ROLE;
-DROP SERVER s7;
-\deu
- List of user mappings
- Server | User name
---------+---------------------------
- s4 | public
- s4 | regress_foreign_data_user
- s5 | regress_test_role
- s6 | regress_test_role
- s8 | public
- s8 | regress_foreign_data_user
- t1 | public
- t1 | regress_test_role
-(8 rows)
-
--- CREATE FOREIGN TABLE
-CREATE SCHEMA foreign_schema;
-CREATE SERVER s0 FOREIGN DATA WRAPPER dummy;
-CREATE FOREIGN TABLE ft1 (); -- ERROR
-ERROR: syntax error at or near ";"
-LINE 1: CREATE FOREIGN TABLE ft1 ();
- ^
-CREATE FOREIGN TABLE ft1 () SERVER no_server; -- ERROR
-ERROR: server "no_server" does not exist
-CREATE FOREIGN TABLE ft1 (
- c1 integer OPTIONS ("param 1" 'val1') PRIMARY KEY,
- c2 text OPTIONS (param2 'val2', param3 'val3'),
- c3 date
-) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -- ERROR
-ERROR: primary key constraints are not supported on foreign tables
-LINE 2: c1 integer OPTIONS ("param 1" 'val1') PRIMARY KEY,
- ^
-CREATE TABLE ref_table (id integer PRIMARY KEY);
-CREATE FOREIGN TABLE ft1 (
- c1 integer OPTIONS ("param 1" 'val1') REFERENCES ref_table (id),
- c2 text OPTIONS (param2 'val2', param3 'val3'),
- c3 date
-) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -- ERROR
-ERROR: foreign key constraints are not supported on foreign tables
-LINE 2: c1 integer OPTIONS ("param 1" 'val1') REFERENCES ref_table ...
- ^
-DROP TABLE ref_table;
-CREATE FOREIGN TABLE ft1 (
- c1 integer OPTIONS ("param 1" 'val1') NOT NULL,
- c2 text OPTIONS (param2 'val2', param3 'val3'),
- c3 date,
- UNIQUE (c3)
-) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -- ERROR
-ERROR: unique constraints are not supported on foreign tables
-LINE 5: UNIQUE (c3)
- ^
-CREATE FOREIGN TABLE ft1 (
- c1 integer OPTIONS ("param 1" 'val1') NOT NULL,
- c2 text OPTIONS (param2 'val2', param3 'val3') CHECK (c2 <> ''),
- c3 date,
- CHECK (c3 BETWEEN '1994-01-01'::date AND '1994-01-31'::date)
-) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value');
-COMMENT ON FOREIGN TABLE ft1 IS 'ft1';
-COMMENT ON COLUMN ft1.c1 IS 'ft1.c1';
-\d+ ft1
- Foreign table "public.ft1"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+--------------------------------+----------+--------------+-------------
- c1 | integer | | not null | | ("param 1" 'val1') | plain | | ft1.c1
- c2 | text | | | | (param2 'val2', param3 'val3') | extended | |
- c3 | date | | | | | plain | |
-Check constraints:
- "ft1_c2_check" CHECK (c2 <> ''::text)
- "ft1_c3_check" CHECK (c3 >= '01-01-1994'::date AND c3 <= '01-31-1994'::date)
-Not-null constraints:
- "ft1_c1_not_null" NOT NULL "c1"
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-
-\det+
- List of foreign tables
- Schema | Table | Server | FDW options | Description
---------+-------+--------+-------------------------------------------------+-------------
- public | ft1 | s0 | (delimiter ',', quote '"', "be quoted" 'value') | ft1
-(1 row)
-
-CREATE INDEX id_ft1_c2 ON ft1 (c2); -- ERROR
-ERROR: cannot create index on relation "ft1"
-DETAIL: This operation is not supported for foreign tables.
-SELECT * FROM ft1; -- ERROR
-ERROR: foreign-data wrapper "dummy" has no handler
-EXPLAIN SELECT * FROM ft1; -- ERROR
-ERROR: foreign-data wrapper "dummy" has no handler
-CREATE TABLE lt1 (a INT) PARTITION BY RANGE (a);
-CREATE FOREIGN TABLE ft_part1
- PARTITION OF lt1 FOR VALUES FROM (0) TO (1000) SERVER s0;
-CREATE INDEX ON lt1 (a); -- skips partition
-CREATE UNIQUE INDEX ON lt1 (a); -- ERROR
-ERROR: cannot create unique index on partitioned table "lt1"
-DETAIL: Table "lt1" contains partitions that are foreign tables.
-ALTER TABLE lt1 ADD PRIMARY KEY (a); -- ERROR
-ERROR: cannot create unique index on partitioned table "lt1"
-DETAIL: Table "lt1" contains partitions that are foreign tables.
-DROP TABLE lt1;
-CREATE TABLE lt1 (a INT) PARTITION BY RANGE (a);
-CREATE INDEX ON lt1 (a);
-CREATE FOREIGN TABLE ft_part1
- PARTITION OF lt1 FOR VALUES FROM (0) TO (1000) SERVER s0;
-CREATE FOREIGN TABLE ft_part2 (a INT) SERVER s0;
-ALTER TABLE lt1 ATTACH PARTITION ft_part2 FOR VALUES FROM (1000) TO (2000);
-DROP FOREIGN TABLE ft_part1, ft_part2;
-CREATE UNIQUE INDEX ON lt1 (a);
-ALTER TABLE lt1 ADD PRIMARY KEY (a);
-CREATE FOREIGN TABLE ft_part1
- PARTITION OF lt1 FOR VALUES FROM (0) TO (1000) SERVER s0; -- ERROR
-ERROR: cannot create foreign partition of partitioned table "lt1"
-DETAIL: Table "lt1" contains indexes that are unique.
-CREATE FOREIGN TABLE ft_part2 (a INT NOT NULL) SERVER s0;
-ALTER TABLE lt1 ATTACH PARTITION ft_part2
- FOR VALUES FROM (1000) TO (2000); -- ERROR
-ERROR: cannot attach foreign table "ft_part2" as partition of partitioned table "lt1"
-DETAIL: Partitioned table "lt1" contains unique indexes.
-DROP TABLE lt1;
-DROP FOREIGN TABLE ft_part2;
-CREATE TABLE lt1 (a INT) PARTITION BY RANGE (a);
-CREATE INDEX ON lt1 (a);
-CREATE TABLE lt1_part1
- PARTITION OF lt1 FOR VALUES FROM (0) TO (1000)
- PARTITION BY RANGE (a);
-CREATE FOREIGN TABLE ft_part_1_1
- PARTITION OF lt1_part1 FOR VALUES FROM (0) TO (100) SERVER s0;
-CREATE FOREIGN TABLE ft_part_1_2 (a INT) SERVER s0;
-ALTER TABLE lt1_part1 ATTACH PARTITION ft_part_1_2 FOR VALUES FROM (100) TO (200);
-CREATE UNIQUE INDEX ON lt1 (a);
-ERROR: cannot create unique index on partitioned table "lt1"
-DETAIL: Table "lt1" contains partitions that are foreign tables.
-ALTER TABLE lt1 ADD PRIMARY KEY (a);
-ERROR: cannot create unique index on partitioned table "lt1_part1"
-DETAIL: Table "lt1_part1" contains partitions that are foreign tables.
-DROP FOREIGN TABLE ft_part_1_1, ft_part_1_2;
-CREATE UNIQUE INDEX ON lt1 (a);
-ALTER TABLE lt1 ADD PRIMARY KEY (a);
-CREATE FOREIGN TABLE ft_part_1_1
- PARTITION OF lt1_part1 FOR VALUES FROM (0) TO (100) SERVER s0;
-ERROR: cannot create foreign partition of partitioned table "lt1_part1"
-DETAIL: Table "lt1_part1" contains indexes that are unique.
-CREATE FOREIGN TABLE ft_part_1_2 (a INT NOT NULL) SERVER s0;
-ALTER TABLE lt1_part1 ATTACH PARTITION ft_part_1_2 FOR VALUES FROM (100) TO (200);
-ERROR: cannot attach foreign table "ft_part_1_2" as partition of partitioned table "lt1_part1"
-DETAIL: Partitioned table "lt1_part1" contains unique indexes.
-DROP TABLE lt1;
-DROP FOREIGN TABLE ft_part_1_2;
--- ALTER FOREIGN TABLE
-COMMENT ON FOREIGN TABLE ft1 IS 'foreign table';
-COMMENT ON FOREIGN TABLE ft1 IS NULL;
-COMMENT ON COLUMN ft1.c1 IS 'foreign column';
-COMMENT ON COLUMN ft1.c1 IS NULL;
-ALTER FOREIGN TABLE ft1 ADD COLUMN c4 integer;
-ALTER FOREIGN TABLE ft1 ADD COLUMN c5 integer DEFAULT 0;
-ALTER FOREIGN TABLE ft1 ADD COLUMN c6 integer;
-ALTER FOREIGN TABLE ft1 ADD COLUMN c7 integer NOT NULL;
-ALTER FOREIGN TABLE ft1 ADD COLUMN c8 integer;
-ALTER FOREIGN TABLE ft1 ADD COLUMN c9 integer;
-ALTER FOREIGN TABLE ft1 ADD COLUMN c10 integer OPTIONS (p1 'v1');
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c4 SET DEFAULT 0;
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c5 DROP DEFAULT;
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c6 SET NOT NULL;
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c7 DROP NOT NULL;
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE char(10) USING '0'; -- ERROR
-ERROR: "ft1" is not a table
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE char(10);
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET DATA TYPE text;
-ALTER FOREIGN TABLE ft1 ALTER COLUMN xmin OPTIONS (ADD p1 'v1'); -- ERROR
-ERROR: cannot alter system column "xmin"
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c7 OPTIONS (ADD p1 'v1', ADD p2 'v2'),
- ALTER COLUMN c8 OPTIONS (ADD p1 'v1', ADD p2 'v2');
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 OPTIONS (SET p2 'V2', DROP p1);
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 SET STATISTICS 10000;
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 SET (n_distinct = 100);
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET STATISTICS -1;
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET STORAGE PLAIN;
-\d+ ft1
- Foreign table "public.ft1"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+--------------------------------+----------+--------------+-------------
- c1 | integer | | not null | | ("param 1" 'val1') | plain | 10000 |
- c2 | text | | | | (param2 'val2', param3 'val3') | extended | |
- c3 | date | | | | | plain | |
- c4 | integer | | | 0 | | plain | |
- c5 | integer | | | | | plain | |
- c6 | integer | | not null | | | plain | |
- c7 | integer | | | | (p1 'v1', p2 'v2') | plain | |
- c8 | text | | | | (p2 'V2') | plain | |
- c9 | integer | | | | | plain | |
- c10 | integer | | | | (p1 'v1') | plain | |
-Check constraints:
- "ft1_c2_check" CHECK (c2 <> ''::text)
- "ft1_c3_check" CHECK (c3 >= '01-01-1994'::date AND c3 <= '01-31-1994'::date)
-Not-null constraints:
- "ft1_c1_not_null" NOT NULL "c1"
- "ft1_c6_not_null" NOT NULL "c6"
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-
--- can't change the column type if it's used elsewhere
-CREATE TABLE use_ft1_column_type (x ft1);
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET DATA TYPE integer; -- ERROR
-ERROR: cannot alter foreign table "ft1" because column "use_ft1_column_type.x" uses its row type
-DROP TABLE use_ft1_column_type;
-ALTER FOREIGN TABLE ft1 ADD PRIMARY KEY (c7); -- ERROR
-ERROR: primary key constraints are not supported on foreign tables
-LINE 1: ALTER FOREIGN TABLE ft1 ADD PRIMARY KEY (c7);
- ^
-ALTER FOREIGN TABLE ft1 ADD CONSTRAINT ft1_c9_check CHECK (c9 < 0) NOT VALID;
-ALTER FOREIGN TABLE ft1 ALTER CONSTRAINT ft1_c9_check DEFERRABLE; -- ERROR
-ERROR: ALTER action ALTER CONSTRAINT cannot be performed on relation "ft1"
-DETAIL: This operation is not supported for foreign tables.
-ALTER FOREIGN TABLE ft1 DROP CONSTRAINT ft1_c9_check;
-ALTER FOREIGN TABLE ft1 DROP CONSTRAINT no_const; -- ERROR
-ERROR: constraint "no_const" of relation "ft1" does not exist
-ALTER FOREIGN TABLE ft1 DROP CONSTRAINT IF EXISTS no_const;
-NOTICE: constraint "no_const" of relation "ft1" does not exist, skipping
-ALTER FOREIGN TABLE ft1 OWNER TO regress_test_role;
-ALTER FOREIGN TABLE ft1 OPTIONS (DROP delimiter, SET quote '~', ADD escape '@');
-ALTER FOREIGN TABLE ft1 DROP COLUMN no_column; -- ERROR
-ERROR: column "no_column" of relation "ft1" does not exist
-ALTER FOREIGN TABLE ft1 DROP COLUMN IF EXISTS no_column;
-NOTICE: column "no_column" of relation "ft1" does not exist, skipping
-ALTER FOREIGN TABLE ft1 DROP COLUMN c9;
-ALTER FOREIGN TABLE ft1 ADD COLUMN c11 serial;
-ALTER FOREIGN TABLE ft1 SET SCHEMA foreign_schema;
-ALTER FOREIGN TABLE ft1 SET TABLESPACE ts; -- ERROR
-ERROR: relation "ft1" does not exist
-ALTER SEQUENCE foreign_schema.ft1_c11_seq SET SCHEMA public; -- ERROR
-ERROR: cannot move an owned sequence into another schema
-DETAIL: Sequence "ft1_c11_seq" is linked to table "ft1".
-ALTER FOREIGN TABLE foreign_schema.ft1 RENAME c1 TO foreign_column_1;
-ALTER FOREIGN TABLE foreign_schema.ft1 RENAME TO foreign_table_1;
-\d foreign_schema.foreign_table_1
- Foreign table "foreign_schema.foreign_table_1"
- Column | Type | Collation | Nullable | Default | FDW options
-------------------+---------+-----------+----------+-------------------------------------------------+--------------------------------
- foreign_column_1 | integer | | not null | | ("param 1" 'val1')
- c2 | text | | | | (param2 'val2', param3 'val3')
- c3 | date | | | |
- c4 | integer | | | 0 |
- c5 | integer | | | |
- c6 | integer | | not null | |
- c7 | integer | | | | (p1 'v1', p2 'v2')
- c8 | text | | | | (p2 'V2')
- c10 | integer | | | | (p1 'v1')
- c11 | integer | | not null | nextval('foreign_schema.ft1_c11_seq'::regclass) |
-Check constraints:
- "ft1_c2_check" CHECK (c2 <> ''::text)
- "ft1_c3_check" CHECK (c3 >= '01-01-1994'::date AND c3 <= '01-31-1994'::date)
-Server: s0
-FDW options: (quote '~', "be quoted" 'value', escape '@')
-
--- alter noexisting table
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c4 integer;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c6 integer;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c7 integer NOT NULL;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c8 integer;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c9 integer;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c10 integer OPTIONS (p1 'v1');
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c6 SET NOT NULL;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c7 DROP NOT NULL;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c8 TYPE char(10);
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c8 SET DATA TYPE text;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c7 OPTIONS (ADD p1 'v1', ADD p2 'v2'),
- ALTER COLUMN c8 OPTIONS (ADD p1 'v1', ADD p2 'v2');
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c8 OPTIONS (SET p2 'V2', DROP p1);
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP CONSTRAINT IF EXISTS no_const;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP CONSTRAINT ft1_c1_check;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 OWNER TO regress_test_role;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 OPTIONS (DROP delimiter, SET quote '~', ADD escape '@');
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP COLUMN IF EXISTS no_column;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP COLUMN c9;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 SET SCHEMA foreign_schema;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 RENAME c1 TO foreign_column_1;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 RENAME TO foreign_table_1;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
--- Information schema
-SELECT * FROM information_schema.foreign_data_wrappers ORDER BY 1, 2;
- foreign_data_wrapper_catalog | foreign_data_wrapper_name | authorization_identifier | library_name | foreign_data_wrapper_language
-------------------------------+---------------------------+---------------------------+--------------+-------------------------------
- regression | dummy | regress_foreign_data_user | | c
- regression | foo | regress_foreign_data_user | | c
- regression | postgresql | regress_foreign_data_user | | c
-(3 rows)
-
-SELECT * FROM information_schema.foreign_data_wrapper_options ORDER BY 1, 2, 3;
- foreign_data_wrapper_catalog | foreign_data_wrapper_name | option_name | option_value
-------------------------------+---------------------------+--------------+--------------
- regression | foo | test wrapper | true
-(1 row)
-
-SELECT * FROM information_schema.foreign_servers ORDER BY 1, 2;
- foreign_server_catalog | foreign_server_name | foreign_data_wrapper_catalog | foreign_data_wrapper_name | foreign_server_type | foreign_server_version | authorization_identifier
-------------------------+---------------------+------------------------------+---------------------------+---------------------+------------------------+---------------------------
- regression | s0 | regression | dummy | | | regress_foreign_data_user
- regression | s4 | regression | foo | oracle | | regress_foreign_data_user
- regression | s5 | regression | foo | | 15.0 | regress_test_role
- regression | s6 | regression | foo | | 16.0 | regress_test_indirect
- regression | s8 | regression | postgresql | | | regress_foreign_data_user
- regression | t1 | regression | foo | | | regress_test_indirect
- regression | t2 | regression | foo | | | regress_test_role
-(7 rows)
-
-SELECT * FROM information_schema.foreign_server_options ORDER BY 1, 2, 3;
- foreign_server_catalog | foreign_server_name | option_name | option_value
-------------------------+---------------------+-----------------+--------------
- regression | s4 | dbname | b
- regression | s4 | host | a
- regression | s6 | dbname | b
- regression | s6 | host | a
- regression | s8 | connect_timeout | 30
- regression | s8 | dbname | db1
-(6 rows)
-
-SELECT * FROM information_schema.user_mappings ORDER BY lower(authorization_identifier), 2, 3;
- authorization_identifier | foreign_server_catalog | foreign_server_name
----------------------------+------------------------+---------------------
- PUBLIC | regression | s4
- PUBLIC | regression | s8
- PUBLIC | regression | t1
- regress_foreign_data_user | regression | s4
- regress_foreign_data_user | regression | s8
- regress_test_role | regression | s5
- regress_test_role | regression | s6
- regress_test_role | regression | t1
-(8 rows)
-
-SELECT * FROM information_schema.user_mapping_options ORDER BY lower(authorization_identifier), 2, 3, 4;
- authorization_identifier | foreign_server_catalog | foreign_server_name | option_name | option_value
----------------------------+------------------------+---------------------+--------------+--------------
- PUBLIC | regression | s4 | this mapping | is public
- PUBLIC | regression | t1 | modified | 1
- regress_foreign_data_user | regression | s8 | password | public
- regress_test_role | regression | s5 | modified | 1
- regress_test_role | regression | s6 | username | test
- regress_test_role | regression | t1 | password | boo
- regress_test_role | regression | t1 | username | bob
-(7 rows)
-
-SELECT * FROM information_schema.usage_privileges WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5;
- grantor | grantee | object_catalog | object_schema | object_name | object_type | privilege_type | is_grantable
----------------------------+---------------------------+----------------+---------------+-------------+----------------------+----------------+--------------
- regress_foreign_data_user | regress_foreign_data_user | regression | | foo | FOREIGN DATA WRAPPER | USAGE | YES
- regress_foreign_data_user | regress_test_indirect | regression | | foo | FOREIGN DATA WRAPPER | USAGE | NO
- regress_test_indirect | regress_test_indirect | regression | | s6 | FOREIGN SERVER | USAGE | YES
- regress_test_indirect | regress_test_role2 | regression | | s6 | FOREIGN SERVER | USAGE | YES
-(4 rows)
-
-SELECT * FROM information_schema.role_usage_grants WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5;
- grantor | grantee | object_catalog | object_schema | object_name | object_type | privilege_type | is_grantable
----------------------------+---------------------------+----------------+---------------+-------------+----------------------+----------------+--------------
- regress_foreign_data_user | regress_foreign_data_user | regression | | foo | FOREIGN DATA WRAPPER | USAGE | YES
- regress_foreign_data_user | regress_test_indirect | regression | | foo | FOREIGN DATA WRAPPER | USAGE | NO
- regress_test_indirect | regress_test_indirect | regression | | s6 | FOREIGN SERVER | USAGE | YES
- regress_test_indirect | regress_test_role2 | regression | | s6 | FOREIGN SERVER | USAGE | YES
-(4 rows)
-
-SELECT * FROM information_schema.foreign_tables ORDER BY 1, 2, 3;
- foreign_table_catalog | foreign_table_schema | foreign_table_name | foreign_server_catalog | foreign_server_name
------------------------+----------------------+--------------------+------------------------+---------------------
- regression | foreign_schema | foreign_table_1 | regression | s0
-(1 row)
-
-SELECT * FROM information_schema.foreign_table_options ORDER BY 1, 2, 3, 4;
- foreign_table_catalog | foreign_table_schema | foreign_table_name | option_name | option_value
------------------------+----------------------+--------------------+-------------+--------------
- regression | foreign_schema | foreign_table_1 | be quoted | value
- regression | foreign_schema | foreign_table_1 | escape | @
- regression | foreign_schema | foreign_table_1 | quote | ~
-(3 rows)
-
-SET ROLE regress_test_role;
-SELECT * FROM information_schema.user_mapping_options ORDER BY 1, 2, 3, 4;
- authorization_identifier | foreign_server_catalog | foreign_server_name | option_name | option_value
---------------------------+------------------------+---------------------+-------------+--------------
- PUBLIC | regression | t1 | modified | 1
- regress_test_role | regression | s5 | modified | 1
- regress_test_role | regression | s6 | username | test
- regress_test_role | regression | t1 | password | boo
- regress_test_role | regression | t1 | username | bob
-(5 rows)
-
-SELECT * FROM information_schema.usage_privileges WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5;
- grantor | grantee | object_catalog | object_schema | object_name | object_type | privilege_type | is_grantable
----------------------------+-----------------------+----------------+---------------+-------------+----------------------+----------------+--------------
- regress_foreign_data_user | regress_test_indirect | regression | | foo | FOREIGN DATA WRAPPER | USAGE | NO
- regress_test_indirect | regress_test_indirect | regression | | s6 | FOREIGN SERVER | USAGE | YES
- regress_test_indirect | regress_test_role2 | regression | | s6 | FOREIGN SERVER | USAGE | YES
-(3 rows)
-
-SELECT * FROM information_schema.role_usage_grants WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5;
- grantor | grantee | object_catalog | object_schema | object_name | object_type | privilege_type | is_grantable
----------------------------+-----------------------+----------------+---------------+-------------+----------------------+----------------+--------------
- regress_foreign_data_user | regress_test_indirect | regression | | foo | FOREIGN DATA WRAPPER | USAGE | NO
- regress_test_indirect | regress_test_indirect | regression | | s6 | FOREIGN SERVER | USAGE | YES
- regress_test_indirect | regress_test_role2 | regression | | s6 | FOREIGN SERVER | USAGE | YES
-(3 rows)
-
-DROP USER MAPPING FOR current_user SERVER t1;
-SET ROLE regress_test_role2;
-SELECT * FROM information_schema.user_mapping_options ORDER BY 1, 2, 3, 4;
- authorization_identifier | foreign_server_catalog | foreign_server_name | option_name | option_value
---------------------------+------------------------+---------------------+-------------+--------------
- regress_test_role | regression | s6 | username |
-(1 row)
-
-RESET ROLE;
--- has_foreign_data_wrapper_privilege
-SELECT has_foreign_data_wrapper_privilege('regress_test_role',
- (SELECT oid FROM pg_foreign_data_wrapper WHERE fdwname='foo'), 'USAGE');
- has_foreign_data_wrapper_privilege
-------------------------------------
- t
-(1 row)
-
-SELECT has_foreign_data_wrapper_privilege('regress_test_role', 'foo', 'USAGE');
- has_foreign_data_wrapper_privilege
-------------------------------------
- t
-(1 row)
-
-SELECT has_foreign_data_wrapper_privilege(
- (SELECT oid FROM pg_roles WHERE rolname='regress_test_role'),
- (SELECT oid FROM pg_foreign_data_wrapper WHERE fdwname='foo'), 'USAGE');
- has_foreign_data_wrapper_privilege
-------------------------------------
- t
-(1 row)
-
-SELECT has_foreign_data_wrapper_privilege(
- (SELECT oid FROM pg_foreign_data_wrapper WHERE fdwname='foo'), 'USAGE');
- has_foreign_data_wrapper_privilege
-------------------------------------
- t
-(1 row)
-
-SELECT has_foreign_data_wrapper_privilege(
- (SELECT oid FROM pg_roles WHERE rolname='regress_test_role'), 'foo', 'USAGE');
- has_foreign_data_wrapper_privilege
-------------------------------------
- t
-(1 row)
-
-SELECT has_foreign_data_wrapper_privilege('foo', 'USAGE');
- has_foreign_data_wrapper_privilege
-------------------------------------
- t
-(1 row)
-
-GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role;
-SELECT has_foreign_data_wrapper_privilege('regress_test_role', 'foo', 'USAGE');
- has_foreign_data_wrapper_privilege
-------------------------------------
- t
-(1 row)
-
--- has_server_privilege
-SELECT has_server_privilege('regress_test_role',
- (SELECT oid FROM pg_foreign_server WHERE srvname='s8'), 'USAGE');
- has_server_privilege
----------------------
- f
-(1 row)
-
-SELECT has_server_privilege('regress_test_role', 's8', 'USAGE');
- has_server_privilege
----------------------
- f
-(1 row)
-
-SELECT has_server_privilege(
- (SELECT oid FROM pg_roles WHERE rolname='regress_test_role'),
- (SELECT oid FROM pg_foreign_server WHERE srvname='s8'), 'USAGE');
- has_server_privilege
----------------------
- f
-(1 row)
-
-SELECT has_server_privilege(
- (SELECT oid FROM pg_foreign_server WHERE srvname='s8'), 'USAGE');
- has_server_privilege
----------------------
- t
-(1 row)
-
-SELECT has_server_privilege(
- (SELECT oid FROM pg_roles WHERE rolname='regress_test_role'), 's8', 'USAGE');
- has_server_privilege
----------------------
- f
-(1 row)
-
-SELECT has_server_privilege('s8', 'USAGE');
- has_server_privilege
----------------------
- t
-(1 row)
-
-GRANT USAGE ON FOREIGN SERVER s8 TO regress_test_role;
-SELECT has_server_privilege('regress_test_role', 's8', 'USAGE');
- has_server_privilege
----------------------
- t
-(1 row)
-
-REVOKE USAGE ON FOREIGN SERVER s8 FROM regress_test_role;
-GRANT USAGE ON FOREIGN SERVER s4 TO regress_test_role;
-DROP USER MAPPING FOR public SERVER s4;
-ALTER SERVER s6 OPTIONS (DROP host, DROP dbname);
-ALTER USER MAPPING FOR regress_test_role SERVER s6 OPTIONS (DROP username);
-ALTER FOREIGN DATA WRAPPER foo VALIDATOR postgresql_fdw_validator;
-WARNING: changing the foreign-data wrapper validator can cause the options for dependent objects to become invalid
--- Privileges
-SET ROLE regress_unprivileged_role;
-CREATE FOREIGN DATA WRAPPER foobar; -- ERROR
-ERROR: permission denied to create foreign-data wrapper "foobar"
-HINT: Must be superuser to create a foreign-data wrapper.
-ALTER FOREIGN DATA WRAPPER foo OPTIONS (gotcha 'true'); -- ERROR
-ERROR: permission denied to alter foreign-data wrapper "foo"
-HINT: Must be superuser to alter a foreign-data wrapper.
-ALTER FOREIGN DATA WRAPPER foo OWNER TO regress_unprivileged_role; -- ERROR
-ERROR: permission denied to change owner of foreign-data wrapper "foo"
-HINT: Must be superuser to change owner of a foreign-data wrapper.
-DROP FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: must be owner of foreign-data wrapper foo
-GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role; -- ERROR
-ERROR: permission denied for foreign-data wrapper foo
-CREATE SERVER s9 FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: permission denied for foreign-data wrapper foo
-ALTER SERVER s4 VERSION '0.5'; -- ERROR
-ERROR: must be owner of foreign server s4
-ALTER SERVER s4 OWNER TO regress_unprivileged_role; -- ERROR
-ERROR: must be owner of foreign server s4
-DROP SERVER s4; -- ERROR
-ERROR: must be owner of foreign server s4
-GRANT USAGE ON FOREIGN SERVER s4 TO regress_test_role; -- ERROR
-ERROR: permission denied for foreign server s4
-CREATE USER MAPPING FOR public SERVER s4; -- ERROR
-ERROR: must be owner of foreign server s4
-ALTER USER MAPPING FOR regress_test_role SERVER s6 OPTIONS (gotcha 'true'); -- ERROR
-ERROR: must be owner of foreign server s6
-DROP USER MAPPING FOR regress_test_role SERVER s6; -- ERROR
-ERROR: must be owner of foreign server s6
-RESET ROLE;
-GRANT USAGE ON FOREIGN DATA WRAPPER postgresql TO regress_unprivileged_role;
-GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_unprivileged_role WITH GRANT OPTION;
-SET ROLE regress_unprivileged_role;
-CREATE FOREIGN DATA WRAPPER foobar; -- ERROR
-ERROR: permission denied to create foreign-data wrapper "foobar"
-HINT: Must be superuser to create a foreign-data wrapper.
-ALTER FOREIGN DATA WRAPPER foo OPTIONS (gotcha 'true'); -- ERROR
-ERROR: permission denied to alter foreign-data wrapper "foo"
-HINT: Must be superuser to alter a foreign-data wrapper.
-DROP FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: must be owner of foreign-data wrapper foo
-GRANT USAGE ON FOREIGN DATA WRAPPER postgresql TO regress_test_role; -- WARNING
-WARNING: no privileges were granted for "postgresql"
-GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role;
-CREATE SERVER s9 FOREIGN DATA WRAPPER postgresql;
-ALTER SERVER s6 VERSION '0.5'; -- ERROR
-ERROR: must be owner of foreign server s6
-DROP SERVER s6; -- ERROR
-ERROR: must be owner of foreign server s6
-GRANT USAGE ON FOREIGN SERVER s6 TO regress_test_role; -- ERROR
-ERROR: permission denied for foreign server s6
-GRANT USAGE ON FOREIGN SERVER s9 TO regress_test_role;
-CREATE USER MAPPING FOR public SERVER s6; -- ERROR
-ERROR: must be owner of foreign server s6
-CREATE USER MAPPING FOR public SERVER s9;
-ALTER USER MAPPING FOR regress_test_role SERVER s6 OPTIONS (gotcha 'true'); -- ERROR
-ERROR: must be owner of foreign server s6
-DROP USER MAPPING FOR regress_test_role SERVER s6; -- ERROR
-ERROR: must be owner of foreign server s6
-RESET ROLE;
-REVOKE USAGE ON FOREIGN DATA WRAPPER foo FROM regress_unprivileged_role; -- ERROR
-ERROR: dependent privileges exist
-HINT: Use CASCADE to revoke them too.
-REVOKE USAGE ON FOREIGN DATA WRAPPER foo FROM regress_unprivileged_role CASCADE;
-SET ROLE regress_unprivileged_role;
-GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role; -- ERROR
-ERROR: permission denied for foreign-data wrapper foo
-CREATE SERVER s10 FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: permission denied for foreign-data wrapper foo
-ALTER SERVER s9 VERSION '1.1';
-GRANT USAGE ON FOREIGN SERVER s9 TO regress_test_role;
-CREATE USER MAPPING FOR current_user SERVER s9;
-DROP SERVER s9 CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to user mapping for public on server s9
-drop cascades to user mapping for regress_unprivileged_role on server s9
-RESET ROLE;
-CREATE SERVER s9 FOREIGN DATA WRAPPER foo;
-GRANT USAGE ON FOREIGN SERVER s9 TO regress_unprivileged_role;
-SET ROLE regress_unprivileged_role;
-ALTER SERVER s9 VERSION '1.2'; -- ERROR
-ERROR: must be owner of foreign server s9
-GRANT USAGE ON FOREIGN SERVER s9 TO regress_test_role; -- WARNING
-WARNING: no privileges were granted for "s9"
-CREATE USER MAPPING FOR current_user SERVER s9;
-DROP SERVER s9 CASCADE; -- ERROR
-ERROR: must be owner of foreign server s9
--- Check visibility of user mapping data
-SET ROLE regress_test_role;
-CREATE SERVER s10 FOREIGN DATA WRAPPER foo;
-CREATE USER MAPPING FOR public SERVER s10 OPTIONS (user 'secret');
-CREATE USER MAPPING FOR regress_unprivileged_role SERVER s10 OPTIONS (user 'secret');
--- owner of server can see some option fields
-\deu+
- List of user mappings
- Server | User name | FDW options
---------+---------------------------+-------------------
- s10 | public | ("user" 'secret')
- s10 | regress_unprivileged_role |
- s4 | regress_foreign_data_user |
- s5 | regress_test_role | (modified '1')
- s6 | regress_test_role |
- s8 | public |
- s8 | regress_foreign_data_user |
- s9 | regress_unprivileged_role |
- t1 | public | (modified '1')
-(9 rows)
-
-RESET ROLE;
--- superuser can see all option fields
-\deu+
- List of user mappings
- Server | User name | FDW options
---------+---------------------------+---------------------
- s10 | public | ("user" 'secret')
- s10 | regress_unprivileged_role | ("user" 'secret')
- s4 | regress_foreign_data_user |
- s5 | regress_test_role | (modified '1')
- s6 | regress_test_role |
- s8 | public |
- s8 | regress_foreign_data_user | (password 'public')
- s9 | regress_unprivileged_role |
- t1 | public | (modified '1')
-(9 rows)
-
--- unprivileged user cannot see any option field
-SET ROLE regress_unprivileged_role;
-\deu+
- List of user mappings
- Server | User name | FDW options
---------+---------------------------+-------------
- s10 | public |
- s10 | regress_unprivileged_role |
- s4 | regress_foreign_data_user |
- s5 | regress_test_role |
- s6 | regress_test_role |
- s8 | public |
- s8 | regress_foreign_data_user |
- s9 | regress_unprivileged_role |
- t1 | public |
-(9 rows)
-
-RESET ROLE;
-DROP SERVER s10 CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to user mapping for public on server s10
-drop cascades to user mapping for regress_unprivileged_role on server s10
--- Triggers
-CREATE FUNCTION dummy_trigger() RETURNS TRIGGER AS $$
- BEGIN
- RETURN NULL;
- END
-$$ language plpgsql;
-CREATE TRIGGER trigtest_before_stmt BEFORE INSERT OR UPDATE OR DELETE
-ON foreign_schema.foreign_table_1
-FOR EACH STATEMENT
-EXECUTE PROCEDURE dummy_trigger();
-CREATE TRIGGER trigtest_after_stmt AFTER INSERT OR UPDATE OR DELETE
-ON foreign_schema.foreign_table_1
-FOR EACH STATEMENT
-EXECUTE PROCEDURE dummy_trigger();
-CREATE TRIGGER trigtest_after_stmt_tt AFTER INSERT OR UPDATE OR DELETE -- ERROR
-ON foreign_schema.foreign_table_1
-REFERENCING NEW TABLE AS new_table
-FOR EACH STATEMENT
-EXECUTE PROCEDURE dummy_trigger();
-ERROR: "foreign_table_1" is a foreign table
-DETAIL: Triggers on foreign tables cannot have transition tables.
-CREATE TRIGGER trigtest_before_row BEFORE INSERT OR UPDATE OR DELETE
-ON foreign_schema.foreign_table_1
-FOR EACH ROW
-EXECUTE PROCEDURE dummy_trigger();
-CREATE TRIGGER trigtest_after_row AFTER INSERT OR UPDATE OR DELETE
-ON foreign_schema.foreign_table_1
-FOR EACH ROW
-EXECUTE PROCEDURE dummy_trigger();
-CREATE CONSTRAINT TRIGGER trigtest_constraint AFTER INSERT OR UPDATE OR DELETE
-ON foreign_schema.foreign_table_1
-FOR EACH ROW
-EXECUTE PROCEDURE dummy_trigger();
-ERROR: "foreign_table_1" is a foreign table
-DETAIL: Foreign tables cannot have constraint triggers.
-ALTER FOREIGN TABLE foreign_schema.foreign_table_1
- DISABLE TRIGGER trigtest_before_stmt;
-ALTER FOREIGN TABLE foreign_schema.foreign_table_1
- ENABLE TRIGGER trigtest_before_stmt;
-DROP TRIGGER trigtest_before_stmt ON foreign_schema.foreign_table_1;
-DROP TRIGGER trigtest_before_row ON foreign_schema.foreign_table_1;
-DROP TRIGGER trigtest_after_stmt ON foreign_schema.foreign_table_1;
-DROP TRIGGER trigtest_after_row ON foreign_schema.foreign_table_1;
-DROP FUNCTION dummy_trigger();
--- Table inheritance
-CREATE TABLE fd_pt1 (
- c1 integer NOT NULL,
- c2 text,
- c3 date
-);
-CREATE FOREIGN TABLE ft2 () INHERITS (fd_pt1)
- SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value');
-\d+ fd_pt1
- Table "public.fd_pt1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
-Not-null constraints:
- "fd_pt1_c1_not_null" NOT NULL "c1"
-Child tables: ft2, FOREIGN
-
-\d+ ft2
- Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
-Not-null constraints:
- "fd_pt1_c1_not_null" NOT NULL "c1" (inherited)
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-Inherits: fd_pt1
-
-DROP FOREIGN TABLE ft2;
-\d+ fd_pt1
- Table "public.fd_pt1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
-Not-null constraints:
- "fd_pt1_c1_not_null" NOT NULL "c1"
-
-CREATE FOREIGN TABLE ft2 (
- c1 integer NOT NULL,
- c2 text,
- c3 date
-) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value');
-\d+ ft2
- Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "c1"
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-
-ALTER FOREIGN TABLE ft2 INHERIT fd_pt1;
-\d+ fd_pt1
- Table "public.fd_pt1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
-Not-null constraints:
- "fd_pt1_c1_not_null" NOT NULL "c1"
-Child tables: ft2, FOREIGN
-
-\d+ ft2
- Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "c1" (local, inherited)
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-Inherits: fd_pt1
-
-CREATE TABLE ct3() INHERITS(ft2);
-CREATE FOREIGN TABLE ft3 (
- c1 integer NOT NULL,
- c2 text,
- c3 date
-) INHERITS(ft2)
- SERVER s0;
-NOTICE: merging column "c1" with inherited definition
-NOTICE: merging column "c2" with inherited definition
-NOTICE: merging column "c3" with inherited definition
-\d+ ft2
- Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "c1" (local, inherited)
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-Inherits: fd_pt1
-Child tables: ct3,
- ft3, FOREIGN
-
-\d+ ct3
- Table "public.ct3"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "c1" (inherited)
-Inherits: ft2
-
-\d+ ft3
- Foreign table "public.ft3"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
-Not-null constraints:
- "ft3_c1_not_null" NOT NULL "c1" (local, inherited)
-Server: s0
-Inherits: ft2
-
--- add attributes recursively
-ALTER TABLE fd_pt1 ADD COLUMN c4 integer;
-ALTER TABLE fd_pt1 ADD COLUMN c5 integer DEFAULT 0;
-ALTER TABLE fd_pt1 ADD COLUMN c6 integer;
-ALTER TABLE fd_pt1 ADD COLUMN c7 integer NOT NULL;
-ALTER TABLE fd_pt1 ADD COLUMN c8 integer;
-\d+ fd_pt1
- Table "public.fd_pt1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
- c4 | integer | | | | plain | |
- c5 | integer | | | 0 | plain | |
- c6 | integer | | | | plain | |
- c7 | integer | | not null | | plain | |
- c8 | integer | | | | plain | |
-Not-null constraints:
- "fd_pt1_c1_not_null" NOT NULL "c1"
- "fd_pt1_c7_not_null" NOT NULL "c7"
-Child tables: ft2, FOREIGN
-
-\d+ ft2
- Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
- c4 | integer | | | | | plain | |
- c5 | integer | | | 0 | | plain | |
- c6 | integer | | | | | plain | |
- c7 | integer | | not null | | | plain | |
- c8 | integer | | | | | plain | |
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "c1" (local, inherited)
- "fd_pt1_c7_not_null" NOT NULL "c7" (inherited)
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-Inherits: fd_pt1
-Child tables: ct3,
- ft3, FOREIGN
-
-\d+ ct3
- Table "public.ct3"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
- c4 | integer | | | | plain | |
- c5 | integer | | | 0 | plain | |
- c6 | integer | | | | plain | |
- c7 | integer | | not null | | plain | |
- c8 | integer | | | | plain | |
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "c1" (inherited)
- "fd_pt1_c7_not_null" NOT NULL "c7" (inherited)
-Inherits: ft2
-
-\d+ ft3
- Foreign table "public.ft3"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
- c4 | integer | | | | | plain | |
- c5 | integer | | | 0 | | plain | |
- c6 | integer | | | | | plain | |
- c7 | integer | | not null | | | plain | |
- c8 | integer | | | | | plain | |
-Not-null constraints:
- "ft3_c1_not_null" NOT NULL "c1" (local, inherited)
- "fd_pt1_c7_not_null" NOT NULL "c7" (inherited)
-Server: s0
-Inherits: ft2
-
--- alter attributes recursively
-ALTER TABLE fd_pt1 ALTER COLUMN c4 SET DEFAULT 0;
-ALTER TABLE fd_pt1 ALTER COLUMN c5 DROP DEFAULT;
-ALTER TABLE fd_pt1 ALTER COLUMN c6 SET NOT NULL;
-ALTER TABLE fd_pt1 ALTER COLUMN c7 DROP NOT NULL;
-ALTER TABLE fd_pt1 ALTER COLUMN c8 TYPE char(10) USING '0'; -- ERROR
-ERROR: "ft2" is not a table
-ALTER TABLE fd_pt1 ALTER COLUMN c8 TYPE char(10);
-ALTER TABLE fd_pt1 ALTER COLUMN c8 SET DATA TYPE text;
-ALTER TABLE fd_pt1 ALTER COLUMN c1 SET STATISTICS 10000;
-ALTER TABLE fd_pt1 ALTER COLUMN c1 SET (n_distinct = 100);
-ALTER TABLE fd_pt1 ALTER COLUMN c8 SET STATISTICS -1;
-ALTER TABLE fd_pt1 ALTER COLUMN c8 SET STORAGE EXTERNAL;
-\d+ fd_pt1
- Table "public.fd_pt1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | 10000 |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
- c4 | integer | | | 0 | plain | |
- c5 | integer | | | | plain | |
- c6 | integer | | not null | | plain | |
- c7 | integer | | | | plain | |
- c8 | text | | | | external | |
-Not-null constraints:
- "fd_pt1_c1_not_null" NOT NULL "c1"
- "fd_pt1_c6_not_null" NOT NULL "c6"
-Child tables: ft2, FOREIGN
-
-\d+ ft2
- Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | 10000 |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
- c4 | integer | | | 0 | | plain | |
- c5 | integer | | | | | plain | |
- c6 | integer | | not null | | | plain | |
- c7 | integer | | | | | plain | |
- c8 | text | | | | | external | |
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "c1" (local, inherited)
- "fd_pt1_c6_not_null" NOT NULL "c6" (inherited)
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-Inherits: fd_pt1
-Child tables: ct3,
- ft3, FOREIGN
-
--- drop attributes recursively
-ALTER TABLE fd_pt1 DROP COLUMN c4;
-ALTER TABLE fd_pt1 DROP COLUMN c5;
-ALTER TABLE fd_pt1 DROP COLUMN c6;
-ALTER TABLE fd_pt1 DROP COLUMN c7;
-ALTER TABLE fd_pt1 DROP COLUMN c8;
-\d+ fd_pt1
- Table "public.fd_pt1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | 10000 |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
-Not-null constraints:
- "fd_pt1_c1_not_null" NOT NULL "c1"
-Child tables: ft2, FOREIGN
-
-\d+ ft2
- Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | 10000 |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "c1" (local, inherited)
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-Inherits: fd_pt1
-Child tables: ct3,
- ft3, FOREIGN
-
--- add constraints recursively
-ALTER TABLE fd_pt1 ADD CONSTRAINT fd_pt1chk1 CHECK (c1 > 0) NO INHERIT;
-ALTER TABLE fd_pt1 ADD CONSTRAINT fd_pt1chk2 CHECK (c2 <> '');
--- connoinherit should be true for NO INHERIT constraint
-SELECT relname, conname, contype, conislocal, coninhcount, connoinherit
- FROM pg_class AS pc JOIN pg_constraint AS pgc ON (conrelid = pc.oid)
- WHERE pc.relname = 'fd_pt1'
- ORDER BY 1,2;
- relname | conname | contype | conislocal | coninhcount | connoinherit
----------+--------------------+---------+------------+-------------+--------------
- fd_pt1 | fd_pt1_c1_not_null | n | t | 0 | f
- fd_pt1 | fd_pt1chk1 | c | t | 0 | t
- fd_pt1 | fd_pt1chk2 | c | t | 0 | f
-(3 rows)
-
--- child does not inherit NO INHERIT constraints
-\d+ fd_pt1
- Table "public.fd_pt1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | 10000 |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
-Check constraints:
- "fd_pt1chk1" CHECK (c1 > 0) NO INHERIT
- "fd_pt1chk2" CHECK (c2 <> ''::text)
-Not-null constraints:
- "fd_pt1_c1_not_null" NOT NULL "c1"
-Child tables: ft2, FOREIGN
-
-\d+ ft2
- Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | 10000 |
- c2
| text | | | | | extended | | - c3 | date | | | | | plain | | -Check constraints: - "fd_pt1chk2" CHECK (c2 <> ''::text) -Not-null constraints: - "ft2_c1_not_null" NOT NULL "c1" (local, inherited) -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: fd_pt1 -Child tables: ct3, - ft3, FOREIGN - -DROP FOREIGN TABLE ft2; -- ERROR -ERROR: cannot drop foreign table ft2 because other objects depend on it -DETAIL: table ct3 depends on foreign table ft2 -foreign table ft3 depends on foreign table ft2 -HINT: Use DROP ... CASCADE to drop the dependent objects too. -DROP FOREIGN TABLE ft2 CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table ct3 -drop cascades to foreign table ft3 -CREATE FOREIGN TABLE ft2 ( - c1 integer NOT NULL, - c2 text, - c3 date -) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); --- child must have parent's INHERIT constraints -ALTER FOREIGN TABLE ft2 INHERIT fd_pt1; -- ERROR -ERROR: child table is missing constraint "fd_pt1chk2" -ALTER FOREIGN TABLE ft2 ADD CONSTRAINT fd_pt1chk2 CHECK (c2 <> ''); -ALTER FOREIGN TABLE ft2 INHERIT fd_pt1; --- child does not inherit NO INHERIT constraints -\d+ fd_pt1 - Table "public.fd_pt1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | 10000 | - c2 | text | | | | extended | | - c3 | date | | | | plain | | -Check constraints: - "fd_pt1chk1" CHECK (c1 > 0) NO INHERIT - "fd_pt1chk2" CHECK (c2 <> ''::text) -Not-null constraints: - "fd_pt1_c1_not_null" NOT NULL "c1" -Child tables: ft2, FOREIGN - -\d+ ft2 - Foreign table "public.ft2" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | -Check constraints: - "fd_pt1chk2" CHECK (c2 <> ''::text) -Not-null constraints: - "ft2_c1_not_null" NOT NULL "c1" (local, inherited) -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: fd_pt1 - --- drop constraints recursively -ALTER TABLE fd_pt1 DROP CONSTRAINT fd_pt1chk1 CASCADE; -ALTER TABLE fd_pt1 DROP CONSTRAINT fd_pt1chk2 CASCADE; --- NOT VALID case -INSERT INTO fd_pt1 VALUES (1, 'fd_pt1'::text, '1994-01-01'::date); -ALTER TABLE fd_pt1 ADD CONSTRAINT fd_pt1chk3 CHECK (c2 <> '') NOT VALID; -\d+ fd_pt1 - Table "public.fd_pt1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | 10000 | - c2 | text | | | | extended | | - c3 | date | | | | plain | | -Check constraints: - "fd_pt1chk3" CHECK (c2 <> ''::text) NOT VALID -Not-null constraints: - "fd_pt1_c1_not_null" NOT NULL "c1" -Child tables: ft2, FOREIGN - -\d+ ft2 - Foreign table "public.ft2" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | -Check constraints: - "fd_pt1chk2" CHECK (c2 <> ''::text) - "fd_pt1chk3" CHECK (c2 <> ''::text) NOT VALID 
-Not-null constraints: - "ft2_c1_not_null" NOT NULL "c1" (local, inherited) -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: fd_pt1 - --- VALIDATE CONSTRAINT need do nothing on foreign tables -ALTER TABLE fd_pt1 VALIDATE CONSTRAINT fd_pt1chk3; -\d+ fd_pt1 - Table "public.fd_pt1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | 10000 | - c2 | text | | | | extended | | - c3 | date | | | | plain | | -Check constraints: - "fd_pt1chk3" CHECK (c2 <> ''::text) -Not-null constraints: - "fd_pt1_c1_not_null" NOT NULL "c1" -Child tables: ft2, FOREIGN - -\d+ ft2 - Foreign table "public.ft2" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | -Check constraints: - "fd_pt1chk2" CHECK (c2 <> ''::text) - "fd_pt1chk3" CHECK (c2 <> ''::text) -Not-null constraints: - "ft2_c1_not_null" NOT NULL "c1" (local, inherited) -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: fd_pt1 - --- changes name of an attribute recursively -ALTER TABLE fd_pt1 RENAME COLUMN c1 TO f1; -ALTER TABLE fd_pt1 RENAME COLUMN c2 TO f2; -ALTER TABLE fd_pt1 RENAME COLUMN c3 TO f3; --- changes name of a constraint recursively -ALTER TABLE fd_pt1 RENAME CONSTRAINT fd_pt1chk3 TO f2_check; -\d+ fd_pt1 - Table "public.fd_pt1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - f1 | integer | | not null | | plain | 10000 | - f2 | text | | | | extended | | - f3 | date | | | | plain | | -Check constraints: - "f2_check" CHECK (f2 <> ''::text) -Not-null constraints: - "fd_pt1_c1_not_null" NOT NULL "f1" -Child tables: ft2, FOREIGN - -\d+ ft2 - Foreign table "public.ft2" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - f1 | integer | | not null | | | plain | | - f2 | text | | | | | extended | | - f3 | date | | | | | plain | | -Check constraints: - "f2_check" CHECK (f2 <> ''::text) - "fd_pt1chk2" CHECK (f2 <> ''::text) -Not-null constraints: - "ft2_c1_not_null" NOT NULL "f1" (local, inherited) -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: fd_pt1 - -DROP TABLE fd_pt1 CASCADE; -NOTICE: drop cascades to foreign table ft2 --- IMPORT FOREIGN SCHEMA -IMPORT FOREIGN SCHEMA s1 FROM SERVER s9 INTO public; -- ERROR -ERROR: foreign-data wrapper "foo" has no handler -IMPORT FOREIGN SCHEMA s1 LIMIT TO (t1) FROM SERVER s9 INTO public; --ERROR -ERROR: foreign-data wrapper "foo" has no handler -IMPORT FOREIGN SCHEMA s1 EXCEPT (t1) FROM SERVER s9 INTO public; -- ERROR -ERROR: foreign-data wrapper "foo" has no handler -IMPORT FOREIGN SCHEMA s1 EXCEPT (t1, t2) FROM SERVER s9 INTO public -OPTIONS (option1 'value1', option2 'value2'); -- ERROR -ERROR: foreign-data wrapper "foo" has no handler --- DROP FOREIGN TABLE -DROP FOREIGN TABLE no_table; -- ERROR -ERROR: foreign table "no_table" does not exist -DROP FOREIGN TABLE IF EXISTS no_table; 
-NOTICE: foreign table "no_table" does not exist, skipping -DROP FOREIGN TABLE foreign_schema.foreign_table_1; --- REASSIGN OWNED/DROP OWNED of foreign objects -REASSIGN OWNED BY regress_test_role TO regress_test_role2; -DROP OWNED BY regress_test_role2; -ERROR: cannot drop desired object(s) because other objects depend on them -DETAIL: user mapping for regress_test_role on server s5 depends on server s5 -HINT: Use DROP ... CASCADE to drop the dependent objects too. -DROP OWNED BY regress_test_role2 CASCADE; -NOTICE: drop cascades to user mapping for regress_test_role on server s5 --- Foreign partition DDL stuff -CREATE TABLE fd_pt2 ( - c1 integer NOT NULL, - c2 text, - c3 date -) PARTITION BY LIST (c1); -CREATE FOREIGN TABLE fd_pt2_1 PARTITION OF fd_pt2 FOR VALUES IN (1) - SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -\d+ fd_pt2 - Partitioned table "public.fd_pt2" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | | - c2 | text | | | | extended | | - c3 | date | | | | plain | | -Partition key: LIST (c1) -Not-null constraints: - "fd_pt2_c1_not_null" NOT NULL "c1" -Partitions: fd_pt2_1 FOR VALUES IN (1), FOREIGN - -\d+ fd_pt2_1 - Foreign table "public.fd_pt2_1" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | -Partition of: fd_pt2 FOR VALUES IN (1) -Partition constraint: ((c1 IS NOT NULL) AND (c1 = 1)) -Not-null constraints: - "fd_pt2_c1_not_null" NOT NULL "c1" (inherited) -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') - --- partition cannot have additional columns -DROP FOREIGN TABLE fd_pt2_1; -CREATE FOREIGN TABLE fd_pt2_1 ( - c1 integer NOT NULL, - c2 text, - c3 date, - c4 char -) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -\d+ fd_pt2_1 - Foreign table "public.fd_pt2_1" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+--------------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | - c4 | character(1) | | | | | extended | | -Not-null constraints: - "fd_pt2_1_c1_not_null" NOT NULL "c1" -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') - -ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR -ERROR: table "fd_pt2_1" contains column "c4" not found in parent "fd_pt2" -DETAIL: The new partition may contain only the columns present in parent. 
-DROP FOREIGN TABLE fd_pt2_1; -\d+ fd_pt2 - Partitioned table "public.fd_pt2" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | | - c2 | text | | | | extended | | - c3 | date | | | | plain | | -Partition key: LIST (c1) -Not-null constraints: - "fd_pt2_c1_not_null" NOT NULL "c1" -Number of partitions: 0 - -CREATE FOREIGN TABLE fd_pt2_1 ( - c1 integer NOT NULL, - c2 text, - c3 date -) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -\d+ fd_pt2_1 - Foreign table "public.fd_pt2_1" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | -Not-null constraints: - "fd_pt2_1_c1_not_null" NOT NULL "c1" -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') - --- no attach partition validation occurs for foreign tables -ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -\d+ fd_pt2 - Partitioned table "public.fd_pt2" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | | - c2 | text | | | | extended | | - c3 | date | | | | plain | | -Partition key: LIST (c1) -Not-null constraints: - "fd_pt2_c1_not_null" NOT NULL "c1" -Partitions: fd_pt2_1 FOR VALUES IN (1), FOREIGN - -\d+ fd_pt2_1 - Foreign table "public.fd_pt2_1" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | | | | plain | | -Partition of: fd_pt2 FOR VALUES IN (1) -Partition constraint: ((c1 IS NOT NULL) AND (c1 = 1)) -Not-null constraints: - "fd_pt2_1_c1_not_null" NOT NULL "c1" (inherited) -Server: s0 -FDW options: (delimiter ',', quote '"', "be quoted" 'value') - --- cannot add column to a partition -ALTER TABLE fd_pt2_1 ADD c4 char; -ERROR: cannot add column to a partition --- ok to have a partition's own constraints though -ALTER TABLE fd_pt2_1 ALTER c3 SET NOT NULL; -ALTER TABLE fd_pt2_1 ADD CONSTRAINT p21chk CHECK (c2 <> ''); -\d+ fd_pt2 - Partitioned table "public.fd_pt2" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - c1 | integer | | not null | | plain | | - c2 | text | | | | extended | | - c3 | date | | | | plain | | -Partition key: LIST (c1) -Not-null constraints: - "fd_pt2_c1_not_null" NOT NULL "c1" -Partitions: fd_pt2_1 FOR VALUES IN (1), FOREIGN - -\d+ fd_pt2_1 - Foreign table "public.fd_pt2_1" - Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description ---------+---------+-----------+----------+---------+-------------+----------+--------------+------------- - c1 | integer | | not null | | | plain | | - c2 | text | | | | | extended | | - c3 | date | | not null | | | plain | | -Partition of: fd_pt2 FOR VALUES IN (1) -Partition constraint: ((c1 IS NOT NULL) AND (c1 = 1)) 
-Check constraints:
-    "p21chk" CHECK (c2 <> ''::text)
-Not-null constraints:
-    "fd_pt2_1_c1_not_null" NOT NULL "c1" (inherited)
-    "fd_pt2_1_c3_not_null" NOT NULL "c3"
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-
--- cannot drop inherited NOT NULL constraint from a partition
-ALTER TABLE fd_pt2_1 ALTER c1 DROP NOT NULL;
-ERROR:  column "c1" is marked NOT NULL in parent table
--- partition must have parent's constraints
-ALTER TABLE fd_pt2 DETACH PARTITION fd_pt2_1;
-ALTER TABLE fd_pt2 ALTER c2 SET NOT NULL;
-\d+ fd_pt2
-                                   Partitioned table "public.fd_pt2"
- Column |  Type   | Collation | Nullable | Default | Storage  | Stats target | Description 
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1     | integer |           | not null |         | plain    |              | 
- c2     | text    |           | not null |         | extended |              | 
- c3     | date    |           |          |         | plain    |              | 
-Partition key: LIST (c1)
-Not-null constraints:
-    "fd_pt2_c1_not_null" NOT NULL "c1"
-    "fd_pt2_c2_not_null" NOT NULL "c2"
-Number of partitions: 0
-
-\d+ fd_pt2_1
-                                       Foreign table "public.fd_pt2_1"
- Column |  Type   | Collation | Nullable | Default | FDW options | Storage  | Stats target | Description 
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1     | integer |           | not null |         |             | plain    |              | 
- c2     | text    |           |          |         |             | extended |              | 
- c3     | date    |           | not null |         |             | plain    |              | 
-Check constraints:
-    "p21chk" CHECK (c2 <> ''::text)
-Not-null constraints:
-    "fd_pt2_1_c1_not_null" NOT NULL "c1"
-    "fd_pt2_1_c3_not_null" NOT NULL "c3"
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-
-ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR
-ERROR:  column "c2" in child table "fd_pt2_1" must be marked NOT NULL
-ALTER FOREIGN TABLE fd_pt2_1 ALTER c2 SET NOT NULL;
-ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1);
-ALTER TABLE fd_pt2 DETACH PARTITION fd_pt2_1;
-ALTER TABLE fd_pt2 ADD CONSTRAINT fd_pt2chk1 CHECK (c1 > 0);
-\d+ fd_pt2
-                                   Partitioned table "public.fd_pt2"
- Column |  Type   | Collation | Nullable | Default | Storage  | Stats target | Description 
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1     | integer |           | not null |         | plain    |              | 
- c2     | text    |           | not null |         | extended |              | 
- c3     | date    |           |          |         | plain    |              | 
-Partition key: LIST (c1)
-Check constraints:
-    "fd_pt2chk1" CHECK (c1 > 0)
-Not-null constraints:
-    "fd_pt2_c1_not_null" NOT NULL "c1"
-    "fd_pt2_c2_not_null" NOT NULL "c2"
-Number of partitions: 0
-
-\d+ fd_pt2_1
-                                       Foreign table "public.fd_pt2_1"
- Column |  Type   | Collation | Nullable | Default | FDW options | Storage  | Stats target | Description 
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1     | integer |           | not null |         |             | plain    |              | 
- c2     | text    |           | not null |         |             | extended |              | 
- c3     | date    |           | not null |         |             | plain    |              | 
-Check constraints:
-    "p21chk" CHECK (c2 <> ''::text)
-Not-null constraints:
-    "fd_pt2_1_c1_not_null" NOT NULL "c1"
-    "fd_pt2_1_c2_not_null" NOT NULL "c2"
-    "fd_pt2_1_c3_not_null" NOT NULL "c3"
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-
-ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR
-ERROR:  child table is missing constraint "fd_pt2chk1"
-ALTER FOREIGN TABLE fd_pt2_1 ADD CONSTRAINT fd_pt2chk1 CHECK (c1 > 0);
-ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1);
-DROP FOREIGN TABLE fd_pt2_1;
-DROP TABLE fd_pt2;
--- foreign table cannot be part of partition tree made of temporary
--- relations.
-CREATE TEMP TABLE temp_parted (a int) PARTITION BY LIST (a);
-CREATE FOREIGN TABLE foreign_part PARTITION OF temp_parted DEFAULT
-  SERVER s0;  -- ERROR
-ERROR:  cannot create a permanent relation as partition of temporary relation "temp_parted"
-CREATE FOREIGN TABLE foreign_part (a int) SERVER s0;
-ALTER TABLE temp_parted ATTACH PARTITION foreign_part DEFAULT;  -- ERROR
-ERROR:  cannot attach a permanent relation as partition of temporary relation "temp_parted"
-DROP FOREIGN TABLE foreign_part;
-DROP TABLE temp_parted;
--- Cleanup
-DROP SCHEMA foreign_schema CASCADE;
-DROP ROLE regress_test_role;  -- ERROR
-ERROR:  role "regress_test_role" cannot be dropped because some objects depend on it
-DETAIL:  privileges for foreign-data wrapper foo
-privileges for server s4
-owner of user mapping for regress_test_role on server s6
-DROP SERVER t1 CASCADE;
-NOTICE:  drop cascades to user mapping for public on server t1
-DROP USER MAPPING FOR regress_test_role SERVER s6;
-DROP FOREIGN DATA WRAPPER foo CASCADE;
-NOTICE:  drop cascades to 5 other objects
-DETAIL:  drop cascades to server s4
-drop cascades to user mapping for regress_foreign_data_user on server s4
-drop cascades to server s6
-drop cascades to server s9
-drop cascades to user mapping for regress_unprivileged_role on server s9
-DROP SERVER s8 CASCADE;
-NOTICE:  drop cascades to 2 other objects
-DETAIL:  drop cascades to user mapping for regress_foreign_data_user on server s8
-drop cascades to user mapping for public on server s8
-DROP ROLE regress_test_indirect;
-DROP ROLE regress_test_role;
-DROP ROLE regress_unprivileged_role;  -- ERROR
-ERROR:  role "regress_unprivileged_role" cannot be dropped because some objects depend on it
-DETAIL:  privileges for foreign-data wrapper postgresql
-REVOKE ALL ON FOREIGN DATA WRAPPER postgresql FROM regress_unprivileged_role;
-DROP ROLE regress_unprivileged_role;
-DROP ROLE regress_test_role2;
-DROP FOREIGN DATA WRAPPER postgresql CASCADE;
-DROP FOREIGN DATA WRAPPER dummy CASCADE;
-NOTICE:  drop cascades to server s0
-\c
-DROP ROLE regress_foreign_data_user;
--- At this point we should have no wrappers, no servers, and no mappings.
-SELECT fdwname, fdwhandler, fdwvalidator, fdwoptions FROM pg_foreign_data_wrapper;
- fdwname | fdwhandler | fdwvalidator | fdwoptions 
----------+------------+--------------+------------
-(0 rows)
-
-SELECT srvname, srvoptions FROM pg_foreign_server;
- srvname | srvoptions 
----------+------------
-(0 rows)
-
-SELECT * FROM pg_user_mapping;
- oid | umuser | umserver | umoptions 
------+--------+----------+-----------
-(0 rows)
-
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+	Is the server running locally and accepting connections on that socket?
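
The foreign_data hunk above exercises the attach-time rules for foreign partitions: ATTACH PARTITION skips the row-validation scan for a foreign table, but the catalog-level requirements (matching columns, and the parent's NOT NULL and CHECK constraints) are still enforced. A minimal sketch of that behavior outside the regression harness follows, assuming a scratch database and a handler-less wrapper; sketch_fdw, sketch_s0, pt, pt_1, and pt_chk are hypothetical names standing in for the suite's "dummy" wrapper, server s0, and the fd_pt2 objects.

    -- Hypothetical reproduction sketch, not part of the test suite.
    CREATE FOREIGN DATA WRAPPER sketch_fdw;                   -- no handler, like "dummy"
    CREATE SERVER sketch_s0 FOREIGN DATA WRAPPER sketch_fdw;
    CREATE TABLE pt (c1 integer NOT NULL, c2 text) PARTITION BY LIST (c1);
    CREATE FOREIGN TABLE pt_1 (c1 integer NOT NULL, c2 text) SERVER sketch_s0;
    -- Unlike attaching a plain table, no validation scan happens here:
    ALTER TABLE pt ATTACH PARTITION pt_1 FOR VALUES IN (1);
    -- Catalog-level checks still apply: after adding a CHECK constraint
    -- to the parent, re-attaching fails until the partition has it too.
    ALTER TABLE pt DETACH PARTITION pt_1;
    ALTER TABLE pt ADD CONSTRAINT pt_chk CHECK (c1 > 0);
    ALTER TABLE pt ATTACH PARTITION pt_1 FOR VALUES IN (1);   -- ERROR: missing "pt_chk"
    ALTER FOREIGN TABLE pt_1 ADD CONSTRAINT pt_chk CHECK (c1 > 0);
    ALTER TABLE pt ATTACH PARTITION pt_1 FOR VALUES IN (1);   -- succeeds

Since a handler-less foreign table can never be scanned, PostgreSQL cannot verify remote rows against the partition bound at attach time; it simply trusts the FDW, which is why the expected output above notes that no attach-partition validation occurs for foreign tables.
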
diff -U3 /Users/admin/pgsql/src/test/regress/expected/window.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/window.out --- /Users/admin/pgsql/src/test/regress/expected/window.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/window.out 2024-12-13 13:20:11 @@ -1,5405 +1,2 @@ --- --- WINDOW FUNCTIONS --- -CREATE TEMPORARY TABLE empsalary ( - depname varchar, - empno bigint, - salary int, - enroll_date date -); -INSERT INTO empsalary VALUES -('develop', 10, 5200, '2007-08-01'), -('sales', 1, 5000, '2006-10-01'), -('personnel', 5, 3500, '2007-12-10'), -('sales', 4, 4800, '2007-08-08'), -('personnel', 2, 3900, '2006-12-23'), -('develop', 7, 4200, '2008-01-01'), -('develop', 9, 4500, '2008-01-01'), -('sales', 3, 4800, '2007-08-01'), -('develop', 8, 6000, '2006-10-01'), -('develop', 11, 5200, '2007-08-15'); -SELECT depname, empno, salary, sum(salary) OVER (PARTITION BY depname) FROM empsalary ORDER BY depname, salary; - depname | empno | salary | sum ------------+-------+--------+------- - develop | 7 | 4200 | 25100 - develop | 9 | 4500 | 25100 - develop | 11 | 5200 | 25100 - develop | 10 | 5200 | 25100 - develop | 8 | 6000 | 25100 - personnel | 5 | 3500 | 7400 - personnel | 2 | 3900 | 7400 - sales | 3 | 4800 | 14600 - sales | 4 | 4800 | 14600 - sales | 1 | 5000 | 14600 -(10 rows) - -SELECT depname, empno, salary, rank() OVER (PARTITION BY depname ORDER BY salary) FROM empsalary; - depname | empno | salary | rank ------------+-------+--------+------ - develop | 7 | 4200 | 1 - develop | 9 | 4500 | 2 - develop | 11 | 5200 | 3 - develop | 10 | 5200 | 3 - develop | 8 | 6000 | 5 - personnel | 5 | 3500 | 1 - personnel | 2 | 3900 | 2 - sales | 3 | 4800 | 1 - sales | 4 | 4800 | 1 - sales | 1 | 5000 | 3 -(10 rows) - --- with GROUP BY -SELECT four, ten, SUM(SUM(four)) OVER (PARTITION BY four), AVG(ten) FROM tenk1 -GROUP BY four, ten ORDER BY four, ten; - four | ten | sum | avg -------+-----+------+------------------------ - 0 | 0 | 0 | 0.00000000000000000000 - 0 | 2 | 0 | 2.0000000000000000 - 0 | 4 | 0 | 4.0000000000000000 - 0 | 6 | 0 | 6.0000000000000000 - 0 | 8 | 0 | 8.0000000000000000 - 1 | 1 | 2500 | 1.00000000000000000000 - 1 | 3 | 2500 | 3.0000000000000000 - 1 | 5 | 2500 | 5.0000000000000000 - 1 | 7 | 2500 | 7.0000000000000000 - 1 | 9 | 2500 | 9.0000000000000000 - 2 | 0 | 5000 | 0.00000000000000000000 - 2 | 2 | 5000 | 2.0000000000000000 - 2 | 4 | 5000 | 4.0000000000000000 - 2 | 6 | 5000 | 6.0000000000000000 - 2 | 8 | 5000 | 8.0000000000000000 - 3 | 1 | 7500 | 1.00000000000000000000 - 3 | 3 | 7500 | 3.0000000000000000 - 3 | 5 | 7500 | 5.0000000000000000 - 3 | 7 | 7500 | 7.0000000000000000 - 3 | 9 | 7500 | 9.0000000000000000 -(20 rows) - -SELECT depname, empno, salary, sum(salary) OVER w FROM empsalary WINDOW w AS (PARTITION BY depname); - depname | empno | salary | sum ------------+-------+--------+------- - develop | 11 | 5200 | 25100 - develop | 7 | 4200 | 25100 - develop | 9 | 4500 | 25100 - develop | 8 | 6000 | 25100 - develop | 10 | 5200 | 25100 - personnel | 5 | 3500 | 7400 - personnel | 2 | 3900 | 7400 - sales | 3 | 4800 | 14600 - sales | 1 | 5000 | 14600 - sales | 4 | 4800 | 14600 -(10 rows) - -SELECT depname, empno, salary, rank() OVER w FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY salary) ORDER BY rank() OVER w; - depname | empno | salary | rank ------------+-------+--------+------ - develop | 7 | 4200 | 1 - personnel | 5 | 3500 | 1 - sales | 3 | 4800 | 1 - sales | 4 | 4800 | 1 - personnel | 2 | 3900 
| 2 - develop | 9 | 4500 | 2 - sales | 1 | 5000 | 3 - develop | 11 | 5200 | 3 - develop | 10 | 5200 | 3 - develop | 8 | 6000 | 5 -(10 rows) - --- empty window specification -SELECT COUNT(*) OVER () FROM tenk1 WHERE unique2 < 10; - count -------- - 10 - 10 - 10 - 10 - 10 - 10 - 10 - 10 - 10 - 10 -(10 rows) - -SELECT COUNT(*) OVER w FROM tenk1 WHERE unique2 < 10 WINDOW w AS (); - count -------- - 10 - 10 - 10 - 10 - 10 - 10 - 10 - 10 - 10 - 10 -(10 rows) - --- no window operation -SELECT four FROM tenk1 WHERE FALSE WINDOW w AS (PARTITION BY ten); - four ------- -(0 rows) - --- cumulative aggregate -SELECT sum(four) OVER (PARTITION BY ten ORDER BY unique2) AS sum_1, ten, four FROM tenk1 WHERE unique2 < 10; - sum_1 | ten | four --------+-----+------ - 0 | 0 | 0 - 0 | 0 | 0 - 2 | 0 | 2 - 3 | 1 | 3 - 4 | 1 | 1 - 5 | 1 | 1 - 3 | 3 | 3 - 0 | 4 | 0 - 1 | 7 | 1 - 1 | 9 | 1 -(10 rows) - -SELECT row_number() OVER (ORDER BY unique2) FROM tenk1 WHERE unique2 < 10; - row_number ------------- - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 -(10 rows) - -SELECT rank() OVER (PARTITION BY four ORDER BY ten) AS rank_1, ten, four FROM tenk1 WHERE unique2 < 10; - rank_1 | ten | four ---------+-----+------ - 1 | 0 | 0 - 1 | 0 | 0 - 3 | 4 | 0 - 1 | 1 | 1 - 1 | 1 | 1 - 3 | 7 | 1 - 4 | 9 | 1 - 1 | 0 | 2 - 1 | 1 | 3 - 2 | 3 | 3 -(10 rows) - -SELECT dense_rank() OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - dense_rank | ten | four -------------+-----+------ - 1 | 0 | 0 - 1 | 0 | 0 - 2 | 4 | 0 - 1 | 1 | 1 - 1 | 1 | 1 - 2 | 7 | 1 - 3 | 9 | 1 - 1 | 0 | 2 - 1 | 1 | 3 - 2 | 3 | 3 -(10 rows) - -SELECT percent_rank() OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - percent_rank | ten | four ---------------------+-----+------ - 0 | 0 | 0 - 0 | 0 | 0 - 1 | 4 | 0 - 0 | 1 | 1 - 0 | 1 | 1 - 0.6666666666666666 | 7 | 1 - 1 | 9 | 1 - 0 | 0 | 2 - 0 | 1 | 3 - 1 | 3 | 3 -(10 rows) - -SELECT cume_dist() OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - cume_dist | ten | four ---------------------+-----+------ - 0.6666666666666666 | 0 | 0 - 0.6666666666666666 | 0 | 0 - 1 | 4 | 0 - 0.5 | 1 | 1 - 0.5 | 1 | 1 - 0.75 | 7 | 1 - 1 | 9 | 1 - 1 | 0 | 2 - 0.5 | 1 | 3 - 1 | 3 | 3 -(10 rows) - -SELECT ntile(3) OVER (ORDER BY ten, four), ten, four FROM tenk1 WHERE unique2 < 10; - ntile | ten | four --------+-----+------ - 1 | 0 | 0 - 1 | 0 | 0 - 1 | 0 | 2 - 1 | 1 | 1 - 2 | 1 | 1 - 2 | 1 | 3 - 2 | 3 | 3 - 3 | 4 | 0 - 3 | 7 | 1 - 3 | 9 | 1 -(10 rows) - -SELECT ntile(NULL) OVER (ORDER BY ten, four), ten, four FROM tenk1 LIMIT 2; - ntile | ten | four --------+-----+------ - | 0 | 0 - | 0 | 0 -(2 rows) - -SELECT lag(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - lag | ten | four ------+-----+------ - | 0 | 0 - 0 | 0 | 0 - 0 | 4 | 0 - | 1 | 1 - 1 | 1 | 1 - 1 | 7 | 1 - 7 | 9 | 1 - | 0 | 2 - | 1 | 3 - 1 | 3 | 3 -(10 rows) - -SELECT lag(ten, four) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - lag | ten | four ------+-----+------ - 0 | 0 | 0 - 0 | 0 | 0 - 4 | 4 | 0 - | 1 | 1 - 1 | 1 | 1 - 1 | 7 | 1 - 7 | 9 | 1 - | 0 | 2 - | 1 | 3 - | 3 | 3 -(10 rows) - -SELECT lag(ten, four, 0) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - lag | ten | four ------+-----+------ - 0 | 0 | 0 - 0 | 0 | 0 - 4 | 4 | 0 - 0 | 1 | 1 - 1 | 1 | 1 - 1 | 7 | 1 - 7 | 9 | 1 - 0 | 0 | 2 - 0 | 1 | 3 - 0 | 3 | 3 -(10 rows) - -SELECT lag(ten, four, 0.7) OVER (PARTITION BY four ORDER BY ten), 
ten, four FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten; - lag | ten | four ------+-----+------ - 0 | 0 | 0 - 0 | 0 | 0 - 4 | 4 | 0 - 0.7 | 1 | 1 - 1 | 1 | 1 - 1 | 7 | 1 - 7 | 9 | 1 - 0.7 | 0 | 2 - 0.7 | 1 | 3 - 0.7 | 3 | 3 -(10 rows) - -SELECT lead(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - lead | ten | four -------+-----+------ - 0 | 0 | 0 - 4 | 0 | 0 - | 4 | 0 - 1 | 1 | 1 - 7 | 1 | 1 - 9 | 7 | 1 - | 9 | 1 - | 0 | 2 - 3 | 1 | 3 - | 3 | 3 -(10 rows) - -SELECT lead(ten * 2, 1) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - lead | ten | four -------+-----+------ - 0 | 0 | 0 - 8 | 0 | 0 - | 4 | 0 - 2 | 1 | 1 - 14 | 1 | 1 - 18 | 7 | 1 - | 9 | 1 - | 0 | 2 - 6 | 1 | 3 - | 3 | 3 -(10 rows) - -SELECT lead(ten * 2, 1, -1) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - lead | ten | four -------+-----+------ - 0 | 0 | 0 - 8 | 0 | 0 - -1 | 4 | 0 - 2 | 1 | 1 - 14 | 1 | 1 - 18 | 7 | 1 - -1 | 9 | 1 - -1 | 0 | 2 - 6 | 1 | 3 - -1 | 3 | 3 -(10 rows) - -SELECT lead(ten * 2, 1, -1.4) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten; - lead | ten | four -------+-----+------ - 0 | 0 | 0 - 8 | 0 | 0 - -1.4 | 4 | 0 - 2 | 1 | 1 - 14 | 1 | 1 - 18 | 7 | 1 - -1.4 | 9 | 1 - -1.4 | 0 | 2 - 6 | 1 | 3 - -1.4 | 3 | 3 -(10 rows) - -SELECT first_value(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - first_value | ten | four --------------+-----+------ - 0 | 0 | 0 - 0 | 0 | 0 - 0 | 4 | 0 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 7 | 1 - 1 | 9 | 1 - 0 | 0 | 2 - 1 | 1 | 3 - 1 | 3 | 3 -(10 rows) - --- last_value returns the last row of the frame, which is CURRENT ROW in ORDER BY window. -SELECT last_value(four) OVER (ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; - last_value | ten | four -------------+-----+------ - 0 | 0 | 0 - 0 | 0 | 2 - 0 | 0 | 0 - 1 | 1 | 1 - 1 | 1 | 3 - 1 | 1 | 1 - 3 | 3 | 3 - 0 | 4 | 0 - 1 | 7 | 1 - 1 | 9 | 1 -(10 rows) - -SELECT last_value(ten) OVER (PARTITION BY four), ten, four FROM - (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)s - ORDER BY four, ten; - last_value | ten | four -------------+-----+------ - 4 | 0 | 0 - 4 | 0 | 0 - 4 | 4 | 0 - 9 | 1 | 1 - 9 | 1 | 1 - 9 | 7 | 1 - 9 | 9 | 1 - 0 | 0 | 2 - 3 | 1 | 3 - 3 | 3 | 3 -(10 rows) - -SELECT nth_value(ten, four + 1) OVER (PARTITION BY four), ten, four - FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)s; - nth_value | ten | four ------------+-----+------ - 0 | 0 | 0 - 0 | 0 | 0 - 0 | 4 | 0 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 7 | 1 - 1 | 9 | 1 - | 0 | 2 - | 1 | 3 - | 3 | 3 -(10 rows) - -SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER (PARTITION BY two ORDER BY ten) AS wsum -FROM tenk1 GROUP BY ten, two; - ten | two | gsum | wsum ------+-----+-------+-------- - 0 | 0 | 45000 | 45000 - 2 | 0 | 47000 | 92000 - 4 | 0 | 49000 | 141000 - 6 | 0 | 51000 | 192000 - 8 | 0 | 53000 | 245000 - 1 | 1 | 46000 | 46000 - 3 | 1 | 48000 | 94000 - 5 | 1 | 50000 | 144000 - 7 | 1 | 52000 | 196000 - 9 | 1 | 54000 | 250000 -(10 rows) - -SELECT count(*) OVER (PARTITION BY four), four FROM (SELECT * FROM tenk1 WHERE two = 1)s WHERE unique2 < 10; - count | four --------+------ - 4 | 1 - 4 | 1 - 4 | 1 - 4 | 1 - 2 | 3 - 2 | 3 -(6 rows) - -SELECT (count(*) OVER (PARTITION BY four ORDER BY ten) + - sum(hundred) OVER (PARTITION BY four ORDER BY ten))::varchar AS cntsum - FROM tenk1 WHERE unique2 < 10; - cntsum --------- - 22 - 22 - 87 - 24 - 24 - 
82 - 92 - 51 - 92 - 136 -(10 rows) - --- opexpr with different windows evaluation. -SELECT * FROM( - SELECT count(*) OVER (PARTITION BY four ORDER BY ten) + - sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS total, - count(*) OVER (PARTITION BY four ORDER BY ten) AS fourcount, - sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS twosum - FROM tenk1 -)sub -WHERE total <> fourcount + twosum; - total | fourcount | twosum --------+-----------+-------- -(0 rows) - -SELECT avg(four) OVER (PARTITION BY four ORDER BY thousand / 100) FROM tenk1 WHERE unique2 < 10; - avg ------------------------- - 0.00000000000000000000 - 0.00000000000000000000 - 0.00000000000000000000 - 1.00000000000000000000 - 1.00000000000000000000 - 1.00000000000000000000 - 1.00000000000000000000 - 2.0000000000000000 - 3.0000000000000000 - 3.0000000000000000 -(10 rows) - -SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER win AS wsum -FROM tenk1 GROUP BY ten, two WINDOW win AS (PARTITION BY two ORDER BY ten); - ten | two | gsum | wsum ------+-----+-------+-------- - 0 | 0 | 45000 | 45000 - 2 | 0 | 47000 | 92000 - 4 | 0 | 49000 | 141000 - 6 | 0 | 51000 | 192000 - 8 | 0 | 53000 | 245000 - 1 | 1 | 46000 | 46000 - 3 | 1 | 48000 | 94000 - 5 | 1 | 50000 | 144000 - 7 | 1 | 52000 | 196000 - 9 | 1 | 54000 | 250000 -(10 rows) - --- more than one window with GROUP BY -SELECT sum(salary), - row_number() OVER (ORDER BY depname), - sum(sum(salary)) OVER (ORDER BY depname DESC) -FROM empsalary GROUP BY depname; - sum | row_number | sum --------+------------+------- - 25100 | 1 | 47100 - 7400 | 2 | 22000 - 14600 | 3 | 14600 -(3 rows) - --- identical windows with different names -SELECT sum(salary) OVER w1, count(*) OVER w2 -FROM empsalary WINDOW w1 AS (ORDER BY salary), w2 AS (ORDER BY salary); - sum | count --------+------- - 3500 | 1 - 7400 | 2 - 11600 | 3 - 16100 | 4 - 25700 | 6 - 25700 | 6 - 30700 | 7 - 41100 | 9 - 41100 | 9 - 47100 | 10 -(10 rows) - --- subplan -SELECT lead(ten, (SELECT two FROM tenk1 WHERE s.unique2 = unique2)) OVER (PARTITION BY four ORDER BY ten) -FROM tenk1 s WHERE unique2 < 10; - lead ------- - 0 - 0 - 4 - 1 - 7 - 9 - - 0 - 3 - -(10 rows) - --- empty table -SELECT count(*) OVER (PARTITION BY four) FROM (SELECT * FROM tenk1 WHERE FALSE)s; - count -------- -(0 rows) - --- mixture of agg/wfunc in the same window -SELECT sum(salary) OVER w, rank() OVER w FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY salary DESC); - sum | rank --------+------ - 6000 | 1 - 16400 | 2 - 16400 | 2 - 20900 | 4 - 25100 | 5 - 3900 | 1 - 7400 | 2 - 5000 | 1 - 14600 | 2 - 14600 | 2 -(10 rows) - --- strict aggs -SELECT empno, depname, salary, bonus, depadj, MIN(bonus) OVER (ORDER BY empno), MAX(depadj) OVER () FROM( - SELECT *, - CASE WHEN enroll_date < '2008-01-01' THEN 2008 - extract(YEAR FROM enroll_date) END * 500 AS bonus, - CASE WHEN - AVG(salary) OVER (PARTITION BY depname) < salary - THEN 200 END AS depadj FROM empsalary -)s; - empno | depname | salary | bonus | depadj | min | max --------+-----------+--------+-------+--------+------+----- - 1 | sales | 5000 | 1000 | 200 | 1000 | 200 - 2 | personnel | 3900 | 1000 | 200 | 1000 | 200 - 3 | sales | 4800 | 500 | | 500 | 200 - 4 | sales | 4800 | 500 | | 500 | 200 - 5 | personnel | 3500 | 500 | | 500 | 200 - 7 | develop | 4200 | | | 500 | 200 - 8 | develop | 6000 | 1000 | 200 | 500 | 200 - 9 | develop | 4500 | | | 500 | 200 - 10 | develop | 5200 | 500 | 200 | 500 | 200 - 11 | develop | 5200 | 500 | 200 | 500 | 200 -(10 rows) - --- window function over ungrouped agg 
over empty row set (bug before 9.1) -SELECT SUM(COUNT(f1)) OVER () FROM int4_tbl WHERE f1=42; - sum ------ - 0 -(1 row) - --- window function with ORDER BY an expression involving aggregates (9.1 bug) -select ten, - sum(unique1) + sum(unique2) as res, - rank() over (order by sum(unique1) + sum(unique2)) as rank -from tenk1 -group by ten order by ten; - ten | res | rank ------+----------+------ - 0 | 9976146 | 4 - 1 | 10114187 | 9 - 2 | 10059554 | 8 - 3 | 9878541 | 1 - 4 | 9881005 | 2 - 5 | 9981670 | 5 - 6 | 9947099 | 3 - 7 | 10120309 | 10 - 8 | 9991305 | 6 - 9 | 10040184 | 7 -(10 rows) - --- window and aggregate with GROUP BY expression (9.2 bug) -explain (costs off) -select first_value(max(x)) over (), y - from (select unique1 as x, ten+four as y from tenk1) ss - group by y; - QUERY PLAN ---------------------------------------------- - WindowAgg - -> HashAggregate - Group Key: (tenk1.ten + tenk1.four) - -> Seq Scan on tenk1 -(4 rows) - --- window functions returning pass-by-ref values from different rows -select x, lag(x, 1) over (order by x), lead(x, 3) over (order by x) -from (select x::numeric as x from generate_series(1,10) x); - x | lag | lead -----+-----+------ - 1 | | 4 - 2 | 1 | 5 - 3 | 2 | 6 - 4 | 3 | 7 - 5 | 4 | 8 - 6 | 5 | 9 - 7 | 6 | 10 - 8 | 7 | - 9 | 8 | - 10 | 9 | -(10 rows) - --- test non-default frame specifications -SELECT four, ten, - sum(ten) over (partition by four order by ten), - last_value(ten) over (partition by four order by ten) -FROM (select distinct ten, four from tenk1) ss; - four | ten | sum | last_value -------+-----+-----+------------ - 0 | 0 | 0 | 0 - 0 | 2 | 2 | 2 - 0 | 4 | 6 | 4 - 0 | 6 | 12 | 6 - 0 | 8 | 20 | 8 - 1 | 1 | 1 | 1 - 1 | 3 | 4 | 3 - 1 | 5 | 9 | 5 - 1 | 7 | 16 | 7 - 1 | 9 | 25 | 9 - 2 | 0 | 0 | 0 - 2 | 2 | 2 | 2 - 2 | 4 | 6 | 4 - 2 | 6 | 12 | 6 - 2 | 8 | 20 | 8 - 3 | 1 | 1 | 1 - 3 | 3 | 4 | 3 - 3 | 5 | 9 | 5 - 3 | 7 | 16 | 7 - 3 | 9 | 25 | 9 -(20 rows) - -SELECT four, ten, - sum(ten) over (partition by four order by ten range between unbounded preceding and current row), - last_value(ten) over (partition by four order by ten range between unbounded preceding and current row) -FROM (select distinct ten, four from tenk1) ss; - four | ten | sum | last_value -------+-----+-----+------------ - 0 | 0 | 0 | 0 - 0 | 2 | 2 | 2 - 0 | 4 | 6 | 4 - 0 | 6 | 12 | 6 - 0 | 8 | 20 | 8 - 1 | 1 | 1 | 1 - 1 | 3 | 4 | 3 - 1 | 5 | 9 | 5 - 1 | 7 | 16 | 7 - 1 | 9 | 25 | 9 - 2 | 0 | 0 | 0 - 2 | 2 | 2 | 2 - 2 | 4 | 6 | 4 - 2 | 6 | 12 | 6 - 2 | 8 | 20 | 8 - 3 | 1 | 1 | 1 - 3 | 3 | 4 | 3 - 3 | 5 | 9 | 5 - 3 | 7 | 16 | 7 - 3 | 9 | 25 | 9 -(20 rows) - -SELECT four, ten, - sum(ten) over (partition by four order by ten range between unbounded preceding and unbounded following), - last_value(ten) over (partition by four order by ten range between unbounded preceding and unbounded following) -FROM (select distinct ten, four from tenk1) ss; - four | ten | sum | last_value -------+-----+-----+------------ - 0 | 0 | 20 | 8 - 0 | 2 | 20 | 8 - 0 | 4 | 20 | 8 - 0 | 6 | 20 | 8 - 0 | 8 | 20 | 8 - 1 | 1 | 25 | 9 - 1 | 3 | 25 | 9 - 1 | 5 | 25 | 9 - 1 | 7 | 25 | 9 - 1 | 9 | 25 | 9 - 2 | 0 | 20 | 8 - 2 | 2 | 20 | 8 - 2 | 4 | 20 | 8 - 2 | 6 | 20 | 8 - 2 | 8 | 20 | 8 - 3 | 1 | 25 | 9 - 3 | 3 | 25 | 9 - 3 | 5 | 25 | 9 - 3 | 7 | 25 | 9 - 3 | 9 | 25 | 9 -(20 rows) - -SELECT four, ten/4 as two, - sum(ten/4) over (partition by four order by ten/4 range between unbounded preceding and current row), - last_value(ten/4) over (partition by four order by ten/4 range between unbounded preceding and 
current row) -FROM (select distinct ten, four from tenk1) ss; - four | two | sum | last_value -------+-----+-----+------------ - 0 | 0 | 0 | 0 - 0 | 0 | 0 | 0 - 0 | 1 | 2 | 1 - 0 | 1 | 2 | 1 - 0 | 2 | 4 | 2 - 1 | 0 | 0 | 0 - 1 | 0 | 0 | 0 - 1 | 1 | 2 | 1 - 1 | 1 | 2 | 1 - 1 | 2 | 4 | 2 - 2 | 0 | 0 | 0 - 2 | 0 | 0 | 0 - 2 | 1 | 2 | 1 - 2 | 1 | 2 | 1 - 2 | 2 | 4 | 2 - 3 | 0 | 0 | 0 - 3 | 0 | 0 | 0 - 3 | 1 | 2 | 1 - 3 | 1 | 2 | 1 - 3 | 2 | 4 | 2 -(20 rows) - -SELECT four, ten/4 as two, - sum(ten/4) over (partition by four order by ten/4 rows between unbounded preceding and current row), - last_value(ten/4) over (partition by four order by ten/4 rows between unbounded preceding and current row) -FROM (select distinct ten, four from tenk1) ss; - four | two | sum | last_value -------+-----+-----+------------ - 0 | 0 | 0 | 0 - 0 | 0 | 0 | 0 - 0 | 1 | 1 | 1 - 0 | 1 | 2 | 1 - 0 | 2 | 4 | 2 - 1 | 0 | 0 | 0 - 1 | 0 | 0 | 0 - 1 | 1 | 1 | 1 - 1 | 1 | 2 | 1 - 1 | 2 | 4 | 2 - 2 | 0 | 0 | 0 - 2 | 0 | 0 | 0 - 2 | 1 | 1 | 1 - 2 | 1 | 2 | 1 - 2 | 2 | 4 | 2 - 3 | 0 | 0 | 0 - 3 | 0 | 0 | 0 - 3 | 1 | 1 | 1 - 3 | 1 | 2 | 1 - 3 | 2 | 4 | 2 -(20 rows) - -SELECT sum(unique1) over (order by four range between current row and unbounded following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 45 | 0 | 0 - 45 | 8 | 0 - 45 | 4 | 0 - 33 | 5 | 1 - 33 | 9 | 1 - 33 | 1 | 1 - 18 | 6 | 2 - 18 | 2 | 2 - 10 | 3 | 3 - 10 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (rows between current row and unbounded following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 45 | 4 | 0 - 41 | 2 | 2 - 39 | 1 | 1 - 38 | 6 | 2 - 32 | 9 | 1 - 23 | 8 | 0 - 15 | 5 | 1 - 10 | 3 | 3 - 7 | 7 | 3 - 0 | 0 | 0 -(10 rows) - -SELECT sum(unique1) over (rows between 2 preceding and 2 following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 7 | 4 | 0 - 13 | 2 | 2 - 22 | 1 | 1 - 26 | 6 | 2 - 29 | 9 | 1 - 31 | 8 | 0 - 32 | 5 | 1 - 23 | 3 | 3 - 15 | 7 | 3 - 10 | 0 | 0 -(10 rows) - -SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude no others), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 7 | 4 | 0 - 13 | 2 | 2 - 22 | 1 | 1 - 26 | 6 | 2 - 29 | 9 | 1 - 31 | 8 | 0 - 32 | 5 | 1 - 23 | 3 | 3 - 15 | 7 | 3 - 10 | 0 | 0 -(10 rows) - -SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude current row), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 3 | 4 | 0 - 11 | 2 | 2 - 21 | 1 | 1 - 20 | 6 | 2 - 20 | 9 | 1 - 23 | 8 | 0 - 27 | 5 | 1 - 20 | 3 | 3 - 8 | 7 | 3 - 10 | 0 | 0 -(10 rows) - -SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude group), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - | 4 | 0 - | 2 | 2 - | 1 | 1 - | 6 | 2 - | 9 | 1 - | 8 | 0 - | 5 | 1 - | 3 | 3 - | 7 | 3 - | 0 | 0 -(10 rows) - -SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude ties), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 4 | 4 | 0 - 2 | 2 | 2 - 1 | 1 | 1 - 6 | 6 | 2 - 9 | 9 | 1 - 8 | 8 | 0 - 5 | 5 | 1 - 3 | 3 | 3 - 7 | 7 | 3 - 0 | 0 | 0 -(10 rows) - -SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude current row), - unique1, four -FROM tenk1 WHERE unique1 < 10; - first_value | unique1 | four 
--------------+---------+------ - 8 | 0 | 0 - 4 | 8 | 0 - 5 | 4 | 0 - 9 | 5 | 1 - 1 | 9 | 1 - 6 | 1 | 1 - 2 | 6 | 2 - 3 | 2 | 2 - 7 | 3 | 3 - | 7 | 3 -(10 rows) - -SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude group), - unique1, four -FROM tenk1 WHERE unique1 < 10; - first_value | unique1 | four --------------+---------+------ - | 0 | 0 - 5 | 8 | 0 - 5 | 4 | 0 - | 5 | 1 - 6 | 9 | 1 - 6 | 1 | 1 - 3 | 6 | 2 - 3 | 2 | 2 - | 3 | 3 - | 7 | 3 -(10 rows) - -SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude ties), - unique1, four -FROM tenk1 WHERE unique1 < 10; - first_value | unique1 | four --------------+---------+------ - 0 | 0 | 0 - 8 | 8 | 0 - 4 | 4 | 0 - 5 | 5 | 1 - 9 | 9 | 1 - 1 | 1 | 1 - 6 | 6 | 2 - 2 | 2 | 2 - 3 | 3 | 3 - 7 | 7 | 3 -(10 rows) - -SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude current row), - unique1, four -FROM tenk1 WHERE unique1 < 10; - last_value | unique1 | four -------------+---------+------ - 4 | 0 | 0 - 5 | 8 | 0 - 9 | 4 | 0 - 1 | 5 | 1 - 6 | 9 | 1 - 2 | 1 | 1 - 3 | 6 | 2 - 7 | 2 | 2 - 7 | 3 | 3 - | 7 | 3 -(10 rows) - -SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude group), - unique1, four -FROM tenk1 WHERE unique1 < 10; - last_value | unique1 | four -------------+---------+------ - | 0 | 0 - 5 | 8 | 0 - 9 | 4 | 0 - | 5 | 1 - 6 | 9 | 1 - 2 | 1 | 1 - 3 | 6 | 2 - 7 | 2 | 2 - | 3 | 3 - | 7 | 3 -(10 rows) - -SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude ties), - unique1, four -FROM tenk1 WHERE unique1 < 10; - last_value | unique1 | four -------------+---------+------ - 0 | 0 | 0 - 5 | 8 | 0 - 9 | 4 | 0 - 5 | 5 | 1 - 6 | 9 | 1 - 2 | 1 | 1 - 3 | 6 | 2 - 7 | 2 | 2 - 3 | 3 | 3 - 7 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (rows between 2 preceding and 1 preceding), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - | 4 | 0 - 4 | 2 | 2 - 6 | 1 | 1 - 3 | 6 | 2 - 7 | 9 | 1 - 15 | 8 | 0 - 17 | 5 | 1 - 13 | 3 | 3 - 8 | 7 | 3 - 10 | 0 | 0 -(10 rows) - -SELECT sum(unique1) over (rows between 1 following and 3 following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 9 | 4 | 0 - 16 | 2 | 2 - 23 | 1 | 1 - 22 | 6 | 2 - 16 | 9 | 1 - 15 | 8 | 0 - 10 | 5 | 1 - 7 | 3 | 3 - 0 | 7 | 3 - | 0 | 0 -(10 rows) - -SELECT sum(unique1) over (rows between unbounded preceding and 1 following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 6 | 4 | 0 - 7 | 2 | 2 - 13 | 1 | 1 - 22 | 6 | 2 - 30 | 9 | 1 - 35 | 8 | 0 - 38 | 5 | 1 - 45 | 3 | 3 - 45 | 7 | 3 - 45 | 0 | 0 -(10 rows) - -SELECT sum(unique1) over (w range between current row and unbounded following), - unique1, four -FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four); - sum | unique1 | four ------+---------+------ - 45 | 0 | 0 - 45 | 8 | 0 - 45 | 4 | 0 - 33 | 5 | 1 - 33 | 9 | 1 - 33 | 1 | 1 - 18 | 6 | 2 - 18 | 2 | 2 - 10 | 3 | 3 - 10 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (w range between unbounded preceding and current row exclude current row), - unique1, four -FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four); - sum | unique1 | four ------+---------+------ - 12 | 0 | 0 - 4 | 8 | 0 - 8 | 4 | 0 - 22 | 5 | 1 - 18 | 9 | 1 - 26 | 1 | 1 - 29 | 6 | 2 - 33 | 2 | 2 - 42 | 3 | 3 - 38 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (w range 
between unbounded preceding and current row exclude group), - unique1, four -FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four); - sum | unique1 | four ------+---------+------ - | 0 | 0 - | 8 | 0 - | 4 | 0 - 12 | 5 | 1 - 12 | 9 | 1 - 12 | 1 | 1 - 27 | 6 | 2 - 27 | 2 | 2 - 35 | 3 | 3 - 35 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (w range between unbounded preceding and current row exclude ties), - unique1, four -FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four); - sum | unique1 | four ------+---------+------ - 0 | 0 | 0 - 8 | 8 | 0 - 4 | 4 | 0 - 17 | 5 | 1 - 21 | 9 | 1 - 13 | 1 | 1 - 33 | 6 | 2 - 29 | 2 | 2 - 38 | 3 | 3 - 42 | 7 | 3 -(10 rows) - -SELECT first_value(unique1) over w, - nth_value(unique1, 2) over w AS nth_2, - last_value(unique1) over w, unique1, four -FROM tenk1 WHERE unique1 < 10 -WINDOW w AS (order by four range between current row and unbounded following); - first_value | nth_2 | last_value | unique1 | four --------------+-------+------------+---------+------ - 0 | 8 | 7 | 0 | 0 - 0 | 8 | 7 | 8 | 0 - 0 | 8 | 7 | 4 | 0 - 5 | 9 | 7 | 5 | 1 - 5 | 9 | 7 | 9 | 1 - 5 | 9 | 7 | 1 | 1 - 6 | 2 | 7 | 6 | 2 - 6 | 2 | 7 | 2 | 2 - 3 | 7 | 7 | 3 | 3 - 3 | 7 | 7 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over - (order by unique1 - rows (SELECT unique1 FROM tenk1 ORDER BY unique1 LIMIT 1) + 1 PRECEDING), - unique1 -FROM tenk1 WHERE unique1 < 10; - sum | unique1 ------+--------- - 0 | 0 - 1 | 1 - 3 | 2 - 5 | 3 - 7 | 4 - 9 | 5 - 11 | 6 - 13 | 7 - 15 | 8 - 17 | 9 -(10 rows) - -CREATE TEMP VIEW v_window AS - SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following) as sum_rows - FROM generate_series(1, 10) i; -SELECT * FROM v_window; - i | sum_rows -----+---------- - 1 | 3 - 2 | 6 - 3 | 9 - 4 | 12 - 5 | 15 - 6 | 18 - 7 | 21 - 8 | 24 - 9 | 27 - 10 | 19 -(10 rows) - -SELECT pg_get_viewdef('v_window'); - pg_get_viewdef ------------------------------------------------------------------------------------ - SELECT i, + - sum(i) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS sum_rows+ - FROM generate_series(1, 10) i(i); -(1 row) - -CREATE OR REPLACE TEMP VIEW v_window AS - SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following - exclude current row) as sum_rows FROM generate_series(1, 10) i; -SELECT * FROM v_window; - i | sum_rows -----+---------- - 1 | 2 - 2 | 4 - 3 | 6 - 4 | 8 - 5 | 10 - 6 | 12 - 7 | 14 - 8 | 16 - 9 | 18 - 10 | 9 -(10 rows) - -SELECT pg_get_viewdef('v_window'); - pg_get_viewdef -------------------------------------------------------------------------------------------------------- - SELECT i, + - sum(i) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE CURRENT ROW) AS sum_rows+ - FROM generate_series(1, 10) i(i); -(1 row) - -CREATE OR REPLACE TEMP VIEW v_window AS - SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following - exclude group) as sum_rows FROM generate_series(1, 10) i; -SELECT * FROM v_window; - i | sum_rows -----+---------- - 1 | 2 - 2 | 4 - 3 | 6 - 4 | 8 - 5 | 10 - 6 | 12 - 7 | 14 - 8 | 16 - 9 | 18 - 10 | 9 -(10 rows) - -SELECT pg_get_viewdef('v_window'); - pg_get_viewdef -------------------------------------------------------------------------------------------------- - SELECT i, + - sum(i) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE GROUP) AS sum_rows+ - FROM generate_series(1, 10) i(i); -(1 row) - -CREATE OR REPLACE TEMP VIEW v_window AS - SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following - exclude ties) as 
sum_rows FROM generate_series(1, 10) i; -SELECT * FROM v_window; - i | sum_rows -----+---------- - 1 | 3 - 2 | 6 - 3 | 9 - 4 | 12 - 5 | 15 - 6 | 18 - 7 | 21 - 8 | 24 - 9 | 27 - 10 | 19 -(10 rows) - -SELECT pg_get_viewdef('v_window'); - pg_get_viewdef ------------------------------------------------------------------------------------------------- - SELECT i, + - sum(i) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE TIES) AS sum_rows+ - FROM generate_series(1, 10) i(i); -(1 row) - -CREATE OR REPLACE TEMP VIEW v_window AS - SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following - exclude no others) as sum_rows FROM generate_series(1, 10) i; -SELECT * FROM v_window; - i | sum_rows -----+---------- - 1 | 3 - 2 | 6 - 3 | 9 - 4 | 12 - 5 | 15 - 6 | 18 - 7 | 21 - 8 | 24 - 9 | 27 - 10 | 19 -(10 rows) - -SELECT pg_get_viewdef('v_window'); - pg_get_viewdef ------------------------------------------------------------------------------------ - SELECT i, + - sum(i) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS sum_rows+ - FROM generate_series(1, 10) i(i); -(1 row) - -CREATE OR REPLACE TEMP VIEW v_window AS - SELECT i, sum(i) over (order by i groups between 1 preceding and 1 following) as sum_rows FROM generate_series(1, 10) i; -SELECT * FROM v_window; - i | sum_rows -----+---------- - 1 | 3 - 2 | 6 - 3 | 9 - 4 | 12 - 5 | 15 - 6 | 18 - 7 | 21 - 8 | 24 - 9 | 27 - 10 | 19 -(10 rows) - -SELECT pg_get_viewdef('v_window'); - pg_get_viewdef -------------------------------------------------------------------------------------- - SELECT i, + - sum(i) OVER (ORDER BY i GROUPS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS sum_rows+ - FROM generate_series(1, 10) i(i); -(1 row) - -DROP VIEW v_window; -CREATE TEMP VIEW v_window AS - SELECT i, min(i) over (order by i range between '1 day' preceding and '10 days' following) as min_i - FROM generate_series(now(), now()+'100 days'::interval, '1 hour') i; -SELECT pg_get_viewdef('v_window'); - pg_get_viewdef ------------------------------------------------------------------------------------------------------------------------ - SELECT i, + - min(i) OVER (ORDER BY i RANGE BETWEEN '@ 1 day'::interval PRECEDING AND '@ 10 days'::interval FOLLOWING) AS min_i+ - FROM generate_series(now(), (now() + '@ 100 days'::interval), '@ 1 hour'::interval) i(i); -(1 row) - --- RANGE offset PRECEDING/FOLLOWING tests -SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - | 0 | 0 - | 8 | 0 - | 4 | 0 - 12 | 5 | 1 - 12 | 9 | 1 - 12 | 1 | 1 - 27 | 6 | 2 - 27 | 2 | 2 - 23 | 3 | 3 - 23 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four desc range between 2::int8 preceding and 1::int2 preceding), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - | 3 | 3 - | 7 | 3 - 10 | 6 | 2 - 10 | 2 | 2 - 18 | 9 | 1 - 18 | 5 | 1 - 18 | 1 | 1 - 23 | 0 | 0 - 23 | 8 | 0 - 23 | 4 | 0 -(10 rows) - -SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude no others), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - | 0 | 0 - | 8 | 0 - | 4 | 0 - 12 | 5 | 1 - 12 | 9 | 1 - 12 | 1 | 1 - 27 | 6 | 2 - 27 | 2 | 2 - 23 | 3 | 3 - 23 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude current row), - unique1, four -FROM tenk1 
WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - | 0 | 0 - | 8 | 0 - | 4 | 0 - 12 | 5 | 1 - 12 | 9 | 1 - 12 | 1 | 1 - 27 | 6 | 2 - 27 | 2 | 2 - 23 | 3 | 3 - 23 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude group), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - | 0 | 0 - | 8 | 0 - | 4 | 0 - 12 | 5 | 1 - 12 | 9 | 1 - 12 | 1 | 1 - 27 | 6 | 2 - 27 | 2 | 2 - 23 | 3 | 3 - 23 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude ties), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - | 0 | 0 - | 8 | 0 - | 4 | 0 - 12 | 5 | 1 - 12 | 9 | 1 - 12 | 1 | 1 - 27 | 6 | 2 - 27 | 2 | 2 - 23 | 3 | 3 - 23 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four range between 2::int8 preceding and 6::int2 following exclude ties), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 33 | 0 | 0 - 41 | 8 | 0 - 37 | 4 | 0 - 35 | 5 | 1 - 39 | 9 | 1 - 31 | 1 | 1 - 43 | 6 | 2 - 39 | 2 | 2 - 26 | 3 | 3 - 30 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four range between 2::int8 preceding and 6::int2 following exclude group), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 33 | 0 | 0 - 33 | 8 | 0 - 33 | 4 | 0 - 30 | 5 | 1 - 30 | 9 | 1 - 30 | 1 | 1 - 37 | 6 | 2 - 37 | 2 | 2 - 23 | 3 | 3 - 23 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (partition by four order by unique1 range between 5::int8 preceding and 6::int2 following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 4 | 0 | 0 - 12 | 4 | 0 - 12 | 8 | 0 - 6 | 1 | 1 - 15 | 5 | 1 - 14 | 9 | 1 - 8 | 2 | 2 - 8 | 6 | 2 - 10 | 3 | 3 - 10 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (partition by four order by unique1 range between 5::int8 preceding and 6::int2 following - exclude current row),unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 4 | 0 | 0 - 8 | 4 | 0 - 4 | 8 | 0 - 5 | 1 | 1 - 10 | 5 | 1 - 5 | 9 | 1 - 6 | 2 | 2 - 2 | 6 | 2 - 7 | 3 | 3 - 3 | 7 | 3 -(10 rows) - -select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following), - salary, enroll_date from empsalary; - sum | salary | enroll_date --------+--------+------------- - 34900 | 5000 | 10-01-2006 - 34900 | 6000 | 10-01-2006 - 38400 | 3900 | 12-23-2006 - 47100 | 4800 | 08-01-2007 - 47100 | 5200 | 08-01-2007 - 47100 | 4800 | 08-08-2007 - 47100 | 5200 | 08-15-2007 - 36100 | 3500 | 12-10-2007 - 32200 | 4500 | 01-01-2008 - 32200 | 4200 | 01-01-2008 -(10 rows) - -select sum(salary) over (order by enroll_date desc range between '1 year'::interval preceding and '1 year'::interval following), - salary, enroll_date from empsalary; - sum | salary | enroll_date --------+--------+------------- - 32200 | 4200 | 01-01-2008 - 32200 | 4500 | 01-01-2008 - 36100 | 3500 | 12-10-2007 - 47100 | 5200 | 08-15-2007 - 47100 | 4800 | 08-08-2007 - 47100 | 4800 | 08-01-2007 - 47100 | 5200 | 08-01-2007 - 38400 | 3900 | 12-23-2006 - 34900 | 5000 | 10-01-2006 - 34900 | 6000 | 10-01-2006 -(10 rows) - -select sum(salary) over (order by enroll_date desc range between '1 year'::interval following and '1 year'::interval following), - salary, enroll_date from empsalary; - sum | salary | enroll_date 
------+--------+------------- - | 4200 | 01-01-2008 - | 4500 | 01-01-2008 - | 3500 | 12-10-2007 - | 5200 | 08-15-2007 - | 4800 | 08-08-2007 - | 4800 | 08-01-2007 - | 5200 | 08-01-2007 - | 3900 | 12-23-2006 - | 5000 | 10-01-2006 - | 6000 | 10-01-2006 -(10 rows) - -select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following - exclude current row), salary, enroll_date from empsalary; - sum | salary | enroll_date --------+--------+------------- - 29900 | 5000 | 10-01-2006 - 28900 | 6000 | 10-01-2006 - 34500 | 3900 | 12-23-2006 - 42300 | 4800 | 08-01-2007 - 41900 | 5200 | 08-01-2007 - 42300 | 4800 | 08-08-2007 - 41900 | 5200 | 08-15-2007 - 32600 | 3500 | 12-10-2007 - 27700 | 4500 | 01-01-2008 - 28000 | 4200 | 01-01-2008 -(10 rows) - -select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following - exclude group), salary, enroll_date from empsalary; - sum | salary | enroll_date --------+--------+------------- - 23900 | 5000 | 10-01-2006 - 23900 | 6000 | 10-01-2006 - 34500 | 3900 | 12-23-2006 - 37100 | 4800 | 08-01-2007 - 37100 | 5200 | 08-01-2007 - 42300 | 4800 | 08-08-2007 - 41900 | 5200 | 08-15-2007 - 32600 | 3500 | 12-10-2007 - 23500 | 4500 | 01-01-2008 - 23500 | 4200 | 01-01-2008 -(10 rows) - -select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following - exclude ties), salary, enroll_date from empsalary; - sum | salary | enroll_date --------+--------+------------- - 28900 | 5000 | 10-01-2006 - 29900 | 6000 | 10-01-2006 - 38400 | 3900 | 12-23-2006 - 41900 | 4800 | 08-01-2007 - 42300 | 5200 | 08-01-2007 - 47100 | 4800 | 08-08-2007 - 47100 | 5200 | 08-15-2007 - 36100 | 3500 | 12-10-2007 - 28000 | 4500 | 01-01-2008 - 27700 | 4200 | 01-01-2008 -(10 rows) - -select first_value(salary) over(order by salary range between 1000 preceding and 1000 following), - lead(salary) over(order by salary range between 1000 preceding and 1000 following), - nth_value(salary, 1) over(order by salary range between 1000 preceding and 1000 following), - salary from empsalary; - first_value | lead | nth_value | salary --------------+------+-----------+-------- - 3500 | 3900 | 3500 | 3500 - 3500 | 4200 | 3500 | 3900 - 3500 | 4500 | 3500 | 4200 - 3500 | 4800 | 3500 | 4500 - 3900 | 4800 | 3900 | 4800 - 3900 | 5000 | 3900 | 4800 - 4200 | 5200 | 4200 | 5000 - 4200 | 5200 | 4200 | 5200 - 4200 | 6000 | 4200 | 5200 - 5000 | | 5000 | 6000 -(10 rows) - -select last_value(salary) over(order by salary range between 1000 preceding and 1000 following), - lag(salary) over(order by salary range between 1000 preceding and 1000 following), - salary from empsalary; - last_value | lag | salary -------------+------+-------- - 4500 | | 3500 - 4800 | 3500 | 3900 - 5200 | 3900 | 4200 - 5200 | 4200 | 4500 - 5200 | 4500 | 4800 - 5200 | 4800 | 4800 - 6000 | 4800 | 5000 - 6000 | 5000 | 5200 - 6000 | 5200 | 5200 - 6000 | 5200 | 6000 -(10 rows) - -select first_value(salary) over(order by salary range between 1000 following and 3000 following - exclude current row), - lead(salary) over(order by salary range between 1000 following and 3000 following exclude ties), - nth_value(salary, 1) over(order by salary range between 1000 following and 3000 following - exclude ties), - salary from empsalary; - first_value | lead | nth_value | salary --------------+------+-----------+-------- - 4500 | 3900 | 4500 | 3500 - 5000 | 4200 | 5000 | 3900 - 5200 | 4500 | 5200 | 4200 - 6000 | 
4800 | 6000 | 4500 - 6000 | 4800 | 6000 | 4800 - 6000 | 5000 | 6000 | 4800 - 6000 | 5200 | 6000 | 5000 - | 5200 | | 5200 - | 6000 | | 5200 - | | | 6000 -(10 rows) - -select last_value(salary) over(order by salary range between 1000 following and 3000 following - exclude group), - lag(salary) over(order by salary range between 1000 following and 3000 following exclude group), - salary from empsalary; - last_value | lag | salary -------------+------+-------- - 6000 | | 3500 - 6000 | 3500 | 3900 - 6000 | 3900 | 4200 - 6000 | 4200 | 4500 - 6000 | 4500 | 4800 - 6000 | 4800 | 4800 - 6000 | 4800 | 5000 - | 5000 | 5200 - | 5200 | 5200 - | 5200 | 6000 -(10 rows) - -select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following - exclude ties), - last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following), - salary, enroll_date from empsalary; - first_value | last_value | salary | enroll_date --------------+------------+--------+------------- - 5000 | 5200 | 5000 | 10-01-2006 - 6000 | 5200 | 6000 | 10-01-2006 - 5000 | 3500 | 3900 | 12-23-2006 - 5000 | 4200 | 4800 | 08-01-2007 - 5000 | 4200 | 5200 | 08-01-2007 - 5000 | 4200 | 4800 | 08-08-2007 - 5000 | 4200 | 5200 | 08-15-2007 - 5000 | 4200 | 3500 | 12-10-2007 - 5000 | 4200 | 4500 | 01-01-2008 - 5000 | 4200 | 4200 | 01-01-2008 -(10 rows) - -select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following - exclude ties), - last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following - exclude ties), - salary, enroll_date from empsalary; - first_value | last_value | salary | enroll_date --------------+------------+--------+------------- - 5000 | 5200 | 5000 | 10-01-2006 - 6000 | 5200 | 6000 | 10-01-2006 - 5000 | 3500 | 3900 | 12-23-2006 - 5000 | 4200 | 4800 | 08-01-2007 - 5000 | 4200 | 5200 | 08-01-2007 - 5000 | 4200 | 4800 | 08-08-2007 - 5000 | 4200 | 5200 | 08-15-2007 - 5000 | 4200 | 3500 | 12-10-2007 - 5000 | 4500 | 4500 | 01-01-2008 - 5000 | 4200 | 4200 | 01-01-2008 -(10 rows) - -select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following - exclude group), - last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following - exclude group), - salary, enroll_date from empsalary; - first_value | last_value | salary | enroll_date --------------+------------+--------+------------- - 3900 | 5200 | 5000 | 10-01-2006 - 3900 | 5200 | 6000 | 10-01-2006 - 5000 | 3500 | 3900 | 12-23-2006 - 5000 | 4200 | 4800 | 08-01-2007 - 5000 | 4200 | 5200 | 08-01-2007 - 5000 | 4200 | 4800 | 08-08-2007 - 5000 | 4200 | 5200 | 08-15-2007 - 5000 | 4200 | 3500 | 12-10-2007 - 5000 | 3500 | 4500 | 01-01-2008 - 5000 | 3500 | 4200 | 01-01-2008 -(10 rows) - -select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following - exclude current row), - last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following - exclude current row), - salary, enroll_date from empsalary; - first_value | last_value | salary | enroll_date --------------+------------+--------+------------- - 6000 | 5200 | 5000 | 10-01-2006 - 5000 | 5200 | 6000 | 10-01-2006 - 5000 | 3500 | 3900 | 12-23-2006 - 5000 | 4200 | 4800 | 08-01-2007 - 5000 | 4200 | 5200 | 08-01-2007 - 5000 | 4200 | 4800 | 
08-08-2007 - 5000 | 4200 | 5200 | 08-15-2007 - 5000 | 4200 | 3500 | 12-10-2007 - 5000 | 4200 | 4500 | 01-01-2008 - 5000 | 4500 | 4200 | 01-01-2008 -(10 rows) - --- RANGE offset PRECEDING/FOLLOWING with null values -select x, y, - first_value(y) over w, - last_value(y) over w -from - (select x, x as y from generate_series(1,5) as x - union all select null, 42 - union all select null, 43) ss -window w as - (order by x asc nulls first range between 2 preceding and 2 following); - x | y | first_value | last_value ----+----+-------------+------------ - | 42 | 42 | 43 - | 43 | 42 | 43 - 1 | 1 | 1 | 3 - 2 | 2 | 1 | 4 - 3 | 3 | 1 | 5 - 4 | 4 | 2 | 5 - 5 | 5 | 3 | 5 -(7 rows) - -select x, y, - first_value(y) over w, - last_value(y) over w -from - (select x, x as y from generate_series(1,5) as x - union all select null, 42 - union all select null, 43) ss -window w as - (order by x asc nulls last range between 2 preceding and 2 following); - x | y | first_value | last_value ----+----+-------------+------------ - 1 | 1 | 1 | 3 - 2 | 2 | 1 | 4 - 3 | 3 | 1 | 5 - 4 | 4 | 2 | 5 - 5 | 5 | 3 | 5 - | 42 | 42 | 43 - | 43 | 42 | 43 -(7 rows) - -select x, y, - first_value(y) over w, - last_value(y) over w -from - (select x, x as y from generate_series(1,5) as x - union all select null, 42 - union all select null, 43) ss -window w as - (order by x desc nulls first range between 2 preceding and 2 following); - x | y | first_value | last_value ----+----+-------------+------------ - | 43 | 43 | 42 - | 42 | 43 | 42 - 5 | 5 | 5 | 3 - 4 | 4 | 5 | 2 - 3 | 3 | 5 | 1 - 2 | 2 | 4 | 1 - 1 | 1 | 3 | 1 -(7 rows) - -select x, y, - first_value(y) over w, - last_value(y) over w -from - (select x, x as y from generate_series(1,5) as x - union all select null, 42 - union all select null, 43) ss -window w as - (order by x desc nulls last range between 2 preceding and 2 following); - x | y | first_value | last_value ----+----+-------------+------------ - 5 | 5 | 5 | 3 - 4 | 4 | 5 | 2 - 3 | 3 | 5 | 1 - 2 | 2 | 4 | 1 - 1 | 1 | 3 | 1 - | 42 | 42 | 43 - | 43 | 42 | 43 -(7 rows) - --- There is a syntactic ambiguity in the SQL standard. Since --- UNBOUNDED is a non-reserved word, it could be the name of a --- function parameter and be used as an expression. There is a --- grammar hack to resolve such cases as the keyword. The following --- tests record this behavior. -CREATE FUNCTION unbounded_syntax_test1a(x int) RETURNS TABLE (a int, b int, c int) -LANGUAGE SQL -BEGIN ATOMIC - SELECT sum(unique1) over (rows between x preceding and x following), - unique1, four - FROM tenk1 WHERE unique1 < 10; -END; -CREATE FUNCTION unbounded_syntax_test1b(x int) RETURNS TABLE (a int, b int, c int) -LANGUAGE SQL -AS $$ - SELECT sum(unique1) over (rows between x preceding and x following), - unique1, four - FROM tenk1 WHERE unique1 < 10; -$$; --- These will apply the argument to the window specification inside the function. 
-SELECT * FROM unbounded_syntax_test1a(2); - a | b | c -----+---+--- - 7 | 4 | 0 - 13 | 2 | 2 - 22 | 1 | 1 - 26 | 6 | 2 - 29 | 9 | 1 - 31 | 8 | 0 - 32 | 5 | 1 - 23 | 3 | 3 - 15 | 7 | 3 - 10 | 0 | 0 -(10 rows) - -SELECT * FROM unbounded_syntax_test1b(2); - a | b | c -----+---+--- - 7 | 4 | 0 - 13 | 2 | 2 - 22 | 1 | 1 - 26 | 6 | 2 - 29 | 9 | 1 - 31 | 8 | 0 - 32 | 5 | 1 - 23 | 3 | 3 - 15 | 7 | 3 - 10 | 0 | 0 -(10 rows) - -CREATE FUNCTION unbounded_syntax_test2a(unbounded int) RETURNS TABLE (a int, b int, c int) -LANGUAGE SQL -BEGIN ATOMIC - SELECT sum(unique1) over (rows between unbounded preceding and unbounded following), - unique1, four - FROM tenk1 WHERE unique1 < 10; -END; -CREATE FUNCTION unbounded_syntax_test2b(unbounded int) RETURNS TABLE (a int, b int, c int) -LANGUAGE SQL -AS $$ - SELECT sum(unique1) over (rows between unbounded preceding and unbounded following), - unique1, four - FROM tenk1 WHERE unique1 < 10; -$$; --- These will not apply the argument but instead treat UNBOUNDED as a keyword. -SELECT * FROM unbounded_syntax_test2a(2); - a | b | c -----+---+--- - 45 | 4 | 0 - 45 | 2 | 2 - 45 | 1 | 1 - 45 | 6 | 2 - 45 | 9 | 1 - 45 | 8 | 0 - 45 | 5 | 1 - 45 | 3 | 3 - 45 | 7 | 3 - 45 | 0 | 0 -(10 rows) - -SELECT * FROM unbounded_syntax_test2b(2); - a | b | c -----+---+--- - 45 | 4 | 0 - 45 | 2 | 2 - 45 | 1 | 1 - 45 | 6 | 2 - 45 | 9 | 1 - 45 | 8 | 0 - 45 | 5 | 1 - 45 | 3 | 3 - 45 | 7 | 3 - 45 | 0 | 0 -(10 rows) - -DROP FUNCTION unbounded_syntax_test1a, unbounded_syntax_test1b, - unbounded_syntax_test2a, unbounded_syntax_test2b; --- Other tests with token UNBOUNDED in potentially problematic position -CREATE FUNCTION unbounded(x int) RETURNS int LANGUAGE SQL IMMUTABLE RETURN x; -SELECT sum(unique1) over (rows between 1 preceding and 1 following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 6 | 4 | 0 - 7 | 2 | 2 - 9 | 1 | 1 - 16 | 6 | 2 - 23 | 9 | 1 - 22 | 8 | 0 - 16 | 5 | 1 - 15 | 3 | 3 - 10 | 7 | 3 - 7 | 0 | 0 -(10 rows) - -SELECT sum(unique1) over (rows between unbounded(1) preceding and unbounded(1) following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 6 | 4 | 0 - 7 | 2 | 2 - 9 | 1 | 1 - 16 | 6 | 2 - 23 | 9 | 1 - 22 | 8 | 0 - 16 | 5 | 1 - 15 | 3 | 3 - 10 | 7 | 3 - 7 | 0 | 0 -(10 rows) - -SELECT sum(unique1) over (rows between unbounded.x preceding and unbounded.x following), - unique1, four -FROM tenk1, (values (1)) as unbounded(x) WHERE unique1 < 10; -ERROR: argument of ROWS must not contain variables -LINE 1: SELECT sum(unique1) over (rows between unbounded.x preceding... 
- ^ -DROP FUNCTION unbounded; --- Check overflow behavior for various integer sizes -select x, last_value(x) over (order by x::smallint range between current row and 2147450884 following) -from generate_series(32764, 32766) x; - x | last_value --------+------------ - 32764 | 32766 - 32765 | 32766 - 32766 | 32766 -(3 rows) - -select x, last_value(x) over (order by x::smallint desc range between current row and 2147450885 following) -from generate_series(-32766, -32764) x; - x | last_value ---------+------------ - -32764 | -32766 - -32765 | -32766 - -32766 | -32766 -(3 rows) - -select x, last_value(x) over (order by x range between current row and 4 following) -from generate_series(2147483644, 2147483646) x; - x | last_value -------------+------------ - 2147483644 | 2147483646 - 2147483645 | 2147483646 - 2147483646 | 2147483646 -(3 rows) - -select x, last_value(x) over (order by x desc range between current row and 5 following) -from generate_series(-2147483646, -2147483644) x; - x | last_value --------------+------------- - -2147483644 | -2147483646 - -2147483645 | -2147483646 - -2147483646 | -2147483646 -(3 rows) - -select x, last_value(x) over (order by x range between current row and 4 following) -from generate_series(9223372036854775804, 9223372036854775806) x; - x | last_value ----------------------+--------------------- - 9223372036854775804 | 9223372036854775806 - 9223372036854775805 | 9223372036854775806 - 9223372036854775806 | 9223372036854775806 -(3 rows) - -select x, last_value(x) over (order by x desc range between current row and 5 following) -from generate_series(-9223372036854775806, -9223372036854775804) x; - x | last_value -----------------------+---------------------- - -9223372036854775804 | -9223372036854775806 - -9223372036854775805 | -9223372036854775806 - -9223372036854775806 | -9223372036854775806 -(3 rows) - --- Test in_range for other numeric datatypes -create temp table numerics( - id int, - f_float4 float4, - f_float8 float8, - f_numeric numeric -); -insert into numerics values -(0, '-infinity', '-infinity', '-infinity'), -(1, -3, -3, -3), -(2, -1, -1, -1), -(3, 0, 0, 0), -(4, 1.1, 1.1, 1.1), -(5, 1.12, 1.12, 1.12), -(6, 2, 2, 2), -(7, 100, 100, 100), -(8, 'infinity', 'infinity', 'infinity'), -(9, 'NaN', 'NaN', 'NaN'); -select id, f_float4, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float4 range between - 1 preceding and 1 following); - id | f_float4 | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 0 - 1 | -3 | 1 | 1 - 2 | -1 | 2 | 3 - 3 | 0 | 2 | 3 - 4 | 1.1 | 4 | 6 - 5 | 1.12 | 4 | 6 - 6 | 2 | 4 | 6 - 7 | 100 | 7 | 7 - 8 | Infinity | 8 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_float4, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float4 range between - 1 preceding and 1.1::float4 following); - id | f_float4 | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 0 - 1 | -3 | 1 | 1 - 2 | -1 | 2 | 3 - 3 | 0 | 2 | 4 - 4 | 1.1 | 4 | 6 - 5 | 1.12 | 4 | 6 - 6 | 2 | 4 | 6 - 7 | 100 | 7 | 7 - 8 | Infinity | 8 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_float4, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float4 range between - 'inf' preceding and 'inf' following); - id | f_float4 | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 8 - 1 | -3 | 0 | 8 - 2 | -1 | 0 | 8 - 3 | 0 | 0 | 8 - 4 | 1.1 | 0 | 8 - 5 | 1.12 | 
0 | 8 - 6 | 2 | 0 | 8 - 7 | 100 | 0 | 8 - 8 | Infinity | 0 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_float4, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float4 range between - 'inf' preceding and 'inf' preceding); - id | f_float4 | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 0 - 1 | -3 | 0 | 0 - 2 | -1 | 0 | 0 - 3 | 0 | 0 | 0 - 4 | 1.1 | 0 | 0 - 5 | 1.12 | 0 | 0 - 6 | 2 | 0 | 0 - 7 | 100 | 0 | 0 - 8 | Infinity | 0 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_float4, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float4 range between - 'inf' following and 'inf' following); - id | f_float4 | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 8 - 1 | -3 | 8 | 8 - 2 | -1 | 8 | 8 - 3 | 0 | 8 | 8 - 4 | 1.1 | 8 | 8 - 5 | 1.12 | 8 | 8 - 6 | 2 | 8 | 8 - 7 | 100 | 8 | 8 - 8 | Infinity | 8 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_float4, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float4 range between - 1.1 preceding and 'NaN' following); -- error, NaN disallowed -ERROR: invalid preceding or following size in window function -select id, f_float8, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float8 range between - 1 preceding and 1 following); - id | f_float8 | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 0 - 1 | -3 | 1 | 1 - 2 | -1 | 2 | 3 - 3 | 0 | 2 | 3 - 4 | 1.1 | 4 | 6 - 5 | 1.12 | 4 | 6 - 6 | 2 | 4 | 6 - 7 | 100 | 7 | 7 - 8 | Infinity | 8 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_float8, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float8 range between - 1 preceding and 1.1::float8 following); - id | f_float8 | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 0 - 1 | -3 | 1 | 1 - 2 | -1 | 2 | 3 - 3 | 0 | 2 | 4 - 4 | 1.1 | 4 | 6 - 5 | 1.12 | 4 | 6 - 6 | 2 | 4 | 6 - 7 | 100 | 7 | 7 - 8 | Infinity | 8 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_float8, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float8 range between - 'inf' preceding and 'inf' following); - id | f_float8 | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 8 - 1 | -3 | 0 | 8 - 2 | -1 | 0 | 8 - 3 | 0 | 0 | 8 - 4 | 1.1 | 0 | 8 - 5 | 1.12 | 0 | 8 - 6 | 2 | 0 | 8 - 7 | 100 | 0 | 8 - 8 | Infinity | 0 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_float8, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float8 range between - 'inf' preceding and 'inf' preceding); - id | f_float8 | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 0 - 1 | -3 | 0 | 0 - 2 | -1 | 0 | 0 - 3 | 0 | 0 | 0 - 4 | 1.1 | 0 | 0 - 5 | 1.12 | 0 | 0 - 6 | 2 | 0 | 0 - 7 | 100 | 0 | 0 - 8 | Infinity | 0 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_float8, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float8 range between - 'inf' following and 'inf' following); - id | f_float8 | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 8 - 1 | -3 | 8 | 8 - 2 | -1 | 8 | 8 - 3 | 0 | 8 | 8 - 4 | 1.1 | 8 | 8 - 5 | 1.12 | 8 | 8 - 6 | 2 | 8 | 8 - 7 | 100 | 8 | 8 - 8 | Infinity | 8 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, 
f_float8, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_float8 range between - 1.1 preceding and 'NaN' following); -- error, NaN disallowed -ERROR: invalid preceding or following size in window function -select id, f_numeric, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_numeric range between - 1 preceding and 1 following); - id | f_numeric | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 0 - 1 | -3 | 1 | 1 - 2 | -1 | 2 | 3 - 3 | 0 | 2 | 3 - 4 | 1.1 | 4 | 6 - 5 | 1.12 | 4 | 6 - 6 | 2 | 4 | 6 - 7 | 100 | 7 | 7 - 8 | Infinity | 8 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_numeric, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_numeric range between - 1 preceding and 1.1::numeric following); - id | f_numeric | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 0 - 1 | -3 | 1 | 1 - 2 | -1 | 2 | 3 - 3 | 0 | 2 | 4 - 4 | 1.1 | 4 | 6 - 5 | 1.12 | 4 | 6 - 6 | 2 | 4 | 6 - 7 | 100 | 7 | 7 - 8 | Infinity | 8 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_numeric, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_numeric range between - 1 preceding and 1.1::float8 following); -- currently unsupported -ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type numeric and offset type double precision -LINE 4: 1 preceding and 1.1::float8 following); - ^ -HINT: Cast the offset value to an appropriate type. -select id, f_numeric, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_numeric range between - 'inf' preceding and 'inf' following); - id | f_numeric | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 8 - 1 | -3 | 0 | 8 - 2 | -1 | 0 | 8 - 3 | 0 | 0 | 8 - 4 | 1.1 | 0 | 8 - 5 | 1.12 | 0 | 8 - 6 | 2 | 0 | 8 - 7 | 100 | 0 | 8 - 8 | Infinity | 0 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_numeric, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_numeric range between - 'inf' preceding and 'inf' preceding); - id | f_numeric | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 0 - 1 | -3 | 0 | 0 - 2 | -1 | 0 | 0 - 3 | 0 | 0 | 0 - 4 | 1.1 | 0 | 0 - 5 | 1.12 | 0 | 0 - 6 | 2 | 0 | 0 - 7 | 100 | 0 | 0 - 8 | Infinity | 0 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_numeric, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_numeric range between - 'inf' following and 'inf' following); - id | f_numeric | first_value | last_value -----+-----------+-------------+------------ - 0 | -Infinity | 0 | 8 - 1 | -3 | 8 | 8 - 2 | -1 | 8 | 8 - 3 | 0 | 8 | 8 - 4 | 1.1 | 8 | 8 - 5 | 1.12 | 8 | 8 - 6 | 2 | 8 | 8 - 7 | 100 | 8 | 8 - 8 | Infinity | 8 | 8 - 9 | NaN | 9 | 9 -(10 rows) - -select id, f_numeric, first_value(id) over w, last_value(id) over w -from numerics -window w as (order by f_numeric range between - 1.1 preceding and 'NaN' following); -- error, NaN disallowed -ERROR: invalid preceding or following size in window function --- Test in_range for other datetime datatypes -create temp table datetimes( - id int, - f_time time, - f_timetz timetz, - f_interval interval, - f_timestamptz timestamptz, - f_timestamp timestamp -); -insert into datetimes values -(0, '10:00', '10:00 BST', '-infinity', '-infinity', '-infinity'), -(1, '11:00', '11:00 BST', '1 year', 
'2000-10-19 10:23:54+01', '2000-10-19 10:23:54'), -(2, '12:00', '12:00 BST', '2 years', '2001-10-19 10:23:54+01', '2001-10-19 10:23:54'), -(3, '13:00', '13:00 BST', '3 years', '2001-10-19 10:23:54+01', '2001-10-19 10:23:54'), -(4, '14:00', '14:00 BST', '4 years', '2002-10-19 10:23:54+01', '2002-10-19 10:23:54'), -(5, '15:00', '15:00 BST', '5 years', '2003-10-19 10:23:54+01', '2003-10-19 10:23:54'), -(6, '15:00', '15:00 BST', '5 years', '2004-10-19 10:23:54+01', '2004-10-19 10:23:54'), -(7, '17:00', '17:00 BST', '7 years', '2005-10-19 10:23:54+01', '2005-10-19 10:23:54'), -(8, '18:00', '18:00 BST', '8 years', '2006-10-19 10:23:54+01', '2006-10-19 10:23:54'), -(9, '19:00', '19:00 BST', '9 years', '2007-10-19 10:23:54+01', '2007-10-19 10:23:54'), -(10, '20:00', '20:00 BST', '10 years', '2008-10-19 10:23:54+01', '2008-10-19 10:23:54'), -(11, '21:00', '21:00 BST', 'infinity', 'infinity', 'infinity'); -select id, f_time, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_time range between - '70 min'::interval preceding and '2 hours'::interval following); - id | f_time | first_value | last_value -----+----------+-------------+------------ - 0 | 10:00:00 | 0 | 2 - 1 | 11:00:00 | 0 | 3 - 2 | 12:00:00 | 1 | 4 - 3 | 13:00:00 | 2 | 6 - 4 | 14:00:00 | 3 | 6 - 5 | 15:00:00 | 4 | 7 - 6 | 15:00:00 | 4 | 7 - 7 | 17:00:00 | 7 | 9 - 8 | 18:00:00 | 7 | 10 - 9 | 19:00:00 | 8 | 11 - 10 | 20:00:00 | 9 | 11 - 11 | 21:00:00 | 10 | 11 -(12 rows) - -select id, f_time, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_time desc range between - '70 min' preceding and '2 hours' following); - id | f_time | first_value | last_value -----+----------+-------------+------------ - 11 | 21:00:00 | 11 | 9 - 10 | 20:00:00 | 11 | 8 - 9 | 19:00:00 | 10 | 7 - 8 | 18:00:00 | 9 | 7 - 7 | 17:00:00 | 8 | 5 - 6 | 15:00:00 | 6 | 3 - 5 | 15:00:00 | 6 | 3 - 4 | 14:00:00 | 6 | 2 - 3 | 13:00:00 | 4 | 1 - 2 | 12:00:00 | 3 | 0 - 1 | 11:00:00 | 2 | 0 - 0 | 10:00:00 | 1 | 0 -(12 rows) - -select id, f_time, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_time desc range between - '-70 min' preceding and '2 hours' following); -- error, negative offset disallowed -ERROR: invalid preceding or following size in window function -select id, f_time, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_time range between - 'infinity'::interval preceding and 'infinity'::interval following); - id | f_time | first_value | last_value -----+----------+-------------+------------ - 0 | 10:00:00 | 0 | 11 - 1 | 11:00:00 | 0 | 11 - 2 | 12:00:00 | 0 | 11 - 3 | 13:00:00 | 0 | 11 - 4 | 14:00:00 | 0 | 11 - 5 | 15:00:00 | 0 | 11 - 6 | 15:00:00 | 0 | 11 - 7 | 17:00:00 | 0 | 11 - 8 | 18:00:00 | 0 | 11 - 9 | 19:00:00 | 0 | 11 - 10 | 20:00:00 | 0 | 11 - 11 | 21:00:00 | 0 | 11 -(12 rows) - -select id, f_time, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_time range between - 'infinity'::interval preceding and 'infinity'::interval preceding); - id | f_time | first_value | last_value -----+----------+-------------+------------ - 0 | 10:00:00 | | - 1 | 11:00:00 | | - 2 | 12:00:00 | | - 3 | 13:00:00 | | - 4 | 14:00:00 | | - 5 | 15:00:00 | | - 6 | 15:00:00 | | - 7 | 17:00:00 | | - 8 | 18:00:00 | | - 9 | 19:00:00 | | - 10 | 20:00:00 | | - 11 | 21:00:00 | | -(12 rows) - -select id, f_time, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_time range between - 
'infinity'::interval following and 'infinity'::interval following); - id | f_time | first_value | last_value -----+----------+-------------+------------ - 0 | 10:00:00 | | - 1 | 11:00:00 | | - 2 | 12:00:00 | | - 3 | 13:00:00 | | - 4 | 14:00:00 | | - 5 | 15:00:00 | | - 6 | 15:00:00 | | - 7 | 17:00:00 | | - 8 | 18:00:00 | | - 9 | 19:00:00 | | - 10 | 20:00:00 | | - 11 | 21:00:00 | | -(12 rows) - -select id, f_time, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_time range between - '-infinity'::interval following and - 'infinity'::interval following); -- error, negative offset disallowed -ERROR: invalid preceding or following size in window function -select id, f_timetz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timetz range between - '70 min'::interval preceding and '2 hours'::interval following); - id | f_timetz | first_value | last_value -----+-------------+-------------+------------ - 0 | 10:00:00+01 | 0 | 2 - 1 | 11:00:00+01 | 0 | 3 - 2 | 12:00:00+01 | 1 | 4 - 3 | 13:00:00+01 | 2 | 6 - 4 | 14:00:00+01 | 3 | 6 - 5 | 15:00:00+01 | 4 | 7 - 6 | 15:00:00+01 | 4 | 7 - 7 | 17:00:00+01 | 7 | 9 - 8 | 18:00:00+01 | 7 | 10 - 9 | 19:00:00+01 | 8 | 11 - 10 | 20:00:00+01 | 9 | 11 - 11 | 21:00:00+01 | 10 | 11 -(12 rows) - -select id, f_timetz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timetz desc range between - '70 min' preceding and '2 hours' following); - id | f_timetz | first_value | last_value -----+-------------+-------------+------------ - 11 | 21:00:00+01 | 11 | 9 - 10 | 20:00:00+01 | 11 | 8 - 9 | 19:00:00+01 | 10 | 7 - 8 | 18:00:00+01 | 9 | 7 - 7 | 17:00:00+01 | 8 | 5 - 6 | 15:00:00+01 | 6 | 3 - 5 | 15:00:00+01 | 6 | 3 - 4 | 14:00:00+01 | 6 | 2 - 3 | 13:00:00+01 | 4 | 1 - 2 | 12:00:00+01 | 3 | 0 - 1 | 11:00:00+01 | 2 | 0 - 0 | 10:00:00+01 | 1 | 0 -(12 rows) - -select id, f_timetz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timetz desc range between - '70 min' preceding and '-2 hours' following); -- error, negative offset disallowed -ERROR: invalid preceding or following size in window function -select id, f_timetz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timetz range between - 'infinity'::interval preceding and 'infinity'::interval following); - id | f_timetz | first_value | last_value -----+-------------+-------------+------------ - 0 | 10:00:00+01 | 0 | 11 - 1 | 11:00:00+01 | 0 | 11 - 2 | 12:00:00+01 | 0 | 11 - 3 | 13:00:00+01 | 0 | 11 - 4 | 14:00:00+01 | 0 | 11 - 5 | 15:00:00+01 | 0 | 11 - 6 | 15:00:00+01 | 0 | 11 - 7 | 17:00:00+01 | 0 | 11 - 8 | 18:00:00+01 | 0 | 11 - 9 | 19:00:00+01 | 0 | 11 - 10 | 20:00:00+01 | 0 | 11 - 11 | 21:00:00+01 | 0 | 11 -(12 rows) - -select id, f_timetz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timetz range between - 'infinity'::interval preceding and 'infinity'::interval preceding); - id | f_timetz | first_value | last_value -----+-------------+-------------+------------ - 0 | 10:00:00+01 | | - 1 | 11:00:00+01 | | - 2 | 12:00:00+01 | | - 3 | 13:00:00+01 | | - 4 | 14:00:00+01 | | - 5 | 15:00:00+01 | | - 6 | 15:00:00+01 | | - 7 | 17:00:00+01 | | - 8 | 18:00:00+01 | | - 9 | 19:00:00+01 | | - 10 | 20:00:00+01 | | - 11 | 21:00:00+01 | | -(12 rows) - -select id, f_timetz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timetz range between - 'infinity'::interval 
following and 'infinity'::interval following); - id | f_timetz | first_value | last_value -----+-------------+-------------+------------ - 0 | 10:00:00+01 | | - 1 | 11:00:00+01 | | - 2 | 12:00:00+01 | | - 3 | 13:00:00+01 | | - 4 | 14:00:00+01 | | - 5 | 15:00:00+01 | | - 6 | 15:00:00+01 | | - 7 | 17:00:00+01 | | - 8 | 18:00:00+01 | | - 9 | 19:00:00+01 | | - 10 | 20:00:00+01 | | - 11 | 21:00:00+01 | | -(12 rows) - -select id, f_timetz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timetz range between - 'infinity'::interval following and - '-infinity'::interval following); -- error, negative offset disallowed -ERROR: invalid preceding or following size in window function -select id, f_interval, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_interval range between - '1 year'::interval preceding and '1 year'::interval following); - id | f_interval | first_value | last_value -----+------------+-------------+------------ - 0 | -infinity | 0 | 0 - 1 | @ 1 year | 1 | 2 - 2 | @ 2 years | 1 | 3 - 3 | @ 3 years | 2 | 4 - 4 | @ 4 years | 3 | 6 - 5 | @ 5 years | 4 | 6 - 6 | @ 5 years | 4 | 6 - 7 | @ 7 years | 7 | 8 - 8 | @ 8 years | 7 | 9 - 9 | @ 9 years | 8 | 10 - 10 | @ 10 years | 9 | 10 - 11 | infinity | 11 | 11 -(12 rows) - -select id, f_interval, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_interval desc range between - '1 year' preceding and '1 year' following); - id | f_interval | first_value | last_value -----+------------+-------------+------------ - 11 | infinity | 11 | 11 - 10 | @ 10 years | 10 | 9 - 9 | @ 9 years | 10 | 8 - 8 | @ 8 years | 9 | 7 - 7 | @ 7 years | 8 | 7 - 6 | @ 5 years | 6 | 4 - 5 | @ 5 years | 6 | 4 - 4 | @ 4 years | 6 | 3 - 3 | @ 3 years | 4 | 2 - 2 | @ 2 years | 3 | 1 - 1 | @ 1 year | 2 | 1 - 0 | -infinity | 0 | 0 -(12 rows) - -select id, f_interval, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_interval desc range between - '-1 year' preceding and '1 year' following); -- error, negative offset disallowed -ERROR: invalid preceding or following size in window function -select id, f_interval, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_interval range between - 'infinity'::interval preceding and 'infinity'::interval following); - id | f_interval | first_value | last_value -----+------------+-------------+------------ - 0 | -infinity | 0 | 11 - 1 | @ 1 year | 0 | 11 - 2 | @ 2 years | 0 | 11 - 3 | @ 3 years | 0 | 11 - 4 | @ 4 years | 0 | 11 - 5 | @ 5 years | 0 | 11 - 6 | @ 5 years | 0 | 11 - 7 | @ 7 years | 0 | 11 - 8 | @ 8 years | 0 | 11 - 9 | @ 9 years | 0 | 11 - 10 | @ 10 years | 0 | 11 - 11 | infinity | 0 | 11 -(12 rows) - -select id, f_interval, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_interval range between - 'infinity'::interval preceding and 'infinity'::interval preceding); - id | f_interval | first_value | last_value -----+------------+-------------+------------ - 0 | -infinity | 0 | 0 - 1 | @ 1 year | 0 | 0 - 2 | @ 2 years | 0 | 0 - 3 | @ 3 years | 0 | 0 - 4 | @ 4 years | 0 | 0 - 5 | @ 5 years | 0 | 0 - 6 | @ 5 years | 0 | 0 - 7 | @ 7 years | 0 | 0 - 8 | @ 8 years | 0 | 0 - 9 | @ 9 years | 0 | 0 - 10 | @ 10 years | 0 | 0 - 11 | infinity | 0 | 11 -(12 rows) - -select id, f_interval, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_interval range between - 'infinity'::interval 
following and 'infinity'::interval following); - id | f_interval | first_value | last_value -----+------------+-------------+------------ - 0 | -infinity | 0 | 11 - 1 | @ 1 year | 11 | 11 - 2 | @ 2 years | 11 | 11 - 3 | @ 3 years | 11 | 11 - 4 | @ 4 years | 11 | 11 - 5 | @ 5 years | 11 | 11 - 6 | @ 5 years | 11 | 11 - 7 | @ 7 years | 11 | 11 - 8 | @ 8 years | 11 | 11 - 9 | @ 9 years | 11 | 11 - 10 | @ 10 years | 11 | 11 - 11 | infinity | 11 | 11 -(12 rows) - -select id, f_interval, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_interval range between - '-infinity'::interval following and - 'infinity'::interval following); -- error, negative offset disallowed -ERROR: invalid preceding or following size in window function -select id, f_timestamptz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamptz range between - '1 year'::interval preceding and '1 year'::interval following); - id | f_timestamptz | first_value | last_value -----+------------------------------+-------------+------------ - 0 | -infinity | 0 | 0 - 1 | Thu Oct 19 02:23:54 2000 PDT | 1 | 3 - 2 | Fri Oct 19 02:23:54 2001 PDT | 1 | 4 - 3 | Fri Oct 19 02:23:54 2001 PDT | 1 | 4 - 4 | Sat Oct 19 02:23:54 2002 PDT | 2 | 5 - 5 | Sun Oct 19 02:23:54 2003 PDT | 4 | 6 - 6 | Tue Oct 19 02:23:54 2004 PDT | 5 | 7 - 7 | Wed Oct 19 02:23:54 2005 PDT | 6 | 8 - 8 | Thu Oct 19 02:23:54 2006 PDT | 7 | 9 - 9 | Fri Oct 19 02:23:54 2007 PDT | 8 | 10 - 10 | Sun Oct 19 02:23:54 2008 PDT | 9 | 10 - 11 | infinity | 11 | 11 -(12 rows) - -select id, f_timestamptz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamptz desc range between - '1 year' preceding and '1 year' following); - id | f_timestamptz | first_value | last_value -----+------------------------------+-------------+------------ - 11 | infinity | 11 | 11 - 10 | Sun Oct 19 02:23:54 2008 PDT | 10 | 9 - 9 | Fri Oct 19 02:23:54 2007 PDT | 10 | 8 - 8 | Thu Oct 19 02:23:54 2006 PDT | 9 | 7 - 7 | Wed Oct 19 02:23:54 2005 PDT | 8 | 6 - 6 | Tue Oct 19 02:23:54 2004 PDT | 7 | 5 - 5 | Sun Oct 19 02:23:54 2003 PDT | 6 | 4 - 4 | Sat Oct 19 02:23:54 2002 PDT | 5 | 2 - 3 | Fri Oct 19 02:23:54 2001 PDT | 4 | 1 - 2 | Fri Oct 19 02:23:54 2001 PDT | 4 | 1 - 1 | Thu Oct 19 02:23:54 2000 PDT | 3 | 1 - 0 | -infinity | 0 | 0 -(12 rows) - -select id, f_timestamptz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamptz desc range between - '1 year' preceding and '-1 year' following); -- error, negative offset disallowed -ERROR: invalid preceding or following size in window function -select id, f_timestamptz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamptz range between - 'infinity'::interval preceding and 'infinity'::interval following); - id | f_timestamptz | first_value | last_value -----+------------------------------+-------------+------------ - 0 | -infinity | 0 | 11 - 1 | Thu Oct 19 02:23:54 2000 PDT | 0 | 11 - 2 | Fri Oct 19 02:23:54 2001 PDT | 0 | 11 - 3 | Fri Oct 19 02:23:54 2001 PDT | 0 | 11 - 4 | Sat Oct 19 02:23:54 2002 PDT | 0 | 11 - 5 | Sun Oct 19 02:23:54 2003 PDT | 0 | 11 - 6 | Tue Oct 19 02:23:54 2004 PDT | 0 | 11 - 7 | Wed Oct 19 02:23:54 2005 PDT | 0 | 11 - 8 | Thu Oct 19 02:23:54 2006 PDT | 0 | 11 - 9 | Fri Oct 19 02:23:54 2007 PDT | 0 | 11 - 10 | Sun Oct 19 02:23:54 2008 PDT | 0 | 11 - 11 | infinity | 0 | 11 -(12 rows) - -select id, f_timestamptz, first_value(id) over w, 
last_value(id) over w -from datetimes -window w as (order by f_timestamptz range between - 'infinity'::interval preceding and 'infinity'::interval preceding); - id | f_timestamptz | first_value | last_value -----+------------------------------+-------------+------------ - 0 | -infinity | 0 | 0 - 1 | Thu Oct 19 02:23:54 2000 PDT | 0 | 0 - 2 | Fri Oct 19 02:23:54 2001 PDT | 0 | 0 - 3 | Fri Oct 19 02:23:54 2001 PDT | 0 | 0 - 4 | Sat Oct 19 02:23:54 2002 PDT | 0 | 0 - 5 | Sun Oct 19 02:23:54 2003 PDT | 0 | 0 - 6 | Tue Oct 19 02:23:54 2004 PDT | 0 | 0 - 7 | Wed Oct 19 02:23:54 2005 PDT | 0 | 0 - 8 | Thu Oct 19 02:23:54 2006 PDT | 0 | 0 - 9 | Fri Oct 19 02:23:54 2007 PDT | 0 | 0 - 10 | Sun Oct 19 02:23:54 2008 PDT | 0 | 0 - 11 | infinity | 0 | 11 -(12 rows) - -select id, f_timestamptz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamptz range between - 'infinity'::interval following and 'infinity'::interval following); - id | f_timestamptz | first_value | last_value -----+------------------------------+-------------+------------ - 0 | -infinity | 0 | 11 - 1 | Thu Oct 19 02:23:54 2000 PDT | 11 | 11 - 2 | Fri Oct 19 02:23:54 2001 PDT | 11 | 11 - 3 | Fri Oct 19 02:23:54 2001 PDT | 11 | 11 - 4 | Sat Oct 19 02:23:54 2002 PDT | 11 | 11 - 5 | Sun Oct 19 02:23:54 2003 PDT | 11 | 11 - 6 | Tue Oct 19 02:23:54 2004 PDT | 11 | 11 - 7 | Wed Oct 19 02:23:54 2005 PDT | 11 | 11 - 8 | Thu Oct 19 02:23:54 2006 PDT | 11 | 11 - 9 | Fri Oct 19 02:23:54 2007 PDT | 11 | 11 - 10 | Sun Oct 19 02:23:54 2008 PDT | 11 | 11 - 11 | infinity | 11 | 11 -(12 rows) - -select id, f_timestamptz, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamptz range between - '-infinity'::interval following and - 'infinity'::interval following); -- error, negative offset disallowed -ERROR: invalid preceding or following size in window function -select id, f_timestamp, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamp range between - '1 year'::interval preceding and '1 year'::interval following); - id | f_timestamp | first_value | last_value -----+--------------------------+-------------+------------ - 0 | -infinity | 0 | 0 - 1 | Thu Oct 19 10:23:54 2000 | 1 | 3 - 2 | Fri Oct 19 10:23:54 2001 | 1 | 4 - 3 | Fri Oct 19 10:23:54 2001 | 1 | 4 - 4 | Sat Oct 19 10:23:54 2002 | 2 | 5 - 5 | Sun Oct 19 10:23:54 2003 | 4 | 6 - 6 | Tue Oct 19 10:23:54 2004 | 5 | 7 - 7 | Wed Oct 19 10:23:54 2005 | 6 | 8 - 8 | Thu Oct 19 10:23:54 2006 | 7 | 9 - 9 | Fri Oct 19 10:23:54 2007 | 8 | 10 - 10 | Sun Oct 19 10:23:54 2008 | 9 | 10 - 11 | infinity | 11 | 11 -(12 rows) - -select id, f_timestamp, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamp desc range between - '1 year' preceding and '1 year' following); - id | f_timestamp | first_value | last_value -----+--------------------------+-------------+------------ - 11 | infinity | 11 | 11 - 10 | Sun Oct 19 10:23:54 2008 | 10 | 9 - 9 | Fri Oct 19 10:23:54 2007 | 10 | 8 - 8 | Thu Oct 19 10:23:54 2006 | 9 | 7 - 7 | Wed Oct 19 10:23:54 2005 | 8 | 6 - 6 | Tue Oct 19 10:23:54 2004 | 7 | 5 - 5 | Sun Oct 19 10:23:54 2003 | 6 | 4 - 4 | Sat Oct 19 10:23:54 2002 | 5 | 2 - 3 | Fri Oct 19 10:23:54 2001 | 4 | 1 - 2 | Fri Oct 19 10:23:54 2001 | 4 | 1 - 1 | Thu Oct 19 10:23:54 2000 | 3 | 1 - 0 | -infinity | 0 | 0 -(12 rows) - -select id, f_timestamp, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamp desc 
range between - '-1 year' preceding and '1 year' following); -- error, negative offset disallowed -ERROR: invalid preceding or following size in window function -select id, f_timestamp, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamp range between - 'infinity'::interval preceding and 'infinity'::interval following); - id | f_timestamp | first_value | last_value -----+--------------------------+-------------+------------ - 0 | -infinity | 0 | 11 - 1 | Thu Oct 19 10:23:54 2000 | 0 | 11 - 2 | Fri Oct 19 10:23:54 2001 | 0 | 11 - 3 | Fri Oct 19 10:23:54 2001 | 0 | 11 - 4 | Sat Oct 19 10:23:54 2002 | 0 | 11 - 5 | Sun Oct 19 10:23:54 2003 | 0 | 11 - 6 | Tue Oct 19 10:23:54 2004 | 0 | 11 - 7 | Wed Oct 19 10:23:54 2005 | 0 | 11 - 8 | Thu Oct 19 10:23:54 2006 | 0 | 11 - 9 | Fri Oct 19 10:23:54 2007 | 0 | 11 - 10 | Sun Oct 19 10:23:54 2008 | 0 | 11 - 11 | infinity | 0 | 11 -(12 rows) - -select id, f_timestamp, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamp range between - 'infinity'::interval preceding and 'infinity'::interval preceding); - id | f_timestamp | first_value | last_value -----+--------------------------+-------------+------------ - 0 | -infinity | 0 | 0 - 1 | Thu Oct 19 10:23:54 2000 | 0 | 0 - 2 | Fri Oct 19 10:23:54 2001 | 0 | 0 - 3 | Fri Oct 19 10:23:54 2001 | 0 | 0 - 4 | Sat Oct 19 10:23:54 2002 | 0 | 0 - 5 | Sun Oct 19 10:23:54 2003 | 0 | 0 - 6 | Tue Oct 19 10:23:54 2004 | 0 | 0 - 7 | Wed Oct 19 10:23:54 2005 | 0 | 0 - 8 | Thu Oct 19 10:23:54 2006 | 0 | 0 - 9 | Fri Oct 19 10:23:54 2007 | 0 | 0 - 10 | Sun Oct 19 10:23:54 2008 | 0 | 0 - 11 | infinity | 0 | 11 -(12 rows) - -select id, f_timestamp, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamp range between - 'infinity'::interval following and 'infinity'::interval following); - id | f_timestamp | first_value | last_value -----+--------------------------+-------------+------------ - 0 | -infinity | 0 | 11 - 1 | Thu Oct 19 10:23:54 2000 | 11 | 11 - 2 | Fri Oct 19 10:23:54 2001 | 11 | 11 - 3 | Fri Oct 19 10:23:54 2001 | 11 | 11 - 4 | Sat Oct 19 10:23:54 2002 | 11 | 11 - 5 | Sun Oct 19 10:23:54 2003 | 11 | 11 - 6 | Tue Oct 19 10:23:54 2004 | 11 | 11 - 7 | Wed Oct 19 10:23:54 2005 | 11 | 11 - 8 | Thu Oct 19 10:23:54 2006 | 11 | 11 - 9 | Fri Oct 19 10:23:54 2007 | 11 | 11 - 10 | Sun Oct 19 10:23:54 2008 | 11 | 11 - 11 | infinity | 11 | 11 -(12 rows) - -select id, f_timestamp, first_value(id) over w, last_value(id) over w -from datetimes -window w as (order by f_timestamp range between - '-infinity'::interval following and - 'infinity'::interval following); -- error, negative offset disallowed -ERROR: invalid preceding or following size in window function --- RANGE offset PRECEDING/FOLLOWING error cases -select sum(salary) over (order by enroll_date, salary range between '1 year'::interval preceding and '2 years'::interval following - exclude ties), salary, enroll_date from empsalary; -ERROR: RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column -LINE 1: select sum(salary) over (order by enroll_date, salary range ... - ^ -select sum(salary) over (range between '1 year'::interval preceding and '2 years'::interval following - exclude ties), salary, enroll_date from empsalary; -ERROR: RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column -LINE 1: select sum(salary) over (range between '1 year'::interval pr... 
- ^ -select sum(salary) over (order by depname range between '1 year'::interval preceding and '2 years'::interval following - exclude ties), salary, enroll_date from empsalary; -ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type text -LINE 1: ... sum(salary) over (order by depname range between '1 year'::... - ^ -select max(enroll_date) over (order by enroll_date range between 1 preceding and 2 following - exclude ties), salary, enroll_date from empsalary; -ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type date and offset type integer -LINE 1: ...ll_date) over (order by enroll_date range between 1 precedin... - ^ -HINT: Cast the offset value to an appropriate type. -select max(enroll_date) over (order by salary range between -1 preceding and 2 following - exclude ties), salary, enroll_date from empsalary; -ERROR: invalid preceding or following size in window function -select max(enroll_date) over (order by salary range between 1 preceding and -2 following - exclude ties), salary, enroll_date from empsalary; -ERROR: invalid preceding or following size in window function -select max(enroll_date) over (order by salary range between '1 year'::interval preceding and '2 years'::interval following - exclude ties), salary, enroll_date from empsalary; -ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type integer and offset type interval -LINE 1: ...(enroll_date) over (order by salary range between '1 year'::... - ^ -HINT: Cast the offset value to an appropriate type. -select max(enroll_date) over (order by enroll_date range between '1 year'::interval preceding and '-2 years'::interval following - exclude ties), salary, enroll_date from empsalary; -ERROR: invalid preceding or following size in window function --- GROUPS tests -SELECT sum(unique1) over (order by four groups between unbounded preceding and current row), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 12 | 0 | 0 - 12 | 8 | 0 - 12 | 4 | 0 - 27 | 5 | 1 - 27 | 9 | 1 - 27 | 1 | 1 - 35 | 6 | 2 - 35 | 2 | 2 - 45 | 3 | 3 - 45 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between unbounded preceding and unbounded following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 45 | 0 | 0 - 45 | 8 | 0 - 45 | 4 | 0 - 45 | 5 | 1 - 45 | 9 | 1 - 45 | 1 | 1 - 45 | 6 | 2 - 45 | 2 | 2 - 45 | 3 | 3 - 45 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between current row and unbounded following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 45 | 0 | 0 - 45 | 8 | 0 - 45 | 4 | 0 - 33 | 5 | 1 - 33 | 9 | 1 - 33 | 1 | 1 - 18 | 6 | 2 - 18 | 2 | 2 - 10 | 3 | 3 - 10 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between 1 preceding and unbounded following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 45 | 0 | 0 - 45 | 8 | 0 - 45 | 4 | 0 - 45 | 5 | 1 - 45 | 9 | 1 - 45 | 1 | 1 - 33 | 6 | 2 - 33 | 2 | 2 - 18 | 3 | 3 - 18 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between 1 following and unbounded following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 33 | 0 | 0 - 33 | 8 | 0 - 33 | 4 | 0 - 18 | 5 | 1 - 18 | 9 | 1 - 18 | 1 | 1 - 10 | 6 | 2 - 10 | 2 | 2 - | 3 | 3 - | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between unbounded 
preceding and 2 following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 35 | 0 | 0 - 35 | 8 | 0 - 35 | 4 | 0 - 45 | 5 | 1 - 45 | 9 | 1 - 45 | 1 | 1 - 45 | 6 | 2 - 45 | 2 | 2 - 45 | 3 | 3 - 45 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between 2 preceding and 1 preceding), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - | 0 | 0 - | 8 | 0 - | 4 | 0 - 12 | 5 | 1 - 12 | 9 | 1 - 12 | 1 | 1 - 27 | 6 | 2 - 27 | 2 | 2 - 23 | 3 | 3 - 23 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 27 | 0 | 0 - 27 | 8 | 0 - 27 | 4 | 0 - 35 | 5 | 1 - 35 | 9 | 1 - 35 | 1 | 1 - 45 | 6 | 2 - 45 | 2 | 2 - 33 | 3 | 3 - 33 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between 0 preceding and 0 following), - unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 12 | 0 | 0 - 12 | 8 | 0 - 12 | 4 | 0 - 15 | 5 | 1 - 15 | 9 | 1 - 15 | 1 | 1 - 8 | 6 | 2 - 8 | 2 | 2 - 10 | 3 | 3 - 10 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following - exclude current row), unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 27 | 0 | 0 - 19 | 8 | 0 - 23 | 4 | 0 - 30 | 5 | 1 - 26 | 9 | 1 - 34 | 1 | 1 - 39 | 6 | 2 - 43 | 2 | 2 - 30 | 3 | 3 - 26 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following - exclude group), unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 15 | 0 | 0 - 15 | 8 | 0 - 15 | 4 | 0 - 20 | 5 | 1 - 20 | 9 | 1 - 20 | 1 | 1 - 37 | 6 | 2 - 37 | 2 | 2 - 23 | 3 | 3 - 23 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following - exclude ties), unique1, four -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four ------+---------+------ - 15 | 0 | 0 - 23 | 8 | 0 - 19 | 4 | 0 - 25 | 5 | 1 - 29 | 9 | 1 - 21 | 1 | 1 - 43 | 6 | 2 - 39 | 2 | 2 - 26 | 3 | 3 - 30 | 7 | 3 -(10 rows) - -SELECT sum(unique1) over (partition by ten - order by four groups between 0 preceding and 0 following),unique1, four, ten -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four | ten ------+---------+------+----- - 0 | 0 | 0 | 0 - 1 | 1 | 1 | 1 - 2 | 2 | 2 | 2 - 3 | 3 | 3 | 3 - 4 | 4 | 0 | 4 - 5 | 5 | 1 | 5 - 6 | 6 | 2 | 6 - 7 | 7 | 3 | 7 - 8 | 8 | 0 | 8 - 9 | 9 | 1 | 9 -(10 rows) - -SELECT sum(unique1) over (partition by ten - order by four groups between 0 preceding and 0 following exclude current row), unique1, four, ten -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four | ten ------+---------+------+----- - | 0 | 0 | 0 - | 1 | 1 | 1 - | 2 | 2 | 2 - | 3 | 3 | 3 - | 4 | 0 | 4 - | 5 | 1 | 5 - | 6 | 2 | 6 - | 7 | 3 | 7 - | 8 | 0 | 8 - | 9 | 1 | 9 -(10 rows) - -SELECT sum(unique1) over (partition by ten - order by four groups between 0 preceding and 0 following exclude group), unique1, four, ten -FROM tenk1 WHERE unique1 < 10; - sum | unique1 | four | ten ------+---------+------+----- - | 0 | 0 | 0 - | 1 | 1 | 1 - | 2 | 2 | 2 - | 3 | 3 | 3 - | 4 | 0 | 4 - | 5 | 1 | 5 - | 6 | 2 | 6 - | 7 | 3 | 7 - | 8 | 0 | 8 - | 9 | 1 | 9 -(10 rows) - -SELECT sum(unique1) over (partition by ten - order by four groups between 0 preceding and 0 following exclude ties), unique1, four, ten -FROM tenk1 
WHERE unique1 < 10; - sum | unique1 | four | ten ------+---------+------+----- - 0 | 0 | 0 | 0 - 1 | 1 | 1 | 1 - 2 | 2 | 2 | 2 - 3 | 3 | 3 | 3 - 4 | 4 | 0 | 4 - 5 | 5 | 1 | 5 - 6 | 6 | 2 | 6 - 7 | 7 | 3 | 7 - 8 | 8 | 0 | 8 - 9 | 9 | 1 | 9 -(10 rows) - -select first_value(salary) over(order by enroll_date groups between 1 preceding and 1 following), - lead(salary) over(order by enroll_date groups between 1 preceding and 1 following), - nth_value(salary, 1) over(order by enroll_date groups between 1 preceding and 1 following), - salary, enroll_date from empsalary; - first_value | lead | nth_value | salary | enroll_date --------------+------+-----------+--------+------------- - 5000 | 6000 | 5000 | 5000 | 10-01-2006 - 5000 | 3900 | 5000 | 6000 | 10-01-2006 - 5000 | 4800 | 5000 | 3900 | 12-23-2006 - 3900 | 5200 | 3900 | 4800 | 08-01-2007 - 3900 | 4800 | 3900 | 5200 | 08-01-2007 - 4800 | 5200 | 4800 | 4800 | 08-08-2007 - 4800 | 3500 | 4800 | 5200 | 08-15-2007 - 5200 | 4500 | 5200 | 3500 | 12-10-2007 - 3500 | 4200 | 3500 | 4500 | 01-01-2008 - 3500 | | 3500 | 4200 | 01-01-2008 -(10 rows) - -select last_value(salary) over(order by enroll_date groups between 1 preceding and 1 following), - lag(salary) over(order by enroll_date groups between 1 preceding and 1 following), - salary, enroll_date from empsalary; - last_value | lag | salary | enroll_date -------------+------+--------+------------- - 3900 | | 5000 | 10-01-2006 - 3900 | 5000 | 6000 | 10-01-2006 - 5200 | 6000 | 3900 | 12-23-2006 - 4800 | 3900 | 4800 | 08-01-2007 - 4800 | 4800 | 5200 | 08-01-2007 - 5200 | 5200 | 4800 | 08-08-2007 - 3500 | 4800 | 5200 | 08-15-2007 - 4200 | 5200 | 3500 | 12-10-2007 - 4200 | 3500 | 4500 | 01-01-2008 - 4200 | 4500 | 4200 | 01-01-2008 -(10 rows) - -select first_value(salary) over(order by enroll_date groups between 1 following and 3 following - exclude current row), - lead(salary) over(order by enroll_date groups between 1 following and 3 following exclude ties), - nth_value(salary, 1) over(order by enroll_date groups between 1 following and 3 following - exclude ties), - salary, enroll_date from empsalary; - first_value | lead | nth_value | salary | enroll_date --------------+------+-----------+--------+------------- - 3900 | 6000 | 3900 | 5000 | 10-01-2006 - 3900 | 3900 | 3900 | 6000 | 10-01-2006 - 4800 | 4800 | 4800 | 3900 | 12-23-2006 - 4800 | 5200 | 4800 | 4800 | 08-01-2007 - 4800 | 4800 | 4800 | 5200 | 08-01-2007 - 5200 | 5200 | 5200 | 4800 | 08-08-2007 - 3500 | 3500 | 3500 | 5200 | 08-15-2007 - 4500 | 4500 | 4500 | 3500 | 12-10-2007 - | 4200 | | 4500 | 01-01-2008 - | | | 4200 | 01-01-2008 -(10 rows) - -select last_value(salary) over(order by enroll_date groups between 1 following and 3 following - exclude group), - lag(salary) over(order by enroll_date groups between 1 following and 3 following exclude group), - salary, enroll_date from empsalary; - last_value | lag | salary | enroll_date -------------+------+--------+------------- - 4800 | | 5000 | 10-01-2006 - 4800 | 5000 | 6000 | 10-01-2006 - 5200 | 6000 | 3900 | 12-23-2006 - 3500 | 3900 | 4800 | 08-01-2007 - 3500 | 4800 | 5200 | 08-01-2007 - 4200 | 5200 | 4800 | 08-08-2007 - 4200 | 4800 | 5200 | 08-15-2007 - 4200 | 5200 | 3500 | 12-10-2007 - | 3500 | 4500 | 01-01-2008 - | 4500 | 4200 | 01-01-2008 -(10 rows) - --- Show differences in offset interpretation between ROWS, RANGE, and GROUPS -WITH cte (x) AS ( - SELECT * FROM generate_series(1, 35, 2) -) -SELECT x, (sum(x) over w) -FROM cte -WINDOW w AS (ORDER BY x rows between 1 preceding and 1 following); 
- x | sum -----+----- - 1 | 4 - 3 | 9 - 5 | 15 - 7 | 21 - 9 | 27 - 11 | 33 - 13 | 39 - 15 | 45 - 17 | 51 - 19 | 57 - 21 | 63 - 23 | 69 - 25 | 75 - 27 | 81 - 29 | 87 - 31 | 93 - 33 | 99 - 35 | 68 -(18 rows) - -WITH cte (x) AS ( - SELECT * FROM generate_series(1, 35, 2) -) -SELECT x, (sum(x) over w) -FROM cte -WINDOW w AS (ORDER BY x range between 1 preceding and 1 following); - x | sum -----+----- - 1 | 1 - 3 | 3 - 5 | 5 - 7 | 7 - 9 | 9 - 11 | 11 - 13 | 13 - 15 | 15 - 17 | 17 - 19 | 19 - 21 | 21 - 23 | 23 - 25 | 25 - 27 | 27 - 29 | 29 - 31 | 31 - 33 | 33 - 35 | 35 -(18 rows) - -WITH cte (x) AS ( - SELECT * FROM generate_series(1, 35, 2) -) -SELECT x, (sum(x) over w) -FROM cte -WINDOW w AS (ORDER BY x groups between 1 preceding and 1 following); - x | sum -----+----- - 1 | 4 - 3 | 9 - 5 | 15 - 7 | 21 - 9 | 27 - 11 | 33 - 13 | 39 - 15 | 45 - 17 | 51 - 19 | 57 - 21 | 63 - 23 | 69 - 25 | 75 - 27 | 81 - 29 | 87 - 31 | 93 - 33 | 99 - 35 | 68 -(18 rows) - -WITH cte (x) AS ( - select 1 union all select 1 union all select 1 union all - SELECT * FROM generate_series(5, 49, 2) -) -SELECT x, (sum(x) over w) -FROM cte -WINDOW w AS (ORDER BY x rows between 1 preceding and 1 following); - x | sum -----+----- - 1 | 2 - 1 | 3 - 1 | 7 - 5 | 13 - 7 | 21 - 9 | 27 - 11 | 33 - 13 | 39 - 15 | 45 - 17 | 51 - 19 | 57 - 21 | 63 - 23 | 69 - 25 | 75 - 27 | 81 - 29 | 87 - 31 | 93 - 33 | 99 - 35 | 105 - 37 | 111 - 39 | 117 - 41 | 123 - 43 | 129 - 45 | 135 - 47 | 141 - 49 | 96 -(26 rows) - -WITH cte (x) AS ( - select 1 union all select 1 union all select 1 union all - SELECT * FROM generate_series(5, 49, 2) -) -SELECT x, (sum(x) over w) -FROM cte -WINDOW w AS (ORDER BY x range between 1 preceding and 1 following); - x | sum -----+----- - 1 | 3 - 1 | 3 - 1 | 3 - 5 | 5 - 7 | 7 - 9 | 9 - 11 | 11 - 13 | 13 - 15 | 15 - 17 | 17 - 19 | 19 - 21 | 21 - 23 | 23 - 25 | 25 - 27 | 27 - 29 | 29 - 31 | 31 - 33 | 33 - 35 | 35 - 37 | 37 - 39 | 39 - 41 | 41 - 43 | 43 - 45 | 45 - 47 | 47 - 49 | 49 -(26 rows) - -WITH cte (x) AS ( - select 1 union all select 1 union all select 1 union all - SELECT * FROM generate_series(5, 49, 2) -) -SELECT x, (sum(x) over w) -FROM cte -WINDOW w AS (ORDER BY x groups between 1 preceding and 1 following); - x | sum -----+----- - 1 | 8 - 1 | 8 - 1 | 8 - 5 | 15 - 7 | 21 - 9 | 27 - 11 | 33 - 13 | 39 - 15 | 45 - 17 | 51 - 19 | 57 - 21 | 63 - 23 | 69 - 25 | 75 - 27 | 81 - 29 | 87 - 31 | 93 - 33 | 99 - 35 | 105 - 37 | 111 - 39 | 117 - 41 | 123 - 43 | 129 - 45 | 135 - 47 | 141 - 49 | 96 -(26 rows) - --- with UNION -SELECT count(*) OVER (PARTITION BY four) FROM (SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk2)s LIMIT 0; - count -------- -(0 rows) - --- check some degenerate cases -create temp table t1 (f1 int, f2 int8); -insert into t1 values (1,1),(1,2),(2,2); -select f1, sum(f1) over (partition by f1 - range between 1 preceding and 1 following) -from t1 where f1 = f2; -- error, must have order by -ERROR: RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column -LINE 1: select f1, sum(f1) over (partition by f1 - ^ -explain (costs off) -select f1, sum(f1) over (partition by f1 order by f2 - range between 1 preceding and 1 following) -from t1 where f1 = f2; - QUERY PLAN ---------------------------------- - WindowAgg - -> Sort - Sort Key: f1 - -> Seq Scan on t1 - Filter: (f1 = f2) -(5 rows) - -select f1, sum(f1) over (partition by f1 order by f2 - range between 1 preceding and 1 following) -from t1 where f1 = f2; - f1 | sum -----+----- - 1 | 1 - 2 | 2 -(2 rows) - -select f1, sum(f1) over (partition 
by f1, f1 order by f2 - range between 2 preceding and 1 preceding) -from t1 where f1 = f2; - f1 | sum -----+----- - 1 | - 2 | -(2 rows) - -select f1, sum(f1) over (partition by f1, f2 order by f2 - range between 1 following and 2 following) -from t1 where f1 = f2; - f1 | sum -----+----- - 1 | - 2 | -(2 rows) - -select f1, sum(f1) over (partition by f1 - groups between 1 preceding and 1 following) -from t1 where f1 = f2; -- error, must have order by -ERROR: GROUPS mode requires an ORDER BY clause -LINE 1: select f1, sum(f1) over (partition by f1 - ^ -explain (costs off) -select f1, sum(f1) over (partition by f1 order by f2 - groups between 1 preceding and 1 following) -from t1 where f1 = f2; - QUERY PLAN ---------------------------------- - WindowAgg - -> Sort - Sort Key: f1 - -> Seq Scan on t1 - Filter: (f1 = f2) -(5 rows) - -select f1, sum(f1) over (partition by f1 order by f2 - groups between 1 preceding and 1 following) -from t1 where f1 = f2; - f1 | sum -----+----- - 1 | 1 - 2 | 2 -(2 rows) - -select f1, sum(f1) over (partition by f1, f1 order by f2 - groups between 2 preceding and 1 preceding) -from t1 where f1 = f2; - f1 | sum -----+----- - 1 | - 2 | -(2 rows) - -select f1, sum(f1) over (partition by f1, f2 order by f2 - groups between 1 following and 2 following) -from t1 where f1 = f2; - f1 | sum -----+----- - 1 | - 2 | -(2 rows) - --- ordering by a non-integer constant is allowed -SELECT rank() OVER (ORDER BY length('abc')); - rank ------- - 1 -(1 row) - --- can't order by another window function -SELECT rank() OVER (ORDER BY rank() OVER (ORDER BY random())); -ERROR: window functions are not allowed in window definitions -LINE 1: SELECT rank() OVER (ORDER BY rank() OVER (ORDER BY random())... - ^ --- some other errors -SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY salary) < 10; -ERROR: window functions are not allowed in WHERE -LINE 1: SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY sa... - ^ -SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVER (ORDER BY salary) < 10; -ERROR: window functions are not allowed in JOIN conditions -LINE 1: SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVE... - ^ -SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GROUP BY 1; -ERROR: window functions are not allowed in GROUP BY -LINE 1: SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GRO... - ^ -SELECT * FROM rank() OVER (ORDER BY random()); -ERROR: syntax error at or near "ORDER" -LINE 1: SELECT * FROM rank() OVER (ORDER BY random()); - ^ -DELETE FROM empsalary WHERE (rank() OVER (ORDER BY random())) > 10; -ERROR: window functions are not allowed in WHERE -LINE 1: DELETE FROM empsalary WHERE (rank() OVER (ORDER BY random())... - ^ -DELETE FROM empsalary RETURNING rank() OVER (ORDER BY random()); -ERROR: window functions are not allowed in RETURNING -LINE 1: DELETE FROM empsalary RETURNING rank() OVER (ORDER BY random... - ^ -SELECT count(*) OVER w FROM tenk1 WINDOW w AS (ORDER BY unique1), w AS (ORDER BY unique1); -ERROR: window "w" is already defined -LINE 1: ...w FROM tenk1 WINDOW w AS (ORDER BY unique1), w AS (ORDER BY ... - ^ -SELECT rank() OVER (PARTITION BY four, ORDER BY ten) FROM tenk1; -ERROR: syntax error at or near "ORDER" -LINE 1: SELECT rank() OVER (PARTITION BY four, ORDER BY ten) FROM te... 
- ^ -SELECT count() OVER () FROM tenk1; -ERROR: count(*) must be used to call a parameterless aggregate function -LINE 1: SELECT count() OVER () FROM tenk1; - ^ -SELECT generate_series(1, 100) OVER () FROM empsalary; -ERROR: OVER specified, but generate_series is not a window function nor an aggregate function -LINE 1: SELECT generate_series(1, 100) OVER () FROM empsalary; - ^ -SELECT ntile(0) OVER (ORDER BY ten), ten, four FROM tenk1; -ERROR: argument of ntile must be greater than zero -SELECT nth_value(four, 0) OVER (ORDER BY ten), ten, four FROM tenk1; -ERROR: argument of nth_value must be greater than zero --- filter -SELECT sum(salary), row_number() OVER (ORDER BY depname), sum( - sum(salary) FILTER (WHERE enroll_date > '2007-01-01') -) FILTER (WHERE depname <> 'sales') OVER (ORDER BY depname DESC) AS "filtered_sum", - depname -FROM empsalary GROUP BY depname; - sum | row_number | filtered_sum | depname --------+------------+--------------+----------- - 25100 | 1 | 22600 | develop - 7400 | 2 | 3500 | personnel - 14600 | 3 | | sales -(3 rows) - --- --- Test SupportRequestOptimizeWindowClause's ability to de-duplicate --- WindowClauses --- --- Ensure WindowClause frameOptions are changed so that only a single --- WindowAgg exists in the plan. -EXPLAIN (COSTS OFF) -SELECT - empno, - depname, - row_number() OVER (PARTITION BY depname ORDER BY enroll_date) rn, - rank() OVER (PARTITION BY depname ORDER BY enroll_date ROWS BETWEEN - UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) rnk, - dense_rank() OVER (PARTITION BY depname ORDER BY enroll_date RANGE BETWEEN - CURRENT ROW AND CURRENT ROW) drnk, - ntile(10) OVER (PARTITION BY depname ORDER BY enroll_date RANGE BETWEEN - CURRENT ROW AND UNBOUNDED FOLLOWING) nt, - percent_rank() OVER (PARTITION BY depname ORDER BY enroll_date ROWS BETWEEN - CURRENT ROW AND UNBOUNDED FOLLOWING) pr, - cume_dist() OVER (PARTITION BY depname ORDER BY enroll_date RANGE BETWEEN - CURRENT ROW AND UNBOUNDED FOLLOWING) cd -FROM empsalary; - QUERY PLAN ----------------------------------------- - WindowAgg - -> Sort - Sort Key: depname, enroll_date - -> Seq Scan on empsalary -(4 rows) - --- Ensure WindowFuncs which cannot support their WindowClause's frameOptions --- being changed are untouched -EXPLAIN (COSTS OFF, VERBOSE) -SELECT - empno, - depname, - row_number() OVER (PARTITION BY depname ORDER BY enroll_date) rn, - rank() OVER (PARTITION BY depname ORDER BY enroll_date ROWS BETWEEN - UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) rnk, - count(*) OVER (PARTITION BY depname ORDER BY enroll_date RANGE BETWEEN - CURRENT ROW AND CURRENT ROW) cnt -FROM empsalary; - QUERY PLAN ------------------------------------------------------------------------------------------------------- - WindowAgg - Output: empno, depname, (row_number() OVER (?)), (rank() OVER (?)), count(*) OVER (?), enroll_date - -> WindowAgg - Output: depname, enroll_date, empno, row_number() OVER (?), rank() OVER (?) 
- -> Sort - Output: depname, enroll_date, empno - Sort Key: empsalary.depname, empsalary.enroll_date - -> Seq Scan on pg_temp.empsalary - Output: depname, enroll_date, empno -(9 rows) - --- Ensure the above query gives us the expected results -SELECT - empno, - depname, - row_number() OVER (PARTITION BY depname ORDER BY enroll_date) rn, - rank() OVER (PARTITION BY depname ORDER BY enroll_date ROWS BETWEEN - UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) rnk, - count(*) OVER (PARTITION BY depname ORDER BY enroll_date RANGE BETWEEN - CURRENT ROW AND CURRENT ROW) cnt -FROM empsalary; - empno | depname | rn | rnk | cnt --------+-----------+----+-----+----- - 8 | develop | 1 | 1 | 1 - 10 | develop | 2 | 2 | 1 - 11 | develop | 3 | 3 | 1 - 9 | develop | 4 | 4 | 2 - 7 | develop | 5 | 4 | 2 - 2 | personnel | 1 | 1 | 1 - 5 | personnel | 2 | 2 | 1 - 1 | sales | 1 | 1 | 1 - 3 | sales | 2 | 2 | 1 - 4 | sales | 3 | 3 | 1 -(10 rows) - --- Test pushdown of quals into a subquery containing window functions --- pushdown is safe because all PARTITION BY clauses include depname: -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT depname, - sum(salary) OVER (PARTITION BY depname) depsalary, - min(salary) OVER (PARTITION BY depname || 'A', depname) depminsalary - FROM empsalary) emp -WHERE depname = 'sales'; - QUERY PLAN --------------------------------------------------------------------------- - Subquery Scan on emp - -> WindowAgg - -> WindowAgg - -> Sort - Sort Key: (((empsalary.depname)::text || 'A'::text)) - -> Seq Scan on empsalary - Filter: ((depname)::text = 'sales'::text) -(7 rows) - --- pushdown is unsafe because there's a PARTITION BY clause without depname: -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT depname, - sum(salary) OVER (PARTITION BY enroll_date) enroll_salary, - min(salary) OVER (PARTITION BY depname) depminsalary - FROM empsalary) emp -WHERE depname = 'sales'; - QUERY PLAN -------------------------------------------------------- - Subquery Scan on emp - Filter: ((emp.depname)::text = 'sales'::text) - -> WindowAgg - -> Sort - Sort Key: empsalary.enroll_date - -> WindowAgg - -> Sort - Sort Key: empsalary.depname - -> Seq Scan on empsalary -(9 rows) - --- Test window function run conditions are properly pushed down into the --- WindowAgg -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - row_number() OVER (ORDER BY empno) rn - FROM empsalary) emp -WHERE rn < 3; - QUERY PLAN ----------------------------------------------- - WindowAgg - Run Condition: (row_number() OVER (?) < 3) - -> Sort - Sort Key: empsalary.empno - -> Seq Scan on empsalary -(5 rows) - --- The following 3 statements should result the same result. -SELECT * FROM - (SELECT empno, - row_number() OVER (ORDER BY empno) rn - FROM empsalary) emp -WHERE rn < 3; - empno | rn --------+---- - 1 | 1 - 2 | 2 -(2 rows) - -SELECT * FROM - (SELECT empno, - row_number() OVER (ORDER BY empno) rn - FROM empsalary) emp -WHERE 3 > rn; - empno | rn --------+---- - 1 | 1 - 2 | 2 -(2 rows) - -SELECT * FROM - (SELECT empno, - row_number() OVER (ORDER BY empno) rn - FROM empsalary) emp -WHERE 2 >= rn; - empno | rn --------+---- - 1 | 1 - 2 | 2 -(2 rows) - --- Ensure r <= 3 is pushed down into the run condition of the window agg -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - salary, - rank() OVER (ORDER BY salary DESC) r - FROM empsalary) emp -WHERE r <= 3; - QUERY PLAN ------------------------------------------ - WindowAgg - Run Condition: (rank() OVER (?) 
<= 3) - -> Sort - Sort Key: empsalary.salary DESC - -> Seq Scan on empsalary -(5 rows) - -SELECT * FROM - (SELECT empno, - salary, - rank() OVER (ORDER BY salary DESC) r - FROM empsalary) emp -WHERE r <= 3; - empno | salary | r --------+--------+--- - 8 | 6000 | 1 - 10 | 5200 | 2 - 11 | 5200 | 2 -(3 rows) - --- Ensure dr = 1 is converted to dr <= 1 to get all rows leading up to dr = 1 -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - salary, - dense_rank() OVER (ORDER BY salary DESC) dr - FROM empsalary) emp -WHERE dr = 1; - QUERY PLAN ------------------------------------------------------ - Subquery Scan on emp - Filter: (emp.dr = 1) - -> WindowAgg - Run Condition: (dense_rank() OVER (?) <= 1) - -> Sort - Sort Key: empsalary.salary DESC - -> Seq Scan on empsalary -(7 rows) - -SELECT * FROM - (SELECT empno, - salary, - dense_rank() OVER (ORDER BY salary DESC) dr - FROM empsalary) emp -WHERE dr = 1; - empno | salary | dr --------+--------+---- - 8 | 6000 | 1 -(1 row) - --- Check COUNT() and COUNT(*) -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - salary, - count(*) OVER (ORDER BY salary DESC) c - FROM empsalary) emp -WHERE c <= 3; - QUERY PLAN -------------------------------------------- - WindowAgg - Run Condition: (count(*) OVER (?) <= 3) - -> Sort - Sort Key: empsalary.salary DESC - -> Seq Scan on empsalary -(5 rows) - -SELECT * FROM - (SELECT empno, - salary, - count(*) OVER (ORDER BY salary DESC) c - FROM empsalary) emp -WHERE c <= 3; - empno | salary | c --------+--------+--- - 8 | 6000 | 1 - 10 | 5200 | 3 - 11 | 5200 | 3 -(3 rows) - -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - salary, - count(empno) OVER (ORDER BY salary DESC) c - FROM empsalary) emp -WHERE c <= 3; - QUERY PLAN ---------------------------------------------------------- - WindowAgg - Run Condition: (count(empsalary.empno) OVER (?) <= 3) - -> Sort - Sort Key: empsalary.salary DESC - -> Seq Scan on empsalary -(5 rows) - -SELECT * FROM - (SELECT empno, - salary, - count(empno) OVER (ORDER BY salary DESC) c - FROM empsalary) emp -WHERE c <= 3; - empno | salary | c --------+--------+--- - 8 | 6000 | 1 - 10 | 5200 | 3 - 11 | 5200 | 3 -(3 rows) - -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - salary, - count(*) OVER (ORDER BY salary DESC ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) c - FROM empsalary) emp -WHERE c >= 3; - QUERY PLAN -------------------------------------------- - WindowAgg - Run Condition: (count(*) OVER (?) >= 3) - -> Sort - Sort Key: empsalary.salary DESC - -> Seq Scan on empsalary -(5 rows) - -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - salary, - count(*) OVER () c - FROM empsalary) emp -WHERE 11 <= c; - QUERY PLAN --------------------------------------------- - WindowAgg - Run Condition: (11 <= count(*) OVER (?)) - -> Seq Scan on empsalary -(3 rows) - -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - salary, - count(*) OVER (ORDER BY salary DESC) c, - dense_rank() OVER (ORDER BY salary DESC) dr - FROM empsalary) emp -WHERE dr = 1; - QUERY PLAN ------------------------------------------------------ - Subquery Scan on emp - Filter: (emp.dr = 1) - -> WindowAgg - Run Condition: (dense_rank() OVER (?) 
<= 1) - -> Sort - Sort Key: empsalary.salary DESC - -> Seq Scan on empsalary -(7 rows) - --- Ensure we get a run condition when there's a PARTITION BY clause -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - depname, - row_number() OVER (PARTITION BY depname ORDER BY empno) rn - FROM empsalary) emp -WHERE rn < 3; - QUERY PLAN ------------------------------------------------------- - WindowAgg - Run Condition: (row_number() OVER (?) < 3) - -> Sort - Sort Key: empsalary.depname, empsalary.empno - -> Seq Scan on empsalary -(5 rows) - --- and ensure we get the correct results from the above plan -SELECT * FROM - (SELECT empno, - depname, - row_number() OVER (PARTITION BY depname ORDER BY empno) rn - FROM empsalary) emp -WHERE rn < 3; - empno | depname | rn --------+-----------+---- - 7 | develop | 1 - 8 | develop | 2 - 2 | personnel | 1 - 5 | personnel | 2 - 1 | sales | 1 - 3 | sales | 2 -(6 rows) - --- ensure that "unused" subquery columns are not removed when the column only --- exists in the run condition -EXPLAIN (COSTS OFF) -SELECT empno, depname FROM - (SELECT empno, - depname, - row_number() OVER (PARTITION BY depname ORDER BY empno) rn - FROM empsalary) emp -WHERE rn < 3; - QUERY PLAN ------------------------------------------------------------- - Subquery Scan on emp - -> WindowAgg - Run Condition: (row_number() OVER (?) < 3) - -> Sort - Sort Key: empsalary.depname, empsalary.empno - -> Seq Scan on empsalary -(6 rows) - --- likewise with count(empno) instead of row_number() -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - depname, - salary, - count(empno) OVER (PARTITION BY depname ORDER BY salary DESC) c - FROM empsalary) emp -WHERE c <= 3; - QUERY PLAN ------------------------------------------------------------- - WindowAgg - Run Condition: (count(empsalary.empno) OVER (?) <= 3) - -> Sort - Sort Key: empsalary.depname, empsalary.salary DESC - -> Seq Scan on empsalary -(5 rows) - --- and again, check the results are what we expect. -SELECT * FROM - (SELECT empno, - depname, - salary, - count(empno) OVER (PARTITION BY depname ORDER BY salary DESC) c - FROM empsalary) emp -WHERE c <= 3; - empno | depname | salary | c --------+-----------+--------+--- - 8 | develop | 6000 | 1 - 10 | develop | 5200 | 3 - 11 | develop | 5200 | 3 - 2 | personnel | 3900 | 1 - 5 | personnel | 3500 | 2 - 1 | sales | 5000 | 1 - 4 | sales | 4800 | 3 - 3 | sales | 4800 | 3 -(8 rows) - --- Ensure we get the correct run condition when the window function is both --- monotonically increasing and decreasing. -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - depname, - salary, - count(empno) OVER () c - FROM empsalary) emp -WHERE c = 1; - QUERY PLAN --------------------------------------------------------- - WindowAgg - Run Condition: (count(empsalary.empno) OVER (?) 
= 1) - -> Seq Scan on empsalary -(3 rows) - --- Try another case with a WindowFunc with a byref return type -SELECT * FROM - (SELECT row_number() OVER (PARTITION BY salary) AS rn, - lead(depname) OVER (PARTITION BY salary) || ' Department' AS n_dep - FROM empsalary) emp -WHERE rn < 1; - rn | n_dep -----+------- -(0 rows) - --- Some more complex cases with multiple window clauses -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT *, - count(salary) OVER (PARTITION BY depname || '') c1, -- w1 - row_number() OVER (PARTITION BY depname) rn, -- w2 - count(*) OVER (PARTITION BY depname) c2, -- w2 - count(*) OVER (PARTITION BY '' || depname) c3, -- w3 - ntile(2) OVER (PARTITION BY depname) nt -- w2 - FROM empsalary -) e WHERE rn <= 1 AND c1 <= 3 AND nt < 2; - QUERY PLAN ------------------------------------------------------------------------------------------------ - Subquery Scan on e - -> WindowAgg - Filter: (((row_number() OVER (?)) <= 1) AND ((ntile(2) OVER (?)) < 2)) - Run Condition: (count(empsalary.salary) OVER (?) <= 3) - -> Sort - Sort Key: (((empsalary.depname)::text || ''::text)) - -> WindowAgg - Run Condition: ((row_number() OVER (?) <= 1) AND (ntile(2) OVER (?) < 2)) - -> Sort - Sort Key: empsalary.depname - -> WindowAgg - -> Sort - Sort Key: ((''::text || (empsalary.depname)::text)) - -> Seq Scan on empsalary -(14 rows) - --- Ensure we correctly filter out all of the run conditions from each window -SELECT * FROM - (SELECT *, - count(salary) OVER (PARTITION BY depname || '') c1, -- w1 - row_number() OVER (PARTITION BY depname) rn, -- w2 - count(*) OVER (PARTITION BY depname) c2, -- w2 - count(*) OVER (PARTITION BY '' || depname) c3, -- w3 - ntile(2) OVER (PARTITION BY depname) nt -- w2 - FROM empsalary -) e WHERE rn <= 1 AND c1 <= 3 AND nt < 2; - depname | empno | salary | enroll_date | c1 | rn | c2 | c3 | nt ------------+-------+--------+-------------+----+----+----+----+---- - personnel | 5 | 3500 | 12-10-2007 | 2 | 1 | 2 | 2 | 1 - sales | 3 | 4800 | 08-01-2007 | 3 | 1 | 3 | 3 | 1 -(2 rows) - --- Ensure we remove references to reduced outer joins as nulling rels in run --- conditions -EXPLAIN (COSTS OFF) -SELECT 1 FROM - (SELECT ntile(e2.salary) OVER (PARTITION BY e1.depname) AS c - FROM empsalary e1 LEFT JOIN empsalary e2 ON TRUE - WHERE e1.empno = e2.empno) s -WHERE s.c = 1; - QUERY PLAN ---------------------------------------------------------- - Subquery Scan on s - Filter: (s.c = 1) - -> WindowAgg - Run Condition: (ntile(e2.salary) OVER (?) <= 1) - -> Sort - Sort Key: e1.depname - -> Merge Join - Merge Cond: (e1.empno = e2.empno) - -> Sort - Sort Key: e1.empno - -> Seq Scan on empsalary e1 - -> Sort - Sort Key: e2.empno - -> Seq Scan on empsalary e2 -(14 rows) - --- Ensure the run condition optimization is used in cases where the WindowFunc --- has a Var from another query level -EXPLAIN (COSTS OFF) -SELECT 1 FROM - (SELECT ntile(s1.x) OVER () AS c - FROM (SELECT (SELECT 1) AS x) AS s1) s -WHERE s.c = 1; - QUERY PLAN ------------------------------------------------------------------ - Subquery Scan on s - Filter: (s.c = 1) - -> WindowAgg - Run Condition: (ntile((InitPlan 1).col1) OVER (?) <= 1) - InitPlan 1 - -> Result - -> Result -(7 rows) - --- Tests to ensure we don't push down the run condition when it's not valid to --- do so. 
--- Ensure we don't push down when the frame options show that the window --- function is not monotonically increasing -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - salary, - count(*) OVER (ORDER BY salary DESC ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) c - FROM empsalary) emp -WHERE c <= 3; - QUERY PLAN ------------------------------------------------ - Subquery Scan on emp - Filter: (emp.c <= 3) - -> WindowAgg - -> Sort - Sort Key: empsalary.salary DESC - -> Seq Scan on empsalary -(6 rows) - --- Ensure we don't push down when the window function's monotonic properties --- don't match that of the clauses. -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - salary, - count(*) OVER (ORDER BY salary) c - FROM empsalary) emp -WHERE 3 <= c; - QUERY PLAN ------------------------------------------- - Subquery Scan on emp - Filter: (3 <= emp.c) - -> WindowAgg - -> Sort - Sort Key: empsalary.salary - -> Seq Scan on empsalary -(6 rows) - --- Ensure we don't use a run condition when there's a volatile function in the --- WindowFunc -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - salary, - count(random()) OVER (ORDER BY empno DESC) c - FROM empsalary) emp -WHERE c = 1; - QUERY PLAN ----------------------------------------------- - Subquery Scan on emp - Filter: (emp.c = 1) - -> WindowAgg - -> Sort - Sort Key: empsalary.empno DESC - -> Seq Scan on empsalary -(6 rows) - --- Ensure we don't use a run condition when the WindowFunc contains subplans -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT empno, - salary, - count((SELECT 1)) OVER (ORDER BY empno DESC) c - FROM empsalary) emp -WHERE c = 1; - QUERY PLAN ----------------------------------------------- - Subquery Scan on emp - Filter: (emp.c = 1) - -> WindowAgg - InitPlan 1 - -> Result - -> Sort - Sort Key: empsalary.empno DESC - -> Seq Scan on empsalary -(8 rows) - --- Test Sort node collapsing -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT depname, - sum(salary) OVER (PARTITION BY depname order by empno) depsalary, - min(salary) OVER (PARTITION BY depname, empno order by enroll_date) depminsalary - FROM empsalary) emp -WHERE depname = 'sales'; - QUERY PLAN ----------------------------------------------------------------------- - Subquery Scan on emp - -> WindowAgg - -> WindowAgg - -> Sort - Sort Key: empsalary.empno, empsalary.enroll_date - -> Seq Scan on empsalary - Filter: ((depname)::text = 'sales'::text) -(7 rows) - --- Ensure that the evaluation order of the WindowAggs results in the WindowAgg --- with the same sort order that's required by the ORDER BY is evaluated last. -EXPLAIN (COSTS OFF) -SELECT empno, - enroll_date, - depname, - sum(salary) OVER (PARTITION BY depname order by empno) depsalary, - min(salary) OVER (PARTITION BY depname order by enroll_date) depminsalary -FROM empsalary -ORDER BY depname, empno; - QUERY PLAN ----------------------------------------------------- - WindowAgg - -> Incremental Sort - Sort Key: depname, empno - Presorted Key: depname - -> WindowAgg - -> Sort - Sort Key: depname, enroll_date - -> Seq Scan on empsalary -(8 rows) - --- As above, but with an adjusted ORDER BY to ensure the above plan didn't --- perform only 2 sorts by accident. 
-EXPLAIN (COSTS OFF) -SELECT empno, - enroll_date, - depname, - sum(salary) OVER (PARTITION BY depname order by empno) depsalary, - min(salary) OVER (PARTITION BY depname order by enroll_date) depminsalary -FROM empsalary -ORDER BY depname, enroll_date; - QUERY PLAN ------------------------------------------------ - WindowAgg - -> Incremental Sort - Sort Key: depname, enroll_date - Presorted Key: depname - -> WindowAgg - -> Sort - Sort Key: depname, empno - -> Seq Scan on empsalary -(8 rows) - -SET enable_hashagg TO off; --- Ensure we don't get a sort for both DISTINCT and ORDER BY. We expect the --- sort for the DISTINCT to provide presorted input for the ORDER BY. -EXPLAIN (COSTS OFF) -SELECT DISTINCT - empno, - enroll_date, - depname, - sum(salary) OVER (PARTITION BY depname order by empno) depsalary, - min(salary) OVER (PARTITION BY depname order by enroll_date) depminsalary -FROM empsalary -ORDER BY depname, enroll_date; - QUERY PLAN ------------------------------------------------------------------------------------------------ - Unique - -> Incremental Sort - Sort Key: depname, enroll_date, empno, (sum(salary) OVER (?)), (min(salary) OVER (?)) - Presorted Key: depname, enroll_date - -> WindowAgg - -> Incremental Sort - Sort Key: depname, enroll_date - Presorted Key: depname - -> WindowAgg - -> Sort - Sort Key: depname, empno - -> Seq Scan on empsalary -(12 rows) - --- As above but adjust the ORDER BY clause to help ensure the plan with the --- minimum amount of sorting wasn't a fluke. -EXPLAIN (COSTS OFF) -SELECT DISTINCT - empno, - enroll_date, - depname, - sum(salary) OVER (PARTITION BY depname order by empno) depsalary, - min(salary) OVER (PARTITION BY depname order by enroll_date) depminsalary -FROM empsalary -ORDER BY depname, empno; - QUERY PLAN ------------------------------------------------------------------------------------------------ - Unique - -> Incremental Sort - Sort Key: depname, empno, enroll_date, (sum(salary) OVER (?)), (min(salary) OVER (?)) - Presorted Key: depname, empno - -> WindowAgg - -> Incremental Sort - Sort Key: depname, empno - Presorted Key: depname - -> WindowAgg - -> Sort - Sort Key: depname, enroll_date - -> Seq Scan on empsalary -(12 rows) - -RESET enable_hashagg; --- Test Sort node reordering -EXPLAIN (COSTS OFF) -SELECT - lead(1) OVER (PARTITION BY depname ORDER BY salary, enroll_date), - lag(1) OVER (PARTITION BY depname ORDER BY salary,enroll_date,empno) -FROM empsalary; - QUERY PLAN -------------------------------------------------------------- - WindowAgg - -> WindowAgg - -> Sort - Sort Key: depname, salary, enroll_date, empno - -> Seq Scan on empsalary -(5 rows) - --- Test incremental sorting -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT depname, - empno, - salary, - enroll_date, - row_number() OVER (PARTITION BY depname ORDER BY enroll_date) AS first_emp, - row_number() OVER (PARTITION BY depname ORDER BY enroll_date DESC) AS last_emp - FROM empsalary) emp -WHERE first_emp = 1 OR last_emp = 1; - QUERY PLAN ------------------------------------------------------------------------------------ - Subquery Scan on emp - Filter: ((emp.first_emp = 1) OR (emp.last_emp = 1)) - -> WindowAgg - -> Incremental Sort - Sort Key: empsalary.depname, empsalary.enroll_date - Presorted Key: empsalary.depname - -> WindowAgg - -> Sort - Sort Key: empsalary.depname, empsalary.enroll_date DESC - -> Seq Scan on empsalary -(10 rows) - -SELECT * FROM - (SELECT depname, - empno, - salary, - enroll_date, - row_number() OVER (PARTITION BY depname ORDER BY enroll_date) 
AS first_emp, - row_number() OVER (PARTITION BY depname ORDER BY enroll_date DESC) AS last_emp - FROM empsalary) emp -WHERE first_emp = 1 OR last_emp = 1; - depname | empno | salary | enroll_date | first_emp | last_emp ------------+-------+--------+-------------+-----------+---------- - develop | 8 | 6000 | 10-01-2006 | 1 | 5 - develop | 7 | 4200 | 01-01-2008 | 5 | 1 - personnel | 2 | 3900 | 12-23-2006 | 1 | 2 - personnel | 5 | 3500 | 12-10-2007 | 2 | 1 - sales | 1 | 5000 | 10-01-2006 | 1 | 3 - sales | 4 | 4800 | 08-08-2007 | 3 | 1 -(6 rows) - --- cleanup -DROP TABLE empsalary; --- test user-defined window function with named args and default args -CREATE FUNCTION nth_value_def(val anyelement, n integer = 1) RETURNS anyelement - LANGUAGE internal WINDOW IMMUTABLE STRICT AS 'window_nth_value'; -SELECT nth_value_def(n := 2, val := ten) OVER (PARTITION BY four), ten, four - FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten) s; - nth_value_def | ten | four ----------------+-----+------ - 0 | 0 | 0 - 0 | 0 | 0 - 0 | 4 | 0 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 7 | 1 - 1 | 9 | 1 - | 0 | 2 - 3 | 1 | 3 - 3 | 3 | 3 -(10 rows) - -SELECT nth_value_def(ten) OVER (PARTITION BY four), ten, four - FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten) s; - nth_value_def | ten | four ----------------+-----+------ - 0 | 0 | 0 - 0 | 0 | 0 - 0 | 4 | 0 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 7 | 1 - 1 | 9 | 1 - 0 | 0 | 2 - 1 | 1 | 3 - 1 | 3 | 3 -(10 rows) - --- --- Test the basic moving-aggregate machinery --- --- create aggregates that record the series of transform calls (these are --- intentionally not true inverses) -CREATE FUNCTION logging_sfunc_nonstrict(text, anyelement) RETURNS text AS -$$ SELECT COALESCE($1, '') || '*' || quote_nullable($2) $$ -LANGUAGE SQL IMMUTABLE; -CREATE FUNCTION logging_msfunc_nonstrict(text, anyelement) RETURNS text AS -$$ SELECT COALESCE($1, '') || '+' || quote_nullable($2) $$ -LANGUAGE SQL IMMUTABLE; -CREATE FUNCTION logging_minvfunc_nonstrict(text, anyelement) RETURNS text AS -$$ SELECT $1 || '-' || quote_nullable($2) $$ -LANGUAGE SQL IMMUTABLE; -CREATE AGGREGATE logging_agg_nonstrict (anyelement) -( - stype = text, - sfunc = logging_sfunc_nonstrict, - mstype = text, - msfunc = logging_msfunc_nonstrict, - minvfunc = logging_minvfunc_nonstrict -); -CREATE AGGREGATE logging_agg_nonstrict_initcond (anyelement) -( - stype = text, - sfunc = logging_sfunc_nonstrict, - mstype = text, - msfunc = logging_msfunc_nonstrict, - minvfunc = logging_minvfunc_nonstrict, - initcond = 'I', - minitcond = 'MI' -); -CREATE FUNCTION logging_sfunc_strict(text, anyelement) RETURNS text AS -$$ SELECT $1 || '*' || quote_nullable($2) $$ -LANGUAGE SQL STRICT IMMUTABLE; -CREATE FUNCTION logging_msfunc_strict(text, anyelement) RETURNS text AS -$$ SELECT $1 || '+' || quote_nullable($2) $$ -LANGUAGE SQL STRICT IMMUTABLE; -CREATE FUNCTION logging_minvfunc_strict(text, anyelement) RETURNS text AS -$$ SELECT $1 || '-' || quote_nullable($2) $$ -LANGUAGE SQL STRICT IMMUTABLE; -CREATE AGGREGATE logging_agg_strict (text) -( - stype = text, - sfunc = logging_sfunc_strict, - mstype = text, - msfunc = logging_msfunc_strict, - minvfunc = logging_minvfunc_strict -); -CREATE AGGREGATE logging_agg_strict_initcond (anyelement) -( - stype = text, - sfunc = logging_sfunc_strict, - mstype = text, - msfunc = logging_msfunc_strict, - minvfunc = logging_minvfunc_strict, - initcond = 'I', - minitcond = 'MI' -); --- test strict and non-strict cases -SELECT - p::text || ',' || i::text || ':' || COALESCE(v::text, 'NULL') AS 
row, - logging_agg_nonstrict(v) over wnd as nstrict, - logging_agg_nonstrict_initcond(v) over wnd as nstrict_init, - logging_agg_strict(v::text) over wnd as strict, - logging_agg_strict_initcond(v) over wnd as strict_init -FROM (VALUES - (1, 1, NULL), - (1, 2, 'a'), - (1, 3, 'b'), - (1, 4, NULL), - (1, 5, NULL), - (1, 6, 'c'), - (2, 1, NULL), - (2, 2, 'x'), - (3, 1, 'z') -) AS t(p, i, v) -WINDOW wnd AS (PARTITION BY P ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) -ORDER BY p, i; - row | nstrict | nstrict_init | strict | strict_init -----------+-----------------------------------------------+-------------------------------------------------+-----------+---------------- - 1,1:NULL | +NULL | MI+NULL | | MI - 1,2:a | +NULL+'a' | MI+NULL+'a' | a | MI+'a' - 1,3:b | +NULL+'a'-NULL+'b' | MI+NULL+'a'-NULL+'b' | a+'b' | MI+'a'+'b' - 1,4:NULL | +NULL+'a'-NULL+'b'-'a'+NULL | MI+NULL+'a'-NULL+'b'-'a'+NULL | a+'b'-'a' | MI+'a'+'b'-'a' - 1,5:NULL | +NULL+'a'-NULL+'b'-'a'+NULL-'b'+NULL | MI+NULL+'a'-NULL+'b'-'a'+NULL-'b'+NULL | | MI - 1,6:c | +NULL+'a'-NULL+'b'-'a'+NULL-'b'+NULL-NULL+'c' | MI+NULL+'a'-NULL+'b'-'a'+NULL-'b'+NULL-NULL+'c' | c | MI+'c' - 2,1:NULL | +NULL | MI+NULL | | MI - 2,2:x | +NULL+'x' | MI+NULL+'x' | x | MI+'x' - 3,1:z | +'z' | MI+'z' | z | MI+'z' -(9 rows) - --- and again, but with filter -SELECT - p::text || ',' || i::text || ':' || - CASE WHEN f THEN COALESCE(v::text, 'NULL') ELSE '-' END as row, - logging_agg_nonstrict(v) filter(where f) over wnd as nstrict_filt, - logging_agg_nonstrict_initcond(v) filter(where f) over wnd as nstrict_init_filt, - logging_agg_strict(v::text) filter(where f) over wnd as strict_filt, - logging_agg_strict_initcond(v) filter(where f) over wnd as strict_init_filt -FROM (VALUES - (1, 1, true, NULL), - (1, 2, false, 'a'), - (1, 3, true, 'b'), - (1, 4, false, NULL), - (1, 5, false, NULL), - (1, 6, false, 'c'), - (2, 1, false, NULL), - (2, 2, true, 'x'), - (3, 1, true, 'z') -) AS t(p, i, f, v) -WINDOW wnd AS (PARTITION BY p ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) -ORDER BY p, i; - row | nstrict_filt | nstrict_init_filt | strict_filt | strict_init_filt -----------+--------------+-------------------+-------------+------------------ - 1,1:NULL | +NULL | MI+NULL | | MI - 1,2:- | +NULL | MI+NULL | | MI - 1,3:b | +'b' | MI+'b' | b | MI+'b' - 1,4:- | +'b' | MI+'b' | b | MI+'b' - 1,5:- | | MI | | MI - 1,6:- | | MI | | MI - 2,1:- | | MI | | MI - 2,2:x | +'x' | MI+'x' | x | MI+'x' - 3,1:z | +'z' | MI+'z' | z | MI+'z' -(9 rows) - --- test that volatile arguments disable moving-aggregate mode -SELECT - i::text || ':' || COALESCE(v::text, 'NULL') as row, - logging_agg_strict(v::text) - over wnd as inverse, - logging_agg_strict(v::text || CASE WHEN random() < 0 then '?' 
ELSE '' END) - over wnd as noinverse -FROM (VALUES - (1, 'a'), - (2, 'b'), - (3, 'c') -) AS t(i, v) -WINDOW wnd AS (ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) -ORDER BY i; - row | inverse | noinverse ------+---------------+----------- - 1:a | a | a - 2:b | a+'b' | a*'b' - 3:c | a+'b'-'a'+'c' | b*'c' -(3 rows) - -SELECT - i::text || ':' || COALESCE(v::text, 'NULL') as row, - logging_agg_strict(v::text) filter(where true) - over wnd as inverse, - logging_agg_strict(v::text) filter(where random() >= 0) - over wnd as noinverse -FROM (VALUES - (1, 'a'), - (2, 'b'), - (3, 'c') -) AS t(i, v) -WINDOW wnd AS (ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) -ORDER BY i; - row | inverse | noinverse ------+---------------+----------- - 1:a | a | a - 2:b | a+'b' | a*'b' - 3:c | a+'b'-'a'+'c' | b*'c' -(3 rows) - --- test that non-overlapping windows don't use inverse transitions -SELECT - logging_agg_strict(v::text) OVER wnd -FROM (VALUES - (1, 'a'), - (2, 'b'), - (3, 'c') -) AS t(i, v) -WINDOW wnd AS (ORDER BY i ROWS BETWEEN CURRENT ROW AND CURRENT ROW) -ORDER BY i; - logging_agg_strict --------------------- - a - b - c -(3 rows) - --- test that returning NULL from the inverse transition functions --- restarts the aggregation from scratch. The second aggregate is supposed --- to test cases where only some aggregates restart, the third one checks --- that one aggregate restarting doesn't cause others to restart. -CREATE FUNCTION sum_int_randrestart_minvfunc(int4, int4) RETURNS int4 AS -$$ SELECT CASE WHEN random() < 0.2 THEN NULL ELSE $1 - $2 END $$ -LANGUAGE SQL STRICT; -CREATE AGGREGATE sum_int_randomrestart (int4) -( - stype = int4, - sfunc = int4pl, - mstype = int4, - msfunc = int4pl, - minvfunc = sum_int_randrestart_minvfunc -); -WITH -vs AS ( - SELECT i, (random() * 100)::int4 AS v - FROM generate_series(1, 100) AS i -), -sum_following AS ( - SELECT i, SUM(v) OVER - (ORDER BY i DESC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS s - FROM vs -) -SELECT DISTINCT - sum_following.s = sum_int_randomrestart(v) OVER fwd AS eq1, - -sum_following.s = sum_int_randomrestart(-v) OVER fwd AS eq2, - 100*3+(vs.i-1)*3 = length(logging_agg_nonstrict(''::text) OVER fwd) AS eq3 -FROM vs -JOIN sum_following ON sum_following.i = vs.i -WINDOW fwd AS ( - ORDER BY vs.i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING -); - eq1 | eq2 | eq3 ------+-----+----- - t | t | t -(1 row) - --- --- Test various built-in aggregates that have moving-aggregate support --- --- test inverse transition functions handle NULLs properly -SELECT i,AVG(v::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); - i | avg ----+-------------------- - 1 | 1.5000000000000000 - 2 | 2.0000000000000000 - 3 | - 4 | -(4 rows) - -SELECT i,AVG(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); - i | avg ----+-------------------- - 1 | 1.5000000000000000 - 2 | 2.0000000000000000 - 3 | - 4 | -(4 rows) - -SELECT i,AVG(v::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); - i | avg ----+-------------------- - 1 | 1.5000000000000000 - 2 | 2.0000000000000000 - 3 | - 4 | -(4 rows) - -SELECT i,AVG(v::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,1.5),(2,2.5),(3,NULL),(4,NULL)) t(i,v); - i | avg ----+-------------------- - 1 | 2.0000000000000000 - 2 | 2.5000000000000000 - 
3 | - 4 | -(4 rows) - -SELECT i,AVG(v::interval) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,'1 sec'),(2,'2 sec'),(3,NULL),(4,NULL)) t(i,v); - i | avg ----+------------ - 1 | @ 1.5 secs - 2 | @ 2 secs - 3 | - 4 | -(4 rows) - --- moving aggregates over infinite intervals -SELECT x - ,avg(x) OVER(ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING ) as curr_next_avg - ,avg(x) OVER(ROWS BETWEEN 1 PRECEDING AND CURRENT ROW ) as prev_curr_avg - ,sum(x) OVER(ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING ) as curr_next_sum - ,sum(x) OVER(ROWS BETWEEN 1 PRECEDING AND CURRENT ROW ) as prev_curr_sum -FROM (VALUES (NULL::interval), - ('infinity'::interval), - ('-2147483648 days -2147483648 months -9223372036854775807 usecs'), -- extreme interval value - ('-infinity'::interval), - ('2147483647 days 2147483647 months 9223372036854775806 usecs'), -- extreme interval value - ('infinity'::interval), - ('6 days'::interval), - ('7 days'::interval), - (NULL::interval), - ('-infinity'::interval)) v(x); - x | curr_next_avg | prev_curr_avg | curr_next_sum | prev_curr_sum -------------------------------------------------------------------------------+-------------------+-------------------+---------------+--------------- - | infinity | | infinity | - infinity | infinity | infinity | infinity | infinity - @ 178956970 years 8 mons 2147483648 days 2562047788 hours 54.775807 secs ago | -infinity | infinity | -infinity | infinity - -infinity | -infinity | -infinity | -infinity | -infinity - @ 178956970 years 7 mons 2147483647 days 2562047788 hours 54.775806 secs | infinity | -infinity | infinity | -infinity - infinity | infinity | infinity | infinity | infinity - @ 6 days | @ 6 days 12 hours | infinity | @ 13 days | infinity - @ 7 days | @ 7 days | @ 6 days 12 hours | @ 7 days | @ 13 days - | -infinity | @ 7 days | -infinity | @ 7 days - -infinity | -infinity | -infinity | -infinity | -infinity -(10 rows) - ---should fail. -SELECT x, avg(x) OVER(ROWS BETWEEN CURRENT ROW AND 2 FOLLOWING) -FROM (VALUES (NULL::interval), - ('3 days'::interval), - ('infinity'::timestamptz - now()), - ('6 days'::interval), - ('-infinity'::interval)) v(x); -ERROR: interval out of range ---should fail. 
-SELECT x, sum(x) OVER(ROWS BETWEEN CURRENT ROW AND 2 FOLLOWING) -FROM (VALUES (NULL::interval), - ('3 days'::interval), - ('infinity'::timestamptz - now()), - ('6 days'::interval), - ('-infinity'::interval)) v(x); -ERROR: interval out of range -SELECT i,SUM(v::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); - i | sum ----+----- - 1 | 3 - 2 | 2 - 3 | - 4 | -(4 rows) - -SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); - i | sum ----+----- - 1 | 3 - 2 | 2 - 3 | - 4 | -(4 rows) - -SELECT i,SUM(v::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); - i | sum ----+----- - 1 | 3 - 2 | 2 - 3 | - 4 | -(4 rows) - -SELECT i,SUM(v::money) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,'1.10'),(2,'2.20'),(3,NULL),(4,NULL)) t(i,v); - i | sum ----+------- - 1 | $3.30 - 2 | $2.20 - 3 | - 4 | -(4 rows) - -SELECT i,SUM(v::interval) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,'1 sec'),(2,'2 sec'),(3,NULL),(4,NULL)) t(i,v); - i | sum ----+---------- - 1 | @ 3 secs - 2 | @ 2 secs - 3 | - 4 | -(4 rows) - -SELECT i,SUM(v::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,1.1),(2,2.2),(3,NULL),(4,NULL)) t(i,v); - i | sum ----+----- - 1 | 3.3 - 2 | 2.2 - 3 | - 4 | -(4 rows) - -SELECT SUM(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,1.01),(2,2),(3,3)) v(i,n); - sum ------- - 6.01 - 5 - 3 -(3 rows) - -SELECT i,COUNT(v) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); - i | count ----+------- - 1 | 2 - 2 | 1 - 3 | 0 - 4 | 0 -(4 rows) - -SELECT i,COUNT(*) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); - i | count ----+------- - 1 | 4 - 2 | 3 - 3 | 2 - 4 | 1 -(4 rows) - -SELECT VAR_POP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - var_pop ------------------------ - 21704.000000000000 - 13868.750000000000 - 11266.666666666667 - 4225.0000000000000000 - 0 -(5 rows) - -SELECT VAR_POP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - var_pop ------------------------ - 21704.000000000000 - 13868.750000000000 - 11266.666666666667 - 4225.0000000000000000 - 0 -(5 rows) - -SELECT VAR_POP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - var_pop ------------------------ - 21704.000000000000 - 13868.750000000000 - 11266.666666666667 - 4225.0000000000000000 - 0 -(5 rows) - -SELECT VAR_POP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - var_pop ------------------------ - 21704.000000000000 - 13868.750000000000 - 11266.666666666667 - 4225.0000000000000000 - 0 -(5 rows) - -SELECT VAR_SAMP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - var_samp ------------------------ - 27130.000000000000 - 18491.666666666667 - 
16900.000000000000 - 8450.0000000000000000 - -(5 rows) - -SELECT VAR_SAMP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - var_samp ------------------------ - 27130.000000000000 - 18491.666666666667 - 16900.000000000000 - 8450.0000000000000000 - -(5 rows) - -SELECT VAR_SAMP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - var_samp ------------------------ - 27130.000000000000 - 18491.666666666667 - 16900.000000000000 - 8450.0000000000000000 - -(5 rows) - -SELECT VAR_SAMP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - var_samp ------------------------ - 27130.000000000000 - 18491.666666666667 - 16900.000000000000 - 8450.0000000000000000 - -(5 rows) - -SELECT VARIANCE(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - variance ------------------------ - 27130.000000000000 - 18491.666666666667 - 16900.000000000000 - 8450.0000000000000000 - -(5 rows) - -SELECT VARIANCE(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - variance ------------------------ - 27130.000000000000 - 18491.666666666667 - 16900.000000000000 - 8450.0000000000000000 - -(5 rows) - -SELECT VARIANCE(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - variance ------------------------ - 27130.000000000000 - 18491.666666666667 - 16900.000000000000 - 8450.0000000000000000 - -(5 rows) - -SELECT VARIANCE(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - variance ------------------------ - 27130.000000000000 - 18491.666666666667 - 16900.000000000000 - 8450.0000000000000000 - -(5 rows) - -SELECT STDDEV_POP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); - stddev_pop ---------------------- - 147.322774885623 - 147.322774885623 - 117.765657133139 - 106.144555520604 - 65.0000000000000000 - 0 -(6 rows) - -SELECT STDDEV_POP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); - stddev_pop ---------------------- - 147.322774885623 - 147.322774885623 - 117.765657133139 - 106.144555520604 - 65.0000000000000000 - 0 -(6 rows) - -SELECT STDDEV_POP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); - stddev_pop ---------------------- - 147.322774885623 - 147.322774885623 - 117.765657133139 - 106.144555520604 - 65.0000000000000000 - 0 -(6 rows) - -SELECT STDDEV_POP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); - stddev_pop ---------------------- - 147.322774885623 - 147.322774885623 - 117.765657133139 - 106.144555520604 - 65.0000000000000000 - 0 -(6 rows) - -SELECT STDDEV_SAMP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM 
(VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); - stddev_samp ---------------------- - 164.711869639076 - 164.711869639076 - 135.984067694222 - 130.000000000000 - 91.9238815542511782 - -(6 rows) - -SELECT STDDEV_SAMP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); - stddev_samp ---------------------- - 164.711869639076 - 164.711869639076 - 135.984067694222 - 130.000000000000 - 91.9238815542511782 - -(6 rows) - -SELECT STDDEV_SAMP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); - stddev_samp ---------------------- - 164.711869639076 - 164.711869639076 - 135.984067694222 - 130.000000000000 - 91.9238815542511782 - -(6 rows) - -SELECT STDDEV_SAMP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); - stddev_samp ---------------------- - 164.711869639076 - 164.711869639076 - 135.984067694222 - 130.000000000000 - 91.9238815542511782 - -(6 rows) - -SELECT STDDEV(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - stddev ---------------------- - 164.711869639076 - 164.711869639076 - 135.984067694222 - 130.000000000000 - 91.9238815542511782 - -(6 rows) - -SELECT STDDEV(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - stddev ---------------------- - 164.711869639076 - 164.711869639076 - 135.984067694222 - 130.000000000000 - 91.9238815542511782 - -(6 rows) - -SELECT STDDEV(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - stddev ---------------------- - 164.711869639076 - 164.711869639076 - 135.984067694222 - 130.000000000000 - 91.9238815542511782 - -(6 rows) - -SELECT STDDEV(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) - FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); - stddev ---------------------- - 164.711869639076 - 164.711869639076 - 135.984067694222 - 130.000000000000 - 91.9238815542511782 - -(6 rows) - --- test that inverse transition functions work with various frame options -SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND CURRENT ROW) - FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); - i | sum ----+----- - 1 | 1 - 2 | 2 - 3 | - 4 | -(4 rows) - -SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING) - FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); - i | sum ----+----- - 1 | 3 - 2 | 2 - 3 | - 4 | -(4 rows) - -SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) - FROM (VALUES(1,1),(2,2),(3,3),(4,4)) t(i,v); - i | sum ----+----- - 1 | 3 - 2 | 6 - 3 | 9 - 4 | 7 -(4 rows) - --- ensure aggregate over numeric properly recovers from NaN values -SELECT a, b, - SUM(b) OVER(ORDER BY A ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) -FROM (VALUES(1,1::numeric),(2,2),(3,'NaN'),(4,3),(5,4)) t(a,b); - a | b | sum ----+-----+----- - 1 | 1 | 1 - 2 | 2 | 3 - 3 | NaN | NaN - 4 | 3 | NaN - 5 | 4 | 7 -(5 rows) - --- It might be tempting for someone to add an inverse trans function for --- float and double precision. 
This should not be done as it can give incorrect --- results. This test should fail if anyone ever does this without thinking too --- hard about it. -SELECT to_char(SUM(n::float8) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING),'999999999999999999999D9') - FROM (VALUES(1,1e20),(2,1)) n(i,n); - to_char --------------------------- - 100000000000000000000 - 1.0 -(2 rows) - -SELECT i, b, bool_and(b) OVER w, bool_or(b) OVER w - FROM (VALUES (1,true), (2,true), (3,false), (4,false), (5,true)) v(i,b) - WINDOW w AS (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING); - i | b | bool_and | bool_or ----+---+----------+--------- - 1 | t | t | t - 2 | t | f | t - 3 | f | f | f - 4 | f | f | t - 5 | t | t | t -(5 rows) - --- --- Test WindowAgg costing takes into account the number of rows that need to --- be fetched before the first row can be output. --- --- Ensure we get a cheap start up plan as the WindowAgg can output the first --- row after reading 1 row from the join. -EXPLAIN (COSTS OFF) -SELECT COUNT(*) OVER (ORDER BY t1.unique1) -FROM tenk1 t1 INNER JOIN tenk1 t2 ON t1.unique1 = t2.tenthous -LIMIT 1; - QUERY PLAN --------------------------------------------------------------------------- - Limit - -> WindowAgg - -> Nested Loop - -> Index Only Scan using tenk1_unique1 on tenk1 t1 - -> Index Only Scan using tenk1_thous_tenthous on tenk1 t2 - Index Cond: (tenthous = t1.unique1) -(6 rows) - --- Ensure we get a cheap total plan. Lack of ORDER BY in the WindowClause --- means that all rows must be read from the join, so a cheap startup plan --- isn't a good choice. -EXPLAIN (COSTS OFF) -SELECT COUNT(*) OVER () -FROM tenk1 t1 INNER JOIN tenk1 t2 ON t1.unique1 = t2.tenthous -WHERE t2.two = 1 -LIMIT 1; - QUERY PLAN -------------------------------------------------------------------- - Limit - -> WindowAgg - -> Hash Join - Hash Cond: (t1.unique1 = t2.tenthous) - -> Index Only Scan using tenk1_unique1 on tenk1 t1 - -> Hash - -> Seq Scan on tenk1 t2 - Filter: (two = 1) -(8 rows) - --- Ensure we get a cheap total plan. This time use UNBOUNDED FOLLOWING, which --- needs to read all join rows to output the first WindowAgg row. -EXPLAIN (COSTS OFF) -SELECT COUNT(*) OVER (ORDER BY t1.unique1 ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) -FROM tenk1 t1 INNER JOIN tenk1 t2 ON t1.unique1 = t2.tenthous -LIMIT 1; - QUERY PLAN --------------------------------------------------------------------------------- - Limit - -> WindowAgg - -> Merge Join - Merge Cond: (t1.unique1 = t2.tenthous) - -> Index Only Scan using tenk1_unique1 on tenk1 t1 - -> Sort - Sort Key: t2.tenthous - -> Index Only Scan using tenk1_thous_tenthous on tenk1 t2 -(8 rows) - --- Ensure we get a cheap total plan. This time use 10000 FOLLOWING so we need --- to read all join rows. -EXPLAIN (COSTS OFF) -SELECT COUNT(*) OVER (ORDER BY t1.unique1 ROWS BETWEEN UNBOUNDED PRECEDING AND 10000 FOLLOWING) -FROM tenk1 t1 INNER JOIN tenk1 t2 ON t1.unique1 = t2.tenthous -LIMIT 1; - QUERY PLAN --------------------------------------------------------------------------------- - Limit - -> WindowAgg - -> Merge Join - Merge Cond: (t1.unique1 = t2.tenthous) - -> Index Only Scan using tenk1_unique1 on tenk1 t1 - -> Sort - Sort Key: t2.tenthous - -> Index Only Scan using tenk1_thous_tenthous on tenk1 t2 -(8 rows) - --- Tests for problems with failure to walk or mutate expressions --- within window frame clauses. 
--- test walker (fails with collation error if expressions are not walked) -SELECT array_agg(i) OVER w - FROM generate_series(1,5) i -WINDOW w AS (ORDER BY i ROWS BETWEEN (('foo' < 'foobar')::integer) PRECEDING AND CURRENT ROW); - array_agg ------------ - {1} - {1,2} - {2,3} - {3,4} - {4,5} -(5 rows) - --- test mutator (fails when inlined if expressions are not mutated) -CREATE FUNCTION pg_temp.f(group_size BIGINT) RETURNS SETOF integer[] -AS $$ - SELECT array_agg(s) OVER w - FROM generate_series(1,5) s - WINDOW w AS (ORDER BY s ROWS BETWEEN CURRENT ROW AND GROUP_SIZE FOLLOWING) -$$ LANGUAGE SQL STABLE; -EXPLAIN (costs off) SELECT * FROM pg_temp.f(2); - QUERY PLAN ------------------------------------------------------- - Subquery Scan on f - -> WindowAgg - -> Sort - Sort Key: s.s - -> Function Scan on generate_series s -(5 rows) - -SELECT * FROM pg_temp.f(2); - f ---------- - {1,2,3} - {2,3,4} - {3,4,5} - {4,5} - {5} -(5 rows) - +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/xmlmap_1.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/xmlmap.out --- /Users/admin/pgsql/src/test/regress/expected/xmlmap_1.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/xmlmap.out 2024-12-13 13:20:11 @@ -1,107 +1,2 @@ -CREATE SCHEMA testxmlschema; -CREATE TABLE testxmlschema.test1 (a int, b text); -INSERT INTO testxmlschema.test1 VALUES (1, 'one'), (2, 'two'), (-1, null); -CREATE DOMAIN testxmldomain AS varchar; -CREATE TABLE testxmlschema.test2 (z int, y varchar(500), x char(6), - w numeric(9,2), v smallint, u bigint, t real, - s time, stz timetz, r timestamp, rtz timestamptz, q date, - p xml, o testxmldomain, n bool, m bytea, aaa text); -ALTER TABLE testxmlschema.test2 DROP COLUMN aaa; -INSERT INTO testxmlschema.test2 VALUES (55, 'abc', 'def', - 98.6, 2, 999, 0, - '21:07', '21:11 +05', '2009-06-08 21:07:30', '2009-06-08 21:07:30 -07', '2009-06-08', - NULL, 'ABC', true, 'XYZ'); -SELECT table_to_xml('testxmlschema.test1', false, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xml('testxmlschema.test1', true, false, 'foo'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xml('testxmlschema.test1', false, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xml('testxmlschema.test1', true, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xml('testxmlschema.test2', false, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xmlschema('testxmlschema.test1', false, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xmlschema('testxmlschema.test1', true, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. 
-SELECT table_to_xmlschema('testxmlschema.test1', false, true, 'foo'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xmlschema('testxmlschema.test1', true, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xmlschema('testxmlschema.test2', false, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xml_and_xmlschema('testxmlschema.test1', false, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xml_and_xmlschema('testxmlschema.test1', true, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xml_and_xmlschema('testxmlschema.test1', false, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xml_and_xmlschema('testxmlschema.test1', true, true, 'foo'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT query_to_xml('SELECT * FROM testxmlschema.test1', false, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT query_to_xmlschema('SELECT * FROM testxmlschema.test1', false, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT query_to_xml_and_xmlschema('SELECT * FROM testxmlschema.test1', true, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -DECLARE xc CURSOR WITH HOLD FOR SELECT * FROM testxmlschema.test1 ORDER BY 1, 2; -SELECT cursor_to_xml('xc'::refcursor, 5, false, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT cursor_to_xmlschema('xc'::refcursor, false, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -MOVE BACKWARD ALL IN xc; -SELECT cursor_to_xml('xc'::refcursor, 5, true, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT cursor_to_xmlschema('xc'::refcursor, true, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT schema_to_xml('testxmlschema', false, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT schema_to_xml('testxmlschema', true, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT schema_to_xmlschema('testxmlschema', false, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT schema_to_xmlschema('testxmlschema', true, false, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. 
-SELECT schema_to_xml_and_xmlschema('testxmlschema', true, true, 'foo'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. --- test that domains are transformed like their base types -CREATE DOMAIN testboolxmldomain AS bool; -CREATE DOMAIN testdatexmldomain AS date; -CREATE TABLE testxmlschema.test3 - AS SELECT true c1, - true::testboolxmldomain c2, - '2013-02-21'::date c3, - '2013-02-21'::testdatexmldomain c4; -SELECT xmlforest(c1, c2, c3, c4) FROM testxmlschema.test3; -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT table_to_xml('testxmlschema.test3', true, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/functional_deps.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/functional_deps.out --- /Users/admin/pgsql/src/test/regress/expected/functional_deps.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/functional_deps.out 2024-12-13 13:20:11 @@ -1,232 +1,2 @@ --- from http://www.depesz.com/index.php/2010/04/19/getting-unique-elements/ -CREATE TEMP TABLE articles ( - id int CONSTRAINT articles_pkey PRIMARY KEY, - keywords text, - title text UNIQUE NOT NULL, - body text UNIQUE, - created date -); -CREATE TEMP TABLE articles_in_category ( - article_id int, - category_id int, - changed date, - PRIMARY KEY (article_id, category_id) -); --- test functional dependencies based on primary keys/unique constraints --- base tables --- group by primary key (OK) -SELECT id, keywords, title, body, created -FROM articles -GROUP BY id; - id | keywords | title | body | created -----+----------+-------+------+--------- -(0 rows) - --- group by unique not null (fail/todo) -SELECT id, keywords, title, body, created -FROM articles -GROUP BY title; -ERROR: column "articles.id" must appear in the GROUP BY clause or be used in an aggregate function -LINE 1: SELECT id, keywords, title, body, created - ^ --- group by unique nullable (fail) -SELECT id, keywords, title, body, created -FROM articles -GROUP BY body; -ERROR: column "articles.id" must appear in the GROUP BY clause or be used in an aggregate function -LINE 1: SELECT id, keywords, title, body, created - ^ --- group by something else (fail) -SELECT id, keywords, title, body, created -FROM articles -GROUP BY keywords; -ERROR: column "articles.id" must appear in the GROUP BY clause or be used in an aggregate function -LINE 1: SELECT id, keywords, title, body, created - ^ --- multiple tables --- group by primary key (OK) -SELECT a.id, a.keywords, a.title, a.body, a.created -FROM articles AS a, articles_in_category AS aic -WHERE a.id = aic.article_id AND aic.category_id in (14,62,70,53,138) -GROUP BY a.id; - id | keywords | title | body | created -----+----------+-------+------+--------- -(0 rows) - --- group by something else (fail) -SELECT a.id, a.keywords, a.title, a.body, a.created -FROM articles AS a, articles_in_category AS aic -WHERE a.id = aic.article_id AND aic.category_id in (14,62,70,53,138) -GROUP BY aic.article_id, aic.category_id; -ERROR: column "a.id" must appear in the GROUP BY clause 
or be used in an aggregate function -LINE 1: SELECT a.id, a.keywords, a.title, a.body, a.created - ^ --- JOIN syntax --- group by left table's primary key (OK) -SELECT a.id, a.keywords, a.title, a.body, a.created -FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id -WHERE aic.category_id in (14,62,70,53,138) -GROUP BY a.id; - id | keywords | title | body | created -----+----------+-------+------+--------- -(0 rows) - --- group by something else (fail) -SELECT a.id, a.keywords, a.title, a.body, a.created -FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id -WHERE aic.category_id in (14,62,70,53,138) -GROUP BY aic.article_id, aic.category_id; -ERROR: column "a.id" must appear in the GROUP BY clause or be used in an aggregate function -LINE 1: SELECT a.id, a.keywords, a.title, a.body, a.created - ^ --- group by right table's (composite) primary key (OK) -SELECT aic.changed -FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id -WHERE aic.category_id in (14,62,70,53,138) -GROUP BY aic.category_id, aic.article_id; - changed ---------- -(0 rows) - --- group by right table's partial primary key (fail) -SELECT aic.changed -FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id -WHERE aic.category_id in (14,62,70,53,138) -GROUP BY aic.article_id; -ERROR: column "aic.changed" must appear in the GROUP BY clause or be used in an aggregate function -LINE 1: SELECT aic.changed - ^ --- example from documentation -CREATE TEMP TABLE products (product_id int, name text, price numeric); -CREATE TEMP TABLE sales (product_id int, units int); --- OK -SELECT product_id, p.name, (sum(s.units) * p.price) AS sales - FROM products p LEFT JOIN sales s USING (product_id) - GROUP BY product_id, p.name, p.price; - product_id | name | sales -------------+------+------- -(0 rows) - --- fail -SELECT product_id, p.name, (sum(s.units) * p.price) AS sales - FROM products p LEFT JOIN sales s USING (product_id) - GROUP BY product_id; -ERROR: column "p.name" must appear in the GROUP BY clause or be used in an aggregate function -LINE 1: SELECT product_id, p.name, (sum(s.units) * p.price) AS sales - ^ -ALTER TABLE products ADD PRIMARY KEY (product_id); --- OK now -SELECT product_id, p.name, (sum(s.units) * p.price) AS sales - FROM products p LEFT JOIN sales s USING (product_id) - GROUP BY product_id; - product_id | name | sales -------------+------+------- -(0 rows) - --- Drupal example, http://drupal.org/node/555530 -CREATE TEMP TABLE node ( - nid SERIAL, - vid integer NOT NULL default '0', - type varchar(32) NOT NULL default '', - title varchar(128) NOT NULL default '', - uid integer NOT NULL default '0', - status integer NOT NULL default '1', - created integer NOT NULL default '0', - -- snip - PRIMARY KEY (nid, vid) -); -CREATE TEMP TABLE users ( - uid integer NOT NULL default '0', - name varchar(60) NOT NULL default '', - pass varchar(32) NOT NULL default '', - -- snip - PRIMARY KEY (uid), - UNIQUE (name) -); --- OK -SELECT u.uid, u.name FROM node n -INNER JOIN users u ON u.uid = n.uid -WHERE n.type = 'blog' AND n.status = 1 -GROUP BY u.uid, u.name; - uid | name ------+------ -(0 rows) - --- OK -SELECT u.uid, u.name FROM node n -INNER JOIN users u ON u.uid = n.uid -WHERE n.type = 'blog' AND n.status = 1 -GROUP BY u.uid; - uid | name ------+------ -(0 rows) - --- Check views and dependencies --- fail -CREATE TEMP VIEW fdv1 AS -SELECT id, keywords, title, body, created -FROM articles -GROUP BY body; -ERROR: column "articles.id" must 
appear in the GROUP BY clause or be used in an aggregate function -LINE 2: SELECT id, keywords, title, body, created - ^ --- OK -CREATE TEMP VIEW fdv1 AS -SELECT id, keywords, title, body, created -FROM articles -GROUP BY id; --- fail -ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it -DETAIL: view fdv1 depends on constraint articles_pkey on table articles -HINT: Use DROP ... CASCADE to drop the dependent objects too. -DROP VIEW fdv1; --- multiple dependencies -CREATE TEMP VIEW fdv2 AS -SELECT a.id, a.keywords, a.title, aic.category_id, aic.changed -FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id -WHERE aic.category_id in (14,62,70,53,138) -GROUP BY a.id, aic.category_id, aic.article_id; -ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -- fail -ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it -DETAIL: view fdv2 depends on constraint articles_pkey on table articles -HINT: Use DROP ... CASCADE to drop the dependent objects too. -ALTER TABLE articles_in_category DROP CONSTRAINT articles_in_category_pkey RESTRICT; --fail -ERROR: cannot drop constraint articles_in_category_pkey on table articles_in_category because other objects depend on it -DETAIL: view fdv2 depends on constraint articles_in_category_pkey on table articles_in_category -HINT: Use DROP ... CASCADE to drop the dependent objects too. -DROP VIEW fdv2; --- nested queries -CREATE TEMP VIEW fdv3 AS -SELECT id, keywords, title, body, created -FROM articles -GROUP BY id -UNION -SELECT id, keywords, title, body, created -FROM articles -GROUP BY id; -ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -- fail -ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it -DETAIL: view fdv3 depends on constraint articles_pkey on table articles -HINT: Use DROP ... CASCADE to drop the dependent objects too. -DROP VIEW fdv3; -CREATE TEMP VIEW fdv4 AS -SELECT * FROM articles WHERE title IN (SELECT title FROM articles GROUP BY id); -ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -- fail -ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it -DETAIL: view fdv4 depends on constraint articles_pkey on table articles -HINT: Use DROP ... CASCADE to drop the dependent objects too. -DROP VIEW fdv4; --- prepared query plans: this results in failure on reuse -PREPARE foo AS - SELECT id, keywords, title, body, created - FROM articles - GROUP BY id; -EXECUTE foo; - id | keywords | title | body | created -----+----------+-------+------+--------- -(0 rows) - -ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -EXECUTE foo; -- fail -ERROR: column "articles.keywords" must appear in the GROUP BY clause or be used in an aggregate function +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? 
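As with the other files in this report, the tail of the functional_deps hunk is only psql's lost-connection message, so nothing above it is a genuine output mismatch. The dropped lines exercise the planner's functional-dependency rule: a GROUP BY may omit columns only when they are functionally dependent on a grouped primary key; a plain UNIQUE NOT NULL constraint is not accepted as a dependency source, and dropping the key invalidates previously valid plans on their next use. A distilled sketch of that rule, using illustrative names (fd_demo, fd_plan) that do not appear in the test file:

-- Hypothetical illustration; mirrors the behavior shown in the hunk above.
CREATE TEMP TABLE fd_demo (
    id    int PRIMARY KEY,
    label text UNIQUE NOT NULL,
    note  text
);
SELECT id, note FROM fd_demo GROUP BY id;       -- OK: note depends on the PK
SELECT label, note FROM fd_demo GROUP BY label; -- ERROR: UNIQUE NOT NULL is not enough
-- Prepared plans are revalidated on reuse, so dropping the key breaks them:
PREPARE fd_plan AS SELECT id, note FROM fd_demo GROUP BY id;
EXECUTE fd_plan;                                  -- OK
ALTER TABLE fd_demo DROP CONSTRAINT fd_demo_pkey;
EXECUTE fd_plan;  -- ERROR: "note" must now appear in GROUP BY or an aggregate
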
diff -U3 /Users/admin/pgsql/src/test/regress/expected/advisory_lock.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/advisory_lock.out --- /Users/admin/pgsql/src/test/regress/expected/advisory_lock.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/advisory_lock.out 2024-12-13 13:20:11 @@ -1,276 +1,2 @@ --- --- ADVISORY LOCKS --- -SELECT oid AS datoid FROM pg_database WHERE datname = current_database() \gset -BEGIN; -SELECT - pg_advisory_xact_lock(1), pg_advisory_xact_lock_shared(2), - pg_advisory_xact_lock(1, 1), pg_advisory_xact_lock_shared(2, 2); - pg_advisory_xact_lock | pg_advisory_xact_lock_shared | pg_advisory_xact_lock | pg_advisory_xact_lock_shared ------------------------+------------------------------+-----------------------+------------------------------ - | | | -(1 row) - -SELECT locktype, classid, objid, objsubid, mode, granted - FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid - ORDER BY classid, objid, objsubid; - locktype | classid | objid | objsubid | mode | granted -----------+---------+-------+----------+---------------+--------- - advisory | 0 | 1 | 1 | ExclusiveLock | t - advisory | 0 | 2 | 1 | ShareLock | t - advisory | 1 | 1 | 2 | ExclusiveLock | t - advisory | 2 | 2 | 2 | ShareLock | t -(4 rows) - --- pg_advisory_unlock_all() shouldn't release xact locks -SELECT pg_advisory_unlock_all(); - pg_advisory_unlock_all ------------------------- - -(1 row) - -SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid; - count -------- - 4 -(1 row) - --- can't unlock xact locks -SELECT - pg_advisory_unlock(1), pg_advisory_unlock_shared(2), - pg_advisory_unlock(1, 1), pg_advisory_unlock_shared(2, 2); -WARNING: you don't own a lock of type ExclusiveLock -WARNING: you don't own a lock of type ShareLock -WARNING: you don't own a lock of type ExclusiveLock -WARNING: you don't own a lock of type ShareLock - pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock | pg_advisory_unlock_shared ---------------------+---------------------------+--------------------+--------------------------- - f | f | f | f -(1 row) - --- automatically release xact locks at commit -COMMIT; -SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid; - count -------- - 0 -(1 row) - -BEGIN; --- holding both session and xact locks on the same objects, xact first -SELECT - pg_advisory_xact_lock(1), pg_advisory_xact_lock_shared(2), - pg_advisory_xact_lock(1, 1), pg_advisory_xact_lock_shared(2, 2); - pg_advisory_xact_lock | pg_advisory_xact_lock_shared | pg_advisory_xact_lock | pg_advisory_xact_lock_shared ------------------------+------------------------------+-----------------------+------------------------------ - | | | -(1 row) - -SELECT locktype, classid, objid, objsubid, mode, granted - FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid - ORDER BY classid, objid, objsubid; - locktype | classid | objid | objsubid | mode | granted -----------+---------+-------+----------+---------------+--------- - advisory | 0 | 1 | 1 | ExclusiveLock | t - advisory | 0 | 2 | 1 | ShareLock | t - advisory | 1 | 1 | 2 | ExclusiveLock | t - advisory | 2 | 2 | 2 | ShareLock | t -(4 rows) - -SELECT - pg_advisory_lock(1), pg_advisory_lock_shared(2), - pg_advisory_lock(1, 1), pg_advisory_lock_shared(2, 2); - pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock | pg_advisory_lock_shared 
-------------------+-------------------------+------------------+------------------------- - | | | -(1 row) - -ROLLBACK; -SELECT locktype, classid, objid, objsubid, mode, granted - FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid - ORDER BY classid, objid, objsubid; - locktype | classid | objid | objsubid | mode | granted -----------+---------+-------+----------+---------------+--------- - advisory | 0 | 1 | 1 | ExclusiveLock | t - advisory | 0 | 2 | 1 | ShareLock | t - advisory | 1 | 1 | 2 | ExclusiveLock | t - advisory | 2 | 2 | 2 | ShareLock | t -(4 rows) - --- unlocking session locks -SELECT - pg_advisory_unlock(1), pg_advisory_unlock(1), - pg_advisory_unlock_shared(2), pg_advisory_unlock_shared(2), - pg_advisory_unlock(1, 1), pg_advisory_unlock(1, 1), - pg_advisory_unlock_shared(2, 2), pg_advisory_unlock_shared(2, 2); -WARNING: you don't own a lock of type ExclusiveLock -WARNING: you don't own a lock of type ShareLock -WARNING: you don't own a lock of type ExclusiveLock -WARNING: you don't own a lock of type ShareLock - pg_advisory_unlock | pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock_shared | pg_advisory_unlock | pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock_shared ---------------------+--------------------+---------------------------+---------------------------+--------------------+--------------------+---------------------------+--------------------------- - t | f | t | f | t | f | t | f -(1 row) - -SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid; - count -------- - 0 -(1 row) - -BEGIN; --- holding both session and xact locks on the same objects, session first -SELECT - pg_advisory_lock(1), pg_advisory_lock_shared(2), - pg_advisory_lock(1, 1), pg_advisory_lock_shared(2, 2); - pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock | pg_advisory_lock_shared -------------------+-------------------------+------------------+------------------------- - | | | -(1 row) - -SELECT locktype, classid, objid, objsubid, mode, granted - FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid - ORDER BY classid, objid, objsubid; - locktype | classid | objid | objsubid | mode | granted -----------+---------+-------+----------+---------------+--------- - advisory | 0 | 1 | 1 | ExclusiveLock | t - advisory | 0 | 2 | 1 | ShareLock | t - advisory | 1 | 1 | 2 | ExclusiveLock | t - advisory | 2 | 2 | 2 | ShareLock | t -(4 rows) - -SELECT - pg_advisory_xact_lock(1), pg_advisory_xact_lock_shared(2), - pg_advisory_xact_lock(1, 1), pg_advisory_xact_lock_shared(2, 2); - pg_advisory_xact_lock | pg_advisory_xact_lock_shared | pg_advisory_xact_lock | pg_advisory_xact_lock_shared ------------------------+------------------------------+-----------------------+------------------------------ - | | | -(1 row) - -ROLLBACK; -SELECT locktype, classid, objid, objsubid, mode, granted - FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid - ORDER BY classid, objid, objsubid; - locktype | classid | objid | objsubid | mode | granted -----------+---------+-------+----------+---------------+--------- - advisory | 0 | 1 | 1 | ExclusiveLock | t - advisory | 0 | 2 | 1 | ShareLock | t - advisory | 1 | 1 | 2 | ExclusiveLock | t - advisory | 2 | 2 | 2 | ShareLock | t -(4 rows) - --- releasing all session locks -SELECT pg_advisory_unlock_all(); - pg_advisory_unlock_all ------------------------- - -(1 row) - -SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid; - count -------- - 0 -(1 
row) - -BEGIN; --- grabbing txn locks multiple times -SELECT - pg_advisory_xact_lock(1), pg_advisory_xact_lock(1), - pg_advisory_xact_lock_shared(2), pg_advisory_xact_lock_shared(2), - pg_advisory_xact_lock(1, 1), pg_advisory_xact_lock(1, 1), - pg_advisory_xact_lock_shared(2, 2), pg_advisory_xact_lock_shared(2, 2); - pg_advisory_xact_lock | pg_advisory_xact_lock | pg_advisory_xact_lock_shared | pg_advisory_xact_lock_shared | pg_advisory_xact_lock | pg_advisory_xact_lock | pg_advisory_xact_lock_shared | pg_advisory_xact_lock_shared ------------------------+-----------------------+------------------------------+------------------------------+-----------------------+-----------------------+------------------------------+------------------------------ - | | | | | | | -(1 row) - -SELECT locktype, classid, objid, objsubid, mode, granted - FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid - ORDER BY classid, objid, objsubid; - locktype | classid | objid | objsubid | mode | granted -----------+---------+-------+----------+---------------+--------- - advisory | 0 | 1 | 1 | ExclusiveLock | t - advisory | 0 | 2 | 1 | ShareLock | t - advisory | 1 | 1 | 2 | ExclusiveLock | t - advisory | 2 | 2 | 2 | ShareLock | t -(4 rows) - -COMMIT; -SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid; - count -------- - 0 -(1 row) - --- grabbing session locks multiple times -SELECT - pg_advisory_lock(1), pg_advisory_lock(1), - pg_advisory_lock_shared(2), pg_advisory_lock_shared(2), - pg_advisory_lock(1, 1), pg_advisory_lock(1, 1), - pg_advisory_lock_shared(2, 2), pg_advisory_lock_shared(2, 2); - pg_advisory_lock | pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock_shared | pg_advisory_lock | pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock_shared -------------------+------------------+-------------------------+-------------------------+------------------+------------------+-------------------------+------------------------- - | | | | | | | -(1 row) - -SELECT locktype, classid, objid, objsubid, mode, granted - FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid - ORDER BY classid, objid, objsubid; - locktype | classid | objid | objsubid | mode | granted -----------+---------+-------+----------+---------------+--------- - advisory | 0 | 1 | 1 | ExclusiveLock | t - advisory | 0 | 2 | 1 | ShareLock | t - advisory | 1 | 1 | 2 | ExclusiveLock | t - advisory | 2 | 2 | 2 | ShareLock | t -(4 rows) - -SELECT - pg_advisory_unlock(1), pg_advisory_unlock(1), - pg_advisory_unlock_shared(2), pg_advisory_unlock_shared(2), - pg_advisory_unlock(1, 1), pg_advisory_unlock(1, 1), - pg_advisory_unlock_shared(2, 2), pg_advisory_unlock_shared(2, 2); - pg_advisory_unlock | pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock_shared | pg_advisory_unlock | pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock_shared ---------------------+--------------------+---------------------------+---------------------------+--------------------+--------------------+---------------------------+--------------------------- - t | t | t | t | t | t | t | t -(1 row) - -SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid; - count -------- - 0 -(1 row) - --- .. 
and releasing them all at once -SELECT - pg_advisory_lock(1), pg_advisory_lock(1), - pg_advisory_lock_shared(2), pg_advisory_lock_shared(2), - pg_advisory_lock(1, 1), pg_advisory_lock(1, 1), - pg_advisory_lock_shared(2, 2), pg_advisory_lock_shared(2, 2); - pg_advisory_lock | pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock_shared | pg_advisory_lock | pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock_shared -------------------+------------------+-------------------------+-------------------------+------------------+------------------+-------------------------+------------------------- - | | | | | | | -(1 row) - -SELECT locktype, classid, objid, objsubid, mode, granted - FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid - ORDER BY classid, objid, objsubid; - locktype | classid | objid | objsubid | mode | granted -----------+---------+-------+----------+---------------+--------- - advisory | 0 | 1 | 1 | ExclusiveLock | t - advisory | 0 | 2 | 1 | ShareLock | t - advisory | 1 | 1 | 2 | ExclusiveLock | t - advisory | 2 | 2 | 2 | ShareLock | t -(4 rows) - -SELECT pg_advisory_unlock_all(); - pg_advisory_unlock_all ------------------------- - -(1 row) - -SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid; - count -------- - 0 -(1 row) - +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/indirect_toast.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/indirect_toast.out --- /Users/admin/pgsql/src/test/regress/expected/indirect_toast.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/indirect_toast.out 2024-12-13 13:20:11 @@ -1,166 +1,2 @@ --- --- Tests for external toast datums --- --- directory paths and dlsuffix are passed to us in environment variables -\getenv libdir PG_LIBDIR -\getenv dlsuffix PG_DLSUFFIX -\set regresslib :libdir '/regress' :dlsuffix -CREATE FUNCTION make_tuple_indirect (record) - RETURNS record - AS :'regresslib' - LANGUAGE C STRICT; --- Other compression algorithms may cause the compressed data to be stored --- inline. pglz guarantees that the data is externalized, so stick to it. 
-SET default_toast_compression = 'pglz'; -CREATE TABLE indtoasttest(descr text, cnt int DEFAULT 0, f1 text, f2 text); -INSERT INTO indtoasttest(descr, f1, f2) VALUES('two-compressed', repeat('1234567890',1000), repeat('1234567890',1000)); -INSERT INTO indtoasttest(descr, f1, f2) VALUES('two-toasted', repeat('1234567890',30000), repeat('1234567890',50000)); -INSERT INTO indtoasttest(descr, f1, f2) VALUES('one-compressed,one-null', NULL, repeat('1234567890',1000)); -INSERT INTO indtoasttest(descr, f1, f2) VALUES('one-toasted,one-null', NULL, repeat('1234567890',50000)); --- check whether indirect tuples works on the most basic level -SELECT descr, substring(make_tuple_indirect(indtoasttest)::text, 1, 200) FROM indtoasttest; - descr | substring --------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - two-compressed | (two-compressed,0,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012 - two-toasted | (two-toasted,0,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345 - one-compressed,one-null | ("one-compressed,one-null",0,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - one-toasted,one-null | ("one-toasted,one-null",0,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - --- modification without changing varlenas -UPDATE indtoasttest SET cnt = cnt +1 RETURNING substring(indtoasttest::text, 1, 200); - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - (two-compressed,1,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012 - (two-toasted,1,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345 - ("one-compressed,one-null",1,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",1,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - --- modification without modifying assigned value -UPDATE indtoasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(indtoasttest::text, 1, 200); - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - 
(two-compressed,2,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012 - (two-toasted,2,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345 - ("one-compressed,one-null",2,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",2,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - --- modification modifying, but effectively not changing -UPDATE indtoasttest SET cnt = cnt +1, f1 = f1||'' RETURNING substring(indtoasttest::text, 1, 200); - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - (two-compressed,3,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012 - (two-toasted,3,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345 - ("one-compressed,one-null",3,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",3,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - -UPDATE indtoasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(indtoasttest::text, 1, 200); - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - (two-compressed,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 - (two-toasted,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234 - ("one-compressed,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - -SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - 
(two-compressed,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 - (two-toasted,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234 - ("one-compressed,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - --- check we didn't screw with main/toast tuple visibility -VACUUM FREEZE indtoasttest; -SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - (two-compressed,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 - (two-toasted,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234 - ("one-compressed,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - --- now create a trigger that forces all Datums to be indirect ones -CREATE FUNCTION update_using_indirect() - RETURNS trigger - LANGUAGE plpgsql AS $$ -BEGIN - NEW := make_tuple_indirect(NEW); - RETURN NEW; -END$$; -CREATE TRIGGER indtoasttest_update_indirect - BEFORE INSERT OR UPDATE - ON indtoasttest - FOR EACH ROW - EXECUTE PROCEDURE update_using_indirect(); --- modification without changing varlenas -UPDATE indtoasttest SET cnt = cnt +1 RETURNING substring(indtoasttest::text, 1, 200); - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - (two-compressed,5,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 - (two-toasted,5,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234 - ("one-compressed,one-null",5,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",5,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - --- 
modification without modifying assigned value -UPDATE indtoasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(indtoasttest::text, 1, 200); - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - (two-compressed,6,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 - (two-toasted,6,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234 - ("one-compressed,one-null",6,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",6,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - --- modification modifying, but effectively not changing -UPDATE indtoasttest SET cnt = cnt +1, f1 = f1||'' RETURNING substring(indtoasttest::text, 1, 200); - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - (two-compressed,7,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 - (two-toasted,7,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234 - ("one-compressed,one-null",7,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",7,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - -UPDATE indtoasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(indtoasttest::text, 1, 200); - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - (two-compressed,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - (two-toasted,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 - ("one-compressed,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 -(4 rows) - -INSERT INTO 
indtoasttest(descr, f1, f2) VALUES('one-toasted,one-null, via indirect', repeat('1234567890',30000), NULL); -SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - (two-compressed,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - (two-toasted,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 - ("one-compressed,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 - ("one-toasted,one-null, via indirect",0,1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 -(5 rows) - --- check we didn't screw with main/toast tuple visibility -VACUUM FREEZE indtoasttest; -SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; - substring ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - (two-compressed,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - (two-toasted,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 - ("one-compressed,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 - ("one-toasted,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 - ("one-toasted,one-null, via indirect",0,1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 -(5 rows) - -DROP TABLE indtoasttest; -DROP FUNCTION update_using_indirect(); -RESET default_toast_compression; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? 
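The indirect_toast hunk fits the same pattern: its expected lines are missing only because the session died, not because any of the indirect-datum output changed. One detail worth keeping in mind when rereading it is the opening SET default_toast_compression = 'pglz': the test pins pglz because other codecs can shrink these highly repetitive strings enough to keep them inline, while make_tuple_indirect() needs genuinely out-of-line datums to exercise. A minimal sketch of forcing out-of-line storage, with an illustrative table name (toast_demo) and SET STORAGE EXTERNAL added here as an alternative way to guarantee it by skipping compression entirely:

-- Illustrative only; not part of the test file.
SET default_toast_compression = 'pglz';
CREATE TEMP TABLE toast_demo (f text);
ALTER TABLE toast_demo ALTER COLUMN f SET STORAGE EXTERNAL;  -- never compress
INSERT INTO toast_demo VALUES (repeat('1234567890', 50000)); -- ~500 kB value
-- pg_column_compression() is NULL for uncompressed datums; the size check
-- confirms the value was stored whole, out of line, in the TOAST table.
SELECT pg_column_compression(f), pg_column_size(f) FROM toast_demo;
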
diff -U3 /Users/admin/pgsql/src/test/regress/expected/equivclass.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/equivclass.out --- /Users/admin/pgsql/src/test/regress/expected/equivclass.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/equivclass.out 2024-12-13 13:20:11 @@ -1,505 +1,2 @@ --- --- Tests for the planner's "equivalence class" mechanism --- --- One thing that's not tested well during normal querying is the logic --- for handling "broken" ECs. This is because an EC can only become broken --- if its underlying btree operator family doesn't include a complete set --- of cross-type equality operators. There are not (and should not be) --- any such families built into Postgres; so we have to hack things up --- to create one. We do this by making two alias types that are really --- int8 (so we need no new C code) and adding only some operators for them --- into the standard integer_ops opfamily. -create type int8alias1; -create function int8alias1in(cstring) returns int8alias1 - strict immutable language internal as 'int8in'; -NOTICE: return type int8alias1 is only a shell -create function int8alias1out(int8alias1) returns cstring - strict immutable language internal as 'int8out'; -NOTICE: argument type int8alias1 is only a shell -LINE 1: create function int8alias1out(int8alias1) returns cstring - ^ -create type int8alias1 ( - input = int8alias1in, - output = int8alias1out, - like = int8 -); -create type int8alias2; -create function int8alias2in(cstring) returns int8alias2 - strict immutable language internal as 'int8in'; -NOTICE: return type int8alias2 is only a shell -create function int8alias2out(int8alias2) returns cstring - strict immutable language internal as 'int8out'; -NOTICE: argument type int8alias2 is only a shell -LINE 1: create function int8alias2out(int8alias2) returns cstring - ^ -create type int8alias2 ( - input = int8alias2in, - output = int8alias2out, - like = int8 -); -create cast (int8 as int8alias1) without function; -create cast (int8 as int8alias2) without function; -create cast (int8alias1 as int8) without function; -create cast (int8alias2 as int8) without function; -create function int8alias1eq(int8alias1, int8alias1) returns bool - strict immutable language internal as 'int8eq'; -create operator = ( - procedure = int8alias1eq, - leftarg = int8alias1, rightarg = int8alias1, - commutator = =, - restrict = eqsel, join = eqjoinsel, - merges -); -alter operator family integer_ops using btree add - operator 3 = (int8alias1, int8alias1); -create function int8alias2eq(int8alias2, int8alias2) returns bool - strict immutable language internal as 'int8eq'; -create operator = ( - procedure = int8alias2eq, - leftarg = int8alias2, rightarg = int8alias2, - commutator = =, - restrict = eqsel, join = eqjoinsel, - merges -); -alter operator family integer_ops using btree add - operator 3 = (int8alias2, int8alias2); -create function int8alias1eq(int8, int8alias1) returns bool - strict immutable language internal as 'int8eq'; -create operator = ( - procedure = int8alias1eq, - leftarg = int8, rightarg = int8alias1, - restrict = eqsel, join = eqjoinsel, - merges -); -alter operator family integer_ops using btree add - operator 3 = (int8, int8alias1); -create function int8alias1eq(int8alias1, int8alias2) returns bool - strict immutable language internal as 'int8eq'; -create operator = ( - procedure = int8alias1eq, - leftarg = int8alias1, rightarg = int8alias2, - restrict = eqsel, join = eqjoinsel, - 
merges -); -alter operator family integer_ops using btree add - operator 3 = (int8alias1, int8alias2); -create function int8alias1lt(int8alias1, int8alias1) returns bool - strict immutable language internal as 'int8lt'; -create operator < ( - procedure = int8alias1lt, - leftarg = int8alias1, rightarg = int8alias1 -); -alter operator family integer_ops using btree add - operator 1 < (int8alias1, int8alias1); -create function int8alias1cmp(int8, int8alias1) returns int - strict immutable language internal as 'btint8cmp'; -alter operator family integer_ops using btree add - function 1 int8alias1cmp (int8, int8alias1); -create table ec0 (ff int8 primary key, f1 int8, f2 int8); -create table ec1 (ff int8 primary key, f1 int8alias1, f2 int8alias2); -create table ec2 (xf int8 primary key, x1 int8alias1, x2 int8alias2); --- for the moment we only want to look at nestloop plans -set enable_hashjoin = off; -set enable_mergejoin = off; --- --- Note that for cases where there's a missing operator, we don't care so --- much whether the plan is ideal as that we don't fail or generate an --- outright incorrect plan. --- -explain (costs off) - select * from ec0 where ff = f1 and f1 = '42'::int8; - QUERY PLAN ------------------------------------ - Index Scan using ec0_pkey on ec0 - Index Cond: (ff = '42'::bigint) - Filter: (f1 = '42'::bigint) -(3 rows) - -explain (costs off) - select * from ec0 where ff = f1 and f1 = '42'::int8alias1; - QUERY PLAN ---------------------------------------- - Index Scan using ec0_pkey on ec0 - Index Cond: (ff = '42'::int8alias1) - Filter: (f1 = '42'::int8alias1) -(3 rows) - -explain (costs off) - select * from ec1 where ff = f1 and f1 = '42'::int8alias1; - QUERY PLAN ---------------------------------------- - Index Scan using ec1_pkey on ec1 - Index Cond: (ff = '42'::int8alias1) - Filter: (f1 = '42'::int8alias1) -(3 rows) - -explain (costs off) - select * from ec1 where ff = f1 and f1 = '42'::int8alias2; - QUERY PLAN ---------------------------------------------------- - Seq Scan on ec1 - Filter: ((ff = f1) AND (f1 = '42'::int8alias2)) -(2 rows) - -explain (costs off) - select * from ec1, ec2 where ff = x1 and ff = '42'::int8; - QUERY PLAN -------------------------------------------------------------------- - Nested Loop - Join Filter: (ec1.ff = ec2.x1) - -> Index Scan using ec1_pkey on ec1 - Index Cond: ((ff = '42'::bigint) AND (ff = '42'::bigint)) - -> Seq Scan on ec2 -(5 rows) - -explain (costs off) - select * from ec1, ec2 where ff = x1 and ff = '42'::int8alias1; - QUERY PLAN ---------------------------------------------- - Nested Loop - -> Index Scan using ec1_pkey on ec1 - Index Cond: (ff = '42'::int8alias1) - -> Seq Scan on ec2 - Filter: (x1 = '42'::int8alias1) -(5 rows) - -explain (costs off) - select * from ec1, ec2 where ff = x1 and '42'::int8 = x1; - QUERY PLAN ------------------------------------------ - Nested Loop - Join Filter: (ec1.ff = ec2.x1) - -> Index Scan using ec1_pkey on ec1 - Index Cond: (ff = '42'::bigint) - -> Seq Scan on ec2 - Filter: ('42'::bigint = x1) -(6 rows) - -explain (costs off) - select * from ec1, ec2 where ff = x1 and x1 = '42'::int8alias1; - QUERY PLAN ---------------------------------------------- - Nested Loop - -> Index Scan using ec1_pkey on ec1 - Index Cond: (ff = '42'::int8alias1) - -> Seq Scan on ec2 - Filter: (x1 = '42'::int8alias1) -(5 rows) - -explain (costs off) - select * from ec1, ec2 where ff = x1 and x1 = '42'::int8alias2; - QUERY PLAN ------------------------------------------ - Nested Loop - -> Seq Scan on ec2 - Filter: 
(x1 = '42'::int8alias2) - -> Index Scan using ec1_pkey on ec1 - Index Cond: (ff = ec2.x1) -(5 rows) - -create unique index ec1_expr1 on ec1((ff + 1)); -create unique index ec1_expr2 on ec1((ff + 2 + 1)); -create unique index ec1_expr3 on ec1((ff + 3 + 1)); -create unique index ec1_expr4 on ec1((ff + 4)); -explain (costs off) - select * from ec1, - (select ff + 1 as x from - (select ff + 2 as ff from ec1 - union all - select ff + 3 as ff from ec1) ss0 - union all - select ff + 4 as x from ec1) as ss1 - where ss1.x = ec1.f1 and ec1.ff = 42::int8; - QUERY PLAN ------------------------------------------------------ - Nested Loop - -> Index Scan using ec1_pkey on ec1 - Index Cond: (ff = '42'::bigint) - -> Append - -> Index Scan using ec1_expr2 on ec1 ec1_1 - Index Cond: (((ff + 2) + 1) = ec1.f1) - -> Index Scan using ec1_expr3 on ec1 ec1_2 - Index Cond: (((ff + 3) + 1) = ec1.f1) - -> Index Scan using ec1_expr4 on ec1 ec1_3 - Index Cond: ((ff + 4) = ec1.f1) -(10 rows) - -explain (costs off) - select * from ec1, - (select ff + 1 as x from - (select ff + 2 as ff from ec1 - union all - select ff + 3 as ff from ec1) ss0 - union all - select ff + 4 as x from ec1) as ss1 - where ss1.x = ec1.f1 and ec1.ff = 42::int8 and ec1.ff = ec1.f1; - QUERY PLAN -------------------------------------------------------------------- - Nested Loop - Join Filter: ((((ec1_1.ff + 2) + 1)) = ec1.f1) - -> Index Scan using ec1_pkey on ec1 - Index Cond: ((ff = '42'::bigint) AND (ff = '42'::bigint)) - Filter: (ff = f1) - -> Append - -> Index Scan using ec1_expr2 on ec1 ec1_1 - Index Cond: (((ff + 2) + 1) = '42'::bigint) - -> Index Scan using ec1_expr3 on ec1 ec1_2 - Index Cond: (((ff + 3) + 1) = '42'::bigint) - -> Index Scan using ec1_expr4 on ec1 ec1_3 - Index Cond: ((ff + 4) = '42'::bigint) -(12 rows) - -explain (costs off) - select * from ec1, - (select ff + 1 as x from - (select ff + 2 as ff from ec1 - union all - select ff + 3 as ff from ec1) ss0 - union all - select ff + 4 as x from ec1) as ss1, - (select ff + 1 as x from - (select ff + 2 as ff from ec1 - union all - select ff + 3 as ff from ec1) ss0 - union all - select ff + 4 as x from ec1) as ss2 - where ss1.x = ec1.f1 and ss1.x = ss2.x and ec1.ff = 42::int8; - QUERY PLAN ---------------------------------------------------------------------- - Nested Loop - -> Nested Loop - -> Index Scan using ec1_pkey on ec1 - Index Cond: (ff = '42'::bigint) - -> Append - -> Index Scan using ec1_expr2 on ec1 ec1_1 - Index Cond: (((ff + 2) + 1) = ec1.f1) - -> Index Scan using ec1_expr3 on ec1 ec1_2 - Index Cond: (((ff + 3) + 1) = ec1.f1) - -> Index Scan using ec1_expr4 on ec1 ec1_3 - Index Cond: ((ff + 4) = ec1.f1) - -> Append - -> Index Scan using ec1_expr2 on ec1 ec1_4 - Index Cond: (((ff + 2) + 1) = (((ec1_1.ff + 2) + 1))) - -> Index Scan using ec1_expr3 on ec1 ec1_5 - Index Cond: (((ff + 3) + 1) = (((ec1_1.ff + 2) + 1))) - -> Index Scan using ec1_expr4 on ec1 ec1_6 - Index Cond: ((ff + 4) = (((ec1_1.ff + 2) + 1))) -(18 rows) - --- let's try that as a mergejoin -set enable_mergejoin = on; -set enable_nestloop = off; -explain (costs off) - select * from ec1, - (select ff + 1 as x from - (select ff + 2 as ff from ec1 - union all - select ff + 3 as ff from ec1) ss0 - union all - select ff + 4 as x from ec1) as ss1, - (select ff + 1 as x from - (select ff + 2 as ff from ec1 - union all - select ff + 3 as ff from ec1) ss0 - union all - select ff + 4 as x from ec1) as ss2 - where ss1.x = ec1.f1 and ss1.x = ss2.x and ec1.ff = 42::int8; - QUERY PLAN 
------------------------------------------------------------------
- Merge Join
-   Merge Cond: ((((ec1_4.ff + 2) + 1)) = (((ec1_1.ff + 2) + 1)))
-   ->  Merge Append
-         Sort Key: (((ec1_4.ff + 2) + 1))
-         ->  Index Scan using ec1_expr2 on ec1 ec1_4
-         ->  Index Scan using ec1_expr3 on ec1 ec1_5
-         ->  Index Scan using ec1_expr4 on ec1 ec1_6
-   ->  Materialize
-         ->  Merge Join
-               Merge Cond: ((((ec1_1.ff + 2) + 1)) = ec1.f1)
-               ->  Merge Append
-                     Sort Key: (((ec1_1.ff + 2) + 1))
-                     ->  Index Scan using ec1_expr2 on ec1 ec1_1
-                     ->  Index Scan using ec1_expr3 on ec1 ec1_2
-                     ->  Index Scan using ec1_expr4 on ec1 ec1_3
-               ->  Sort
-                     Sort Key: ec1.f1 USING <
-                     ->  Index Scan using ec1_pkey on ec1
-                           Index Cond: (ff = '42'::bigint)
-(19 rows)
-
--- check partially indexed scan
-set enable_nestloop = on;
-set enable_mergejoin = off;
-drop index ec1_expr3;
-explain (costs off)
-  select * from ec1,
-    (select ff + 1 as x from
-       (select ff + 2 as ff from ec1
-        union all
-        select ff + 3 as ff from ec1) ss0
-     union all
-     select ff + 4 as x from ec1) as ss1
-  where ss1.x = ec1.f1 and ec1.ff = 42::int8;
-                      QUERY PLAN
------------------------------------------------------
- Nested Loop
-   ->  Index Scan using ec1_pkey on ec1
-         Index Cond: (ff = '42'::bigint)
-   ->  Append
-         ->  Index Scan using ec1_expr2 on ec1 ec1_1
-               Index Cond: (((ff + 2) + 1) = ec1.f1)
-         ->  Seq Scan on ec1 ec1_2
-               Filter: (((ff + 3) + 1) = ec1.f1)
-         ->  Index Scan using ec1_expr4 on ec1 ec1_3
-               Index Cond: ((ff + 4) = ec1.f1)
-(10 rows)
-
--- let's try that as a mergejoin
-set enable_mergejoin = on;
-set enable_nestloop = off;
-explain (costs off)
-  select * from ec1,
-    (select ff + 1 as x from
-       (select ff + 2 as ff from ec1
-        union all
-        select ff + 3 as ff from ec1) ss0
-     union all
-     select ff + 4 as x from ec1) as ss1
-  where ss1.x = ec1.f1 and ec1.ff = 42::int8;
-                      QUERY PLAN
------------------------------------------------------
- Merge Join
-   Merge Cond: ((((ec1_1.ff + 2) + 1)) = ec1.f1)
-   ->  Merge Append
-         Sort Key: (((ec1_1.ff + 2) + 1))
-         ->  Index Scan using ec1_expr2 on ec1 ec1_1
-         ->  Sort
-               Sort Key: (((ec1_2.ff + 3) + 1))
-               ->  Seq Scan on ec1 ec1_2
-         ->  Index Scan using ec1_expr4 on ec1 ec1_3
-   ->  Sort
-         Sort Key: ec1.f1 USING <
-         ->  Index Scan using ec1_pkey on ec1
-               Index Cond: (ff = '42'::bigint)
-(13 rows)
-
--- check effects of row-level security
-set enable_nestloop = on;
-set enable_mergejoin = off;
-alter table ec1 enable row level security;
-create policy p1 on ec1 using (f1 < '5'::int8alias1);
-create user regress_user_ectest;
-grant select on ec0 to regress_user_ectest;
-grant select on ec1 to regress_user_ectest;
--- without any RLS, we'll treat {a.ff, b.ff, 43} as an EquivalenceClass
-explain (costs off)
-  select * from ec0 a, ec1 b
-  where a.ff = b.ff and a.ff = 43::bigint::int8alias1;
-                  QUERY PLAN
----------------------------------------------
- Nested Loop
-   ->  Index Scan using ec0_pkey on ec0 a
-         Index Cond: (ff = '43'::int8alias1)
-   ->  Index Scan using ec1_pkey on ec1 b
-         Index Cond: (ff = '43'::int8alias1)
-(5 rows)
-
-set session authorization regress_user_ectest;
--- with RLS active, the non-leakproof a.ff = 43 clause is not treated
--- as a suitable source for an EquivalenceClass; currently, this is true
--- even though the RLS clause has nothing to do directly with the EC
-explain (costs off)
-  select * from ec0 a, ec1 b
-  where a.ff = b.ff and a.ff = 43::bigint::int8alias1;
-                  QUERY PLAN
----------------------------------------------
- Nested Loop
-   ->  Index Scan using ec0_pkey on ec0 a
-         Index Cond: (ff = '43'::int8alias1)
-   ->  Index Scan using ec1_pkey on ec1 b
-         Index Cond: (ff = a.ff)
-         Filter: (f1 < '5'::int8alias1)
-(6 rows)
-
-reset session authorization;
-revoke select on ec0 from regress_user_ectest;
-revoke select on ec1 from regress_user_ectest;
-drop user regress_user_ectest;
--- check that X=X is converted to X IS NOT NULL when appropriate
-explain (costs off)
-  select * from tenk1 where unique1 = unique1 and unique2 = unique2;
-                          QUERY PLAN
--------------------------------------------------------------
- Seq Scan on tenk1
-   Filter: ((unique1 IS NOT NULL) AND (unique2 IS NOT NULL))
-(2 rows)
-
--- this could be converted, but isn't at present
-explain (costs off)
-  select * from tenk1 where unique1 = unique1 or unique2 = unique2;
-                       QUERY PLAN
--------------------------------------------------------
- Seq Scan on tenk1
-   Filter: ((unique1 = unique1) OR (unique2 = unique2))
-(2 rows)
-
--- check that we recognize equivalence with dummy domains in the way
-create temp table undername (f1 name, f2 int);
-create temp view overview as
-  select f1::information_schema.sql_identifier as sqli, f2 from undername;
-explain (costs off)  -- this should not require a sort
-  select * from overview where sqli = 'foo' order by sqli;
-          QUERY PLAN
-------------------------------
- Seq Scan on undername
-   Filter: (f1 = 'foo'::name)
-(2 rows)
-
---
--- test handling of merge/hash clauses that do not have valid commutators
---
--- There are not (and should not be) any such operators built into Postgres
--- that are mergejoinable or hashable but have no commutators; so we leverage
--- the alias type 'int8alias1' created in this file to conduct the tests.
--- That's why this test is included here rather than in join.sql.
-begin;
-create table tbl_nocom(a int8, b int8alias1);
--- check that non-commutable merge clauses do not lead to error
-set enable_hashjoin to off;
-set enable_mergejoin to on;
-explain (costs off)
-select * from tbl_nocom t1 full join tbl_nocom t2 on t2.a = t1.b;
-              QUERY PLAN
---------------------------------------
- Merge Full Join
-   Merge Cond: (t2.a = t1.b)
-   ->  Sort
-         Sort Key: t2.a
-         ->  Seq Scan on tbl_nocom t2
-   ->  Sort
-         Sort Key: t1.b USING <
-         ->  Seq Scan on tbl_nocom t1
-(8 rows)
-
--- check that non-commutable hash clauses do not lead to error
-alter operator = (int8, int8alias1) set (hashes);
-alter operator family integer_ops using hash add
-  operator 1 = (int8, int8alias1);
-create function hashint8alias1(int8alias1) returns int
-  strict immutable language internal as 'hashint8';
-alter operator family integer_ops using hash add
-  function 1 hashint8alias1(int8alias1);
-set enable_hashjoin to on;
-set enable_mergejoin to off;
-explain (costs off)
-select * from tbl_nocom t1 full join tbl_nocom t2 on t2.a = t1.b;
-              QUERY PLAN
---------------------------------------
- Hash Full Join
-   Hash Cond: (t2.a = t1.b)
-   ->  Seq Scan on tbl_nocom t2
-   ->  Hash
-         ->  Seq Scan on tbl_nocom t1
-(5 rows)
-
-abort;
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+	Is the server running locally and accepting connections on that socket?
diff -U3 /Users/admin/pgsql/src/test/regress/expected/json.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/json.out
--- /Users/admin/pgsql/src/test/regress/expected/json.out	2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/json.out	2024-12-13 13:20:12
@@ -1,2718 +1,2 @@
--- Strings.
-SELECT '""'::json; -- OK. - json ------- - "" -(1 row) - -SELECT $$''$$::json; -- ERROR, single quotes are not allowed -ERROR: invalid input syntax for type json -LINE 1: SELECT $$''$$::json; - ^ -DETAIL: Token "'" is invalid. -CONTEXT: JSON data, line 1: '... -SELECT '"abc"'::json; -- OK - json -------- - "abc" -(1 row) - -SELECT '"abc'::json; -- ERROR, quotes not closed -ERROR: invalid input syntax for type json -LINE 1: SELECT '"abc'::json; - ^ -DETAIL: Token ""abc" is invalid. -CONTEXT: JSON data, line 1: "abc -SELECT '"abc -def"'::json; -- ERROR, unescaped newline in string constant -ERROR: invalid input syntax for type json -LINE 1: SELECT '"abc - ^ -DETAIL: Character with value 0x0a must be escaped. -CONTEXT: JSON data, line 1: "abc -SELECT '"\n\"\\"'::json; -- OK, legal escapes - json ----------- - "\n\"\\" -(1 row) - -SELECT '"\v"'::json; -- ERROR, not a valid JSON escape -ERROR: invalid input syntax for type json -LINE 1: SELECT '"\v"'::json; - ^ -DETAIL: Escape sequence "\v" is invalid. -CONTEXT: JSON data, line 1: "\v... --- Check fast path for longer strings (at least 16 bytes long) -SELECT ('"'||repeat('.', 12)||'abc"')::json; -- OK - json -------------------- - "............abc" -(1 row) - -SELECT ('"'||repeat('.', 12)||'abc\n"')::json; -- OK, legal escapes - json ---------------------- - "............abc\n" -(1 row) - --- Test various lengths of strings to validate SIMD processing to escape --- special chars in the JSON. -SELECT row_to_json(j)::jsonb FROM ( - SELECT left(E'abcdefghijklmnopqrstuv"\twxyz012345678', a) AS a - FROM generate_series(0,37) a -) j; - row_to_json --------------------------------------------------- - {"a": ""} - {"a": "a"} - {"a": "ab"} - {"a": "abc"} - {"a": "abcd"} - {"a": "abcde"} - {"a": "abcdef"} - {"a": "abcdefg"} - {"a": "abcdefgh"} - {"a": "abcdefghi"} - {"a": "abcdefghij"} - {"a": "abcdefghijk"} - {"a": "abcdefghijkl"} - {"a": "abcdefghijklm"} - {"a": "abcdefghijklmn"} - {"a": "abcdefghijklmno"} - {"a": "abcdefghijklmnop"} - {"a": "abcdefghijklmnopq"} - {"a": "abcdefghijklmnopqr"} - {"a": "abcdefghijklmnopqrs"} - {"a": "abcdefghijklmnopqrst"} - {"a": "abcdefghijklmnopqrstu"} - {"a": "abcdefghijklmnopqrstuv"} - {"a": "abcdefghijklmnopqrstuv\""} - {"a": "abcdefghijklmnopqrstuv\"\t"} - {"a": "abcdefghijklmnopqrstuv\"\tw"} - {"a": "abcdefghijklmnopqrstuv\"\twx"} - {"a": "abcdefghijklmnopqrstuv\"\twxy"} - {"a": "abcdefghijklmnopqrstuv\"\twxyz"} - {"a": "abcdefghijklmnopqrstuv\"\twxyz0"} - {"a": "abcdefghijklmnopqrstuv\"\twxyz01"} - {"a": "abcdefghijklmnopqrstuv\"\twxyz012"} - {"a": "abcdefghijklmnopqrstuv\"\twxyz0123"} - {"a": "abcdefghijklmnopqrstuv\"\twxyz01234"} - {"a": "abcdefghijklmnopqrstuv\"\twxyz012345"} - {"a": "abcdefghijklmnopqrstuv\"\twxyz0123456"} - {"a": "abcdefghijklmnopqrstuv\"\twxyz01234567"} - {"a": "abcdefghijklmnopqrstuv\"\twxyz012345678"} -(38 rows) - --- see json_encoding test for input with unicode escapes --- Numbers. -SELECT '1'::json; -- OK - json ------- - 1 -(1 row) - -SELECT '0'::json; -- OK - json ------- - 0 -(1 row) - -SELECT '01'::json; -- ERROR, not valid according to JSON spec -ERROR: invalid input syntax for type json -LINE 1: SELECT '01'::json; - ^ -DETAIL: Token "01" is invalid. 
-CONTEXT: JSON data, line 1: 01
-SELECT '0.1'::json; -- OK
- json 
------
- 0.1
-(1 row)
-
-SELECT '9223372036854775808'::json; -- OK, even though it's too large for int8
-        json         
----------------------
- 9223372036854775808
-(1 row)
-
-SELECT '1e100'::json; -- OK
- json  
--------
- 1e100
-(1 row)
-
-SELECT '1.3e100'::json; -- OK
-  json   
----------
- 1.3e100
-(1 row)
-
-SELECT '1f2'::json; -- ERROR
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '1f2'::json;
-               ^
-DETAIL: Token "1f2" is invalid.
-CONTEXT: JSON data, line 1: 1f2
-SELECT '0.x1'::json; -- ERROR
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '0.x1'::json;
-               ^
-DETAIL: Token "0.x1" is invalid.
-CONTEXT: JSON data, line 1: 0.x1
-SELECT '1.3ex100'::json; -- ERROR
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '1.3ex100'::json;
-               ^
-DETAIL: Token "1.3ex100" is invalid.
-CONTEXT: JSON data, line 1: 1.3ex100
--- Arrays.
-SELECT '[]'::json; -- OK
- json 
------
- []
-(1 row)
-
-SELECT '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]'::json; -- OK
-                                                                                                    json                                                                                                    
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
-(1 row)
-
-SELECT '[1,2]'::json; -- OK
- json  
--------
- [1,2]
-(1 row)
-
-SELECT '[1,2,]'::json; -- ERROR, trailing comma
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '[1,2,]'::json;
-               ^
-DETAIL: Expected JSON value, but found "]".
-CONTEXT: JSON data, line 1: [1,2,]
-SELECT '[1,2'::json; -- ERROR, no closing bracket
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '[1,2'::json;
-               ^
-DETAIL: The input string ended unexpectedly.
-CONTEXT: JSON data, line 1: [1,2
-SELECT '[1,[2]'::json; -- ERROR, no closing bracket
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '[1,[2]'::json;
-               ^
-DETAIL: The input string ended unexpectedly.
-CONTEXT: JSON data, line 1: [1,[2]
--- Objects.
-SELECT '{}'::json; -- OK
- json 
------
- {}
-(1 row)
-
-SELECT '{"abc"}'::json; -- ERROR, no value
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{"abc"}'::json;
-               ^
-DETAIL: Expected ":", but found "}".
-CONTEXT: JSON data, line 1: {"abc"}
-SELECT '{"abc":1}'::json; -- OK
-   json    
------------
- {"abc":1}
-(1 row)
-
-SELECT '{1:"abc"}'::json; -- ERROR, keys must be strings
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{1:"abc"}'::json;
-               ^
-DETAIL: Expected string or "}", but found "1".
-CONTEXT: JSON data, line 1: {1...
-SELECT '{"abc",1}'::json; -- ERROR, wrong separator
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{"abc",1}'::json;
-               ^
-DETAIL: Expected ":", but found ",".
-CONTEXT: JSON data, line 1: {"abc",...
-SELECT '{"abc"=1}'::json; -- ERROR, totally wrong separator
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{"abc"=1}'::json;
-               ^
-DETAIL: Token "=" is invalid.
-CONTEXT: JSON data, line 1: {"abc"=...
-SELECT '{"abc"::1}'::json; -- ERROR, another wrong separator
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{"abc"::1}'::json;
-               ^
-DETAIL: Expected JSON value, but found ":".
-CONTEXT: JSON data, line 1: {"abc"::...
-SELECT '{"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}}'::json; -- OK - json ---------------------------------------------------------- - {"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}} -(1 row) - -SELECT '{"abc":1:2}'::json; -- ERROR, colon in wrong spot -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc":1:2}'::json; - ^ -DETAIL: Expected "," or "}", but found ":". -CONTEXT: JSON data, line 1: {"abc":1:... -SELECT '{"abc":1,3}'::json; -- ERROR, no value -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc":1,3}'::json; - ^ -DETAIL: Expected string, but found "3". -CONTEXT: JSON data, line 1: {"abc":1,3... --- Recursion. -SET max_stack_depth = '100kB'; -SELECT repeat('[', 10000)::json; -ERROR: stack depth limit exceeded -HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate. -SELECT repeat('{"a":', 10000)::json; -ERROR: stack depth limit exceeded -HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate. -RESET max_stack_depth; --- Miscellaneous stuff. -SELECT 'true'::json; -- OK - json ------- - true -(1 row) - -SELECT 'false'::json; -- OK - json -------- - false -(1 row) - -SELECT 'null'::json; -- OK - json ------- - null -(1 row) - -SELECT ' true '::json; -- OK, even with extra whitespace - json --------- - true -(1 row) - -SELECT 'true false'::json; -- ERROR, too many values -ERROR: invalid input syntax for type json -LINE 1: SELECT 'true false'::json; - ^ -DETAIL: Expected end of input, but found "false". -CONTEXT: JSON data, line 1: true false -SELECT 'true, false'::json; -- ERROR, too many values -ERROR: invalid input syntax for type json -LINE 1: SELECT 'true, false'::json; - ^ -DETAIL: Expected end of input, but found ",". -CONTEXT: JSON data, line 1: true,... -SELECT 'truf'::json; -- ERROR, not a keyword -ERROR: invalid input syntax for type json -LINE 1: SELECT 'truf'::json; - ^ -DETAIL: Token "truf" is invalid. -CONTEXT: JSON data, line 1: truf -SELECT 'trues'::json; -- ERROR, not a keyword -ERROR: invalid input syntax for type json -LINE 1: SELECT 'trues'::json; - ^ -DETAIL: Token "trues" is invalid. -CONTEXT: JSON data, line 1: trues -SELECT ''::json; -- ERROR, no value -ERROR: invalid input syntax for type json -LINE 1: SELECT ''::json; - ^ -DETAIL: The input string ended unexpectedly. -CONTEXT: JSON data, line 1: -SELECT ' '::json; -- ERROR, no value -ERROR: invalid input syntax for type json -LINE 1: SELECT ' '::json; - ^ -DETAIL: The input string ended unexpectedly. -CONTEXT: JSON data, line 1: --- Multi-line JSON input to check ERROR reporting -SELECT '{ - "one": 1, - "two":"two", - "three": - true}'::json; -- OK - json ------------------------------- - { + - "one": 1, + - "two":"two",+ - "three": + - true} -(1 row) - -SELECT '{ - "one": 1, - "two":,"two", -- ERROR extraneous comma before field "two" - "three": - true}'::json; -ERROR: invalid input syntax for type json -LINE 1: SELECT '{ - ^ -DETAIL: Expected JSON value, but found ",". -CONTEXT: JSON data, line 3: "two":,... -SELECT '{ - "one": 1, - "two":"two", - "averyveryveryveryveryveryveryveryveryverylongfieldname":}'::json; -ERROR: invalid input syntax for type json -LINE 1: SELECT '{ - ^ -DETAIL: Expected JSON value, but found "}". 
-CONTEXT: JSON data, line 4: ...yveryveryveryveryveryveryveryverylongfieldname":}
--- ERROR missing value for last field
--- test non-error-throwing input
-select pg_input_is_valid('{"a":true}', 'json');
- pg_input_is_valid 
--------------------
- t
-(1 row)
-
-select pg_input_is_valid('{"a":true', 'json');
- pg_input_is_valid 
--------------------
- f
-(1 row)
-
-select * from pg_input_error_info('{"a":true', 'json');
-              message               |                detail                | hint | sql_error_code 
-------------------------------------+--------------------------------------+------+----------------
- invalid input syntax for type json | The input string ended unexpectedly. |      | 22P02
-(1 row)
-
---constructors
--- array_to_json
-SELECT array_to_json(array(select 1 as a));
- array_to_json 
----------------
- [1]
-(1 row)
-
-SELECT array_to_json(array_agg(q),false) from (select x as b, x * 2 as c from generate_series(1,3) x) q;
-                array_to_json                
---------------------------------------------- 
- [{"b":1,"c":2},{"b":2,"c":4},{"b":3,"c":6}]
-(1 row)
-
-SELECT array_to_json(array_agg(q),true) from (select x as b, x * 2 as c from generate_series(1,3) x) q;
-  array_to_json  
------------------ 
- [{"b":1,"c":2},+
-  {"b":2,"c":4},+
-  {"b":3,"c":6}]
-(1 row)
-
-SELECT array_to_json(array_agg(q),false)
-  FROM ( SELECT $$a$$ || x AS b, y AS c,
-         ARRAY[ROW(x.*,ARRAY[1,2,3]),
-               ROW(y.*,ARRAY[4,5,6])] AS z
-         FROM generate_series(1,2) x,
-              generate_series(4,5) y) q;
-                                                                                                                                  array_to_json                                                                                                                                  
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- [{"b":"a1","c":4,"z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]},{"b":"a1","c":5,"z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]},{"b":"a2","c":4,"z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]},{"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}]
-(1 row)
-
-SELECT array_to_json(array_agg(x),false) from generate_series(5,10) x;
- array_to_json  
----------------- 
- [5,6,7,8,9,10]
-(1 row)
-
-SELECT array_to_json('{{1,5},{99,100}}'::int[]);
-  array_to_json   
------------------- 
- [[1,5],[99,100]]
-(1 row)
-
--- row_to_json
-SELECT row_to_json(row(1,'foo'));
-     row_to_json     
---------------------- 
- {"f1":1,"f2":"foo"}
-(1 row)
-
-SELECT row_to_json(q)
-FROM (SELECT $$a$$ || x AS b,
-         y AS c,
-         ARRAY[ROW(x.*,ARRAY[1,2,3]),
-               ROW(y.*,ARRAY[4,5,6])] AS z
-      FROM generate_series(1,2) x,
-           generate_series(4,5) y) q;
-                            row_to_json                             
--------------------------------------------------------------------- 
- {"b":"a1","c":4,"z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}
- {"b":"a1","c":5,"z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}
- {"b":"a2","c":4,"z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}
- {"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}
-(4 rows)
-
-SELECT row_to_json(q,true)
-FROM (SELECT $$a$$ || x AS b,
-         y AS c,
-         ARRAY[ROW(x.*,ARRAY[1,2,3]),
-               ROW(y.*,ARRAY[4,5,6])] AS z
-      FROM generate_series(1,2) x,
-           generate_series(4,5) y) q;
-                     row_to_json                     
----------------------------------------------------- 
- {"b":"a1", +
-  "c":4,    +
-  "z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}
- {"b":"a1", +
-  "c":5,    +
-  "z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}
- {"b":"a2", +
-  "c":4,    +
-  "z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}
- {"b":"a2", +
-  "c":5,    +
-  "z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}
-(4 rows)
-
-CREATE TEMP TABLE rows AS
-SELECT x, 'txt' || x as y
-FROM generate_series(1,3) AS x;
-SELECT row_to_json(q,true)
-FROM rows q;
- row_to_json  
-------------- 
- {"x":1,    +
-  "y":"txt1"}
- {"x":2,    +
-  "y":"txt2"}
- {"x":3,    +
-  "y":"txt3"}
-(3 rows)
-
-SELECT row_to_json(row((select array_agg(x) as d from generate_series(5,10) x)),false);
-      row_to_json      
----------------------- 
- {"f1":[5,6,7,8,9,10]}
-(1 row)
-
--- anyarray column
-analyze rows;
-select attname, to_json(histogram_bounds) histogram_bounds
-from pg_stats
-where tablename = 'rows' and
-      schemaname = pg_my_temp_schema()::regnamespace::text
-order by 1;
- attname |    histogram_bounds    
----------+------------------------
- x       | [1,2,3]
- y       | ["txt1","txt2","txt3"]
-(2 rows)
-
--- to_json, timestamps
-select to_json(timestamp '2014-05-28 12:22:35.614298');
-           to_json            
------------------------------- 
- "2014-05-28T12:22:35.614298"
-(1 row)
-
-BEGIN;
-SET LOCAL TIME ZONE 10.5;
-select to_json(timestamptz '2014-05-28 12:22:35.614298-04');
-              to_json               
------------------------------------- 
- "2014-05-29T02:52:35.614298+10:30"
-(1 row)
-
-SET LOCAL TIME ZONE -8;
-select to_json(timestamptz '2014-05-28 12:22:35.614298-04');
-              to_json               
------------------------------------- 
- "2014-05-28T08:22:35.614298-08:00"
-(1 row)
-
-COMMIT;
-select to_json(date '2014-05-28');
-   to_json    
-------------- 
- "2014-05-28"
-(1 row)
-
-select to_json(date 'Infinity');
-  to_json   
------------- 
- "infinity"
-(1 row)
-
-select to_json(date '-Infinity');
-   to_json   
------------- 
- "-infinity"
-(1 row)
-
-select to_json(timestamp 'Infinity');
-  to_json   
------------- 
- "infinity"
-(1 row)
-
-select to_json(timestamp '-Infinity');
-   to_json   
------------- 
- "-infinity"
-(1 row)
-
-select to_json(timestamptz 'Infinity');
-  to_json   
------------- 
- "infinity"
-(1 row)
-
-select to_json(timestamptz '-Infinity');
-   to_json   
------------- 
- "-infinity"
-(1 row)
-
---json_agg
-SELECT json_agg(q)
-  FROM ( SELECT $$a$$ || x AS b, y AS c,
-         ARRAY[ROW(x.*,ARRAY[1,2,3]),
-               ROW(y.*,ARRAY[4,5,6])] AS z
-         FROM generate_series(1,2) x,
-              generate_series(4,5) y) q;
-                               json_agg                                
----------------------------------------------------------------------- 
- [{"b":"a1","c":4,"z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}, +
-  {"b":"a1","c":5,"z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}, +
-  {"b":"a2","c":4,"z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}, +
-  {"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}]
-(1 row)
-
-SELECT json_agg(q ORDER BY x, y)
-  FROM rows q;
-       json_agg        
----------------------- 
- [{"x":1,"y":"txt1"}, +
-  {"x":2,"y":"txt2"}, +
-  {"x":3,"y":"txt3"}]
-(1 row)
-
-UPDATE rows SET x = NULL WHERE x = 1;
-SELECT json_agg(q ORDER BY x NULLS FIRST, y)
-  FROM rows q;
-        json_agg          
-------------------------- 
- [{"x":null,"y":"txt1"}, +
-  {"x":2,"y":"txt2"},    +
-  {"x":3,"y":"txt3"}]
-(1 row)
-
--- non-numeric output
-SELECT row_to_json(q)
-FROM (SELECT 'NaN'::float8 AS "float8field") q;
-      row_to_json      
----------------------- 
- {"float8field":"NaN"}
-(1 row)
-
-SELECT row_to_json(q)
-FROM (SELECT 'Infinity'::float8 AS "float8field") q;
-        row_to_json         
---------------------------- 
- {"float8field":"Infinity"}
-(1 row)
-
-SELECT row_to_json(q)
-FROM (SELECT '-Infinity'::float8 AS "float8field") q;
-         row_to_json         
----------------------------- 
- {"float8field":"-Infinity"}
-(1 row)
-
--- json input
-SELECT row_to_json(q)
-FROM (SELECT '{"a":1,"b": [2,3,4,"d","e","f"],"c":{"p":1,"q":2}}'::json AS "jsonfield") q;
-                           row_to_json                            
------------------------------------------------------------------- 
- {"jsonfield":{"a":1,"b": [2,3,4,"d","e","f"],"c":{"p":1,"q":2}}}
-(1 row)
-
--- json extraction functions
-CREATE TEMP TABLE test_json (
-    json_type text,
-    test_json json
-);
-INSERT INTO test_json VALUES
-('scalar','"a scalar"'),
-('array','["zero", "one","two",null,"four","five", [1,2,3],{"f1":9}]'),
-('object','{"field1":"val1","field2":"val2","field3":null, "field4": 4, "field5": [1,2,3], "field6": {"f1":9}}');
-SELECT test_json -> 'x'
-FROM test_json
-WHERE json_type = 'scalar';
- ?column? 
----------- 
- 
-(1 row)
-
-SELECT test_json -> 'x'
-FROM test_json
-WHERE json_type = 'array';
- ?column? 
----------- 
- 
-(1 row)
-
-SELECT test_json -> 'x'
-FROM test_json
-WHERE json_type = 'object';
- ?column? 
----------- 
- 
-(1 row)
-
-SELECT test_json->'field2'
-FROM test_json
-WHERE json_type = 'object';
- ?column? 
---------- 
- "val2"
-(1 row)
-
-SELECT test_json->>'field2'
-FROM test_json
-WHERE json_type = 'object';
- ?column? 
---------- 
- val2
-(1 row)
-
-SELECT test_json -> 2
-FROM test_json
-WHERE json_type = 'scalar';
- ?column? 
----------- 
- 
-(1 row)
-
-SELECT test_json -> 2
-FROM test_json
-WHERE json_type = 'array';
- ?column? 
---------- 
- "two"
-(1 row)
-
-SELECT test_json -> -1
-FROM test_json
-WHERE json_type = 'array';
- ?column? 
---------- 
- {"f1":9}
-(1 row)
-
-SELECT test_json -> 2
-FROM test_json
-WHERE json_type = 'object';
- ?column? 
----------- 
- 
-(1 row)
-
-SELECT test_json->>2
-FROM test_json
-WHERE json_type = 'array';
- ?column? 
---------- 
- two
-(1 row)
-
-SELECT test_json ->> 6 FROM test_json WHERE json_type = 'array';
- ?column? 
---------- 
- [1,2,3]
-(1 row)
-
-SELECT test_json ->> 7 FROM test_json WHERE json_type = 'array';
- ?column? 
---------- 
- {"f1":9}
-(1 row)
-
-SELECT test_json ->> 'field4' FROM test_json WHERE json_type = 'object';
- ?column? 
---------- 
- 4
-(1 row)
-
-SELECT test_json ->> 'field5' FROM test_json WHERE json_type = 'object';
- ?column? 
---------- 
- [1,2,3]
-(1 row)
-
-SELECT test_json ->> 'field6' FROM test_json WHERE json_type = 'object';
- ?column? 
---------- 
- {"f1":9}
-(1 row)
-
-SELECT json_object_keys(test_json)
-FROM test_json
-WHERE json_type = 'scalar';
-ERROR: cannot call json_object_keys on a scalar
-SELECT json_object_keys(test_json)
-FROM test_json
-WHERE json_type = 'array';
-ERROR: cannot call json_object_keys on an array
-SELECT json_object_keys(test_json)
-FROM test_json
-WHERE json_type = 'object';
- json_object_keys 
------------------- 
- field1
- field2
- field3
- field4
- field5
- field6
-(6 rows)
-
--- test extending object_keys resultset - initial resultset size is 256
-select count(*) from
-    (select json_object_keys(json_object(array_agg(g)))
-     from (select unnest(array['f'||n,n::text])as g
-           from generate_series(1,300) as n) x ) y;
- count 
------- 
-   300
-(1 row)
-
--- nulls
-select (test_json->'field3') is null as expect_false
-from test_json
-where json_type = 'object';
- expect_false 
-------------- 
- f
-(1 row)
-
-select (test_json->>'field3') is null as expect_true
-from test_json
-where json_type = 'object';
- expect_true 
------------- 
- t
-(1 row)
-
-select (test_json->3) is null as expect_false
-from test_json
-where json_type = 'array';
- expect_false 
-------------- 
- f
-(1 row)
-
-select (test_json->>3) is null as expect_true
-from test_json
-where json_type = 'array';
- expect_true 
------------- 
- t
-(1 row)
-
--- corner cases
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> null::text;
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> null::int;
- ?column? 
---------- 
- 
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> 1;
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> -1;
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> 'z';
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> '';
- ?column? 
----------- 
- 
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::json -> 1;
-  ?column?   
------------- 
- {"b": "cc"}
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::json -> 3;
- ?column? 
----------- 
- 
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::json -> 'z';
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": "c", "b": null}'::json -> 'b';
- ?column? 
---------- 
- null
-(1 row)
-
-select '"foo"'::json -> 1;
- ?column? 
----------- 
- 
-(1 row)
-
-select '"foo"'::json -> 'z';
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> null::text;
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> null::int;
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> 1;
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> 'z';
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> '';
- ?column? 
----------- 
- 
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::json ->> 1;
-  ?column?   
------------- 
- {"b": "cc"}
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::json ->> 3;
- ?column? 
----------- 
- 
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::json ->> 'z';
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": "c", "b": null}'::json ->> 'b';
- ?column? 
----------- 
- 
-(1 row)
-
-select '"foo"'::json ->> 1;
- ?column? 
----------- 
- 
-(1 row)
-
-select '"foo"'::json ->> 'z';
- ?column? 
---------- 
- 
-(1 row)
-
--- array length
-SELECT json_array_length('[1,2,3,{"f1":1,"f2":[5,6]},4]');
- json_array_length 
------------------- 
- 5
-(1 row)
-
-SELECT json_array_length('[]');
- json_array_length 
------------------- 
- 0
-(1 row)
-
-SELECT json_array_length('{"f1":1,"f2":[5,6]}');
-ERROR: cannot get array length of a non-array
-SELECT json_array_length('4');
-ERROR: cannot get array length of a scalar
--- each
-select json_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null}');
-     json_each     
------------------- 
- (f1,"[1,2,3]")
- (f2,"{""f3"":1}")
- (f4,null)
-(3 rows)
-
-select * from json_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q;
- key |   value   
------+----------- 
- f1  | [1,2,3]
- f2  | {"f3":1}
- f4  | null
- f5  | 99
- f6  | "stringy"
-(5 rows)
-
-select json_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":"null"}');
-  json_each_text   
------------------- 
- (f1,"[1,2,3]")
- (f2,"{""f3"":1}")
- (f4,)
- (f5,null)
-(4 rows)
-
-select * from json_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q;
- key |  value   
------+--------- 
- f1  | [1,2,3]
- f2  | {"f3":1}
- f4  | 
- f5  | 99
- f6  | stringy
-(5 rows)
-
--- extract_path, extract_path_as_text
-select json_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6');
- json_extract_path 
------------------- 
- "stringy"
-(1 row)
-
-select json_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2');
- json_extract_path 
------------------- 
- {"f3":1}
-(1 row)
-
-select json_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text);
- json_extract_path 
------------------- 
- "f3"
-(1 row)
-
-select json_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text);
- json_extract_path 
------------------- 
- 1
-(1 row)
-
-select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6');
- json_extract_path_text 
------------------------ 
- stringy
-(1 row)
-
-select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2');
- json_extract_path_text 
------------------------ 
- {"f3":1}
-(1 row)
-
-select json_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text);
- json_extract_path_text 
------------------------ 
- f3
-(1 row)
-
-select json_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text);
- json_extract_path_text 
------------------------ 
- 1
-(1 row)
-
--- extract_path nulls
-select json_extract_path('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') is null as expect_false;
- expect_false 
------------- 
- f
-(1 row)
-
-select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') is null as expect_true;
- expect_true 
------------ 
- t
-(1 row)
-
-select json_extract_path('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') is null as expect_false;
- expect_false 
------------- 
- f
-(1 row)
-
-select json_extract_path_text('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') is null as expect_true;
- expect_true 
----------- 
- t
-(1 row)
-
--- extract_path operators
-select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f4','f6'];
- ?column?  
---------- 
- "stringy"
-(1 row)
-
-select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f2'];
- ?column? 
---------- 
- {"f3":1}
-(1 row)
-
-select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f2','0'];
- ?column? 
---------- 
- "f3"
-(1 row)
-
-select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f2','1'];
- ?column? 
---------- 
- 1
-(1 row)
-
-select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f4','f6'];
- ?column? 
---------- 
- stringy
-(1 row)
-
-select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f2'];
- ?column? 
---------- 
- {"f3":1}
-(1 row)
-
-select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f2','0'];
- ?column? 
---------- 
- f3
-(1 row)
-
-select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f2','1'];
- ?column? 
---------- 
- 1
-(1 row)
-
--- corner cases for same
-select '{"a": {"b":{"c": "foo"}}}'::json #> '{}';
-         ?column?          
--------------------------- 
- {"a": {"b":{"c": "foo"}}}
-(1 row)
-
-select '[1,2,3]'::json #> '{}';
- ?column? 
---------- 
- [1,2,3]
-(1 row)
-
-select '"foo"'::json #> '{}';
- ?column? 
---------- 
- "foo"
-(1 row)
-
-select '42'::json #> '{}';
- ?column? 
---------- 
- 42
-(1 row)
-
-select 'null'::json #> '{}';
- ?column? 
---------- 
- null
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #> array['a'];
-      ?column?      
-------------------- 
- {"b":{"c": "foo"}}
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #> array['a', null];
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #> array['a', ''];
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','b'];
-   ?column?    
-------------- 
- {"c": "foo"}
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','b','c'];
- ?column? 
---------- 
- "foo"
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','b','c','d'];
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','z','c'];
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #> array['a','1','b'];
- ?column? 
---------- 
- "cc"
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #> array['a','z','b'];
- ?column? 
----------- 
- 
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::json #> array['1','b'];
- ?column? 
---------- 
- "cc"
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::json #> array['z','b'];
- ?column? 
----------- 
- 
-(1 row)
-
-select '[{"b": "c"}, {"b": null}]'::json #> array['1','b'];
- ?column? 
---------- 
- null
-(1 row)
-
-select '"foo"'::json #> array['z'];
- ?column? 
----------- 
- 
-(1 row)
-
-select '42'::json #> array['f2'];
- ?column? 
----------- 
- 
-(1 row)
-
-select '42'::json #> array['0'];
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #>> '{}';
-         ?column?          
--------------------------- 
- {"a": {"b":{"c": "foo"}}}
-(1 row)
-
-select '[1,2,3]'::json #>> '{}';
- ?column? 
---------- 
- [1,2,3]
-(1 row)
-
-select '"foo"'::json #>> '{}';
- ?column? 
---------- 
- foo
-(1 row)
-
-select '42'::json #>> '{}';
- ?column? 
---------- 
- 42
-(1 row)
-
-select 'null'::json #>> '{}';
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a'];
-      ?column?      
------------------- 
- {"b":{"c": "foo"}}
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a', null];
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a', ''];
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','b'];
-   ?column?    
------------- 
- {"c": "foo"}
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','b','c'];
- ?column? 
---------- 
- foo
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','b','c','d'];
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','z','c'];
- ?column? 
----------- 
- 
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #>> array['a','1','b'];
- ?column? 
---------- 
- cc
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #>> array['a','z','b'];
- ?column? 
----------- 
- 
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::json #>> array['1','b'];
- ?column? 
---------- 
- cc
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::json #>> array['z','b'];
- ?column? 
----------- 
- 
-(1 row)
-
-select '[{"b": "c"}, {"b": null}]'::json #>> array['1','b'];
- ?column? 
----------- 
- 
-(1 row)
-
-select '"foo"'::json #>> array['z'];
- ?column? 
----------- 
- 
-(1 row)
-
-select '42'::json #>> array['f2'];
- ?column? 
----------- 
- 
-(1 row)
-
-select '42'::json #>> array['0'];
- ?column? 
----------- 
- 
-(1 row)
-
--- array_elements
-select json_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]');
-  json_array_elements  
----------------------- 
- 1
- true
- [1,[2,3]]
- null
- {"f1":1,"f2":[7,8,9]}
- false
- "stringy"
-(7 rows)
-
-select * from json_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]') q;
-         value         
----------------------- 
- 1
- true
- [1,[2,3]]
- null
- {"f1":1,"f2":[7,8,9]}
- false
- "stringy"
-(7 rows)
-
-select json_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]');
- json_array_elements_text 
-------------------------- 
- 1
- true
- [1,[2,3]]
- 
- {"f1":1,"f2":[7,8,9]}
- false
- stringy
-(7 rows)
-
-select * from json_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]') q;
-         value         
----------------------- 
- 1
- true
- [1,[2,3]]
- 
- {"f1":1,"f2":[7,8,9]}
- false
- stringy
-(7 rows)
-
--- populate_record
-create type jpop as (a text, b int, c timestamp);
-CREATE DOMAIN js_int_not_null  AS int     NOT NULL;
-CREATE DOMAIN js_int_array_1d  AS int[]   CHECK(array_length(VALUE, 1) = 3);
-CREATE DOMAIN js_int_array_2d  AS int[][] CHECK(array_length(VALUE, 2) = 3);
-create type j_unordered_pair as (x int, y int);
-create domain j_ordered_pair as j_unordered_pair check((value).x <= (value).y);
-CREATE TYPE jsrec AS (
-    i    int,
-    ia   _int4,
-    ia1  int[],
-    ia2  int[][],
-    ia3  int[][][],
-    ia1d js_int_array_1d,
-    ia2d js_int_array_2d,
-    t    text,
-    ta   text[],
-    c    char(10),
-    ca   char(10)[],
-    ts   timestamp,
-    js   json,
-    jsb  jsonb,
-    jsa  json[],
-    rec  jpop,
-    reca jpop[]
-);
-CREATE TYPE jsrec_i_not_null AS (
-    i js_int_not_null
-);
-select * from json_populate_record(null::jpop,'{"a":"blurfl","x":43.2}') q;
-   a    | b | c 
---------+---+---
- blurfl |   | 
-(1 row)
-
-select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"a":"blurfl","x":43.2}') q;
-   a    | b |            c             
---------+---+--------------------------
- blurfl | 3 | Mon Dec 31 15:30:56 2012
-(1 row)
-
-select * from json_populate_record(null::jpop,'{"a":"blurfl","x":43.2}') q;
-   a    | b | c 
---------+---+---
- blurfl |   | 
-(1 row)
-
-select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"a":"blurfl","x":43.2}') q;
-   a    | b |            c             
---------+---+--------------------------
- blurfl | 3 | Mon Dec 31 15:30:56 2012
-(1 row)
-
-select * from json_populate_record(null::jpop,'{"a":[100,200,false],"x":43.2}') q;
-        a        | b | c 
------------------+---+---
- [100,200,false] |   | 
-(1 row)
-
-select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"a":[100,200,false],"x":43.2}') q;
-        a        | b |            c             
------------------+---+--------------------------
- [100,200,false] | 3 | Mon Dec 31 15:30:56 2012
-(1 row)
-
-select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"c":[100,200,false],"x":43.2}') q;
-ERROR: invalid input syntax for type timestamp: "[100,200,false]"
-select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{}') q;
- a | b |            c             
----+---+--------------------------
- x | 3 | Mon Dec 31 15:30:56 2012
-(1 row)
-
-SELECT i FROM json_populate_record(NULL::jsrec_i_not_null, '{"x": 43.2}') q;
-ERROR: domain js_int_not_null does not allow null values
-SELECT i FROM json_populate_record(NULL::jsrec_i_not_null, '{"i": null}') q;
-ERROR: domain js_int_not_null does not allow null values
-SELECT i FROM json_populate_record(NULL::jsrec_i_not_null, '{"i": 12345}') q;
-   i   
------- 
- 12345
-(1 row)
-
-SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": null}') q;
- ia 
---- 
- 
-(1 row)
-
-SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "ia".
-SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [1, "2", null, 4]}') q;
-      ia      
-------------- 
- {1,2,NULL,4}
-(1 row)
-
-SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1, 2], [3, 4]]}') q;
-      ia       
-------------- 
- {{1,2},{3,4}}
-(1 row)
-
-SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1], 2]}') q;
-ERROR: expected JSON array
-HINT: See the array element [1] of key "ia".
-SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1], [2, 3]]}') q;
-ERROR: malformed JSON array
-DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
-SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": "{1,2,3}"}') q;
-   ia    
--------- 
- {1,2,3}
-(1 row)
-
-SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": null}') q;
- ia1 
----- 
- 
-(1 row)
-
-SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "ia1".
-SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": [1, "2", null, 4]}') q;
-     ia1      
------------- 
- {1,2,NULL,4}
-(1 row)
-
-SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": [[1, 2, 3]]}') q;
-    ia1    
---------- 
- {{1,2,3}}
-(1 row)
-
-SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": null}') q;
- ia1d 
----- 
- 
-(1 row)
-
-SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "ia1d".
-SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": [1, "2", null, 4]}') q;
-ERROR: value for domain js_int_array_1d violates check constraint "js_int_array_1d_check"
-SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": [1, "2", null]}') q;
-    ia1d    
----------- 
- {1,2,NULL}
-(1 row)
-
-SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [1, "2", null, 4]}') q;
-     ia2      
------------- 
- {1,2,NULL,4}
-(1 row)
-
-SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[1, 2], [null, 4]]}') q;
-       ia2        
----------------- 
- {{1,2},{NULL,4}}
-(1 row)
-
-SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[], []]}') q;
- ia2 
---- 
- {}
-(1 row)
-
-SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[1, 2], [3]]}') q;
-ERROR: malformed JSON array
-DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
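The malformed-array errors just above come from the rectangularity rule json_populate_record() applies when mapping nested JSON arrays onto a multidimensional array column. A minimal standalone sketch of the same behavior (the type name demo_rec is hypothetical, not part of the regression schema):

    -- Hypothetical illustration; demo_rec is not part of the test schema.
    CREATE TYPE demo_rec AS (v int[][]);
    SELECT v FROM json_populate_record(NULL::demo_rec, '{"v": [[1,2],[3,4]]}');
    --        v
    --  ---------------
    --   {{1,2},{3,4}}
    SELECT v FROM json_populate_record(NULL::demo_rec, '{"v": [[1,2],[3]]}');
    --  ERROR: malformed JSON array
    --  DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
    DROP TYPE demo_rec;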
-SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[1, 2], 3, 4]}') q;
-ERROR: expected JSON array
-HINT: See the array element [1] of key "ia2".
-SELECT ia2d FROM json_populate_record(NULL::jsrec, '{"ia2d": [[1, "2"], [null, 4]]}') q;
-ERROR: value for domain js_int_array_2d violates check constraint "js_int_array_2d_check"
-SELECT ia2d FROM json_populate_record(NULL::jsrec, '{"ia2d": [[1, "2", 3], [null, 5, 6]]}') q;
-         ia2d         
--------------------- 
- {{1,2,3},{NULL,5,6}}
-(1 row)
-
-SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [1, "2", null, 4]}') q;
-     ia3      
------------- 
- {1,2,NULL,4}
-(1 row)
-
-SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [[1, 2], [null, 4]]}') q;
-       ia3        
----------------- 
- {{1,2},{NULL,4}}
-(1 row)
-
-SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[], []], [[], []], [[], []] ]}') q;
- ia3 
---- 
- {}
-(1 row)
-
-SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[1, 2]], [[3, 4]] ]}') q;
-        ia3        
------------------ 
- {{{1,2}},{{3,4}}}
-(1 row)
-
-SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8]] ]}') q;
-              ia3              
------------------------------ 
- {{{1,2},{3,4}},{{5,6},{7,8}}}
-(1 row)
-
-SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]] ]}') q;
-ERROR: malformed JSON array
-DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
-SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": null}') q;
- ta 
--- 
- 
-(1 row)
-
-SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "ta".
-SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": [1, "2", null, 4]}') q;
-      ta      
------------- 
- {1,2,NULL,4}
-(1 row)
-
-SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": [[1, 2, 3], {"k": "v"}]}') q;
-ERROR: expected JSON array
-HINT: See the array element [1] of key "ta".
-SELECT c FROM json_populate_record(NULL::jsrec, '{"c": null}') q;
- c 
--- 
- 
-(1 row)
-
-SELECT c FROM json_populate_record(NULL::jsrec, '{"c": "aaa"}') q;
-     c      
----------- 
- aaa       
-(1 row)
-
-SELECT c FROM json_populate_record(NULL::jsrec, '{"c": "aaaaaaaaaa"}') q;
-     c      
----------- 
- aaaaaaaaaa
-(1 row)
-
-SELECT c FROM json_populate_record(NULL::jsrec, '{"c": "aaaaaaaaaaaaa"}') q;
-ERROR: value too long for type character(10)
-SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": null}') q;
- ca 
--- 
- 
-(1 row)
-
-SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "ca".
-SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": [1, "2", null, 4]}') q;
-                      ca                       
---------------------------------------------- 
- {"1         ","2         ",NULL,"4         "}
-(1 row)
-
-SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": ["aaaaaaaaaaaaaaaa"]}') q;
-ERROR: value too long for type character(10)
-SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": [[1, 2, 3], {"k": "v"}]}') q;
-ERROR: expected JSON array
-HINT: See the array element [1] of key "ca".
-SELECT js FROM json_populate_record(NULL::jsrec, '{"js": null}') q;
- js 
--- 
- 
-(1 row)
-
-SELECT js FROM json_populate_record(NULL::jsrec, '{"js": true}') q;
-  js  
---- 
- true
-(1 row)
-
-SELECT js FROM json_populate_record(NULL::jsrec, '{"js": 123.45}') q;
-   js   
------- 
- 123.45
-(1 row)
-
-SELECT js FROM json_populate_record(NULL::jsrec, '{"js": "123.45"}') q;
-    js    
--------- 
- "123.45"
-(1 row)
-
-SELECT js FROM json_populate_record(NULL::jsrec, '{"js": "abc"}') q;
-  js   
----- 
- "abc"
-(1 row)
-
-SELECT js FROM json_populate_record(NULL::jsrec, '{"js": [123, "123", null, {"key": "value"}]}') q;
-                  js                   
------------------------------------- 
- [123, "123", null, {"key": "value"}]
-(1 row)
-
-SELECT js FROM json_populate_record(NULL::jsrec, '{"js": {"a": "bbb", "b": null, "c": 123.45}}') q;
-                  js                   
------------------------------------- 
- {"a": "bbb", "b": null, "c": 123.45}
-(1 row)
-
-SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": null}') q;
- jsb 
---- 
- 
-(1 row)
-
-SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": true}') q;
- jsb  
---- 
- true
-(1 row)
-
-SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": 123.45}') q;
-  jsb   
------- 
- 123.45
-(1 row)
-
-SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": "123.45"}') q;
-   jsb    
--------- 
- "123.45"
-(1 row)
-
-SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": "abc"}') q;
-  jsb  
----- 
- "abc"
-(1 row)
-
-SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": [123, "123", null, {"key": "value"}]}') q;
-                  jsb                  
------------------------------------- 
- [123, "123", null, {"key": "value"}]
-(1 row)
-
-SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": {"a": "bbb", "b": null, "c": 123.45}}') q;
-                  jsb                  
------------------------------------- 
- {"a": "bbb", "b": null, "c": 123.45}
-(1 row)
-
-SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": null}') q;
- jsa 
---- 
- 
-(1 row)
-
-SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "jsa".
-SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": [1, "2", null, 4]}') q;
-        jsa         
------------------- 
- {1,"\"2\"",NULL,4}
-(1 row)
-
-SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": ["aaa", null, [1, 2, "3", {}], { "k" : "v" }]}') q;
-                           jsa                            
--------------------------------------------------------- 
- {"\"aaa\"",NULL,"[1, 2, \"3\", {}]","{ \"k\" : \"v\" }"}
-(1 row)
-
-SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": 123}') q;
-ERROR: cannot call populate_composite on a scalar
-SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": [1, 2]}') q;
-ERROR: cannot call populate_composite on an array
-SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}') q;
-                rec                
---------------------------------- 
- (abc,,"Thu Jan 02 00:00:00 2003")
-(1 row)
-
-SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": "(abc,42,01.02.2003)"}') q;
-                 rec                 
------------------------------------ 
- (abc,42,"Thu Jan 02 00:00:00 2003")
-(1 row)
-
-SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "reca".
-SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": [1, 2]}') q;
-ERROR: cannot call populate_composite on a scalar
-SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}]}') q;
-                          reca                          
------------------------------------------------------- 
- {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"}
-(1 row)
-
-SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": ["(abc,42,01.02.2003)"]}') q;
-                   reca                    
------------------------------------------ 
- {"(abc,42,\"Thu Jan 02 00:00:00 2003\")"}
-(1 row)
-
-SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": "{\"(abc,42,01.02.2003)\"}"}') q;
-                   reca                    
------------------------------------------ 
- {"(abc,42,\"Thu Jan 02 00:00:00 2003\")"}
-(1 row)
-
-SELECT rec FROM json_populate_record(
-    row(NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
-        row('x',3,'2012-12-31 15:30:56')::jpop,NULL)::jsrec,
-    '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}'
-) q;
-                rec                 
----------------------------------- 
- (abc,3,"Thu Jan 02 00:00:00 2003")
-(1 row)
-
--- anonymous record type
-SELECT json_populate_record(null::record, '{"x": 0, "y": 1}');
-ERROR: could not determine row type for result of json_populate_record
-HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list.
-SELECT json_populate_record(row(1,2), '{"f1": 0, "f2": 1}');
- json_populate_record 
---------------------- 
- (0,1)
-(1 row)
-
-SELECT * FROM
-  json_populate_record(null::record, '{"x": 776}') AS (x int, y int);
-  x  | y 
------+---
- 776 | 
-(1 row)
-
--- composite domain
-SELECT json_populate_record(null::j_ordered_pair, '{"x": 0, "y": 1}');
- json_populate_record 
---------------------- 
- (0,1)
-(1 row)
-
-SELECT json_populate_record(row(1,2)::j_ordered_pair, '{"x": 0}');
- json_populate_record 
---------------------- 
- (0,2)
-(1 row)
-
-SELECT json_populate_record(row(1,2)::j_ordered_pair, '{"x": 1, "y": 0}');
-ERROR: value for domain j_ordered_pair violates check constraint "j_ordered_pair_check"
--- populate_recordset
-select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
-   a    | b |            c             
---------+---+--------------------------
- blurfl |   | 
-        | 3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
-   a    | b  |            c             
---------+----+--------------------------
- blurfl | 99 | 
- def    |  3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
-   a    | b |            c             
---------+---+--------------------------
- blurfl |   | 
-        | 3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
-   a    | b  |            c             
---------+----+--------------------------
- blurfl | 99 | 
- def    |  3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
-       a       | b  |            c             
----------------+----+--------------------------
- [100,200,300] | 99 | 
- {"z":true}    |  3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-select * from json_populate_recordset(row('def',99,null)::jpop,'[{"c":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
-ERROR: invalid input syntax for type timestamp: "[100,200,300]"
-create type jpop2 as (a int, b json, c int, d int);
-select * from json_populate_recordset(null::jpop2, '[{"a":2,"c":3,"b":{"z":4},"d":6}]') q;
- a |    b    | c | d 
----+---------+---+---
- 2 | {"z":4} | 3 | 6
-(1 row)
-
-select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
-   a    | b |            c             
---------+---+--------------------------
- blurfl |   | 
-        | 3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
-   a    | b  |            c             
---------+----+--------------------------
- blurfl | 99 | 
- def    |  3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
-       a       | b  |            c             
----------------+----+--------------------------
- [100,200,300] | 99 | 
- {"z":true}    |  3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
--- anonymous record type
-SELECT json_populate_recordset(null::record, '[{"x": 0, "y": 1}]');
-ERROR: could not determine row type for result of json_populate_recordset
-HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list.
-SELECT json_populate_recordset(row(1,2), '[{"f1": 0, "f2": 1}]');
- json_populate_recordset 
------------------------- 
- (0,1)
-(1 row)
-
-SELECT i, json_populate_recordset(row(i,50), '[{"f1":"42"},{"f2":"43"}]')
-FROM (VALUES (1),(2)) v(i);
- i | json_populate_recordset 
----+-------------------------
- 1 | (42,50)
- 1 | (1,43)
- 2 | (42,50)
- 2 | (2,43)
-(4 rows)
-
-SELECT * FROM
-  json_populate_recordset(null::record, '[{"x": 776}]') AS (x int, y int);
-  x  | y 
------+---
- 776 | 
-(1 row)
-
--- empty array is a corner case
-SELECT json_populate_recordset(null::record, '[]');
-ERROR: could not determine row type for result of json_populate_recordset
-HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list.
-SELECT json_populate_recordset(row(1,2), '[]');
- json_populate_recordset 
------------------------- 
-(0 rows)
-
-SELECT * FROM json_populate_recordset(NULL::jpop,'[]') q;
- a | b | c 
----+---+---
-(0 rows)
-
-SELECT * FROM
-  json_populate_recordset(null::record, '[]') AS (x int, y int);
- x | y 
----+---
-(0 rows)
-
--- composite domain
-SELECT json_populate_recordset(null::j_ordered_pair, '[{"x": 0, "y": 1}]');
- json_populate_recordset 
------------------------- 
- (0,1)
-(1 row)
-
-SELECT json_populate_recordset(row(1,2)::j_ordered_pair, '[{"x": 0}, {"y": 3}]');
- json_populate_recordset 
------------------------- 
- (0,2)
- (1,3)
-(2 rows)
-
-SELECT json_populate_recordset(row(1,2)::j_ordered_pair, '[{"x": 1, "y": 0}]');
-ERROR: value for domain j_ordered_pair violates check constraint "j_ordered_pair_check"
--- negative cases where the wrong record type is supplied
-select * from json_populate_recordset(row(0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text);
-ERROR: function return row and query-specified return row do not match
-DETAIL: Returned row contains 1 attribute, but query expects 2.
-select * from json_populate_recordset(row(0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text);
-ERROR: function return row and query-specified return row do not match
-DETAIL: Returned type integer at ordinal position 1, but query expects text.
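The HINT shown earlier for the anonymous-record calls describes the usual way around these mismatches: supply a column definition list so the result row type is fixed up front, instead of inferring it from a template row. A minimal sketch of that form (stock json_populate_recordset, no template value needed):

    -- Sketch: a column definition list fixes the result row type,
    -- avoiding the "return row do not match" errors above.
    SELECT *
    FROM json_populate_recordset(NULL::record,
                                 '[{"a":"1","b":"2"},{"a":"3"}]')
         AS t(a text, b text);
    --   a | b
    --  ---+---
    --   1 | 2
    --   3 |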
-select * from json_populate_recordset(row(0::int,0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); -ERROR: function return row and query-specified return row do not match -DETAIL: Returned row contains 3 attributes, but query expects 2. -select * from json_populate_recordset(row(1000000000::int,50::int),'[{"b":"2"},{"a":"3"}]') q (a text, b text); -ERROR: function return row and query-specified return row do not match -DETAIL: Returned type integer at ordinal position 1, but query expects text. --- test type info caching in json_populate_record() -CREATE TEMP TABLE jspoptest (js json); -INSERT INTO jspoptest -SELECT '{ - "jsa": [1, "2", null, 4], - "rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}, - "reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}] -}'::json -FROM generate_series(1, 3); -SELECT (json_populate_record(NULL::jsrec, js)).* FROM jspoptest; - i | ia | ia1 | ia2 | ia3 | ia1d | ia2d | t | ta | c | ca | ts | js | jsb | jsa | rec | reca ----+----+-----+-----+-----+------+------+---+----+---+----+----+----+-----+--------------------+-----------------------------------+-------------------------------------------------------- - | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} - | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} - | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} -(3 rows) - -DROP TYPE jsrec; -DROP TYPE jsrec_i_not_null; -DROP DOMAIN js_int_not_null; -DROP DOMAIN js_int_array_1d; -DROP DOMAIN js_int_array_2d; -DROP DOMAIN j_ordered_pair; -DROP TYPE j_unordered_pair; ---json_typeof() function -select value, json_typeof(value) - from (values (json '123.4'), - (json '-1'), - (json '"foo"'), - (json 'true'), - (json 'false'), - (json 'null'), - (json '[1, 2, 3]'), - (json '[]'), - (json '{"x":"foo", "y":123}'), - (json '{}'), - (NULL::json)) - as data(value); - value | json_typeof -----------------------+------------- - 123.4 | number - -1 | number - "foo" | string - true | boolean - false | boolean - null | null - [1, 2, 3] | array - [] | array - {"x":"foo", "y":123} | object - {} | object - | -(11 rows) - --- json_build_array, json_build_object, json_object_agg -SELECT json_build_array('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); - json_build_array ------------------------------------------------------------------------ - ["a", 1, "b", 1.2, "c", true, "d", null, "e", {"x": 3, "y": [1,2,3]}] -(1 row) - -SELECT json_build_array('a', NULL); -- ok - json_build_array ------------------- - ["a", null] -(1 row) - -SELECT json_build_array(VARIADIC NULL::text[]); -- ok - json_build_array ------------------- - -(1 row) - -SELECT json_build_array(VARIADIC '{}'::text[]); -- ok - json_build_array ------------------- - [] -(1 row) - -SELECT json_build_array(VARIADIC '{a,b,c}'::text[]); -- ok - json_build_array ------------------- - ["a", "b", "c"] -(1 row) - -SELECT json_build_array(VARIADIC ARRAY['a', NULL]::text[]); -- ok - json_build_array ------------------- - ["a", null] -(1 row) - -SELECT json_build_array(VARIADIC '{1,2,3,4}'::text[]); -- ok - json_build_array ----------------------- - ["1", "2", "3", "4"] -(1 row) - -SELECT json_build_array(VARIADIC '{1,2,3,4}'::int[]); -- ok - json_build_array ------------------- - [1, 2, 3, 4] -(1 row) - -SELECT 
json_build_array(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok - json_build_array --------------------- - [1, 4, 2, 5, 3, 6] -(1 row) - -SELECT json_build_object('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); - json_build_object ----------------------------------------------------------------------------- - {"a" : 1, "b" : 1.2, "c" : true, "d" : null, "e" : {"x": 3, "y": [1,2,3]}} -(1 row) - -SELECT json_build_object( - 'a', json_build_object('b',false,'c',99), - 'd', json_build_object('e',array[9,8,7]::int[], - 'f', (select row_to_json(r) from ( select relkind, oid::regclass as name from pg_class where relname = 'pg_class') r))); - json_build_object -------------------------------------------------------------------------------------------------- - {"a" : {"b" : false, "c" : 99}, "d" : {"e" : [9,8,7], "f" : {"relkind":"r","name":"pg_class"}}} -(1 row) - -SELECT json_build_object('{a,b,c}'::text[]); -- error -ERROR: argument list must have even number of elements -HINT: The arguments of json_build_object() must consist of alternating keys and values. -SELECT json_build_object('{a,b,c}'::text[], '{d,e,f}'::text[]); -- error, key cannot be array -ERROR: key value must be scalar, not array, composite, or json -SELECT json_build_object('a', 'b', 'c'); -- error -ERROR: argument list must have even number of elements -HINT: The arguments of json_build_object() must consist of alternating keys and values. -SELECT json_build_object(NULL, 'a'); -- error, key cannot be NULL -ERROR: null value not allowed for object key -SELECT json_build_object('a', NULL); -- ok - json_build_object -------------------- - {"a" : null} -(1 row) - -SELECT json_build_object(VARIADIC NULL::text[]); -- ok - json_build_object -------------------- - -(1 row) - -SELECT json_build_object(VARIADIC '{}'::text[]); -- ok - json_build_object -------------------- - {} -(1 row) - -SELECT json_build_object(VARIADIC '{a,b,c}'::text[]); -- error -ERROR: argument list must have even number of elements -HINT: The arguments of json_build_object() must consist of alternating keys and values. 
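For reference, the VARIADIC cases in the hunk above all follow one pairing rule: an even-length array is unrolled into alternating key/value arguments. A small sketch under that assumption, with illustrative keys and values:

-- Direct arguments and VARIADIC unrolling both build a two-key object;
-- an odd-length array instead raises "argument list must have even number
-- of elements", as shown in the expected output above.
SELECT json_build_object('a', 1, 'b', 2);                          -- {"a" : 1, "b" : 2}
SELECT json_build_object(VARIADIC ARRAY['a','1','b','2']::text[]); -- {"a" : "1", "b" : "2"}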
-SELECT json_build_object(VARIADIC ARRAY['a', NULL]::text[]); -- ok - json_build_object -------------------- - {"a" : null} -(1 row) - -SELECT json_build_object(VARIADIC ARRAY[NULL, 'a']::text[]); -- error, key cannot be NULL -ERROR: null value not allowed for object key -SELECT json_build_object(VARIADIC '{1,2,3,4}'::text[]); -- ok - json_build_object ------------------------- - {"1" : "2", "3" : "4"} -(1 row) - -SELECT json_build_object(VARIADIC '{1,2,3,4}'::int[]); -- ok - json_build_object --------------------- - {"1" : 2, "3" : 4} -(1 row) - -SELECT json_build_object(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok - json_build_object ------------------------------ - {"1" : 4, "2" : 5, "3" : 6} -(1 row) - --- empty objects/arrays -SELECT json_build_array(); - json_build_array ------------------- - [] -(1 row) - -SELECT json_build_object(); - json_build_object -------------------- - {} -(1 row) - --- make sure keys are quoted -SELECT json_build_object(1,2); - json_build_object -------------------- - {"1" : 2} -(1 row) - --- keys must be scalar and not null -SELECT json_build_object(null,2); -ERROR: null value not allowed for object key -SELECT json_build_object(r,2) FROM (SELECT 1 AS a, 2 AS b) r; -ERROR: key value must be scalar, not array, composite, or json -SELECT json_build_object(json '{"a":1,"b":2}', 3); -ERROR: key value must be scalar, not array, composite, or json -SELECT json_build_object('{1,2,3}'::int[], 3); -ERROR: key value must be scalar, not array, composite, or json -CREATE TEMP TABLE foo (serial_num int, name text, type text); -INSERT INTO foo VALUES (847001,'t15','GE1043'); -INSERT INTO foo VALUES (847002,'t16','GE1043'); -INSERT INTO foo VALUES (847003,'sub-alpha','GESS90'); -SELECT json_build_object('turbines',json_object_agg(serial_num,json_build_object('name',name,'type',type))) -FROM foo; - json_build_object -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - {"turbines" : { "847001" : {"name" : "t15", "type" : "GE1043"}, "847002" : {"name" : "t16", "type" : "GE1043"}, "847003" : {"name" : "sub-alpha", "type" : "GESS90"} }} -(1 row) - -SELECT json_object_agg(name, type) FROM foo; - json_object_agg ----------------------------------------------------------------- - { "t15" : "GE1043", "t16" : "GE1043", "sub-alpha" : "GESS90" } -(1 row) - -INSERT INTO foo VALUES (999999, NULL, 'bar'); -SELECT json_object_agg(name, type) FROM foo; -ERROR: null value not allowed for object key --- json_object --- empty object, one dimension -SELECT json_object('{}'); - json_object -------------- - {} -(1 row) - --- empty object, two dimensions -SELECT json_object('{}', '{}'); - json_object -------------- - {} -(1 row) - --- one dimension -SELECT json_object('{a,1,b,2,3,NULL,"d e f","a b c"}'); - json_object -------------------------------------------------------- - {"a" : "1", "b" : "2", "3" : null, "d e f" : "a b c"} -(1 row) - --- same but with two dimensions -SELECT json_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}'); - json_object -------------------------------------------------------- - {"a" : "1", "b" : "2", "3" : null, "d e f" : "a b c"} -(1 row) - --- odd number error -SELECT json_object('{a,b,c}'); -ERROR: array must have even number of elements --- one column error -SELECT json_object('{{a},{b}}'); -ERROR: array must have two columns --- too many columns error -SELECT json_object('{{a,b,c},{b,c,d}}'); -ERROR: array must have two columns --- too many 
dimensions error -SELECT json_object('{{{a,b},{c,d}},{{b,c},{d,e}}}'); -ERROR: wrong number of array subscripts ---two argument form of json_object -select json_object('{a,b,c,"d e f"}','{1,2,3,"a b c"}'); - json_object ------------------------------------------------------- - {"a" : "1", "b" : "2", "c" : "3", "d e f" : "a b c"} -(1 row) - --- too many dimensions -SELECT json_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}', '{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}'); -ERROR: wrong number of array subscripts --- mismatched dimensions -select json_object('{a,b,c,"d e f",g}','{1,2,3,"a b c"}'); -ERROR: mismatched array dimensions -select json_object('{a,b,c,"d e f"}','{1,2,3,"a b c",g}'); -ERROR: mismatched array dimensions --- null key error -select json_object('{a,b,NULL,"d e f"}','{1,2,3,"a b c"}'); -ERROR: null value not allowed for object key --- empty key is allowed -select json_object('{a,b,"","d e f"}','{1,2,3,"a b c"}'); - json_object ------------------------------------------------------ - {"a" : "1", "b" : "2", "" : "3", "d e f" : "a b c"} -(1 row) - --- json_object_agg_unique requires unique keys -select json_object_agg_unique(mod(i,100), i) from generate_series(0, 199) i; -ERROR: duplicate JSON object key value: "0" --- json_to_record and json_to_recordset -select * from json_to_record('{"a":1,"b":"foo","c":"bar"}') - as x(a int, b text, d text); - a | b | d ----+-----+--- - 1 | foo | -(1 row) - -select * from json_to_recordset('[{"a":1,"b":"foo","d":false},{"a":2,"b":"bar","c":true}]') - as x(a int, b text, c boolean); - a | b | c ----+-----+--- - 1 | foo | - 2 | bar | t -(2 rows) - -select * from json_to_recordset('[{"a":1,"b":{"d":"foo"},"c":true},{"a":2,"c":false,"b":{"d":"bar"}}]') - as x(a int, b json, c boolean); - a | b | c ----+-------------+--- - 1 | {"d":"foo"} | t - 2 | {"d":"bar"} | f -(2 rows) - -select *, c is null as c_is_null -from json_to_record('{"a":1, "b":{"c":16, "d":2}, "x":8, "ca": ["1 2", 3], "ia": [[1,2],[3,4]], "r": {"a": "aaa", "b": 123}}'::json) - as t(a int, b json, c text, x int, ca char(5)[], ia int[][], r jpop); - a | b | c | x | ca | ia | r | c_is_null ----+-----------------+---+---+-------------------+---------------+------------+----------- - 1 | {"c":16, "d":2} | | 8 | {"1 2 ","3 "} | {{1,2},{3,4}} | (aaa,123,) | t -(1 row) - -select *, c is null as c_is_null -from json_to_recordset('[{"a":1, "b":{"c":16, "d":2}, "x":8}]'::json) - as t(a int, b json, c text, x int); - a | b | c | x | c_is_null ----+-----------------+---+---+----------- - 1 | {"c":16, "d":2} | | 8 | t -(1 row) - -select * from json_to_record('{"ia": null}') as x(ia _int4); - ia ----- - -(1 row) - -select * from json_to_record('{"ia": 123}') as x(ia _int4); -ERROR: expected JSON array -HINT: See the value of key "ia". -select * from json_to_record('{"ia": [1, "2", null, 4]}') as x(ia _int4); - ia --------------- - {1,2,NULL,4} -(1 row) - -select * from json_to_record('{"ia": [[1, 2], [3, 4]]}') as x(ia _int4); - ia ---------------- - {{1,2},{3,4}} -(1 row) - -select * from json_to_record('{"ia": [[1], 2]}') as x(ia _int4); -ERROR: expected JSON array -HINT: See the array element [1] of key "ia". -select * from json_to_record('{"ia": [[1], [2, 3]]}') as x(ia _int4); -ERROR: malformed JSON array -DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. 
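For reference, the array-valued json_to_record() cases above reduce to one rule: a JSON array is coerced to the declared SQL array type only when its nesting is regular. A short sketch of both sides of that rule; the payloads are illustrative:

-- Regular nesting coerces cleanly to int[]; ragged sub-arrays fail with
-- "malformed JSON array", matching the expected output above.
SELECT * FROM json_to_record('{"ia": [[1,2],[3,4]]}') AS x(ia int[]);  -- {{1,2},{3,4}}
SELECT * FROM json_to_record('{"ia": [[1],[2,3]]}') AS x(ia int[]);    -- error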
-select * from json_to_record('{"ia2": [1, 2, 3]}') as x(ia2 int[][]); - ia2 ---------- - {1,2,3} -(1 row) - -select * from json_to_record('{"ia2": [[1, 2], [3, 4]]}') as x(ia2 int4[][]); - ia2 ---------------- - {{1,2},{3,4}} -(1 row) - -select * from json_to_record('{"ia2": [[[1], [2], [3]]]}') as x(ia2 int4[][]); - ia2 ------------------ - {{{1},{2},{3}}} -(1 row) - -select * from json_to_record('{"out": {"key": 1}}') as x(out json); - out ------------- - {"key": 1} -(1 row) - -select * from json_to_record('{"out": [{"key": 1}]}') as x(out json); - out --------------- - [{"key": 1}] -(1 row) - -select * from json_to_record('{"out": "{\"key\": 1}"}') as x(out json); - out ----------------- - "{\"key\": 1}" -(1 row) - -select * from json_to_record('{"out": {"key": 1}}') as x(out jsonb); - out ------------- - {"key": 1} -(1 row) - -select * from json_to_record('{"out": [{"key": 1}]}') as x(out jsonb); - out --------------- - [{"key": 1}] -(1 row) - -select * from json_to_record('{"out": "{\"key\": 1}"}') as x(out jsonb); - out ----------------- - "{\"key\": 1}" -(1 row) - --- json_strip_nulls -select json_strip_nulls(null); - json_strip_nulls ------------------- - -(1 row) - -select json_strip_nulls('1'); - json_strip_nulls ------------------- - 1 -(1 row) - -select json_strip_nulls('"a string"'); - json_strip_nulls ------------------- - "a string" -(1 row) - -select json_strip_nulls('null'); - json_strip_nulls ------------------- - null -(1 row) - -select json_strip_nulls('[1,2,null,3,4]'); - json_strip_nulls ------------------- - [1,2,null,3,4] -(1 row) - -select json_strip_nulls('{"a":1,"b":null,"c":[2,null,3],"d":{"e":4,"f":null}}'); - json_strip_nulls ------------------------------------- - {"a":1,"c":[2,null,3],"d":{"e":4}} -(1 row) - -select json_strip_nulls('[1,{"a":1,"b":null,"c":2},3]'); - json_strip_nulls ---------------------- - [1,{"a":1,"c":2},3] -(1 row) - --- an empty object is not null and should not be stripped -select json_strip_nulls('{"a": {"b": null, "c": null}, "d": {} }'); - json_strip_nulls ------------------- - {"a":{},"d":{}} -(1 row) - --- json to tsvector -select to_tsvector('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::json); - to_tsvector ---------------------------------------------------------------------------- - 'aaa':1 'bbb':2 'ccc':4 'ddd':3 'eee':6 'fff':7 'ggg':8 'hhh':10 'iii':11 -(1 row) - --- json to tsvector with config -select to_tsvector('simple', '{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::json); - to_tsvector ---------------------------------------------------------------------------- - 'aaa':1 'bbb':2 'ccc':4 'ddd':3 'eee':6 'fff':7 'ggg':8 'hhh':10 'iii':11 -(1 row) - --- json to tsvector with stop words -select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": ["the eee fff ggg"], "c": {"d": "hhh. 
iii"}}'::json); - to_tsvector ----------------------------------------------------------------------------- - 'aaa':1 'bbb':3 'ccc':5 'ddd':4 'eee':8 'fff':9 'ggg':10 'hhh':12 'iii':13 -(1 row) - --- json to tsvector with numeric values -select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": 123, "c": 456}'::json); - to_tsvector ---------------------------------- - 'aaa':1 'bbb':3 'ccc':5 'ddd':4 -(1 row) - --- json_to_tsvector -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"all"'); - json_to_tsvector ----------------------------------------------------------------------------------------- - '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"key"'); - json_to_tsvector --------------------------------- - 'b':2 'c':4 'd':6 'f':8 'g':10 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"string"'); - json_to_tsvector ------------------- - 'aaa':1 'bbb':3 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"numeric"'); - json_to_tsvector ------------------- - '123':1 '456':3 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"boolean"'); - json_to_tsvector -------------------- - 'fals':3 'true':1 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["string", "numeric"]'); - json_to_tsvector ---------------------------------- - '123':5 '456':7 'aaa':1 'bbb':3 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"all"'); - json_to_tsvector ----------------------------------------------------------------------------------------- - '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"key"'); - json_to_tsvector --------------------------------- - 'b':2 'c':4 'd':6 'f':8 'g':10 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"string"'); - json_to_tsvector ------------------- - 'aaa':1 'bbb':3 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"numeric"'); - json_to_tsvector ------------------- - '123':1 '456':3 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"boolean"'); - json_to_tsvector -------------------- - 'fals':3 'true':1 -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["string", "numeric"]'); - json_to_tsvector ---------------------------------- - '123':5 '456':7 'aaa':1 'bbb':3 -(1 row) - --- to_tsvector corner cases -select to_tsvector('""'::json); - to_tsvector -------------- - -(1 row) - -select to_tsvector('{}'::json); - to_tsvector -------------- - -(1 row) - -select to_tsvector('[]'::json); - to_tsvector -------------- - -(1 row) - -select 
to_tsvector('null'::json); - to_tsvector -------------- - -(1 row) - --- json_to_tsvector corner cases -select json_to_tsvector('""'::json, '"all"'); - json_to_tsvector ------------------- - -(1 row) - -select json_to_tsvector('{}'::json, '"all"'); - json_to_tsvector ------------------- - -(1 row) - -select json_to_tsvector('[]'::json, '"all"'); - json_to_tsvector ------------------- - -(1 row) - -select json_to_tsvector('null'::json, '"all"'); - json_to_tsvector ------------------- - -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '""'); -ERROR: wrong flag in flag array: "" -HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '{}'); -ERROR: wrong flag type, only arrays and scalars are allowed -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '[]'); - json_to_tsvector ------------------- - -(1 row) - -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, 'null'); -ERROR: flag array element is not a string -HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". -select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["all", null]'); -ERROR: flag array element is not a string -HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". --- ts_headline for json -select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh')); - ts_headline ---------------------------------------------------------------------------------------------------------- - {"a":"aaa bbb","b":{"c":"ccc ddd fff","c1":"ccc1 ddd1"},"d":["ggg hhh","iii jjj"]} -(1 row) - -select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh')); - ts_headline ----------------------------------------------------------------------------------------- - {"a":"aaa bbb","b":{"c":"ccc ddd fff"},"d":["ggg hhh","iii jjj"]} -(1 row) - -select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >'); - ts_headline ------------------------------------------------------------------------------------------- - {"a":"aaa ","b":{"c":"ccc fff","c1":"ccc1 ddd1"},"d":["ggg ","iii jjj"]} -(1 row) - -select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >'); - ts_headline ------------------------------------------------------------------------------------------- - {"a":"aaa ","b":{"c":"ccc fff","c1":"ccc1 ddd1"},"d":["ggg ","iii jjj"]} -(1 row) - --- corner cases for ts_headline with json -select ts_headline('null'::json, tsquery('aaa & bbb')); - ts_headline -------------- - null -(1 row) - -select ts_headline('{}'::json, tsquery('aaa & bbb')); - ts_headline -------------- - {} -(1 row) - -select ts_headline('[]'::json, tsquery('aaa & bbb')); - ts_headline -------------- - [] -(1 row) - +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" 
failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/jsonb.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/jsonb.out --- /Users/admin/pgsql/src/test/regress/expected/jsonb.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/jsonb.out 2024-12-13 13:20:12 @@ -1,5717 +1,2 @@ --- directory paths are passed to us in environment variables -\getenv abs_srcdir PG_ABS_SRCDIR -CREATE TABLE testjsonb ( - j jsonb -); -\set filename :abs_srcdir '/data/jsonb.data' -COPY testjsonb FROM :'filename'; --- Strings. -SELECT '""'::jsonb; -- OK. - jsonb -------- - "" -(1 row) - -SELECT $$''$$::jsonb; -- ERROR, single quotes are not allowed -ERROR: invalid input syntax for type json -LINE 1: SELECT $$''$$::jsonb; - ^ -DETAIL: Token "'" is invalid. -CONTEXT: JSON data, line 1: '... -SELECT '"abc"'::jsonb; -- OK - jsonb -------- - "abc" -(1 row) - -SELECT '"abc'::jsonb; -- ERROR, quotes not closed -ERROR: invalid input syntax for type json -LINE 1: SELECT '"abc'::jsonb; - ^ -DETAIL: Token ""abc" is invalid. -CONTEXT: JSON data, line 1: "abc -SELECT '"abc -def"'::jsonb; -- ERROR, unescaped newline in string constant -ERROR: invalid input syntax for type json -LINE 1: SELECT '"abc - ^ -DETAIL: Character with value 0x0a must be escaped. -CONTEXT: JSON data, line 1: "abc -SELECT '"\n\"\\"'::jsonb; -- OK, legal escapes - jsonb ----------- - "\n\"\\" -(1 row) - -SELECT '"\v"'::jsonb; -- ERROR, not a valid JSON escape -ERROR: invalid input syntax for type json -LINE 1: SELECT '"\v"'::jsonb; - ^ -DETAIL: Escape sequence "\v" is invalid. -CONTEXT: JSON data, line 1: "\v... --- see json_encoding test for input with unicode escapes --- Numbers. -SELECT '1'::jsonb; -- OK - jsonb -------- - 1 -(1 row) - -SELECT '0'::jsonb; -- OK - jsonb -------- - 0 -(1 row) - -SELECT '01'::jsonb; -- ERROR, not valid according to JSON spec -ERROR: invalid input syntax for type json -LINE 1: SELECT '01'::jsonb; - ^ -DETAIL: Token "01" is invalid. -CONTEXT: JSON data, line 1: 01 -SELECT '0.1'::jsonb; -- OK - jsonb -------- - 0.1 -(1 row) - -SELECT '9223372036854775808'::jsonb; -- OK, even though it's too large for int8 - jsonb ---------------------- - 9223372036854775808 -(1 row) - -SELECT '1e100'::jsonb; -- OK - jsonb -------------------------------------------------------------------------------------------------------- - 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 -(1 row) - -SELECT '1.3e100'::jsonb; -- OK - jsonb -------------------------------------------------------------------------------------------------------- - 13000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 -(1 row) - -SELECT '1f2'::jsonb; -- ERROR -ERROR: invalid input syntax for type json -LINE 1: SELECT '1f2'::jsonb; - ^ -DETAIL: Token "1f2" is invalid. -CONTEXT: JSON data, line 1: 1f2 -SELECT '0.x1'::jsonb; -- ERROR -ERROR: invalid input syntax for type json -LINE 1: SELECT '0.x1'::jsonb; - ^ -DETAIL: Token "0.x1" is invalid. -CONTEXT: JSON data, line 1: 0.x1 -SELECT '1.3ex100'::jsonb; -- ERROR -ERROR: invalid input syntax for type json -LINE 1: SELECT '1.3ex100'::jsonb; - ^ -DETAIL: Token "1.3ex100" is invalid. -CONTEXT: JSON data, line 1: 1.3ex100 --- Arrays. 
-SELECT '[]'::jsonb; -- OK - jsonb -------- - [] -(1 row) - -SELECT '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]'::jsonb; -- OK - jsonb ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]] -(1 row) - -SELECT '[1,2]'::jsonb; -- OK - jsonb --------- - [1, 2] -(1 row) - -SELECT '[1,2,]'::jsonb; -- ERROR, trailing comma -ERROR: invalid input syntax for type json -LINE 1: SELECT '[1,2,]'::jsonb; - ^ -DETAIL: Expected JSON value, but found "]". -CONTEXT: JSON data, line 1: [1,2,] -SELECT '[1,2'::jsonb; -- ERROR, no closing bracket -ERROR: invalid input syntax for type json -LINE 1: SELECT '[1,2'::jsonb; - ^ -DETAIL: The input string ended unexpectedly. -CONTEXT: JSON data, line 1: [1,2 -SELECT '[1,[2]'::jsonb; -- ERROR, no closing bracket -ERROR: invalid input syntax for type json -LINE 1: SELECT '[1,[2]'::jsonb; - ^ -DETAIL: The input string ended unexpectedly. -CONTEXT: JSON data, line 1: [1,[2] --- Objects. -SELECT '{}'::jsonb; -- OK - jsonb -------- - {} -(1 row) - -SELECT '{"abc"}'::jsonb; -- ERROR, no value -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc"}'::jsonb; - ^ -DETAIL: Expected ":", but found "}". -CONTEXT: JSON data, line 1: {"abc"} -SELECT '{"abc":1}'::jsonb; -- OK - jsonb ------------- - {"abc": 1} -(1 row) - -SELECT '{1:"abc"}'::jsonb; -- ERROR, keys must be strings -ERROR: invalid input syntax for type json -LINE 1: SELECT '{1:"abc"}'::jsonb; - ^ -DETAIL: Expected string or "}", but found "1". -CONTEXT: JSON data, line 1: {1... -SELECT '{"abc",1}'::jsonb; -- ERROR, wrong separator -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc",1}'::jsonb; - ^ -DETAIL: Expected ":", but found ",". -CONTEXT: JSON data, line 1: {"abc",... -SELECT '{"abc"=1}'::jsonb; -- ERROR, totally wrong separator -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc"=1}'::jsonb; - ^ -DETAIL: Token "=" is invalid. -CONTEXT: JSON data, line 1: {"abc"=... -SELECT '{"abc"::1}'::jsonb; -- ERROR, another wrong separator -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc"::1}'::jsonb; - ^ -DETAIL: Expected JSON value, but found ":". -CONTEXT: JSON data, line 1: {"abc"::... -SELECT '{"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}}'::jsonb; -- OK - jsonb --------------------------------------------------------------------- - {"abc": 1, "def": 2, "ghi": [3, 4], "hij": {"klm": 5, "nop": [6]}} -(1 row) - -SELECT '{"abc":1:2}'::jsonb; -- ERROR, colon in wrong spot -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc":1:2}'::jsonb; - ^ -DETAIL: Expected "," or "}", but found ":". -CONTEXT: JSON data, line 1: {"abc":1:... -SELECT '{"abc":1,3}'::jsonb; -- ERROR, no value -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc":1,3}'::jsonb; - ^ -DETAIL: Expected string, but found "3". -CONTEXT: JSON data, line 1: {"abc":1,3... --- Recursion. 
-SET max_stack_depth = '100kB'; -SELECT repeat('[', 10000)::jsonb; -ERROR: stack depth limit exceeded -HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate. -SELECT repeat('{"a":', 10000)::jsonb; -ERROR: stack depth limit exceeded -HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate. -RESET max_stack_depth; --- Miscellaneous stuff. -SELECT 'true'::jsonb; -- OK - jsonb -------- - true -(1 row) - -SELECT 'false'::jsonb; -- OK - jsonb -------- - false -(1 row) - -SELECT 'null'::jsonb; -- OK - jsonb -------- - null -(1 row) - -SELECT ' true '::jsonb; -- OK, even with extra whitespace - jsonb -------- - true -(1 row) - -SELECT 'true false'::jsonb; -- ERROR, too many values -ERROR: invalid input syntax for type json -LINE 1: SELECT 'true false'::jsonb; - ^ -DETAIL: Expected end of input, but found "false". -CONTEXT: JSON data, line 1: true false -SELECT 'true, false'::jsonb; -- ERROR, too many values -ERROR: invalid input syntax for type json -LINE 1: SELECT 'true, false'::jsonb; - ^ -DETAIL: Expected end of input, but found ",". -CONTEXT: JSON data, line 1: true,... -SELECT 'truf'::jsonb; -- ERROR, not a keyword -ERROR: invalid input syntax for type json -LINE 1: SELECT 'truf'::jsonb; - ^ -DETAIL: Token "truf" is invalid. -CONTEXT: JSON data, line 1: truf -SELECT 'trues'::jsonb; -- ERROR, not a keyword -ERROR: invalid input syntax for type json -LINE 1: SELECT 'trues'::jsonb; - ^ -DETAIL: Token "trues" is invalid. -CONTEXT: JSON data, line 1: trues -SELECT ''::jsonb; -- ERROR, no value -ERROR: invalid input syntax for type json -LINE 1: SELECT ''::jsonb; - ^ -DETAIL: The input string ended unexpectedly. -CONTEXT: JSON data, line 1: -SELECT ' '::jsonb; -- ERROR, no value -ERROR: invalid input syntax for type json -LINE 1: SELECT ' '::jsonb; - ^ -DETAIL: The input string ended unexpectedly. -CONTEXT: JSON data, line 1: --- Multi-line JSON input to check ERROR reporting -SELECT '{ - "one": 1, - "two":"two", - "three": - true}'::jsonb; -- OK - jsonb ------------------------------------------ - {"one": 1, "two": "two", "three": true} -(1 row) - -SELECT '{ - "one": 1, - "two":,"two", -- ERROR extraneous comma before field "two" - "three": - true}'::jsonb; -ERROR: invalid input syntax for type json -LINE 1: SELECT '{ - ^ -DETAIL: Expected JSON value, but found ",". -CONTEXT: JSON data, line 3: "two":,... -SELECT '{ - "one": 1, - "two":"two", - "averyveryveryveryveryveryveryveryveryverylongfieldname":}'::jsonb; -ERROR: invalid input syntax for type json -LINE 1: SELECT '{ - ^ -DETAIL: Expected JSON value, but found "}". -CONTEXT: JSON data, line 4: ...yveryveryveryveryveryveryveryverylongfieldname":} --- ERROR missing value for last field --- test non-error-throwing input -select pg_input_is_valid('{"a":true}', 'jsonb'); - pg_input_is_valid -------------------- - t -(1 row) - -select pg_input_is_valid('{"a":true', 'jsonb'); - pg_input_is_valid -------------------- - f -(1 row) - -select * from pg_input_error_info('{"a":true', 'jsonb'); - message | detail | hint | sql_error_code -------------------------------------+--------------------------------------+------+---------------- - invalid input syntax for type json | The input string ended unexpectedly. 
| | 22P02 -(1 row) - -select * from pg_input_error_info('{"a":1e1000000}', 'jsonb'); - message | detail | hint | sql_error_code ---------------------------------+--------+------+---------------- - value overflows numeric format | | | 22003 -(1 row) - --- make sure jsonb is passed through json generators without being escaped -SELECT array_to_json(ARRAY [jsonb '{"a":1}', jsonb '{"b":[2,3]}']); - array_to_json --------------------------- - [{"a": 1},{"b": [2, 3]}] -(1 row) - --- anyarray column -CREATE TEMP TABLE rows AS -SELECT x, 'txt' || x as y -FROM generate_series(1,3) AS x; -analyze rows; -select attname, to_jsonb(histogram_bounds) histogram_bounds -from pg_stats -where tablename = 'rows' and - schemaname = pg_my_temp_schema()::regnamespace::text -order by 1; - attname | histogram_bounds ----------+-------------------------- - x | [1, 2, 3] - y | ["txt1", "txt2", "txt3"] -(2 rows) - --- to_jsonb, timestamps -select to_jsonb(timestamp '2014-05-28 12:22:35.614298'); - to_jsonb ------------------------------- - "2014-05-28T12:22:35.614298" -(1 row) - -BEGIN; -SET LOCAL TIME ZONE 10.5; -select to_jsonb(timestamptz '2014-05-28 12:22:35.614298-04'); - to_jsonb ------------------------------------- - "2014-05-29T02:52:35.614298+10:30" -(1 row) - -SET LOCAL TIME ZONE -8; -select to_jsonb(timestamptz '2014-05-28 12:22:35.614298-04'); - to_jsonb ------------------------------------- - "2014-05-28T08:22:35.614298-08:00" -(1 row) - -COMMIT; -select to_jsonb(date '2014-05-28'); - to_jsonb --------------- - "2014-05-28" -(1 row) - -select to_jsonb(date 'Infinity'); - to_jsonb ------------- - "infinity" -(1 row) - -select to_jsonb(date '-Infinity'); - to_jsonb -------------- - "-infinity" -(1 row) - -select to_jsonb(timestamp 'Infinity'); - to_jsonb ------------- - "infinity" -(1 row) - -select to_jsonb(timestamp '-Infinity'); - to_jsonb -------------- - "-infinity" -(1 row) - -select to_jsonb(timestamptz 'Infinity'); - to_jsonb ------------- - "infinity" -(1 row) - -select to_jsonb(timestamptz '-Infinity'); - to_jsonb -------------- - "-infinity" -(1 row) - ---jsonb_agg -SELECT jsonb_agg(q) - FROM ( SELECT $$a$$ || x AS b, y AS c, - ARRAY[ROW(x.*,ARRAY[1,2,3]), - ROW(y.*,ARRAY[4,5,6])] AS z - FROM generate_series(1,2) x, - generate_series(4,5) y) q; - jsonb_agg --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - [{"b": "a1", "c": 4, "z": [{"f1": 1, "f2": [1, 2, 3]}, {"f1": 4, "f2": [4, 5, 6]}]}, {"b": "a1", "c": 5, "z": [{"f1": 1, "f2": [1, 2, 3]}, {"f1": 5, "f2": [4, 5, 6]}]}, {"b": "a2", "c": 4, "z": [{"f1": 2, "f2": [1, 2, 3]}, {"f1": 4, "f2": [4, 5, 6]}]}, {"b": "a2", "c": 5, "z": [{"f1": 2, "f2": [1, 2, 3]}, {"f1": 5, "f2": [4, 5, 6]}]}] -(1 row) - -SELECT jsonb_agg(q ORDER BY x, y) - FROM rows q; - jsonb_agg ------------------------------------------------------------------------ - [{"x": 1, "y": "txt1"}, {"x": 2, "y": "txt2"}, {"x": 3, "y": "txt3"}] -(1 row) - -UPDATE rows SET x = NULL WHERE x = 1; -SELECT jsonb_agg(q ORDER BY x NULLS FIRST, y) - FROM rows q; - jsonb_agg --------------------------------------------------------------------------- - [{"x": null, "y": "txt1"}, {"x": 2, "y": "txt2"}, {"x": 3, "y": "txt3"}] -(1 row) - --- jsonb extraction functions -CREATE TEMP TABLE test_jsonb ( - json_type 
text, - test_json jsonb -); -INSERT INTO test_jsonb VALUES -('scalar','"a scalar"'), -('array','["zero", "one","two",null,"four","five", [1,2,3],{"f1":9}]'), -('object','{"field1":"val1","field2":"val2","field3":null, "field4": 4, "field5": [1,2,3], "field6": {"f1":9}}'); -SELECT test_json -> 'x' FROM test_jsonb WHERE json_type = 'scalar'; - ?column? ----------- - -(1 row) - -SELECT test_json -> 'x' FROM test_jsonb WHERE json_type = 'array'; - ?column? ----------- - -(1 row) - -SELECT test_json -> 'x' FROM test_jsonb WHERE json_type = 'object'; - ?column? ----------- - -(1 row) - -SELECT test_json -> 'field2' FROM test_jsonb WHERE json_type = 'object'; - ?column? ----------- - "val2" -(1 row) - -SELECT test_json ->> 'field2' FROM test_jsonb WHERE json_type = 'scalar'; - ?column? ----------- - -(1 row) - -SELECT test_json ->> 'field2' FROM test_jsonb WHERE json_type = 'array'; - ?column? ----------- - -(1 row) - -SELECT test_json ->> 'field2' FROM test_jsonb WHERE json_type = 'object'; - ?column? ----------- - val2 -(1 row) - -SELECT test_json -> 2 FROM test_jsonb WHERE json_type = 'scalar'; - ?column? ----------- - -(1 row) - -SELECT test_json -> 2 FROM test_jsonb WHERE json_type = 'array'; - ?column? ----------- - "two" -(1 row) - -SELECT test_json -> 9 FROM test_jsonb WHERE json_type = 'array'; - ?column? ----------- - -(1 row) - -SELECT test_json -> 2 FROM test_jsonb WHERE json_type = 'object'; - ?column? ----------- - -(1 row) - -SELECT test_json ->> 6 FROM test_jsonb WHERE json_type = 'array'; - ?column? ------------ - [1, 2, 3] -(1 row) - -SELECT test_json ->> 7 FROM test_jsonb WHERE json_type = 'array'; - ?column? ------------ - {"f1": 9} -(1 row) - -SELECT test_json ->> 'field4' FROM test_jsonb WHERE json_type = 'object'; - ?column? ----------- - 4 -(1 row) - -SELECT test_json ->> 'field5' FROM test_jsonb WHERE json_type = 'object'; - ?column? ------------ - [1, 2, 3] -(1 row) - -SELECT test_json ->> 'field6' FROM test_jsonb WHERE json_type = 'object'; - ?column? ------------ - {"f1": 9} -(1 row) - -SELECT test_json ->> 2 FROM test_jsonb WHERE json_type = 'scalar'; - ?column? ----------- - -(1 row) - -SELECT test_json ->> 2 FROM test_jsonb WHERE json_type = 'array'; - ?column? ----------- - two -(1 row) - -SELECT test_json ->> 2 FROM test_jsonb WHERE json_type = 'object'; - ?column? ----------- - -(1 row) - -SELECT jsonb_object_keys(test_json) FROM test_jsonb WHERE json_type = 'scalar'; -ERROR: cannot call jsonb_object_keys on a scalar -SELECT jsonb_object_keys(test_json) FROM test_jsonb WHERE json_type = 'array'; -ERROR: cannot call jsonb_object_keys on an array -SELECT jsonb_object_keys(test_json) FROM test_jsonb WHERE json_type = 'object'; - jsonb_object_keys -------------------- - field1 - field2 - field3 - field4 - field5 - field6 -(6 rows) - --- nulls -SELECT (test_json->'field3') IS NULL AS expect_false FROM test_jsonb WHERE json_type = 'object'; - expect_false --------------- - f -(1 row) - -SELECT (test_json->>'field3') IS NULL AS expect_true FROM test_jsonb WHERE json_type = 'object'; - expect_true -------------- - t -(1 row) - -SELECT (test_json->3) IS NULL AS expect_false FROM test_jsonb WHERE json_type = 'array'; - expect_false --------------- - f -(1 row) - -SELECT (test_json->>3) IS NULL AS expect_true FROM test_jsonb WHERE json_type = 'array'; - expect_true -------------- - t -(1 row) - --- corner cases -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> null::text; - ?column? 
----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> null::int; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> 1; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> 'z'; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> ''; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::jsonb -> 1; - ?column? -------------- - {"b": "cc"} -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::jsonb -> 3; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::jsonb -> 'z'; - ?column? ----------- - -(1 row) - -select '{"a": "c", "b": null}'::jsonb -> 'b'; - ?column? ----------- - null -(1 row) - -select '"foo"'::jsonb -> 1; - ?column? ----------- - -(1 row) - -select '"foo"'::jsonb -> 'z'; - ?column? ----------- - -(1 row) - -select '[]'::jsonb -> -2147483648; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> null::text; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> null::int; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> 1; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> 'z'; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> ''; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::jsonb ->> 1; - ?column? -------------- - {"b": "cc"} -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::jsonb ->> 3; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::jsonb ->> 'z'; - ?column? ----------- - -(1 row) - -select '{"a": "c", "b": null}'::jsonb ->> 'b'; - ?column? ----------- - -(1 row) - -select '"foo"'::jsonb ->> 1; - ?column? ----------- - -(1 row) - -select '"foo"'::jsonb ->> 'z'; - ?column? ----------- - -(1 row) - -select '[]'::jsonb ->> -2147483648; - ?column? ----------- - -(1 row) - --- equality and inequality -SELECT '{"x":"y"}'::jsonb = '{"x":"y"}'::jsonb; - ?column? ----------- - t -(1 row) - -SELECT '{"x":"y"}'::jsonb = '{"x":"z"}'::jsonb; - ?column? ----------- - f -(1 row) - -SELECT '{"x":"y"}'::jsonb <> '{"x":"y"}'::jsonb; - ?column? ----------- - f -(1 row) - -SELECT '{"x":"y"}'::jsonb <> '{"x":"z"}'::jsonb; - ?column? ----------- - t -(1 row) - --- containment -SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b"}'); - jsonb_contains ----------------- - t -(1 row) - -SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b", "c":null}'); - jsonb_contains ----------------- - t -(1 row) - -SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b", "g":null}'); - jsonb_contains ----------------- - f -(1 row) - -SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"g":null}'); - jsonb_contains ----------------- - f -(1 row) - -SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"c"}'); - jsonb_contains ----------------- - f -(1 row) - -SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b"}'); - jsonb_contains ----------------- - t -(1 row) - -SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b", "c":"q"}'); - jsonb_contains ----------------- - f -(1 row) - -SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b"}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b", "c":null}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b", "g":null}'; - ?column? 
----------- - f -(1 row) - -SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"g":null}'; - ?column? ----------- - f -(1 row) - -SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"c"}'; - ?column? ----------- - f -(1 row) - -SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b"}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b", "c":"q"}'; - ?column? ----------- - f -(1 row) - -SELECT '[1,2]'::jsonb @> '[1,2,2]'::jsonb; - ?column? ----------- - t -(1 row) - -SELECT '[1,1,2]'::jsonb @> '[1,2,2]'::jsonb; - ?column? ----------- - t -(1 row) - -SELECT '[[1,2]]'::jsonb @> '[[1,2,2]]'::jsonb; - ?column? ----------- - t -(1 row) - -SELECT '[1,2,2]'::jsonb <@ '[1,2]'::jsonb; - ?column? ----------- - t -(1 row) - -SELECT '[1,2,2]'::jsonb <@ '[1,1,2]'::jsonb; - ?column? ----------- - t -(1 row) - -SELECT '[[1,2,2]]'::jsonb <@ '[[1,2]]'::jsonb; - ?column? ----------- - t -(1 row) - -SELECT jsonb_contained('{"a":"b"}', '{"a":"b", "b":1, "c":null}'); - jsonb_contained ------------------ - t -(1 row) - -SELECT jsonb_contained('{"a":"b", "c":null}', '{"a":"b", "b":1, "c":null}'); - jsonb_contained ------------------ - t -(1 row) - -SELECT jsonb_contained('{"a":"b", "g":null}', '{"a":"b", "b":1, "c":null}'); - jsonb_contained ------------------ - f -(1 row) - -SELECT jsonb_contained('{"g":null}', '{"a":"b", "b":1, "c":null}'); - jsonb_contained ------------------ - f -(1 row) - -SELECT jsonb_contained('{"a":"c"}', '{"a":"b", "b":1, "c":null}'); - jsonb_contained ------------------ - f -(1 row) - -SELECT jsonb_contained('{"a":"b"}', '{"a":"b", "b":1, "c":null}'); - jsonb_contained ------------------ - t -(1 row) - -SELECT jsonb_contained('{"a":"b", "c":"q"}', '{"a":"b", "b":1, "c":null}'); - jsonb_contained ------------------ - f -(1 row) - -SELECT '{"a":"b"}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":"b", "c":null}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":"b", "g":null}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; - ?column? ----------- - f -(1 row) - -SELECT '{"g":null}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; - ?column? ----------- - f -(1 row) - -SELECT '{"a":"c"}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; - ?column? ----------- - f -(1 row) - -SELECT '{"a":"b"}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":"b", "c":"q"}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; - ?column? ----------- - f -(1 row) - --- Raw scalar may contain another raw scalar, array may contain a raw scalar -SELECT '[5]'::jsonb @> '[5]'; - ?column? ----------- - t -(1 row) - -SELECT '5'::jsonb @> '5'; - ?column? ----------- - t -(1 row) - -SELECT '[5]'::jsonb @> '5'; - ?column? ----------- - t -(1 row) - --- But a raw scalar cannot contain an array -SELECT '5'::jsonb @> '[5]'; - ?column? ----------- - f -(1 row) - --- In general, one thing should always contain itself. Test array containment: -SELECT '["9", ["7", "3"], 1]'::jsonb @> '["9", ["7", "3"], 1]'::jsonb; - ?column? ----------- - t -(1 row) - -SELECT '["9", ["7", "3"], ["1"]]'::jsonb @> '["9", ["7", "3"], ["1"]]'::jsonb; - ?column? ----------- - t -(1 row) - --- array containment string matching confusion bug -SELECT '{ "name": "Bob", "tags": [ "enim", "qui"]}'::jsonb @> '{"tags":["qu"]}'; - ?column? 
----------- - f -(1 row) - --- array length -SELECT jsonb_array_length('[1,2,3,{"f1":1,"f2":[5,6]},4]'); - jsonb_array_length --------------------- - 5 -(1 row) - -SELECT jsonb_array_length('[]'); - jsonb_array_length --------------------- - 0 -(1 row) - -SELECT jsonb_array_length('{"f1":1,"f2":[5,6]}'); -ERROR: cannot get array length of a non-array -SELECT jsonb_array_length('4'); -ERROR: cannot get array length of a scalar --- each -SELECT jsonb_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null}'); - jsonb_each --------------------- - (f1,"[1, 2, 3]") - (f2,"{""f3"": 1}") - (f4,null) -(3 rows) - -SELECT jsonb_each('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q; - q ------------------------------------------------------- - (1,"""first""") - (a,"{""1"": ""first"", ""b"": ""c"", ""c"": ""b""}") - (b,"[1, 2]") - (c,"""cc""") - (n,null) -(5 rows) - -SELECT * FROM jsonb_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q; - key | value ------+----------- - f1 | [1, 2, 3] - f2 | {"f3": 1} - f4 | null - f5 | 99 - f6 | "stringy" -(5 rows) - -SELECT * FROM jsonb_each('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q; - key | value ------+------------------------------------ - 1 | "first" - a | {"1": "first", "b": "c", "c": "b"} - b | [1, 2] - c | "cc" - n | null -(5 rows) - -SELECT jsonb_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":"null"}'); - jsonb_each_text --------------------- - (f1,"[1, 2, 3]") - (f2,"{""f3"": 1}") - (f4,) - (f5,null) -(4 rows) - -SELECT jsonb_each_text('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q; - q ------------------------------------------------------- - (1,first) - (a,"{""1"": ""first"", ""b"": ""c"", ""c"": ""b""}") - (b,"[1, 2]") - (c,cc) - (n,) -(5 rows) - -SELECT * FROM jsonb_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q; - key | value ------+----------- - f1 | [1, 2, 3] - f2 | {"f3": 1} - f4 | - f5 | 99 - f6 | stringy -(5 rows) - -SELECT * FROM jsonb_each_text('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q; - key | value ------+------------------------------------ - 1 | first - a | {"1": "first", "b": "c", "c": "b"} - b | [1, 2] - c | cc - n | -(5 rows) - --- exists -SELECT jsonb_exists('{"a":null, "b":"qq"}', 'a'); - jsonb_exists --------------- - t -(1 row) - -SELECT jsonb_exists('{"a":null, "b":"qq"}', 'b'); - jsonb_exists --------------- - t -(1 row) - -SELECT jsonb_exists('{"a":null, "b":"qq"}', 'c'); - jsonb_exists --------------- - f -(1 row) - -SELECT jsonb_exists('{"a":"null", "b":"qq"}', 'a'); - jsonb_exists --------------- - t -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ? 'a'; - ?column? ----------- - t -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ? 'b'; - ?column? ----------- - t -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ? 'c'; - ?column? ----------- - f -(1 row) - -SELECT jsonb '{"a":"null", "b":"qq"}' ? 'a'; - ?column? ----------- - t -(1 row) - --- array exists - array elements should behave as keys -SELECT count(*) from testjsonb WHERE j->'array' ? 'bar'; - count -------- - 3 -(1 row) - --- type sensitive array exists - should return no rows (since "exists" only --- matches strings that are either object keys or array elements) -SELECT count(*) from testjsonb WHERE j->'array' ? 
'5'::text; - count -------- - 0 -(1 row) - --- However, a raw scalar is *contained* within the array -SELECT count(*) from testjsonb WHERE j->'array' @> '5'::jsonb; - count -------- - 1 -(1 row) - -SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['a','b']); - jsonb_exists_any ------------------- - t -(1 row) - -SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['b','a']); - jsonb_exists_any ------------------- - t -(1 row) - -SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['c','a']); - jsonb_exists_any ------------------- - t -(1 row) - -SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['c','d']); - jsonb_exists_any ------------------- - f -(1 row) - -SELECT jsonb_exists_any('{"a":null, "b":"qq"}', '{}'::text[]); - jsonb_exists_any ------------------- - f -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['a','b']; - ?column? ----------- - t -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['b','a']; - ?column? ----------- - t -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['c','a']; - ?column? ----------- - t -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['c','d']; - ?column? ----------- - f -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?| '{}'::text[]; - ?column? ----------- - f -(1 row) - -SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['a','b']); - jsonb_exists_all ------------------- - t -(1 row) - -SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['b','a']); - jsonb_exists_all ------------------- - t -(1 row) - -SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['c','a']); - jsonb_exists_all ------------------- - f -(1 row) - -SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['c','d']); - jsonb_exists_all ------------------- - f -(1 row) - -SELECT jsonb_exists_all('{"a":null, "b":"qq"}', '{}'::text[]); - jsonb_exists_all ------------------- - t -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['a','b']; - ?column? ----------- - t -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['b','a']; - ?column? ----------- - t -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['c','a']; - ?column? ----------- - f -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['c','d']; - ?column? ----------- - f -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['a','a', 'b', 'b', 'b']; - ?column? ----------- - t -(1 row) - -SELECT jsonb '{"a":null, "b":"qq"}' ?& '{}'::text[]; - ?column? 
----------- - t -(1 row) - --- typeof -SELECT jsonb_typeof('{}') AS object; - object --------- - object -(1 row) - -SELECT jsonb_typeof('{"c":3,"p":"o"}') AS object; - object --------- - object -(1 row) - -SELECT jsonb_typeof('[]') AS array; - array -------- - array -(1 row) - -SELECT jsonb_typeof('["a", 1]') AS array; - array -------- - array -(1 row) - -SELECT jsonb_typeof('null') AS "null"; - null ------- - null -(1 row) - -SELECT jsonb_typeof('1') AS number; - number --------- - number -(1 row) - -SELECT jsonb_typeof('-1') AS number; - number --------- - number -(1 row) - -SELECT jsonb_typeof('1.0') AS number; - number --------- - number -(1 row) - -SELECT jsonb_typeof('1e2') AS number; - number --------- - number -(1 row) - -SELECT jsonb_typeof('-1.0') AS number; - number --------- - number -(1 row) - -SELECT jsonb_typeof('true') AS boolean; - boolean ---------- - boolean -(1 row) - -SELECT jsonb_typeof('false') AS boolean; - boolean ---------- - boolean -(1 row) - -SELECT jsonb_typeof('"hello"') AS string; - string --------- - string -(1 row) - -SELECT jsonb_typeof('"true"') AS string; - string --------- - string -(1 row) - -SELECT jsonb_typeof('"1.0"') AS string; - string --------- - string -(1 row) - --- jsonb_build_array, jsonb_build_object, jsonb_object_agg -SELECT jsonb_build_array('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); - jsonb_build_array -------------------------------------------------------------------------- - ["a", 1, "b", 1.2, "c", true, "d", null, "e", {"x": 3, "y": [1, 2, 3]}] -(1 row) - -SELECT jsonb_build_array('a', NULL); -- ok - jsonb_build_array -------------------- - ["a", null] -(1 row) - -SELECT jsonb_build_array(VARIADIC NULL::text[]); -- ok - jsonb_build_array -------------------- - -(1 row) - -SELECT jsonb_build_array(VARIADIC '{}'::text[]); -- ok - jsonb_build_array -------------------- - [] -(1 row) - -SELECT jsonb_build_array(VARIADIC '{a,b,c}'::text[]); -- ok - jsonb_build_array -------------------- - ["a", "b", "c"] -(1 row) - -SELECT jsonb_build_array(VARIADIC ARRAY['a', NULL]::text[]); -- ok - jsonb_build_array -------------------- - ["a", null] -(1 row) - -SELECT jsonb_build_array(VARIADIC '{1,2,3,4}'::text[]); -- ok - jsonb_build_array ----------------------- - ["1", "2", "3", "4"] -(1 row) - -SELECT jsonb_build_array(VARIADIC '{1,2,3,4}'::int[]); -- ok - jsonb_build_array -------------------- - [1, 2, 3, 4] -(1 row) - -SELECT jsonb_build_array(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok - jsonb_build_array --------------------- - [1, 4, 2, 5, 3, 6] -(1 row) - -SELECT jsonb_build_object('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); - jsonb_build_object -------------------------------------------------------------------------- - {"a": 1, "b": 1.2, "c": true, "d": null, "e": {"x": 3, "y": [1, 2, 3]}} -(1 row) - -SELECT jsonb_build_object( - 'a', jsonb_build_object('b',false,'c',99), - 'd', jsonb_build_object('e',array[9,8,7]::int[], - 'f', (select row_to_json(r) from ( select relkind, oid::regclass as name from pg_class where relname = 'pg_class') r))); - jsonb_build_object ------------------------------------------------------------------------------------------------- - {"a": {"b": false, "c": 99}, "d": {"e": [9, 8, 7], "f": {"name": "pg_class", "relkind": "r"}}} -(1 row) - -SELECT jsonb_build_object('{a,b,c}'::text[]); -- error -ERROR: argument list must have even number of elements -HINT: The arguments of jsonb_build_object() must consist of alternating keys and values. 
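For reference, the jsonb_build_object() failures in this hunk share one constraint: object keys must be non-null scalars, while values may freely be null. A minimal sketch, with illustrative key and value names:

SELECT jsonb_build_object('k', NULL);   -- ok: {"k": null}
SELECT jsonb_build_object(NULL, 'v');   -- error: argument 1: key must not be null
SELECT jsonb_build_object(VARIADIC ARRAY['k', NULL]::text[]);  -- ok: {"k": null}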
-SELECT jsonb_build_object('{a,b,c}'::text[], '{d,e,f}'::text[]); -- error, key cannot be array -ERROR: key value must be scalar, not array, composite, or json -SELECT jsonb_build_object('a', 'b', 'c'); -- error -ERROR: argument list must have even number of elements -HINT: The arguments of jsonb_build_object() must consist of alternating keys and values. -SELECT jsonb_build_object(NULL, 'a'); -- error, key cannot be NULL -ERROR: argument 1: key must not be null -SELECT jsonb_build_object('a', NULL); -- ok - jsonb_build_object --------------------- - {"a": null} -(1 row) - -SELECT jsonb_build_object(VARIADIC NULL::text[]); -- ok - jsonb_build_object --------------------- - -(1 row) - -SELECT jsonb_build_object(VARIADIC '{}'::text[]); -- ok - jsonb_build_object --------------------- - {} -(1 row) - -SELECT jsonb_build_object(VARIADIC '{a,b,c}'::text[]); -- error -ERROR: argument list must have even number of elements -HINT: The arguments of jsonb_build_object() must consist of alternating keys and values. -SELECT jsonb_build_object(VARIADIC ARRAY['a', NULL]::text[]); -- ok - jsonb_build_object --------------------- - {"a": null} -(1 row) - -SELECT jsonb_build_object(VARIADIC ARRAY[NULL, 'a']::text[]); -- error, key cannot be NULL -ERROR: argument 1: key must not be null -SELECT jsonb_build_object(VARIADIC '{1,2,3,4}'::text[]); -- ok - jsonb_build_object ----------------------- - {"1": "2", "3": "4"} -(1 row) - -SELECT jsonb_build_object(VARIADIC '{1,2,3,4}'::int[]); -- ok - jsonb_build_object --------------------- - {"1": 2, "3": 4} -(1 row) - -SELECT jsonb_build_object(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok - jsonb_build_object --------------------------- - {"1": 4, "2": 5, "3": 6} -(1 row) - --- empty objects/arrays -SELECT jsonb_build_array(); - jsonb_build_array -------------------- - [] -(1 row) - -SELECT jsonb_build_object(); - jsonb_build_object --------------------- - {} -(1 row) - --- make sure keys are quoted -SELECT jsonb_build_object(1,2); - jsonb_build_object --------------------- - {"1": 2} -(1 row) - --- keys must be scalar and not null -SELECT jsonb_build_object(null,2); -ERROR: argument 1: key must not be null -SELECT jsonb_build_object(r,2) FROM (SELECT 1 AS a, 2 AS b) r; -ERROR: key value must be scalar, not array, composite, or json -SELECT jsonb_build_object(json '{"a":1,"b":2}', 3); -ERROR: key value must be scalar, not array, composite, or json -SELECT jsonb_build_object('{1,2,3}'::int[], 3); -ERROR: key value must be scalar, not array, composite, or json --- handling of NULL values -SELECT jsonb_object_agg(1, NULL::jsonb); - jsonb_object_agg ------------------- - {"1": null} -(1 row) - -SELECT jsonb_object_agg(NULL, '{"a":1}'); -ERROR: field name must not be null -CREATE TEMP TABLE foo (serial_num int, name text, type text); -INSERT INTO foo VALUES (847001,'t15','GE1043'); -INSERT INTO foo VALUES (847002,'t16','GE1043'); -INSERT INTO foo VALUES (847003,'sub-alpha','GESS90'); -SELECT jsonb_build_object('turbines',jsonb_object_agg(serial_num,jsonb_build_object('name',name,'type',type))) -FROM foo; - jsonb_build_object -------------------------------------------------------------------------------------------------------------------------------------------------------------- - {"turbines": {"847001": {"name": "t15", "type": "GE1043"}, "847002": {"name": "t16", "type": "GE1043"}, "847003": {"name": "sub-alpha", "type": "GESS90"}}} -(1 row) - -SELECT jsonb_object_agg(name, type) FROM foo; - jsonb_object_agg 
------------------------------------------------------------ - {"t15": "GE1043", "t16": "GE1043", "sub-alpha": "GESS90"} -(1 row) - -INSERT INTO foo VALUES (999999, NULL, 'bar'); -SELECT jsonb_object_agg(name, type) FROM foo; -ERROR: field name must not be null --- edge case for parser -SELECT jsonb_object_agg(DISTINCT 'a', 'abc'); - jsonb_object_agg ------------------- - {"a": "abc"} -(1 row) - --- jsonb_object --- empty object, one dimension -SELECT jsonb_object('{}'); - jsonb_object --------------- - {} -(1 row) - --- empty object, two dimensions -SELECT jsonb_object('{}', '{}'); - jsonb_object --------------- - {} -(1 row) - --- one dimension -SELECT jsonb_object('{a,1,b,2,3,NULL,"d e f","a b c"}'); - jsonb_object ---------------------------------------------------- - {"3": null, "a": "1", "b": "2", "d e f": "a b c"} -(1 row) - --- same but with two dimensions -SELECT jsonb_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}'); - jsonb_object ---------------------------------------------------- - {"3": null, "a": "1", "b": "2", "d e f": "a b c"} -(1 row) - --- odd number error -SELECT jsonb_object('{a,b,c}'); -ERROR: array must have even number of elements --- one column error -SELECT jsonb_object('{{a},{b}}'); -ERROR: array must have two columns --- too many columns error -SELECT jsonb_object('{{a,b,c},{b,c,d}}'); -ERROR: array must have two columns --- too many dimensions error -SELECT jsonb_object('{{{a,b},{c,d}},{{b,c},{d,e}}}'); -ERROR: wrong number of array subscripts ---two argument form of jsonb_object -select jsonb_object('{a,b,c,"d e f"}','{1,2,3,"a b c"}'); - jsonb_object --------------------------------------------------- - {"a": "1", "b": "2", "c": "3", "d e f": "a b c"} -(1 row) - --- too many dimensions -SELECT jsonb_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}', '{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}'); -ERROR: wrong number of array subscripts --- mismatched dimensions -select jsonb_object('{a,b,c,"d e f",g}','{1,2,3,"a b c"}'); -ERROR: mismatched array dimensions -select jsonb_object('{a,b,c,"d e f"}','{1,2,3,"a b c",g}'); -ERROR: mismatched array dimensions --- null key error -select jsonb_object('{a,b,NULL,"d e f"}','{1,2,3,"a b c"}'); -ERROR: null value not allowed for object key --- empty key is allowed -select jsonb_object('{a,b,"","d e f"}','{1,2,3,"a b c"}'); - jsonb_object -------------------------------------------------- - {"": "3", "a": "1", "b": "2", "d e f": "a b c"} -(1 row) - --- extract_path, extract_path_as_text -SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6'); - jsonb_extract_path --------------------- - "stringy" -(1 row) - -SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2'); - jsonb_extract_path --------------------- - {"f3": 1} -(1 row) - -SELECT jsonb_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text); - jsonb_extract_path --------------------- - "f3" -(1 row) - -SELECT jsonb_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text); - jsonb_extract_path --------------------- - 1 -(1 row) - -SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6'); - jsonb_extract_path_text -------------------------- - stringy -(1 row) - -SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2'); - jsonb_extract_path_text -------------------------- - {"f3": 1} -(1 row) - -SELECT jsonb_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text); - jsonb_extract_path_text 
-------------------------- - f3 -(1 row) - -SELECT jsonb_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text); - jsonb_extract_path_text -------------------------- - 1 -(1 row) - --- extract_path nulls -SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') IS NULL AS expect_false; - expect_false --------------- - f -(1 row) - -SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') IS NULL AS expect_true; - expect_true -------------- - t -(1 row) - -SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') IS NULL AS expect_false; - expect_false --------------- - f -(1 row) - -SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') IS NULL AS expect_true; - expect_true -------------- - t -(1 row) - --- extract_path operators -SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f4','f6']; - ?column? ------------ - "stringy" -(1 row) - -SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f2']; - ?column? ------------ - {"f3": 1} -(1 row) - -SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f2','0']; - ?column? ----------- - "f3" -(1 row) - -SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f2','1']; - ?column? ----------- - 1 -(1 row) - -SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f4','f6']; - ?column? ----------- - stringy -(1 row) - -SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f2']; - ?column? ------------ - {"f3": 1} -(1 row) - -SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f2','0']; - ?column? ----------- - f3 -(1 row) - -SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f2','1']; - ?column? ----------- - 1 -(1 row) - --- corner cases for same -select '{"a": {"b":{"c": "foo"}}}'::jsonb #> '{}'; - ?column? ----------------------------- - {"a": {"b": {"c": "foo"}}} -(1 row) - -select '[1,2,3]'::jsonb #> '{}'; - ?column? ------------ - [1, 2, 3] -(1 row) - -select '"foo"'::jsonb #> '{}'; - ?column? ----------- - "foo" -(1 row) - -select '42'::jsonb #> '{}'; - ?column? ----------- - 42 -(1 row) - -select 'null'::jsonb #> '{}'; - ?column? ----------- - null -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a']; - ?column? ---------------------- - {"b": {"c": "foo"}} -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a', null]; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a', '']; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','b']; - ?column? --------------- - {"c": "foo"} -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','b','c']; - ?column? ----------- - "foo" -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','b','c','d']; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','z','c']; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #> array['a','1','b']; - ?column? ----------- - "cc" -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #> array['a','z','b']; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::jsonb #> array['1','b']; - ?column? ----------- - "cc" -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::jsonb #> array['z','b']; - ?column? 
----------- - -(1 row) - -select '[{"b": "c"}, {"b": null}]'::jsonb #> array['1','b']; - ?column? ----------- - null -(1 row) - -select '"foo"'::jsonb #> array['z']; - ?column? ----------- - -(1 row) - -select '42'::jsonb #> array['f2']; - ?column? ----------- - -(1 row) - -select '42'::jsonb #> array['0']; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> '{}'; - ?column? ----------------------------- - {"a": {"b": {"c": "foo"}}} -(1 row) - -select '[1,2,3]'::jsonb #>> '{}'; - ?column? ------------ - [1, 2, 3] -(1 row) - -select '"foo"'::jsonb #>> '{}'; - ?column? ----------- - foo -(1 row) - -select '42'::jsonb #>> '{}'; - ?column? ----------- - 42 -(1 row) - -select 'null'::jsonb #>> '{}'; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a']; - ?column? ---------------------- - {"b": {"c": "foo"}} -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a', null]; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a', '']; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','b']; - ?column? --------------- - {"c": "foo"} -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','b','c']; - ?column? ----------- - foo -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','b','c','d']; - ?column? ----------- - -(1 row) - -select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','z','c']; - ?column? ----------- - -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #>> array['a','1','b']; - ?column? ----------- - cc -(1 row) - -select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #>> array['a','z','b']; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::jsonb #>> array['1','b']; - ?column? ----------- - cc -(1 row) - -select '[{"b": "c"}, {"b": "cc"}]'::jsonb #>> array['z','b']; - ?column? ----------- - -(1 row) - -select '[{"b": "c"}, {"b": null}]'::jsonb #>> array['1','b']; - ?column? ----------- - -(1 row) - -select '"foo"'::jsonb #>> array['z']; - ?column? ----------- - -(1 row) - -select '42'::jsonb #>> array['f2']; - ?column? ----------- - -(1 row) - -select '42'::jsonb #>> array['0']; - ?column? 
----------- - -(1 row) - --- array_elements -SELECT jsonb_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false]'); - jsonb_array_elements ----------------------------- - 1 - true - [1, [2, 3]] - null - {"f1": 1, "f2": [7, 8, 9]} - false -(6 rows) - -SELECT * FROM jsonb_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false]') q; - value ----------------------------- - 1 - true - [1, [2, 3]] - null - {"f1": 1, "f2": [7, 8, 9]} - false -(6 rows) - -SELECT jsonb_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]'); - jsonb_array_elements_text ----------------------------- - 1 - true - [1, [2, 3]] - - {"f1": 1, "f2": [7, 8, 9]} - false - stringy -(7 rows) - -SELECT * FROM jsonb_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]') q; - value ----------------------------- - 1 - true - [1, [2, 3]] - - {"f1": 1, "f2": [7, 8, 9]} - false - stringy -(7 rows) - --- populate_record -CREATE TYPE jbpop AS (a text, b int, c timestamp); -CREATE DOMAIN jsb_int_not_null AS int NOT NULL; -CREATE DOMAIN jsb_int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 3); -CREATE DOMAIN jsb_int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 3); -create type jb_unordered_pair as (x int, y int); -create domain jb_ordered_pair as jb_unordered_pair check((value).x <= (value).y); -CREATE TYPE jsbrec AS ( - i int, - ia _int4, - ia1 int[], - ia2 int[][], - ia3 int[][][], - ia1d jsb_int_array_1d, - ia2d jsb_int_array_2d, - t text, - ta text[], - c char(10), - ca char(10)[], - ts timestamp, - js json, - jsb jsonb, - jsa json[], - rec jbpop, - reca jbpop[] -); -CREATE TYPE jsbrec_i_not_null AS ( - i jsb_int_not_null -); -SELECT * FROM jsonb_populate_record(NULL::jbpop,'{"a":"blurfl","x":43.2}') q; - a | b | c ---------+---+--- - blurfl | | -(1 row) - -SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"a":"blurfl","x":43.2}') q; - a | b | c ---------+---+-------------------------- - blurfl | 3 | Mon Dec 31 15:30:56 2012 -(1 row) - -SELECT * FROM jsonb_populate_record(NULL::jbpop,'{"a":"blurfl","x":43.2}') q; - a | b | c ---------+---+--- - blurfl | | -(1 row) - -SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"a":"blurfl","x":43.2}') q; - a | b | c ---------+---+-------------------------- - blurfl | 3 | Mon Dec 31 15:30:56 2012 -(1 row) - -SELECT * FROM jsonb_populate_record(NULL::jbpop,'{"a":[100,200,false],"x":43.2}') q; - a | b | c --------------------+---+--- - [100, 200, false] | | -(1 row) - -SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"a":[100,200,false],"x":43.2}') q; - a | b | c --------------------+---+-------------------------- - [100, 200, false] | 3 | Mon Dec 31 15:30:56 2012 -(1 row) - -SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"c":[100,200,false],"x":43.2}') q; -ERROR: invalid input syntax for type timestamp: "[100, 200, false]" -SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop, '{}') q; - a | b | c ----+---+-------------------------- - x | 3 | Mon Dec 31 15:30:56 2012 -(1 row) - -SELECT i FROM jsonb_populate_record(NULL::jsbrec_i_not_null, '{"x": 43.2}') q; -ERROR: domain jsb_int_not_null does not allow null values -SELECT i FROM jsonb_populate_record(NULL::jsbrec_i_not_null, '{"i": null}') q; -ERROR: domain jsb_int_not_null does not allow null values -SELECT i FROM jsonb_populate_record(NULL::jsbrec_i_not_null, '{"i": 12345}') q; - i -------- - 12345 -(1 row) - 
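Worth spelling out the semantics the expected results above exercise: jsonb_populate_record() matches JSON keys to record fields by name, ignores keys with no matching field, and leaves fields absent from the JSON at the base row's values. A minimal standalone sketch (demo_rec is illustrative, not a type from the test):

  CREATE TYPE demo_rec AS (a text, b int);
  -- "zzz" has no matching field and is ignored; "a" keeps the base value
  SELECT * FROM jsonb_populate_record(row('x', 1)::demo_rec,
                                      '{"b": 2, "zzz": true}'::jsonb);
  -- expected result: (x,2)
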
-SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": null}') q; - ia ----- - -(1 row) - -SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "ia". -SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [1, "2", null, 4]}') q; - ia --------------- - {1,2,NULL,4} -(1 row) - -SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1, 2], [3, 4]]}') q; - ia ---------------- - {{1,2},{3,4}} -(1 row) - -SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1], 2]}') q; -ERROR: expected JSON array -HINT: See the array element [1] of key "ia". -SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1], [2, 3]]}') q; -ERROR: malformed JSON array -DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. -SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": "{1,2,3}"}') q; - ia ---------- - {1,2,3} -(1 row) - -SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": null}') q; - ia1 ------ - -(1 row) - -SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "ia1". -SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": [1, "2", null, 4]}') q; - ia1 --------------- - {1,2,NULL,4} -(1 row) - -SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": [[1, 2, 3]]}') q; - ia1 ------------ - {{1,2,3}} -(1 row) - -SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": null}') q; - ia1d ------- - -(1 row) - -SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "ia1d". -SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": [1, "2", null, 4]}') q; -ERROR: value for domain jsb_int_array_1d violates check constraint "jsb_int_array_1d_check" -SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": [1, "2", null]}') q; - ia1d ------------- - {1,2,NULL} -(1 row) - -SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [1, "2", null, 4]}') q; - ia2 --------------- - {1,2,NULL,4} -(1 row) - -SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[1, 2], [null, 4]]}') q; - ia2 ------------------- - {{1,2},{NULL,4}} -(1 row) - -SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[], []]}') q; - ia2 ------ - {} -(1 row) - -SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[1, 2], [3]]}') q; -ERROR: malformed JSON array -DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. -SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[1, 2], 3, 4]}') q; -ERROR: expected JSON array -HINT: See the array element [1] of key "ia2". 
-SELECT ia2d FROM jsonb_populate_record(NULL::jsbrec, '{"ia2d": [[1, "2"], [null, 4]]}') q; -ERROR: value for domain jsb_int_array_2d violates check constraint "jsb_int_array_2d_check" -SELECT ia2d FROM jsonb_populate_record(NULL::jsbrec, '{"ia2d": [[1, "2", 3], [null, 5, 6]]}') q; - ia2d ----------------------- - {{1,2,3},{NULL,5,6}} -(1 row) - -SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [1, "2", null, 4]}') q; - ia3 --------------- - {1,2,NULL,4} -(1 row) - -SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [[1, 2], [null, 4]]}') q; - ia3 ------------------- - {{1,2},{NULL,4}} -(1 row) - -SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[], []], [[], []], [[], []] ]}') q; - ia3 ------ - {} -(1 row) - -SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[1, 2]], [[3, 4]] ]}') q; - ia3 -------------------- - {{{1,2}},{{3,4}}} -(1 row) - -SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8]] ]}') q; - ia3 -------------------------------- - {{{1,2},{3,4}},{{5,6},{7,8}}} -(1 row) - -SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]] ]}') q; -ERROR: malformed JSON array -DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. -SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": null}') q; - ta ----- - -(1 row) - -SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "ta". -SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": [1, "2", null, 4]}') q; - ta --------------- - {1,2,NULL,4} -(1 row) - -SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": [[1, 2, 3], {"k": "v"}]}') q; -ERROR: expected JSON array -HINT: See the array element [1] of key "ta". -SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": null}') q; - c ---- - -(1 row) - -SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": "aaa"}') q; - c ------------- - aaa -(1 row) - -SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": "aaaaaaaaaa"}') q; - c ------------- - aaaaaaaaaa -(1 row) - -SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": "aaaaaaaaaaaaa"}') q; -ERROR: value too long for type character(10) -SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": null}') q; - ca ----- - -(1 row) - -SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "ca". -SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": [1, "2", null, 4]}') q; - ca ------------------------------------------------ - {"1 ","2 ",NULL,"4 "} -(1 row) - -SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": ["aaaaaaaaaaaaaaaa"]}') q; -ERROR: value too long for type character(10) -SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": [[1, 2, 3], {"k": "v"}]}') q; -ERROR: expected JSON array -HINT: See the array element [1] of key "ca". 
-SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": null}') q; - js ----- - -(1 row) - -SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": true}') q; - js ------- - true -(1 row) - -SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": 123.45}') q; - js --------- - 123.45 -(1 row) - -SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": "123.45"}') q; - js ----------- - "123.45" -(1 row) - -SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": "abc"}') q; - js -------- - "abc" -(1 row) - -SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": [123, "123", null, {"key": "value"}]}') q; - js --------------------------------------- - [123, "123", null, {"key": "value"}] -(1 row) - -SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": {"a": "bbb", "b": null, "c": 123.45}}') q; - js --------------------------------------- - {"a": "bbb", "b": null, "c": 123.45} -(1 row) - -SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": null}') q; - jsb ------ - -(1 row) - -SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": true}') q; - jsb ------- - true -(1 row) - -SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": 123.45}') q; - jsb --------- - 123.45 -(1 row) - -SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": "123.45"}') q; - jsb ----------- - "123.45" -(1 row) - -SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": "abc"}') q; - jsb -------- - "abc" -(1 row) - -SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": [123, "123", null, {"key": "value"}]}') q; - jsb --------------------------------------- - [123, "123", null, {"key": "value"}] -(1 row) - -SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": {"a": "bbb", "b": null, "c": 123.45}}') q; - jsb --------------------------------------- - {"a": "bbb", "b": null, "c": 123.45} -(1 row) - -SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": null}') q; - jsa ------ - -(1 row) - -SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "jsa". -SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": [1, "2", null, 4]}') q; - jsa --------------------- - {1,"\"2\"",NULL,4} -(1 row) - -SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": ["aaa", null, [1, 2, "3", {}], { "k" : "v" }]}') q; - jsa -------------------------------------------------------- - {"\"aaa\"",NULL,"[1, 2, \"3\", {}]","{\"k\": \"v\"}"} -(1 row) - -SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": 123}') q; -ERROR: cannot call populate_composite on a scalar -SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": [1, 2]}') q; -ERROR: cannot call populate_composite on an array -SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}') q; - rec ------------------------------------ - (abc,,"Thu Jan 02 00:00:00 2003") -(1 row) - -SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": "(abc,42,01.02.2003)"}') q; - rec -------------------------------------- - (abc,42,"Thu Jan 02 00:00:00 2003") -(1 row) - -SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": 123}') q; -ERROR: expected JSON array -HINT: See the value of key "reca". 
-SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": [1, 2]}') q; -ERROR: cannot call populate_composite on a scalar -SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}]}') q; - reca --------------------------------------------------------- - {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} -(1 row) - -SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": ["(abc,42,01.02.2003)"]}') q; - reca -------------------------------------------- - {"(abc,42,\"Thu Jan 02 00:00:00 2003\")"} -(1 row) - -SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": "{\"(abc,42,01.02.2003)\"}"}') q; - reca -------------------------------------------- - {"(abc,42,\"Thu Jan 02 00:00:00 2003\")"} -(1 row) - -SELECT rec FROM jsonb_populate_record( - row(NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, - row('x',3,'2012-12-31 15:30:56')::jbpop,NULL)::jsbrec, - '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}' -) q; - rec ------------------------------------- - (abc,3,"Thu Jan 02 00:00:00 2003") -(1 row) - --- Tests to check soft-error support for populate_record_field() --- populate_scalar() -create type jsb_char2 as (a char(2)); -select jsonb_populate_record_valid(NULL::jsb_char2, '{"a": "aaa"}'); - jsonb_populate_record_valid ------------------------------ - f -(1 row) - -select * from jsonb_populate_record(NULL::jsb_char2, '{"a": "aaa"}') q; -ERROR: value too long for type character(2) -select jsonb_populate_record_valid(NULL::jsb_char2, '{"a": "aa"}'); - jsonb_populate_record_valid ------------------------------ - t -(1 row) - -select * from jsonb_populate_record(NULL::jsb_char2, '{"a": "aa"}') q; - a ----- - aa -(1 row) - --- populate_array() -create type jsb_ia as (a int[]); -create type jsb_ia2 as (a int[][]); -select jsonb_populate_record_valid(NULL::jsb_ia, '{"a": 43.2}'); - jsonb_populate_record_valid ------------------------------ - f -(1 row) - -select * from jsonb_populate_record(NULL::jsb_ia, '{"a": 43.2}') q; -ERROR: expected JSON array -HINT: See the value of key "a". -select jsonb_populate_record_valid(NULL::jsb_ia, '{"a": [1, 2]}'); - jsonb_populate_record_valid ------------------------------ - t -(1 row) - -select * from jsonb_populate_record(NULL::jsb_ia, '{"a": [1, 2]}') q; - a -------- - {1,2} -(1 row) - -select jsonb_populate_record_valid(NULL::jsb_ia2, '{"a": [[1], [2, 3]]}'); - jsonb_populate_record_valid ------------------------------ - f -(1 row) - -select * from jsonb_populate_record(NULL::jsb_ia2, '{"a": [[1], [2, 3]]}') q; -ERROR: malformed JSON array -DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. 
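The *_valid probes interleaved with these expected results check convertibility without raising an error (jsonb_populate_record_valid() is available from PostgreSQL 16 on), which permits a filter-then-convert pattern. A sketch with a throwaway type, not one of the test's types:

  CREATE TYPE demo_ia2 AS (a int[][]);
  -- first value is rectangular and converts; second is ragged and does not
  SELECT j, jsonb_populate_record_valid(NULL::demo_ia2, j) AS ok
  FROM (VALUES ('{"a": [[1],[2]]}'::jsonb),
               ('{"a": [[1],[2,3]]}'::jsonb)) v(j);
  -- expected: ok = t for the first row, f for the second
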
-select jsonb_populate_record_valid(NULL::jsb_ia2, '{"a": [[1, 0], [2, 3]]}'); - jsonb_populate_record_valid ------------------------------ - t -(1 row) - -select * from jsonb_populate_record(NULL::jsb_ia2, '{"a": [[1, 0], [2, 3]]}') q; - a ---------------- - {{1,0},{2,3}} -(1 row) - --- populate_domain() -create domain jsb_i_not_null as int not null; -create domain jsb_i_gt_1 as int check (value > 1); -create type jsb_i_not_null_rec as (a jsb_i_not_null); -create type jsb_i_gt_1_rec as (a jsb_i_gt_1); -select jsonb_populate_record_valid(NULL::jsb_i_not_null_rec, '{"a": null}'); - jsonb_populate_record_valid ------------------------------ - f -(1 row) - -select * from jsonb_populate_record(NULL::jsb_i_not_null_rec, '{"a": null}') q; -ERROR: domain jsb_i_not_null does not allow null values -select jsonb_populate_record_valid(NULL::jsb_i_not_null_rec, '{"a": 1}'); - jsonb_populate_record_valid ------------------------------ - t -(1 row) - -select * from jsonb_populate_record(NULL::jsb_i_not_null_rec, '{"a": 1}') q; - a ---- - 1 -(1 row) - -select jsonb_populate_record_valid(NULL::jsb_i_gt_1_rec, '{"a": 1}'); - jsonb_populate_record_valid ------------------------------ - f -(1 row) - -select * from jsonb_populate_record(NULL::jsb_i_gt_1_rec, '{"a": 1}') q; -ERROR: value for domain jsb_i_gt_1 violates check constraint "jsb_i_gt_1_check" -select jsonb_populate_record_valid(NULL::jsb_i_gt_1_rec, '{"a": 2}'); - jsonb_populate_record_valid ------------------------------ - t -(1 row) - -select * from jsonb_populate_record(NULL::jsb_i_gt_1_rec, '{"a": 2}') q; - a ---- - 2 -(1 row) - -drop type jsb_ia, jsb_ia2, jsb_char2, jsb_i_not_null_rec, jsb_i_gt_1_rec; -drop domain jsb_i_not_null, jsb_i_gt_1; --- anonymous record type -SELECT jsonb_populate_record(null::record, '{"x": 0, "y": 1}'); -ERROR: could not determine row type for result of jsonb_populate_record -HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list. 
-SELECT jsonb_populate_record(row(1,2), '{"f1": 0, "f2": 1}'); - jsonb_populate_record ------------------------ - (0,1) -(1 row) - -SELECT * FROM - jsonb_populate_record(null::record, '{"x": 776}') AS (x int, y int); - x | y ------+--- - 776 | -(1 row) - --- composite domain -SELECT jsonb_populate_record(null::jb_ordered_pair, '{"x": 0, "y": 1}'); - jsonb_populate_record ------------------------ - (0,1) -(1 row) - -SELECT jsonb_populate_record(row(1,2)::jb_ordered_pair, '{"x": 0}'); - jsonb_populate_record ------------------------ - (0,2) -(1 row) - -SELECT jsonb_populate_record(row(1,2)::jb_ordered_pair, '{"x": 1, "y": 0}'); -ERROR: value for domain jb_ordered_pair violates check constraint "jb_ordered_pair_check" --- populate_recordset -SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ---------+---+-------------------------- - blurfl | | - | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ---------+----+-------------------------- - blurfl | 99 | - def | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ---------+---+-------------------------- - blurfl | | - | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ---------+----+-------------------------- - blurfl | 99 | - def | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ------------------+----+-------------------------- - [100, 200, 300] | 99 | - {"z": true} | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"c":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q; -ERROR: invalid input syntax for type timestamp: "[100, 200, 300]" -SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ---------+---+-------------------------- - blurfl | | - | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ---------+----+-------------------------- - blurfl | 99 | - def | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ------------------+----+-------------------------- - [100, 200, 300] | 99 | - {"z": true} | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - --- anonymous record type -SELECT jsonb_populate_recordset(null::record, '[{"x": 0, "y": 1}]'); -ERROR: could not determine row type for result of jsonb_populate_recordset -HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list. 
-SELECT jsonb_populate_recordset(row(1,2), '[{"f1": 0, "f2": 1}]'); - jsonb_populate_recordset --------------------------- - (0,1) -(1 row) - -SELECT i, jsonb_populate_recordset(row(i,50), '[{"f1":"42"},{"f2":"43"}]') -FROM (VALUES (1),(2)) v(i); - i | jsonb_populate_recordset ----+-------------------------- - 1 | (42,50) - 1 | (1,43) - 2 | (42,50) - 2 | (2,43) -(4 rows) - -SELECT * FROM - jsonb_populate_recordset(null::record, '[{"x": 776}]') AS (x int, y int); - x | y ------+--- - 776 | -(1 row) - --- empty array is a corner case -SELECT jsonb_populate_recordset(null::record, '[]'); -ERROR: could not determine row type for result of jsonb_populate_recordset -HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list. -SELECT jsonb_populate_recordset(row(1,2), '[]'); - jsonb_populate_recordset --------------------------- -(0 rows) - -SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[]') q; - a | b | c ----+---+--- -(0 rows) - -SELECT * FROM - jsonb_populate_recordset(null::record, '[]') AS (x int, y int); - x | y ----+--- -(0 rows) - --- composite domain -SELECT jsonb_populate_recordset(null::jb_ordered_pair, '[{"x": 0, "y": 1}]'); - jsonb_populate_recordset --------------------------- - (0,1) -(1 row) - -SELECT jsonb_populate_recordset(row(1,2)::jb_ordered_pair, '[{"x": 0}, {"y": 3}]'); - jsonb_populate_recordset --------------------------- - (0,2) - (1,3) -(2 rows) - -SELECT jsonb_populate_recordset(row(1,2)::jb_ordered_pair, '[{"x": 1, "y": 0}]'); -ERROR: value for domain jb_ordered_pair violates check constraint "jb_ordered_pair_check" --- negative cases where the wrong record type is supplied -select * from jsonb_populate_recordset(row(0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); -ERROR: function return row and query-specified return row do not match -DETAIL: Returned row contains 1 attribute, but query expects 2. -select * from jsonb_populate_recordset(row(0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); -ERROR: function return row and query-specified return row do not match -DETAIL: Returned type integer at ordinal position 1, but query expects text. -select * from jsonb_populate_recordset(row(0::int,0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); -ERROR: function return row and query-specified return row do not match -DETAIL: Returned row contains 3 attributes, but query expects 2. -select * from jsonb_populate_recordset(row(1000000000::int,50::int),'[{"b":"2"},{"a":"3"}]') q (a text, b text); -ERROR: function return row and query-specified return row do not match -DETAIL: Returned type integer at ordinal position 1, but query expects text. 
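For what it's worth, in application queries the set-returning form exercised above is usually applied per row with LATERAL; a sketch under assumed names (docs and payload are hypothetical, jbpop is the composite type created earlier in this test):

  -- payload is assumed to hold a JSON array of objects shaped like jbpop
  SELECT d.id, r.a, r.b, r.c
  FROM docs AS d
       CROSS JOIN LATERAL jsonb_populate_recordset(NULL::jbpop, d.payload) AS r;
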
--- jsonb_to_record and jsonb_to_recordset -select * from jsonb_to_record('{"a":1,"b":"foo","c":"bar"}') - as x(a int, b text, d text); - a | b | d ----+-----+--- - 1 | foo | -(1 row) - -select * from jsonb_to_recordset('[{"a":1,"b":"foo","d":false},{"a":2,"b":"bar","c":true}]') - as x(a int, b text, c boolean); - a | b | c ----+-----+--- - 1 | foo | - 2 | bar | t -(2 rows) - -select *, c is null as c_is_null -from jsonb_to_record('{"a":1, "b":{"c":16, "d":2}, "x":8, "ca": ["1 2", 3], "ia": [[1,2],[3,4]], "r": {"a": "aaa", "b": 123}}'::jsonb) - as t(a int, b jsonb, c text, x int, ca char(5)[], ia int[][], r jbpop); - a | b | c | x | ca | ia | r | c_is_null ----+-------------------+---+---+-------------------+---------------+------------+----------- - 1 | {"c": 16, "d": 2} | | 8 | {"1 2 ","3 "} | {{1,2},{3,4}} | (aaa,123,) | t -(1 row) - -select *, c is null as c_is_null -from jsonb_to_recordset('[{"a":1, "b":{"c":16, "d":2}, "x":8}]'::jsonb) - as t(a int, b jsonb, c text, x int); - a | b | c | x | c_is_null ----+-------------------+---+---+----------- - 1 | {"c": 16, "d": 2} | | 8 | t -(1 row) - -select * from jsonb_to_record('{"ia": null}') as x(ia _int4); - ia ----- - -(1 row) - -select * from jsonb_to_record('{"ia": 123}') as x(ia _int4); -ERROR: expected JSON array -HINT: See the value of key "ia". -select * from jsonb_to_record('{"ia": [1, "2", null, 4]}') as x(ia _int4); - ia --------------- - {1,2,NULL,4} -(1 row) - -select * from jsonb_to_record('{"ia": [[1, 2], [3, 4]]}') as x(ia _int4); - ia ---------------- - {{1,2},{3,4}} -(1 row) - -select * from jsonb_to_record('{"ia": [[1], 2]}') as x(ia _int4); -ERROR: expected JSON array -HINT: See the array element [1] of key "ia". -select * from jsonb_to_record('{"ia": [[1], [2, 3]]}') as x(ia _int4); -ERROR: malformed JSON array -DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. 
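One relationship these expected results rely on: jsonb_to_record() behaves like jsonb_populate_record() with a NULL base row and the record shape supplied inline as a column definition list, so the two spellings below should produce the same row (a sketch, not part of the test):

  SELECT * FROM jsonb_to_record('{"a":1,"b":"foo"}') AS x(a int, b text);
  SELECT * FROM jsonb_populate_record(NULL::record, '{"a":1,"b":"foo"}')
           AS x(a int, b text);
  -- both: a = 1, b = 'foo'
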
-select * from jsonb_to_record('{"ia2": [1, 2, 3]}') as x(ia2 int[][]); - ia2 ---------- - {1,2,3} -(1 row) - -select * from jsonb_to_record('{"ia2": [[1, 2], [3, 4]]}') as x(ia2 int4[][]); - ia2 ---------------- - {{1,2},{3,4}} -(1 row) - -select * from jsonb_to_record('{"ia2": [[[1], [2], [3]]]}') as x(ia2 int4[][]); - ia2 ------------------ - {{{1},{2},{3}}} -(1 row) - -select * from jsonb_to_record('{"out": {"key": 1}}') as x(out json); - out ------------- - {"key": 1} -(1 row) - -select * from jsonb_to_record('{"out": [{"key": 1}]}') as x(out json); - out --------------- - [{"key": 1}] -(1 row) - -select * from jsonb_to_record('{"out": "{\"key\": 1}"}') as x(out json); - out ----------------- - "{\"key\": 1}" -(1 row) - -select * from jsonb_to_record('{"out": {"key": 1}}') as x(out jsonb); - out ------------- - {"key": 1} -(1 row) - -select * from jsonb_to_record('{"out": [{"key": 1}]}') as x(out jsonb); - out --------------- - [{"key": 1}] -(1 row) - -select * from jsonb_to_record('{"out": "{\"key\": 1}"}') as x(out jsonb); - out ----------------- - "{\"key\": 1}" -(1 row) - --- test type info caching in jsonb_populate_record() -CREATE TEMP TABLE jsbpoptest (js jsonb); -INSERT INTO jsbpoptest -SELECT '{ - "jsa": [1, "2", null, 4], - "rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}, - "reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}] -}'::jsonb -FROM generate_series(1, 3); -SELECT (jsonb_populate_record(NULL::jsbrec, js)).* FROM jsbpoptest; - i | ia | ia1 | ia2 | ia3 | ia1d | ia2d | t | ta | c | ca | ts | js | jsb | jsa | rec | reca ----+----+-----+-----+-----+------+------+---+----+---+----+----+----+-----+--------------------+-----------------------------------+-------------------------------------------------------- - | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} - | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} - | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} -(3 rows) - -DROP TYPE jsbrec; -DROP TYPE jsbrec_i_not_null; -DROP DOMAIN jsb_int_not_null; -DROP DOMAIN jsb_int_array_1d; -DROP DOMAIN jsb_int_array_2d; -DROP DOMAIN jb_ordered_pair; -DROP TYPE jb_unordered_pair; --- indexing -SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}'; - count -------- - 15 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j ? 'public'; - count -------- - 194 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j ? 
'bar'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j ?| ARRAY['public','disabled']; - count -------- - 337 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j ?& ARRAY['public','disabled']; - count -------- - 42 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '"CC" == $.wait'; - count -------- - 15 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == "CC" && true == $.public'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25.0'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($)'; - count -------- - 1012 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public)'; - count -------- - 194 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.bar)'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) || exists($.disabled)'; - count -------- - 337 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) && exists($.disabled)'; - count -------- - 42 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? ("CC" == @)'; - count -------- - 15 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.wait == "CC" && true == @.public)'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.age ? (@ == 25)'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.age == 25.0)'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$'; - count -------- - 1012 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.public'; - count -------- - 194 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.bar'; - count -------- - 0 -(1 row) - -CREATE INDEX jidx ON testjsonb USING gin (j); -SET enable_seqscan = off; -SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}'; - count -------- - 15 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"array":["foo"]}'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"array":["bar"]}'; - count -------- - 3 -(1 row) - --- exercise GIN_SEARCH_MODE_ALL -SELECT count(*) FROM testjsonb WHERE j @> '{}'; - count -------- - 1012 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j ? 'public'; - count -------- - 194 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j ? 
'bar'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j ?| ARRAY['public','disabled']; - count -------- - 337 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j ?& ARRAY['public','disabled']; - count -------- - 42 -(1 row) - -EXPLAIN (COSTS OFF) -SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null'; - QUERY PLAN ------------------------------------------------------------------ - Aggregate - -> Bitmap Heap Scan on testjsonb - Recheck Cond: (j @@ '($."wait" == null)'::jsonpath) - -> Bitmap Index Scan on jidx - Index Cond: (j @@ '($."wait" == null)'::jsonpath) -(5 rows) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.wait == null))'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.wait ? (@ == null))'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '"CC" == $.wait'; - count -------- - 15 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == "CC" && true == $.public'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25.0'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "foo"'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "bar"'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.array[*] == "bar"))'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array ? (@[*] == "bar"))'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array[*] ? (@ == "bar"))'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($)'; - count -------- - 1012 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public)'; - count -------- - 194 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.bar)'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) || exists($.disabled)'; - count -------- - 337 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) && exists($.disabled)'; - count -------- - 42 -(1 row) - -EXPLAIN (COSTS OFF) -SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)'; - QUERY PLAN -------------------------------------------------------------------- - Aggregate - -> Bitmap Heap Scan on testjsonb - Recheck Cond: (j @? '$."wait"?(@ == null)'::jsonpath) - -> Bitmap Index Scan on jidx - Index Cond: (j @? '$."wait"?(@ == null)'::jsonpath) -(5 rows) - -SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? ("CC" == @)'; - count -------- - 15 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.wait == "CC" && true == @.public)'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.age ? (@ == 25)'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.age == 25.0)'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.array[*] == "bar")'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.array ? (@[*] == "bar")'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.array[*] ? 
(@ == "bar")'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$'; - count -------- - 1012 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.public'; - count -------- - 194 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.bar'; - count -------- - 0 -(1 row) - --- array exists - array elements should behave as keys (for GIN index scans too) -CREATE INDEX jidx_array ON testjsonb USING gin((j->'array')); -SELECT count(*) from testjsonb WHERE j->'array' ? 'bar'; - count -------- - 3 -(1 row) - --- type sensitive array exists - should return no rows (since "exists" only --- matches strings that are either object keys or array elements) -SELECT count(*) from testjsonb WHERE j->'array' ? '5'::text; - count -------- - 0 -(1 row) - --- However, a raw scalar is *contained* within the array -SELECT count(*) from testjsonb WHERE j->'array' @> '5'::jsonb; - count -------- - 1 -(1 row) - -RESET enable_seqscan; -SELECT count(*) FROM (SELECT (jsonb_each(j)).key FROM testjsonb) AS wow; - count -------- - 4791 -(1 row) - -SELECT key, count(*) FROM (SELECT (jsonb_each(j)).key FROM testjsonb) AS wow GROUP BY key ORDER BY count DESC, key; - key | count ------------+------- - line | 884 - query | 207 - pos | 203 - node | 202 - space | 197 - status | 195 - public | 194 - title | 190 - wait | 190 - org | 189 - user | 189 - coauthors | 188 - disabled | 185 - indexed | 184 - cleaned | 180 - bad | 179 - date | 179 - world | 176 - state | 172 - subtitle | 169 - auth | 168 - abstract | 161 - array | 5 - age | 2 - foo | 2 - fool | 1 -(26 rows) - --- sort/hash -SELECT count(distinct j) FROM testjsonb; - count -------- - 894 -(1 row) - -SET enable_hashagg = off; -SELECT count(*) FROM (SELECT j FROM (SELECT * FROM testjsonb UNION ALL SELECT * FROM testjsonb) js GROUP BY j) js2; - count -------- - 894 -(1 row) - -SET enable_hashagg = on; -SET enable_sort = off; -SELECT count(*) FROM (SELECT j FROM (SELECT * FROM testjsonb UNION ALL SELECT * FROM testjsonb) js GROUP BY j) js2; - count -------- - 894 -(1 row) - -SELECT distinct * FROM (values (jsonb '{}' || ''::text),('{}')) v(j); - j ----- - {} -(1 row) - -SET enable_sort = on; -RESET enable_hashagg; -RESET enable_sort; -DROP INDEX jidx; -DROP INDEX jidx_array; --- btree -CREATE INDEX jidx ON testjsonb USING btree (j); -SET enable_seqscan = off; -SELECT count(*) FROM testjsonb WHERE j > '{"p":1}'; - count -------- - 884 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j = '{"pos":98, "line":371, "node":"CBA", "indexed":true}'; - count -------- - 1 -(1 row) - ---gin path opclass -DROP INDEX jidx; -CREATE INDEX jidx ON testjsonb USING gin (j jsonb_path_ops); -SET enable_seqscan = off; -SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}'; - count -------- - 15 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}'; - count -------- - 2 -(1 row) - --- exercise GIN_SEARCH_MODE_ALL -SELECT count(*) FROM testjsonb WHERE j @> '{}'; - count -------- - 1012 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.wait == null))'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.wait ? 
(@ == null))'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '"CC" == $.wait'; - count -------- - 15 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == "CC" && true == $.public'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25.0'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "foo"'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "bar"'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.array[*] == "bar"))'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array ? (@[*] == "bar"))'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array[*] ? (@ == "bar"))'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @@ 'exists($)'; - count -------- - 1012 -(1 row) - -EXPLAIN (COSTS OFF) -SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)'; - QUERY PLAN -------------------------------------------------------------------- - Aggregate - -> Bitmap Heap Scan on testjsonb - Recheck Cond: (j @? '$."wait"?(@ == null)'::jsonpath) - -> Bitmap Index Scan on jidx - Index Cond: (j @? '$."wait"?(@ == null)'::jsonpath) -(5 rows) - -SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)'; - count -------- - 1 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? ("CC" == @)'; - count -------- - 15 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.wait == "CC" && true == @.public)'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.age ? (@ == 25)'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.age == 25.0)'; - count -------- - 2 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.array[*] == "bar")'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.array ? (@[*] == "bar")'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.array[*] ? (@ == "bar")'; - count -------- - 3 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$'; - count -------- - 1012 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? '$.public'; - count -------- - 194 -(1 row) - -SELECT count(*) FROM testjsonb WHERE j @? 
'$.bar'; - count -------- - 0 -(1 row) - -RESET enable_seqscan; -DROP INDEX jidx; --- nested tests -SELECT '{"ff":{"a":12,"b":16}}'::jsonb; - jsonb ----------------------------- - {"ff": {"a": 12, "b": 16}} -(1 row) - -SELECT '{"ff":{"a":12,"b":16},"qq":123}'::jsonb; - jsonb ---------------------------------------- - {"ff": {"a": 12, "b": 16}, "qq": 123} -(1 row) - -SELECT '{"aa":["a","aaa"],"qq":{"a":12,"b":16,"c":["c1","c2"],"d":{"d1":"d1","d2":"d2","d1":"d3"}}}'::jsonb; - jsonb --------------------------------------------------------------------------------------------------- - {"aa": ["a", "aaa"], "qq": {"a": 12, "b": 16, "c": ["c1", "c2"], "d": {"d1": "d3", "d2": "d2"}}} -(1 row) - -SELECT '{"aa":["a","aaa"],"qq":{"a":"12","b":"16","c":["c1","c2"],"d":{"d1":"d1","d2":"d2"}}}'::jsonb; - jsonb ------------------------------------------------------------------------------------------------------- - {"aa": ["a", "aaa"], "qq": {"a": "12", "b": "16", "c": ["c1", "c2"], "d": {"d1": "d1", "d2": "d2"}}} -(1 row) - -SELECT '{"aa":["a","aaa"],"qq":{"a":"12","b":"16","c":["c1","c2",["c3"],{"c4":4}],"d":{"d1":"d1","d2":"d2"}}}'::jsonb; - jsonb -------------------------------------------------------------------------------------------------------------------------- - {"aa": ["a", "aaa"], "qq": {"a": "12", "b": "16", "c": ["c1", "c2", ["c3"], {"c4": 4}], "d": {"d1": "d1", "d2": "d2"}}} -(1 row) - -SELECT '{"ff":["a","aaa"]}'::jsonb; - jsonb ----------------------- - {"ff": ["a", "aaa"]} -(1 row) - -SELECT - '{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'ff', - '{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'qq', - ('{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'Y') IS NULL AS f, - ('{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb ->> 'Y') IS NULL AS t, - '{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'x'; - ?column? | ?column? | f | t | ?column? ---------------------+----------+---+---+---------- - {"a": 12, "b": 16} | 123 | f | t | [1, 2] -(1 row) - --- nested containment -SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[1,2]}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":[2,1],"c":"b"}'::jsonb @> '{"a":[1,2]}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":{"1":2},"c":"b"}'::jsonb @> '{"a":[1,2]}'; - ?column? ----------- - f -(1 row) - -SELECT '{"a":{"2":1},"c":"b"}'::jsonb @> '{"a":[1,2]}'; - ?column? ----------- - f -(1 row) - -SELECT '{"a":{"1":2},"c":"b"}'::jsonb @> '{"a":{"1":2}}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":{"2":1},"c":"b"}'::jsonb @> '{"a":{"1":2}}'; - ?column? ----------- - f -(1 row) - -SELECT '["a","b"]'::jsonb @> '["a","b","c","b"]'; - ?column? ----------- - f -(1 row) - -SELECT '["a","b","c","b"]'::jsonb @> '["a","b"]'; - ?column? ----------- - t -(1 row) - -SELECT '["a","b","c",[1,2]]'::jsonb @> '["a",[1,2]]'; - ?column? ----------- - t -(1 row) - -SELECT '["a","b","c",[1,2]]'::jsonb @> '["b",[1,2]]'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[1]}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[2]}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[3]}'; - ?column? ----------- - f -(1 row) - -SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"c":3}]}'; - ?column? ----------- - t -(1 row) - -SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"x":4}]}'; - ?column? 
----------- - t -(1 row) - -SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"x":4},3]}'; - ?column? ----------- - f -(1 row) - -SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"x":4},1]}'; - ?column? ----------- - t -(1 row) - --- check some corner cases for indexed nested containment (bug #13756) -create temp table nestjsonb (j jsonb); -insert into nestjsonb (j) values ('{"a":[["b",{"x":1}],["b",{"x":2}]],"c":3}'); -insert into nestjsonb (j) values ('[[14,2,3]]'); -insert into nestjsonb (j) values ('[1,[14,2,3]]'); -create index on nestjsonb using gin(j jsonb_path_ops); -set enable_seqscan = on; -set enable_bitmapscan = off; -select * from nestjsonb where j @> '{"a":[[{"x":2}]]}'::jsonb; - j ---------------------------------------------------- - {"a": [["b", {"x": 1}], ["b", {"x": 2}]], "c": 3} -(1 row) - -select * from nestjsonb where j @> '{"c":3}'; - j ---------------------------------------------------- - {"a": [["b", {"x": 1}], ["b", {"x": 2}]], "c": 3} -(1 row) - -select * from nestjsonb where j @> '[[14]]'; - j ------------------ - [[14, 2, 3]] - [1, [14, 2, 3]] -(2 rows) - -set enable_seqscan = off; -set enable_bitmapscan = on; -select * from nestjsonb where j @> '{"a":[[{"x":2}]]}'::jsonb; - j ---------------------------------------------------- - {"a": [["b", {"x": 1}], ["b", {"x": 2}]], "c": 3} -(1 row) - -select * from nestjsonb where j @> '{"c":3}'; - j ---------------------------------------------------- - {"a": [["b", {"x": 1}], ["b", {"x": 2}]], "c": 3} -(1 row) - -select * from nestjsonb where j @> '[[14]]'; - j ------------------ - [[14, 2, 3]] - [1, [14, 2, 3]] -(2 rows) - -reset enable_seqscan; -reset enable_bitmapscan; --- nested object field / array index lookup -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'n'; - ?column? ----------- - null -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'a'; - ?column? ----------- - 1 -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'b'; - ?column? ----------- - [1, 2] -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'c'; - ?column? ----------- - {"1": 2} -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'd'; - ?column? ---------------- - {"1": [2, 3]} -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'd' -> '1'; - ?column? ----------- - [2, 3] -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'e'; - ?column? ----------- - -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 0; --expecting error - ?column? ----------- - -(1 row) - -SELECT '["a","b","c",[1,2],null]'::jsonb -> 0; - ?column? ----------- - "a" -(1 row) - -SELECT '["a","b","c",[1,2],null]'::jsonb -> 1; - ?column? ----------- - "b" -(1 row) - -SELECT '["a","b","c",[1,2],null]'::jsonb -> 2; - ?column? ----------- - "c" -(1 row) - -SELECT '["a","b","c",[1,2],null]'::jsonb -> 3; - ?column? ----------- - [1, 2] -(1 row) - -SELECT '["a","b","c",[1,2],null]'::jsonb -> 3 -> 1; - ?column? ----------- - 2 -(1 row) - -SELECT '["a","b","c",[1,2],null]'::jsonb -> 4; - ?column? ----------- - null -(1 row) - -SELECT '["a","b","c",[1,2],null]'::jsonb -> 5; - ?column? ----------- - -(1 row) - -SELECT '["a","b","c",[1,2],null]'::jsonb -> -1; - ?column? ----------- - null -(1 row) - -SELECT '["a","b","c",[1,2],null]'::jsonb -> -5; - ?column? 
----------- - "a" -(1 row) - -SELECT '["a","b","c",[1,2],null]'::jsonb -> -6; - ?column? ----------- - -(1 row) - ---nested path extraction -SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{0}'; - ?column? ----------- - -(1 row) - -SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{a}'; - ?column? ----------- - "b" -(1 row) - -SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c}'; - ?column? ------------ - [1, 2, 3] -(1 row) - -SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,0}'; - ?column? ----------- - 1 -(1 row) - -SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,1}'; - ?column? ----------- - 2 -(1 row) - -SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,2}'; - ?column? ----------- - 3 -(1 row) - -SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,3}'; - ?column? ----------- - -(1 row) - -SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-1}'; - ?column? ----------- - 3 -(1 row) - -SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-3}'; - ?column? ----------- - 1 -(1 row) - -SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-4}'; - ?column? ----------- - -(1 row) - -SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{0}'; - ?column? ----------- - 0 -(1 row) - -SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{3}'; - ?column? ----------- - [3, 4] -(1 row) - -SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{4}'; - ?column? ---------------- - {"5": "five"} -(1 row) - -SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{4,5}'; - ?column? ----------- - "five" -(1 row) - ---nested exists -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'n'; - ?column? ----------- - t -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'a'; - ?column? ----------- - t -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'b'; - ?column? ----------- - t -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'c'; - ?column? ----------- - t -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'd'; - ?column? ----------- - t -(1 row) - -SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'e'; - ?column? 
----------- - f -(1 row) - --- jsonb_strip_nulls -select jsonb_strip_nulls(null); - jsonb_strip_nulls -------------------- - -(1 row) - -select jsonb_strip_nulls('1'); - jsonb_strip_nulls -------------------- - 1 -(1 row) - -select jsonb_strip_nulls('"a string"'); - jsonb_strip_nulls -------------------- - "a string" -(1 row) - -select jsonb_strip_nulls('null'); - jsonb_strip_nulls -------------------- - null -(1 row) - -select jsonb_strip_nulls('[1,2,null,3,4]'); - jsonb_strip_nulls --------------------- - [1, 2, null, 3, 4] -(1 row) - -select jsonb_strip_nulls('{"a":1,"b":null,"c":[2,null,3],"d":{"e":4,"f":null}}'); - jsonb_strip_nulls --------------------------------------------- - {"a": 1, "c": [2, null, 3], "d": {"e": 4}} -(1 row) - -select jsonb_strip_nulls('[1,{"a":1,"b":null,"c":2},3]'); - jsonb_strip_nulls --------------------------- - [1, {"a": 1, "c": 2}, 3] -(1 row) - --- an empty object is not null and should not be stripped -select jsonb_strip_nulls('{"a": {"b": null, "c": null}, "d": {} }'); - jsonb_strip_nulls --------------------- - {"a": {}, "d": {}} -(1 row) - -select jsonb_pretty('{"a": "test", "b": [1, 2, 3], "c": "test3", "d":{"dd": "test4", "dd2":{"ddd": "test5"}}}'); - jsonb_pretty ----------------------------- - { + - "a": "test", + - "b": [ + - 1, + - 2, + - 3 + - ], + - "c": "test3", + - "d": { + - "dd": "test4", + - "dd2": { + - "ddd": "test5"+ - } + - } + - } -(1 row) - -select jsonb_pretty('[{"f1":1,"f2":null},2,null,[[{"x":true},6,7],8],3]'); - jsonb_pretty ---------------------------- - [ + - { + - "f1": 1, + - "f2": null + - }, + - 2, + - null, + - [ + - [ + - { + - "x": true+ - }, + - 6, + - 7 + - ], + - 8 + - ], + - 3 + - ] -(1 row) - -select jsonb_pretty('{"a":["b", "c"], "d": {"e":"f"}}'); - jsonb_pretty ------------------- - { + - "a": [ + - "b", + - "c" + - ], + - "d": { + - "e": "f"+ - } + - } -(1 row) - -select jsonb_concat('{"d": "test", "a": [1, 2]}', '{"g": "test2", "c": {"c1":1, "c2":2}}'); - jsonb_concat -------------------------------------------------------------------- - {"a": [1, 2], "c": {"c1": 1, "c2": 2}, "d": "test", "g": "test2"} -(1 row) - -select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{"cq":"l", "b":"g", "fg":false}'; - ?column? ---------------------------------------------- - {"b": "g", "aa": 1, "cq": "l", "fg": false} -(1 row) - -select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{"aq":"l"}'; - ?column? ---------------------------------------- - {"b": 2, "aa": 1, "aq": "l", "cq": 3} -(1 row) - -select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{"aa":"l"}'; - ?column? ------------------------------- - {"b": 2, "aa": "l", "cq": 3} -(1 row) - -select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{}'; - ?column? ----------------------------- - {"b": 2, "aa": 1, "cq": 3} -(1 row) - -select '["a", "b"]'::jsonb || '["c"]'; - ?column? ------------------ - ["a", "b", "c"] -(1 row) - -select '["a", "b"]'::jsonb || '["c", "d"]'; - ?column? ----------------------- - ["a", "b", "c", "d"] -(1 row) - -select '["c"]' || '["a", "b"]'::jsonb; - ?column? ------------------ - ["c", "a", "b"] -(1 row) - -select '["a", "b"]'::jsonb || '"c"'; - ?column? ------------------ - ["a", "b", "c"] -(1 row) - -select '"c"' || '["a", "b"]'::jsonb; - ?column? ------------------ - ["c", "a", "b"] -(1 row) - -select '[]'::jsonb || '["a"]'::jsonb; - ?column? ----------- - ["a"] -(1 row) - -select '[]'::jsonb || '"a"'::jsonb; - ?column? ----------- - ["a"] -(1 row) - -select '"b"'::jsonb || '"a"'::jsonb; - ?column? 
------------- - ["b", "a"] -(1 row) - -select '{}'::jsonb || '{"a":"b"}'::jsonb; - ?column? ------------- - {"a": "b"} -(1 row) - -select '[]'::jsonb || '{"a":"b"}'::jsonb; - ?column? --------------- - [{"a": "b"}] -(1 row) - -select '{"a":"b"}'::jsonb || '[]'::jsonb; - ?column? --------------- - [{"a": "b"}] -(1 row) - -select '"a"'::jsonb || '{"a":1}'; - ?column? ------------------ - ["a", {"a": 1}] -(1 row) - -select '{"a":1}' || '"a"'::jsonb; - ?column? ------------------ - [{"a": 1}, "a"] -(1 row) - -select '[3]'::jsonb || '{}'::jsonb; - ?column? ----------- - [3, {}] -(1 row) - -select '3'::jsonb || '[]'::jsonb; - ?column? ----------- - [3] -(1 row) - -select '3'::jsonb || '4'::jsonb; - ?column? ----------- - [3, 4] -(1 row) - -select '3'::jsonb || '{}'::jsonb; - ?column? ----------- - [3, {}] -(1 row) - -select '["a", "b"]'::jsonb || '{"c":1}'; - ?column? ----------------------- - ["a", "b", {"c": 1}] -(1 row) - -select '{"c": 1}'::jsonb || '["a", "b"]'; - ?column? ----------------------- - [{"c": 1}, "a", "b"] -(1 row) - -select '{}'::jsonb || '{"cq":"l", "b":"g", "fg":false}'; - ?column? ------------------------------------- - {"b": "g", "cq": "l", "fg": false} -(1 row) - -select pg_column_size('{}'::jsonb || '{}'::jsonb) = pg_column_size('{}'::jsonb); - ?column? ----------- - t -(1 row) - -select pg_column_size('{"aa":1}'::jsonb || '{"b":2}'::jsonb) = pg_column_size('{"aa":1, "b":2}'::jsonb); - ?column? ----------- - t -(1 row) - -select pg_column_size('{"aa":1, "b":2}'::jsonb || '{}'::jsonb) = pg_column_size('{"aa":1, "b":2}'::jsonb); - ?column? ----------- - t -(1 row) - -select pg_column_size('{}'::jsonb || '{"aa":1, "b":2}'::jsonb) = pg_column_size('{"aa":1, "b":2}'::jsonb); - ?column? ----------- - t -(1 row) - -select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'a'); - jsonb_delete ------------------- - {"b": 2, "c": 3} -(1 row) - -select jsonb_delete('{"a":null , "b":2, "c":3}'::jsonb, 'a'); - jsonb_delete ------------------- - {"b": 2, "c": 3} -(1 row) - -select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'b'); - jsonb_delete ------------------- - {"a": 1, "c": 3} -(1 row) - -select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'c'); - jsonb_delete ------------------- - {"a": 1, "b": 2} -(1 row) - -select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'd'); - jsonb_delete --------------------------- - {"a": 1, "b": 2, "c": 3} -(1 row) - -select '{"a":1 , "b":2, "c":3}'::jsonb - 'a'; - ?column? ------------------- - {"b": 2, "c": 3} -(1 row) - -select '{"a":null , "b":2, "c":3}'::jsonb - 'a'; - ?column? ------------------- - {"b": 2, "c": 3} -(1 row) - -select '{"a":1 , "b":2, "c":3}'::jsonb - 'b'; - ?column? ------------------- - {"a": 1, "c": 3} -(1 row) - -select '{"a":1 , "b":2, "c":3}'::jsonb - 'c'; - ?column? ------------------- - {"a": 1, "b": 2} -(1 row) - -select '{"a":1 , "b":2, "c":3}'::jsonb - 'd'; - ?column? --------------------------- - {"a": 1, "b": 2, "c": 3} -(1 row) - -select pg_column_size('{"a":1 , "b":2, "c":3}'::jsonb - 'b') = pg_column_size('{"a":1, "b":2}'::jsonb); - ?column? ----------- - t -(1 row) - -select '["a","b","c"]'::jsonb - 3; - ?column? ------------------ - ["a", "b", "c"] -(1 row) - -select '["a","b","c"]'::jsonb - 2; - ?column? ------------- - ["a", "b"] -(1 row) - -select '["a","b","c"]'::jsonb - 1; - ?column? ------------- - ["a", "c"] -(1 row) - -select '["a","b","c"]'::jsonb - 0; - ?column? ------------- - ["b", "c"] -(1 row) - -select '["a","b","c"]'::jsonb - -1; - ?column? 
------------- - ["a", "b"] -(1 row) - -select '["a","b","c"]'::jsonb - -2; - ?column? ------------- - ["a", "c"] -(1 row) - -select '["a","b","c"]'::jsonb - -3; - ?column? ------------- - ["b", "c"] -(1 row) - -select '["a","b","c"]'::jsonb - -4; - ?column? ------------------ - ["a", "b", "c"] -(1 row) - -select '{"a":1 , "b":2, "c":3}'::jsonb - '{b}'::text[]; - ?column? ------------------- - {"a": 1, "c": 3} -(1 row) - -select '{"a":1 , "b":2, "c":3}'::jsonb - '{c,b}'::text[]; - ?column? ----------- - {"a": 1} -(1 row) - -select '{"a":1 , "b":2, "c":3}'::jsonb - '{}'::text[]; - ?column? --------------------------- - {"a": 1, "b": 2, "c": 3} -(1 row) - -select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{n}', '[1,2,3]'); - jsonb_set --------------------------------------------------------------------------- - {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": [1, 2, 3]} -(1 row) - -select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '[1,2,3]'); - jsonb_set ------------------------------------------------------------------------------ - {"a": 1, "b": [1, [1, 2, 3]], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} -(1 row) - -select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,1,0}', '[1,2,3]'); - jsonb_set ------------------------------------------------------------------------------ - {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [[1, 2, 3], 3]}, "n": null} -(1 row) - -select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,NULL,0}', '[1,2,3]'); -ERROR: path element at position 2 is null -select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{n}', '{"1": 2}'); - jsonb_set -------------------------------------------------------------------------- - {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": {"1": 2}} -(1 row) - -select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '{"1": 2}'); - jsonb_set ----------------------------------------------------------------------------- - {"a": 1, "b": [1, {"1": 2}], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} -(1 row) - -select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,1,0}', '{"1": 2}'); - jsonb_set ----------------------------------------------------------------------------- - {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [{"1": 2}, 3]}, "n": null} -(1 row) - -select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,NULL,0}', '{"1": 2}'); -ERROR: path element at position 2 is null -select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '"test"'); - jsonb_set --------------------------------------------------------------------------- - {"a": 1, "b": [1, "test"], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} -(1 row) - -select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '{"f": "test"}'); - jsonb_set ---------------------------------------------------------------------------------- - {"a": 1, "b": [1, {"f": "test"}], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} -(1 row) - -select jsonb_delete_path('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}', '{n}'); - jsonb_delete_path ----------------------------------------------------------- - {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}} -(1 row) - -select 
jsonb_delete_path('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}', '{b,-1}'); - jsonb_delete_path ------------------------------------------------------------------- - {"a": 1, "b": [1], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} -(1 row) - -select jsonb_delete_path('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}', '{d,1,0}'); - jsonb_delete_path ------------------------------------------------------------------- - {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [3]}, "n": null} -(1 row) - -select jsonb_delete_path('{"a":[]}', '{"a",-2147483648}'); - jsonb_delete_path -------------------- - {"a": []} -(1 row) - -select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{n}'; - ?column? ----------------------------------------------------------- - {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}} -(1 row) - -select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{b,-1}'; - ?column? ------------------------------------------------------------------- - {"a": 1, "b": [1], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} -(1 row) - -select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{b,-1e}'; -- invalid array subscript -ERROR: path element at position 2 is not an integer: "-1e" -select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{d,1,0}'; - ?column? ------------------------------------------------------------------- - {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [3]}, "n": null} -(1 row) - --- empty structure and error conditions for delete and replace -select '"a"'::jsonb - 'a'; -- error -ERROR: cannot delete from scalar -select '{}'::jsonb - 'a'; - ?column? ----------- - {} -(1 row) - -select '[]'::jsonb - 'a'; - ?column? ----------- - [] -(1 row) - -select '"a"'::jsonb - 1; -- error -ERROR: cannot delete from scalar -select '{}'::jsonb - 1; -- error -ERROR: cannot delete from object using integer index -select '[]'::jsonb - 1; - ?column? ----------- - [] -(1 row) - -select '"a"'::jsonb #- '{a}'; -- error -ERROR: cannot delete path in scalar -select '{}'::jsonb #- '{a}'; - ?column? ----------- - {} -(1 row) - -select '[]'::jsonb #- '{a}'; - ?column? 
----------- - [] -(1 row) - -select jsonb_set('"a"','{a}','"b"'); --error -ERROR: cannot set path in scalar -select jsonb_set('{}','{a}','"b"', false); - jsonb_set ------------ - {} -(1 row) - -select jsonb_set('[]','{1}','"b"', false); - jsonb_set ------------ - [] -(1 row) - -select jsonb_set('[{"f1":1,"f2":null},2,null,3]', '{0}','[2,3,4]', false); - jsonb_set -------------------------- - [[2, 3, 4], 2, null, 3] -(1 row) - --- jsonb_set adding instead of replacing --- prepend to array -select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{b,-33}','{"foo":123}'); - jsonb_set -------------------------------------------------------- - {"a": 1, "b": [{"foo": 123}, 0, 1, 2], "c": {"d": 4}} -(1 row) - --- append to array -select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{b,33}','{"foo":123}'); - jsonb_set -------------------------------------------------------- - {"a": 1, "b": [0, 1, 2, {"foo": 123}], "c": {"d": 4}} -(1 row) - --- check nesting levels addition -select jsonb_set('{"a":1,"b":[4,5,[0,1,2],6,7],"c":{"d":4}}','{b,2,33}','{"foo":123}'); - jsonb_set ---------------------------------------------------------------------- - {"a": 1, "b": [4, 5, [0, 1, 2, {"foo": 123}], 6, 7], "c": {"d": 4}} -(1 row) - --- add new key -select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{c,e}','{"foo":123}'); - jsonb_set ------------------------------------------------------------- - {"a": 1, "b": [0, 1, 2], "c": {"d": 4, "e": {"foo": 123}}} -(1 row) - --- adding doesn't do anything if elements before last aren't present -select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{x,-33}','{"foo":123}'); - jsonb_set ------------------------------------------ - {"a": 1, "b": [0, 1, 2], "c": {"d": 4}} -(1 row) - -select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{x,y}','{"foo":123}'); - jsonb_set ------------------------------------------ - {"a": 1, "b": [0, 1, 2], "c": {"d": 4}} -(1 row) - --- add to empty object -select jsonb_set('{}','{x}','{"foo":123}'); - jsonb_set ---------------------- - {"x": {"foo": 123}} -(1 row) - ---add to empty array -select jsonb_set('[]','{0}','{"foo":123}'); - jsonb_set ----------------- - [{"foo": 123}] -(1 row) - -select jsonb_set('[]','{99}','{"foo":123}'); - jsonb_set ----------------- - [{"foo": 123}] -(1 row) - -select jsonb_set('[]','{-99}','{"foo":123}'); - jsonb_set ----------------- - [{"foo": 123}] -(1 row) - -select jsonb_set('{"a": [1, 2, 3]}', '{a, non_integer}', '"new_value"'); -ERROR: path element at position 2 is not an integer: "non_integer" -select jsonb_set('{"a": {"b": [1, 2, 3]}}', '{a, b, non_integer}', '"new_value"'); -ERROR: path element at position 3 is not an integer: "non_integer" -select jsonb_set('{"a": {"b": [1, 2, 3]}}', '{a, b, NULL}', '"new_value"'); -ERROR: path element at position 3 is null --- jsonb_set_lax -\pset null NULL --- pass though non nulls to jsonb_set -select jsonb_set_lax('{"a":1,"b":2}','{b}','5') ; - jsonb_set_lax ------------------- - {"a": 1, "b": 5} -(1 row) - -select jsonb_set_lax('{"a":1,"b":2}','{d}','6', true) ; - jsonb_set_lax --------------------------- - {"a": 1, "b": 2, "d": 6} -(1 row) - --- using the default treatment -select jsonb_set_lax('{"a":1,"b":2}','{b}',null); - jsonb_set_lax ---------------------- - {"a": 1, "b": null} -(1 row) - -select jsonb_set_lax('{"a":1,"b":2}','{d}',null,true); - jsonb_set_lax ------------------------------ - {"a": 1, "b": 2, "d": null} -(1 row) - --- errors -select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, true, null); -ERROR: null_value_treatment must be "delete_key", 
"return_target", "use_json_null", or "raise_exception" -select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, true, 'no_such_treatment'); -ERROR: null_value_treatment must be "delete_key", "return_target", "use_json_null", or "raise_exception" --- explicit treatments -select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'raise_exception') as raise_exception; -ERROR: JSON value must not be null -DETAIL: Exception was raised because null_value_treatment is "raise_exception". -HINT: To avoid, either change the null_value_treatment argument or ensure that an SQL NULL is not passed. -select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'return_target') as return_target; - return_target ------------------- - {"a": 1, "b": 2} -(1 row) - -select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'delete_key') as delete_key; - delete_key ------------- - {"a": 1} -(1 row) - -select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'use_json_null') as use_json_null; - use_json_null ---------------------- - {"a": 1, "b": null} -(1 row) - -\pset null '' --- jsonb_insert -select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '"new_value"'); - jsonb_insert -------------------------------- - {"a": [0, "new_value", 1, 2]} -(1 row) - -select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '"new_value"', true); - jsonb_insert -------------------------------- - {"a": [0, 1, "new_value", 2]} -(1 row) - -select jsonb_insert('{"a": {"b": {"c": [0, 1, "test1", "test2"]}}}', '{a, b, c, 2}', '"new_value"'); - jsonb_insert ------------------------------------------------------------- - {"a": {"b": {"c": [0, 1, "new_value", "test1", "test2"]}}} -(1 row) - -select jsonb_insert('{"a": {"b": {"c": [0, 1, "test1", "test2"]}}}', '{a, b, c, 2}', '"new_value"', true); - jsonb_insert ------------------------------------------------------------- - {"a": {"b": {"c": [0, 1, "test1", "new_value", "test2"]}}} -(1 row) - -select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '{"b": "value"}'); - jsonb_insert ----------------------------------- - {"a": [0, {"b": "value"}, 1, 2]} -(1 row) - -select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '["value1", "value2"]'); - jsonb_insert ----------------------------------------- - {"a": [0, ["value1", "value2"], 1, 2]} -(1 row) - --- edge cases -select jsonb_insert('{"a": [0,1,2]}', '{a, 0}', '"new_value"'); - jsonb_insert -------------------------------- - {"a": ["new_value", 0, 1, 2]} -(1 row) - -select jsonb_insert('{"a": [0,1,2]}', '{a, 0}', '"new_value"', true); - jsonb_insert -------------------------------- - {"a": [0, "new_value", 1, 2]} -(1 row) - -select jsonb_insert('{"a": [0,1,2]}', '{a, 2}', '"new_value"'); - jsonb_insert -------------------------------- - {"a": [0, 1, "new_value", 2]} -(1 row) - -select jsonb_insert('{"a": [0,1,2]}', '{a, 2}', '"new_value"', true); - jsonb_insert -------------------------------- - {"a": [0, 1, 2, "new_value"]} -(1 row) - -select jsonb_insert('{"a": [0,1,2]}', '{a, -1}', '"new_value"'); - jsonb_insert -------------------------------- - {"a": [0, 1, "new_value", 2]} -(1 row) - -select jsonb_insert('{"a": [0,1,2]}', '{a, -1}', '"new_value"', true); - jsonb_insert -------------------------------- - {"a": [0, 1, 2, "new_value"]} -(1 row) - -select jsonb_insert('[]', '{1}', '"new_value"'); - jsonb_insert ---------------- - ["new_value"] -(1 row) - -select jsonb_insert('[]', '{1}', '"new_value"', true); - jsonb_insert ---------------- - ["new_value"] -(1 row) - -select jsonb_insert('{"a": []}', 
'{a, 1}', '"new_value"'); - jsonb_insert ----------------------- - {"a": ["new_value"]} -(1 row) - -select jsonb_insert('{"a": []}', '{a, 1}', '"new_value"', true); - jsonb_insert ----------------------- - {"a": ["new_value"]} -(1 row) - -select jsonb_insert('{"a": [0,1,2]}', '{a, 10}', '"new_value"'); - jsonb_insert -------------------------------- - {"a": [0, 1, 2, "new_value"]} -(1 row) - -select jsonb_insert('{"a": [0,1,2]}', '{a, -10}', '"new_value"'); - jsonb_insert -------------------------------- - {"a": ["new_value", 0, 1, 2]} -(1 row) - --- jsonb_insert should be able to insert new value for objects, but not to replace -select jsonb_insert('{"a": {"b": "value"}}', '{a, c}', '"new_value"'); - jsonb_insert ------------------------------------------ - {"a": {"b": "value", "c": "new_value"}} -(1 row) - -select jsonb_insert('{"a": {"b": "value"}}', '{a, c}', '"new_value"', true); - jsonb_insert ------------------------------------------ - {"a": {"b": "value", "c": "new_value"}} -(1 row) - -select jsonb_insert('{"a": {"b": "value"}}', '{a, b}', '"new_value"'); -ERROR: cannot replace existing key -HINT: Try using the function jsonb_set to replace key value. -select jsonb_insert('{"a": {"b": "value"}}', '{a, b}', '"new_value"', true); -ERROR: cannot replace existing key -HINT: Try using the function jsonb_set to replace key value. --- jsonb subscript -select ('123'::jsonb)['a']; - jsonb -------- - -(1 row) - -select ('123'::jsonb)[0]; - jsonb -------- - -(1 row) - -select ('123'::jsonb)[NULL]; - jsonb -------- - -(1 row) - -select ('{"a": 1}'::jsonb)['a']; - jsonb -------- - 1 -(1 row) - -select ('{"a": 1}'::jsonb)[0]; - jsonb -------- - -(1 row) - -select ('{"a": 1}'::jsonb)['not_exist']; - jsonb -------- - -(1 row) - -select ('{"a": 1}'::jsonb)[NULL]; - jsonb -------- - -(1 row) - -select ('[1, "2", null]'::jsonb)['a']; - jsonb -------- - -(1 row) - -select ('[1, "2", null]'::jsonb)[0]; - jsonb -------- - 1 -(1 row) - -select ('[1, "2", null]'::jsonb)['1']; - jsonb -------- - "2" -(1 row) - -select ('[1, "2", null]'::jsonb)[1.0]; -ERROR: subscript type numeric is not supported -LINE 1: select ('[1, "2", null]'::jsonb)[1.0]; - ^ -HINT: jsonb subscript must be coercible to either integer or text. 
-select ('[1, "2", null]'::jsonb)[2]; - jsonb -------- - null -(1 row) - -select ('[1, "2", null]'::jsonb)[3]; - jsonb -------- - -(1 row) - -select ('[1, "2", null]'::jsonb)[-2]; - jsonb -------- - "2" -(1 row) - -select ('[1, "2", null]'::jsonb)[1]['a']; - jsonb -------- - -(1 row) - -select ('[1, "2", null]'::jsonb)[1][0]; - jsonb -------- - -(1 row) - -select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['b']; - jsonb -------- - "c" -(1 row) - -select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d']; - jsonb ------------ - [1, 2, 3] -(1 row) - -select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d'][1]; - jsonb -------- - 2 -(1 row) - -select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d']['a']; - jsonb -------- - -(1 row) - -select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1']; - jsonb ---------------- - {"a2": "aaa"} -(1 row) - -select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1']['a2']; - jsonb -------- - "aaa" -(1 row) - -select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1']['a2']['a3']; - jsonb -------- - -(1 row) - -select ('{"a": ["a1", {"b1": ["aaa", "bbb", "ccc"]}], "b": "bb"}'::jsonb)['a'][1]['b1']; - jsonb ------------------------ - ["aaa", "bbb", "ccc"] -(1 row) - -select ('{"a": ["a1", {"b1": ["aaa", "bbb", "ccc"]}], "b": "bb"}'::jsonb)['a'][1]['b1'][2]; - jsonb -------- - "ccc" -(1 row) - --- slices are not supported -select ('{"a": 1}'::jsonb)['a':'b']; -ERROR: jsonb subscript does not support slices -LINE 1: select ('{"a": 1}'::jsonb)['a':'b']; - ^ -select ('[1, "2", null]'::jsonb)[1:2]; -ERROR: jsonb subscript does not support slices -LINE 1: select ('[1, "2", null]'::jsonb)[1:2]; - ^ -select ('[1, "2", null]'::jsonb)[:2]; -ERROR: jsonb subscript does not support slices -LINE 1: select ('[1, "2", null]'::jsonb)[:2]; - ^ -select ('[1, "2", null]'::jsonb)[1:]; -ERROR: jsonb subscript does not support slices -LINE 1: select ('[1, "2", null]'::jsonb)[1:]; - ^ -select ('[1, "2", null]'::jsonb)[:]; -ERROR: jsonb subscript does not support slices -create TEMP TABLE test_jsonb_subscript ( - id int, - test_json jsonb -); -insert into test_jsonb_subscript values -(1, '{}'), -- empty jsonb -(2, '{"key": "value"}'); -- jsonb with data --- update empty jsonb -update test_jsonb_subscript set test_json['a'] = '1' where id = 1; -select * from test_jsonb_subscript; - id | test_json -----+------------------ - 2 | {"key": "value"} - 1 | {"a": 1} -(2 rows) - --- update jsonb with some data -update test_jsonb_subscript set test_json['a'] = '1' where id = 2; -select * from test_jsonb_subscript; - id | test_json -----+-------------------------- - 1 | {"a": 1} - 2 | {"a": 1, "key": "value"} -(2 rows) - --- replace jsonb -update test_jsonb_subscript set test_json['a'] = '"test"'; -select * from test_jsonb_subscript; - id | test_json -----+------------------------------- - 1 | {"a": "test"} - 2 | {"a": "test", "key": "value"} -(2 rows) - --- replace by object -update test_jsonb_subscript set test_json['a'] = '{"b": 1}'::jsonb; -select * from test_jsonb_subscript; - id | test_json -----+--------------------------------- - 1 | {"a": {"b": 1}} - 2 | {"a": {"b": 1}, "key": "value"} -(2 rows) - --- replace by array -update test_jsonb_subscript set test_json['a'] = '[1, 2, 3]'::jsonb; -select * from test_jsonb_subscript; - id | test_json -----+---------------------------------- - 1 | {"a": [1, 2, 3]} - 2 | {"a": [1, 2, 3], "key": "value"} -(2 rows) - --- use jsonb subscription in where clause -select * from 
test_jsonb_subscript where test_json['key'] = '"value"'; - id | test_json -----+---------------------------------- - 2 | {"a": [1, 2, 3], "key": "value"} -(1 row) - -select * from test_jsonb_subscript where test_json['key_doesnt_exists'] = '"value"'; - id | test_json -----+----------- -(0 rows) - -select * from test_jsonb_subscript where test_json['key'] = '"wrong_value"'; - id | test_json -----+----------- -(0 rows) - --- NULL -update test_jsonb_subscript set test_json[NULL] = '1'; -ERROR: jsonb subscript in assignment must not be null -update test_jsonb_subscript set test_json['another_key'] = NULL; -select * from test_jsonb_subscript; - id | test_json -----+------------------------------------------------------- - 1 | {"a": [1, 2, 3], "another_key": null} - 2 | {"a": [1, 2, 3], "key": "value", "another_key": null} -(2 rows) - --- NULL as jsonb source -insert into test_jsonb_subscript values (3, NULL); -update test_jsonb_subscript set test_json['a'] = '1' where id = 3; -select * from test_jsonb_subscript; - id | test_json -----+------------------------------------------------------- - 1 | {"a": [1, 2, 3], "another_key": null} - 2 | {"a": [1, 2, 3], "key": "value", "another_key": null} - 3 | {"a": 1} -(3 rows) - -update test_jsonb_subscript set test_json = NULL where id = 3; -update test_jsonb_subscript set test_json[0] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+--------------------------------------------------------------- - 1 | {"0": 1, "a": [1, 2, 3], "another_key": null} - 2 | {"0": 1, "a": [1, 2, 3], "key": "value", "another_key": null} - 3 | [1] -(3 rows) - --- Fill the gaps logic -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '[0]'); -update test_jsonb_subscript set test_json[5] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+-------------------------------- - 1 | [0, null, null, null, null, 1] -(1 row) - -update test_jsonb_subscript set test_json[-4] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+----------------------------- - 1 | [0, null, 1, null, null, 1] -(1 row) - -update test_jsonb_subscript set test_json[-8] = '1'; -ERROR: path element at position 1 is out of range: -8 -select * from test_jsonb_subscript; - id | test_json -----+----------------------------- - 1 | [0, null, 1, null, null, 1] -(1 row) - --- keep consistent values position -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '[]'); -update test_jsonb_subscript set test_json[5] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+----------------------------------- - 1 | [null, null, null, null, null, 1] -(1 row) - --- create the whole path -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '{}'); -update test_jsonb_subscript set test_json['a'][0]['b'][0]['c'] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+---------------------------- - 1 | {"a": [{"b": [{"c": 1}]}]} -(1 row) - -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '{}'); -update test_jsonb_subscript set test_json['a'][2]['b'][2]['c'][2] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+------------------------------------------------------------------ - 1 | {"a": [null, null, {"b": [null, null, {"c": [null, null, 1]}]}]} -(1 row) - --- create the whole path with already existing keys -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '{"b": 1}'); -update test_jsonb_subscript set 
test_json['a'][0] = '2'; -select * from test_jsonb_subscript; - id | test_json -----+-------------------- - 1 | {"a": [2], "b": 1} -(1 row) - --- the start jsonb is an object, first subscript is treated as a key -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '{}'); -update test_jsonb_subscript set test_json[0]['a'] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+----------------- - 1 | {"0": {"a": 1}} -(1 row) - --- the start jsonb is an array -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '[]'); -update test_jsonb_subscript set test_json[0]['a'] = '1'; -update test_jsonb_subscript set test_json[2]['b'] = '2'; -select * from test_jsonb_subscript; - id | test_json -----+---------------------------- - 1 | [{"a": 1}, null, {"b": 2}] -(1 row) - --- overwriting an existing path -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '{}'); -update test_jsonb_subscript set test_json['a']['b'][1] = '1'; -update test_jsonb_subscript set test_json['a']['b'][10] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+---------------------------------------------------------------------------- - 1 | {"a": {"b": [null, 1, null, null, null, null, null, null, null, null, 1]}} -(1 row) - -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '[]'); -update test_jsonb_subscript set test_json[0][0][0] = '1'; -update test_jsonb_subscript set test_json[0][0][1] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+------------ - 1 | [[[1, 1]]] -(1 row) - -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '{}'); -update test_jsonb_subscript set test_json['a']['b'][10] = '1'; -update test_jsonb_subscript set test_json['a'][10][10] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+------------------------------------------------------------------------------------------------------------------------------------------------------ - 1 | {"a": {"b": [null, null, null, null, null, null, null, null, null, null, 1], "10": [null, null, null, null, null, null, null, null, null, null, 1]}} -(1 row) - --- an empty sub element -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '{"a": {}}'); -update test_jsonb_subscript set test_json['a']['b']['c'][2] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+-------------------------------------- - 1 | {"a": {"b": {"c": [null, null, 1]}}} -(1 row) - -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '{"a": []}'); -update test_jsonb_subscript set test_json['a'][1]['c'][2] = '1'; -select * from test_jsonb_subscript; - id | test_json -----+--------------------------------------- - 1 | {"a": [null, {"c": [null, null, 1]}]} -(1 row) - --- trying replace assuming a composite object, but it's an element or a value -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, '{"a": 1}'); -update test_jsonb_subscript set test_json['a']['b'] = '1'; -ERROR: cannot replace existing key -DETAIL: The path assumes key is a composite object, but it is a scalar value. -update test_jsonb_subscript set test_json['a']['b']['c'] = '1'; -ERROR: cannot replace existing key -DETAIL: The path assumes key is a composite object, but it is a scalar value. 
-update test_jsonb_subscript set test_json['a'][0] = '1'; -ERROR: cannot replace existing key -DETAIL: The path assumes key is a composite object, but it is a scalar value. -update test_jsonb_subscript set test_json['a'][0]['c'] = '1'; -ERROR: cannot replace existing key -DETAIL: The path assumes key is a composite object, but it is a scalar value. -update test_jsonb_subscript set test_json['a'][0][0] = '1'; -ERROR: cannot replace existing key -DETAIL: The path assumes key is a composite object, but it is a scalar value. --- trying replace assuming a composite object, but it's a raw scalar -delete from test_jsonb_subscript; -insert into test_jsonb_subscript values (1, 'null'); -update test_jsonb_subscript set test_json[0] = '1'; -ERROR: cannot replace existing key -DETAIL: The path assumes key is a composite object, but it is a scalar value. -update test_jsonb_subscript set test_json[0][0] = '1'; -ERROR: cannot replace existing key -DETAIL: The path assumes key is a composite object, but it is a scalar value. --- try some things with short-header and toasted subscript values -drop table test_jsonb_subscript; -create temp table test_jsonb_subscript ( - id text, - test_json jsonb -); -insert into test_jsonb_subscript values('foo', '{"foo": "bar"}'); -insert into test_jsonb_subscript - select s, ('{"' || s || '": "bar"}')::jsonb from repeat('xyzzy', 500) s; -select length(id), test_json[id] from test_jsonb_subscript; - length | test_json ---------+----------- - 3 | "bar" - 2500 | "bar" -(2 rows) - -update test_jsonb_subscript set test_json[id] = '"baz"'; -select length(id), test_json[id] from test_jsonb_subscript; - length | test_json ---------+----------- - 3 | "baz" - 2500 | "baz" -(2 rows) - -\x -table test_jsonb_subscript; --[ RECORD 1 ]-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -id | foo -test_json | {"foo": "baz"} --[ RECORD 2 ]-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -id | 
xyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzy -test_json | 
{"xyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzy": "baz"} - -\x --- jsonb to tsvector -select to_tsvector('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::jsonb); - to_tsvector ---------------------------------------------------------------------------- - 'aaa':1 'bbb':2 'ccc':4 'ddd':3 'eee':6 'fff':7 'ggg':8 'hhh':10 'iii':11 -(1 row) - --- jsonb to tsvector with config -select to_tsvector('simple', '{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::jsonb); - to_tsvector ---------------------------------------------------------------------------- - 'aaa':1 'bbb':2 'ccc':4 'ddd':3 'eee':6 'fff':7 'ggg':8 'hhh':10 'iii':11 -(1 row) - --- jsonb to tsvector with stop words -select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": ["the eee fff ggg"], "c": {"d": "hhh. 
iii"}}'::jsonb); - to_tsvector ----------------------------------------------------------------------------- - 'aaa':1 'bbb':3 'ccc':5 'ddd':4 'eee':8 'fff':9 'ggg':10 'hhh':12 'iii':13 -(1 row) - --- jsonb to tsvector with numeric values -select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": 123, "c": 456}'::jsonb); - to_tsvector ---------------------------------- - 'aaa':1 'bbb':3 'ccc':5 'ddd':4 -(1 row) - --- jsonb_to_tsvector -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"all"'); - jsonb_to_tsvector ----------------------------------------------------------------------------------------- - '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"key"'); - jsonb_to_tsvector --------------------------------- - 'b':2 'c':4 'd':6 'f':8 'g':10 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"string"'); - jsonb_to_tsvector -------------------- - 'aaa':1 'bbb':3 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"numeric"'); - jsonb_to_tsvector -------------------- - '123':1 '456':3 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"boolean"'); - jsonb_to_tsvector -------------------- - 'fals':3 'true':1 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["string", "numeric"]'); - jsonb_to_tsvector ---------------------------------- - '123':5 '456':7 'aaa':1 'bbb':3 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"all"'); - jsonb_to_tsvector ----------------------------------------------------------------------------------------- - '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"key"'); - jsonb_to_tsvector --------------------------------- - 'b':2 'c':4 'd':6 'f':8 'g':10 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"string"'); - jsonb_to_tsvector -------------------- - 'aaa':1 'bbb':3 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"numeric"'); - jsonb_to_tsvector -------------------- - '123':1 '456':3 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"boolean"'); - jsonb_to_tsvector -------------------- - 'fals':3 'true':1 -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["string", "numeric"]'); - jsonb_to_tsvector ---------------------------------- - '123':5 '456':7 'aaa':1 'bbb':3 -(1 row) - --- to_tsvector corner cases -select to_tsvector('""'::jsonb); - to_tsvector -------------- - -(1 row) - -select to_tsvector('{}'::jsonb); - to_tsvector -------------- - -(1 row) - -select to_tsvector('[]'::jsonb); - to_tsvector -------------- 
- -(1 row) - -select to_tsvector('null'::jsonb); - to_tsvector -------------- - -(1 row) - --- jsonb_to_tsvector corner cases -select jsonb_to_tsvector('""'::jsonb, '"all"'); - jsonb_to_tsvector -------------------- - -(1 row) - -select jsonb_to_tsvector('{}'::jsonb, '"all"'); - jsonb_to_tsvector -------------------- - -(1 row) - -select jsonb_to_tsvector('[]'::jsonb, '"all"'); - jsonb_to_tsvector -------------------- - -(1 row) - -select jsonb_to_tsvector('null'::jsonb, '"all"'); - jsonb_to_tsvector -------------------- - -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '""'); -ERROR: wrong flag in flag array: "" -HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '{}'); -ERROR: wrong flag type, only arrays and scalars are allowed -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '[]'); - jsonb_to_tsvector -------------------- - -(1 row) - -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, 'null'); -ERROR: flag array element is not a string -HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". -select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["all", null]'); -ERROR: flag array element is not a string -HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". --- ts_headline for jsonb -select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh')); - ts_headline ------------------------------------------------------------------------------------------------------------------- - {"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]} -(1 row) - -select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh')); - ts_headline ------------------------------------------------------------------------------------------------ - {"a": "aaa bbb", "b": {"c": "ccc ddd fff"}, "d": ["ggg hhh", "iii jjj"]} -(1 row) - -select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >'); - ts_headline ---------------------------------------------------------------------------------------------------- - {"a": "aaa ", "b": {"c": "ccc fff", "c1": "ccc1 ddd1"}, "d": ["ggg ", "iii jjj"]} -(1 row) - -select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >'); - ts_headline ---------------------------------------------------------------------------------------------------- - {"a": "aaa ", "b": {"c": "ccc fff", "c1": "ccc1 ddd1"}, "d": ["ggg ", "iii jjj"]} -(1 row) - --- corner cases for ts_headline with jsonb -select ts_headline('null'::jsonb, tsquery('aaa & bbb')); - ts_headline -------------- - null -(1 row) - -select ts_headline('{}'::jsonb, tsquery('aaa & bbb')); - ts_headline -------------- - {} -(1 row) - -select ts_headline('[]'::jsonb, tsquery('aaa & bbb')); - ts_headline -------------- - [] -(1 
row) - --- casts -select 'true'::jsonb::bool; - bool ------- - t -(1 row) - -select '[]'::jsonb::bool; -ERROR: cannot cast jsonb array to type boolean -select '1.0'::jsonb::float; - float8 --------- - 1 -(1 row) - -select '[1.0]'::jsonb::float; -ERROR: cannot cast jsonb array to type double precision -select '12345'::jsonb::int4; - int4 -------- - 12345 -(1 row) - -select '"hello"'::jsonb::int4; -ERROR: cannot cast jsonb string to type integer -select '12345'::jsonb::numeric; - numeric ---------- - 12345 -(1 row) - -select '{}'::jsonb::numeric; -ERROR: cannot cast jsonb object to type numeric -select '12345.05'::jsonb::numeric; - numeric ----------- - 12345.05 -(1 row) - -select '12345.05'::jsonb::float4; - float4 ----------- - 12345.05 -(1 row) - -select '12345.05'::jsonb::float8; - float8 ----------- - 12345.05 -(1 row) - -select '12345.05'::jsonb::int2; - int2 -------- - 12345 -(1 row) - -select '12345.05'::jsonb::int4; - int4 -------- - 12345 -(1 row) - -select '12345.05'::jsonb::int8; - int8 -------- - 12345 -(1 row) - -select '12345.0000000000000000000000000000000000000000000005'::jsonb::numeric; - numeric ------------------------------------------------------- - 12345.0000000000000000000000000000000000000000000005 -(1 row) - -select '12345.0000000000000000000000000000000000000000000005'::jsonb::float4; - float4 --------- - 12345 -(1 row) - -select '12345.0000000000000000000000000000000000000000000005'::jsonb::float8; - float8 --------- - 12345 -(1 row) - -select '12345.0000000000000000000000000000000000000000000005'::jsonb::int2; - int2 -------- - 12345 -(1 row) - -select '12345.0000000000000000000000000000000000000000000005'::jsonb::int4; - int4 -------- - 12345 -(1 row) - -select '12345.0000000000000000000000000000000000000000000005'::jsonb::int8; - int8 -------- - 12345 -(1 row) - +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/json_encoding_2.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/json_encoding.out --- /Users/admin/pgsql/src/test/regress/expected/json_encoding_2.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/json_encoding.out 2024-12-13 13:20:12 @@ -1,9 +1,2 @@ --- --- encoding-sensitive tests for json and jsonb --- --- We provide expected-results files for UTF8 (json_encoding.out) --- and for SQL_ASCII (json_encoding_1.out). Skip otherwise. -SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII') - AS skip_test \gset -\if :skip_test -\quit +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? 
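
The jsonb and json_encoding diffs above, like the jsonpath diff below, do not point at JSON regressions: in each case the entire expected output is missing and psql only reports that the server socket has disappeared, so these look like follow-on failures from losing the server earlier in the run rather than independent breakage. On a healthy instance the lost jsonb cast coverage is easy to spot-check by hand; a minimal sketch in plain psql (results as in the expected output quoted above):

select 'true'::jsonb::bool;        -- t
select '[]'::jsonb::bool;          -- ERROR: cannot cast jsonb array to type boolean
select '12345.05'::jsonb::numeric; -- 12345.05
select '12345.05'::jsonb::int4;    -- 12345 (rounds like a numeric-to-int cast)
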
diff -U3 /Users/admin/pgsql/src/test/regress/expected/jsonpath.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/jsonpath.out --- /Users/admin/pgsql/src/test/regress/expected/jsonpath.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/jsonpath.out 2024-12-13 13:20:12 @@ -1,1296 +1,2 @@ ---jsonpath io -select ''::jsonpath; -ERROR: invalid input syntax for type jsonpath: "" -LINE 1: select ''::jsonpath; - ^ -select '$'::jsonpath; - jsonpath ----------- - $ -(1 row) - -select 'strict $'::jsonpath; - jsonpath ----------- - strict $ -(1 row) - -select 'lax $'::jsonpath; - jsonpath ----------- - $ -(1 row) - -select '$.a'::jsonpath; - jsonpath ----------- - $."a" -(1 row) - -select '$.a.v'::jsonpath; - jsonpath ------------ - $."a"."v" -(1 row) - -select '$.a.*'::jsonpath; - jsonpath ----------- - $."a".* -(1 row) - -select '$.*[*]'::jsonpath; - jsonpath ----------- - $.*[*] -(1 row) - -select '$.a[*]'::jsonpath; - jsonpath ----------- - $."a"[*] -(1 row) - -select '$.a[*][*]'::jsonpath; - jsonpath -------------- - $."a"[*][*] -(1 row) - -select '$[*]'::jsonpath; - jsonpath ----------- - $[*] -(1 row) - -select '$[0]'::jsonpath; - jsonpath ----------- - $[0] -(1 row) - -select '$[*][0]'::jsonpath; - jsonpath ----------- - $[*][0] -(1 row) - -select '$[*].a'::jsonpath; - jsonpath ----------- - $[*]."a" -(1 row) - -select '$[*][0].a.b'::jsonpath; - jsonpath ------------------ - $[*][0]."a"."b" -(1 row) - -select '$.a.**.b'::jsonpath; - jsonpath --------------- - $."a".**."b" -(1 row) - -select '$.a.**{2}.b'::jsonpath; - jsonpath ------------------ - $."a".**{2}."b" -(1 row) - -select '$.a.**{2 to 2}.b'::jsonpath; - jsonpath ------------------ - $."a".**{2}."b" -(1 row) - -select '$.a.**{2 to 5}.b'::jsonpath; - jsonpath ----------------------- - $."a".**{2 to 5}."b" -(1 row) - -select '$.a.**{0 to 5}.b'::jsonpath; - jsonpath ----------------------- - $."a".**{0 to 5}."b" -(1 row) - -select '$.a.**{5 to last}.b'::jsonpath; - jsonpath -------------------------- - $."a".**{5 to last}."b" -(1 row) - -select '$.a.**{last}.b'::jsonpath; - jsonpath --------------------- - $."a".**{last}."b" -(1 row) - -select '$.a.**{last to 5}.b'::jsonpath; - jsonpath -------------------------- - $."a".**{last to 5}."b" -(1 row) - -select '$+1'::jsonpath; - jsonpath ----------- - ($ + 1) -(1 row) - -select '$-1'::jsonpath; - jsonpath ----------- - ($ - 1) -(1 row) - -select '$--+1'::jsonpath; - jsonpath ----------- - ($ - -1) -(1 row) - -select '$.a/+-1'::jsonpath; - jsonpath --------------- - ($."a" / -1) -(1 row) - -select '1 * 2 + 4 % -3 != false'::jsonpath; - jsonpath ---------------------------- - (1 * 2 + 4 % -3 != false) -(1 row) - -select '"\b\f\r\n\t\v\"\''\\"'::jsonpath; - jsonpath -------------------------- - "\b\f\r\n\t\u000b\"'\\" -(1 row) - -select '"\x50\u0067\u{53}\u{051}\u{00004C}"'::jsonpath; - jsonpath ----------- - "PgSQL" -(1 row) - -select '$.foo\x50\u0067\u{53}\u{051}\u{00004C}\t\"bar'::jsonpath; - jsonpath ---------------------- - $."fooPgSQL\t\"bar" -(1 row) - -select '"\z"'::jsonpath; -- unrecognized escape is just the literal char - jsonpath ----------- - "z" -(1 row) - -select '$.g ? ($.a == 1)'::jsonpath; - jsonpath --------------------- - $."g"?($."a" == 1) -(1 row) - -select '$.g ? (@ == 1)'::jsonpath; - jsonpath ----------------- - $."g"?(@ == 1) -(1 row) - -select '$.g ? (@.a == 1)'::jsonpath; - jsonpath --------------------- - $."g"?(@."a" == 1) -(1 row) - -select '$.g ? 
(@.a == 1 || @.a == 4)'::jsonpath; - jsonpath ----------------------------------- - $."g"?(@."a" == 1 || @."a" == 4) -(1 row) - -select '$.g ? (@.a == 1 && @.a == 4)'::jsonpath; - jsonpath ----------------------------------- - $."g"?(@."a" == 1 && @."a" == 4) -(1 row) - -select '$.g ? (@.a == 1 || @.a == 4 && @.b == 7)'::jsonpath; - jsonpath ------------------------------------------------- - $."g"?(@."a" == 1 || @."a" == 4 && @."b" == 7) -(1 row) - -select '$.g ? (@.a == 1 || !(@.a == 4) && @.b == 7)'::jsonpath; - jsonpath ---------------------------------------------------- - $."g"?(@."a" == 1 || !(@."a" == 4) && @."b" == 7) -(1 row) - -select '$.g ? (@.a == 1 || !(@.x >= 123 || @.a == 4) && @.b == 7)'::jsonpath; - jsonpath -------------------------------------------------------------------- - $."g"?(@."a" == 1 || !(@."x" >= 123 || @."a" == 4) && @."b" == 7) -(1 row) - -select '$.g ? (@.x >= @[*]?(@.a > "abc"))'::jsonpath; - jsonpath ---------------------------------------- - $."g"?(@."x" >= @[*]?(@."a" > "abc")) -(1 row) - -select '$.g ? ((@.x >= 123 || @.a == 4) is unknown)'::jsonpath; - jsonpath -------------------------------------------------- - $."g"?((@."x" >= 123 || @."a" == 4) is unknown) -(1 row) - -select '$.g ? (exists (@.x))'::jsonpath; - jsonpath ------------------------- - $."g"?(exists (@."x")) -(1 row) - -select '$.g ? (exists (@.x ? (@ == 14)))'::jsonpath; - jsonpath ----------------------------------- - $."g"?(exists (@."x"?(@ == 14))) -(1 row) - -select '$.g ? ((@.x >= 123 || @.a == 4) && exists (@.x ? (@ == 14)))'::jsonpath; - jsonpath ------------------------------------------------------------------- - $."g"?((@."x" >= 123 || @."a" == 4) && exists (@."x"?(@ == 14))) -(1 row) - -select '$.g ? (+@.x >= +-(+@.a + 2))'::jsonpath; - jsonpath ------------------------------------- - $."g"?(+@."x" >= +(-(+@."a" + 2))) -(1 row) - -select '$a'::jsonpath; - jsonpath ----------- - $"a" -(1 row) - -select '$a.b'::jsonpath; - jsonpath ----------- - $"a"."b" -(1 row) - -select '$a[*]'::jsonpath; - jsonpath ----------- - $"a"[*] -(1 row) - -select '$.g ? (@.zip == $zip)'::jsonpath; - jsonpath ---------------------------- - $."g"?(@."zip" == $"zip") -(1 row) - -select '$.a[1,2, 3 to 16]'::jsonpath; - jsonpath --------------------- - $."a"[1,2,3 to 16] -(1 row) - -select '$.a[$a + 1, ($b[*]) to -($[0] * 2)]'::jsonpath; - jsonpath ----------------------------------------- - $."a"[$"a" + 1,$"b"[*] to -($[0] * 2)] -(1 row) - -select '$.a[$.a.size() - 3]'::jsonpath; - jsonpath -------------------------- - $."a"[$."a".size() - 3] -(1 row) - -select 'last'::jsonpath; -ERROR: LAST is allowed only in array subscripts -LINE 1: select 'last'::jsonpath; - ^ -select '"last"'::jsonpath; - jsonpath ----------- - "last" -(1 row) - -select '$.last'::jsonpath; - jsonpath ----------- - $."last" -(1 row) - -select '$ ? (last > 0)'::jsonpath; -ERROR: LAST is allowed only in array subscripts -LINE 1: select '$ ? (last > 0)'::jsonpath; - ^ -select '$[last]'::jsonpath; - jsonpath ----------- - $[last] -(1 row) - -select '$[$[0] ? 
(last > 0)]'::jsonpath; - jsonpath --------------------- - $[$[0]?(last > 0)] -(1 row) - -select 'null.type()'::jsonpath; - jsonpath -------------- - null.type() -(1 row) - -select '1.type()'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1.t" of jsonpath input -LINE 1: select '1.type()'::jsonpath; - ^ -select '(1).type()'::jsonpath; - jsonpath ------------- - (1).type() -(1 row) - -select '1.2.type()'::jsonpath; - jsonpath --------------- - (1.2).type() -(1 row) - -select '"aaa".type()'::jsonpath; - jsonpath --------------- - "aaa".type() -(1 row) - -select 'true.type()'::jsonpath; - jsonpath -------------- - true.type() -(1 row) - -select '$.double().floor().ceiling().abs()'::jsonpath; - jsonpath ------------------------------------- - $.double().floor().ceiling().abs() -(1 row) - -select '$.keyvalue().key'::jsonpath; - jsonpath --------------------- - $.keyvalue()."key" -(1 row) - -select '$.datetime()'::jsonpath; - jsonpath --------------- - $.datetime() -(1 row) - -select '$.datetime("datetime template")'::jsonpath; - jsonpath ---------------------------------- - $.datetime("datetime template") -(1 row) - -select '$.bigint().integer().number().decimal()'::jsonpath; - jsonpath ------------------------------------------ - $.bigint().integer().number().decimal() -(1 row) - -select '$.boolean()'::jsonpath; - jsonpath -------------- - $.boolean() -(1 row) - -select '$.date()'::jsonpath; - jsonpath ----------- - $.date() -(1 row) - -select '$.decimal(4,2)'::jsonpath; - jsonpath ----------------- - $.decimal(4,2) -(1 row) - -select '$.string()'::jsonpath; - jsonpath ------------- - $.string() -(1 row) - -select '$.time()'::jsonpath; - jsonpath ----------- - $.time() -(1 row) - -select '$.time(6)'::jsonpath; - jsonpath ------------ - $.time(6) -(1 row) - -select '$.time_tz()'::jsonpath; - jsonpath -------------- - $.time_tz() -(1 row) - -select '$.time_tz(4)'::jsonpath; - jsonpath --------------- - $.time_tz(4) -(1 row) - -select '$.timestamp()'::jsonpath; - jsonpath ---------------- - $.timestamp() -(1 row) - -select '$.timestamp(2)'::jsonpath; - jsonpath ----------------- - $.timestamp(2) -(1 row) - -select '$.timestamp_tz()'::jsonpath; - jsonpath ------------------- - $.timestamp_tz() -(1 row) - -select '$.timestamp_tz(0)'::jsonpath; - jsonpath -------------------- - $.timestamp_tz(0) -(1 row) - -select '$ ? (@ starts with "abc")'::jsonpath; - jsonpath -------------------------- - $?(@ starts with "abc") -(1 row) - -select '$ ? (@ starts with $var)'::jsonpath; - jsonpath --------------------------- - $?(@ starts with $"var") -(1 row) - -select '$ ? (@ like_regex "(invalid pattern")'::jsonpath; -ERROR: invalid regular expression: parentheses () not balanced -LINE 1: select '$ ? (@ like_regex "(invalid pattern")'::jsonpath; - ^ -select '$ ? (@ like_regex "pattern")'::jsonpath; - jsonpath ----------------------------- - $?(@ like_regex "pattern") -(1 row) - -select '$ ? (@ like_regex "pattern" flag "")'::jsonpath; - jsonpath ----------------------------- - $?(@ like_regex "pattern") -(1 row) - -select '$ ? (@ like_regex "pattern" flag "i")'::jsonpath; - jsonpath -------------------------------------- - $?(@ like_regex "pattern" flag "i") -(1 row) - -select '$ ? (@ like_regex "pattern" flag "is")'::jsonpath; - jsonpath --------------------------------------- - $?(@ like_regex "pattern" flag "is") -(1 row) - -select '$ ? 
(@ like_regex "pattern" flag "isim")'::jsonpath; - jsonpath ---------------------------------------- - $?(@ like_regex "pattern" flag "ism") -(1 row) - -select '$ ? (@ like_regex "pattern" flag "xsms")'::jsonpath; -ERROR: XQuery "x" flag (expanded regular expressions) is not implemented -LINE 1: select '$ ? (@ like_regex "pattern" flag "xsms")'::jsonpath; - ^ -select '$ ? (@ like_regex "pattern" flag "q")'::jsonpath; - jsonpath -------------------------------------- - $?(@ like_regex "pattern" flag "q") -(1 row) - -select '$ ? (@ like_regex "pattern" flag "iq")'::jsonpath; - jsonpath --------------------------------------- - $?(@ like_regex "pattern" flag "iq") -(1 row) - -select '$ ? (@ like_regex "pattern" flag "smixq")'::jsonpath; - jsonpath ------------------------------------------ - $?(@ like_regex "pattern" flag "ismxq") -(1 row) - -select '$ ? (@ like_regex "pattern" flag "a")'::jsonpath; -ERROR: invalid input syntax for type jsonpath -LINE 1: select '$ ? (@ like_regex "pattern" flag "a")'::jsonpath; - ^ -DETAIL: Unrecognized flag character "a" in LIKE_REGEX predicate. -select '$ < 1'::jsonpath; - jsonpath ----------- - ($ < 1) -(1 row) - -select '($ < 1) || $.a.b <= $x'::jsonpath; - jsonpath ------------------------------- - ($ < 1 || $."a"."b" <= $"x") -(1 row) - -select '@ + 1'::jsonpath; -ERROR: @ is not allowed in root expressions -LINE 1: select '@ + 1'::jsonpath; - ^ -select '($).a.b'::jsonpath; - jsonpath ------------ - $."a"."b" -(1 row) - -select '($.a.b).c.d'::jsonpath; - jsonpath -------------------- - $."a"."b"."c"."d" -(1 row) - -select '($.a.b + -$.x.y).c.d'::jsonpath; - jsonpath ----------------------------------- - ($."a"."b" + -$."x"."y")."c"."d" -(1 row) - -select '(-+$.a.b).c.d'::jsonpath; - jsonpath -------------------------- - (-(+$."a"."b"))."c"."d" -(1 row) - -select '1 + ($.a.b + 2).c.d'::jsonpath; - jsonpath -------------------------------- - (1 + ($."a"."b" + 2)."c"."d") -(1 row) - -select '1 + ($.a.b > 2).c.d'::jsonpath; - jsonpath -------------------------------- - (1 + ($."a"."b" > 2)."c"."d") -(1 row) - -select '($)'::jsonpath; - jsonpath ----------- - $ -(1 row) - -select '(($))'::jsonpath; - jsonpath ----------- - $ -(1 row) - -select '((($ + 1)).a + ((2)).b ? ((((@ > 1)) || (exists(@.c)))))'::jsonpath; - jsonpath ---------------------------------------------------- - (($ + 1)."a" + (2)."b"?(@ > 1 || exists (@."c"))) -(1 row) - -select '$ ? (@.a < 1)'::jsonpath; - jsonpath ---------------- - $?(@."a" < 1) -(1 row) - -select '$ ? (@.a < -1)'::jsonpath; - jsonpath ----------------- - $?(@."a" < -1) -(1 row) - -select '$ ? (@.a < +1)'::jsonpath; - jsonpath ---------------- - $?(@."a" < 1) -(1 row) - -select '$ ? (@.a < .1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < 0.1) -(1 row) - -select '$ ? (@.a < -.1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < -0.1) -(1 row) - -select '$ ? (@.a < +.1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < 0.1) -(1 row) - -select '$ ? (@.a < 0.1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < 0.1) -(1 row) - -select '$ ? (@.a < -0.1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < -0.1) -(1 row) - -select '$ ? (@.a < +0.1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < 0.1) -(1 row) - -select '$ ? (@.a < 10.1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < 10.1) -(1 row) - -select '$ ? (@.a < -10.1)'::jsonpath; - jsonpath -------------------- - $?(@."a" < -10.1) -(1 row) - -select '$ ? 
(@.a < +10.1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < 10.1) -(1 row) - -select '$ ? (@.a < 1e1)'::jsonpath; - jsonpath ----------------- - $?(@."a" < 10) -(1 row) - -select '$ ? (@.a < -1e1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < -10) -(1 row) - -select '$ ? (@.a < +1e1)'::jsonpath; - jsonpath ----------------- - $?(@."a" < 10) -(1 row) - -select '$ ? (@.a < .1e1)'::jsonpath; - jsonpath ---------------- - $?(@."a" < 1) -(1 row) - -select '$ ? (@.a < -.1e1)'::jsonpath; - jsonpath ----------------- - $?(@."a" < -1) -(1 row) - -select '$ ? (@.a < +.1e1)'::jsonpath; - jsonpath ---------------- - $?(@."a" < 1) -(1 row) - -select '$ ? (@.a < 0.1e1)'::jsonpath; - jsonpath ---------------- - $?(@."a" < 1) -(1 row) - -select '$ ? (@.a < -0.1e1)'::jsonpath; - jsonpath ----------------- - $?(@."a" < -1) -(1 row) - -select '$ ? (@.a < +0.1e1)'::jsonpath; - jsonpath ---------------- - $?(@."a" < 1) -(1 row) - -select '$ ? (@.a < 10.1e1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < 101) -(1 row) - -select '$ ? (@.a < -10.1e1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < -101) -(1 row) - -select '$ ? (@.a < +10.1e1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < 101) -(1 row) - -select '$ ? (@.a < 1e-1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < 0.1) -(1 row) - -select '$ ? (@.a < -1e-1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < -0.1) -(1 row) - -select '$ ? (@.a < +1e-1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < 0.1) -(1 row) - -select '$ ? (@.a < .1e-1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < 0.01) -(1 row) - -select '$ ? (@.a < -.1e-1)'::jsonpath; - jsonpath -------------------- - $?(@."a" < -0.01) -(1 row) - -select '$ ? (@.a < +.1e-1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < 0.01) -(1 row) - -select '$ ? (@.a < 0.1e-1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < 0.01) -(1 row) - -select '$ ? (@.a < -0.1e-1)'::jsonpath; - jsonpath -------------------- - $?(@."a" < -0.01) -(1 row) - -select '$ ? (@.a < +0.1e-1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < 0.01) -(1 row) - -select '$ ? (@.a < 10.1e-1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < 1.01) -(1 row) - -select '$ ? (@.a < -10.1e-1)'::jsonpath; - jsonpath -------------------- - $?(@."a" < -1.01) -(1 row) - -select '$ ? (@.a < +10.1e-1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < 1.01) -(1 row) - -select '$ ? (@.a < 1e+1)'::jsonpath; - jsonpath ----------------- - $?(@."a" < 10) -(1 row) - -select '$ ? (@.a < -1e+1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < -10) -(1 row) - -select '$ ? (@.a < +1e+1)'::jsonpath; - jsonpath ----------------- - $?(@."a" < 10) -(1 row) - -select '$ ? (@.a < .1e+1)'::jsonpath; - jsonpath ---------------- - $?(@."a" < 1) -(1 row) - -select '$ ? (@.a < -.1e+1)'::jsonpath; - jsonpath ----------------- - $?(@."a" < -1) -(1 row) - -select '$ ? (@.a < +.1e+1)'::jsonpath; - jsonpath ---------------- - $?(@."a" < 1) -(1 row) - -select '$ ? (@.a < 0.1e+1)'::jsonpath; - jsonpath ---------------- - $?(@."a" < 1) -(1 row) - -select '$ ? (@.a < -0.1e+1)'::jsonpath; - jsonpath ----------------- - $?(@."a" < -1) -(1 row) - -select '$ ? (@.a < +0.1e+1)'::jsonpath; - jsonpath ---------------- - $?(@."a" < 1) -(1 row) - -select '$ ? (@.a < 10.1e+1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < 101) -(1 row) - -select '$ ? (@.a < -10.1e+1)'::jsonpath; - jsonpath ------------------- - $?(@."a" < -101) -(1 row) - -select '$ ? 
(@.a < +10.1e+1)'::jsonpath; - jsonpath ------------------ - $?(@."a" < 101) -(1 row) - --- numeric literals -select '0'::jsonpath; - jsonpath ----------- - 0 -(1 row) - -select '00'::jsonpath; -ERROR: trailing junk after numeric literal at or near "00" of jsonpath input -LINE 1: select '00'::jsonpath; - ^ -select '0755'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '0755'::jsonpath; - ^ -select '0.0'::jsonpath; - jsonpath ----------- - 0.0 -(1 row) - -select '0.000'::jsonpath; - jsonpath ----------- - 0.000 -(1 row) - -select '0.000e1'::jsonpath; - jsonpath ----------- - 0.00 -(1 row) - -select '0.000e2'::jsonpath; - jsonpath ----------- - 0.0 -(1 row) - -select '0.000e3'::jsonpath; - jsonpath ----------- - 0 -(1 row) - -select '0.0010'::jsonpath; - jsonpath ----------- - 0.0010 -(1 row) - -select '0.0010e-1'::jsonpath; - jsonpath ----------- - 0.00010 -(1 row) - -select '0.0010e+1'::jsonpath; - jsonpath ----------- - 0.010 -(1 row) - -select '0.0010e+2'::jsonpath; - jsonpath ----------- - 0.10 -(1 row) - -select '.001'::jsonpath; - jsonpath ----------- - 0.001 -(1 row) - -select '.001e1'::jsonpath; - jsonpath ----------- - 0.01 -(1 row) - -select '1.'::jsonpath; - jsonpath ----------- - 1 -(1 row) - -select '1.e1'::jsonpath; - jsonpath ----------- - 10 -(1 row) - -select '1a'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1a" of jsonpath input -LINE 1: select '1a'::jsonpath; - ^ -select '1e'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1e" of jsonpath input -LINE 1: select '1e'::jsonpath; - ^ -select '1.e'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1.e" of jsonpath input -LINE 1: select '1.e'::jsonpath; - ^ -select '1.2a'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1.2a" of jsonpath input -LINE 1: select '1.2a'::jsonpath; - ^ -select '1.2e'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1.2e" of jsonpath input -LINE 1: select '1.2e'::jsonpath; - ^ -select '1.2.e'::jsonpath; - jsonpath ------------ - (1.2)."e" -(1 row) - -select '(1.2).e'::jsonpath; - jsonpath ------------ - (1.2)."e" -(1 row) - -select '1e3'::jsonpath; - jsonpath ----------- - 1000 -(1 row) - -select '1.e3'::jsonpath; - jsonpath ----------- - 1000 -(1 row) - -select '1.e3.e'::jsonpath; - jsonpath ------------- - (1000)."e" -(1 row) - -select '1.e3.e4'::jsonpath; - jsonpath -------------- - (1000)."e4" -(1 row) - -select '1.2e3'::jsonpath; - jsonpath ----------- - 1200 -(1 row) - -select '1.2e3a'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1.2e3a" of jsonpath input -LINE 1: select '1.2e3a'::jsonpath; - ^ -select '1.2.e3'::jsonpath; - jsonpath ------------- - (1.2)."e3" -(1 row) - -select '(1.2).e3'::jsonpath; - jsonpath ------------- - (1.2)."e3" -(1 row) - -select '1..e'::jsonpath; - jsonpath ----------- - (1)."e" -(1 row) - -select '1..e3'::jsonpath; - jsonpath ----------- - (1)."e3" -(1 row) - -select '(1.).e'::jsonpath; - jsonpath ----------- - (1)."e" -(1 row) - -select '(1.).e3'::jsonpath; - jsonpath ----------- - (1)."e3" -(1 row) - -select '1?(2>3)'::jsonpath; - jsonpath -------------- - (1)?(2 > 3) -(1 row) - --- nondecimal -select '0b100101'::jsonpath; - jsonpath ----------- - 37 -(1 row) - -select '0o273'::jsonpath; - jsonpath ----------- - 187 -(1 row) - -select '0x42F'::jsonpath; - jsonpath ----------- - 1071 -(1 row) - --- error cases -select '0b'::jsonpath; -ERROR: trailing junk after numeric literal at or near "0b" of jsonpath input -LINE 1: select 
'0b'::jsonpath; - ^ -select '1b'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1b" of jsonpath input -LINE 1: select '1b'::jsonpath; - ^ -select '0b0x'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '0b0x'::jsonpath; - ^ -select '0o'::jsonpath; -ERROR: trailing junk after numeric literal at or near "0o" of jsonpath input -LINE 1: select '0o'::jsonpath; - ^ -select '1o'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1o" of jsonpath input -LINE 1: select '1o'::jsonpath; - ^ -select '0o0x'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '0o0x'::jsonpath; - ^ -select '0x'::jsonpath; -ERROR: trailing junk after numeric literal at or near "0x" of jsonpath input -LINE 1: select '0x'::jsonpath; - ^ -select '1x'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1x" of jsonpath input -LINE 1: select '1x'::jsonpath; - ^ -select '0x0y'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '0x0y'::jsonpath; - ^ --- underscores -select '1_000_000'::jsonpath; - jsonpath ----------- - 1000000 -(1 row) - -select '1_2_3'::jsonpath; - jsonpath ----------- - 123 -(1 row) - -select '0x1EEE_FFFF'::jsonpath; - jsonpath ------------ - 518979583 -(1 row) - -select '0o2_73'::jsonpath; - jsonpath ----------- - 187 -(1 row) - -select '0b10_0101'::jsonpath; - jsonpath ----------- - 37 -(1 row) - -select '1_000.000_005'::jsonpath; - jsonpath -------------- - 1000.000005 -(1 row) - -select '1_000.'::jsonpath; - jsonpath ----------- - 1000 -(1 row) - -select '.000_005'::jsonpath; - jsonpath ----------- - 0.000005 -(1 row) - -select '1_000.5e0_1'::jsonpath; - jsonpath ----------- - 10005 -(1 row) - --- error cases -select '_100'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '_100'::jsonpath; - ^ -select '100_'::jsonpath; -ERROR: trailing junk after numeric literal at or near "100_" of jsonpath input -LINE 1: select '100_'::jsonpath; - ^ -select '100__000'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '100__000'::jsonpath; - ^ -select '_1_000.5'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '_1_000.5'::jsonpath; - ^ -select '1_000_.5'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1_000_" of jsonpath input -LINE 1: select '1_000_.5'::jsonpath; - ^ -select '1_000._5'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1_000._" of jsonpath input -LINE 1: select '1_000._5'::jsonpath; - ^ -select '1_000.5_'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1_000.5_" of jsonpath input -LINE 1: select '1_000.5_'::jsonpath; - ^ -select '1_000.5e_1'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1_000.5e" of jsonpath input -LINE 1: select '1_000.5e_1'::jsonpath; - ^ --- underscore after prefix not allowed in JavaScript (but allowed in SQL) -select '0b_10_0101'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '0b_10_0101'::jsonpath; - ^ -select '0o_273'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '0o_273'::jsonpath; - ^ -select '0x_42F'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '0x_42F'::jsonpath; - ^ --- test non-error-throwing API -SELECT str as jsonpath, - pg_input_is_valid(str,'jsonpath') as ok, - errinfo.sql_error_code, - errinfo.message, - errinfo.detail, - errinfo.hint -FROM unnest(ARRAY['$ ? (@ like_regex "pattern" flag "smixq")'::text, - '$ ? 
(@ like_regex "pattern" flag "a")', - '@ + 1', - '00', - '1a']) str, - LATERAL pg_input_error_info(str, 'jsonpath') as errinfo; - jsonpath | ok | sql_error_code | message | detail | hint --------------------------------------------+----+----------------+-----------------------------------------------------------------------+----------------------------------------------------------+------ - $ ? (@ like_regex "pattern" flag "smixq") | t | | | | - $ ? (@ like_regex "pattern" flag "a") | f | 42601 | invalid input syntax for type jsonpath | Unrecognized flag character "a" in LIKE_REGEX predicate. | - @ + 1 | f | 42601 | @ is not allowed in root expressions | | - 00 | f | 42601 | trailing junk after numeric literal at or near "00" of jsonpath input | | - 1a | f | 42601 | trailing junk after numeric literal at or near "1a" of jsonpath input | | -(5 rows) - +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/jsonpath_encoding_2.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/jsonpath_encoding.out --- /Users/admin/pgsql/src/test/regress/expected/jsonpath_encoding_2.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/jsonpath_encoding.out 2024-12-13 13:20:12 @@ -1,9 +1,2 @@ --- --- encoding-sensitive tests for jsonpath --- --- We provide expected-results files for UTF8 (jsonpath_encoding.out) --- and for SQL_ASCII (jsonpath_encoding_1.out). Skip otherwise. -SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII') - AS skip_test \gset -\if :skip_test -\quit +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/jsonb_jsonpath.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/jsonb_jsonpath.out --- /Users/admin/pgsql/src/test/regress/expected/jsonb_jsonpath.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/jsonb_jsonpath.out 2024-12-13 13:20:12 @@ -1,4512 +1,2 @@ -select jsonb '{"a": 12}' @? '$'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": 12}' @? '1'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": 12}' @? '$.a.b'; - ?column? ----------- - f -(1 row) - -select jsonb '{"a": 12}' @? '$.b'; - ?column? ----------- - f -(1 row) - -select jsonb '{"a": 12}' @? '$.a + 2'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": 12}' @? '$.b + 2'; - ?column? ----------- - -(1 row) - -select jsonb '{"a": {"a": 12}}' @? '$.a.a'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"a": 12}}' @? '$.*.a'; - ?column? ----------- - t -(1 row) - -select jsonb '{"b": {"a": 12}}' @? '$.*.a'; - ?column? ----------- - t -(1 row) - -select jsonb '{"b": {"a": 12}}' @? '$.*.b'; - ?column? ----------- - f -(1 row) - -select jsonb '{"b": {"a": 12}}' @? 'strict $.*.b'; - ?column? ----------- - -(1 row) - -select jsonb '{}' @? '$.*'; - ?column? ----------- - f -(1 row) - -select jsonb '{"a": 1}' @? '$.*'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"b": 1}}' @? 'lax $.**{1}'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"b": 1}}' @? 'lax $.**{2}'; - ?column? 
----------- - t -(1 row) - -select jsonb '{"a": {"b": 1}}' @? 'lax $.**{3}'; - ?column? ----------- - f -(1 row) - -select jsonb '[]' @? '$[*]'; - ?column? ----------- - f -(1 row) - -select jsonb '[1]' @? '$[*]'; - ?column? ----------- - t -(1 row) - -select jsonb '[1]' @? '$[1]'; - ?column? ----------- - f -(1 row) - -select jsonb '[1]' @? 'strict $[1]'; - ?column? ----------- - -(1 row) - -select jsonb_path_query('[1]', 'strict $[1]'); -ERROR: jsonpath array subscript is out of bounds -select jsonb_path_query('[1]', 'strict $[1]', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb '[1]' @? 'lax $[10000000000000000]'; - ?column? ----------- - -(1 row) - -select jsonb '[1]' @? 'strict $[10000000000000000]'; - ?column? ----------- - -(1 row) - -select jsonb_path_query('[1]', 'lax $[10000000000000000]'); -ERROR: jsonpath array subscript is out of integer range -select jsonb_path_query('[1]', 'strict $[10000000000000000]'); -ERROR: jsonpath array subscript is out of integer range -select jsonb '[1]' @? '$[0]'; - ?column? ----------- - t -(1 row) - -select jsonb '[1]' @? '$[0.3]'; - ?column? ----------- - t -(1 row) - -select jsonb '[1]' @? '$[0.5]'; - ?column? ----------- - t -(1 row) - -select jsonb '[1]' @? '$[0.9]'; - ?column? ----------- - t -(1 row) - -select jsonb '[1]' @? '$[1.2]'; - ?column? ----------- - f -(1 row) - -select jsonb '[1]' @? 'strict $[1.2]'; - ?column? ----------- - -(1 row) - -select jsonb '{"a": [1,2,3], "b": [3,4,5]}' @? '$ ? (@.a[*] > @.b[*])'; - ?column? ----------- - f -(1 row) - -select jsonb '{"a": [1,2,3], "b": [3,4,5]}' @? '$ ? (@.a[*] >= @.b[*])'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": [1,2,3], "b": [3,4,"5"]}' @? '$ ? (@.a[*] >= @.b[*])'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": [1,2,3], "b": [3,4,"5"]}' @? 'strict $ ? (@.a[*] >= @.b[*])'; - ?column? ----------- - f -(1 row) - -select jsonb '{"a": [1,2,3], "b": [3,4,null]}' @? '$ ? (@.a[*] >= @.b[*])'; - ?column? ----------- - t -(1 row) - -select jsonb '1' @? '$ ? ((@ == "1") is unknown)'; - ?column? ----------- - t -(1 row) - -select jsonb '1' @? '$ ? ((@ == 1) is unknown)'; - ?column? ----------- - f -(1 row) - -select jsonb '[{"a": 1}, {"a": 2}]' @? '$[0 to 1] ? (@.a > 1)'; - ?column? 
----------- - t -(1 row) - -select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'lax $[*].a', silent => false); - jsonb_path_exists -------------------- - t -(1 row) - -select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'lax $[*].a', silent => true); - jsonb_path_exists -------------------- - t -(1 row) - -select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'strict $[*].a', silent => false); -ERROR: jsonpath member accessor can only be applied to an object -select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'strict $[*].a', silent => true); - jsonb_path_exists -------------------- - -(1 row) - -select jsonb_path_query('1', 'lax $.a'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('1', 'strict $.a'); -ERROR: jsonpath member accessor can only be applied to an object -select jsonb_path_query('1', 'strict $.*'); -ERROR: jsonpath wildcard member accessor can only be applied to an object -select jsonb_path_query('1', 'strict $.a', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('1', 'strict $.*', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'lax $.a'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $.a'); -ERROR: jsonpath member accessor can only be applied to an object -select jsonb_path_query('[]', 'strict $.a', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{}', 'lax $.a'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{}', 'strict $.a'); -ERROR: JSON object does not contain key "a" -select jsonb_path_query('{}', 'strict $.a', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('1', 'strict $[1]'); -ERROR: jsonpath array accessor can only be applied to an array -select jsonb_path_query('1', 'strict $[*]'); -ERROR: jsonpath wildcard array accessor can only be applied to an array -select jsonb_path_query('[]', 'strict $[1]'); -ERROR: jsonpath array subscript is out of bounds -select jsonb_path_query('[]', 'strict $["a"]'); -ERROR: jsonpath array subscript is not a single numeric value -select jsonb_path_query('1', 'strict $[1]', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('1', 'strict $[*]', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $[1]', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $["a"]', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{"a": 12, "b": {"a": 13}}', '$.a'); - jsonb_path_query ------------------- - 12 -(1 row) - -select jsonb_path_query('{"a": 12, "b": {"a": 13}}', '$.b'); - jsonb_path_query ------------------- - {"a": 13} -(1 row) - -select jsonb_path_query('{"a": 12, "b": {"a": 13}}', '$.*'); - jsonb_path_query ------------------- - 12 - {"a": 13} -(2 rows) - -select jsonb_path_query('{"a": 12, "b": {"a": 13}}', 'lax $.*.a'); - jsonb_path_query ------------------- - 13 -(1 row) - -select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[*].a'); - jsonb_path_query ------------------- - 13 -(1 row) - -select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[*].*'); - jsonb_path_query ------------------- - 13 - 14 -(2 rows) - -select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0].a'); - jsonb_path_query ------------------- -(0 rows) - -select 
jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[1].a'); - jsonb_path_query ------------------- - 13 -(1 row) - -select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[2].a'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0,1].a'); - jsonb_path_query ------------------- - 13 -(1 row) - -select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0 to 10].a'); - jsonb_path_query ------------------- - 13 -(1 row) - -select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0 to 10 / 0].a'); -ERROR: division by zero -select jsonb_path_query('[12, {"a": 13}, {"b": 14}, "ccc", true]', '$[2.5 - 1 to $.size() - 2]'); - jsonb_path_query ------------------- - {"a": 13} - {"b": 14} - "ccc" -(3 rows) - -select jsonb_path_query('1', 'lax $[0]'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('1', 'lax $[*]'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('[1]', 'lax $[0]'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('[1]', 'lax $[*]'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('[1,2,3]', 'lax $[*]'); - jsonb_path_query ------------------- - 1 - 2 - 3 -(3 rows) - -select jsonb_path_query('[1,2,3]', 'strict $[*].a'); -ERROR: jsonpath member accessor can only be applied to an object -select jsonb_path_query('[1,2,3]', 'strict $[*].a', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', '$[last]'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', '$[last ? (exists(last))]'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $[last]'); -ERROR: jsonpath array subscript is out of bounds -select jsonb_path_query('[]', 'strict $[last]', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[1]', '$[last]'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('[1,2,3]', '$[last]'); - jsonb_path_query ------------------- - 3 -(1 row) - -select jsonb_path_query('[1,2,3]', '$[last - 1]'); - jsonb_path_query ------------------- - 2 -(1 row) - -select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "number")]'); - jsonb_path_query ------------------- - 3 -(1 row) - -select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "string")]'); -ERROR: jsonpath array subscript is not a single numeric value -select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "string")]', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select * from jsonb_path_query('{"a": 10}', '$'); - jsonb_path_query ------------------- - {"a": 10} -(1 row) - -select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)'); -ERROR: could not find jsonpath variable "value" -select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '1'); -ERROR: "vars" argument is not an object -DETAIL: Jsonpath parameters should be encoded as key-value pairs of "vars" object. -select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '[{"value" : 13}]'); -ERROR: "vars" argument is not an object -DETAIL: Jsonpath parameters should be encoded as key-value pairs of "vars" object. -select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '{"value" : 13}'); - jsonb_path_query ------------------- - {"a": 10} -(1 row) - -select * from jsonb_path_query('{"a": 10}', '$ ? 
(@.a < $value)', '{"value" : 8}'); - jsonb_path_query ------------------- -(0 rows) - -select * from jsonb_path_query('{"a": 10}', '$.a ? (@ < $value)', '{"value" : 13}'); - jsonb_path_query ------------------- - 10 -(1 row) - -select * from jsonb_path_query('[10,11,12,13,14,15]', '$[*] ? (@ < $value)', '{"value" : 13}'); - jsonb_path_query ------------------- - 10 - 11 - 12 -(3 rows) - -select * from jsonb_path_query('[10,11,12,13,14,15]', '$[0,1] ? (@ < $x.value)', '{"x": {"value" : 13}}'); - jsonb_path_query ------------------- - 10 - 11 -(2 rows) - -select * from jsonb_path_query('[10,11,12,13,14,15]', '$[0 to 2] ? (@ < $value)', '{"value" : 15}'); - jsonb_path_query ------------------- - 10 - 11 - 12 -(3 rows) - -select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? (@ == "1")'); - jsonb_path_query ------------------- - "1" -(1 row) - -select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? (@ == $value)', '{"value" : "1"}'); - jsonb_path_query ------------------- - "1" -(1 row) - -select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? (@ == $value)', '{"value" : null}'); - jsonb_path_query ------------------- - null -(1 row) - -select * from jsonb_path_query('[1, "2", null]', '$[*] ? (@ != null)'); - jsonb_path_query ------------------- - 1 - "2" -(2 rows) - -select * from jsonb_path_query('[1, "2", null]', '$[*] ? (@ == null)'); - jsonb_path_query ------------------- - null -(1 row) - -select * from jsonb_path_query('{}', '$ ? (@ == @)'); - jsonb_path_query ------------------- -(0 rows) - -select * from jsonb_path_query('[]', 'strict $ ? (@ == @)'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**'); - jsonb_path_query ------------------- - {"a": {"b": 1}} - {"b": 1} - 1 -(3 rows) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0}'); - jsonb_path_query ------------------- - {"a": {"b": 1}} -(1 row) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0 to last}'); - jsonb_path_query ------------------- - {"a": {"b": 1}} - {"b": 1} - 1 -(3 rows) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1}'); - jsonb_path_query ------------------- - {"b": 1} -(1 row) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to last}'); - jsonb_path_query ------------------- - {"b": 1} - 1 -(2 rows) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{2}'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{2 to last}'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{3 to last}'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{last}'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**.b ? (@ > 0)'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0}.b ? (@ > 0)'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1}.b ? (@ > 0)'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0 to last}.b ? (@ > 0)'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to last}.b ? (@ > 0)'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to 2}.b ? 
(@ > 0)'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**.b ? (@ > 0)'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{0}.b ? (@ > 0)'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1}.b ? (@ > 0)'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{0 to last}.b ? (@ > 0)'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1 to last}.b ? (@ > 0)'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1 to 2}.b ? (@ > 0)'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{2 to 3}.b ? (@ > 0)'); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb '{"a": {"b": 1}}' @? '$.**.b ? ( @ > 0)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"b": 1}}' @? '$.**{0}.b ? ( @ > 0)'; - ?column? ----------- - f -(1 row) - -select jsonb '{"a": {"b": 1}}' @? '$.**{1}.b ? ( @ > 0)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"b": 1}}' @? '$.**{0 to last}.b ? ( @ > 0)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"b": 1}}' @? '$.**{1 to last}.b ? ( @ > 0)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"b": 1}}' @? '$.**{1 to 2}.b ? ( @ > 0)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**.b ? ( @ > 0)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{0}.b ? ( @ > 0)'; - ?column? ----------- - f -(1 row) - -select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{1}.b ? ( @ > 0)'; - ?column? ----------- - f -(1 row) - -select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{0 to last}.b ? ( @ > 0)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{1 to last}.b ? ( @ > 0)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{1 to 2}.b ? ( @ > 0)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{2 to 3}.b ? ( @ > 0)'; - ?column? ----------- - t -(1 row) - -select jsonb_path_query('{"g": {"x": 2}}', '$.g ? (exists (@.x))'); - jsonb_path_query ------------------- - {"x": 2} -(1 row) - -select jsonb_path_query('{"g": {"x": 2}}', '$.g ? (exists (@.y))'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{"g": {"x": 2}}', '$.g ? (exists (@.x ? (@ >= 2) ))'); - jsonb_path_query ------------------- - {"x": 2} -(1 row) - -select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? (exists (@.x))'); - jsonb_path_query ------------------- - {"x": 2} -(1 row) - -select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? (exists (@.x + "3"))'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? ((exists (@.x + "3")) is unknown)'); - jsonb_path_query ------------------- - {"x": 2} - {"y": 3} -(2 rows) - -select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g[*] ? (exists (@.x))'); - jsonb_path_query ------------------- - {"x": 2} -(1 row) - -select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g[*] ? 
((exists (@.x)) is unknown)'); - jsonb_path_query ------------------- - {"y": 3} -(1 row) - -select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g ? (exists (@[*].x))'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g ? ((exists (@[*].x)) is unknown)'); - jsonb_path_query ----------------------- - [{"x": 2}, {"y": 3}] -(1 row) - ---test ternary logic -select - x, y, - jsonb_path_query( - '[true, false, null]', - '$[*] ? (@ == true && ($x == true && $y == true) || - @ == false && !($x == true && $y == true) || - @ == null && ($x == true && $y == true) is unknown)', - jsonb_build_object('x', x, 'y', y) - ) as "x && y" -from - (values (jsonb 'true'), ('false'), ('"null"')) x(x), - (values (jsonb 'true'), ('false'), ('"null"')) y(y); - x | y | x && y ---------+--------+-------- - true | true | true - true | false | false - true | "null" | null - false | true | false - false | false | false - false | "null" | false - "null" | true | null - "null" | false | false - "null" | "null" | null -(9 rows) - -select - x, y, - jsonb_path_query( - '[true, false, null]', - '$[*] ? (@ == true && ($x == true || $y == true) || - @ == false && !($x == true || $y == true) || - @ == null && ($x == true || $y == true) is unknown)', - jsonb_build_object('x', x, 'y', y) - ) as "x || y" -from - (values (jsonb 'true'), ('false'), ('"null"')) x(x), - (values (jsonb 'true'), ('false'), ('"null"')) y(y); - x | y | x || y ---------+--------+-------- - true | true | true - true | false | true - true | "null" | true - false | true | true - false | false | false - false | "null" | null - "null" | true | true - "null" | false | null - "null" | "null" | null -(9 rows) - -select jsonb '{"a": 1, "b":1}' @? '$ ? (@.a == @.b)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"c": {"a": 1, "b":1}}' @? '$ ? (@.a == @.b)'; - ?column? ----------- - f -(1 row) - -select jsonb '{"c": {"a": 1, "b":1}}' @? '$.c ? (@.a == @.b)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"c": {"a": 1, "b":1}}' @? '$.c ? ($.c.a == @.b)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"c": {"a": 1, "b":1}}' @? '$.* ? (@.a == @.b)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": 1, "b":1}' @? '$.** ? (@.a == @.b)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"c": {"a": 1, "b":1}}' @? '$.** ? (@.a == @.b)'; - ?column? ----------- - t -(1 row) - -select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == 1 + 1)'); - jsonb_path_query ------------------- - {"a": 2, "b": 1} -(1 row) - -select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == (1 + 1))'); - jsonb_path_query ------------------- - {"a": 2, "b": 1} -(1 row) - -select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == @.b + 1)'); - jsonb_path_query ------------------- - {"a": 2, "b": 1} -(1 row) - -select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == (@.b + 1))'); - jsonb_path_query ------------------- - {"a": 2, "b": 1} -(1 row) - -select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == - 1)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == -1)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == -@.b)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == - @.b)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"c": {"a": 0, "b":1}}' @? '$.** ? (@.a == 1 - @.b)'; - ?column? 
----------- - t -(1 row) - -select jsonb '{"c": {"a": 2, "b":1}}' @? '$.** ? (@.a == 1 - - @.b)'; - ?column? ----------- - t -(1 row) - -select jsonb '{"c": {"a": 0, "b":1}}' @? '$.** ? (@.a == 1 - +@.b)'; - ?column? ----------- - t -(1 row) - -select jsonb '[1,2,3]' @? '$ ? (+@[*] > +2)'; - ?column? ----------- - t -(1 row) - -select jsonb '[1,2,3]' @? '$ ? (+@[*] > +3)'; - ?column? ----------- - f -(1 row) - -select jsonb '[1,2,3]' @? '$ ? (-@[*] < -2)'; - ?column? ----------- - t -(1 row) - -select jsonb '[1,2,3]' @? '$ ? (-@[*] < -3)'; - ?column? ----------- - f -(1 row) - -select jsonb '1' @? '$ ? ($ > 0)'; - ?column? ----------- - t -(1 row) - --- arithmetic errors -select jsonb_path_query('[1,2,0,3]', '$[*] ? (2 / @ > 0)'); - jsonb_path_query ------------------- - 1 - 2 - 3 -(3 rows) - -select jsonb_path_query('[1,2,0,3]', '$[*] ? ((2 / @ > 0) is unknown)'); - jsonb_path_query ------------------- - 0 -(1 row) - -select jsonb_path_query('0', '1 / $'); -ERROR: division by zero -select jsonb_path_query('0', '1 / $ + 2'); -ERROR: division by zero -select jsonb_path_query('0', '-(3 + 1 % $)'); -ERROR: division by zero -select jsonb_path_query('1', '$ + "2"'); -ERROR: right operand of jsonpath operator + is not a single numeric value -select jsonb_path_query('[1, 2]', '3 * $'); -ERROR: right operand of jsonpath operator * is not a single numeric value -select jsonb_path_query('"a"', '-$'); -ERROR: operand of unary jsonpath operator - is not a numeric value -select jsonb_path_query('[1,"2",3]', '+$'); -ERROR: operand of unary jsonpath operator + is not a numeric value -select jsonb_path_query('1', '$ + "2"', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[1, 2]', '3 * $', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('"a"', '-$', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[1,"2",3]', '+$', silent => true); - jsonb_path_query ------------------- - 1 -(1 row) - -select jsonb '["1",2,0,3]' @? '-$[*]'; - ?column? ----------- - t -(1 row) - -select jsonb '[1,"2",0,3]' @? '-$[*]'; - ?column? ----------- - t -(1 row) - -select jsonb '["1",2,0,3]' @? 'strict -$[*]'; - ?column? ----------- - -(1 row) - -select jsonb '[1,"2",0,3]' @? 'strict -$[*]'; - ?column? ----------- - -(1 row) - --- unwrapping of operator arguments in lax mode -select jsonb_path_query('{"a": [2]}', 'lax $.a * 3'); - jsonb_path_query ------------------- - 6 -(1 row) - -select jsonb_path_query('{"a": [2]}', 'lax $.a + 3'); - jsonb_path_query ------------------- - 5 -(1 row) - -select jsonb_path_query('{"a": [2, 3, 4]}', 'lax -$.a'); - jsonb_path_query ------------------- - -2 - -3 - -4 -(3 rows) - --- should fail -select jsonb_path_query('{"a": [1, 2]}', 'lax $.a * 3'); -ERROR: left operand of jsonpath operator * is not a single numeric value -select jsonb_path_query('{"a": [1, 2]}', 'lax $.a * 3', silent => true); - jsonb_path_query ------------------- -(0 rows) - --- any key on arrays with and without unwrapping. 
-select jsonb_path_query('{"a": [1,2,3], "b": [3,4,5]}', '$.*'); - jsonb_path_query ------------------- - [1, 2, 3] - [3, 4, 5] -(2 rows) - -select jsonb_path_query('[1,2,3]', '$.*'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[1,2,3,{"b": [3,4,5]}]', 'lax $.*'); - jsonb_path_query ------------------- - [3, 4, 5] -(1 row) - -select jsonb_path_query('[1,2,3,{"b": [3,4,5]}]', 'strict $.*'); -ERROR: jsonpath wildcard member accessor can only be applied to an object -select jsonb_path_query('[1,2,3,{"b": [3,4,5]}]', 'strict $.*', NULL, true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb '{"a": [1,2,3], "b": [3,4,5]}' @? '$.*'; - ?column? ----------- - t -(1 row) - -select jsonb '[1,2,3]' @? '$.*'; - ?column? ----------- - f -(1 row) - -select jsonb '[1,2,3,{"b": [3,4,5]}]' @? 'lax $.*'; - ?column? ----------- - t -(1 row) - -select jsonb '[1,2,3,{"b": [3,4,5]}]' @? 'strict $.*'; - ?column? ----------- - -(1 row) - --- extension: boolean expressions -select jsonb_path_query('2', '$ > 1'); - jsonb_path_query ------------------- - true -(1 row) - -select jsonb_path_query('2', '$ <= 1'); - jsonb_path_query ------------------- - false -(1 row) - -select jsonb_path_query('2', '$ == "2"'); - jsonb_path_query ------------------- - null -(1 row) - -select jsonb '2' @? '$ == "2"'; - ?column? ----------- - t -(1 row) - -select jsonb '2' @@ '$ > 1'; - ?column? ----------- - t -(1 row) - -select jsonb '2' @@ '$ <= 1'; - ?column? ----------- - f -(1 row) - -select jsonb '2' @@ '$ == "2"'; - ?column? ----------- - -(1 row) - -select jsonb '2' @@ '1'; - ?column? ----------- - -(1 row) - -select jsonb '{}' @@ '$'; - ?column? ----------- - -(1 row) - -select jsonb '[]' @@ '$'; - ?column? ----------- - -(1 row) - -select jsonb '[1,2,3]' @@ '$[*]'; - ?column? ----------- - -(1 row) - -select jsonb '[]' @@ '$[*]'; - ?column? ----------- - -(1 row) - -select jsonb_path_match('[[1, true], [2, false]]', 'strict $[*] ? (@[0] > $x) [1]', '{"x": 1}'); - jsonb_path_match ------------------- - f -(1 row) - -select jsonb_path_match('[[1, true], [2, false]]', 'strict $[*] ? 
(@[0] < $x) [1]', '{"x": 2}'); - jsonb_path_match ------------------- - t -(1 row) - -select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'lax exists($[*].a)', silent => false); - jsonb_path_match ------------------- - t -(1 row) - -select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'lax exists($[*].a)', silent => true); - jsonb_path_match ------------------- - t -(1 row) - -select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'strict exists($[*].a)', silent => false); - jsonb_path_match ------------------- - -(1 row) - -select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'strict exists($[*].a)', silent => true); - jsonb_path_match ------------------- - -(1 row) - -select jsonb_path_query('[null,1,true,"a",[],{}]', '$.type()'); - jsonb_path_query ------------------- - "array" -(1 row) - -select jsonb_path_query('[null,1,true,"a",[],{}]', 'lax $.type()'); - jsonb_path_query ------------------- - "array" -(1 row) - -select jsonb_path_query('[null,1,true,"a",[],{}]', '$[*].type()'); - jsonb_path_query ------------------- - "null" - "number" - "boolean" - "string" - "array" - "object" -(6 rows) - -select jsonb_path_query('null', 'null.type()'); - jsonb_path_query ------------------- - "null" -(1 row) - -select jsonb_path_query('null', 'true.type()'); - jsonb_path_query ------------------- - "boolean" -(1 row) - -select jsonb_path_query('null', '(123).type()'); - jsonb_path_query ------------------- - "number" -(1 row) - -select jsonb_path_query('null', '"123".type()'); - jsonb_path_query ------------------- - "string" -(1 row) - -select jsonb_path_query('{"a": 2}', '($.a - 5).abs() + 10'); - jsonb_path_query ------------------- - 13 -(1 row) - -select jsonb_path_query('{"a": 2.5}', '-($.a * $.a).floor() % 4.3'); - jsonb_path_query ------------------- - -1.7 -(1 row) - -select jsonb_path_query('[1, 2, 3]', '($[*] > 2) ? 
(@ == true)'); - jsonb_path_query ------------------- - true -(1 row) - -select jsonb_path_query('[1, 2, 3]', '($[*] > 3).type()'); - jsonb_path_query ------------------- - "boolean" -(1 row) - -select jsonb_path_query('[1, 2, 3]', '($[*].a > 3).type()'); - jsonb_path_query ------------------- - "boolean" -(1 row) - -select jsonb_path_query('[1, 2, 3]', 'strict ($[*].a > 3).type()'); - jsonb_path_query ------------------- - "null" -(1 row) - -select jsonb_path_query('[1,null,true,"11",[],[1],[1,2,3],{},{"a":1,"b":2}]', 'strict $[*].size()'); -ERROR: jsonpath item method .size() can only be applied to an array -select jsonb_path_query('[1,null,true,"11",[],[1],[1,2,3],{},{"a":1,"b":2}]', 'strict $[*].size()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[1,null,true,"11",[],[1],[1,2,3],{},{"a":1,"b":2}]', 'lax $[*].size()'); - jsonb_path_query ------------------- - 1 - 1 - 1 - 1 - 0 - 1 - 3 - 1 - 1 -(9 rows) - -select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].abs()'); - jsonb_path_query ------------------- - 0 - 1 - 2 - 3.4 - 5.6 -(5 rows) - -select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].floor()'); - jsonb_path_query ------------------- - 0 - 1 - -2 - -4 - 5 -(5 rows) - -select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling()'); - jsonb_path_query ------------------- - 0 - 1 - -2 - -3 - 6 -(5 rows) - -select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling().abs()'); - jsonb_path_query ------------------- - 0 - 1 - 2 - 3 - 6 -(5 rows) - -select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling().abs().type()'); - jsonb_path_query ------------------- - "number" - "number" - "number" - "number" - "number" -(5 rows) - -select jsonb_path_query('[{},1]', '$[*].keyvalue()'); -ERROR: jsonpath item method .keyvalue() can only be applied to an object -select jsonb_path_query('[{},1]', '$[*].keyvalue()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{}', '$.keyvalue()'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{"a": 1, "b": [1, 2], "c": {"a": "bbb"}}', '$.keyvalue()'); - jsonb_path_query ----------------------------------------------- - {"id": 0, "key": "a", "value": 1} - {"id": 0, "key": "b", "value": [1, 2]} - {"id": 0, "key": "c", "value": {"a": "bbb"}} -(3 rows) - -select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', '$[*].keyvalue()'); - jsonb_path_query ------------------------------------------------ - {"id": 12, "key": "a", "value": 1} - {"id": 12, "key": "b", "value": [1, 2]} - {"id": 72, "key": "c", "value": {"a": "bbb"}} -(3 rows) - -select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'strict $.keyvalue()'); -ERROR: jsonpath item method .keyvalue() can only be applied to an object -select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'lax $.keyvalue()'); - jsonb_path_query ------------------------------------------------ - {"id": 12, "key": "a", "value": 1} - {"id": 12, "key": "b", "value": [1, 2]} - {"id": 72, "key": "c", "value": {"a": "bbb"}} -(3 rows) - -select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'strict $.keyvalue().a'); -ERROR: jsonpath item method .keyvalue() can only be applied to an object -select jsonb '{"a": 1, "b": [1, 2]}' @? 'lax $.keyvalue()'; - ?column? ----------- - t -(1 row) - -select jsonb '{"a": 1, "b": [1, 2]}' @? 'lax $.keyvalue().key'; - ?column? 
----------- - t -(1 row) - -select jsonb_path_query('null', '$.double()'); -ERROR: jsonpath item method .double() can only be applied to a string or numeric value -select jsonb_path_query('true', '$.double()'); -ERROR: jsonpath item method .double() can only be applied to a string or numeric value -select jsonb_path_query('null', '$.double()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('true', '$.double()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', '$.double()'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('[]', 'strict $.double()'); -ERROR: jsonpath item method .double() can only be applied to a string or numeric value -select jsonb_path_query('{}', '$.double()'); -ERROR: jsonpath item method .double() can only be applied to a string or numeric value -select jsonb_path_query('[]', 'strict $.double()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{}', '$.double()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('1.23', '$.double()'); - jsonb_path_query ------------------- - 1.23 -(1 row) - -select jsonb_path_query('"1.23"', '$.double()'); - jsonb_path_query ------------------- - 1.23 -(1 row) - -select jsonb_path_query('"1.23aaa"', '$.double()'); -ERROR: argument "1.23aaa" of jsonpath item method .double() is invalid for type double precision -select jsonb_path_query('1e1000', '$.double()'); -ERROR: argument "10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" of jsonpath item method .double() is invalid for type double precision -select jsonb_path_query('"nan"', '$.double()'); -ERROR: NaN or Infinity is not allowed for jsonpath item method .double() -select jsonb_path_query('"NaN"', '$.double()'); -ERROR: NaN or Infinity is not allowed for jsonpath item method .double() -select jsonb_path_query('"inf"', '$.double()'); -ERROR: NaN or Infinity is not allowed for jsonpath item method .double() -select jsonb_path_query('"-inf"', '$.double()'); -ERROR: NaN or Infinity is not allowed for jsonpath item method .double() -select jsonb_path_query('"inf"', '$.double()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('"-inf"', '$.double()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('{}', '$.abs()'); -ERROR: jsonpath item method .abs() can only be applied to a numeric value -select jsonb_path_query('true', '$.floor()'); -ERROR: jsonpath item 
method .floor() can only be applied to a numeric value -select jsonb_path_query('"1.2"', '$.ceiling()'); -ERROR: jsonpath item method .ceiling() can only be applied to a numeric value -select jsonb_path_query('{}', '$.abs()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('true', '$.floor()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('"1.2"', '$.ceiling()', silent => true); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('["", "a", "abc", "abcabc"]', '$[*] ? (@ starts with "abc")'); - jsonb_path_query ------------------- - "abc" - "abcabc" -(2 rows) - -select jsonb_path_query('["", "a", "abc", "abcabc"]', 'strict $ ? (@[*] starts with "abc")'); - jsonb_path_query ----------------------------- - ["", "a", "abc", "abcabc"] -(1 row) - -select jsonb_path_query('["", "a", "abd", "abdabc"]', 'strict $ ? (@[*] starts with "abc")'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('["abc", "abcabc", null, 1]', 'strict $ ? (@[*] starts with "abc")'); - jsonb_path_query ------------------- -(0 rows) - -select jsonb_path_query('["abc", "abcabc", null, 1]', 'strict $ ? ((@[*] starts with "abc") is unknown)'); - jsonb_path_query ----------------------------- - ["abc", "abcabc", null, 1] -(1 row) - -select jsonb_path_query('[[null, 1, "abc", "abcabc"]]', 'lax $ ? (@[*] starts with "abc")'); - jsonb_path_query ----------------------------- - [null, 1, "abc", "abcabc"] -(1 row) - -select jsonb_path_query('[[null, 1, "abd", "abdabc"]]', 'lax $ ? ((@[*] starts with "abc") is unknown)'); - jsonb_path_query ----------------------------- - [null, 1, "abd", "abdabc"] -(1 row) - -select jsonb_path_query('[null, 1, "abd", "abdabc"]', 'lax $[*] ? ((@ starts with "abc") is unknown)'); - jsonb_path_query ------------------- - null - 1 -(2 rows) - -select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c")'); - jsonb_path_query ------------------- - "abc" - "abdacb" -(2 rows) - -select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "i")'); - jsonb_path_query ------------------- - "abc" - "aBdC" - "abdacb" -(3 rows) - -select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "m")'); - jsonb_path_query ------------------- - "abc" - "abdacb" - "adc\nabc" -(3 rows) - -select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "s")'); - jsonb_path_query ------------------- - "abc" - "abdacb" - "ab\nadc" -(3 rows) - -select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "a\\b" flag "q")'); - jsonb_path_query ------------------- - "a\\b" - "^a\\b$" -(2 rows) - -select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "a\\b" flag "")'); - jsonb_path_query ------------------- - "a\b" -(1 row) - -select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\b$" flag "q")'); - jsonb_path_query ------------------- - "^a\\b$" -(1 row) - -select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? 
-select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\B$" flag "q")');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\B$" flag "iq")');
- jsonb_path_query
-------------------
- "^a\\b$"
-(1 row)
-
-select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\b$" flag "")');
- jsonb_path_query
-------------------
- "a\b"
-(1 row)
-
-select jsonb_path_query('null', '$.datetime()');
-ERROR: jsonpath item method .datetime() can only be applied to a string
-select jsonb_path_query('true', '$.datetime()');
-ERROR: jsonpath item method .datetime() can only be applied to a string
-select jsonb_path_query('1', '$.datetime()');
-ERROR: jsonpath item method .datetime() can only be applied to a string
-select jsonb_path_query('[]', '$.datetime()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.datetime()');
-ERROR: jsonpath item method .datetime() can only be applied to a string
-select jsonb_path_query('{}', '$.datetime()');
-ERROR: jsonpath item method .datetime() can only be applied to a string
-select jsonb_path_query('"bogus"', '$.datetime()');
-ERROR: datetime format is not recognized: "bogus"
-HINT: Use a datetime template argument to specify the input data format.
-select jsonb_path_query('"12:34"', '$.datetime("aaa")');
-ERROR: invalid datetime format separator: "a"
-select jsonb_path_query('"aaaa"', '$.datetime("HH24")');
-ERROR: invalid value "aa" for "HH24"
-DETAIL: Value must be an integer.
-select jsonb '"10-03-2017"' @? '$.datetime("dd-mm-yyyy")';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb_path_query('"10-03-2017"', '$.datetime("dd-mm-yyyy")');
- jsonb_path_query
-------------------
- "2017-03-10"
-(1 row)
-
-select jsonb_path_query('"10-03-2017"', '$.datetime("dd-mm-yyyy").type()');
- jsonb_path_query
-------------------
- "date"
-(1 row)
-
-select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy")');
-ERROR: trailing characters remain in input string after datetime format
-select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy").type()');
-ERROR: trailing characters remain in input string after datetime format
-select jsonb_path_query('"10-03-2017 12:34"', ' $.datetime("dd-mm-yyyy HH24:MI").type()');
- jsonb_path_query
--------------------------------
- "timestamp without time zone"
-(1 row)
-
-select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM").type()');
- jsonb_path_query
----------------------------
- "timestamp with time zone"
-(1 row)
-
-select jsonb_path_query('"12:34:56"', '$.datetime("HH24:MI:SS").type()');
- jsonb_path_query
--------------------------
- "time without time zone"
-(1 row)
-
-select jsonb_path_query('"12:34:56 +05:20"', '$.datetime("HH24:MI:SS TZH:TZM").type()');
- jsonb_path_query
-----------------------
- "time with time zone"
-(1 row)
-
-select jsonb_path_query('"10-03-2017T12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")');
- jsonb_path_query
-----------------------
- "2017-03-10T12:34:56"
-(1 row)
-
-select jsonb_path_query('"10-03-2017t12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")');
-ERROR: unmatched format character "T"
-select jsonb_path_query('"10-03-2017 12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")');
-ERROR: unmatched format character "T"
--- Test .bigint()
-select jsonb_path_query('null', '$.bigint()');
-ERROR: jsonpath item method .bigint() can only be applied to a string or numeric value
-select jsonb_path_query('true', '$.bigint()');
-ERROR: jsonpath item method .bigint() can only be applied to a string or numeric value
-select jsonb_path_query('null', '$.bigint()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('true', '$.bigint()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', '$.bigint()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.bigint()');
-ERROR: jsonpath item method .bigint() can only be applied to a string or numeric value
-select jsonb_path_query('{}', '$.bigint()');
-ERROR: jsonpath item method .bigint() can only be applied to a string or numeric value
-select jsonb_path_query('[]', 'strict $.bigint()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{}', '$.bigint()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('"1.23"', '$.bigint()');
-ERROR: argument "1.23" of jsonpath item method .bigint() is invalid for type bigint
-select jsonb_path_query('"1.23aaa"', '$.bigint()');
-ERROR: argument "1.23aaa" of jsonpath item method .bigint() is invalid for type bigint
-select jsonb_path_query('1e1000', '$.bigint()');
-ERROR: argument "10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" of jsonpath item method .bigint() is invalid for type bigint
-select jsonb_path_query('"nan"', '$.bigint()');
-ERROR: argument "nan" of jsonpath item method .bigint() is invalid for type bigint
-select jsonb_path_query('"NaN"', '$.bigint()');
-ERROR: argument "NaN" of jsonpath item method .bigint() is invalid for type bigint
-select jsonb_path_query('"inf"', '$.bigint()');
-ERROR: argument "inf" of jsonpath item method .bigint() is invalid for type bigint
-select jsonb_path_query('"-inf"', '$.bigint()');
-ERROR: argument "-inf" of jsonpath item method .bigint() is invalid for type bigint
-select jsonb_path_query('"inf"', '$.bigint()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('"-inf"', '$.bigint()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('123', '$.bigint()');
- jsonb_path_query
-------------------
- 123
-(1 row)
-
-select jsonb_path_query('"123"', '$.bigint()');
- jsonb_path_query
-------------------
- 123
-(1 row)
-
-select jsonb_path_query('1.23', '$.bigint()');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('1.83', '$.bigint()');
- jsonb_path_query
-------------------
- 2
-(1 row)
-
-select jsonb_path_query('1234567890123', '$.bigint()');
- jsonb_path_query
-------------------
- 1234567890123
-(1 row)
-
-select jsonb_path_query('"1234567890123"', '$.bigint()');
- jsonb_path_query
-------------------
- 1234567890123
-(1 row)
-
-select jsonb_path_query('12345678901234567890', '$.bigint()');
-ERROR: argument "12345678901234567890" of jsonpath item method .bigint() is invalid for type bigint
-select jsonb_path_query('"12345678901234567890"', '$.bigint()');
-ERROR: argument "12345678901234567890" of jsonpath item method .bigint() is invalid for type bigint
-select jsonb_path_query('"+123"', '$.bigint()');
- jsonb_path_query
-------------------
- 123
-(1 row)
-
-select jsonb_path_query('-123', '$.bigint()');
- jsonb_path_query
-------------------
- -123
-(1 row)
-
-select jsonb_path_query('"-123"', '$.bigint()');
- jsonb_path_query
-------------------
- -123
-(1 row)
-
-select jsonb_path_query('123', '$.bigint() * 2');
- jsonb_path_query
-------------------
- 246
-(1 row)
-
--- Test .boolean()
-select jsonb_path_query('null', '$.boolean()');
-ERROR: jsonpath item method .boolean() can only be applied to a boolean, string, or numeric value
-select jsonb_path_query('null', '$.boolean()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', '$.boolean()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.boolean()');
-ERROR: jsonpath item method .boolean() can only be applied to a boolean, string, or numeric value
-select jsonb_path_query('{}', '$.boolean()');
-ERROR: jsonpath item method .boolean() can only be applied to a boolean, string, or numeric value
-select jsonb_path_query('[]', 'strict $.boolean()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{}', '$.boolean()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('1.23', '$.boolean()');
-ERROR: argument "1.23" of jsonpath item method .boolean() is invalid for type boolean
-select jsonb_path_query('"1.23"', '$.boolean()');
-ERROR: argument "1.23" of jsonpath item method .boolean() is invalid for type boolean
-select jsonb_path_query('"1.23aaa"', '$.boolean()');
-ERROR: argument "1.23aaa" of jsonpath item method .boolean() is invalid for type boolean
-select jsonb_path_query('1e1000', '$.boolean()');
-ERROR: argument "10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" of jsonpath item method .boolean() is invalid for type boolean
-select jsonb_path_query('"nan"', '$.boolean()');
-ERROR: argument "nan" of jsonpath item method .boolean() is invalid for type boolean
-select jsonb_path_query('"NaN"', '$.boolean()');
-ERROR: argument "NaN" of jsonpath item method .boolean() is invalid for type boolean
-select jsonb_path_query('"inf"', '$.boolean()');
-ERROR: argument "inf" of jsonpath item method .boolean() is invalid for type boolean
-select jsonb_path_query('"-inf"', '$.boolean()');
-ERROR: argument "-inf" of jsonpath item method .boolean() is invalid for type boolean
-select jsonb_path_query('"inf"', '$.boolean()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('"-inf"', '$.boolean()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('"100"', '$.boolean()');
-ERROR: argument "100" of jsonpath item method .boolean() is invalid for type boolean
-select jsonb_path_query('true', '$.boolean()');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('false', '$.boolean()');
- jsonb_path_query
-------------------
- false
-(1 row)
-
-select jsonb_path_query('1', '$.boolean()');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('0', '$.boolean()');
- jsonb_path_query
-------------------
- false
-(1 row)
-
-select jsonb_path_query('-1', '$.boolean()');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('100', '$.boolean()');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('"1"', '$.boolean()');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('"0"', '$.boolean()');
- jsonb_path_query
-------------------
- false
-(1 row)
-
-select jsonb_path_query('"true"', '$.boolean()');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('"false"', '$.boolean()');
- jsonb_path_query
-------------------
- false
-(1 row)
-
-select jsonb_path_query('"TRUE"', '$.boolean()');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('"FALSE"', '$.boolean()');
- jsonb_path_query
-------------------
- false
-(1 row)
-
-select jsonb_path_query('"yes"', '$.boolean()');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('"NO"', '$.boolean()');
- jsonb_path_query
-------------------
- false
-(1 row)
-
-select jsonb_path_query('"T"', '$.boolean()');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('"f"', '$.boolean()');
- jsonb_path_query
-------------------
- false
-(1 row)
-
-select jsonb_path_query('"y"', '$.boolean()');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('"N"', '$.boolean()');
- jsonb_path_query
-------------------
- false
-(1 row)
-
-select jsonb_path_query('true', '$.boolean().type()');
- jsonb_path_query
-------------------
- "boolean"
-(1 row)
-
-select jsonb_path_query('123', '$.boolean().type()');
- jsonb_path_query
-------------------
- "boolean"
-(1 row)
-
-select jsonb_path_query('"Yes"', '$.boolean().type()');
- jsonb_path_query
-------------------
- "boolean"
-(1 row)
-
-select jsonb_path_query_array('[1, "yes", false]', '$[*].boolean()');
- jsonb_path_query_array
------------------------
- [true, true, false]
-(1 row)
-
--- Test .date()
-select jsonb_path_query('null', '$.date()');
-ERROR: jsonpath item method .date() can only be applied to a string
-select jsonb_path_query('true', '$.date()');
-ERROR: jsonpath item method .date() can only be applied to a string
-select jsonb_path_query('1', '$.date()');
-ERROR: jsonpath item method .date() can only be applied to a string
-select jsonb_path_query('[]', '$.date()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.date()');
-ERROR: jsonpath item method .date() can only be applied to a string
-select jsonb_path_query('{}', '$.date()');
-ERROR: jsonpath item method .date() can only be applied to a string
-select jsonb_path_query('"bogus"', '$.date()');
-ERROR: date format is not recognized: "bogus"
-select jsonb '"2023-08-15"' @? '$.date()';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb_path_query('"2023-08-15"', '$.date()');
- jsonb_path_query
-------------------
- "2023-08-15"
-(1 row)
-
-select jsonb_path_query('"2023-08-15"', '$.date().type()');
- jsonb_path_query
-------------------
- "date"
-(1 row)
-
-select jsonb_path_query('"12:34:56"', '$.date()');
-ERROR: date format is not recognized: "12:34:56"
-select jsonb_path_query('"12:34:56 +05:30"', '$.date()');
-ERROR: date format is not recognized: "12:34:56 +05:30"
-select jsonb_path_query('"2023-08-15 12:34:56"', '$.date()');
- jsonb_path_query
-------------------
- "2023-08-15"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.date()');
-ERROR: cannot convert value from timestamptz to date without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.date()'); -- should work
- jsonb_path_query_tz
---------------------
- "2023-08-15"
-(1 row)
-
-select jsonb_path_query('"2023-08-15"', '$.date(2)');
-ERROR: syntax error at or near "2" of jsonpath input
-LINE 1: select jsonb_path_query('"2023-08-15"', '$.date(2)');
-                                                        ^
--- Test .decimal()
-select jsonb_path_query('null', '$.decimal()');
-ERROR: jsonpath item method .decimal() can only be applied to a string or numeric value
-select jsonb_path_query('true', '$.decimal()');
-ERROR: jsonpath item method .decimal() can only be applied to a string or numeric value
-select jsonb_path_query('null', '$.decimal()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('true', '$.decimal()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', '$.decimal()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.decimal()');
-ERROR: jsonpath item method .decimal() can only be applied to a string or numeric value
-select jsonb_path_query('{}', '$.decimal()');
-ERROR: jsonpath item method .decimal() can only be applied to a string or numeric value
-select jsonb_path_query('[]', 'strict $.decimal()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{}', '$.decimal()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('1.23', '$.decimal()');
- jsonb_path_query
-------------------
- 1.23
-(1 row)
-
-select jsonb_path_query('"1.23"', '$.decimal()');
- jsonb_path_query
-------------------
- 1.23
-(1 row)
-
-select jsonb_path_query('"1.23aaa"', '$.decimal()');
-ERROR: argument "1.23aaa" of jsonpath item method .decimal() is invalid for type numeric
-select jsonb_path_query('1e1000', '$.decimal()');
- jsonb_path_query
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-(1 row)
-
-select jsonb_path_query('"nan"', '$.decimal()');
-ERROR: NaN or Infinity is not allowed for jsonpath item method .decimal()
-select jsonb_path_query('"NaN"', '$.decimal()');
-ERROR: NaN or Infinity is not allowed for jsonpath item method .decimal()
-select jsonb_path_query('"inf"', '$.decimal()');
-ERROR: NaN or Infinity is not allowed for jsonpath item method .decimal()
-select jsonb_path_query('"-inf"', '$.decimal()');
-ERROR: NaN or Infinity is not allowed for jsonpath item method .decimal()
-select jsonb_path_query('"inf"', '$.decimal()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('"-inf"', '$.decimal()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('123', '$.decimal()');
- jsonb_path_query
-------------------
- 123
-(1 row)
-
-select jsonb_path_query('"123"', '$.decimal()');
- jsonb_path_query
-------------------
- 123
-(1 row)
-
-select jsonb_path_query('12345678901234567890', '$.decimal()');
- jsonb_path_query
----------------------
- 12345678901234567890
-(1 row)
-
-select jsonb_path_query('"12345678901234567890"', '$.decimal()');
- jsonb_path_query
----------------------
- 12345678901234567890
-(1 row)
-
-select jsonb_path_query('"+12.3"', '$.decimal()');
- jsonb_path_query
-------------------
- 12.3
-(1 row)
-
-select jsonb_path_query('-12.3', '$.decimal()');
- jsonb_path_query
-------------------
- -12.3
-(1 row)
-
-select jsonb_path_query('"-12.3"', '$.decimal()');
- jsonb_path_query
-------------------
- -12.3
-(1 row)
-
-select jsonb_path_query('12.3', '$.decimal() * 2');
- jsonb_path_query
-------------------
- 24.6
-(1 row)
-
-select jsonb_path_query('12345.678', '$.decimal(6, 1)');
- jsonb_path_query
-------------------
- 12345.7
-(1 row)
-
-select jsonb_path_query('12345.678', '$.decimal(6, 2)');
-ERROR: argument "12345.678" of jsonpath item method .decimal() is invalid for type numeric
-select jsonb_path_query('1234.5678', '$.decimal(6, 2)');
- jsonb_path_query
-------------------
- 1234.57
-(1 row)
-
-select jsonb_path_query('12345.678', '$.decimal(4, 6)');
-ERROR: argument "12345.678" of jsonpath item method .decimal() is invalid for type numeric
-select jsonb_path_query('12345.678', '$.decimal(0, 6)');
-ERROR: NUMERIC precision 0 must be between 1 and 1000
-select jsonb_path_query('12345.678', '$.decimal(1001, 6)');
-ERROR: NUMERIC precision 1001 must be between 1 and 1000
-select jsonb_path_query('1234.5678', '$.decimal(+6, +2)');
- jsonb_path_query
-------------------
- 1234.57
-(1 row)
-
-select jsonb_path_query('1234.5678', '$.decimal(+6, -2)');
- jsonb_path_query
-------------------
- 1200
-(1 row)
-
-select jsonb_path_query('1234.5678', '$.decimal(-6, +2)');
-ERROR: NUMERIC precision -6 must be between 1 and 1000
-select jsonb_path_query('1234.5678', '$.decimal(6, -1001)');
-ERROR: NUMERIC scale -1001 must be between -1000 and 1000
-select jsonb_path_query('1234.5678', '$.decimal(6, 1001)');
-ERROR: NUMERIC scale 1001 must be between -1000 and 1000
-select jsonb_path_query('-1234.5678', '$.decimal(+6, -2)');
- jsonb_path_query
-------------------
- -1200
-(1 row)
-
-select jsonb_path_query('0.0123456', '$.decimal(1,2)');
- jsonb_path_query
-------------------
- 0.01
-(1 row)
-
-select jsonb_path_query('0.0012345', '$.decimal(2,4)');
- jsonb_path_query
-------------------
- 0.0012
-(1 row)
-
-select jsonb_path_query('-0.00123456', '$.decimal(2,-4)');
- jsonb_path_query
-------------------
- 0
-(1 row)
-
-select jsonb_path_query('12.3', '$.decimal(12345678901,1)');
-ERROR: precision of jsonpath item method .decimal() is out of range for type integer
-select jsonb_path_query('12.3', '$.decimal(1,12345678901)');
-ERROR: scale of jsonpath item method .decimal() is out of range for type integer
--- Test .integer()
-select jsonb_path_query('null', '$.integer()');
-ERROR: jsonpath item method .integer() can only be applied to a string or numeric value
-select jsonb_path_query('true', '$.integer()');
-ERROR: jsonpath item method .integer() can only be applied to a string or numeric value
-select jsonb_path_query('null', '$.integer()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('true', '$.integer()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', '$.integer()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.integer()');
-ERROR: jsonpath item method .integer() can only be applied to a string or numeric value
-select jsonb_path_query('{}', '$.integer()');
-ERROR: jsonpath item method .integer() can only be applied to a string or numeric value
-select jsonb_path_query('[]', 'strict $.integer()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{}', '$.integer()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('"1.23"', '$.integer()');
-ERROR: argument "1.23" of jsonpath item method .integer() is invalid for type integer
-select jsonb_path_query('"1.23aaa"', '$.integer()');
-ERROR: argument "1.23aaa" of jsonpath item method .integer() is invalid for type integer
-select jsonb_path_query('1e1000', '$.integer()');
-ERROR: argument "10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" of jsonpath item method .integer() is invalid for type integer
-select jsonb_path_query('"nan"', '$.integer()');
-ERROR: argument "nan" of jsonpath item method .integer() is invalid for type integer
-select jsonb_path_query('"NaN"', '$.integer()');
-ERROR: argument "NaN" of jsonpath item method .integer() is invalid for type integer
-select jsonb_path_query('"inf"', '$.integer()');
-ERROR: argument "inf" of jsonpath item method .integer() is invalid for type integer
-select jsonb_path_query('"-inf"', '$.integer()');
-ERROR: argument "-inf" of jsonpath item method .integer() is invalid for type integer
-select jsonb_path_query('"inf"', '$.integer()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('"-inf"', '$.integer()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('123', '$.integer()');
- jsonb_path_query
-------------------
- 123
-(1 row)
-
-select jsonb_path_query('"123"', '$.integer()');
- jsonb_path_query
-------------------
- 123
-(1 row)
-
-select jsonb_path_query('1.23', '$.integer()');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('1.83', '$.integer()');
- jsonb_path_query
-------------------
- 2
-(1 row)
-
-select jsonb_path_query('12345678901', '$.integer()');
-ERROR: argument "12345678901" of jsonpath item method .integer() is invalid for type integer
-select jsonb_path_query('"12345678901"', '$.integer()');
-ERROR: argument "12345678901" of jsonpath item method .integer() is invalid for type integer
-select jsonb_path_query('"+123"', '$.integer()');
- jsonb_path_query
-------------------
- 123
-(1 row)
-
-select jsonb_path_query('-123', '$.integer()');
- jsonb_path_query
-------------------
- -123
-(1 row)
-
-select jsonb_path_query('"-123"', '$.integer()');
- jsonb_path_query
-------------------
- -123
-(1 row)
-
-select jsonb_path_query('123', '$.integer() * 2');
- jsonb_path_query
-------------------
- 246
-(1 row)
-
--- Test .number()
-select jsonb_path_query('null', '$.number()');
-ERROR: jsonpath item method .number() can only be applied to a string or numeric value
-select jsonb_path_query('true', '$.number()');
-ERROR: jsonpath item method .number() can only be applied to a string or numeric value
-select jsonb_path_query('null', '$.number()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('true', '$.number()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', '$.number()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.number()');
-ERROR: jsonpath item method .number() can only be applied to a string or numeric value
-select jsonb_path_query('{}', '$.number()');
-ERROR: jsonpath item method .number() can only be applied to a string or numeric value
-select jsonb_path_query('[]', 'strict $.number()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{}', '$.number()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('1.23', '$.number()');
- jsonb_path_query
-------------------
- 1.23
-(1 row)
-
-select jsonb_path_query('"1.23"', '$.number()');
- jsonb_path_query
-------------------
- 1.23
-(1 row)
-
-select jsonb_path_query('"1.23aaa"', '$.number()');
-ERROR: argument "1.23aaa" of jsonpath item method .number() is invalid for type numeric
-select jsonb_path_query('1e1000', '$.number()');
- jsonb_path_query
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-(1 row)
-
-select jsonb_path_query('"nan"', '$.number()');
-ERROR: NaN or Infinity is not allowed for jsonpath item method .number()
-select jsonb_path_query('"NaN"', '$.number()');
-ERROR: NaN or Infinity is not allowed for jsonpath item method .number()
-select jsonb_path_query('"inf"', '$.number()');
-ERROR: NaN or Infinity is not allowed for jsonpath item method .number()
-select jsonb_path_query('"-inf"', '$.number()');
-ERROR: NaN or Infinity is not allowed for jsonpath item method .number()
-select jsonb_path_query('"inf"', '$.number()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('"-inf"', '$.number()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('123', '$.number()');
- jsonb_path_query
-------------------
- 123
-(1 row)
-
-select jsonb_path_query('"123"', '$.number()');
- jsonb_path_query
-------------------
- 123
-(1 row)
-
-select jsonb_path_query('12345678901234567890', '$.number()');
- jsonb_path_query
---------------------- 
- 12345678901234567890
-(1 row)
-
-select jsonb_path_query('"12345678901234567890"', '$.number()');
- jsonb_path_query
---------------------- 
- 12345678901234567890
-(1 row)
-
-select jsonb_path_query('"+12.3"', '$.number()');
- jsonb_path_query
-------------------
- 12.3
-(1 row)
-
-select jsonb_path_query('-12.3', '$.number()');
- jsonb_path_query
-------------------
- -12.3
-(1 row)
-
-select jsonb_path_query('"-12.3"', '$.number()');
- jsonb_path_query
-------------------
- -12.3
-(1 row)
-
-select jsonb_path_query('12.3', '$.number() * 2');
- jsonb_path_query
-------------------
- 24.6
-(1 row)
-
--- Test .string()
-select jsonb_path_query('null', '$.string()');
-ERROR: jsonpath item method .string() can only be applied to a boolean, string, numeric, or datetime value
-select jsonb_path_query('null', '$.string()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', '$.string()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.string()');
-ERROR: jsonpath item method .string() can only be applied to a boolean, string, numeric, or datetime value
-select jsonb_path_query('{}', '$.string()');
-ERROR: jsonpath item method .string() can only be applied to a boolean, string, numeric, or datetime value
-select jsonb_path_query('[]', 'strict $.string()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{}', '$.string()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('1.23', '$.string()');
- jsonb_path_query
-------------------
- "1.23"
-(1 row)
-
-select jsonb_path_query('"1.23"', '$.string()');
- jsonb_path_query
-------------------
- "1.23"
-(1 row)
-
-select jsonb_path_query('"1.23aaa"', '$.string()');
- jsonb_path_query
-------------------
- "1.23aaa"
-(1 row)
-
-select jsonb_path_query('1234', '$.string()');
- jsonb_path_query
-------------------
- "1234"
-(1 row)
-
-select jsonb_path_query('true', '$.string()');
- jsonb_path_query
-------------------
- "true"
-(1 row)
-
-select jsonb_path_query('1234', '$.string().type()');
- jsonb_path_query
-------------------
- "string"
-(1 row)
-
-select jsonb_path_query('[2, true]', '$.string()');
- jsonb_path_query
-------------------
- "2"
- "true"
-(2 rows)
-
-select jsonb_path_query_array('[1.23, "yes", false]', '$[*].string()');
- jsonb_path_query_array
--------------------------
- ["1.23", "yes", "false"]
-(1 row)
-
-select jsonb_path_query_array('[1.23, "yes", false]', '$[*].string().type()');
- jsonb_path_query_array
--------------------------------
- ["string", "string", "string"]
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56 +5:30"', '$.timestamp().string()');
-ERROR: cannot convert value from timestamptz to timestamp without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"2023-08-15 12:34:56 +5:30"', '$.timestamp().string()'); -- should work
- jsonb_path_query_tz
-----------------------
- "2023-08-15T00:04:56"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp_tz().string()');
-ERROR: cannot convert value from timestamp to timestamptz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"2023-08-15 12:34:56"', '$.timestamp_tz().string()'); -- should work
- jsonb_path_query_tz
-----------------------------
- "2023-08-15T12:34:56-07:00"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56 +5:30"', '$.timestamp_tz().string()');
- jsonb_path_query
----------------------------
- "2023-08-15T12:34:56+05:30"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp().string()');
- jsonb_path_query
----------------------
- "2023-08-15T12:34:56"
-(1 row)
-
-select jsonb_path_query('"12:34:56 +5:30"', '$.time_tz().string()');
- jsonb_path_query
-------------------
- "12:34:56+05:30"
-(1 row)
-
--- this timetz usage will absorb the UTC offset of the current timezone setting
-begin;
-set local timezone = 'UTC-10';
-select jsonb_path_query_tz('"12:34:56"', '$.time_tz().string()');
- jsonb_path_query_tz
--------------------- 
- "12:34:56+10:00"
-(1 row)
-
-rollback;
-select jsonb_path_query('"12:34:56"', '$.time().string()');
- jsonb_path_query
-------------------
- "12:34:56"
-(1 row)
-
-select jsonb_path_query('"2023-08-15"', '$.date().string()');
- jsonb_path_query
-------------------
- "2023-08-15"
-(1 row)
-
--- .string() does not react to timezone or datestyle
-begin;
-set local timezone = 'UTC';
-set local datestyle = 'German';
-select jsonb_path_query('"2023-08-15 12:34:56 +5:30"', '$.timestamp_tz().string()');
- jsonb_path_query
----------------------------
- "2023-08-15T12:34:56+05:30"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp().string()');
- jsonb_path_query
----------------------
- "2023-08-15T12:34:56"
-(1 row)
-
-rollback;
--- Test .time()
-select jsonb_path_query('null', '$.time()');
-ERROR: jsonpath item method .time() can only be applied to a string
-select jsonb_path_query('true', '$.time()');
-ERROR: jsonpath item method .time() can only be applied to a string
-select jsonb_path_query('1', '$.time()');
-ERROR: jsonpath item method .time() can only be applied to a string
-select jsonb_path_query('[]', '$.time()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.time()');
-ERROR: jsonpath item method .time() can only be applied to a string
-select jsonb_path_query('{}', '$.time()');
-ERROR: jsonpath item method .time() can only be applied to a string
-select jsonb_path_query('"bogus"', '$.time()');
-ERROR: time format is not recognized: "bogus"
-select jsonb '"12:34:56"' @? '$.time()';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb_path_query('"12:34:56"', '$.time()');
- jsonb_path_query
-------------------
- "12:34:56"
-(1 row)
-
-select jsonb_path_query('"12:34:56"', '$.time().type()');
- jsonb_path_query
--------------------------
- "time without time zone"
-(1 row)
-
-select jsonb_path_query('"2023-08-15"', '$.time()');
-ERROR: time format is not recognized: "2023-08-15"
-select jsonb_path_query('"12:34:56 +05:30"', '$.time()');
-ERROR: cannot convert value from timetz to time without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"12:34:56 +05:30"', '$.time()'); -- should work
- jsonb_path_query_tz
--------------------- 
- "12:34:56"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56"', '$.time()');
- jsonb_path_query
-------------------
- "12:34:56"
-(1 row)
-
-select jsonb_path_query('"12:34:56.789"', '$.time(-1)');
-ERROR: syntax error at or near "-" of jsonpath input
-LINE 1: select jsonb_path_query('"12:34:56.789"', '$.time(-1)');
-                                                          ^
-select jsonb_path_query('"12:34:56.789"', '$.time(2.0)');
-ERROR: syntax error at or near "2.0" of jsonpath input
-LINE 1: select jsonb_path_query('"12:34:56.789"', '$.time(2.0)');
-                                                          ^
-select jsonb_path_query('"12:34:56.789"', '$.time(12345678901)');
-ERROR: time precision of jsonpath item method .time() is out of range for type integer
-select jsonb_path_query('"12:34:56.789"', '$.time(0)');
- jsonb_path_query
-------------------
- "12:34:57"
-(1 row)
-
-select jsonb_path_query('"12:34:56.789"', '$.time(2)');
- jsonb_path_query
-------------------
- "12:34:56.79"
-(1 row)
-
-select jsonb_path_query('"12:34:56.789"', '$.time(5)');
- jsonb_path_query
-------------------
- "12:34:56.789"
-(1 row)
-
-select jsonb_path_query('"12:34:56.789"', '$.time(10)');
-WARNING: TIME(10) precision reduced to maximum allowed, 6
- jsonb_path_query
-------------------
- "12:34:56.789"
-(1 row)
-
-select jsonb_path_query('"12:34:56.789012"', '$.time(8)');
-WARNING: TIME(8) precision reduced to maximum allowed, 6
- jsonb_path_query
-------------------
- "12:34:56.789012"
-(1 row)
-
--- Test .time_tz()
-select jsonb_path_query('null', '$.time_tz()');
-ERROR: jsonpath item method .time_tz() can only be applied to a string
-select jsonb_path_query('true', '$.time_tz()');
-ERROR: jsonpath item method .time_tz() can only be applied to a string
-select jsonb_path_query('1', '$.time_tz()');
-ERROR: jsonpath item method .time_tz() can only be applied to a string
-select jsonb_path_query('[]', '$.time_tz()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.time_tz()');
-ERROR: jsonpath item method .time_tz() can only be applied to a string
-select jsonb_path_query('{}', '$.time_tz()');
-ERROR: jsonpath item method .time_tz() can only be applied to a string
-select jsonb_path_query('"bogus"', '$.time_tz()');
-ERROR: time_tz format is not recognized: "bogus"
-select jsonb '"12:34:56 +05:30"' @? '$.time_tz()';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb_path_query('"12:34:56 +05:30"', '$.time_tz()');
- jsonb_path_query
-------------------
- "12:34:56+05:30"
-(1 row)
-
-select jsonb_path_query('"12:34:56 +05:30"', '$.time_tz().type()');
- jsonb_path_query
-----------------------
- "time with time zone"
-(1 row)
-
-select jsonb_path_query('"2023-08-15"', '$.time_tz()');
-ERROR: time_tz format is not recognized: "2023-08-15"
-select jsonb_path_query('"2023-08-15 12:34:56"', '$.time_tz()');
-ERROR: time_tz format is not recognized: "2023-08-15 12:34:56"
-select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(-1)');
-ERROR: syntax error at or near "-" of jsonpath input
-LINE 1: select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(...
-                                                                     ^
-select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(2.0)');
-ERROR: syntax error at or near "2.0" of jsonpath input
-LINE 1: select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(...
-                                                                     ^
-select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(12345678901)');
-ERROR: time precision of jsonpath item method .time_tz() is out of range for type integer
-select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(0)');
- jsonb_path_query
-------------------
- "12:34:57+05:30"
-(1 row)
-
-select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(2)');
- jsonb_path_query
--------------------- 
- "12:34:56.79+05:30"
-(1 row)
-
-select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(5)');
- jsonb_path_query
--------------------- 
- "12:34:56.789+05:30"
-(1 row)
-
-select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(10)');
-WARNING: TIME(10) WITH TIME ZONE precision reduced to maximum allowed, 6
- jsonb_path_query
--------------------- 
- "12:34:56.789+05:30"
-(1 row)
-
-select jsonb_path_query('"12:34:56.789012 +05:30"', '$.time_tz(8)');
-WARNING: TIME(8) WITH TIME ZONE precision reduced to maximum allowed, 6
- jsonb_path_query
------------------------ 
- "12:34:56.789012+05:30"
-(1 row)
-
--- Test .timestamp()
-select jsonb_path_query('null', '$.timestamp()');
-ERROR: jsonpath item method .timestamp() can only be applied to a string
-select jsonb_path_query('true', '$.timestamp()');
-ERROR: jsonpath item method .timestamp() can only be applied to a string
-select jsonb_path_query('1', '$.timestamp()');
-ERROR: jsonpath item method .timestamp() can only be applied to a string
-select jsonb_path_query('[]', '$.timestamp()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.timestamp()');
-ERROR: jsonpath item method .timestamp() can only be applied to a string
-select jsonb_path_query('{}', '$.timestamp()');
-ERROR: jsonpath item method .timestamp() can only be applied to a string
-select jsonb_path_query('"bogus"', '$.timestamp()');
-ERROR: timestamp format is not recognized: "bogus"
-select jsonb '"2023-08-15 12:34:56"' @? '$.timestamp()';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp()');
- jsonb_path_query
----------------------
- "2023-08-15T12:34:56"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp().type()');
- jsonb_path_query
-------------------------------
- "timestamp without time zone"
-(1 row)
-
-select jsonb_path_query('"2023-08-15"', '$.timestamp()');
- jsonb_path_query
----------------------
- "2023-08-15T00:00:00"
-(1 row)
-
-select jsonb_path_query('"12:34:56"', '$.timestamp()');
-ERROR: timestamp format is not recognized: "12:34:56"
-select jsonb_path_query('"12:34:56 +05:30"', '$.timestamp()');
-ERROR: timestamp format is not recognized: "12:34:56 +05:30"
-select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(-1)');
-ERROR: syntax error at or near "-" of jsonpath input
-LINE 1: ...ect jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timesta...
-                                                                     ^
-select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(2.0)');
-ERROR: syntax error at or near "2.0" of jsonpath input
-LINE 1: ...ect jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timesta...
-                                                                     ^
-select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(12345678901)');
-ERROR: time precision of jsonpath item method .timestamp() is out of range for type integer
-select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(0)');
- jsonb_path_query
----------------------
- "2023-08-15T12:34:57"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(2)');
- jsonb_path_query
------------------------- 
- "2023-08-15T12:34:56.79"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(5)');
- jsonb_path_query
------------------------- 
- "2023-08-15T12:34:56.789"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(10)');
-WARNING: TIMESTAMP(10) precision reduced to maximum allowed, 6
- jsonb_path_query
------------------------- 
- "2023-08-15T12:34:56.789"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56.789012"', '$.timestamp(8)');
-WARNING: TIMESTAMP(8) precision reduced to maximum allowed, 6
- jsonb_path_query
----------------------------- 
- "2023-08-15T12:34:56.789012"
-(1 row)
-
--- Test .timestamp_tz()
-select jsonb_path_query('null', '$.timestamp_tz()');
-ERROR: jsonpath item method .timestamp_tz() can only be applied to a string
-select jsonb_path_query('true', '$.timestamp_tz()');
-ERROR: jsonpath item method .timestamp_tz() can only be applied to a string
-select jsonb_path_query('1', '$.timestamp_tz()');
-ERROR: jsonpath item method .timestamp_tz() can only be applied to a string
-select jsonb_path_query('[]', '$.timestamp_tz()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.timestamp_tz()');
-ERROR: jsonpath item method .timestamp_tz() can only be applied to a string
-select jsonb_path_query('{}', '$.timestamp_tz()');
-ERROR: jsonpath item method .timestamp_tz() can only be applied to a string
-select jsonb_path_query('"bogus"', '$.timestamp_tz()');
-ERROR: timestamp_tz format is not recognized: "bogus"
-select jsonb '"2023-08-15 12:34:56 +05:30"' @? '$.timestamp_tz()';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp_tz()');
- jsonb_path_query
----------------------------
- "2023-08-15T12:34:56+05:30"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp_tz().type()');
- jsonb_path_query
---------------------------
- "timestamp with time zone"
-(1 row)
-
-select jsonb_path_query('"2023-08-15"', '$.timestamp_tz()');
-ERROR: cannot convert value from date to timestamptz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"2023-08-15"', '$.timestamp_tz()'); -- should work
- jsonb_path_query_tz
----------------------------
- "2023-08-15T00:00:00-07:00"
-(1 row)
-
-select jsonb_path_query('"12:34:56"', '$.timestamp_tz()');
-ERROR: timestamp_tz format is not recognized: "12:34:56"
-select jsonb_path_query('"12:34:56 +05:30"', '$.timestamp_tz()');
-ERROR: timestamp_tz format is not recognized: "12:34:56 +05:30"
-select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(-1)');
-ERROR: syntax error at or near "-" of jsonpath input
-LINE 1: ...nb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timesta...
-                                                                     ^
-select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(2.0)');
-ERROR: syntax error at or near "2.0" of jsonpath input
-LINE 1: ...nb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timesta...
-                                                                     ^
-select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(12345678901)');
-ERROR: time precision of jsonpath item method .timestamp_tz() is out of range for type integer
-select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(0)');
- jsonb_path_query
----------------------------
- "2023-08-15T12:34:57+05:30"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(2)');
- jsonb_path_query
------------------------------- 
- "2023-08-15T12:34:56.79+05:30"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(5)');
- jsonb_path_query
-------------------------------- 
- "2023-08-15T12:34:56.789+05:30"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(10)');
-WARNING: TIMESTAMP(10) WITH TIME ZONE precision reduced to maximum allowed, 6
- jsonb_path_query
-------------------------------- 
- "2023-08-15T12:34:56.789+05:30"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56.789012 +05:30"', '$.timestamp_tz(8)');
-WARNING: TIMESTAMP(8) WITH TIME ZONE precision reduced to maximum allowed, 6
- jsonb_path_query
----------------------------------- 
- "2023-08-15T12:34:56.789012+05:30"
-(1 row)
-
-set time zone '+00';
-select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.time()');
-ERROR: cannot convert value from timestamptz to time without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.time()'); -- should work
- jsonb_path_query_tz
--------------------- 
- "07:04:56"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.time_tz()');
- jsonb_path_query
-------------------
- "07:04:56+00:00"
-(1 row)
-
-select jsonb_path_query('"12:34:56"', '$.time_tz()');
-ERROR: cannot convert value from time to timetz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"12:34:56"', '$.time_tz()'); -- should work
- jsonb_path_query_tz
--------------------- 
- "12:34:56+00:00"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp()');
-ERROR: cannot convert value from timestamptz to timestamp without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.timestamp()'); -- should work
- jsonb_path_query_tz
----------------------
- "2023-08-15T07:04:56"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp_tz()');
-ERROR: cannot convert value from timestamp to timestamptz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"2023-08-15 12:34:56"', '$.timestamp_tz()'); -- should work - jsonb_path_query_tz ------------------------------ - "2023-08-15T12:34:56+00:00" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI")'); - jsonb_path_query ------------------------ - "2017-03-10T12:34:00" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); -ERROR: input string is too short for datetime format -select jsonb_path_query('"10-03-2017 12:34 +05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:00+05:00" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34 -05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:00-05:00" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:00+05:20" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34 -05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:00-05:20" -(1 row) - -select jsonb_path_query('"12:34"', '$.datetime("HH24:MI")'); - jsonb_path_query ------------------- - "12:34:00" -(1 row) - -select jsonb_path_query('"12:34"', '$.datetime("HH24:MI TZH")'); -ERROR: input string is too short for datetime format -select jsonb_path_query('"12:34 +05"', '$.datetime("HH24:MI TZH")'); - jsonb_path_query ------------------- - "12:34:00+05:00" -(1 row) - -select jsonb_path_query('"12:34 -05"', '$.datetime("HH24:MI TZH")'); - jsonb_path_query ------------------- - "12:34:00-05:00" -(1 row) - -select jsonb_path_query('"12:34 +05:20"', '$.datetime("HH24:MI TZH:TZM")'); - jsonb_path_query ------------------- - "12:34:00+05:20" -(1 row) - -select jsonb_path_query('"12:34 -05:20"', '$.datetime("HH24:MI TZH:TZM")'); - jsonb_path_query ------------------- - "12:34:00-05:20" -(1 row) - -set time zone '+10'; -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.time()'); -ERROR: cannot convert value from timestamptz to time without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.time()'); -- should work - jsonb_path_query_tz ---------------------- - "17:04:56" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.time_tz()'); - jsonb_path_query ------------------- - "17:04:56+10:00" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp()'); -ERROR: cannot convert value from timestamptz to timestamp without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.timestamp()'); -- should work - jsonb_path_query_tz ------------------------ - "2023-08-15T17:04:56" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp_tz()'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. 
-select jsonb_path_query_tz('"2023-08-15 12:34:56"', '$.timestamp_tz()'); -- should work - jsonb_path_query_tz ------------------------------ - "2023-08-15T12:34:56+10:00" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp_tz()'); - jsonb_path_query ------------------------------ - "2023-08-15T12:34:56+05:30" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI")'); - jsonb_path_query ------------------------ - "2017-03-10T12:34:00" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); -ERROR: input string is too short for datetime format -select jsonb_path_query('"10-03-2017 12:34 +05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:00+05:00" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34 -05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:00-05:00" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:00+05:20" -(1 row) - -select jsonb_path_query('"10-03-2017 12:34 -05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:00-05:20" -(1 row) - -select jsonb_path_query('"12:34"', '$.datetime("HH24:MI")'); - jsonb_path_query ------------------- - "12:34:00" -(1 row) - -select jsonb_path_query('"12:34"', '$.datetime("HH24:MI TZH")'); -ERROR: input string is too short for datetime format -select jsonb_path_query('"12:34 +05"', '$.datetime("HH24:MI TZH")'); - jsonb_path_query ------------------- - "12:34:00+05:00" -(1 row) - -select jsonb_path_query('"12:34 -05"', '$.datetime("HH24:MI TZH")'); - jsonb_path_query ------------------- - "12:34:00-05:00" -(1 row) - -select jsonb_path_query('"12:34 +05:20"', '$.datetime("HH24:MI TZH:TZM")'); - jsonb_path_query ------------------- - "12:34:00+05:20" -(1 row) - -select jsonb_path_query('"12:34 -05:20"', '$.datetime("HH24:MI TZH:TZM")'); - jsonb_path_query ------------------- - "12:34:00-05:20" -(1 row) - -set time zone default; -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.time()'); -ERROR: cannot convert value from timestamptz to time without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.time()'); -- should work - jsonb_path_query_tz ---------------------- - "00:04:56" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.time_tz()'); - jsonb_path_query ------------------- - "00:04:56-07:00" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp()'); -ERROR: cannot convert value from timestamptz to timestamp without time zone usage -HINT: Use *_tz() function for time zone support. 
-select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.timestamp()'); -- should work - jsonb_path_query_tz ------------------------ - "2023-08-15T00:04:56" -(1 row) - -select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp_tz()'); - jsonb_path_query ------------------------------ - "2023-08-15T12:34:56+05:30" -(1 row) - -select jsonb_path_query('"2017-03-10"', '$.datetime().type()'); - jsonb_path_query ------------------- - "date" -(1 row) - -select jsonb_path_query('"2017-03-10"', '$.datetime()'); - jsonb_path_query ------------------- - "2017-03-10" -(1 row) - -select jsonb_path_query('"2017-03-10 12:34:56"', '$.datetime().type()'); - jsonb_path_query -------------------------------- - "timestamp without time zone" -(1 row) - -select jsonb_path_query('"2017-03-10 12:34:56"', '$.datetime()'); - jsonb_path_query ------------------------ - "2017-03-10T12:34:56" -(1 row) - -select jsonb_path_query('"2017-03-10 12:34:56+3"', '$.datetime().type()'); - jsonb_path_query ----------------------------- - "timestamp with time zone" -(1 row) - -select jsonb_path_query('"2017-03-10 12:34:56+3"', '$.datetime()'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:56+03:00" -(1 row) - -select jsonb_path_query('"2017-03-10 12:34:56+3:10"', '$.datetime().type()'); - jsonb_path_query ----------------------------- - "timestamp with time zone" -(1 row) - -select jsonb_path_query('"2017-03-10 12:34:56+3:10"', '$.datetime()'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:56+03:10" -(1 row) - -select jsonb_path_query('"2017-03-10T12:34:56+3:10"', '$.datetime()'); - jsonb_path_query ------------------------------ - "2017-03-10T12:34:56+03:10" -(1 row) - -select jsonb_path_query('"2017-03-10t12:34:56+3:10"', '$.datetime()'); -ERROR: datetime format is not recognized: "2017-03-10t12:34:56+3:10" -HINT: Use a datetime template argument to specify the input data format. -select jsonb_path_query('"2017-03-10 12:34:56.789+3:10"', '$.datetime()'); - jsonb_path_query ---------------------------------- - "2017-03-10T12:34:56.789+03:10" -(1 row) - -select jsonb_path_query('"2017-03-10T12:34:56.789+3:10"', '$.datetime()'); - jsonb_path_query ---------------------------------- - "2017-03-10T12:34:56.789+03:10" -(1 row) - -select jsonb_path_query('"2017-03-10t12:34:56.789+3:10"', '$.datetime()'); -ERROR: datetime format is not recognized: "2017-03-10t12:34:56.789+3:10" -HINT: Use a datetime template argument to specify the input data format. 
-select jsonb_path_query('"2017-03-10T12:34:56.789EST"', '$.datetime()'); - jsonb_path_query ---------------------------------- - "2017-03-10T12:34:56.789-05:00" -(1 row) - -select jsonb_path_query('"2017-03-10T12:34:56.789Z"', '$.datetime()'); - jsonb_path_query ---------------------------------- - "2017-03-10T12:34:56.789+00:00" -(1 row) - -select jsonb_path_query('"12:34:56"', '$.datetime().type()'); - jsonb_path_query --------------------------- - "time without time zone" -(1 row) - -select jsonb_path_query('"12:34:56"', '$.datetime()'); - jsonb_path_query ------------------- - "12:34:56" -(1 row) - -select jsonb_path_query('"12:34:56+3"', '$.datetime().type()'); - jsonb_path_query ------------------------ - "time with time zone" -(1 row) - -select jsonb_path_query('"12:34:56+3"', '$.datetime()'); - jsonb_path_query ------------------- - "12:34:56+03:00" -(1 row) - -select jsonb_path_query('"12:34:56+3:10"', '$.datetime().type()'); - jsonb_path_query ------------------------ - "time with time zone" -(1 row) - -select jsonb_path_query('"12:34:56+3:10"', '$.datetime()'); - jsonb_path_query ------------------- - "12:34:56+03:10" -(1 row) - -set time zone '+00'; --- date comparison -select jsonb_path_query( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].datetime() ? (@ == "10.03.2017".datetime("dd.mm.yyyy"))'); -ERROR: cannot convert value from date to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].datetime() ? (@ >= "10.03.2017".datetime("dd.mm.yyyy"))'); -ERROR: cannot convert value from date to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].datetime() ? (@ < "10.03.2017".datetime("dd.mm.yyyy"))'); -ERROR: cannot convert value from date to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].datetime() ? (@ == "10.03.2017".datetime("dd.mm.yyyy"))'); - jsonb_path_query_tz ------------------------------ - "2017-03-10" - "2017-03-10T00:00:00" - "2017-03-10T03:00:00+03:00" -(3 rows) - -select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].datetime() ? (@ >= "10.03.2017".datetime("dd.mm.yyyy"))'); - jsonb_path_query_tz ------------------------------ - "2017-03-10" - "2017-03-11" - "2017-03-10T00:00:00" - "2017-03-10T12:34:56" - "2017-03-10T03:00:00+03:00" -(5 rows) - -select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].datetime() ? 
(@ < "10.03.2017".datetime("dd.mm.yyyy"))'); - jsonb_path_query_tz ------------------------------ - "2017-03-09" - "2017-03-10T01:02:03+04:00" -(2 rows) - -select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].datetime() ? (@ == "2017-03-10".date())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10" - "2017-03-10T00:00:00" - "2017-03-10T03:00:00+03:00" -(3 rows) - -select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].datetime() ? (@ >= "2017-03-10".date())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10" - "2017-03-11" - "2017-03-10T00:00:00" - "2017-03-10T12:34:56" - "2017-03-10T03:00:00+03:00" -(5 rows) - -select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].datetime() ? (@ < "2017-03-10".date())'); - jsonb_path_query_tz ------------------------------ - "2017-03-09" - "2017-03-10T01:02:03+04:00" -(2 rows) - -select jsonb_path_query( - '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].date() ? (@ == "2017-03-10".date())'); -ERROR: cannot convert value from timestamptz to date without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].date() ? (@ >= "2017-03-10".date())'); -ERROR: cannot convert value from timestamptz to date without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].date() ? (@ < "2017-03-10".date())'); -ERROR: cannot convert value from timestamptz to date without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].date() ? (@ == "2017-03-10".date())'); - jsonb_path_query_tz ---------------------- - "2017-03-10" - "2017-03-10" - "2017-03-10" - "2017-03-10" -(4 rows) - -select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].date() ? (@ >= "2017-03-10".date())'); - jsonb_path_query_tz ---------------------- - "2017-03-10" - "2017-03-11" - "2017-03-10" - "2017-03-10" - "2017-03-10" -(5 rows) - -select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', - '$[*].date() ? (@ < "2017-03-10".date())'); - jsonb_path_query_tz ---------------------- - "2017-03-09" - "2017-03-09" -(2 rows) - --- time comparison -select jsonb_path_query( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].datetime() ? 
(@ == "12:35".datetime("HH24:MI"))'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].datetime() ? (@ >= "12:35".datetime("HH24:MI"))'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].datetime() ? (@ < "12:35".datetime("HH24:MI"))'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].datetime() ? (@ == "12:35".datetime("HH24:MI"))'); - jsonb_path_query_tz ---------------------- - "12:35:00" - "12:35:00+00:00" -(2 rows) - -select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].datetime() ? (@ >= "12:35".datetime("HH24:MI"))'); - jsonb_path_query_tz ---------------------- - "12:35:00" - "12:36:00" - "12:35:00+00:00" -(3 rows) - -select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].datetime() ? (@ < "12:35".datetime("HH24:MI"))'); - jsonb_path_query_tz ---------------------- - "12:34:00" - "12:35:00+01:00" - "13:35:00+01:00" -(3 rows) - -select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].datetime() ? (@ == "12:35:00".time())'); - jsonb_path_query_tz ---------------------- - "12:35:00" - "12:35:00+00:00" -(2 rows) - -select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].datetime() ? (@ >= "12:35:00".time())'); - jsonb_path_query_tz ---------------------- - "12:35:00" - "12:36:00" - "12:35:00+00:00" -(3 rows) - -select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].datetime() ? (@ < "12:35:00".time())'); - jsonb_path_query_tz ---------------------- - "12:34:00" - "12:35:00+01:00" - "13:35:00+01:00" -(3 rows) - -select jsonb_path_query( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].time() ? (@ == "12:35:00".time())'); -ERROR: cannot convert value from timetz to time without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].time() ? (@ >= "12:35:00".time())'); -ERROR: cannot convert value from timetz to time without time zone usage -HINT: Use *_tz() function for time zone support. 
-select jsonb_path_query( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].time() ? (@ < "12:35:00".time())'); -ERROR: cannot convert value from timetz to time without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["12:34:00.123", "12:35:00.123", "12:36:00.1123", "12:35:00.1123+00", "12:35:00.123+01", "13:35:00.123+01", "2017-03-10 12:35:00.1", "2017-03-10 12:35:00.123+01"]', - '$[*].time(2) ? (@ >= "12:35:00.123".time(2))'); -ERROR: cannot convert value from timetz to time without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].time() ? (@ == "12:35:00".time())'); - jsonb_path_query_tz ---------------------- - "12:35:00" - "12:35:00" - "12:35:00" - "12:35:00" -(4 rows) - -select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].time() ? (@ >= "12:35:00".time())'); - jsonb_path_query_tz ---------------------- - "12:35:00" - "12:36:00" - "12:35:00" - "12:35:00" - "13:35:00" - "12:35:00" -(6 rows) - -select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', - '$[*].time() ? (@ < "12:35:00".time())'); - jsonb_path_query_tz ---------------------- - "12:34:00" - "11:35:00" -(2 rows) - -select jsonb_path_query_tz( - '["12:34:00.123", "12:35:00.123", "12:36:00.1123", "12:35:00.1123+00", "12:35:00.123+01", "13:35:00.123+01", "2017-03-10 12:35:00.1", "2017-03-10 12:35:00.123+01"]', - '$[*].time(2) ? (@ >= "12:35:00.123".time(2))'); - jsonb_path_query_tz ---------------------- - "12:35:00.12" - "12:36:00.11" - "12:35:00.12" - "13:35:00.12" -(4 rows) - --- timetz comparison -select jsonb_path_query( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].datetime() ? (@ == "12:35 +1".datetime("HH24:MI TZH"))'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].datetime() ? (@ >= "12:35 +1".datetime("HH24:MI TZH"))'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].datetime() ? (@ < "12:35 +1".datetime("HH24:MI TZH"))'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].datetime() ? 
(@ == "12:35 +1".datetime("HH24:MI TZH"))'); - jsonb_path_query_tz ---------------------- - "12:35:00+01:00" -(1 row) - -select jsonb_path_query_tz( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].datetime() ? (@ >= "12:35 +1".datetime("HH24:MI TZH"))'); - jsonb_path_query_tz ---------------------- - "12:35:00+01:00" - "12:36:00+01:00" - "12:35:00-02:00" - "11:35:00" - "12:35:00" -(5 rows) - -select jsonb_path_query_tz( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].datetime() ? (@ < "12:35 +1".datetime("HH24:MI TZH"))'); - jsonb_path_query_tz ---------------------- - "12:34:00+01:00" - "12:35:00+02:00" - "10:35:00" -(3 rows) - -select jsonb_path_query_tz( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].datetime() ? (@ == "12:35:00 +1".time_tz())'); - jsonb_path_query_tz ---------------------- - "12:35:00+01:00" -(1 row) - -select jsonb_path_query_tz( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].datetime() ? (@ >= "12:35:00 +1".time_tz())'); - jsonb_path_query_tz ---------------------- - "12:35:00+01:00" - "12:36:00+01:00" - "12:35:00-02:00" - "11:35:00" - "12:35:00" -(5 rows) - -select jsonb_path_query_tz( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].datetime() ? (@ < "12:35:00 +1".time_tz())'); - jsonb_path_query_tz ---------------------- - "12:34:00+01:00" - "12:35:00+02:00" - "10:35:00" -(3 rows) - -select jsonb_path_query( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].time_tz() ? (@ == "12:35:00 +1".time_tz())'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].time_tz() ? (@ >= "12:35:00 +1".time_tz())'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].time_tz() ? (@ < "12:35:00 +1".time_tz())'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["12:34:00.123+01", "12:35:00.123+01", "12:36:00.1123+01", "12:35:00.1123+02", "12:35:00.123-02", "10:35:00.123", "11:35:00.1", "12:35:00.123", "2017-03-10 12:35:00.123 +1"]', - '$[*].time_tz(2) ? (@ >= "12:35:00.123 +1".time_tz(2))'); -ERROR: cannot convert value from time to timetz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].time_tz() ? 
(@ == "12:35:00 +1".time_tz())'); - jsonb_path_query_tz ---------------------- - "12:35:00+01:00" -(1 row) - -select jsonb_path_query_tz( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].time_tz() ? (@ >= "12:35:00 +1".time_tz())'); - jsonb_path_query_tz ---------------------- - "12:35:00+01:00" - "12:36:00+01:00" - "12:35:00-02:00" - "11:35:00+00:00" - "12:35:00+00:00" - "11:35:00+00:00" -(6 rows) - -select jsonb_path_query_tz( - '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]', - '$[*].time_tz() ? (@ < "12:35:00 +1".time_tz())'); - jsonb_path_query_tz ---------------------- - "12:34:00+01:00" - "12:35:00+02:00" - "10:35:00+00:00" -(3 rows) - -select jsonb_path_query_tz( - '["12:34:00.123+01", "12:35:00.123+01", "12:36:00.1123+01", "12:35:00.1123+02", "12:35:00.123-02", "10:35:00.123", "11:35:00.1", "12:35:00.123", "2017-03-10 12:35:00.123 +1"]', - '$[*].time_tz(2) ? (@ >= "12:35:00.123 +1".time_tz(2))'); - jsonb_path_query_tz ---------------------- - "12:35:00.12+01:00" - "12:36:00.11+01:00" - "12:35:00.12-02:00" - "12:35:00.12+00:00" - "11:35:00.12+00:00" -(5 rows) - --- timestamp comparison -select jsonb_path_query( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ == "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ >= "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ < "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ == "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:35:00" - "2017-03-10T13:35:00+01:00" -(2 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? 
(@ >= "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:35:00" - "2017-03-10T12:36:00" - "2017-03-10T13:35:00+01:00" - "2017-03-10T12:35:00-01:00" - "2017-03-11" -(5 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ < "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:34:00" - "2017-03-10T12:35:00+01:00" - "2017-03-10" -(3 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]', - '$[*].datetime() ? (@ == "2017-03-10 12:35:00".timestamp())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:35:00" - "2017-03-10T13:35:00+01:00" -(2 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]', - '$[*].datetime() ? (@ >= "2017-03-10 12:35:00".timestamp())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:35:00" - "2017-03-10T12:36:00" - "2017-03-10T13:35:00+01:00" - "2017-03-10T12:35:00-01:00" - "2017-03-11" -(5 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]', - '$[*].datetime() ? (@ < "2017-03-10 12:35:00".timestamp())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:34:00" - "2017-03-10T12:35:00+01:00" - "2017-03-10" -(3 rows) - -select jsonb_path_query( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]', - '$[*].timestamp() ? (@ == "2017-03-10 12:35:00".timestamp())'); -ERROR: cannot convert value from timestamptz to timestamp without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]', - '$[*].timestamp() ? (@ >= "2017-03-10 12:35:00".timestamp())'); -ERROR: cannot convert value from timestamptz to timestamp without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]', - '$[*].timestamp() ? (@ < "2017-03-10 12:35:00".timestamp())'); -ERROR: cannot convert value from timestamptz to timestamp without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10 12:34:00.123", "2017-03-10 12:35:00.123", "2017-03-10 12:36:00.1123", "2017-03-10 12:35:00.1123+01", "2017-03-10 13:35:00.123+01", "2017-03-10 12:35:00.1-01", "2017-03-10", "2017-03-11"]', - '$[*].timestamp(2) ? 
(@ >= "2017-03-10 12:35:00.123".timestamp(2))'); -ERROR: cannot convert value from timestamptz to timestamp without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]', - '$[*].timestamp() ? (@ == "2017-03-10 12:35:00".timestamp())'); - jsonb_path_query_tz ------------------------ - "2017-03-10T12:35:00" - "2017-03-10T12:35:00" -(2 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]', - '$[*].timestamp() ? (@ >= "2017-03-10 12:35:00".timestamp())'); - jsonb_path_query_tz ------------------------ - "2017-03-10T12:35:00" - "2017-03-10T12:36:00" - "2017-03-10T12:35:00" - "2017-03-10T13:35:00" - "2017-03-11T00:00:00" -(5 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]', - '$[*].timestamp() ? (@ < "2017-03-10 12:35:00".timestamp())'); - jsonb_path_query_tz ------------------------ - "2017-03-10T12:34:00" - "2017-03-10T11:35:00" - "2017-03-10T00:00:00" -(3 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00.123", "2017-03-10 12:35:00.123", "2017-03-10 12:36:00.1123", "2017-03-10 12:35:00.1123+01", "2017-03-10 13:35:00.123+01", "2017-03-10 12:35:00.1-01", "2017-03-10", "2017-03-11"]', - '$[*].timestamp(2) ? (@ >= "2017-03-10 12:35:00.123".timestamp(2))'); - jsonb_path_query_tz --------------------------- - "2017-03-10T12:35:00.12" - "2017-03-10T12:36:00.11" - "2017-03-10T12:35:00.12" - "2017-03-10T13:35:00.1" - "2017-03-11T00:00:00" -(5 rows) - --- timestamptz comparison -select jsonb_path_query( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ == "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ >= "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ < "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. 
-select jsonb_path_query_tz( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ == "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:35:00+01:00" - "2017-03-10T11:35:00" -(2 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ >= "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:35:00+01:00" - "2017-03-10T12:36:00+01:00" - "2017-03-10T12:35:00-02:00" - "2017-03-10T11:35:00" - "2017-03-10T12:35:00" - "2017-03-11" -(6 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', - '$[*].datetime() ? (@ < "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:34:00+01:00" - "2017-03-10T12:35:00+02:00" - "2017-03-10T10:35:00" - "2017-03-10" -(4 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]', - '$[*].datetime() ? (@ == "2017-03-10 12:35:00 +1".timestamp_tz())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:35:00+01:00" - "2017-03-10T11:35:00" -(2 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]', - '$[*].datetime() ? (@ >= "2017-03-10 12:35:00 +1".timestamp_tz())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:35:00+01:00" - "2017-03-10T12:36:00+01:00" - "2017-03-10T12:35:00-02:00" - "2017-03-10T11:35:00" - "2017-03-10T12:35:00" - "2017-03-11" -(6 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]', - '$[*].datetime() ? (@ < "2017-03-10 12:35:00 +1".timestamp_tz())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:34:00+01:00" - "2017-03-10T12:35:00+02:00" - "2017-03-10T10:35:00" - "2017-03-10" -(4 rows) - -select jsonb_path_query( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]', - '$[*].timestamp_tz() ? 
(@ == "2017-03-10 12:35:00 +1".timestamp_tz())'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]', - '$[*].timestamp_tz() ? (@ >= "2017-03-10 12:35:00 +1".timestamp_tz())'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]', - '$[*].timestamp_tz() ? (@ < "2017-03-10 12:35:00 +1".timestamp_tz())'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query( - '["2017-03-10 12:34:00.123+01", "2017-03-10 12:35:00.123+01", "2017-03-10 12:36:00.1123+01", "2017-03-10 12:35:00.1123+02", "2017-03-10 12:35:00.123-02", "2017-03-10 10:35:00.123", "2017-03-10 11:35:00.1", "2017-03-10 12:35:00.123", "2017-03-10", "2017-03-11"]', - '$[*].timestamp_tz(2) ? (@ >= "2017-03-10 12:35:00.123 +1".timestamp_tz(2))'); -ERROR: cannot convert value from timestamp to timestamptz without time zone usage -HINT: Use *_tz() function for time zone support. -select jsonb_path_query_tz( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]', - '$[*].timestamp_tz() ? (@ == "2017-03-10 12:35:00 +1".timestamp_tz())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:35:00+01:00" - "2017-03-10T11:35:00+00:00" -(2 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]', - '$[*].timestamp_tz() ? (@ >= "2017-03-10 12:35:00 +1".timestamp_tz())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:35:00+01:00" - "2017-03-10T12:36:00+01:00" - "2017-03-10T12:35:00-02:00" - "2017-03-10T11:35:00+00:00" - "2017-03-10T12:35:00+00:00" - "2017-03-11T00:00:00+00:00" -(6 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]', - '$[*].timestamp_tz() ? (@ < "2017-03-10 12:35:00 +1".timestamp_tz())'); - jsonb_path_query_tz ------------------------------ - "2017-03-10T12:34:00+01:00" - "2017-03-10T12:35:00+02:00" - "2017-03-10T10:35:00+00:00" - "2017-03-10T00:00:00+00:00" -(4 rows) - -select jsonb_path_query_tz( - '["2017-03-10 12:34:00.123+01", "2017-03-10 12:35:00.123+01", "2017-03-10 12:36:00.1123+01", "2017-03-10 12:35:00.1123+02", "2017-03-10 12:35:00.123-02", "2017-03-10 10:35:00.123", "2017-03-10 11:35:00.1", "2017-03-10 12:35:00.123", "2017-03-10", "2017-03-11"]', - '$[*].timestamp_tz(2) ? 
(@ >= "2017-03-10 12:35:00.123 +1".timestamp_tz(2))'); - jsonb_path_query_tz --------------------------------- - "2017-03-10T12:35:00.12+01:00" - "2017-03-10T12:36:00.11+01:00" - "2017-03-10T12:35:00.12-02:00" - "2017-03-10T12:35:00.12+00:00" - "2017-03-11T00:00:00+00:00" -(5 rows) - --- overflow during comparison -select jsonb_path_query('"1000000-01-01"', '$.datetime() > "2020-01-01 12:00:00".datetime()'::jsonpath); - jsonb_path_query ------------------- - true -(1 row) - -set time zone default; --- jsonpath operators -SELECT jsonb_path_query('[{"a": 1}, {"a": 2}]', '$[*]'); - jsonb_path_query ------------------- - {"a": 1} - {"a": 2} -(2 rows) - -SELECT jsonb_path_query('[{"a": 1}, {"a": 2}]', '$[*] ? (@.a > 10)'); - jsonb_path_query ------------------- -(0 rows) - -SELECT jsonb_path_query('[{"a": 1}]', '$undefined_var'); -ERROR: could not find jsonpath variable "undefined_var" -SELECT jsonb_path_query('[{"a": 1}]', 'false'); - jsonb_path_query ------------------- - false -(1 row) - -SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a'); -ERROR: JSON object does not contain key "a" -SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a'); - jsonb_path_query_array ------------------------- - [1, 2] -(1 row) - -SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ == 1)'); - jsonb_path_query_array ------------------------- - [1] -(1 row) - -SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 10)'); - jsonb_path_query_array ------------------------- - [] -(1 row) - -SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 1, "max": 4}'); - jsonb_path_query_array ------------------------- - [2, 3] -(1 row) - -SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 3, "max": 4}'); - jsonb_path_query_array ------------------------- - [] -(1 row) - -SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a'); -ERROR: JSON object does not contain key "a" -SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a', silent => true); - jsonb_path_query_first ------------------------- - 1 -(1 row) - -SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a'); - jsonb_path_query_first ------------------------- - 1 -(1 row) - -SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ == 1)'); - jsonb_path_query_first ------------------------- - 1 -(1 row) - -SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 10)'); - jsonb_path_query_first ------------------------- - -(1 row) - -SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 1, "max": 4}'); - jsonb_path_query_first ------------------------- - 2 -(1 row) - -SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 3, "max": 4}'); - jsonb_path_query_first ------------------------- - -(1 row) - -SELECT jsonb_path_query_first('[{"a": 1}]', '$undefined_var'); -ERROR: could not find jsonpath variable "undefined_var" -SELECT jsonb_path_query_first('[{"a": 1}]', 'false'); - jsonb_path_query_first ------------------------- - false -(1 row) - -SELECT jsonb '[{"a": 1}, {"a": 2}]' @? '$[*].a ? (@ > 1)'; - ?column? ----------- - t -(1 row) - -SELECT jsonb '[{"a": 1}, {"a": 2}]' @? '$[*] ? (@.a > 2)'; - ?column? 
----------- - f -(1 row) - -SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 1)'); - jsonb_path_exists -------------------- - t -(1 row) - -SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*] ? (@.a > $min && @.a < $max)', vars => '{"min": 1, "max": 4}'); - jsonb_path_exists -------------------- - t -(1 row) - -SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*] ? (@.a > $min && @.a < $max)', vars => '{"min": 3, "max": 4}'); - jsonb_path_exists -------------------- - f -(1 row) - -SELECT jsonb_path_exists('[{"a": 1}]', '$undefined_var'); -ERROR: could not find jsonpath variable "undefined_var" -SELECT jsonb_path_exists('[{"a": 1}]', 'false'); - jsonb_path_exists -------------------- - t -(1 row) - -SELECT jsonb_path_match('true', '$', silent => false); - jsonb_path_match ------------------- - t -(1 row) - -SELECT jsonb_path_match('false', '$', silent => false); - jsonb_path_match ------------------- - f -(1 row) - -SELECT jsonb_path_match('null', '$', silent => false); - jsonb_path_match ------------------- - -(1 row) - -SELECT jsonb_path_match('1', '$', silent => true); - jsonb_path_match ------------------- - -(1 row) - -SELECT jsonb_path_match('1', '$', silent => false); -ERROR: single boolean result is expected -SELECT jsonb_path_match('"a"', '$', silent => false); -ERROR: single boolean result is expected -SELECT jsonb_path_match('{}', '$', silent => false); -ERROR: single boolean result is expected -SELECT jsonb_path_match('[true]', '$', silent => false); -ERROR: single boolean result is expected -SELECT jsonb_path_match('{}', 'lax $.a', silent => false); -ERROR: single boolean result is expected -SELECT jsonb_path_match('{}', 'strict $.a', silent => false); -ERROR: JSON object does not contain key "a" -SELECT jsonb_path_match('{}', 'strict $.a', silent => true); - jsonb_path_match ------------------- - -(1 row) - -SELECT jsonb_path_match('[true, true]', '$[*]', silent => false); -ERROR: single boolean result is expected -SELECT jsonb '[{"a": 1}, {"a": 2}]' @@ '$[*].a > 1'; - ?column? ----------- - t -(1 row) - -SELECT jsonb '[{"a": 1}, {"a": 2}]' @@ '$[*].a > 2'; - ?column? 
----------- - f -(1 row) - -SELECT jsonb_path_match('[{"a": 1}, {"a": 2}]', '$[*].a > 1'); - jsonb_path_match ------------------- - t -(1 row) - -SELECT jsonb_path_match('[{"a": 1}]', '$undefined_var'); -ERROR: could not find jsonpath variable "undefined_var" -SELECT jsonb_path_match('[{"a": 1}]', 'false'); - jsonb_path_match ------------------- - f -(1 row) - --- test string comparison (Unicode codepoint collation) -WITH str(j, num) AS -( - SELECT jsonb_build_object('s', s), num - FROM unnest('{"", "a", "ab", "abc", "abcd", "b", "A", "AB", "ABC", "ABc", "ABcD", "B"}'::text[]) WITH ORDINALITY AS a(s, num) -) -SELECT - s1.j, s2.j, - jsonb_path_query_first(s1.j, '$.s < $s', vars => s2.j) lt, - jsonb_path_query_first(s1.j, '$.s <= $s', vars => s2.j) le, - jsonb_path_query_first(s1.j, '$.s == $s', vars => s2.j) eq, - jsonb_path_query_first(s1.j, '$.s >= $s', vars => s2.j) ge, - jsonb_path_query_first(s1.j, '$.s > $s', vars => s2.j) gt -FROM str s1, str s2 -ORDER BY s1.num, s2.num; - j | j | lt | le | eq | ge | gt ----------------+---------------+-------+-------+-------+-------+------- - {"s": ""} | {"s": ""} | false | true | true | true | false - {"s": ""} | {"s": "a"} | true | true | false | false | false - {"s": ""} | {"s": "ab"} | true | true | false | false | false - {"s": ""} | {"s": "abc"} | true | true | false | false | false - {"s": ""} | {"s": "abcd"} | true | true | false | false | false - {"s": ""} | {"s": "b"} | true | true | false | false | false - {"s": ""} | {"s": "A"} | true | true | false | false | false - {"s": ""} | {"s": "AB"} | true | true | false | false | false - {"s": ""} | {"s": "ABC"} | true | true | false | false | false - {"s": ""} | {"s": "ABc"} | true | true | false | false | false - {"s": ""} | {"s": "ABcD"} | true | true | false | false | false - {"s": ""} | {"s": "B"} | true | true | false | false | false - {"s": "a"} | {"s": ""} | false | false | false | true | true - {"s": "a"} | {"s": "a"} | false | true | true | true | false - {"s": "a"} | {"s": "ab"} | true | true | false | false | false - {"s": "a"} | {"s": "abc"} | true | true | false | false | false - {"s": "a"} | {"s": "abcd"} | true | true | false | false | false - {"s": "a"} | {"s": "b"} | true | true | false | false | false - {"s": "a"} | {"s": "A"} | false | false | false | true | true - {"s": "a"} | {"s": "AB"} | false | false | false | true | true - {"s": "a"} | {"s": "ABC"} | false | false | false | true | true - {"s": "a"} | {"s": "ABc"} | false | false | false | true | true - {"s": "a"} | {"s": "ABcD"} | false | false | false | true | true - {"s": "a"} | {"s": "B"} | false | false | false | true | true - {"s": "ab"} | {"s": ""} | false | false | false | true | true - {"s": "ab"} | {"s": "a"} | false | false | false | true | true - {"s": "ab"} | {"s": "ab"} | false | true | true | true | false - {"s": "ab"} | {"s": "abc"} | true | true | false | false | false - {"s": "ab"} | {"s": "abcd"} | true | true | false | false | false - {"s": "ab"} | {"s": "b"} | true | true | false | false | false - {"s": "ab"} | {"s": "A"} | false | false | false | true | true - {"s": "ab"} | {"s": "AB"} | false | false | false | true | true - {"s": "ab"} | {"s": "ABC"} | false | false | false | true | true - {"s": "ab"} | {"s": "ABc"} | false | false | false | true | true - {"s": "ab"} | {"s": "ABcD"} | false | false | false | true | true - {"s": "ab"} | {"s": "B"} | false | false | false | true | true - {"s": "abc"} | {"s": ""} | false | false | false | true | true - {"s": "abc"} | {"s": "a"} | false | false | false 
| true | true - {"s": "abc"} | {"s": "ab"} | false | false | false | true | true - {"s": "abc"} | {"s": "abc"} | false | true | true | true | false - {"s": "abc"} | {"s": "abcd"} | true | true | false | false | false - {"s": "abc"} | {"s": "b"} | true | true | false | false | false - {"s": "abc"} | {"s": "A"} | false | false | false | true | true - {"s": "abc"} | {"s": "AB"} | false | false | false | true | true - {"s": "abc"} | {"s": "ABC"} | false | false | false | true | true - {"s": "abc"} | {"s": "ABc"} | false | false | false | true | true - {"s": "abc"} | {"s": "ABcD"} | false | false | false | true | true - {"s": "abc"} | {"s": "B"} | false | false | false | true | true - {"s": "abcd"} | {"s": ""} | false | false | false | true | true - {"s": "abcd"} | {"s": "a"} | false | false | false | true | true - {"s": "abcd"} | {"s": "ab"} | false | false | false | true | true - {"s": "abcd"} | {"s": "abc"} | false | false | false | true | true - {"s": "abcd"} | {"s": "abcd"} | false | true | true | true | false - {"s": "abcd"} | {"s": "b"} | true | true | false | false | false - {"s": "abcd"} | {"s": "A"} | false | false | false | true | true - {"s": "abcd"} | {"s": "AB"} | false | false | false | true | true - {"s": "abcd"} | {"s": "ABC"} | false | false | false | true | true - {"s": "abcd"} | {"s": "ABc"} | false | false | false | true | true - {"s": "abcd"} | {"s": "ABcD"} | false | false | false | true | true - {"s": "abcd"} | {"s": "B"} | false | false | false | true | true - {"s": "b"} | {"s": ""} | false | false | false | true | true - {"s": "b"} | {"s": "a"} | false | false | false | true | true - {"s": "b"} | {"s": "ab"} | false | false | false | true | true - {"s": "b"} | {"s": "abc"} | false | false | false | true | true - {"s": "b"} | {"s": "abcd"} | false | false | false | true | true - {"s": "b"} | {"s": "b"} | false | true | true | true | false - {"s": "b"} | {"s": "A"} | false | false | false | true | true - {"s": "b"} | {"s": "AB"} | false | false | false | true | true - {"s": "b"} | {"s": "ABC"} | false | false | false | true | true - {"s": "b"} | {"s": "ABc"} | false | false | false | true | true - {"s": "b"} | {"s": "ABcD"} | false | false | false | true | true - {"s": "b"} | {"s": "B"} | false | false | false | true | true - {"s": "A"} | {"s": ""} | false | false | false | true | true - {"s": "A"} | {"s": "a"} | true | true | false | false | false - {"s": "A"} | {"s": "ab"} | true | true | false | false | false - {"s": "A"} | {"s": "abc"} | true | true | false | false | false - {"s": "A"} | {"s": "abcd"} | true | true | false | false | false - {"s": "A"} | {"s": "b"} | true | true | false | false | false - {"s": "A"} | {"s": "A"} | false | true | true | true | false - {"s": "A"} | {"s": "AB"} | true | true | false | false | false - {"s": "A"} | {"s": "ABC"} | true | true | false | false | false - {"s": "A"} | {"s": "ABc"} | true | true | false | false | false - {"s": "A"} | {"s": "ABcD"} | true | true | false | false | false - {"s": "A"} | {"s": "B"} | true | true | false | false | false - {"s": "AB"} | {"s": ""} | false | false | false | true | true - {"s": "AB"} | {"s": "a"} | true | true | false | false | false - {"s": "AB"} | {"s": "ab"} | true | true | false | false | false - {"s": "AB"} | {"s": "abc"} | true | true | false | false | false - {"s": "AB"} | {"s": "abcd"} | true | true | false | false | false - {"s": "AB"} | {"s": "b"} | true | true | false | false | false - {"s": "AB"} | {"s": "A"} | false | false | false | true | true - {"s": "AB"} | {"s": "AB"} | 
false | true | true | true | false - {"s": "AB"} | {"s": "ABC"} | true | true | false | false | false - {"s": "AB"} | {"s": "ABc"} | true | true | false | false | false - {"s": "AB"} | {"s": "ABcD"} | true | true | false | false | false - {"s": "AB"} | {"s": "B"} | true | true | false | false | false - {"s": "ABC"} | {"s": ""} | false | false | false | true | true - {"s": "ABC"} | {"s": "a"} | true | true | false | false | false - {"s": "ABC"} | {"s": "ab"} | true | true | false | false | false - {"s": "ABC"} | {"s": "abc"} | true | true | false | false | false - {"s": "ABC"} | {"s": "abcd"} | true | true | false | false | false - {"s": "ABC"} | {"s": "b"} | true | true | false | false | false - {"s": "ABC"} | {"s": "A"} | false | false | false | true | true - {"s": "ABC"} | {"s": "AB"} | false | false | false | true | true - {"s": "ABC"} | {"s": "ABC"} | false | true | true | true | false - {"s": "ABC"} | {"s": "ABc"} | true | true | false | false | false - {"s": "ABC"} | {"s": "ABcD"} | true | true | false | false | false - {"s": "ABC"} | {"s": "B"} | true | true | false | false | false - {"s": "ABc"} | {"s": ""} | false | false | false | true | true - {"s": "ABc"} | {"s": "a"} | true | true | false | false | false - {"s": "ABc"} | {"s": "ab"} | true | true | false | false | false - {"s": "ABc"} | {"s": "abc"} | true | true | false | false | false - {"s": "ABc"} | {"s": "abcd"} | true | true | false | false | false - {"s": "ABc"} | {"s": "b"} | true | true | false | false | false - {"s": "ABc"} | {"s": "A"} | false | false | false | true | true - {"s": "ABc"} | {"s": "AB"} | false | false | false | true | true - {"s": "ABc"} | {"s": "ABC"} | false | false | false | true | true - {"s": "ABc"} | {"s": "ABc"} | false | true | true | true | false - {"s": "ABc"} | {"s": "ABcD"} | true | true | false | false | false - {"s": "ABc"} | {"s": "B"} | true | true | false | false | false - {"s": "ABcD"} | {"s": ""} | false | false | false | true | true - {"s": "ABcD"} | {"s": "a"} | true | true | false | false | false - {"s": "ABcD"} | {"s": "ab"} | true | true | false | false | false - {"s": "ABcD"} | {"s": "abc"} | true | true | false | false | false - {"s": "ABcD"} | {"s": "abcd"} | true | true | false | false | false - {"s": "ABcD"} | {"s": "b"} | true | true | false | false | false - {"s": "ABcD"} | {"s": "A"} | false | false | false | true | true - {"s": "ABcD"} | {"s": "AB"} | false | false | false | true | true - {"s": "ABcD"} | {"s": "ABC"} | false | false | false | true | true - {"s": "ABcD"} | {"s": "ABc"} | false | false | false | true | true - {"s": "ABcD"} | {"s": "ABcD"} | false | true | true | true | false - {"s": "ABcD"} | {"s": "B"} | true | true | false | false | false - {"s": "B"} | {"s": ""} | false | false | false | true | true - {"s": "B"} | {"s": "a"} | true | true | false | false | false - {"s": "B"} | {"s": "ab"} | true | true | false | false | false - {"s": "B"} | {"s": "abc"} | true | true | false | false | false - {"s": "B"} | {"s": "abcd"} | true | true | false | false | false - {"s": "B"} | {"s": "b"} | true | true | false | false | false - {"s": "B"} | {"s": "A"} | false | false | false | true | true - {"s": "B"} | {"s": "AB"} | false | false | false | true | true - {"s": "B"} | {"s": "ABC"} | false | false | false | true | true - {"s": "B"} | {"s": "ABc"} | false | false | false | true | true - {"s": "B"} | {"s": "ABcD"} | false | false | false | true | true - {"s": "B"} | {"s": "B"} | false | true | true | true | false -(144 rows) - +psql: error: connection to server 
on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/sqljson.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/sqljson.out --- /Users/admin/pgsql/src/test/regress/expected/sqljson.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/sqljson.out 2024-12-13 13:20:12 @@ -1,1355 +1,2 @@ --- JSON() -SELECT JSON(); -ERROR: syntax error at or near ")" -LINE 1: SELECT JSON(); - ^ -SELECT JSON(NULL); - json ------- - -(1 row) - -SELECT JSON('{ "a" : 1 } '); - json --------------- - { "a" : 1 } -(1 row) - -SELECT JSON('{ "a" : 1 } ' FORMAT JSON); - json --------------- - { "a" : 1 } -(1 row) - -SELECT JSON('{ "a" : 1 } ' FORMAT JSON ENCODING UTF8); -ERROR: JSON ENCODING clause is only allowed for bytea input type -LINE 1: SELECT JSON('{ "a" : 1 } ' FORMAT JSON ENCODING UTF8); - ^ -SELECT JSON('{ "a" : 1 } '::bytea FORMAT JSON ENCODING UTF8); - json --------------- - { "a" : 1 } -(1 row) - -SELECT pg_typeof(JSON('{ "a" : 1 } ')); - pg_typeof ------------ - json -(1 row) - -SELECT JSON(' 1 '::json); - json ---------- - 1 -(1 row) - -SELECT JSON(' 1 '::jsonb); - json ------- - 1 -(1 row) - -SELECT JSON(' 1 '::json WITH UNIQUE KEYS); -ERROR: cannot use non-string types with WITH UNIQUE KEYS clause -LINE 1: SELECT JSON(' 1 '::json WITH UNIQUE KEYS); - ^ -SELECT JSON(123); -ERROR: cannot cast type integer to json -LINE 1: SELECT JSON(123); - ^ -SELECT JSON('{"a": 1, "a": 2}'); - json ------------------- - {"a": 1, "a": 2} -(1 row) - -SELECT JSON('{"a": 1, "a": 2}' WITH UNIQUE KEYS); -ERROR: duplicate JSON object key value -SELECT JSON('{"a": 1, "a": 2}' WITHOUT UNIQUE KEYS); - json ------------------- - {"a": 1, "a": 2} -(1 row) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123'); - QUERY PLAN ------------------------------ - Result - Output: JSON('123'::json) -(2 rows) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123' FORMAT JSON); - QUERY PLAN ------------------------------ - Result - Output: JSON('123'::json) -(2 rows) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123'::bytea FORMAT JSON); - QUERY PLAN ------------------------------------------------ - Result - Output: JSON('\x313233'::bytea FORMAT JSON) -(2 rows) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123'::bytea FORMAT JSON ENCODING UTF8); - QUERY PLAN -------------------------------------------------------------- - Result - Output: JSON('\x313233'::bytea FORMAT JSON ENCODING UTF8) -(2 rows) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123' WITH UNIQUE KEYS); - QUERY PLAN ----------------------------------------------- - Result - Output: JSON('123'::text WITH UNIQUE KEYS) -(2 rows) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123' WITHOUT UNIQUE KEYS); - QUERY PLAN ------------------------------ - Result - Output: JSON('123'::json) -(2 rows) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123'); - QUERY PLAN ------------------------------ - Result - Output: JSON('123'::json) -(2 rows) - -SELECT pg_typeof(JSON('123')); - pg_typeof ------------ - json -(1 row) - --- JSON_SCALAR() -SELECT JSON_SCALAR(); -ERROR: syntax error at or near ")" -LINE 1: SELECT JSON_SCALAR(); - ^ -SELECT JSON_SCALAR(NULL); - json_scalar -------------- - -(1 row) - -SELECT JSON_SCALAR(NULL::int); - json_scalar -------------- - -(1 row) - -SELECT JSON_SCALAR(123); - json_scalar -------------- - 123 -(1 row) - 
-SELECT JSON_SCALAR(123.45); - json_scalar -------------- - 123.45 -(1 row) - -SELECT JSON_SCALAR(123.45::numeric); - json_scalar -------------- - 123.45 -(1 row) - -SELECT JSON_SCALAR(true); - json_scalar -------------- - true -(1 row) - -SELECT JSON_SCALAR(false); - json_scalar -------------- - false -(1 row) - -SELECT JSON_SCALAR(' 123.45'); - json_scalar -------------- - " 123.45" -(1 row) - -SELECT JSON_SCALAR('2020-06-07'::date); - json_scalar --------------- - "2020-06-07" -(1 row) - -SELECT JSON_SCALAR('2020-06-07 01:02:03'::timestamp); - json_scalar ------------------------ - "2020-06-07T01:02:03" -(1 row) - -SELECT JSON_SCALAR('{}'::json); - json_scalar -------------- - {} -(1 row) - -SELECT JSON_SCALAR('{}'::jsonb); - json_scalar -------------- - {} -(1 row) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON_SCALAR(123); - QUERY PLAN ----------------------------- - Result - Output: JSON_SCALAR(123) -(2 rows) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON_SCALAR('123'); - QUERY PLAN ------------------------------------- - Result - Output: JSON_SCALAR('123'::text) -(2 rows) - --- JSON_SERIALIZE() -SELECT JSON_SERIALIZE(); -ERROR: syntax error at or near ")" -LINE 1: SELECT JSON_SERIALIZE(); - ^ -SELECT JSON_SERIALIZE(NULL); - json_serialize ----------------- - -(1 row) - -SELECT JSON_SERIALIZE(JSON('{ "a" : 1 } ')); - json_serialize ----------------- - { "a" : 1 } -(1 row) - -SELECT JSON_SERIALIZE('{ "a" : 1 } '); - json_serialize ----------------- - { "a" : 1 } -(1 row) - -SELECT JSON_SERIALIZE('1'); - json_serialize ----------------- - 1 -(1 row) - -SELECT JSON_SERIALIZE('1' FORMAT JSON); - json_serialize ----------------- - 1 -(1 row) - -SELECT JSON_SERIALIZE('{ "a" : 1 } ' RETURNING bytea); - json_serialize ----------------------------- - \x7b20226122203a2031207d20 -(1 row) - -SELECT JSON_SERIALIZE('{ "a" : 1 } ' RETURNING varchar); - json_serialize ----------------- - { "a" : 1 } -(1 row) - -SELECT pg_typeof(JSON_SERIALIZE(NULL)); - pg_typeof ------------ - text -(1 row) - --- only string types or bytea allowed -SELECT JSON_SERIALIZE('{ "a" : 1 } ' RETURNING jsonb); -ERROR: cannot use RETURNING type jsonb in JSON_SERIALIZE() -HINT: Try returning a string type or bytea. -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON_SERIALIZE('{}'); - QUERY PLAN ------------------------------------------------------ - Result - Output: JSON_SERIALIZE('{}'::json RETURNING text) -(2 rows) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON_SERIALIZE('{}' RETURNING bytea); - QUERY PLAN ------------------------------------------------------- - Result - Output: JSON_SERIALIZE('{}'::json RETURNING bytea) -(2 rows) - --- JSON_OBJECT() -SELECT JSON_OBJECT(); - json_object -------------- - {} -(1 row) - -SELECT JSON_OBJECT(RETURNING json); - json_object -------------- - {} -(1 row) - -SELECT JSON_OBJECT(RETURNING json FORMAT JSON); - json_object -------------- - {} -(1 row) - -SELECT JSON_OBJECT(RETURNING jsonb); - json_object -------------- - {} -(1 row) - -SELECT JSON_OBJECT(RETURNING jsonb FORMAT JSON); - json_object -------------- - {} -(1 row) - -SELECT JSON_OBJECT(RETURNING text); - json_object -------------- - {} -(1 row) - -SELECT JSON_OBJECT(RETURNING text FORMAT JSON); - json_object -------------- - {} -(1 row) - -SELECT JSON_OBJECT(RETURNING text FORMAT JSON ENCODING UTF8); -ERROR: cannot set JSON encoding for non-bytea output types -LINE 1: SELECT JSON_OBJECT(RETURNING text FORMAT JSON ENCODING UTF8)... 
- ^ -SELECT JSON_OBJECT(RETURNING text FORMAT JSON ENCODING INVALID_ENCODING); -ERROR: unrecognized JSON encoding: invalid_encoding -LINE 1: ...T JSON_OBJECT(RETURNING text FORMAT JSON ENCODING INVALID_EN... - ^ -SELECT JSON_OBJECT(RETURNING bytea); - json_object -------------- - \x7b7d -(1 row) - -SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON); - json_object -------------- - \x7b7d -(1 row) - -SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON ENCODING UTF8); - json_object -------------- - \x7b7d -(1 row) - -SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON ENCODING UTF16); -ERROR: unsupported JSON encoding -LINE 1: SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON ENCODING UTF1... - ^ -HINT: Only UTF8 JSON encoding is supported. -SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON ENCODING UTF32); -ERROR: unsupported JSON encoding -LINE 1: SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON ENCODING UTF3... - ^ -HINT: Only UTF8 JSON encoding is supported. -SELECT JSON_OBJECT('foo': NULL::int FORMAT JSON); -ERROR: cannot use non-string types with explicit FORMAT JSON clause -LINE 1: SELECT JSON_OBJECT('foo': NULL::int FORMAT JSON); - ^ -SELECT JSON_OBJECT('foo': NULL::int FORMAT JSON ENCODING UTF8); -ERROR: JSON ENCODING clause is only allowed for bytea input type -LINE 1: SELECT JSON_OBJECT('foo': NULL::int FORMAT JSON ENCODING UTF... - ^ -SELECT JSON_OBJECT('foo': NULL::json FORMAT JSON); - json_object ----------------- - {"foo" : null} -(1 row) - -SELECT JSON_OBJECT('foo': NULL::json FORMAT JSON ENCODING UTF8); -ERROR: JSON ENCODING clause is only allowed for bytea input type -LINE 1: SELECT JSON_OBJECT('foo': NULL::json FORMAT JSON ENCODING UT... - ^ -SELECT JSON_OBJECT('foo': NULL::jsonb FORMAT JSON); - json_object ---------------- - {"foo": null} -(1 row) - -SELECT JSON_OBJECT('foo': NULL::jsonb FORMAT JSON ENCODING UTF8); -ERROR: JSON ENCODING clause is only allowed for bytea input type -LINE 1: SELECT JSON_OBJECT('foo': NULL::jsonb FORMAT JSON ENCODING U... 
- ^ -SELECT JSON_OBJECT(NULL: 1); -ERROR: null value not allowed for object key -SELECT JSON_OBJECT('a': 2 + 3); - json_object -------------- - {"a" : 5} -(1 row) - -SELECT JSON_OBJECT('a' VALUE 2 + 3); - json_object -------------- - {"a" : 5} -(1 row) - ---SELECT JSON_OBJECT(KEY 'a' VALUE 2 + 3); -SELECT JSON_OBJECT('a' || 2: 1); - json_object -------------- - {"a2" : 1} -(1 row) - -SELECT JSON_OBJECT(('a' || 2) VALUE 1); - json_object -------------- - {"a2" : 1} -(1 row) - ---SELECT JSON_OBJECT('a' || 2 VALUE 1); ---SELECT JSON_OBJECT(KEY 'a' || 2 VALUE 1); -SELECT JSON_OBJECT('a': 2::text); - json_object -------------- - {"a" : "2"} -(1 row) - -SELECT JSON_OBJECT('a' VALUE 2::text); - json_object -------------- - {"a" : "2"} -(1 row) - ---SELECT JSON_OBJECT(KEY 'a' VALUE 2::text); -SELECT JSON_OBJECT(1::text: 2); - json_object -------------- - {"1" : 2} -(1 row) - -SELECT JSON_OBJECT((1::text) VALUE 2); - json_object -------------- - {"1" : 2} -(1 row) - ---SELECT JSON_OBJECT(1::text VALUE 2); ---SELECT JSON_OBJECT(KEY 1::text VALUE 2); -SELECT JSON_OBJECT(json '[1]': 123); -ERROR: key value must be scalar, not array, composite, or json -SELECT JSON_OBJECT(ARRAY[1,2,3]: 'aaa'); -ERROR: key value must be scalar, not array, composite, or json -SELECT JSON_OBJECT( - 'a': '123', - 1.23: 123, - 'c': json '[ 1,true,{ } ]', - 'd': jsonb '{ "x" : 123.45 }' -); - json_object -------------------------------------------------------------------- - {"a": "123", "c": [1, true, {}], "d": {"x": 123.45}, "1.23": 123} -(1 row) - -SELECT JSON_OBJECT( - 'a': '123', - 1.23: 123, - 'c': json '[ 1,true,{ } ]', - 'd': jsonb '{ "x" : 123.45 }' - RETURNING jsonb -); - json_object -------------------------------------------------------------------- - {"a": "123", "c": [1, true, {}], "d": {"x": 123.45}, "1.23": 123} -(1 row) - -/* -SELECT JSON_OBJECT( - 'a': '123', - KEY 1.23 VALUE 123, - 'c' VALUE json '[1, true, {}]' -); -*/ -SELECT JSON_OBJECT('a': '123', 'b': JSON_OBJECT('a': 111, 'b': 'aaa')); - json_object ------------------------------------------------ - {"a" : "123", "b" : {"a" : 111, "b" : "aaa"}} -(1 row) - -SELECT JSON_OBJECT('a': '123', 'b': JSON_OBJECT('a': 111, 'b': 'aaa' RETURNING jsonb)); - json_object -------------------------------------------- - {"a": "123", "b": {"a": 111, "b": "aaa"}} -(1 row) - -SELECT JSON_OBJECT('a': JSON_OBJECT('b': 1 RETURNING text)); - json_object ------------------------ - {"a" : "{\"b\" : 1}"} -(1 row) - -SELECT JSON_OBJECT('a': JSON_OBJECT('b': 1 RETURNING text) FORMAT JSON); - json_object -------------------- - {"a" : {"b" : 1}} -(1 row) - -SELECT JSON_OBJECT('a': JSON_OBJECT('b': 1 RETURNING bytea)); - json_object ---------------------------------- - {"a" : "\\x7b226222203a20317d"} -(1 row) - -SELECT JSON_OBJECT('a': JSON_OBJECT('b': 1 RETURNING bytea) FORMAT JSON); - json_object -------------------- - {"a" : {"b" : 1}} -(1 row) - -SELECT JSON_OBJECT('a': '1', 'b': NULL, 'c': 2); - json_object ----------------------------------- - {"a" : "1", "b" : null, "c" : 2} -(1 row) - -SELECT JSON_OBJECT('a': '1', 'b': NULL, 'c': 2 NULL ON NULL); - json_object ----------------------------------- - {"a" : "1", "b" : null, "c" : 2} -(1 row) - -SELECT JSON_OBJECT('a': '1', 'b': NULL, 'c': 2 ABSENT ON NULL); - json_object ----------------------- - {"a" : "1", "c" : 2} -(1 row) - -SELECT JSON_OBJECT(1: 1, '2': NULL, '3': 1, repeat('x', 1000): 1, 2: repeat('a', 100) WITH UNIQUE); -ERROR: duplicate JSON object key value: "2" -SELECT JSON_OBJECT(1: 1, '1': NULL WITH UNIQUE); -ERROR: 
duplicate JSON object key value: "1" -SELECT JSON_OBJECT(1: 1, '1': NULL ABSENT ON NULL WITH UNIQUE); -ERROR: duplicate JSON object key value: "1" -SELECT JSON_OBJECT(1: 1, '1': NULL NULL ON NULL WITH UNIQUE RETURNING jsonb); -ERROR: duplicate JSON object key value -SELECT JSON_OBJECT(1: 1, '1': NULL ABSENT ON NULL WITH UNIQUE RETURNING jsonb); -ERROR: duplicate JSON object key value -SELECT JSON_OBJECT(1: 1, '2': NULL, '1': 1 NULL ON NULL WITH UNIQUE); -ERROR: duplicate JSON object key value: "1" -SELECT JSON_OBJECT(1: 1, '2': NULL, '1': 1 ABSENT ON NULL WITH UNIQUE); -ERROR: duplicate JSON object key value: "1" -SELECT JSON_OBJECT(1: 1, '2': NULL, '1': 1 ABSENT ON NULL WITHOUT UNIQUE); - json_object --------------------- - {"1" : 1, "1" : 1} -(1 row) - -SELECT JSON_OBJECT(1: 1, '2': NULL, '1': 1 ABSENT ON NULL WITH UNIQUE RETURNING jsonb); -ERROR: duplicate JSON object key value -SELECT JSON_OBJECT(1: 1, '2': NULL, '1': 1 ABSENT ON NULL WITHOUT UNIQUE RETURNING jsonb); - json_object -------------- - {"1": 1} -(1 row) - -SELECT JSON_OBJECT(1: 1, '2': NULL, '3': 1, 4: NULL, '5': 'a' ABSENT ON NULL WITH UNIQUE RETURNING jsonb); - json_object ----------------------------- - {"1": 1, "3": 1, "5": "a"} -(1 row) - --- JSON_ARRAY() -SELECT JSON_ARRAY(); - json_array ------------- - [] -(1 row) - -SELECT JSON_ARRAY(RETURNING json); - json_array ------------- - [] -(1 row) - -SELECT JSON_ARRAY(RETURNING json FORMAT JSON); - json_array ------------- - [] -(1 row) - -SELECT JSON_ARRAY(RETURNING jsonb); - json_array ------------- - [] -(1 row) - -SELECT JSON_ARRAY(RETURNING jsonb FORMAT JSON); - json_array ------------- - [] -(1 row) - -SELECT JSON_ARRAY(RETURNING text); - json_array ------------- - [] -(1 row) - -SELECT JSON_ARRAY(RETURNING text FORMAT JSON); - json_array ------------- - [] -(1 row) - -SELECT JSON_ARRAY(RETURNING text FORMAT JSON ENCODING UTF8); -ERROR: cannot set JSON encoding for non-bytea output types -LINE 1: SELECT JSON_ARRAY(RETURNING text FORMAT JSON ENCODING UTF8); - ^ -SELECT JSON_ARRAY(RETURNING text FORMAT JSON ENCODING INVALID_ENCODING); -ERROR: unrecognized JSON encoding: invalid_encoding -LINE 1: ...CT JSON_ARRAY(RETURNING text FORMAT JSON ENCODING INVALID_EN... - ^ -SELECT JSON_ARRAY(RETURNING bytea); - json_array ------------- - \x5b5d -(1 row) - -SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON); - json_array ------------- - \x5b5d -(1 row) - -SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON ENCODING UTF8); - json_array ------------- - \x5b5d -(1 row) - -SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON ENCODING UTF16); -ERROR: unsupported JSON encoding -LINE 1: SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON ENCODING UTF16... - ^ -HINT: Only UTF8 JSON encoding is supported. -SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON ENCODING UTF32); -ERROR: unsupported JSON encoding -LINE 1: SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON ENCODING UTF32... - ^ -HINT: Only UTF8 JSON encoding is supported. 
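The JSON_OBJECT() duplicate-key hunks above all reduce to two orthogonal clauses: WITH/WITHOUT UNIQUE [KEYS] decides whether duplicate keys raise an error, and NULL/ABSENT ON NULL decides whether null-valued pairs are kept. Note from the removed output that WITH UNIQUE still rejects a duplicate even when ABSENT ON NULL would have dropped the offending null pair. A minimal sanity check against a healthy server, mirroring the removed expected output:

SELECT JSON_OBJECT('a': 1, 'a': 2);                    -- duplicates kept by default
SELECT JSON_OBJECT('a': 1, 'a': 2 WITH UNIQUE);        -- ERROR: duplicate JSON object key value
SELECT JSON_OBJECT('a': 1, 'b': NULL ABSENT ON NULL);  -- {"a" : 1}, null pair dropped
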
-SELECT JSON_ARRAY('aaa', 111, true, array[1,2,3], NULL, json '{"a": [1]}', jsonb '["a",3]'); - json_array ------------------------------------------------------ - ["aaa", 111, true, [1, 2, 3], {"a": [1]}, ["a", 3]] -(1 row) - -SELECT JSON_ARRAY('a', NULL, 'b' NULL ON NULL); - json_array ------------------- - ["a", null, "b"] -(1 row) - -SELECT JSON_ARRAY('a', NULL, 'b' ABSENT ON NULL); - json_array ------------- - ["a", "b"] -(1 row) - -SELECT JSON_ARRAY(NULL, NULL, 'b' ABSENT ON NULL); - json_array ------------- - ["b"] -(1 row) - -SELECT JSON_ARRAY('a', NULL, 'b' NULL ON NULL RETURNING jsonb); - json_array ------------------- - ["a", null, "b"] -(1 row) - -SELECT JSON_ARRAY('a', NULL, 'b' ABSENT ON NULL RETURNING jsonb); - json_array ------------- - ["a", "b"] -(1 row) - -SELECT JSON_ARRAY(NULL, NULL, 'b' ABSENT ON NULL RETURNING jsonb); - json_array ------------- - ["b"] -(1 row) - -SELECT JSON_ARRAY(JSON_ARRAY('{ "a" : 123 }' RETURNING text)); - json_array -------------------------------- - ["[\"{ \\\"a\\\" : 123 }\"]"] -(1 row) - -SELECT JSON_ARRAY(JSON_ARRAY('{ "a" : 123 }' FORMAT JSON RETURNING text)); - json_array ------------------------ - ["[{ \"a\" : 123 }]"] -(1 row) - -SELECT JSON_ARRAY(JSON_ARRAY('{ "a" : 123 }' FORMAT JSON RETURNING text) FORMAT JSON); - json_array -------------------- - [[{ "a" : 123 }]] -(1 row) - -SELECT JSON_ARRAY(SELECT i FROM (VALUES (1), (2), (NULL), (4)) foo(i)); - json_array ------------- - [1, 2, 4] -(1 row) - -SELECT JSON_ARRAY(SELECT i FROM (VALUES (NULL::int[]), ('{1,2}'), (NULL), (NULL), ('{3,4}'), (NULL)) foo(i)); - json_array ------------- - [[1,2], + - [3,4]] -(1 row) - -SELECT JSON_ARRAY(SELECT i FROM (VALUES (NULL::int[]), ('{1,2}'), (NULL), (NULL), ('{3,4}'), (NULL)) foo(i) RETURNING jsonb); - json_array ------------------- - [[1, 2], [3, 4]] -(1 row) - ---SELECT JSON_ARRAY(SELECT i FROM (VALUES (NULL::int[]), ('{1,2}'), (NULL), (NULL), ('{3,4}'), (NULL)) foo(i) NULL ON NULL); ---SELECT JSON_ARRAY(SELECT i FROM (VALUES (NULL::int[]), ('{1,2}'), (NULL), (NULL), ('{3,4}'), (NULL)) foo(i) NULL ON NULL RETURNING jsonb); -SELECT JSON_ARRAY(SELECT i FROM (VALUES (3), (1), (NULL), (2)) foo(i) ORDER BY i); - json_array ------------- - [1, 2, 3] -(1 row) - --- Should fail -SELECT JSON_ARRAY(SELECT FROM (VALUES (1)) foo(i)); -ERROR: subquery must return only one column -LINE 1: SELECT JSON_ARRAY(SELECT FROM (VALUES (1)) foo(i)); - ^ -SELECT JSON_ARRAY(SELECT i, i FROM (VALUES (1)) foo(i)); -ERROR: subquery must return only one column -LINE 1: SELECT JSON_ARRAY(SELECT i, i FROM (VALUES (1)) foo(i)); - ^ -SELECT JSON_ARRAY(SELECT * FROM (VALUES (1, 2)) foo(i, j)); -ERROR: subquery must return only one column -LINE 1: SELECT JSON_ARRAY(SELECT * FROM (VALUES (1, 2)) foo(i, j)); - ^ --- JSON_ARRAYAGG() -SELECT JSON_ARRAYAGG(i) IS NULL, - JSON_ARRAYAGG(i RETURNING jsonb) IS NULL -FROM generate_series(1, 0) i; - ?column? | ?column? 
-----------+---------- - t | t -(1 row) - -SELECT JSON_ARRAYAGG(i), - JSON_ARRAYAGG(i RETURNING jsonb) -FROM generate_series(1, 5) i; - json_arrayagg | json_arrayagg ------------------+----------------- - [1, 2, 3, 4, 5] | [1, 2, 3, 4, 5] -(1 row) - -SELECT JSON_ARRAYAGG(i ORDER BY i DESC) -FROM generate_series(1, 5) i; - json_arrayagg ------------------ - [5, 4, 3, 2, 1] -(1 row) - -SELECT JSON_ARRAYAGG(i::text::json) -FROM generate_series(1, 5) i; - json_arrayagg ------------------ - [1, 2, 3, 4, 5] -(1 row) - -SELECT JSON_ARRAYAGG(JSON_ARRAY(i, i + 1 RETURNING text) FORMAT JSON) -FROM generate_series(1, 5) i; - json_arrayagg ------------------------------------------- - [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6]] -(1 row) - -SELECT JSON_ARRAYAGG(NULL), - JSON_ARRAYAGG(NULL RETURNING jsonb) -FROM generate_series(1, 5); - json_arrayagg | json_arrayagg ----------------+--------------- - [] | [] -(1 row) - -SELECT JSON_ARRAYAGG(NULL NULL ON NULL), - JSON_ARRAYAGG(NULL NULL ON NULL RETURNING jsonb) -FROM generate_series(1, 5); - json_arrayagg | json_arrayagg ---------------------------------+-------------------------------- - [null, null, null, null, null] | [null, null, null, null, null] -(1 row) - -\x -SELECT - JSON_ARRAYAGG(bar) as no_options, - JSON_ARRAYAGG(bar RETURNING jsonb) as returning_jsonb, - JSON_ARRAYAGG(bar ABSENT ON NULL) as absent_on_null, - JSON_ARRAYAGG(bar ABSENT ON NULL RETURNING jsonb) as absentonnull_returning_jsonb, - JSON_ARRAYAGG(bar NULL ON NULL) as null_on_null, - JSON_ARRAYAGG(bar NULL ON NULL RETURNING jsonb) as nullonnull_returning_jsonb, - JSON_ARRAYAGG(foo) as row_no_options, - JSON_ARRAYAGG(foo RETURNING jsonb) as row_returning_jsonb, - JSON_ARRAYAGG(foo ORDER BY bar) FILTER (WHERE bar > 2) as row_filtered_agg, - JSON_ARRAYAGG(foo ORDER BY bar RETURNING jsonb) FILTER (WHERE bar > 2) as row_filtered_agg_returning_jsonb -FROM - (VALUES (NULL), (3), (1), (NULL), (NULL), (5), (2), (4), (NULL)) foo(bar); --[ RECORD 1 ]--------------------+------------------------------------------------------------------------------------------------------------------------- -no_options | [1, 2, 3, 4, 5] -returning_jsonb | [1, 2, 3, 4, 5] -absent_on_null | [1, 2, 3, 4, 5] -absentonnull_returning_jsonb | [1, 2, 3, 4, 5] -null_on_null | [1, 2, 3, 4, 5, null, null, null, null] -nullonnull_returning_jsonb | [1, 2, 3, 4, 5, null, null, null, null] -row_no_options | [{"bar":1}, + - | {"bar":2}, + - | {"bar":3}, + - | {"bar":4}, + - | {"bar":5}, + - | {"bar":null}, + - | {"bar":null}, + - | {"bar":null}, + - | {"bar":null}] -row_returning_jsonb | [{"bar": 1}, {"bar": 2}, {"bar": 3}, {"bar": 4}, {"bar": 5}, {"bar": null}, {"bar": null}, {"bar": null}, {"bar": null}] -row_filtered_agg | [{"bar":3}, + - | {"bar":4}, + - | {"bar":5}] -row_filtered_agg_returning_jsonb | [{"bar": 3}, {"bar": 4}, {"bar": 5}] - -\x -SELECT - bar, JSON_ARRAYAGG(bar) FILTER (WHERE bar > 2) OVER (PARTITION BY foo.bar % 2) -FROM - (VALUES (NULL), (3), (1), (NULL), (NULL), (5), (2), (4), (NULL), (5), (4)) foo(bar); - bar | json_arrayagg ------+--------------- - 4 | [4, 4] - 4 | [4, 4] - 2 | [4, 4] - 5 | [5, 3, 5] - 3 | [5, 3, 5] - 1 | [5, 3, 5] - 5 | [5, 3, 5] - | - | - | - | -(11 rows) - --- JSON_OBJECTAGG() -SELECT JSON_OBJECTAGG('key': 1) IS NULL, - JSON_OBJECTAGG('key': 1 RETURNING jsonb) IS NULL -WHERE FALSE; - ?column? | ?column? 
-----------+---------- - t | t -(1 row) - -SELECT JSON_OBJECTAGG(NULL: 1); -ERROR: null value not allowed for object key -SELECT JSON_OBJECTAGG(NULL: 1 RETURNING jsonb); -ERROR: field name must not be null -SELECT - JSON_OBJECTAGG(i: i), --- JSON_OBJECTAGG(i VALUE i), --- JSON_OBJECTAGG(KEY i VALUE i), - JSON_OBJECTAGG(i: i RETURNING jsonb) -FROM - generate_series(1, 5) i; - json_objectagg | json_objectagg --------------------------------------------------+------------------------------------------ - { "1" : 1, "2" : 2, "3" : 3, "4" : 4, "5" : 5 } | {"1": 1, "2": 2, "3": 3, "4": 4, "5": 5} -(1 row) - -SELECT - JSON_OBJECTAGG(k: v), - JSON_OBJECTAGG(k: v NULL ON NULL), - JSON_OBJECTAGG(k: v ABSENT ON NULL), - JSON_OBJECTAGG(k: v RETURNING jsonb), - JSON_OBJECTAGG(k: v NULL ON NULL RETURNING jsonb), - JSON_OBJECTAGG(k: v ABSENT ON NULL RETURNING jsonb) -FROM - (VALUES (1, 1), (1, NULL), (2, NULL), (3, 3)) foo(k, v); - json_objectagg | json_objectagg | json_objectagg | json_objectagg | json_objectagg | json_objectagg -----------------------------------------------+----------------------------------------------+----------------------+--------------------------------+--------------------------------+------------------ - { "1" : 1, "1" : null, "2" : null, "3" : 3 } | { "1" : 1, "1" : null, "2" : null, "3" : 3 } | { "1" : 1, "3" : 3 } | {"1": null, "2": null, "3": 3} | {"1": null, "2": null, "3": 3} | {"1": 1, "3": 3} -(1 row) - -SELECT JSON_OBJECTAGG(k: v WITH UNIQUE KEYS) -FROM (VALUES (1, 1), (1, NULL), (2, 2)) foo(k, v); -ERROR: duplicate JSON object key value: "1" -SELECT JSON_OBJECTAGG(k: v ABSENT ON NULL WITH UNIQUE KEYS) -FROM (VALUES (1, 1), (1, NULL), (2, 2)) foo(k, v); -ERROR: duplicate JSON object key value: "1" -SELECT JSON_OBJECTAGG(k: v ABSENT ON NULL WITH UNIQUE KEYS) -FROM (VALUES (1, 1), (0, NULL), (3, NULL), (2, 2), (4, NULL)) foo(k, v); - json_objectagg ----------------------- - { "1" : 1, "2" : 2 } -(1 row) - -SELECT JSON_OBJECTAGG(k: v WITH UNIQUE KEYS RETURNING jsonb) -FROM (VALUES (1, 1), (1, NULL), (2, 2)) foo(k, v); -ERROR: duplicate JSON object key value -SELECT JSON_OBJECTAGG(k: v ABSENT ON NULL WITH UNIQUE KEYS RETURNING jsonb) -FROM (VALUES (1, 1), (1, NULL), (2, 2)) foo(k, v); -ERROR: duplicate JSON object key value -SELECT JSON_OBJECTAGG(k: v ABSENT ON NULL WITH UNIQUE KEYS RETURNING jsonb) -FROM (VALUES (1, 1), (0, NULL),(4, null), (5, null),(6, null),(2, 2)) foo(k, v); - json_objectagg ------------------- - {"1": 1, "2": 2} -(1 row) - -SELECT JSON_OBJECTAGG(mod(i,100): (i)::text FORMAT JSON WITH UNIQUE) -FROM generate_series(0, 199) i; -ERROR: duplicate JSON object key value: "0" --- Test JSON_OBJECT deparsing -EXPLAIN (VERBOSE, COSTS OFF) -SELECT JSON_OBJECT('foo' : '1' FORMAT JSON, 'bar' : 'baz' RETURNING json); - QUERY PLAN ------------------------------------------------------------------------------- - Result - Output: JSON_OBJECT('foo' : '1'::json, 'bar' : 'baz'::text RETURNING json) -(2 rows) - -CREATE VIEW json_object_view AS -SELECT JSON_OBJECT('foo' : '1' FORMAT JSON, 'bar' : 'baz' RETURNING json); -\sv json_object_view -CREATE OR REPLACE VIEW public.json_object_view AS - SELECT JSON_OBJECT('foo' : '1'::text FORMAT JSON, 'bar' : 'baz'::text RETURNING json) AS "json_object" -DROP VIEW json_object_view; -SELECT to_json(a) AS a, JSON_OBJECTAGG(k : v WITH UNIQUE KEYS) OVER (ORDER BY k) -FROM (VALUES (1,1), (2,2)) a(k,v); - a | json_objectagg ----------------+---------------------- - {"k":1,"v":1} | { "1" : 1 } - {"k":2,"v":2} | { "1" : 1, "2" : 2 } -(2 
rows) - -SELECT to_json(a) AS a, JSON_OBJECTAGG(k : v WITH UNIQUE KEYS) OVER (ORDER BY k) -FROM (VALUES (1,1), (1,2), (2,2)) a(k,v); -ERROR: duplicate JSON object key value: "1" -SELECT to_json(a) AS a, JSON_OBJECTAGG(k : v ABSENT ON NULL WITH UNIQUE KEYS) - OVER (ORDER BY k) -FROM (VALUES (1,1), (1,null), (2,2)) a(k,v); -ERROR: duplicate JSON object key value: "1" -SELECT to_json(a) AS a, JSON_OBJECTAGG(k : v ABSENT ON NULL) -OVER (ORDER BY k) -FROM (VALUES (1,1), (1,null), (2,2)) a(k,v); - a | json_objectagg -------------------+---------------------- - {"k":1,"v":1} | { "1" : 1 } - {"k":1,"v":null} | { "1" : 1 } - {"k":2,"v":2} | { "1" : 1, "2" : 2 } -(3 rows) - -SELECT to_json(a) AS a, JSON_OBJECTAGG(k : v ABSENT ON NULL) -OVER (ORDER BY k RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) -FROM (VALUES (1,1), (1,null), (2,2)) a(k,v); - a | json_objectagg -------------------+---------------------- - {"k":1,"v":1} | { "1" : 1, "2" : 2 } - {"k":1,"v":null} | { "1" : 1, "2" : 2 } - {"k":2,"v":2} | { "1" : 1, "2" : 2 } -(3 rows) - --- Test JSON_ARRAY deparsing -EXPLAIN (VERBOSE, COSTS OFF) -SELECT JSON_ARRAY('1' FORMAT JSON, 2 RETURNING json); - QUERY PLAN ---------------------------------------------------- - Result - Output: JSON_ARRAY('1'::json, 2 RETURNING json) -(2 rows) - -CREATE VIEW json_array_view AS -SELECT JSON_ARRAY('1' FORMAT JSON, 2 RETURNING json); -\sv json_array_view -CREATE OR REPLACE VIEW public.json_array_view AS - SELECT JSON_ARRAY('1'::text FORMAT JSON, 2 RETURNING json) AS "json_array" -DROP VIEW json_array_view; --- Test JSON_OBJECTAGG deparsing -EXPLAIN (VERBOSE, COSTS OFF) -SELECT JSON_OBJECTAGG(i: ('111' || i)::bytea FORMAT JSON WITH UNIQUE RETURNING text) FILTER (WHERE i > 3) -FROM generate_series(1,5) i; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------- - Aggregate - Output: JSON_OBJECTAGG(i : (('111'::text || (i)::text))::bytea FORMAT JSON WITH UNIQUE KEYS RETURNING text) FILTER (WHERE (i > 3)) - -> Function Scan on pg_catalog.generate_series i - Output: i - Function Call: generate_series(1, 5) -(5 rows) - -EXPLAIN (VERBOSE, COSTS OFF) -SELECT JSON_OBJECTAGG(i: ('111' || i)::bytea FORMAT JSON WITH UNIQUE RETURNING text) OVER (PARTITION BY i % 2) -FROM generate_series(1,5) i; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------ - WindowAgg - Output: JSON_OBJECTAGG(i : (('111'::text || (i)::text))::bytea FORMAT JSON WITH UNIQUE KEYS RETURNING text) OVER (?), ((i % 2)) - -> Sort - Output: ((i % 2)), i - Sort Key: ((i.i % 2)) - -> Function Scan on pg_catalog.generate_series i - Output: (i % 2), i - Function Call: generate_series(1, 5) -(8 rows) - -CREATE VIEW json_objectagg_view AS -SELECT JSON_OBJECTAGG(i: ('111' || i)::bytea FORMAT JSON WITH UNIQUE RETURNING text) FILTER (WHERE i > 3) -FROM generate_series(1,5) i; -\sv json_objectagg_view -CREATE OR REPLACE VIEW public.json_objectagg_view AS - SELECT JSON_OBJECTAGG(i : ('111'::text || i)::bytea FORMAT JSON WITH UNIQUE KEYS RETURNING text) FILTER (WHERE i > 3) AS "json_objectagg" - FROM generate_series(1, 5) i(i) -DROP VIEW json_objectagg_view; --- Test JSON_ARRAYAGG deparsing -EXPLAIN (VERBOSE, COSTS OFF) -SELECT JSON_ARRAYAGG(('111' || i)::bytea FORMAT JSON NULL ON NULL RETURNING text) FILTER (WHERE i > 3) -FROM generate_series(1,5) i; - QUERY PLAN 
------------------------------------------------------------------------------------------------------------------------------ - Aggregate - Output: JSON_ARRAYAGG((('111'::text || (i)::text))::bytea FORMAT JSON NULL ON NULL RETURNING text) FILTER (WHERE (i > 3)) - -> Function Scan on pg_catalog.generate_series i - Output: i - Function Call: generate_series(1, 5) -(5 rows) - -EXPLAIN (VERBOSE, COSTS OFF) -SELECT JSON_ARRAYAGG(('111' || i)::bytea FORMAT JSON NULL ON NULL RETURNING text) OVER (PARTITION BY i % 2) -FROM generate_series(1,5) i; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------- - WindowAgg - Output: JSON_ARRAYAGG((('111'::text || (i)::text))::bytea FORMAT JSON NULL ON NULL RETURNING text) OVER (?), ((i % 2)) - -> Sort - Output: ((i % 2)), i - Sort Key: ((i.i % 2)) - -> Function Scan on pg_catalog.generate_series i - Output: (i % 2), i - Function Call: generate_series(1, 5) -(8 rows) - -CREATE VIEW json_arrayagg_view AS -SELECT JSON_ARRAYAGG(('111' || i)::bytea FORMAT JSON NULL ON NULL RETURNING text) FILTER (WHERE i > 3) -FROM generate_series(1,5) i; -\sv json_arrayagg_view -CREATE OR REPLACE VIEW public.json_arrayagg_view AS - SELECT JSON_ARRAYAGG(('111'::text || i)::bytea FORMAT JSON NULL ON NULL RETURNING text) FILTER (WHERE i > 3) AS "json_arrayagg" - FROM generate_series(1, 5) i(i) -DROP VIEW json_arrayagg_view; --- Test JSON_ARRAY(subquery) deparsing -EXPLAIN (VERBOSE, COSTS OFF) -SELECT JSON_ARRAY(SELECT i FROM (VALUES (1), (2), (NULL), (4)) foo(i) RETURNING jsonb); - QUERY PLAN ---------------------------------------------------------------------- - Result - Output: (InitPlan 1).col1 - InitPlan 1 - -> Aggregate - Output: JSON_ARRAYAGG("*VALUES*".column1 RETURNING jsonb) - -> Values Scan on "*VALUES*" - Output: "*VALUES*".column1 -(7 rows) - -CREATE VIEW json_array_subquery_view AS -SELECT JSON_ARRAY(SELECT i FROM (VALUES (1), (2), (NULL), (4)) foo(i) RETURNING jsonb); -\sv json_array_subquery_view -CREATE OR REPLACE VIEW public.json_array_subquery_view AS - SELECT ( SELECT JSON_ARRAYAGG(q.a RETURNING jsonb) AS "json_arrayagg" - FROM ( SELECT foo.i - FROM ( VALUES (1), (2), (NULL::integer), (4)) foo(i)) q(a)) AS "json_array" -DROP VIEW json_array_subquery_view; --- IS JSON predicate -SELECT NULL IS JSON; - ?column? ----------- - -(1 row) - -SELECT NULL IS NOT JSON; - ?column? ----------- - -(1 row) - -SELECT NULL::json IS JSON; - ?column? ----------- - -(1 row) - -SELECT NULL::jsonb IS JSON; - ?column? ----------- - -(1 row) - -SELECT NULL::text IS JSON; - ?column? ----------- - -(1 row) - -SELECT NULL::bytea IS JSON; - ?column? ----------- - -(1 row) - -SELECT NULL::int IS JSON; -ERROR: cannot use type integer in IS JSON predicate -SELECT '' IS JSON; - ?column? 
----------- - f -(1 row) - -SELECT bytea '\x00' IS JSON; -ERROR: invalid byte sequence for encoding "UTF8": 0x00 -CREATE TABLE test_is_json (js text); -INSERT INTO test_is_json VALUES - (NULL), - (''), - ('123'), - ('"aaa "'), - ('true'), - ('null'), - ('[]'), - ('[1, "2", {}]'), - ('{}'), - ('{ "a": 1, "b": null }'), - ('{ "a": 1, "a": null }'), - ('{ "a": 1, "b": [{ "a": 1 }, { "a": 2 }] }'), - ('{ "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] }'), - ('aaa'), - ('{a:1}'), - ('["a",]'); -SELECT - js, - js IS JSON "IS JSON", - js IS NOT JSON "IS NOT JSON", - js IS JSON VALUE "IS VALUE", - js IS JSON OBJECT "IS OBJECT", - js IS JSON ARRAY "IS ARRAY", - js IS JSON SCALAR "IS SCALAR", - js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE", - js IS JSON WITH UNIQUE KEYS "WITH UNIQUE" -FROM - test_is_json; - js | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE ------------------------------------------------+---------+-------------+----------+-----------+----------+-----------+----------------+------------- - | | | | | | | | - | f | t | f | f | f | f | f | f - 123 | t | f | t | f | f | t | t | t - "aaa " | t | f | t | f | f | t | t | t - true | t | f | t | f | f | t | t | t - null | t | f | t | f | f | t | t | t - [] | t | f | t | f | t | f | t | t - [1, "2", {}] | t | f | t | f | t | f | t | t - {} | t | f | t | t | f | f | t | t - { "a": 1, "b": null } | t | f | t | t | f | f | t | t - { "a": 1, "a": null } | t | f | t | t | f | f | t | f - { "a": 1, "b": [{ "a": 1 }, { "a": 2 }] } | t | f | t | t | f | f | t | t - { "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] } | t | f | t | t | f | f | t | f - aaa | f | t | f | f | f | f | f | f - {a:1} | f | t | f | f | f | f | f | f - ["a",] | f | t | f | f | f | f | f | f -(16 rows) - -SELECT - js, - js IS JSON "IS JSON", - js IS NOT JSON "IS NOT JSON", - js IS JSON VALUE "IS VALUE", - js IS JSON OBJECT "IS OBJECT", - js IS JSON ARRAY "IS ARRAY", - js IS JSON SCALAR "IS SCALAR", - js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE", - js IS JSON WITH UNIQUE KEYS "WITH UNIQUE" -FROM - (SELECT js::json FROM test_is_json WHERE js IS JSON) foo(js); - js | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE ------------------------------------------------+---------+-------------+----------+-----------+----------+-----------+----------------+------------- - 123 | t | f | t | f | f | t | t | t - "aaa " | t | f | t | f | f | t | t | t - true | t | f | t | f | f | t | t | t - null | t | f | t | f | f | t | t | t - [] | t | f | t | f | t | f | t | t - [1, "2", {}] | t | f | t | f | t | f | t | t - {} | t | f | t | t | f | f | t | t - { "a": 1, "b": null } | t | f | t | t | f | f | t | t - { "a": 1, "a": null } | t | f | t | t | f | f | t | f - { "a": 1, "b": [{ "a": 1 }, { "a": 2 }] } | t | f | t | t | f | f | t | t - { "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] } | t | f | t | t | f | f | t | f -(11 rows) - -SELECT - js0, - js IS JSON "IS JSON", - js IS NOT JSON "IS NOT JSON", - js IS JSON VALUE "IS VALUE", - js IS JSON OBJECT "IS OBJECT", - js IS JSON ARRAY "IS ARRAY", - js IS JSON SCALAR "IS SCALAR", - js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE", - js IS JSON WITH UNIQUE KEYS "WITH UNIQUE" -FROM - (SELECT js, js::bytea FROM test_is_json WHERE js IS JSON) foo(js0, js); - js0 | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE 
------------------------------------------------+---------+-------------+----------+-----------+----------+-----------+----------------+------------- - 123 | t | f | t | f | f | t | t | t - "aaa " | t | f | t | f | f | t | t | t - true | t | f | t | f | f | t | t | t - null | t | f | t | f | f | t | t | t - [] | t | f | t | f | t | f | t | t - [1, "2", {}] | t | f | t | f | t | f | t | t - {} | t | f | t | t | f | f | t | t - { "a": 1, "b": null } | t | f | t | t | f | f | t | t - { "a": 1, "a": null } | t | f | t | t | f | f | t | f - { "a": 1, "b": [{ "a": 1 }, { "a": 2 }] } | t | f | t | t | f | f | t | t - { "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] } | t | f | t | t | f | f | t | f -(11 rows) - -SELECT - js, - js IS JSON "IS JSON", - js IS NOT JSON "IS NOT JSON", - js IS JSON VALUE "IS VALUE", - js IS JSON OBJECT "IS OBJECT", - js IS JSON ARRAY "IS ARRAY", - js IS JSON SCALAR "IS SCALAR", - js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE", - js IS JSON WITH UNIQUE KEYS "WITH UNIQUE" -FROM - (SELECT js::jsonb FROM test_is_json WHERE js IS JSON) foo(js); - js | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE --------------------------------------+---------+-------------+----------+-----------+----------+-----------+----------------+------------- - 123 | t | f | t | f | f | t | t | t - "aaa " | t | f | t | f | f | t | t | t - true | t | f | t | f | f | t | t | t - null | t | f | t | f | f | t | t | t - [] | t | f | t | f | t | f | t | t - [1, "2", {}] | t | f | t | f | t | f | t | t - {} | t | f | t | t | f | f | t | t - {"a": 1, "b": null} | t | f | t | t | f | f | t | t - {"a": null} | t | f | t | t | f | f | t | t - {"a": 1, "b": [{"a": 1}, {"a": 2}]} | t | f | t | t | f | f | t | t - {"a": 1, "b": [{"a": 2, "b": 0}]} | t | f | t | t | f | f | t | t -(11 rows) - --- Test IS JSON deparsing -EXPLAIN (VERBOSE, COSTS OFF) -SELECT '1' IS JSON AS "any", ('1' || i) IS JSON SCALAR AS "scalar", '[]' IS NOT JSON ARRAY AS "array", '{}' IS JSON OBJECT WITH UNIQUE AS "object" FROM generate_series(1, 3) i; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------- - Function Scan on pg_catalog.generate_series i - Output: ('1'::text IS JSON), (('1'::text || (i)::text) IS JSON SCALAR), (NOT ('[]'::text IS JSON ARRAY)), ('{}'::text IS JSON OBJECT WITH UNIQUE KEYS) - Function Call: generate_series(1, 3) -(3 rows) - -CREATE VIEW is_json_view AS -SELECT '1' IS JSON AS "any", ('1' || i) IS JSON SCALAR AS "scalar", '[]' IS NOT JSON ARRAY AS "array", '{}' IS JSON OBJECT WITH UNIQUE AS "object" FROM generate_series(1, 3) i; -\sv is_json_view -CREATE OR REPLACE VIEW public.is_json_view AS - SELECT '1'::text IS JSON AS "any", - ('1'::text || i) IS JSON SCALAR AS scalar, - NOT '[]'::text IS JSON ARRAY AS "array", - '{}'::text IS JSON OBJECT WITH UNIQUE KEYS AS object - FROM generate_series(1, 3) i(i) -DROP VIEW is_json_view; --- Test implicit coercion to a fixed-length type specified in RETURNING -SELECT JSON_SERIALIZE('{ "a" : 1 } ' RETURNING varchar(2)); -ERROR: value too long for type character varying(2) -SELECT JSON_OBJECT('a': JSON_OBJECT('b': 1 RETURNING varchar(2))); -ERROR: value too long for type character varying(2) -SELECT JSON_ARRAY(JSON_ARRAY('{ "a" : 123 }' RETURNING varchar(2))); -ERROR: value too long for type character varying(2) -SELECT JSON_ARRAYAGG(('111' || i)::bytea FORMAT JSON NULL ON NULL RETURNING varchar(2)) FROM 
generate_series(1,1) i; -ERROR: value too long for type character varying(2) -SELECT JSON_OBJECTAGG(i: ('111' || i)::bytea FORMAT JSON WITH UNIQUE RETURNING varchar(2)) FROM generate_series(1, 1) i; -ERROR: value too long for type character varying(2) --- Now try domain over fixed-length type -CREATE DOMAIN sqljson_char2 AS char(2) CHECK (VALUE NOT IN ('12')); -SELECT JSON_SERIALIZE('123' RETURNING sqljson_char2); -ERROR: value too long for type character(2) -SELECT JSON_SERIALIZE('12' RETURNING sqljson_char2); -ERROR: value for domain sqljson_char2 violates check constraint "sqljson_char2_check" --- Bug #18657: JsonValueExpr.raw_expr was not initialized in ExecInitExprRec() --- causing the Aggrefs contained in it to also not be initialized, which led --- to a crash in ExecBuildAggTrans() as mentioned in the bug report: --- https://postgr.es/m/18657-1b90ccce2b16bdb8@postgresql.org -CREATE FUNCTION volatile_one() RETURNS int AS $$ BEGIN RETURN 1; END; $$ LANGUAGE plpgsql VOLATILE; -CREATE FUNCTION stable_one() RETURNS int AS $$ BEGIN RETURN 1; END; $$ LANGUAGE plpgsql STABLE; -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON_OBJECT('a': JSON_OBJECTAGG('b': volatile_one() RETURNING text) FORMAT JSON); - QUERY PLAN -------------------------------------------------------------------------------------------------------------- - Aggregate - Output: JSON_OBJECT('a' : JSON_OBJECTAGG('b' : volatile_one() RETURNING text) FORMAT JSON RETURNING json) - -> Result -(3 rows) - -SELECT JSON_OBJECT('a': JSON_OBJECTAGG('b': volatile_one() RETURNING text) FORMAT JSON); - json_object ---------------------- - {"a" : { "b" : 1 }} -(1 row) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON_OBJECT('a': JSON_OBJECTAGG('b': stable_one() RETURNING text) FORMAT JSON); - QUERY PLAN ------------------------------------------------------------------------------------------------------------ - Aggregate - Output: JSON_OBJECT('a' : JSON_OBJECTAGG('b' : stable_one() RETURNING text) FORMAT JSON RETURNING json) - -> Result -(3 rows) - -SELECT JSON_OBJECT('a': JSON_OBJECTAGG('b': stable_one() RETURNING text) FORMAT JSON); - json_object ---------------------- - {"a" : { "b" : 1 }} -(1 row) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON_OBJECT('a': JSON_OBJECTAGG('b': 1 RETURNING text) FORMAT JSON); - QUERY PLAN ------------------------------------------------------------------------------------------------- - Aggregate - Output: JSON_OBJECT('a' : JSON_OBJECTAGG('b' : 1 RETURNING text) FORMAT JSON RETURNING json) - -> Result -(3 rows) - -SELECT JSON_OBJECT('a': JSON_OBJECTAGG('b': 1 RETURNING text) FORMAT JSON); - json_object ---------------------- - {"a" : { "b" : 1 }} -(1 row) - -DROP FUNCTION volatile_one, stable_one; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? 
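The end of this hunk is the coverage added for bug #18657 (JsonValueExpr.raw_expr left uninitialized in ExecInitExprRec(), so the Aggrefs contained in it were never initialized and ExecBuildAggTrans() crashed). The essential reproducer, lifted straight from the removed lines, is an aggregate nested inside a constructor:

CREATE FUNCTION volatile_one() RETURNS int AS $$ BEGIN RETURN 1; END; $$ LANGUAGE plpgsql VOLATILE;
SELECT JSON_OBJECT('a': JSON_OBJECTAGG('b': volatile_one() RETURNING text) FORMAT JSON);
-- on a fixed build this returns {"a" : { "b" : 1 }}

As with the other files, the trailing psql "No such file or directory" error just means the server was already gone by the time this script ran; the removed lines are tests that never executed, not genuine output mismatches.
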
diff -U3 /Users/admin/pgsql/src/test/regress/expected/sqljson_queryfuncs.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/sqljson_queryfuncs.out --- /Users/admin/pgsql/src/test/regress/expected/sqljson_queryfuncs.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/sqljson_queryfuncs.out 2024-12-13 13:20:12 @@ -1,1452 +1,2 @@ --- JSON_EXISTS -SELECT JSON_EXISTS(NULL::jsonb, '$'); - json_exists -------------- - -(1 row) - -SELECT JSON_EXISTS(jsonb '[]', '$'); - json_exists -------------- - t -(1 row) - -SELECT JSON_EXISTS(JSON_OBJECT(RETURNING jsonb), '$'); - json_exists -------------- - t -(1 row) - -SELECT JSON_EXISTS(jsonb '1', '$'); - json_exists -------------- - t -(1 row) - -SELECT JSON_EXISTS(jsonb 'null', '$'); - json_exists -------------- - t -(1 row) - -SELECT JSON_EXISTS(jsonb '[]', '$'); - json_exists -------------- - t -(1 row) - -SELECT JSON_EXISTS(jsonb '1', '$.a'); - json_exists -------------- - f -(1 row) - -SELECT JSON_EXISTS(jsonb '1', 'strict $.a'); -- FALSE on error - json_exists -------------- - f -(1 row) - -SELECT JSON_EXISTS(jsonb '1', 'strict $.a' ERROR ON ERROR); -ERROR: jsonpath member accessor can only be applied to an object -SELECT JSON_EXISTS(jsonb 'null', '$.a'); - json_exists -------------- - f -(1 row) - -SELECT JSON_EXISTS(jsonb '[]', '$.a'); - json_exists -------------- - f -(1 row) - -SELECT JSON_EXISTS(jsonb '[1, "aaa", {"a": 1}]', 'strict $.a'); -- FALSE on error - json_exists -------------- - f -(1 row) - -SELECT JSON_EXISTS(jsonb '[1, "aaa", {"a": 1}]', 'lax $.a'); - json_exists -------------- - t -(1 row) - -SELECT JSON_EXISTS(jsonb '{}', '$.a'); - json_exists -------------- - f -(1 row) - -SELECT JSON_EXISTS(jsonb '{"b": 1, "a": 2}', '$.a'); - json_exists -------------- - t -(1 row) - -SELECT JSON_EXISTS(jsonb '1', '$.a.b'); - json_exists -------------- - f -(1 row) - -SELECT JSON_EXISTS(jsonb '{"a": {"b": 1}}', '$.a.b'); - json_exists -------------- - t -(1 row) - -SELECT JSON_EXISTS(jsonb '{"a": 1, "b": 2}', '$.a.b'); - json_exists -------------- - f -(1 row) - -SELECT JSON_EXISTS(jsonb '{"a": 1, "b": 2}', '$.* ? (@ > $x)' PASSING 1 AS x); - json_exists -------------- - t -(1 row) - -SELECT JSON_EXISTS(jsonb '{"a": 1, "b": 2}', '$.* ? (@ > $x)' PASSING '1' AS x); - json_exists -------------- - f -(1 row) - -SELECT JSON_EXISTS(jsonb '{"a": 1, "b": 2}', '$.* ? (@ > $x && @ < $y)' PASSING 0 AS x, 2 AS y); - json_exists -------------- - t -(1 row) - -SELECT JSON_EXISTS(jsonb '{"a": 1, "b": 2}', '$.* ? (@ > $x && @ < $y)' PASSING 0 AS x, 1 AS y); - json_exists -------------- - f -(1 row) - --- extension: boolean expressions -SELECT JSON_EXISTS(jsonb '1', '$ > 2'); - json_exists -------------- - t -(1 row) - -SELECT JSON_EXISTS(jsonb '1', '$.a > 2' ERROR ON ERROR); - json_exists -------------- - t -(1 row) - --- JSON_VALUE -SELECT JSON_VALUE(NULL::jsonb, '$'); - json_value ------------- - -(1 row) - -SELECT JSON_VALUE(jsonb 'null', '$'); - json_value ------------- - -(1 row) - -SELECT JSON_VALUE(jsonb 'null', '$' RETURNING int); - json_value ------------- - -(1 row) - -SELECT JSON_VALUE(jsonb 'true', '$'); - json_value ------------- - t -(1 row) - -SELECT JSON_VALUE(jsonb 'true', '$' RETURNING bool); - json_value ------------- - t -(1 row) - -SELECT JSON_VALUE(jsonb '123', '$'); - json_value ------------- - 123 -(1 row) - -SELECT JSON_VALUE(jsonb '123', '$' RETURNING int) + 234; - ?column? 
----------- - 357 -(1 row) - -SELECT JSON_VALUE(jsonb '123', '$' RETURNING text); - json_value ------------- - 123 -(1 row) - -/* jsonb bytea ??? */ -SELECT JSON_VALUE(jsonb '123', '$' RETURNING bytea ERROR ON ERROR); - json_value ------------- - \x313233 -(1 row) - -SELECT JSON_VALUE(jsonb '1.23', '$'); - json_value ------------- - 1.23 -(1 row) - -SELECT JSON_VALUE(jsonb '1.23', '$' RETURNING int); - json_value ------------- - -(1 row) - -SELECT JSON_VALUE(jsonb '"1.23"', '$' RETURNING numeric); - json_value ------------- - 1.23 -(1 row) - -SELECT JSON_VALUE(jsonb '"1.23"', '$' RETURNING int ERROR ON ERROR); -ERROR: invalid input syntax for type integer: "1.23" -SELECT JSON_VALUE(jsonb '"aaa"', '$'); - json_value ------------- - aaa -(1 row) - -SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING text); - json_value ------------- - aaa -(1 row) - -SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING char(5)); - json_value ------------- - aaa -(1 row) - -SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING char(2) ERROR ON ERROR); -ERROR: value too long for type character(2) -SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING char(2)); - json_value ------------- - -(1 row) - -SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING char(3) ERROR ON ERROR); - json_value ------------- - aaa -(1 row) - -SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING json); - json_value ------------- - "aaa" -(1 row) - -SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING jsonb); - json_value ------------- - "aaa" -(1 row) - -SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING json ERROR ON ERROR); - json_value ------------- - "aaa" -(1 row) - -SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING jsonb ERROR ON ERROR); - json_value ------------- - "aaa" -(1 row) - -SELECT JSON_VALUE(jsonb '"\"aaa\""', '$' RETURNING json); - json_value ------------- - "\"aaa\"" -(1 row) - -SELECT JSON_VALUE(jsonb '"\"aaa\""', '$' RETURNING jsonb); - json_value ------------- - "\"aaa\"" -(1 row) - -SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING int); - json_value ------------- - -(1 row) - -SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING int ERROR ON ERROR); -ERROR: invalid input syntax for type integer: "aaa" -SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING int DEFAULT 111 ON ERROR); - json_value ------------- - 111 -(1 row) - -SELECT JSON_VALUE(jsonb '"123"', '$' RETURNING int) + 234; - ?column? ----------- - 357 -(1 row) - -SELECT JSON_VALUE(jsonb '"2017-02-20"', '$' RETURNING date) + 9; - ?column? 
------------- - 03-01-2017 -(1 row) - --- Test NULL checks execution in domain types -CREATE DOMAIN sqljsonb_int_not_null AS int NOT NULL; -SELECT JSON_VALUE(jsonb 'null', '$' RETURNING sqljsonb_int_not_null); -ERROR: could not coerce ON ERROR expression (NULL) to the RETURNING type -DETAIL: domain sqljsonb_int_not_null does not allow null values -SELECT JSON_VALUE(jsonb 'null', '$' RETURNING sqljsonb_int_not_null ERROR ON ERROR); -ERROR: domain sqljsonb_int_not_null does not allow null values -SELECT JSON_VALUE(jsonb 'null', '$' RETURNING sqljsonb_int_not_null DEFAULT 2 ON EMPTY ERROR ON ERROR); -ERROR: domain sqljsonb_int_not_null does not allow null values -SELECT JSON_VALUE(jsonb '1', '$.a' RETURNING sqljsonb_int_not_null DEFAULT 2 ON EMPTY ERROR ON ERROR); - json_value ------------- - 2 -(1 row) - -SELECT JSON_VALUE(jsonb '1', '$.a' RETURNING sqljsonb_int_not_null DEFAULT NULL ON EMPTY ERROR ON ERROR); -ERROR: domain sqljsonb_int_not_null does not allow null values -CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple'); -CREATE DOMAIN rgb AS rainbow CHECK (VALUE IN ('red', 'green', 'blue')); -SELECT JSON_VALUE('"purple"'::jsonb, 'lax $[*]' RETURNING rgb); - json_value ------------- - -(1 row) - -SELECT JSON_VALUE('"purple"'::jsonb, 'lax $[*]' RETURNING rgb ERROR ON ERROR); -ERROR: value for domain rgb violates check constraint "rgb_check" -SELECT JSON_VALUE(jsonb '[]', '$'); - json_value ------------- - -(1 row) - -SELECT JSON_VALUE(jsonb '[]', '$' ERROR ON ERROR); -ERROR: JSON path expression in JSON_VALUE should return single scalar item -SELECT JSON_VALUE(jsonb '{}', '$'); - json_value ------------- - -(1 row) - -SELECT JSON_VALUE(jsonb '{}', '$' ERROR ON ERROR); -ERROR: JSON path expression in JSON_VALUE should return single scalar item -SELECT JSON_VALUE(jsonb '1', '$.a'); - json_value ------------- - -(1 row) - -SELECT JSON_VALUE(jsonb '1', 'strict $.a' ERROR ON ERROR); -ERROR: jsonpath member accessor can only be applied to an object -SELECT JSON_VALUE(jsonb '1', 'strict $.a' DEFAULT 'error' ON ERROR); - json_value ------------- - error -(1 row) - -SELECT JSON_VALUE(jsonb '1', 'lax $.a' ERROR ON ERROR); -- NULL ON EMPTY - json_value ------------- - -(1 row) - -SELECT JSON_VALUE(jsonb '1', 'lax $.a' ERROR ON EMPTY ERROR ON ERROR); -ERROR: no SQL/JSON item found for specified path -SELECT JSON_VALUE(jsonb '1', 'strict $.*' DEFAULT 2 ON ERROR); - json_value ------------- - 2 -(1 row) - -SELECT JSON_VALUE(jsonb '1', 'lax $.a' DEFAULT 2 ON ERROR); - json_value ------------- - -(1 row) - -SELECT JSON_VALUE(jsonb '1', 'lax $.a' DEFAULT '2' ON EMPTY); - json_value ------------- - 2 -(1 row) - -SELECT JSON_VALUE(jsonb '1', 'lax $.a' NULL ON EMPTY DEFAULT '2' ON ERROR); - json_value ------------- - -(1 row) - -SELECT JSON_VALUE(jsonb '1', 'lax $.a' DEFAULT '2' ON EMPTY DEFAULT '3' ON ERROR); - json_value ------------- - 2 -(1 row) - -SELECT JSON_VALUE(jsonb '1', 'lax $.a' ERROR ON EMPTY DEFAULT '3' ON ERROR); -ERROR: no SQL/JSON item found for specified path -SELECT JSON_VALUE(jsonb '[1,2]', '$[*]' ERROR ON ERROR); -ERROR: JSON path expression in JSON_VALUE should return single scalar item -SELECT JSON_VALUE(jsonb '[1,2]', '$[*]' DEFAULT '0' ON ERROR); - json_value ------------- - 0 -(1 row) - -SELECT JSON_VALUE(jsonb '[" "]', '$[*]' RETURNING int ERROR ON ERROR); -ERROR: invalid input syntax for type integer: " " -SELECT JSON_VALUE(jsonb '[" "]', '$[*]' RETURNING int DEFAULT 2 + 3 ON ERROR); - json_value ------------- - 5 -(1 row) - -SELECT JSON_VALUE(jsonb 
'["1"]', '$[*]' RETURNING int DEFAULT 2 + 3 ON ERROR); - json_value ------------- - 1 -(1 row) - -SELECT JSON_VALUE(jsonb '["1"]', '$[*]' RETURNING int FORMAT JSON); -- RETURNING FORMAT not allowed -ERROR: cannot specify FORMAT JSON in RETURNING clause of JSON_VALUE() -LINE 1: ...CT JSON_VALUE(jsonb '["1"]', '$[*]' RETURNING int FORMAT JSO... - ^ --- RETUGNING pseudo-types not allowed -SELECT JSON_VALUE(jsonb '["1"]', '$[*]' RETURNING record); -ERROR: returning pseudo-types is not supported in SQL/JSON functions -SELECT - x, - JSON_VALUE( - jsonb '{"a": 1, "b": 2}', - '$.* ? (@ > $x)' PASSING x AS x - RETURNING int - DEFAULT -1 ON EMPTY - DEFAULT -2 ON ERROR - ) y -FROM - generate_series(0, 2) x; - x | y ----+---- - 0 | -2 - 1 | 2 - 2 | -1 -(3 rows) - -SELECT JSON_VALUE(jsonb 'null', '$a' PASSING point ' (1, 2 )' AS a); - json_value ------------- - (1,2) -(1 row) - -SELECT JSON_VALUE(jsonb 'null', '$a' PASSING point ' (1, 2 )' AS a RETURNING point); - json_value ------------- - (1,2) -(1 row) - -SELECT JSON_VALUE(jsonb 'null', '$a' PASSING point ' (1, 2 )' AS a RETURNING point ERROR ON ERROR); - json_value ------------- - (1,2) -(1 row) - --- Test PASSING and RETURNING date/time types -SELECT JSON_VALUE(jsonb 'null', '$ts' PASSING timestamptz '2018-02-21 12:34:56 +10' AS ts); - json_value ------------------------------- - Tue Feb 20 18:34:56 2018 PST -(1 row) - -SELECT JSON_VALUE(jsonb 'null', '$ts' PASSING timestamptz '2018-02-21 12:34:56 +10' AS ts RETURNING timestamptz); - json_value ------------------------------- - Tue Feb 20 18:34:56 2018 PST -(1 row) - -SELECT JSON_VALUE(jsonb 'null', '$ts' PASSING timestamptz '2018-02-21 12:34:56 +10' AS ts RETURNING timestamp); - json_value --------------------------- - Tue Feb 20 18:34:56 2018 -(1 row) - -SELECT JSON_VALUE(jsonb 'null', '$ts' PASSING date '2018-02-21 12:34:56 +10' AS ts RETURNING date); - json_value ------------- - 02-21-2018 -(1 row) - -SELECT JSON_VALUE(jsonb 'null', '$ts' PASSING time '2018-02-21 12:34:56 +10' AS ts RETURNING time); - json_value ------------- - 12:34:56 -(1 row) - -SELECT JSON_VALUE(jsonb 'null', '$ts' PASSING timetz '2018-02-21 12:34:56 +10' AS ts RETURNING timetz); - json_value -------------- - 12:34:56+10 -(1 row) - -SELECT JSON_VALUE(jsonb 'null', '$ts' PASSING timestamp '2018-02-21 12:34:56 +10' AS ts RETURNING timestamp); - json_value --------------------------- - Wed Feb 21 12:34:56 2018 -(1 row) - --- Also test RETURNING json[b] -SELECT JSON_VALUE(jsonb 'null', '$ts' PASSING timestamptz '2018-02-21 12:34:56 +10' AS ts RETURNING json); - json_value ------------------------------ - "2018-02-21T02:34:56+00:00" -(1 row) - -SELECT JSON_VALUE(jsonb 'null', '$ts' PASSING timestamptz '2018-02-21 12:34:56 +10' AS ts RETURNING jsonb); - json_value ------------------------------ - "2018-02-21T02:34:56+00:00" -(1 row) - --- Test that numeric JSON values are coerced uniformly -select json_value('{"a": 1.234}', '$.a' returning int error on error); -ERROR: invalid input syntax for type integer: "1.234" -select json_value('{"a": "1.234"}', '$.a' returning int error on error); -ERROR: invalid input syntax for type integer: "1.234" --- JSON_QUERY -SELECT JSON_VALUE(NULL::jsonb, '$'); - json_value ------------- - -(1 row) - -SELECT - JSON_QUERY(js, '$') AS "unspec", - JSON_QUERY(js, '$' WITHOUT WRAPPER) AS "without", - JSON_QUERY(js, '$' WITH CONDITIONAL WRAPPER) AS "with cond", - JSON_QUERY(js, '$' WITH UNCONDITIONAL ARRAY WRAPPER) AS "with uncond", - JSON_QUERY(js, '$' WITH ARRAY WRAPPER) AS "with" -FROM - (VALUES - 
(jsonb 'null'), - ('12.3'), - ('true'), - ('"aaa"'), - ('[1, null, "2"]'), - ('{"a": 1, "b": [2]}') - ) foo(js); - unspec | without | with cond | with uncond | with ---------------------+--------------------+--------------------+----------------------+---------------------- - null | null | null | [null] | [null] - 12.3 | 12.3 | 12.3 | [12.3] | [12.3] - true | true | true | [true] | [true] - "aaa" | "aaa" | "aaa" | ["aaa"] | ["aaa"] - [1, null, "2"] | [1, null, "2"] | [1, null, "2"] | [[1, null, "2"]] | [[1, null, "2"]] - {"a": 1, "b": [2]} | {"a": 1, "b": [2]} | {"a": 1, "b": [2]} | [{"a": 1, "b": [2]}] | [{"a": 1, "b": [2]}] -(6 rows) - -SELECT - JSON_QUERY(js, 'strict $[*]') AS "unspec", - JSON_QUERY(js, 'strict $[*]' WITHOUT WRAPPER) AS "without", - JSON_QUERY(js, 'strict $[*]' WITH CONDITIONAL WRAPPER) AS "with cond", - JSON_QUERY(js, 'strict $[*]' WITH UNCONDITIONAL ARRAY WRAPPER) AS "with uncond", - JSON_QUERY(js, 'strict $[*]' WITH ARRAY WRAPPER) AS "with" -FROM - (VALUES - (jsonb '1'), - ('[]'), - ('[null]'), - ('[12.3]'), - ('[true]'), - ('["aaa"]'), - ('[[1, 2, 3]]'), - ('[{"a": 1, "b": [2]}]'), - ('[1, "2", null, [3]]') - ) foo(js); - unspec | without | with cond | with uncond | with ---------------------+--------------------+---------------------+----------------------+---------------------- - | | | | - | | | | - null | null | null | [null] | [null] - 12.3 | 12.3 | 12.3 | [12.3] | [12.3] - true | true | true | [true] | [true] - "aaa" | "aaa" | "aaa" | ["aaa"] | ["aaa"] - [1, 2, 3] | [1, 2, 3] | [1, 2, 3] | [[1, 2, 3]] | [[1, 2, 3]] - {"a": 1, "b": [2]} | {"a": 1, "b": [2]} | {"a": 1, "b": [2]} | [{"a": 1, "b": [2]}] | [{"a": 1, "b": [2]}] - | | [1, "2", null, [3]] | [1, "2", null, [3]] | [1, "2", null, [3]] -(9 rows) - -SELECT JSON_QUERY(jsonb '"aaa"', '$' RETURNING text); - json_query ------------- - "aaa" -(1 row) - -SELECT JSON_QUERY(jsonb '"aaa"', '$' RETURNING text KEEP QUOTES); - json_query ------------- - "aaa" -(1 row) - -SELECT JSON_QUERY(jsonb '"aaa"', '$' RETURNING text KEEP QUOTES ON SCALAR STRING); - json_query ------------- - "aaa" -(1 row) - -SELECT JSON_QUERY(jsonb '"aaa"', '$' RETURNING text OMIT QUOTES); - json_query ------------- - aaa -(1 row) - -SELECT JSON_QUERY(jsonb '"aaa"', '$' RETURNING text OMIT QUOTES ON SCALAR STRING); - json_query ------------- - aaa -(1 row) - -SELECT JSON_QUERY(jsonb '"aaa"', '$' OMIT QUOTES ERROR ON ERROR); -ERROR: invalid input syntax for type json -DETAIL: Token "aaa" is invalid. -CONTEXT: JSON data, line 1: aaa -SELECT JSON_QUERY(jsonb '"aaa"', '$' RETURNING json OMIT QUOTES ERROR ON ERROR); -ERROR: invalid input syntax for type json -DETAIL: Token "aaa" is invalid. 
-CONTEXT: JSON data, line 1: aaa -SELECT JSON_QUERY(jsonb '"aaa"', '$' RETURNING bytea FORMAT JSON OMIT QUOTES ERROR ON ERROR); - json_query ------------- - \x616161 -(1 row) - --- Behavior when a RETURNING type has typmod != -1 -SELECT JSON_QUERY(jsonb '"aaa"', '$' RETURNING char(3) ERROR ON ERROR); -ERROR: value too long for type character(3) -SELECT JSON_QUERY(jsonb '"aaa"', '$' RETURNING char(3)); - json_query ------------- - -(1 row) - -SELECT JSON_QUERY(jsonb '"aaa"', '$' RETURNING char(3) OMIT QUOTES ERROR ON ERROR); - json_query ------------- - aaa -(1 row) - -SELECT JSON_QUERY(jsonb '"aaa"', '$.a' RETURNING char(2) OMIT QUOTES DEFAULT 'bb' ON EMPTY); - json_query ------------- - bb -(1 row) - -SELECT JSON_QUERY(jsonb '"aaa"', '$.a' RETURNING char(2) OMIT QUOTES DEFAULT '"bb"'::jsonb ON EMPTY); - json_query ------------- - bb -(1 row) - --- OMIT QUOTES behavior should not be specified when WITH WRAPPER used: --- Should fail -SELECT JSON_QUERY(jsonb '[1]', '$' WITH WRAPPER OMIT QUOTES); -ERROR: SQL/JSON QUOTES behavior must not be specified when WITH WRAPPER is used -LINE 1: SELECT JSON_QUERY(jsonb '[1]', '$' WITH WRAPPER OMIT QUOTES)... - ^ -SELECT JSON_QUERY(jsonb '[1]', '$' WITH CONDITIONAL WRAPPER OMIT QUOTES); -ERROR: SQL/JSON QUOTES behavior must not be specified when WITH WRAPPER is used -LINE 1: SELECT JSON_QUERY(jsonb '[1]', '$' WITH CONDITIONAL WRAPPER ... - ^ --- Should succeed -SELECT JSON_QUERY(jsonb '["1"]', '$[*]' WITH CONDITIONAL WRAPPER KEEP QUOTES); - json_query ------------- - "1" -(1 row) - -SELECT JSON_QUERY(jsonb '["1"]', '$[*]' WITH UNCONDITIONAL WRAPPER KEEP QUOTES); - json_query ------------- - ["1"] -(1 row) - -SELECT JSON_QUERY(jsonb '["1"]', '$[*]' WITH WRAPPER KEEP QUOTES); - json_query ------------- - ["1"] -(1 row) - -SELECT JSON_QUERY(jsonb '["1"]', '$[*]' WITHOUT WRAPPER OMIT QUOTES); - json_query ------------- - 1 -(1 row) - -SELECT JSON_QUERY(jsonb '["1"]', '$[*]' WITHOUT WRAPPER KEEP QUOTES); - json_query ------------- - "1" -(1 row) - --- test QUOTES behavior. -SELECT JSON_QUERY(jsonb'{"rec": "{1,2,3}"}', '$.rec' returning int[] omit quotes); - json_query ------------- - {1,2,3} -(1 row) - -SELECT JSON_QUERY(jsonb'{"rec": "{1,2,3}"}', '$.rec' returning int[] keep quotes); - json_query ------------- - -(1 row) - -SELECT JSON_QUERY(jsonb'{"rec": "{1,2,3}"}', '$.rec' returning int[] keep quotes error on error); -ERROR: expected JSON array -SELECT JSON_QUERY(jsonb'{"rec": "[1,2]"}', '$.rec' returning int4range omit quotes); - json_query ------------- - [1,3) -(1 row) - -SELECT JSON_QUERY(jsonb'{"rec": "[1,2]"}', '$.rec' returning int4range keep quotes); - json_query ------------- - -(1 row) - -SELECT JSON_QUERY(jsonb'{"rec": "[1,2]"}', '$.rec' returning int4range keep quotes error on error); -ERROR: malformed range literal: ""[1,2]"" -DETAIL: Missing left parenthesis or bracket. 
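The quotes-handling tests above pivot on one rule: OMIT QUOTES unwraps a scalar JSON string before handing it to the RETURNING type's input function, while KEEP QUOTES (the default for typed output) passes the raw JSON text, quotes included, which then cannot be converted — the int[] case complains "expected JSON array", the range case chokes on the quoted literal — and yields NULL unless ERROR ON ERROR is specified. Mirroring the removed expected output:

SELECT JSON_QUERY(jsonb '{"rec": "{1,2,3}"}', '$.rec' RETURNING int[] OMIT QUOTES);  -- {1,2,3}
SELECT JSON_QUERY(jsonb '{"rec": "{1,2,3}"}', '$.rec' RETURNING int[] KEEP QUOTES);  -- NULL
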
-CREATE DOMAIN qf_char_domain AS char(1); -CREATE DOMAIN qf_jsonb_domain AS jsonb; -SELECT JSON_QUERY(jsonb '"1"', '$' RETURNING qf_char_domain OMIT QUOTES ERROR ON ERROR); - json_query ------------- - 1 -(1 row) - -SELECT JSON_QUERY(jsonb '"1"', '$' RETURNING qf_jsonb_domain OMIT QUOTES ERROR ON ERROR); - json_query ------------- - 1 -(1 row) - -DROP DOMAIN qf_char_domain, qf_jsonb_domain; -SELECT JSON_QUERY(jsonb '[]', '$[*]'); - json_query ------------- - -(1 row) - -SELECT JSON_QUERY(jsonb '[]', '$[*]' NULL ON EMPTY); - json_query ------------- - -(1 row) - -SELECT JSON_QUERY(jsonb '[]', '$[*]' EMPTY ON EMPTY); - json_query ------------- - [] -(1 row) - -SELECT JSON_QUERY(jsonb '[]', '$[*]' EMPTY ARRAY ON EMPTY); - json_query ------------- - [] -(1 row) - -SELECT JSON_QUERY(jsonb '[]', '$[*]' EMPTY OBJECT ON EMPTY); - json_query ------------- - {} -(1 row) - -SELECT JSON_QUERY(jsonb '[]', '$[*]' ERROR ON EMPTY); -ERROR: no SQL/JSON item found for specified path -SELECT JSON_QUERY(jsonb '[]', '$[*]' DEFAULT '"empty"' ON EMPTY); - json_query ------------- - "empty" -(1 row) - -SELECT JSON_QUERY(jsonb '[]', '$[*]' ERROR ON EMPTY NULL ON ERROR); -ERROR: no SQL/JSON item found for specified path -SELECT JSON_QUERY(jsonb '[]', '$[*]' ERROR ON EMPTY EMPTY ARRAY ON ERROR); -ERROR: no SQL/JSON item found for specified path -SELECT JSON_QUERY(jsonb '[]', '$[*]' ERROR ON EMPTY EMPTY OBJECT ON ERROR); -ERROR: no SQL/JSON item found for specified path -SELECT JSON_QUERY(jsonb '[]', '$[*]' ERROR ON EMPTY ERROR ON ERROR); -ERROR: no SQL/JSON item found for specified path -SELECT JSON_QUERY(jsonb '[]', '$[*]' ERROR ON ERROR); -- NULL ON EMPTY - json_query ------------- - -(1 row) - -SELECT JSON_QUERY(jsonb '[1,2]', '$[*]' ERROR ON ERROR); -ERROR: JSON path expression in JSON_QUERY should return single item without wrapper -HINT: Use the WITH WRAPPER clause to wrap SQL/JSON items into an array. 
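The ON EMPTY / ON ERROR hunks are easier to read with the two failure modes kept apart: an empty path result triggers the ON EMPTY branch (NULL by default), while a genuine evaluation problem — such as multiple items with no wrapper — triggers ON ERROR. Two cases from the surrounding expected output make the distinction concrete:

SELECT JSON_QUERY(jsonb '[]', '$[*]' DEFAULT '"empty"' ON EMPTY);  -- "empty"
SELECT JSON_QUERY(jsonb '[1,2]', '$[*]' WITH WRAPPER);             -- [1, 2], sidestepping the multi-item error
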
-SELECT JSON_QUERY(jsonb '[1,2]', '$[*]' DEFAULT '"empty"' ON ERROR); - json_query ------------- - "empty" -(1 row) - -SELECT JSON_QUERY(jsonb '[1,2]', '$' RETURNING json); - json_query ------------- - [1, 2] -(1 row) - -SELECT JSON_QUERY(jsonb '[1,2]', '$' RETURNING json FORMAT JSON); - json_query ------------- - [1, 2] -(1 row) - -SELECT JSON_QUERY(jsonb '[1,2]', '$' RETURNING jsonb); - json_query ------------- - [1, 2] -(1 row) - -SELECT JSON_QUERY(jsonb '[1,2]', '$' RETURNING jsonb FORMAT JSON); - json_query ------------- - [1, 2] -(1 row) - -SELECT JSON_QUERY(jsonb '[1,2]', '$' RETURNING text); - json_query ------------- - [1, 2] -(1 row) - -SELECT JSON_QUERY(jsonb '[1,2]', '$' RETURNING char(10)); - json_query ------------- - [1, 2] -(1 row) - -SELECT JSON_QUERY(jsonb '[1,2]', '$' RETURNING text FORMAT JSON); - json_query ------------- - [1, 2] -(1 row) - -SELECT JSON_QUERY(jsonb '[1,2]', '$' RETURNING bytea); - json_query ---------------- - \x5b312c20325d -(1 row) - -SELECT JSON_QUERY(jsonb '[1,2]', '$' RETURNING bytea FORMAT JSON); - json_query ---------------- - \x5b312c20325d -(1 row) - -SELECT JSON_QUERY(jsonb '[1,2]', '$[*]' RETURNING bytea EMPTY OBJECT ON ERROR); - json_query ------------- - \x7b7d -(1 row) - -SELECT JSON_QUERY(jsonb '[1,2]', '$[*]' RETURNING bytea FORMAT JSON EMPTY OBJECT ON ERROR); - json_query ------------- - \x7b7d -(1 row) - -SELECT JSON_QUERY(jsonb '[1,2]', '$[*]' RETURNING json EMPTY OBJECT ON ERROR); - json_query ------------- - {} -(1 row) - -SELECT JSON_QUERY(jsonb '[1,2]', '$[*]' RETURNING jsonb EMPTY OBJECT ON ERROR); - json_query ------------- - {} -(1 row) - -SELECT JSON_QUERY(jsonb '[3,4]', '$[*]' RETURNING bigint[] EMPTY OBJECT ON ERROR); -ERROR: could not coerce ON ERROR expression (EMPTY OBJECT) to the RETURNING type -DETAIL: expected JSON array -SELECT JSON_QUERY(jsonb '"[3,4]"', '$[*]' RETURNING bigint[] EMPTY OBJECT ON ERROR); -ERROR: could not coerce ON ERROR expression (EMPTY OBJECT) to the RETURNING type -DETAIL: expected JSON array --- Coercion fails with quotes on -SELECT JSON_QUERY(jsonb '"123.1"', '$' RETURNING int2 error on error); -ERROR: invalid input syntax for type smallint: ""123.1"" -SELECT JSON_QUERY(jsonb '"123.1"', '$' RETURNING int4 error on error); -ERROR: invalid input syntax for type integer: ""123.1"" -SELECT JSON_QUERY(jsonb '"123.1"', '$' RETURNING int8 error on error); -ERROR: invalid input syntax for type bigint: ""123.1"" -SELECT JSON_QUERY(jsonb '"123.1"', '$' RETURNING bool error on error); -ERROR: invalid input syntax for type boolean: ""123.1"" -SELECT JSON_QUERY(jsonb '"123.1"', '$' RETURNING numeric error on error); -ERROR: invalid input syntax for type numeric: ""123.1"" -SELECT JSON_QUERY(jsonb '"123.1"', '$' RETURNING real error on error); -ERROR: invalid input syntax for type real: ""123.1"" -SELECT JSON_QUERY(jsonb '"123.1"', '$' RETURNING float8 error on error); -ERROR: invalid input syntax for type double precision: ""123.1"" --- Fine with OMIT QUOTES -SELECT JSON_QUERY(jsonb '"123.1"', '$' RETURNING int2 omit quotes error on error); -ERROR: invalid input syntax for type smallint: "123.1" -SELECT JSON_QUERY(jsonb '"123.1"', '$' RETURNING float8 omit quotes error on error); - json_query ------------- - 123.1 -(1 row) - --- RETURNING pseudo-types not allowed -SELECT JSON_QUERY(jsonb '[3,4]', '$[*]' RETURNING anyarray EMPTY OBJECT ON ERROR); -ERROR: returning pseudo-types is not supported in SQL/JSON functions -SELECT - x, y, - JSON_QUERY( - jsonb '[1,2,3,4,5,null]', - '$[*] ? 
(@ >= $x && @ <= $y)' - PASSING x AS x, y AS y - WITH CONDITIONAL WRAPPER - EMPTY ARRAY ON EMPTY - ) list -FROM - generate_series(0, 4) x, - generate_series(0, 4) y; - x | y | list ----+---+-------------- - 0 | 0 | [] - 0 | 1 | 1 - 0 | 2 | [1, 2] - 0 | 3 | [1, 2, 3] - 0 | 4 | [1, 2, 3, 4] - 1 | 0 | [] - 1 | 1 | 1 - 1 | 2 | [1, 2] - 1 | 3 | [1, 2, 3] - 1 | 4 | [1, 2, 3, 4] - 2 | 0 | [] - 2 | 1 | [] - 2 | 2 | 2 - 2 | 3 | [2, 3] - 2 | 4 | [2, 3, 4] - 3 | 0 | [] - 3 | 1 | [] - 3 | 2 | [] - 3 | 3 | 3 - 3 | 4 | [3, 4] - 4 | 0 | [] - 4 | 1 | [] - 4 | 2 | [] - 4 | 3 | [] - 4 | 4 | 4 -(25 rows) - --- record type returning with quotes behavior. -CREATE TYPE comp_abc AS (a text, b int, c timestamp); -SELECT JSON_QUERY(jsonb'{"rec": "(abc,42,01.02.2003)"}', '$.rec' returning comp_abc omit quotes); - json_query -------------------------------------- - (abc,42,"Thu Jan 02 00:00:00 2003") -(1 row) - -SELECT JSON_QUERY(jsonb'{"rec": "(abc,42,01.02.2003)"}', '$.rec' returning comp_abc keep quotes); - json_query ------------- - -(1 row) - -SELECT JSON_QUERY(jsonb'{"rec": "(abc,42,01.02.2003)"}', '$.rec' returning comp_abc keep quotes error on error); -ERROR: cannot call populate_composite on a scalar -DROP TYPE comp_abc; --- Extension: record types returning -CREATE TYPE sqljsonb_rec AS (a int, t text, js json, jb jsonb, jsa json[]); -CREATE TYPE sqljsonb_reca AS (reca sqljsonb_rec[]); -SELECT JSON_QUERY(jsonb '[{"a": 1, "b": "foo", "t": "aaa", "js": [1, "2", {}], "jb": {"x": [1, "2", {}]}}, {"a": 2}]', '$[0]' RETURNING sqljsonb_rec); - json_query ------------------------------------------------------ - (1,aaa,"[1, ""2"", {}]","{""x"": [1, ""2"", {}]}",) -(1 row) - -SELECT JSON_QUERY(jsonb '[{"a": "a", "b": "foo", "t": "aaa", "js": [1, "2", {}], "jb": {"x": [1, "2", {}]}}, {"a": 2}]', '$[0]' RETURNING sqljsonb_rec ERROR ON ERROR); -ERROR: invalid input syntax for type integer: "a" -SELECT JSON_QUERY(jsonb '[{"a": "a", "b": "foo", "t": "aaa", "js": [1, "2", {}], "jb": {"x": [1, "2", {}]}}, {"a": 2}]', '$[0]' RETURNING sqljsonb_rec); - json_query ------------- - -(1 row) - -SELECT * FROM unnest((JSON_QUERY(jsonb '{"jsa": [{"a": 1, "b": ["foo"]}, {"a": 2, "c": {}}, 123]}', '$' RETURNING sqljsonb_rec)).jsa); - unnest ------------------------- - {"a": 1, "b": ["foo"]} - {"a": 2, "c": {}} - 123 -(3 rows) - -SELECT * FROM unnest((JSON_QUERY(jsonb '{"reca": [{"a": 1, "t": ["foo", []]}, {"a": 2, "jb": [{}, true]}]}', '$' RETURNING sqljsonb_reca)).reca); - a | t | js | jb | jsa ----+-------------+----+------------+----- - 1 | ["foo", []] | | | - 2 | | | [{}, true] | -(2 rows) - -SELECT JSON_QUERY(jsonb '[{"a": 1, "b": "foo", "t": "aaa", "js": [1, "2", {}], "jb": {"x": [1, "2", {}]}}, {"a": 2}]', '$[0]' RETURNING jsonpath); - json_query ------------- - -(1 row) - -SELECT JSON_QUERY(jsonb '[{"a": 1, "b": "foo", "t": "aaa", "js": [1, "2", {}], "jb": {"x": [1, "2", {}]}}, {"a": 2}]', '$[0]' RETURNING jsonpath ERROR ON ERROR); -ERROR: syntax error at or near "{" of jsonpath input --- Extension: array types returning -SELECT JSON_QUERY(jsonb '[1,2,null,"3"]', '$[*]' RETURNING int[] WITH WRAPPER); - json_query --------------- - {1,2,NULL,3} -(1 row) - -SELECT JSON_QUERY(jsonb '[1,2,null,"a"]', '$[*]' RETURNING int[] WITH WRAPPER ERROR ON ERROR); -ERROR: invalid input syntax for type integer: "a" -SELECT JSON_QUERY(jsonb '[1,2,null,"a"]', '$[*]' RETURNING int[] WITH WRAPPER); - json_query ------------- - -(1 row) - -SELECT * FROM unnest(JSON_QUERY(jsonb '[{"a": 1, "t": ["foo", []]}, {"a": 2, "jb": [{}, true]}]', '$' 
RETURNING sqljsonb_rec[])); - a | t | js | jb | jsa ----+-------------+----+------------+----- - 1 | ["foo", []] | | | - 2 | | | [{}, true] | -(2 rows) - --- Extension: domain types returning -SELECT JSON_QUERY(jsonb '{"a": 1}', '$.a' RETURNING sqljsonb_int_not_null); - json_query ------------- - 1 -(1 row) - -SELECT JSON_QUERY(jsonb '{"a": 1}', '$.b' RETURNING sqljsonb_int_not_null); -ERROR: could not coerce ON EMPTY expression (NULL) to the RETURNING type -DETAIL: domain sqljsonb_int_not_null does not allow null values -SELECT JSON_QUERY(jsonb '{"a": 1}', '$.b' RETURNING sqljsonb_int_not_null ERROR ON EMPTY ERROR ON ERROR); -ERROR: no SQL/JSON item found for specified path --- Test timestamptz passing and output -SELECT JSON_QUERY(jsonb 'null', '$ts' PASSING timestamptz '2018-02-21 12:34:56 +10' AS ts); - json_query ------------------------------ - "2018-02-21T02:34:56+00:00" -(1 row) - -SELECT JSON_QUERY(jsonb 'null', '$ts' PASSING timestamptz '2018-02-21 12:34:56 +10' AS ts RETURNING json); - json_query ------------------------------ - "2018-02-21T02:34:56+00:00" -(1 row) - -SELECT JSON_QUERY(jsonb 'null', '$ts' PASSING timestamptz '2018-02-21 12:34:56 +10' AS ts RETURNING jsonb); - json_query ------------------------------ - "2018-02-21T02:34:56+00:00" -(1 row) - --- Test constraints -CREATE TABLE test_jsonb_constraints ( - js text, - i int, - x jsonb DEFAULT JSON_QUERY(jsonb '[1,2]', '$[*]' WITH WRAPPER) - CONSTRAINT test_jsonb_constraint1 - CHECK (js IS JSON) - CONSTRAINT test_jsonb_constraint2 - CHECK (JSON_EXISTS(js::jsonb, '$.a' PASSING i + 5 AS int, i::text AS txt, array[1,2,3] as arr)) - CONSTRAINT test_jsonb_constraint3 - CHECK (JSON_VALUE(js::jsonb, '$.a' RETURNING int DEFAULT '12' ON EMPTY ERROR ON ERROR) > i) - CONSTRAINT test_jsonb_constraint4 - CHECK (JSON_QUERY(js::jsonb, '$.a' WITH CONDITIONAL WRAPPER EMPTY OBJECT ON ERROR) = jsonb '[10]') - CONSTRAINT test_jsonb_constraint5 - CHECK (JSON_QUERY(js::jsonb, '$.a' RETURNING char(5) OMIT QUOTES EMPTY ARRAY ON EMPTY) > 'a' COLLATE "C") -); -\d test_jsonb_constraints - Table "public.test_jsonb_constraints" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+-------------------------------------------------------------------------------------------- - js | text | | | - i | integer | | | - x | jsonb | | | JSON_QUERY('[1, 2]'::jsonb, '$[*]' RETURNING jsonb WITH UNCONDITIONAL WRAPPER KEEP QUOTES) -Check constraints: - "test_jsonb_constraint1" CHECK (js IS JSON) - "test_jsonb_constraint2" CHECK (JSON_EXISTS(js::jsonb, '$."a"' PASSING i + 5 AS int, i::text AS txt, ARRAY[1, 2, 3] AS arr)) - "test_jsonb_constraint3" CHECK (JSON_VALUE(js::jsonb, '$."a"' RETURNING integer DEFAULT 12 ON EMPTY ERROR ON ERROR) > i) - "test_jsonb_constraint4" CHECK (JSON_QUERY(js::jsonb, '$."a"' RETURNING jsonb WITH CONDITIONAL WRAPPER KEEP QUOTES EMPTY OBJECT ON ERROR) = '[10]'::jsonb) - "test_jsonb_constraint5" CHECK (JSON_QUERY(js::jsonb, '$."a"' RETURNING character(5) WITHOUT WRAPPER OMIT QUOTES EMPTY ARRAY ON EMPTY) > ('a'::bpchar COLLATE "C")) - -SELECT check_clause -FROM information_schema.check_constraints -WHERE constraint_name LIKE 'test_jsonb_constraint%' -ORDER BY 1; - check_clause ----------------------------------------------------------------------------------------------------------------------------------------- - (JSON_QUERY((js)::jsonb, '$."a"' RETURNING character(5) WITHOUT WRAPPER OMIT QUOTES EMPTY ARRAY ON EMPTY) > ('a'::bpchar COLLATE "C")) - (JSON_QUERY((js)::jsonb, '$."a"' RETURNING jsonb 
WITH CONDITIONAL WRAPPER KEEP QUOTES EMPTY OBJECT ON ERROR) = '[10]'::jsonb) - (JSON_VALUE((js)::jsonb, '$."a"' RETURNING integer DEFAULT 12 ON EMPTY ERROR ON ERROR) > i) - (js IS JSON) - JSON_EXISTS((js)::jsonb, '$."a"' PASSING (i + 5) AS int, (i)::text AS txt, ARRAY[1, 2, 3] AS arr) -(5 rows) - -SELECT pg_get_expr(adbin, adrelid) -FROM pg_attrdef -WHERE adrelid = 'test_jsonb_constraints'::regclass -ORDER BY 1; - pg_get_expr --------------------------------------------------------------------------------------------- - JSON_QUERY('[1, 2]'::jsonb, '$[*]' RETURNING jsonb WITH UNCONDITIONAL WRAPPER KEEP QUOTES) -(1 row) - -INSERT INTO test_jsonb_constraints VALUES ('', 1); -ERROR: new row for relation "test_jsonb_constraints" violates check constraint "test_jsonb_constraint1" -DETAIL: Failing row contains (, 1, [1, 2]). -INSERT INTO test_jsonb_constraints VALUES ('1', 1); -ERROR: new row for relation "test_jsonb_constraints" violates check constraint "test_jsonb_constraint2" -DETAIL: Failing row contains (1, 1, [1, 2]). -INSERT INTO test_jsonb_constraints VALUES ('[]'); -ERROR: new row for relation "test_jsonb_constraints" violates check constraint "test_jsonb_constraint2" -DETAIL: Failing row contains ([], null, [1, 2]). -INSERT INTO test_jsonb_constraints VALUES ('{"b": 1}', 1); -ERROR: new row for relation "test_jsonb_constraints" violates check constraint "test_jsonb_constraint2" -DETAIL: Failing row contains ({"b": 1}, 1, [1, 2]). -INSERT INTO test_jsonb_constraints VALUES ('{"a": 1}', 1); -ERROR: new row for relation "test_jsonb_constraints" violates check constraint "test_jsonb_constraint3" -DETAIL: Failing row contains ({"a": 1}, 1, [1, 2]). -INSERT INTO test_jsonb_constraints VALUES ('{"a": 10}', 1); -ERROR: new row for relation "test_jsonb_constraints" violates check constraint "test_jsonb_constraint4" -DETAIL: Failing row contains ({"a": 10}, 1, [1, 2]). -DROP TABLE test_jsonb_constraints; --- Test mutability of query functions -CREATE TABLE test_jsonb_mutability(js jsonb, b int); -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$')); -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a[0]')); -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.time()')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.date()')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.time_tz()')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.timestamp()')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.timestamp_tz()')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.date() < $.time_tz())')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.date() < $.time())')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.time() < $.time())')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.time() < $.time_tz())')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? 
(@.timestamp() < $.timestamp_tz())')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.timestamp_tz() < $.timestamp_tz())')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.time() < $.datetime("HH:MI TZH"))')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.date() < $.datetime("HH:MI TZH"))')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.timestamp() < $.datetime("HH:MI TZH"))')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.timestamp() < $.datetime("HH:MI"))')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.timestamp_tz() < $.datetime("HH:MI TZH"))')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.timestamp_tz() < $.datetime("HH:MI"))')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.date() < $x' PASSING '12:34'::timetz AS x)); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.date() < $x' PASSING '1234'::int AS x)); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.timestamp(2) < $.timestamp(3))')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.datetime()')); -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@ < $.datetime())')); -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.datetime() < $.datetime())')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.datetime() < $.datetime("HH:MI TZH"))')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.datetime("HH:MI TZH") < $.datetime("HH:MI TZH"))')); -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.datetime("HH:MI") < $.datetime("YY-MM-DD HH:MI"))')); -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.datetime("HH:MI TZH") < $.datetime("YY-MM-DD HH:MI"))')); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.datetime("HH:MI TZH") < $x' PASSING '12:34'::timetz AS x)); -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.datetime("HH:MI TZH") < $y' PASSING '12:34'::timetz AS x)); -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.datetime() < $x' PASSING '12:34'::timetz AS x)); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.datetime() < $x' PASSING '1234'::int AS x)); -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.datetime() ? (@ == $x)' PASSING '12:34'::time AS x)); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.datetime("YY-MM-DD") ? 
(@ == $x)' PASSING '2020-07-14'::date AS x)); -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$[1, $.a ? (@.datetime() == $x)]' PASSING '12:34'::time AS x)); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$[1, 0 to $.a ? (@.datetime() == $x)]' PASSING '12:34'::time AS x)); -ERROR: functions in index expression must be marked IMMUTABLE -CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$[1, $.a ? (@.datetime("HH:MI") == $x)]' PASSING '12:34'::time AS x)); -CREATE INDEX ON test_jsonb_mutability (JSON_VALUE(js, '$' DEFAULT random()::int ON ERROR)); -ERROR: functions in index expression must be marked IMMUTABLE --- DEFAULT expression -CREATE OR REPLACE FUNCTION ret_setint() RETURNS SETOF integer AS -$$ -BEGIN - RETURN QUERY EXECUTE 'select 1 union all select 1'; -END; -$$ -LANGUAGE plpgsql IMMUTABLE; -SELECT JSON_QUERY(js, '$' RETURNING int DEFAULT ret_setint() ON ERROR) FROM test_jsonb_mutability; -ERROR: DEFAULT expression must not return a set -LINE 1: SELECT JSON_QUERY(js, '$' RETURNING int DEFAULT ret_setint(... - ^ -SELECT JSON_QUERY(js, '$' RETURNING int DEFAULT b + 1 ON ERROR) FROM test_jsonb_mutability; -ERROR: DEFAULT expression must not contain column references -LINE 1: SELECT JSON_QUERY(js, '$' RETURNING int DEFAULT b + 1 ON ER... - ^ -SELECT JSON_QUERY(js, '$' RETURNING int DEFAULT sum(1) over() ON ERROR) FROM test_jsonb_mutability; -ERROR: can only specify a constant, non-aggregate function, or operator expression for DEFAULT -LINE 1: SELECT JSON_QUERY(js, '$' RETURNING int DEFAULT sum(1) over... - ^ -SELECT JSON_QUERY(js, '$' RETURNING int DEFAULT (SELECT 1) ON ERROR) FROM test_jsonb_mutability; -ERROR: can only specify a constant, non-aggregate function, or operator expression for DEFAULT -LINE 1: SELECT JSON_QUERY(js, '$' RETURNING int DEFAULT (SELECT 1) ... - ^ -DROP TABLE test_jsonb_mutability; -DROP FUNCTION ret_setint; -CREATE DOMAIN queryfuncs_test_domain AS text CHECK (value <> 'foo'); -SELECT JSON_VALUE(jsonb '{"d1": "H"}', '$.a2' RETURNING queryfuncs_test_domain DEFAULT 'foo'::queryfuncs_test_domain ON EMPTY); -ERROR: could not coerce ON EMPTY expression (DEFAULT) to the RETURNING type -DETAIL: value for domain queryfuncs_test_domain violates check constraint "queryfuncs_test_domain_check" -SELECT JSON_VALUE(jsonb '{"d1": "H"}', '$.a2' RETURNING queryfuncs_test_domain DEFAULT 'foo1'::queryfuncs_test_domain ON EMPTY); - json_value ------------- - foo1 -(1 row) - -SELECT JSON_VALUE(jsonb '{"d1": "H"}', '$.a2' RETURNING queryfuncs_test_domain DEFAULT '"foo1"'::jsonb::text ON EMPTY); - json_value ------------- - "foo1" -(1 row) - -SELECT JSON_VALUE(jsonb '{"d1": "foo"}', '$.a2' RETURNING queryfuncs_test_domain DEFAULT 'foo1'::queryfuncs_test_domain ON EMPTY); - json_value ------------- - foo1 -(1 row) - --- Check the cases where a coercion-related expression is masking an --- unsupported expression --- CoerceViaIO -SELECT JSON_QUERY('"a"', '$.a' RETURNING int DEFAULT (SELECT '"1"')::jsonb ON ERROR); -ERROR: can only specify a constant, non-aggregate function, or operator expression for DEFAULT -LINE 1: ...CT JSON_QUERY('"a"', '$.a' RETURNING int DEFAULT (SELECT '"... - ^ --- CoerceToDomain -SELECT JSON_QUERY('"a"', '$.a' RETURNING queryfuncs_test_domain DEFAULT (select '"1"')::queryfuncs_test_domain ON ERROR); -ERROR: can only specify a constant, non-aggregate function, or operator expression for DEFAULT -LINE 1: ...', '$.a' RETURNING queryfuncs_test_domain DEFAULT (select '"... 
- ^ --- RelabelType -SELECT JSON_QUERY('"a"', '$.a' RETURNING int DEFAULT (SELECT 1)::oid::int ON ERROR); -ERROR: can only specify a constant, non-aggregate function, or operator expression for DEFAULT -LINE 1: ...CT JSON_QUERY('"a"', '$.a' RETURNING int DEFAULT (SELECT 1)... - ^ --- ArrayCoerceExpr -SELECT JSON_QUERY('"a"', '$.a' RETURNING int[] DEFAULT (SELECT '{1}')::oid[]::int[] ON ERROR); -ERROR: can only specify a constant, non-aggregate function, or operator expression for DEFAULT -LINE 1: ... JSON_QUERY('"a"', '$.a' RETURNING int[] DEFAULT (SELECT '{... - ^ --- CollateExpr -SELECT JSON_QUERY('"a"', '$.a' RETURNING int[] DEFAULT (SELECT '{1}')::text COLLATE "C" ON ERROR); -ERROR: can only specify a constant, non-aggregate function, or operator expression for DEFAULT -LINE 1: ... JSON_QUERY('"a"', '$.a' RETURNING int[] DEFAULT (SELECT '{... - ^ --- ConvertRowtypeExpr -CREATE TABLE someparent (a int); -CREATE TABLE somechild () INHERITS (someparent); -SELECT JSON_QUERY('"a"', '$.a' RETURNING someparent DEFAULT (SELECT '(1)')::somechild::someparent ON ERROR); -ERROR: can only specify a constant, non-aggregate function, or operator expression for DEFAULT -LINE 1: ..._QUERY('"a"', '$.a' RETURNING someparent DEFAULT (SELECT '(... - ^ -DROP DOMAIN queryfuncs_test_domain; -DROP TABLE someparent, somechild; --- Extension: non-constant JSON path -SELECT JSON_EXISTS(jsonb '{"a": 123}', '$' || '.' || 'a'); - json_exists -------------- - t -(1 row) - -SELECT JSON_VALUE(jsonb '{"a": 123}', '$' || '.' || 'a'); - json_value ------------- - 123 -(1 row) - -SELECT JSON_VALUE(jsonb '{"a": 123}', '$' || '.' || 'b' DEFAULT 'foo' ON EMPTY); - json_value ------------- - foo -(1 row) - -SELECT JSON_QUERY(jsonb '{"a": 123}', '$' || '.' || 'a'); - json_query ------------- - 123 -(1 row) - -SELECT JSON_QUERY(jsonb '{"a": 123}', '$' || '.' || 'a' WITH WRAPPER); - json_query ------------- - [123] -(1 row) - --- Should fail (invalid path) -SELECT JSON_QUERY(jsonb '{"a": 123}', 'error' || ' ' || 'error'); -ERROR: syntax error at or near " " of jsonpath input --- Non-jsonb inputs automatically coerced to jsonb -SELECT JSON_EXISTS(json '{"a": 123}', '$' || '.' || 'a'); - json_exists -------------- - t -(1 row) - -SELECT JSON_QUERY(NULL FORMAT JSON, '$'); - json_query ------------- - -(1 row) - --- Test non-const jsonpath -CREATE TEMP TABLE jsonpaths (path) AS SELECT '$'; -SELECT json_value('"aaa"', path RETURNING json) FROM jsonpaths; - json_value ------------- - "aaa" -(1 row) - --- Test PASSING argument parsing -SELECT JSON_QUERY(jsonb 'null', '$xyz' PASSING 1 AS xy); -ERROR: could not find jsonpath variable "xyz" -SELECT JSON_QUERY(jsonb 'null', '$xy' PASSING 1 AS xyz); -ERROR: could not find jsonpath variable "xy" -SELECT JSON_QUERY(jsonb 'null', '$xyz' PASSING 1 AS xyz); - json_query ------------- - 1 -(1 row) - --- Test ON ERROR / EMPTY value validity for the function; all fail. -SELECT JSON_EXISTS(jsonb '1', '$' DEFAULT 1 ON ERROR); -ERROR: invalid ON ERROR behavior -LINE 1: SELECT JSON_EXISTS(jsonb '1', '$' DEFAULT 1 ON ERROR); - ^ -DETAIL: Only ERROR, TRUE, FALSE, or UNKNOWN is allowed in ON ERROR for JSON_EXISTS(). -SELECT JSON_VALUE(jsonb '1', '$' EMPTY ON ERROR); -ERROR: invalid ON ERROR behavior -LINE 1: SELECT JSON_VALUE(jsonb '1', '$' EMPTY ON ERROR); - ^ -DETAIL: Only ERROR, NULL, or DEFAULT expression is allowed in ON ERROR for JSON_VALUE(). 
-SELECT JSON_QUERY(jsonb '1', '$' TRUE ON ERROR); -ERROR: invalid ON ERROR behavior -LINE 1: SELECT JSON_QUERY(jsonb '1', '$' TRUE ON ERROR); - ^ -DETAIL: Only ERROR, NULL, EMPTY ARRAY, EMPTY OBJECT, or DEFAULT expression is allowed in ON ERROR for JSON_QUERY(). --- Test implicit coercion to a domain over fixed-length type specified in --- RETURNING -CREATE DOMAIN queryfuncs_char2 AS char(2); -CREATE DOMAIN queryfuncs_char2_chk AS char(2) CHECK (VALUE NOT IN ('12')); -SELECT JSON_QUERY(jsonb '123', '$' RETURNING queryfuncs_char2 ERROR ON ERROR); -ERROR: value too long for type character(2) -SELECT JSON_QUERY(jsonb '123', '$' RETURNING queryfuncs_char2 DEFAULT '1' ON ERROR); - json_query ------------- - 1 -(1 row) - -SELECT JSON_QUERY(jsonb '123', '$' RETURNING queryfuncs_char2_chk ERROR ON ERROR); -ERROR: value too long for type character(2) -SELECT JSON_QUERY(jsonb '123', '$' RETURNING queryfuncs_char2_chk DEFAULT '1' ON ERROR); - json_query ------------- - 1 -(1 row) - -SELECT JSON_VALUE(jsonb '123', '$' RETURNING queryfuncs_char2 ERROR ON ERROR); -ERROR: value too long for type character(2) -SELECT JSON_VALUE(jsonb '123', '$' RETURNING queryfuncs_char2 DEFAULT 1 ON ERROR); - json_value ------------- - 1 -(1 row) - -SELECT JSON_VALUE(jsonb '123', '$' RETURNING queryfuncs_char2_chk ERROR ON ERROR); -ERROR: value too long for type character(2) -SELECT JSON_VALUE(jsonb '123', '$' RETURNING queryfuncs_char2_chk DEFAULT 1 ON ERROR); - json_value ------------- - 1 -(1 row) - -DROP DOMAIN queryfuncs_char2, queryfuncs_char2_chk; --- Test coercion to domain over another fixed-length type of the ON ERROR / --- EMPTY expressions. Ask user to cast the DEFAULT expression explicitly if --- automatic casting cannot be done, for example, from int to bit(2). -CREATE DOMAIN queryfuncs_d_varbit3 AS varbit(3) CHECK (VALUE <> '01'); -SELECT JSON_VALUE(jsonb '1234', '$' RETURNING queryfuncs_d_varbit3 DEFAULT '111111' ON ERROR); -ERROR: bit string too long for type bit varying(3) -SELECT JSON_VALUE(jsonb '1234', '$' RETURNING queryfuncs_d_varbit3 DEFAULT '010' ON ERROR); - json_value ------------- - 010 -(1 row) - -SELECT JSON_VALUE(jsonb '1234', '$' RETURNING queryfuncs_d_varbit3 DEFAULT '01' ON ERROR); -ERROR: could not coerce ON ERROR expression (DEFAULT) to the RETURNING type -DETAIL: value for domain queryfuncs_d_varbit3 violates check constraint "queryfuncs_d_varbit3_check" -SELECT JSON_VALUE(jsonb '"111"', '$' RETURNING bit(2) ERROR ON ERROR); -ERROR: bit string length 3 does not match type bit(2) -SELECT JSON_VALUE(jsonb '1234', '$' RETURNING bit(3) DEFAULT 1 ON ERROR); -ERROR: cannot cast behavior expression of type integer to bit -LINE 1: ...VALUE(jsonb '1234', '$' RETURNING bit(3) DEFAULT 1 ON ERROR... - ^ -HINT: You will need to explicitly cast the expression to type bit. -SELECT JSON_VALUE(jsonb '1234', '$' RETURNING bit(3) DEFAULT 1::bit(3) ON ERROR); - json_value ------------- - 001 -(1 row) - -SELECT JSON_VALUE(jsonb '"111"', '$.a' RETURNING bit(3) DEFAULT '1111' ON EMPTY); -ERROR: bit string length 4 does not match type bit(3) -DROP DOMAIN queryfuncs_d_varbit3; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? 
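As context for the tail of the lost queryfuncs hunk above: it exercises the rule that an ON ERROR / ON EMPTY DEFAULT expression must be implicitly coercible to the RETURNING type, and that the parser asks for an explicit cast when no implicit one exists (e.g. integer to bit). A minimal standalone illustration, distilled from the expected output quoted above (nothing here beyond what the hunk itself shows):

SELECT JSON_VALUE(jsonb '1234', '$' RETURNING bit(3) DEFAULT 1 ON ERROR);
-- fails at parse time: "cannot cast behavior expression of type integer to bit",
-- with a HINT to cast the DEFAULT expression explicitly
SELECT JSON_VALUE(jsonb '1234', '$' RETURNING bit(3) DEFAULT 1::bit(3) ON ERROR);
-- returns 001: coercing 1234 to bit(3) fails at runtime, so the cast DEFAULT applies
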
diff -U3 /Users/admin/pgsql/src/test/regress/expected/sqljson_jsontable.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/sqljson_jsontable.out --- /Users/admin/pgsql/src/test/regress/expected/sqljson_jsontable.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/sqljson_jsontable.out 2024-12-13 13:20:12 @@ -1,1180 +1,2 @@ --- JSON_TABLE --- Should fail (JSON_TABLE can be used only in FROM clause) -SELECT JSON_TABLE('[]', '$'); -ERROR: syntax error at or near "(" -LINE 1: SELECT JSON_TABLE('[]', '$'); - ^ --- Only allow EMPTY and ERROR for ON ERROR -SELECT * FROM JSON_TABLE('[]', 'strict $.a' COLUMNS (js2 int PATH '$') DEFAULT 1 ON ERROR); -ERROR: invalid ON ERROR behavior -LINE 1: ...BLE('[]', 'strict $.a' COLUMNS (js2 int PATH '$') DEFAULT 1 ... - ^ -DETAIL: Only EMPTY [ ARRAY ] or ERROR is allowed in the top-level ON ERROR clause. -SELECT * FROM JSON_TABLE('[]', 'strict $.a' COLUMNS (js2 int PATH '$') NULL ON ERROR); -ERROR: invalid ON ERROR behavior -LINE 1: ...BLE('[]', 'strict $.a' COLUMNS (js2 int PATH '$') NULL ON ER... - ^ -DETAIL: Only EMPTY [ ARRAY ] or ERROR is allowed in the top-level ON ERROR clause. -SELECT * FROM JSON_TABLE('[]', 'strict $.a' COLUMNS (js2 int PATH '$') EMPTY ON ERROR); - js2 ------ -(0 rows) - -SELECT * FROM JSON_TABLE('[]', 'strict $.a' COLUMNS (js2 int PATH '$') ERROR ON ERROR); -ERROR: jsonpath member accessor can only be applied to an object --- Column and path names must be distinct -SELECT * FROM JSON_TABLE(jsonb'"1.23"', '$.a' as js2 COLUMNS (js2 int path '$')); -ERROR: duplicate JSON_TABLE column or path name: js2 -LINE 1: ...M JSON_TABLE(jsonb'"1.23"', '$.a' as js2 COLUMNS (js2 int pa... - ^ --- Should fail (no columns) -SELECT * FROM JSON_TABLE(NULL, '$' COLUMNS ()); -ERROR: syntax error at or near ")" -LINE 1: SELECT * FROM JSON_TABLE(NULL, '$' COLUMNS ()); - ^ -SELECT * FROM JSON_TABLE (NULL::jsonb, '$' COLUMNS (v1 timestamp)) AS f (v1, v2); -ERROR: JSON_TABLE function has 1 columns available but 2 columns specified ---duplicated column name -SELECT * FROM JSON_TABLE(jsonb'"1.23"', '$.a' COLUMNS (js2 int path '$', js2 int path '$')); -ERROR: duplicate JSON_TABLE column or path name: js2 -LINE 1: ...E(jsonb'"1.23"', '$.a' COLUMNS (js2 int path '$', js2 int pa... - ^ ---return composite data type. 
-create type comp as (a int, b int); -SELECT * FROM JSON_TABLE(jsonb '{"rec": "(1,2)"}', '$' COLUMNS (id FOR ORDINALITY, comp comp path '$.rec' omit quotes)) jt; - id | comp -----+------- - 1 | (1,2) -(1 row) - -drop type comp; --- NULL => empty table -SELECT * FROM JSON_TABLE(NULL::jsonb, '$' COLUMNS (foo int)) bar; - foo ------ -(0 rows) - -SELECT * FROM JSON_TABLE(jsonb'"1.23"', 'strict $.a' COLUMNS (js2 int PATH '$')); - js2 ------ -(0 rows) - --- -SELECT * FROM JSON_TABLE(jsonb '123', '$' - COLUMNS (item int PATH '$', foo int)) bar; - item | foo -------+----- - 123 | -(1 row) - --- JSON_TABLE: basic functionality -CREATE DOMAIN jsonb_test_domain AS text CHECK (value <> 'foo'); -CREATE TEMP TABLE json_table_test (js) AS - (VALUES - ('1'), - ('[]'), - ('{}'), - ('[1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""]') - ); --- Regular "unformatted" columns -SELECT * -FROM json_table_test vals - LEFT OUTER JOIN - JSON_TABLE( - vals.js::jsonb, 'lax $[*]' - COLUMNS ( - id FOR ORDINALITY, - "int" int PATH '$', - "text" text PATH '$', - "char(4)" char(4) PATH '$', - "bool" bool PATH '$', - "numeric" numeric PATH '$', - "domain" jsonb_test_domain PATH '$', - js json PATH '$', - jb jsonb PATH '$' - ) - ) jt - ON true; - js | id | int | text | char(4) | bool | numeric | domain | js | jb ----------------------------------------------------------------------------------------+----+-----+---------+---------+------+---------+---------+--------------+-------------- - 1 | 1 | 1 | 1 | 1 | t | 1 | 1 | 1 | 1 - [] | | | | | | | | | - {} | 1 | | | | | | | {} | {} - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 1 | 1 | 1 | 1 | t | 1 | 1 | 1 | 1 - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 2 | | 1.23 | 1.23 | | 1.23 | 1.23 | 1.23 | 1.23 - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 3 | 2 | 2 | 2 | | 2 | 2 | "2" | "2" - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 4 | | aaaaaaa | | | | aaaaaaa | "aaaaaaa" | "aaaaaaa" - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 5 | | foo | foo | | | | "foo" | "foo" - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 6 | | | | | | | null | null - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 7 | | f | f | f | | false | false | false - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 8 | | t | t | t | | true | true | true - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 9 | | | | | | | {"aaa": 123} | {"aaa": 123} - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 10 | | [1,2] | | | | [1,2] | "[1,2]" | "[1,2]" - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 11 | | "str" | | | | "str" | "\"str\"" | "\"str\"" -(14 rows) - --- "formatted" columns -SELECT * -FROM json_table_test vals - LEFT OUTER JOIN - JSON_TABLE( - vals.js::jsonb, 'lax $[*]' - COLUMNS ( - id FOR ORDINALITY, - jst text FORMAT JSON PATH '$', - jsc char(4) FORMAT JSON PATH '$', - jsv varchar(4) FORMAT JSON PATH '$', - jsb jsonb FORMAT JSON PATH '$', - jsbq jsonb FORMAT JSON PATH '$' OMIT QUOTES - ) - ) jt - ON true; - js | id | jst | jsc | jsv | jsb | jsbq 
----------------------------------------------------------------------------------------+----+--------------+------+------+--------------+-------------- - 1 | 1 | 1 | 1 | 1 | 1 | 1 - [] | | | | | | - {} | 1 | {} | {} | {} | {} | {} - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 1 | 1 | 1 | 1 | 1 | 1 - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 2 | 1.23 | 1.23 | 1.23 | 1.23 | 1.23 - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 3 | "2" | "2" | "2" | "2" | 2 - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 4 | "aaaaaaa" | | | "aaaaaaa" | - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 5 | "foo" | | | "foo" | - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 6 | null | null | null | null | null - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 7 | false | | | false | false - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 8 | true | true | true | true | true - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 9 | {"aaa": 123} | | | {"aaa": 123} | {"aaa": 123} - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 10 | "[1,2]" | | | "[1,2]" | [1, 2] - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 11 | "\"str\"" | | | "\"str\"" | "str" -(14 rows) - --- EXISTS columns -SELECT * -FROM json_table_test vals - LEFT OUTER JOIN - JSON_TABLE( - vals.js::jsonb, 'lax $[*]' - COLUMNS ( - id FOR ORDINALITY, - exists1 bool EXISTS PATH '$.aaa', - exists2 int EXISTS PATH '$.aaa', - exists3 int EXISTS PATH 'strict $.aaa' UNKNOWN ON ERROR, - exists4 text EXISTS PATH 'strict $.aaa' FALSE ON ERROR - ) - ) jt - ON true; - js | id | exists1 | exists2 | exists3 | exists4 ----------------------------------------------------------------------------------------+----+---------+---------+---------+--------- - 1 | 1 | f | 0 | | false - [] | | | | | - {} | 1 | f | 0 | | false - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 1 | f | 0 | | false - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 2 | f | 0 | | false - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 3 | f | 0 | | false - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 4 | f | 0 | | false - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 5 | f | 0 | | false - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 6 | f | 0 | | false - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 7 | f | 0 | | false - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 8 | f | 0 | | false - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 9 | t | 1 | 1 | true - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 10 | f | 0 | | false - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 11 | f | 0 | | false -(14 rows) - --- Other miscellaneous checks -SELECT * -FROM json_table_test vals - LEFT OUTER JOIN - JSON_TABLE( - 
vals.js::jsonb, 'lax $[*]' - COLUMNS ( - id FOR ORDINALITY, - aaa int, -- "aaa" has implicit path '$."aaa"' - aaa1 int PATH '$.aaa', - js2 json PATH '$', - jsb2w jsonb PATH '$' WITH WRAPPER, - jsb2q jsonb PATH '$' OMIT QUOTES, - ia int[] PATH '$', - ta text[] PATH '$', - jba jsonb[] PATH '$' - ) - ) jt - ON true; - js | id | aaa | aaa1 | js2 | jsb2w | jsb2q | ia | ta | jba ----------------------------------------------------------------------------------------+----+-----+------+--------------+----------------+--------------+----+----+----- - 1 | 1 | | | 1 | [1] | 1 | | | - [] | | | | | | | | | - {} | 1 | | | {} | [{}] | {} | | | - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 1 | | | 1 | [1] | 1 | | | - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 2 | | | 1.23 | [1.23] | 1.23 | | | - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 3 | | | "2" | ["2"] | 2 | | | - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 4 | | | "aaaaaaa" | ["aaaaaaa"] | | | | - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 5 | | | "foo" | ["foo"] | | | | - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 6 | | | null | [null] | null | | | - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 7 | | | false | [false] | false | | | - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 8 | | | true | [true] | true | | | - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 9 | 123 | 123 | {"aaa": 123} | [{"aaa": 123}] | {"aaa": 123} | | | - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 10 | | | "[1,2]" | ["[1,2]"] | [1, 2] | | | - [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 11 | | | "\"str\"" | ["\"str\""] | "str" | | | -(14 rows) - --- Test using casts in DEFAULT .. 
ON ERROR expression -SELECT * FROM JSON_TABLE(jsonb '{"d1": "H"}', '$' - COLUMNS (js1 jsonb_test_domain PATH '$.a2' DEFAULT '"foo1"'::jsonb::text ON EMPTY)); - js1 --------- - "foo1" -(1 row) - -SELECT * FROM JSON_TABLE(jsonb '{"d1": "H"}', '$' - COLUMNS (js1 jsonb_test_domain PATH '$.a2' DEFAULT 'foo'::jsonb_test_domain ON EMPTY)); -ERROR: could not coerce ON EMPTY expression (DEFAULT) to the RETURNING type -DETAIL: value for domain jsonb_test_domain violates check constraint "jsonb_test_domain_check" -SELECT * FROM JSON_TABLE(jsonb '{"d1": "H"}', '$' - COLUMNS (js1 jsonb_test_domain PATH '$.a2' DEFAULT 'foo1'::jsonb_test_domain ON EMPTY)); - js1 ------- - foo1 -(1 row) - -SELECT * FROM JSON_TABLE(jsonb '{"d1": "foo"}', '$' - COLUMNS (js1 jsonb_test_domain PATH '$.d1' DEFAULT 'foo2'::jsonb_test_domain ON ERROR)); - js1 ------- - foo2 -(1 row) - -SELECT * FROM JSON_TABLE(jsonb '{"d1": "foo"}', '$' - COLUMNS (js1 oid[] PATH '$.d2' DEFAULT '{1}'::int[]::oid[] ON EMPTY)); - js1 ------ - {1} -(1 row) - --- JSON_TABLE: Test backward parsing -CREATE VIEW jsonb_table_view2 AS -SELECT * FROM - JSON_TABLE( - jsonb 'null', 'lax $[*]' PASSING 1 + 2 AS a, json '"foo"' AS "b c" - COLUMNS ( - "int" int PATH '$', - "text" text PATH '$', - "char(4)" char(4) PATH '$', - "bool" bool PATH '$', - "numeric" numeric PATH '$', - "domain" jsonb_test_domain PATH '$')); -CREATE VIEW jsonb_table_view3 AS -SELECT * FROM - JSON_TABLE( - jsonb 'null', 'lax $[*]' PASSING 1 + 2 AS a, json '"foo"' AS "b c" - COLUMNS ( - js json PATH '$', - jb jsonb PATH '$', - jst text FORMAT JSON PATH '$', - jsc char(4) FORMAT JSON PATH '$', - jsv varchar(4) FORMAT JSON PATH '$')); -CREATE VIEW jsonb_table_view4 AS -SELECT * FROM - JSON_TABLE( - jsonb 'null', 'lax $[*]' PASSING 1 + 2 AS a, json '"foo"' AS "b c" - COLUMNS ( - jsb jsonb FORMAT JSON PATH '$', - jsbq jsonb FORMAT JSON PATH '$' OMIT QUOTES, - aaa int, -- implicit path '$."aaa"', - aaa1 int PATH '$.aaa')); -CREATE VIEW jsonb_table_view5 AS -SELECT * FROM - JSON_TABLE( - jsonb 'null', 'lax $[*]' PASSING 1 + 2 AS a, json '"foo"' AS "b c" - COLUMNS ( - exists1 bool EXISTS PATH '$.aaa', - exists2 int EXISTS PATH '$.aaa' TRUE ON ERROR, - exists3 text EXISTS PATH 'strict $.aaa' UNKNOWN ON ERROR)); -CREATE VIEW jsonb_table_view6 AS -SELECT * FROM - JSON_TABLE( - jsonb 'null', 'lax $[*]' PASSING 1 + 2 AS a, json '"foo"' AS "b c" - COLUMNS ( - js2 json PATH '$', - jsb2w jsonb PATH '$' WITH WRAPPER, - jsb2q jsonb PATH '$' OMIT QUOTES, - ia int[] PATH '$', - ta text[] PATH '$', - jba jsonb[] PATH '$')); -\sv jsonb_table_view2 -CREATE OR REPLACE VIEW public.jsonb_table_view2 AS - SELECT "int", - text, - "char(4)", - bool, - "numeric", - domain - FROM JSON_TABLE( - 'null'::jsonb, '$[*]' AS json_table_path_0 - PASSING - 1 + 2 AS a, - '"foo"'::json AS "b c" - COLUMNS ( - "int" integer PATH '$', - text text PATH '$', - "char(4)" character(4) PATH '$', - bool boolean PATH '$', - "numeric" numeric PATH '$', - domain jsonb_test_domain PATH '$' - ) - ) -\sv jsonb_table_view3 -CREATE OR REPLACE VIEW public.jsonb_table_view3 AS - SELECT js, - jb, - jst, - jsc, - jsv - FROM JSON_TABLE( - 'null'::jsonb, '$[*]' AS json_table_path_0 - PASSING - 1 + 2 AS a, - '"foo"'::json AS "b c" - COLUMNS ( - js json PATH '$' WITHOUT WRAPPER KEEP QUOTES, - jb jsonb PATH '$' WITHOUT WRAPPER KEEP QUOTES, - jst text FORMAT JSON PATH '$' WITHOUT WRAPPER KEEP QUOTES, - jsc character(4) FORMAT JSON PATH '$' WITHOUT WRAPPER KEEP QUOTES, - jsv character varying(4) FORMAT JSON PATH '$' WITHOUT WRAPPER KEEP QUOTES - ) - ) -\sv 
jsonb_table_view4 -CREATE OR REPLACE VIEW public.jsonb_table_view4 AS - SELECT jsb, - jsbq, - aaa, - aaa1 - FROM JSON_TABLE( - 'null'::jsonb, '$[*]' AS json_table_path_0 - PASSING - 1 + 2 AS a, - '"foo"'::json AS "b c" - COLUMNS ( - jsb jsonb PATH '$' WITHOUT WRAPPER KEEP QUOTES, - jsbq jsonb PATH '$' WITHOUT WRAPPER OMIT QUOTES, - aaa integer PATH '$."aaa"', - aaa1 integer PATH '$."aaa"' - ) - ) -\sv jsonb_table_view5 -CREATE OR REPLACE VIEW public.jsonb_table_view5 AS - SELECT exists1, - exists2, - exists3 - FROM JSON_TABLE( - 'null'::jsonb, '$[*]' AS json_table_path_0 - PASSING - 1 + 2 AS a, - '"foo"'::json AS "b c" - COLUMNS ( - exists1 boolean EXISTS PATH '$."aaa"', - exists2 integer EXISTS PATH '$."aaa"' TRUE ON ERROR, - exists3 text EXISTS PATH 'strict $."aaa"' UNKNOWN ON ERROR - ) - ) -\sv jsonb_table_view6 -CREATE OR REPLACE VIEW public.jsonb_table_view6 AS - SELECT js2, - jsb2w, - jsb2q, - ia, - ta, - jba - FROM JSON_TABLE( - 'null'::jsonb, '$[*]' AS json_table_path_0 - PASSING - 1 + 2 AS a, - '"foo"'::json AS "b c" - COLUMNS ( - js2 json PATH '$' WITHOUT WRAPPER KEEP QUOTES, - jsb2w jsonb PATH '$' WITH UNCONDITIONAL WRAPPER KEEP QUOTES, - jsb2q jsonb PATH '$' WITHOUT WRAPPER OMIT QUOTES, - ia integer[] PATH '$' WITHOUT WRAPPER KEEP QUOTES, - ta text[] PATH '$' WITHOUT WRAPPER KEEP QUOTES, - jba jsonb[] PATH '$' WITHOUT WRAPPER KEEP QUOTES - ) - ) -EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM jsonb_table_view2; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Table Function Scan on "json_table" - Output: "json_table"."int", "json_table".text, "json_table"."char(4)", "json_table".bool, "json_table"."numeric", "json_table".domain - Table Function Call: JSON_TABLE('null'::jsonb, '$[*]' AS json_table_path_0 PASSING 3 AS a, '"foo"'::jsonb AS "b c" COLUMNS ("int" integer PATH '$', text text PATH '$', "char(4)" character(4) PATH '$', bool boolean PATH '$', "numeric" numeric PATH '$', domain jsonb_test_domain PATH '$')) -(3 rows) - -EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM jsonb_table_view3; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Table Function Scan on "json_table" - Output: "json_table".js, "json_table".jb, "json_table".jst, "json_table".jsc, "json_table".jsv - Table Function Call: JSON_TABLE('null'::jsonb, '$[*]' AS json_table_path_0 PASSING 3 AS a, '"foo"'::jsonb AS "b c" COLUMNS (js json PATH '$' WITHOUT WRAPPER KEEP QUOTES, jb jsonb PATH '$' WITHOUT WRAPPER KEEP QUOTES, jst text FORMAT JSON PATH '$' WITHOUT WRAPPER KEEP QUOTES, jsc character(4) FORMAT JSON PATH '$' WITHOUT WRAPPER KEEP QUOTES, jsv character varying(4) FORMAT JSON PATH '$' WITHOUT WRAPPER KEEP QUOTES)) -(3 rows) - -EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM jsonb_table_view4; - QUERY PLAN 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Table Function Scan on "json_table" - Output: "json_table".jsb, "json_table".jsbq, "json_table".aaa, "json_table".aaa1 - Table Function Call: JSON_TABLE('null'::jsonb, '$[*]' AS json_table_path_0 PASSING 3 AS a, '"foo"'::jsonb AS "b c" COLUMNS (jsb jsonb PATH '$' WITHOUT WRAPPER KEEP QUOTES, jsbq jsonb PATH '$' WITHOUT WRAPPER OMIT QUOTES, aaa integer PATH '$."aaa"', aaa1 integer PATH '$."aaa"')) -(3 rows) - -EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM jsonb_table_view5; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Table Function Scan on "json_table" - Output: "json_table".exists1, "json_table".exists2, "json_table".exists3 - Table Function Call: JSON_TABLE('null'::jsonb, '$[*]' AS json_table_path_0 PASSING 3 AS a, '"foo"'::jsonb AS "b c" COLUMNS (exists1 boolean EXISTS PATH '$."aaa"', exists2 integer EXISTS PATH '$."aaa"' TRUE ON ERROR, exists3 text EXISTS PATH 'strict $."aaa"' UNKNOWN ON ERROR)) -(3 rows) - -EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM jsonb_table_view6; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Table Function Scan on "json_table" - Output: "json_table".js2, "json_table".jsb2w, "json_table".jsb2q, "json_table".ia, "json_table".ta, "json_table".jba - Table Function Call: JSON_TABLE('null'::jsonb, '$[*]' AS json_table_path_0 PASSING 3 AS a, '"foo"'::jsonb AS "b c" COLUMNS (js2 json PATH '$' WITHOUT WRAPPER KEEP QUOTES, jsb2w jsonb PATH '$' WITH UNCONDITIONAL WRAPPER KEEP QUOTES, jsb2q jsonb PATH '$' WITHOUT WRAPPER OMIT QUOTES, ia integer[] PATH '$' WITHOUT WRAPPER KEEP QUOTES, ta text[] PATH '$' WITHOUT WRAPPER KEEP QUOTES, jba jsonb[] PATH '$' WITHOUT WRAPPER KEEP QUOTES)) -(3 rows) - --- JSON_TABLE() with alias -EXPLAIN (COSTS OFF, VERBOSE) -SELECT * FROM - JSON_TABLE( - jsonb 'null', 'lax $[*]' PASSING 1 + 2 AS a, json '"foo"' AS "b c" - COLUMNS ( - id FOR ORDINALITY, - "int" int PATH '$', - "text" text PATH '$' - )) json_table_func; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - Table Function Scan on "json_table" json_table_func - Output: id, "int", text - Table Function Call: JSON_TABLE('null'::jsonb, '$[*]' AS json_table_path_0 PASSING 3 AS a, '"foo"'::jsonb AS "b c" COLUMNS (id FOR ORDINALITY, "int" integer PATH '$', text text PATH '$')) -(3 rows) - -EXPLAIN (COSTS OFF, FORMAT JSON, VERBOSE) -SELECT * FROM - JSON_TABLE( - jsonb 'null', 'lax $[*]' PASSING 1 + 2 AS a, json '"foo"' AS "b c" - COLUMNS ( - id FOR ORDINALITY, - "int" int PATH '$', - "text" text PATH '$' - )) 
json_table_func; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - [ + - { + - "Plan": { + - "Node Type": "Table Function Scan", + - "Parallel Aware": false, + - "Async Capable": false, + - "Table Function Name": "json_table", + - "Alias": "json_table_func", + - "Disabled": false, + - "Output": ["id", "\"int\"", "text"], + - "Table Function Call": "JSON_TABLE('null'::jsonb, '$[*]' AS json_table_path_0 PASSING 3 AS a, '\"foo\"'::jsonb AS \"b c\" COLUMNS (id FOR ORDINALITY, \"int\" integer PATH '$', text text PATH '$'))"+ - } + - } + - ] -(1 row) - -DROP VIEW jsonb_table_view2; -DROP VIEW jsonb_table_view3; -DROP VIEW jsonb_table_view4; -DROP VIEW jsonb_table_view5; -DROP VIEW jsonb_table_view6; -DROP DOMAIN jsonb_test_domain; --- JSON_TABLE: only one FOR ORDINALITY column allowed -SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (id FOR ORDINALITY, id2 FOR ORDINALITY, a int PATH '$.a' ERROR ON EMPTY)) jt; -ERROR: only one FOR ORDINALITY column is allowed -LINE 1: ..._TABLE(jsonb '1', '$' COLUMNS (id FOR ORDINALITY, id2 FOR OR... - ^ -SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (id FOR ORDINALITY, a int PATH '$' ERROR ON EMPTY)) jt; - id | a -----+--- - 1 | 1 -(1 row) - --- JSON_TABLE: ON EMPTY/ON ERROR behavior -SELECT * -FROM - (VALUES ('1'), ('"err"')) vals(js), - JSON_TABLE(vals.js::jsonb, '$' COLUMNS (a int PATH '$')) jt; - js | a --------+--- - 1 | 1 - "err" | -(2 rows) - -SELECT * -FROM - (VALUES ('1'), ('"err"')) vals(js) - LEFT OUTER JOIN - JSON_TABLE(vals.js::jsonb, '$' COLUMNS (a int PATH '$' ERROR ON ERROR)) jt - ON true; -ERROR: invalid input syntax for type integer: "err" --- TABLE-level ERROR ON ERROR is not propagated to columns -SELECT * -FROM - (VALUES ('1'), ('"err"')) vals(js) - LEFT OUTER JOIN - JSON_TABLE(vals.js::jsonb, '$' COLUMNS (a int PATH '$' ERROR ON ERROR)) jt - ON true; -ERROR: invalid input syntax for type integer: "err" -SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int PATH '$.a' ERROR ON EMPTY)) jt; -ERROR: no SQL/JSON item found for specified path of column "a" -SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int PATH 'strict $.a' ERROR ON ERROR) ERROR ON ERROR) jt; -ERROR: jsonpath member accessor can only be applied to an object -SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int PATH 'lax $.a' ERROR ON EMPTY) ERROR ON ERROR) jt; -ERROR: no SQL/JSON item found for specified path of column "a" -SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a int PATH '$' DEFAULT 1 ON EMPTY DEFAULT 2 ON ERROR)) jt; - a --- - 2 -(1 row) - -SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a int PATH 'strict $.a' DEFAULT 1 ON EMPTY DEFAULT 2 ON ERROR)) jt; - a --- - 2 -(1 row) - -SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a int PATH 'lax $.a' DEFAULT 1 ON EMPTY DEFAULT 2 ON ERROR)) jt; - a --- - 1 -(1 row) - --- JSON_TABLE: EXISTS PATH types -SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a int4 EXISTS PATH '$.a' ERROR ON ERROR)); -- ok; can cast to int4 - a --- - 0 -(1 row) - -SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a int4 EXISTS PATH '$' ERROR ON ERROR)); -- ok; can cast to int4 - a --- - 1 -(1 row) - -SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a int2 EXISTS PATH '$.a')); -ERROR: could not coerce ON ERROR expression (FALSE) to the RETURNING type -DETAIL: invalid input syntax for type smallint: "false" -SELECT * FROM JSON_TABLE(jsonb '"a"', '$' 
COLUMNS (a int8 EXISTS PATH '$.a')); -ERROR: could not coerce ON ERROR expression (FALSE) to the RETURNING type -DETAIL: invalid input syntax for type bigint: "false" -SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a float4 EXISTS PATH '$.a')); -ERROR: could not coerce ON ERROR expression (FALSE) to the RETURNING type -DETAIL: invalid input syntax for type real: "false" --- Default FALSE (ON ERROR) doesn't fit char(3) -SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a char(3) EXISTS PATH '$.a')); -ERROR: could not coerce ON ERROR expression (FALSE) to the RETURNING type -DETAIL: value too long for type character(3) -SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a char(3) EXISTS PATH '$.a' ERROR ON ERROR)); -ERROR: value too long for type character(3) -SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a char(5) EXISTS PATH '$.a' ERROR ON ERROR)); - a -------- - false -(1 row) - -SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a json EXISTS PATH '$.a')); - a -------- - false -(1 row) - -SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a jsonb EXISTS PATH '$.a')); - a -------- - false -(1 row) - --- EXISTS PATH domain over int -CREATE DOMAIN dint4 AS int; -CREATE DOMAIN dint4_0 AS int CHECK (VALUE <> 0 ); -SELECT a, a::bool FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a dint4 EXISTS PATH '$.a' )); - a | a ----+--- - 0 | f -(1 row) - -SELECT a, a::bool FROM JSON_TABLE(jsonb '{"a":1}', '$' COLUMNS (a dint4_0 EXISTS PATH '$.b')); -ERROR: could not coerce ON ERROR expression (FALSE) to the RETURNING type -DETAIL: value for domain dint4_0 violates check constraint "dint4_0_check" -SELECT a, a::bool FROM JSON_TABLE(jsonb '{"a":1}', '$' COLUMNS (a dint4_0 EXISTS PATH '$.b' ERROR ON ERROR)); -ERROR: value for domain dint4_0 violates check constraint "dint4_0_check" -SELECT a, a::bool FROM JSON_TABLE(jsonb '{"a":1}', '$' COLUMNS (a dint4_0 EXISTS PATH '$.b' FALSE ON ERROR)); -ERROR: could not coerce ON ERROR expression (FALSE) to the RETURNING type -DETAIL: value for domain dint4_0 violates check constraint "dint4_0_check" -SELECT a, a::bool FROM JSON_TABLE(jsonb '{"a":1}', '$' COLUMNS (a dint4_0 EXISTS PATH '$.b' TRUE ON ERROR)); - a | a ----+--- - 1 | t -(1 row) - -DROP DOMAIN dint4, dint4_0; --- JSON_TABLE: WRAPPER/QUOTES clauses on scalar columns -SELECT * FROM JSON_TABLE(jsonb '"world"', '$' COLUMNS (item text PATH '$' KEEP QUOTES ON SCALAR STRING)); - item ---------- - "world" -(1 row) - -SELECT * FROM JSON_TABLE(jsonb '"world"', '$' COLUMNS (item text PATH '$' OMIT QUOTES ON SCALAR STRING)); - item -------- - world -(1 row) - -SELECT * FROM JSON_TABLE(jsonb '"world"', '$' COLUMNS (item text FORMAT JSON PATH '$' KEEP QUOTES)); - item ---------- - "world" -(1 row) - -SELECT * FROM JSON_TABLE(jsonb '"world"', '$' COLUMNS (item text FORMAT JSON PATH '$' OMIT QUOTES)); - item -------- - world -(1 row) - -SELECT * FROM JSON_TABLE(jsonb '"world"', '$' COLUMNS (item text FORMAT JSON PATH '$' WITHOUT WRAPPER KEEP QUOTES)); - item ---------- - "world" -(1 row) - -SELECT * FROM JSON_TABLE(jsonb '"world"', '$' COLUMNS (item text PATH '$' WITHOUT WRAPPER OMIT QUOTES)); - item -------- - world -(1 row) - -SELECT * FROM JSON_TABLE(jsonb '"world"', '$' COLUMNS (item text FORMAT JSON PATH '$' WITH WRAPPER)); - item ------------ - ["world"] -(1 row) - --- Error: OMIT QUOTES should not be specified when WITH WRAPPER is present -SELECT * FROM JSON_TABLE(jsonb '"world"', '$' COLUMNS (item text PATH '$' WITH WRAPPER OMIT QUOTES)); -ERROR: SQL/JSON QUOTES behavior must not be specified when WITH 
WRAPPER is used -LINE 1: ...T * FROM JSON_TABLE(jsonb '"world"', '$' COLUMNS (item text ... - ^ --- But KEEP QUOTES (the default) is fine -SELECT * FROM JSON_TABLE(jsonb '"world"', '$' COLUMNS (item text FORMAT JSON PATH '$' WITH WRAPPER KEEP QUOTES)); - item ------------ - ["world"] -(1 row) - --- Test PASSING args -SELECT * -FROM JSON_TABLE( - jsonb '[1,2,3]', - '$[*] ? (@ < $x)' - PASSING 3 AS x - COLUMNS (y text FORMAT JSON PATH '$') - ) jt; - y ---- - 1 - 2 -(2 rows) - --- PASSING arguments are also passed to column paths -SELECT * -FROM JSON_TABLE( - jsonb '[1,2,3]', - '$[*] ? (@ < $x)' - PASSING 10 AS x, 3 AS y - COLUMNS (a text FORMAT JSON PATH '$ ? (@ < $y)') - ) jt; - a ---- - 1 - 2 - -(3 rows) - --- Should fail (not supported) -SELECT * FROM JSON_TABLE(jsonb '{"a": 123}', '$' || '.' || 'a' COLUMNS (foo int)); -ERROR: only string constants are supported in JSON_TABLE path specification -LINE 1: SELECT * FROM JSON_TABLE(jsonb '{"a": 123}', '$' || '.' || '... - ^ --- JsonPathQuery() error message mentioning column name -SELECT * FROM JSON_TABLE('{"a": [{"b": "1"}, {"b": "2"}]}', '$' COLUMNS (b json path '$.a[*].b' ERROR ON ERROR)); -ERROR: JSON path expression for column "b" should return single item without wrapper -HINT: Use the WITH WRAPPER clause to wrap SQL/JSON items into an array. --- JSON_TABLE: nested paths --- Duplicate path names -SELECT * FROM JSON_TABLE( - jsonb '[]', '$' AS a - COLUMNS ( - b int, - NESTED PATH '$' AS a - COLUMNS ( - c int - ) - ) -) jt; -ERROR: duplicate JSON_TABLE column or path name: a -LINE 5: NESTED PATH '$' AS a - ^ -SELECT * FROM JSON_TABLE( - jsonb '[]', '$' AS a - COLUMNS ( - b int, - NESTED PATH '$' AS n_a - COLUMNS ( - c int - ) - ) -) jt; - b | c ----+--- - | -(1 row) - -SELECT * FROM JSON_TABLE( - jsonb '[]', '$' - COLUMNS ( - b int, - NESTED PATH '$' AS b - COLUMNS ( - c int - ) - ) -) jt; -ERROR: duplicate JSON_TABLE column or path name: b -LINE 5: NESTED PATH '$' AS b - ^ -SELECT * FROM JSON_TABLE( - jsonb '[]', '$' - COLUMNS ( - NESTED PATH '$' AS a - COLUMNS ( - b int - ), - NESTED PATH '$' - COLUMNS ( - NESTED PATH '$' AS a - COLUMNS ( - c int - ) - ) - ) -) jt; -ERROR: duplicate JSON_TABLE column or path name: a -LINE 10: NESTED PATH '$' AS a - ^ --- JSON_TABLE: plan execution -CREATE TEMP TABLE jsonb_table_test (js jsonb); -INSERT INTO jsonb_table_test -VALUES ( - '[ - {"a": 1, "b": [], "c": []}, - {"a": 2, "b": [1, 2, 3], "c": [10, null, 20]}, - {"a": 3, "b": [1, 2], "c": []}, - {"x": "4", "b": [1, 2], "c": 123} - ]' -); -select - jt.* -from - jsonb_table_test jtt, - json_table ( - jtt.js,'strict $[*]' as p - columns ( - n for ordinality, - a int path 'lax $.a' default -1 on empty, - nested path 'strict $.b[*]' as pb columns (b_id for ordinality, b int path '$' ), - nested path 'strict $.c[*]' as pc columns (c_id for ordinality, c int path '$' ) - ) - ) jt; - n | a | b_id | b | c_id | c ----+----+------+---+------+---- - 1 | 1 | | | | - 2 | 2 | 1 | 1 | | - 2 | 2 | 2 | 2 | | - 2 | 2 | 3 | 3 | | - 2 | 2 | | | 1 | 10 - 2 | 2 | | | 2 | - 2 | 2 | | | 3 | 20 - 3 | 3 | 1 | 1 | | - 3 | 3 | 2 | 2 | | - 4 | -1 | 1 | 1 | | - 4 | -1 | 2 | 2 | | -(11 rows) - --- PASSING arguments are passed to nested paths and their columns' paths -SELECT * -FROM - generate_series(1, 3) x, - generate_series(1, 3) y, - JSON_TABLE(jsonb - '[[1,2,3],[2,3,4,5],[3,4,5,6]]', - 'strict $[*] ? (@[*] <= $x)' - PASSING x AS x, y AS y - COLUMNS ( - y text FORMAT JSON PATH '$', - NESTED PATH 'strict $[*] ? 
(@ == $y)' - COLUMNS ( - z int PATH '$' - ) - ) - ) jt; - x | y | y | z ----+---+--------------+--- - 1 | 1 | [1, 2, 3] | 1 - 2 | 1 | [1, 2, 3] | 1 - 2 | 1 | [2, 3, 4, 5] | - 3 | 1 | [1, 2, 3] | 1 - 3 | 1 | [2, 3, 4, 5] | - 3 | 1 | [3, 4, 5, 6] | - 1 | 2 | [1, 2, 3] | 2 - 2 | 2 | [1, 2, 3] | 2 - 2 | 2 | [2, 3, 4, 5] | 2 - 3 | 2 | [1, 2, 3] | 2 - 3 | 2 | [2, 3, 4, 5] | 2 - 3 | 2 | [3, 4, 5, 6] | - 1 | 3 | [1, 2, 3] | 3 - 2 | 3 | [1, 2, 3] | 3 - 2 | 3 | [2, 3, 4, 5] | 3 - 3 | 3 | [1, 2, 3] | 3 - 3 | 3 | [2, 3, 4, 5] | 3 - 3 | 3 | [3, 4, 5, 6] | 3 -(18 rows) - --- JSON_TABLE: Test backward parsing with nested paths -CREATE VIEW jsonb_table_view_nested AS -SELECT * FROM - JSON_TABLE( - jsonb 'null', 'lax $[*]' PASSING 1 + 2 AS a, json '"foo"' AS "b c" - COLUMNS ( - id FOR ORDINALITY, - NESTED PATH '$[1]' AS p1 COLUMNS ( - a1 int, - NESTED PATH '$[*]' AS "p1 1" COLUMNS ( - a11 text - ), - b1 text - ), - NESTED PATH '$[2]' AS p2 COLUMNS ( - NESTED PATH '$[*]' AS "p2:1" COLUMNS ( - a21 text - ), - NESTED PATH '$[*]' AS p22 COLUMNS ( - a22 text - ) - ) - ) - ); -\sv jsonb_table_view_nested -CREATE OR REPLACE VIEW public.jsonb_table_view_nested AS - SELECT id, - a1, - b1, - a11, - a21, - a22 - FROM JSON_TABLE( - 'null'::jsonb, '$[*]' AS json_table_path_0 - PASSING - 1 + 2 AS a, - '"foo"'::json AS "b c" - COLUMNS ( - id FOR ORDINALITY, - NESTED PATH '$[1]' AS p1 - COLUMNS ( - a1 integer PATH '$."a1"', - b1 text PATH '$."b1"', - NESTED PATH '$[*]' AS "p1 1" - COLUMNS ( - a11 text PATH '$."a11"' - ) - ), - NESTED PATH '$[2]' AS p2 - COLUMNS ( - NESTED PATH '$[*]' AS "p2:1" - COLUMNS ( - a21 text PATH '$."a21"' - ), - NESTED PATH '$[*]' AS p22 - COLUMNS ( - a22 text PATH '$."a22"' - ) - ) - ) - ) -DROP VIEW jsonb_table_view_nested; -CREATE TABLE s (js jsonb); -INSERT INTO s VALUES - ('{"a":{"za":[{"z1": [11,2222]},{"z21": [22, 234,2345]},{"z22": [32, 204,145]}]},"c": 3}'), - ('{"a":{"za":[{"z1": [21,4222]},{"z21": [32, 134,1345]}]},"c": 10}'); --- error -SELECT sub.* FROM s, - JSON_TABLE(js, '$' PASSING 32 AS x, 13 AS y COLUMNS ( - xx int path '$.c', - NESTED PATH '$.a.za[1]' columns (NESTED PATH '$.z21[*]' COLUMNS (z21 int path '$?(@ >= $"x")' ERROR ON ERROR)) - )) sub; - xx | z21 -----+------ - 3 | - 3 | 234 - 3 | 2345 - 10 | 32 - 10 | 134 - 10 | 1345 -(6 rows) - --- Parent columns xx1, xx appear before NESTED ones -SELECT sub.* FROM s, - (VALUES (23)) x(x), generate_series(13, 13) y, - JSON_TABLE(js, '$' AS c1 PASSING x AS x, y AS y COLUMNS ( - NESTED PATH '$.a.za[2]' COLUMNS ( - NESTED PATH '$.z22[*]' as z22 COLUMNS (c int PATH '$')), - NESTED PATH '$.a.za[1]' columns (d int[] PATH '$.z21'), - NESTED PATH '$.a.za[0]' columns (NESTED PATH '$.z1[*]' as z1 COLUMNS (a int PATH '$')), - xx1 int PATH '$.c', - NESTED PATH '$.a.za[1]' columns (NESTED PATH '$.z21[*]' as z21 COLUMNS (b int PATH '$')), - xx int PATH '$.c' - )) sub; - xx1 | xx | c | d | a | b ------+----+-----+---------------+------+------ - 3 | 3 | 32 | | | - 3 | 3 | 204 | | | - 3 | 3 | 145 | | | - 3 | 3 | | {22,234,2345} | | - 3 | 3 | | | 11 | - 3 | 3 | | | 2222 | - 3 | 3 | | | | 22 - 3 | 3 | | | | 234 - 3 | 3 | | | | 2345 - 10 | 10 | | {32,134,1345} | | - 10 | 10 | | | 21 | - 10 | 10 | | | 4222 | - 10 | 10 | | | | 32 - 10 | 10 | | | | 134 - 10 | 10 | | | | 1345 -(15 rows) - --- Test applying PASSING variables at different nesting levels -SELECT sub.* FROM s, - (VALUES (23)) x(x), generate_series(13, 13) y, - JSON_TABLE(js, '$' AS c1 PASSING x AS x, y AS y COLUMNS ( - xx1 int PATH '$.c', - NESTED PATH '$.a.za[0].z1[*]' COLUMNS (NESTED PATH 
'$ ?(@ >= ($"x" -2))' COLUMNS (a int PATH '$')), - NESTED PATH '$.a.za[0]' COLUMNS (NESTED PATH '$.z1[*] ? (@ >= ($"x" -2))' COLUMNS (b int PATH '$')) - )) sub; - xx1 | a | b ------+------+------ - 3 | | - 3 | 2222 | - 3 | | 2222 - 10 | 21 | - 10 | 4222 | - 10 | | 21 - 10 | | 4222 -(7 rows) - --- Test applying PASSING variable to paths all the levels -SELECT sub.* FROM s, - (VALUES (23)) x(x), - generate_series(13, 13) y, - JSON_TABLE(js, '$' AS c1 PASSING x AS x, y AS y - COLUMNS ( - xx1 int PATH '$.c', - NESTED PATH '$.a.za[1]' - COLUMNS (NESTED PATH '$.z21[*]' COLUMNS (b int PATH '$')), - NESTED PATH '$.a.za[1] ? (@.z21[*] >= ($"x"-1))' COLUMNS - (NESTED PATH '$.z21[*] ? (@ >= ($"y" + 3))' as z22 COLUMNS (a int PATH '$ ? (@ >= ($"y" + 12))')), - NESTED PATH '$.a.za[1]' COLUMNS - (NESTED PATH '$.z21[*] ? (@ >= ($"y" +121))' as z21 COLUMNS (c int PATH '$ ? (@ > ($"x" +111))')) - )) sub; - xx1 | b | a | c ------+------+------+------ - 3 | 22 | | - 3 | 234 | | - 3 | 2345 | | - 3 | | | - 3 | | 234 | - 3 | | 2345 | - 3 | | | 234 - 3 | | | 2345 - 10 | 32 | | - 10 | 134 | | - 10 | 1345 | | - 10 | | 32 | - 10 | | 134 | - 10 | | 1345 | - 10 | | | - 10 | | | 1345 -(16 rows) - ------ test on empty behavior -SELECT sub.* FROM s, - (values(23)) x(x), - generate_series(13, 13) y, - JSON_TABLE(js, '$' AS c1 PASSING x AS x, y AS y - COLUMNS ( - xx1 int PATH '$.c', - NESTED PATH '$.a.za[2]' COLUMNS (NESTED PATH '$.z22[*]' as z22 COLUMNS (c int PATH '$')), - NESTED PATH '$.a.za[1]' COLUMNS (d json PATH '$ ? (@.z21[*] == ($"x" -1))'), - NESTED PATH '$.a.za[0]' COLUMNS (NESTED PATH '$.z1[*] ? (@ >= ($"x" -2))' as z1 COLUMNS (a int PATH '$')), - NESTED PATH '$.a.za[1]' COLUMNS - (NESTED PATH '$.z21[*] ? (@ >= ($"y" +121))' as z21 COLUMNS (b int PATH '$ ? (@ > ($"x" +111))' DEFAULT 0 ON EMPTY)) - )) sub; - xx1 | c | d | a | b ------+-----+--------------------------+------+------ - 3 | 32 | | | - 3 | 204 | | | - 3 | 145 | | | - 3 | | {"z21": [22, 234, 2345]} | | - 3 | | | 2222 | - 3 | | | | 234 - 3 | | | | 2345 - 10 | | | | - 10 | | | 21 | - 10 | | | 4222 | - 10 | | | | 0 - 10 | | | | 1345 -(12 rows) - -CREATE OR REPLACE VIEW jsonb_table_view7 AS -SELECT sub.* FROM s, - (values(23)) x(x), - generate_series(13, 13) y, - JSON_TABLE(js, '$' AS c1 PASSING x AS x, y AS y - COLUMNS ( - xx1 int PATH '$.c', - NESTED PATH '$.a.za[2]' COLUMNS (NESTED PATH '$.z22[*]' as z22 COLUMNS (c int PATH '$' WITHOUT WRAPPER OMIT QUOTES)), - NESTED PATH '$.a.za[1]' COLUMNS (d json PATH '$ ? (@.z21[*] == ($"x" -1))' WITH WRAPPER), - NESTED PATH '$.a.za[0]' COLUMNS (NESTED PATH '$.z1[*] ? (@ >= ($"x" -2))' as z1 COLUMNS (a int PATH '$' KEEP QUOTES)), - NESTED PATH '$.a.za[1]' COLUMNS - (NESTED PATH '$.z21[*] ? (@ >= ($"y" +121))' as z21 COLUMNS (b int PATH '$ ? 
(@ > ($"x" +111))' DEFAULT 0 ON EMPTY)) - )) sub; -\sv jsonb_table_view7 -CREATE OR REPLACE VIEW public.jsonb_table_view7 AS - SELECT sub.xx1, - sub.c, - sub.d, - sub.a, - sub.b - FROM s, - ( VALUES (23)) x(x), - generate_series(13, 13) y(y), - LATERAL JSON_TABLE( - s.js, '$' AS c1 - PASSING - x.x AS x, - y.y AS y - COLUMNS ( - xx1 integer PATH '$."c"', - NESTED PATH '$."a"."za"[2]' AS json_table_path_0 - COLUMNS ( - NESTED PATH '$."z22"[*]' AS z22 - COLUMNS ( - c integer PATH '$' WITHOUT WRAPPER OMIT QUOTES - ) - ), - NESTED PATH '$."a"."za"[1]' AS json_table_path_1 - COLUMNS ( - d json PATH '$?(@."z21"[*] == $"x" - 1)' WITH UNCONDITIONAL WRAPPER KEEP QUOTES - ), - NESTED PATH '$."a"."za"[0]' AS json_table_path_2 - COLUMNS ( - NESTED PATH '$."z1"[*]?(@ >= $"x" - 2)' AS z1 - COLUMNS ( - a integer PATH '$' WITHOUT WRAPPER KEEP QUOTES - ) - ), - NESTED PATH '$."a"."za"[1]' AS json_table_path_3 - COLUMNS ( - NESTED PATH '$."z21"[*]?(@ >= $"y" + 121)' AS z21 - COLUMNS ( - b integer PATH '$?(@ > $"x" + 111)' DEFAULT 0 ON EMPTY - ) - ) - ) - ) sub -DROP VIEW jsonb_table_view7; -DROP TABLE s; --- Prevent ON EMPTY specification on EXISTS columns -SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int exists empty object on empty)); -ERROR: syntax error at or near "empty" -LINE 1: ...sonb '1', '$' COLUMNS (a int exists empty object on empty)); - ^ --- Test ON ERROR / EMPTY value validity for the function and column types; --- all fail -SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int) NULL ON ERROR); -ERROR: invalid ON ERROR behavior -LINE 1: ... * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int) NULL ON ER... - ^ -DETAIL: Only EMPTY [ ARRAY ] or ERROR is allowed in the top-level ON ERROR clause. -SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int true on empty)); -ERROR: invalid ON EMPTY behavior for column "a" -LINE 1: ...T * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int true on em... - ^ -DETAIL: Only ERROR, NULL, or DEFAULT expression is allowed in ON EMPTY for scalar columns. -SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int omit quotes true on error)); -ERROR: invalid ON ERROR behavior for column "a" -LINE 1: ...N_TABLE(jsonb '1', '$' COLUMNS (a int omit quotes true on er... - ^ -DETAIL: Only ERROR, NULL, EMPTY ARRAY, EMPTY OBJECT, or DEFAULT expression is allowed in ON ERROR for formatted columns. -SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int exists empty object on error)); -ERROR: invalid ON ERROR behavior for column "a" -LINE 1: ...M JSON_TABLE(jsonb '1', '$' COLUMNS (a int exists empty obje... - ^ -DETAIL: Only ERROR, TRUE, FALSE, or UNKNOWN is allowed in ON ERROR for EXISTS columns. 
--- Test JSON_TABLE() column deparsing -- don't emit default ON ERROR / EMPTY --- behavior -CREATE VIEW json_table_view8 AS SELECT * from JSON_TABLE('"a"', '$' COLUMNS (a text PATH '$')); -\sv json_table_view8; -CREATE OR REPLACE VIEW public.json_table_view8 AS - SELECT a - FROM JSON_TABLE( - '"a"'::text, '$' AS json_table_path_0 - COLUMNS ( - a text PATH '$' - ) - ) -CREATE VIEW json_table_view9 AS SELECT * from JSON_TABLE('"a"', '$' COLUMNS (a text PATH '$') ERROR ON ERROR); -\sv json_table_view9; -CREATE OR REPLACE VIEW public.json_table_view9 AS - SELECT a - FROM JSON_TABLE( - '"a"'::text, '$' AS json_table_path_0 - COLUMNS ( - a text PATH '$' - ) ERROR ON ERROR - ) -DROP VIEW json_table_view8, json_table_view9; --- Test JSON_TABLE() deparsing -- don't emit default ON ERROR behavior -CREATE VIEW json_table_view8 AS SELECT * from JSON_TABLE('"a"', '$' COLUMNS (a text PATH '$') EMPTY ON ERROR); -\sv json_table_view8; -CREATE OR REPLACE VIEW public.json_table_view8 AS - SELECT a - FROM JSON_TABLE( - '"a"'::text, '$' AS json_table_path_0 - COLUMNS ( - a text PATH '$' - ) - ) -CREATE VIEW json_table_view9 AS SELECT * from JSON_TABLE('"a"', '$' COLUMNS (a text PATH '$') EMPTY ARRAY ON ERROR); -\sv json_table_view9; -CREATE OR REPLACE VIEW public.json_table_view9 AS - SELECT a - FROM JSON_TABLE( - '"a"'::text, '$' AS json_table_path_0 - COLUMNS ( - a text PATH '$' - ) - ) -DROP VIEW json_table_view8, json_table_view9; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/plancache.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/plancache.out --- /Users/admin/pgsql/src/test/regress/expected/plancache.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/plancache.out 2024-12-13 13:20:12 @@ -1,400 +1,2 @@ --- --- Tests to exercise the plan caching/invalidation mechanism --- -CREATE TEMP TABLE pcachetest AS SELECT * FROM int8_tbl; --- create and use a cached plan -PREPARE prepstmt AS SELECT * FROM pcachetest; -EXECUTE prepstmt; - q1 | q2 -------------------+------------------- - 123 | 456 - 123 | 4567890123456789 - 4567890123456789 | 123 - 4567890123456789 | 4567890123456789 - 4567890123456789 | -4567890123456789 -(5 rows) - --- and one with parameters -PREPARE prepstmt2(bigint) AS SELECT * FROM pcachetest WHERE q1 = $1; -EXECUTE prepstmt2(123); - q1 | q2 ------+------------------ - 123 | 456 - 123 | 4567890123456789 -(2 rows) - --- invalidate the plans and see what happens -DROP TABLE pcachetest; -EXECUTE prepstmt; -ERROR: relation "pcachetest" does not exist -EXECUTE prepstmt2(123); -ERROR: relation "pcachetest" does not exist --- recreate the temp table (this demonstrates that the raw plan is --- purely textual and doesn't depend on OIDs, for instance) -CREATE TEMP TABLE pcachetest AS SELECT * FROM int8_tbl ORDER BY 2; -EXECUTE prepstmt; - q1 | q2 -------------------+------------------- - 4567890123456789 | -4567890123456789 - 4567890123456789 | 123 - 123 | 456 - 123 | 4567890123456789 - 4567890123456789 | 4567890123456789 -(5 rows) - -EXECUTE prepstmt2(123); - q1 | q2 ------+------------------ - 123 | 456 - 123 | 4567890123456789 -(2 rows) - --- prepared statements should prevent change in output tupdesc, --- since clients probably aren't expecting that to change on the fly -ALTER TABLE 
pcachetest ADD COLUMN q3 bigint; -EXECUTE prepstmt; -ERROR: cached plan must not change result type -EXECUTE prepstmt2(123); -ERROR: cached plan must not change result type --- but we're nice guys and will let you undo your mistake -ALTER TABLE pcachetest DROP COLUMN q3; -EXECUTE prepstmt; - q1 | q2 -------------------+------------------- - 4567890123456789 | -4567890123456789 - 4567890123456789 | 123 - 123 | 456 - 123 | 4567890123456789 - 4567890123456789 | 4567890123456789 -(5 rows) - -EXECUTE prepstmt2(123); - q1 | q2 ------+------------------ - 123 | 456 - 123 | 4567890123456789 -(2 rows) - --- Try it with a view, which isn't directly used in the resulting plan --- but should trigger invalidation anyway -CREATE TEMP VIEW pcacheview AS - SELECT * FROM pcachetest; -PREPARE vprep AS SELECT * FROM pcacheview; -EXECUTE vprep; - q1 | q2 -------------------+------------------- - 4567890123456789 | -4567890123456789 - 4567890123456789 | 123 - 123 | 456 - 123 | 4567890123456789 - 4567890123456789 | 4567890123456789 -(5 rows) - -CREATE OR REPLACE TEMP VIEW pcacheview AS - SELECT q1, q2/2 AS q2 FROM pcachetest; -EXECUTE vprep; - q1 | q2 -------------------+------------------- - 4567890123456789 | -2283945061728394 - 4567890123456789 | 61 - 123 | 228 - 123 | 2283945061728394 - 4567890123456789 | 2283945061728394 -(5 rows) - --- Check basic SPI plan invalidation -create function cache_test(int) returns int as $$ -declare total int; -begin - create temp table t1(f1 int); - insert into t1 values($1); - insert into t1 values(11); - insert into t1 values(12); - insert into t1 values(13); - select sum(f1) into total from t1; - drop table t1; - return total; -end -$$ language plpgsql; -select cache_test(1); - cache_test ------------- - 37 -(1 row) - -select cache_test(2); - cache_test ------------- - 38 -(1 row) - -select cache_test(3); - cache_test ------------- - 39 -(1 row) - --- Check invalidation of plpgsql "simple expression" -create temp view v1 as - select 2+2 as f1; -create function cache_test_2() returns int as $$ -begin - return f1 from v1; -end$$ language plpgsql; -select cache_test_2(); - cache_test_2 --------------- - 4 -(1 row) - -create or replace temp view v1 as - select 2+2+4 as f1; -select cache_test_2(); - cache_test_2 --------------- - 8 -(1 row) - -create or replace temp view v1 as - select 2+2+4+(select max(unique1) from tenk1) as f1; -select cache_test_2(); - cache_test_2 --------------- - 10007 -(1 row) - ---- Check that change of search_path is honored when re-using cached plan -create schema s1 - create table abc (f1 int); -create schema s2 - create table abc (f1 int); -insert into s1.abc values(123); -insert into s2.abc values(456); -set search_path = s1; -prepare p1 as select f1 from abc; -execute p1; - f1 ------ - 123 -(1 row) - -set search_path = s2; -select f1 from abc; - f1 ------ - 456 -(1 row) - -execute p1; - f1 ------ - 456 -(1 row) - -alter table s1.abc add column f2 float8; -- force replan -execute p1; - f1 ------ - 456 -(1 row) - -drop schema s1 cascade; -NOTICE: drop cascades to table s1.abc -drop schema s2 cascade; -NOTICE: drop cascades to table abc -reset search_path; --- Check that invalidation deals with regclass constants -create temp sequence seq; -prepare p2 as select nextval('seq'); -execute p2; - nextval ---------- - 1 -(1 row) - -drop sequence seq; -create temp sequence seq; -execute p2; - nextval ---------- - 1 -(1 row) - --- Check DDL via SPI, immediately followed by SPI plan re-use --- (bug in original coding) -create function cachebug() returns void 
as $$ -declare r int; -begin - drop table if exists temptable cascade; - create temp table temptable as select * from generate_series(1,3) as f1; - create temp view vv as select * from temptable; - for r in select * from vv loop - raise notice '%', r; - end loop; -end$$ language plpgsql; -select cachebug(); -NOTICE: table "temptable" does not exist, skipping -NOTICE: 1 -NOTICE: 2 -NOTICE: 3 - cachebug ----------- - -(1 row) - -select cachebug(); -NOTICE: drop cascades to view vv -NOTICE: 1 -NOTICE: 2 -NOTICE: 3 - cachebug ----------- - -(1 row) - --- Check that addition or removal of any partition is correctly dealt with by --- default partition table when it is being used in prepared statement. -create table pc_list_parted (a int) partition by list(a); -create table pc_list_part_null partition of pc_list_parted for values in (null); -create table pc_list_part_1 partition of pc_list_parted for values in (1); -create table pc_list_part_def partition of pc_list_parted default; -prepare pstmt_def_insert (int) as insert into pc_list_part_def values($1); --- should fail -execute pstmt_def_insert(null); -ERROR: new row for relation "pc_list_part_def" violates partition constraint -DETAIL: Failing row contains (null). -execute pstmt_def_insert(1); -ERROR: new row for relation "pc_list_part_def" violates partition constraint -DETAIL: Failing row contains (1). -create table pc_list_part_2 partition of pc_list_parted for values in (2); -execute pstmt_def_insert(2); -ERROR: new row for relation "pc_list_part_def" violates partition constraint -DETAIL: Failing row contains (2). -alter table pc_list_parted detach partition pc_list_part_null; --- should be ok -execute pstmt_def_insert(null); -drop table pc_list_part_1; --- should be ok -execute pstmt_def_insert(1); -drop table pc_list_parted, pc_list_part_null; -deallocate pstmt_def_insert; --- Test plan_cache_mode -create table test_mode (a int); -insert into test_mode select 1 from generate_series(1,1000) union all select 2; -create index on test_mode (a); -analyze test_mode; -prepare test_mode_pp (int) as select count(*) from test_mode where a = $1; -select name, generic_plans, custom_plans from pg_prepared_statements - where name = 'test_mode_pp'; - name | generic_plans | custom_plans ---------------+---------------+-------------- - test_mode_pp | 0 | 0 -(1 row) - --- up to 5 executions, custom plan is used -set plan_cache_mode to auto; -explain (costs off) execute test_mode_pp(2); - QUERY PLAN ----------------------------------------------------------- - Aggregate - -> Index Only Scan using test_mode_a_idx on test_mode - Index Cond: (a = 2) -(3 rows) - -select name, generic_plans, custom_plans from pg_prepared_statements - where name = 'test_mode_pp'; - name | generic_plans | custom_plans ---------------+---------------+-------------- - test_mode_pp | 0 | 1 -(1 row) - --- force generic plan -set plan_cache_mode to force_generic_plan; -explain (costs off) execute test_mode_pp(2); - QUERY PLAN ------------------------------ - Aggregate - -> Seq Scan on test_mode - Filter: (a = $1) -(3 rows) - -select name, generic_plans, custom_plans from pg_prepared_statements - where name = 'test_mode_pp'; - name | generic_plans | custom_plans ---------------+---------------+-------------- - test_mode_pp | 1 | 1 -(1 row) - --- get to generic plan by 5 executions -set plan_cache_mode to auto; -execute test_mode_pp(1); -- 1x - count -------- - 1000 -(1 row) - -execute test_mode_pp(1); -- 2x - count -------- - 1000 -(1 row) - -execute test_mode_pp(1); -- 3x - count 
-------- - 1000 -(1 row) - -execute test_mode_pp(1); -- 4x - count -------- - 1000 -(1 row) - -select name, generic_plans, custom_plans from pg_prepared_statements - where name = 'test_mode_pp'; - name | generic_plans | custom_plans ---------------+---------------+-------------- - test_mode_pp | 1 | 5 -(1 row) - -execute test_mode_pp(1); -- 5x - count -------- - 1000 -(1 row) - -select name, generic_plans, custom_plans from pg_prepared_statements - where name = 'test_mode_pp'; - name | generic_plans | custom_plans ---------------+---------------+-------------- - test_mode_pp | 2 | 5 -(1 row) - --- we should now get a really bad plan -explain (costs off) execute test_mode_pp(2); - QUERY PLAN ------------------------------ - Aggregate - -> Seq Scan on test_mode - Filter: (a = $1) -(3 rows) - --- but we can force a custom plan -set plan_cache_mode to force_custom_plan; -explain (costs off) execute test_mode_pp(2); - QUERY PLAN ----------------------------------------------------------- - Aggregate - -> Index Only Scan using test_mode_a_idx on test_mode - Index Cond: (a = 2) -(3 rows) - -select name, generic_plans, custom_plans from pg_prepared_statements - where name = 'test_mode_pp'; - name | generic_plans | custom_plans ---------------+---------------+-------------- - test_mode_pp | 3 | 6 -(1 row) - -drop table test_mode; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/limit.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/limit.out --- /Users/admin/pgsql/src/test/regress/expected/limit.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/limit.out 2024-12-13 13:20:12 @@ -1,698 +1,2 @@ --- --- LIMIT --- Check the LIMIT/OFFSET feature of SELECT --- -SELECT ''::text AS two, unique1, unique2, stringu1 - FROM onek WHERE unique1 > 50 - ORDER BY unique1 LIMIT 2; - two | unique1 | unique2 | stringu1 ------+---------+---------+---------- - | 51 | 76 | ZBAAAA - | 52 | 985 | ACAAAA -(2 rows) - -SELECT ''::text AS five, unique1, unique2, stringu1 - FROM onek WHERE unique1 > 60 - ORDER BY unique1 LIMIT 5; - five | unique1 | unique2 | stringu1 -------+---------+---------+---------- - | 61 | 560 | JCAAAA - | 62 | 633 | KCAAAA - | 63 | 296 | LCAAAA - | 64 | 479 | MCAAAA - | 65 | 64 | NCAAAA -(5 rows) - -SELECT ''::text AS two, unique1, unique2, stringu1 - FROM onek WHERE unique1 > 60 AND unique1 < 63 - ORDER BY unique1 LIMIT 5; - two | unique1 | unique2 | stringu1 ------+---------+---------+---------- - | 61 | 560 | JCAAAA - | 62 | 633 | KCAAAA -(2 rows) - -SELECT ''::text AS three, unique1, unique2, stringu1 - FROM onek WHERE unique1 > 100 - ORDER BY unique1 LIMIT 3 OFFSET 20; - three | unique1 | unique2 | stringu1 --------+---------+---------+---------- - | 121 | 700 | REAAAA - | 122 | 519 | SEAAAA - | 123 | 777 | TEAAAA -(3 rows) - -SELECT ''::text AS zero, unique1, unique2, stringu1 - FROM onek WHERE unique1 < 50 - ORDER BY unique1 DESC LIMIT 8 OFFSET 99; - zero | unique1 | unique2 | stringu1 -------+---------+---------+---------- -(0 rows) - -SELECT ''::text AS eleven, unique1, unique2, stringu1 - FROM onek WHERE unique1 < 50 - ORDER BY unique1 DESC LIMIT 20 OFFSET 39; - eleven | unique1 | unique2 | stringu1 ---------+---------+---------+---------- - | 10 | 520 | KAAAAA - | 9 | 49 | JAAAAA - | 8 
| 653 | IAAAAA - | 7 | 647 | HAAAAA - | 6 | 978 | GAAAAA - | 5 | 541 | FAAAAA - | 4 | 833 | EAAAAA - | 3 | 431 | DAAAAA - | 2 | 326 | CAAAAA - | 1 | 214 | BAAAAA - | 0 | 998 | AAAAAA -(11 rows) - -SELECT ''::text AS ten, unique1, unique2, stringu1 - FROM onek - ORDER BY unique1 OFFSET 990; - ten | unique1 | unique2 | stringu1 ------+---------+---------+---------- - | 990 | 369 | CMAAAA - | 991 | 426 | DMAAAA - | 992 | 363 | EMAAAA - | 993 | 661 | FMAAAA - | 994 | 695 | GMAAAA - | 995 | 144 | HMAAAA - | 996 | 258 | IMAAAA - | 997 | 21 | JMAAAA - | 998 | 549 | KMAAAA - | 999 | 152 | LMAAAA -(10 rows) - -SELECT ''::text AS five, unique1, unique2, stringu1 - FROM onek - ORDER BY unique1 OFFSET 990 LIMIT 5; - five | unique1 | unique2 | stringu1 -------+---------+---------+---------- - | 990 | 369 | CMAAAA - | 991 | 426 | DMAAAA - | 992 | 363 | EMAAAA - | 993 | 661 | FMAAAA - | 994 | 695 | GMAAAA -(5 rows) - -SELECT ''::text AS five, unique1, unique2, stringu1 - FROM onek - ORDER BY unique1 LIMIT 5 OFFSET 900; - five | unique1 | unique2 | stringu1 -------+---------+---------+---------- - | 900 | 913 | QIAAAA - | 901 | 931 | RIAAAA - | 902 | 702 | SIAAAA - | 903 | 641 | TIAAAA - | 904 | 793 | UIAAAA -(5 rows) - --- Test null limit and offset. The planner would discard a simple null --- constant, so to ensure executor is exercised, do this: -select * from int8_tbl limit (case when random() < 0.5 then null::bigint end); - q1 | q2 -------------------+------------------- - 123 | 456 - 123 | 4567890123456789 - 4567890123456789 | 123 - 4567890123456789 | 4567890123456789 - 4567890123456789 | -4567890123456789 -(5 rows) - -select * from int8_tbl offset (case when random() < 0.5 then null::bigint end); - q1 | q2 -------------------+------------------- - 123 | 456 - 123 | 4567890123456789 - 4567890123456789 | 123 - 4567890123456789 | 4567890123456789 - 4567890123456789 | -4567890123456789 -(5 rows) - --- Test assorted cases involving backwards fetch from a LIMIT plan node -begin; -declare c1 cursor for select * from int8_tbl limit 10; -fetch all in c1; - q1 | q2 -------------------+------------------- - 123 | 456 - 123 | 4567890123456789 - 4567890123456789 | 123 - 4567890123456789 | 4567890123456789 - 4567890123456789 | -4567890123456789 -(5 rows) - -fetch 1 in c1; - q1 | q2 -----+---- -(0 rows) - -fetch backward 1 in c1; - q1 | q2 -------------------+------------------- - 4567890123456789 | -4567890123456789 -(1 row) - -fetch backward all in c1; - q1 | q2 -------------------+------------------ - 4567890123456789 | 4567890123456789 - 4567890123456789 | 123 - 123 | 4567890123456789 - 123 | 456 -(4 rows) - -fetch backward 1 in c1; - q1 | q2 -----+---- -(0 rows) - -fetch all in c1; - q1 | q2 -------------------+------------------- - 123 | 456 - 123 | 4567890123456789 - 4567890123456789 | 123 - 4567890123456789 | 4567890123456789 - 4567890123456789 | -4567890123456789 -(5 rows) - -declare c2 cursor for select * from int8_tbl limit 3; -fetch all in c2; - q1 | q2 -------------------+------------------ - 123 | 456 - 123 | 4567890123456789 - 4567890123456789 | 123 -(3 rows) - -fetch 1 in c2; - q1 | q2 -----+---- -(0 rows) - -fetch backward 1 in c2; - q1 | q2 -------------------+----- - 4567890123456789 | 123 -(1 row) - -fetch backward all in c2; - q1 | q2 ------+------------------ - 123 | 4567890123456789 - 123 | 456 -(2 rows) - -fetch backward 1 in c2; - q1 | q2 -----+---- -(0 rows) - -fetch all in c2; - q1 | q2 -------------------+------------------ - 123 | 456 - 123 | 4567890123456789 - 4567890123456789 | 123 
-(3 rows)
-
-declare c3 cursor for select * from int8_tbl offset 3;
-fetch all in c3;
- q1 | q2
-------------------+-------------------
- 4567890123456789 | 4567890123456789
- 4567890123456789 | -4567890123456789
-(2 rows)
-
-fetch 1 in c3;
- q1 | q2
-----+----
-(0 rows)
-
-fetch backward 1 in c3;
- q1 | q2
-------------------+-------------------
- 4567890123456789 | -4567890123456789
-(1 row)
-
-fetch backward all in c3;
- q1 | q2
-------------------+------------------
- 4567890123456789 | 4567890123456789
-(1 row)
-
-fetch backward 1 in c3;
- q1 | q2
-----+----
-(0 rows)
-
-fetch all in c3;
- q1 | q2
-------------------+-------------------
- 4567890123456789 | 4567890123456789
- 4567890123456789 | -4567890123456789
-(2 rows)
-
-declare c4 cursor for select * from int8_tbl offset 10;
-fetch all in c4;
- q1 | q2
-----+----
-(0 rows)
-
-fetch 1 in c4;
- q1 | q2
-----+----
-(0 rows)
-
-fetch backward 1 in c4;
- q1 | q2
-----+----
-(0 rows)
-
-fetch backward all in c4;
- q1 | q2
-----+----
-(0 rows)
-
-fetch backward 1 in c4;
- q1 | q2
-----+----
-(0 rows)
-
-fetch all in c4;
- q1 | q2
-----+----
-(0 rows)
-
-declare c5 cursor for select * from int8_tbl order by q1 fetch first 2 rows with ties;
-fetch all in c5;
- q1 | q2
------+------------------
- 123 | 456
- 123 | 4567890123456789
-(2 rows)
-
-fetch 1 in c5;
- q1 | q2
-----+----
-(0 rows)
-
-fetch backward 1 in c5;
- q1 | q2
------+------------------
- 123 | 4567890123456789
-(1 row)
-
-fetch backward 1 in c5;
- q1 | q2
------+-----
- 123 | 456
-(1 row)
-
-fetch all in c5;
- q1 | q2
------+------------------
- 123 | 4567890123456789
-(1 row)
-
-fetch backward all in c5;
- q1 | q2
------+------------------
- 123 | 4567890123456789
- 123 | 456
-(2 rows)
-
-fetch all in c5;
- q1 | q2
------+------------------
- 123 | 456
- 123 | 4567890123456789
-(2 rows)
-
-fetch backward all in c5;
- q1 | q2
------+------------------
- 123 | 4567890123456789
- 123 | 456
-(2 rows)
-
-rollback;
--- Stress test for variable LIMIT in conjunction with bounded-heap sorting
-SELECT
- (SELECT n
- FROM (VALUES (1)) AS x,
- (SELECT n FROM generate_series(1,10) AS n
- ORDER BY n LIMIT 1 OFFSET s-1) AS y) AS z
- FROM generate_series(1,10) AS s;
- z
----
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
-(10 rows)
-
---
--- Test behavior of volatile and set-returning functions in conjunction
--- with ORDER BY and LIMIT.
---
-create temp sequence testseq;
-explain (verbose, costs off)
-select unique1, unique2, nextval('testseq')
- from tenk1 order by unique2 limit 10;
- QUERY PLAN
----------------------------------------------------------------
- Limit
- Output: unique1, unique2, (nextval('testseq'::regclass))
- -> Index Scan using tenk1_unique2 on public.tenk1
- Output: unique1, unique2, nextval('testseq'::regclass)
-(4 rows)
-
-select unique1, unique2, nextval('testseq')
- from tenk1 order by unique2 limit 10;
- unique1 | unique2 | nextval
----------+---------+---------
- 8800 | 0 | 1
- 1891 | 1 | 2
- 3420 | 2 | 3
- 9850 | 3 | 4
- 7164 | 4 | 5
- 8009 | 5 | 6
- 5057 | 6 | 7
- 6701 | 7 | 8
- 4321 | 8 | 9
- 3043 | 9 | 10
-(10 rows)
-
-select currval('testseq');
- currval
---------
- 10
-(1 row)
-
-explain (verbose, costs off)
-select unique1, unique2, nextval('testseq')
- from tenk1 order by tenthous limit 10;
- QUERY PLAN
-------------------------------------------------------------------------
- Limit
- Output: unique1, unique2, (nextval('testseq'::regclass)), tenthous
- -> Result
- Output: unique1, unique2, nextval('testseq'::regclass), tenthous
- -> Sort
- Output: unique1, unique2, tenthous
- Sort Key: tenk1.tenthous
- -> Seq Scan on public.tenk1
- Output: unique1, unique2, tenthous
-(9 rows)
-
-select unique1, unique2, nextval('testseq')
- from tenk1 order by tenthous limit 10;
- unique1 | unique2 | nextval
----------+---------+---------
- 0 | 9998 | 11
- 1 | 2838 | 12
- 2 | 2716 | 13
- 3 | 5679 | 14
- 4 | 1621 | 15
- 5 | 5557 | 16
- 6 | 2855 | 17
- 7 | 8518 | 18
- 8 | 5435 | 19
- 9 | 4463 | 20
-(10 rows)
-
-select currval('testseq');
- currval
---------
- 20
-(1 row)
-
-explain (verbose, costs off)
-select unique1, unique2, generate_series(1,10)
- from tenk1 order by unique2 limit 7;
- QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
- Output: unique1, unique2, (generate_series(1, 10))
- -> ProjectSet
- Output: unique1, unique2, generate_series(1, 10)
- -> Index Scan using tenk1_unique2 on public.tenk1
- Output: unique1, unique2, two, four, ten, twenty, hundred, thousand, twothousand, fivethous, tenthous, odd, even, stringu1, stringu2, string4
-(6 rows)
-
-select unique1, unique2, generate_series(1,10)
- from tenk1 order by unique2 limit 7;
- unique1 | unique2 | generate_series
----------+---------+-----------------
- 8800 | 0 | 1
- 8800 | 0 | 2
- 8800 | 0 | 3
- 8800 | 0 | 4
- 8800 | 0 | 5
- 8800 | 0 | 6
- 8800 | 0 | 7
-(7 rows)
-
-explain (verbose, costs off)
-select unique1, unique2, generate_series(1,10)
- from tenk1 order by tenthous limit 7;
- QUERY PLAN
--------------------------------------------------------------------
- Limit
- Output: unique1, unique2, (generate_series(1, 10)), tenthous
- -> ProjectSet
- Output: unique1, unique2, generate_series(1, 10), tenthous
- -> Sort
- Output: unique1, unique2, tenthous
- Sort Key: tenk1.tenthous
- -> Seq Scan on public.tenk1
- Output: unique1, unique2, tenthous
-(9 rows)
-
-select unique1, unique2, generate_series(1,10)
- from tenk1 order by tenthous limit 7;
- unique1 | unique2 | generate_series
----------+---------+-----------------
- 0 | 9998 | 1
- 0 | 9998 | 2
- 0 | 9998 | 3
- 0 | 9998 | 4
- 0 | 9998 | 5
- 0 | 9998 | 6
- 0 | 9998 | 7
-(7 rows)
-
--- use of random() is to keep planner from folding the expressions together
-explain (verbose, costs off)
-select generate_series(0,2) as s1, generate_series((random()*.1)::int,2) as s2;
- QUERY PLAN
------------------------------------------------------------------------------------------------------
- ProjectSet
- Output: generate_series(0, 2), generate_series(((random() * '0.1'::double precision))::integer, 2)
- -> Result
-(3 rows)
-
-select generate_series(0,2) as s1, generate_series((random()*.1)::int,2) as s2;
- s1 | s2
-----+----
- 0 | 0
- 1 | 1
- 2 | 2
-(3 rows)
-
-explain (verbose, costs off)
-select generate_series(0,2) as s1, generate_series((random()*.1)::int,2) as s2
-order by s2 desc;
- QUERY PLAN
------------------------------------------------------------------------------------------------------------
- Sort
- Output: (generate_series(0, 2)), (generate_series(((random() * '0.1'::double precision))::integer, 2))
- Sort Key: (generate_series(((random() * '0.1'::double precision))::integer, 2)) DESC
- -> ProjectSet
- Output: generate_series(0, 2), generate_series(((random() * '0.1'::double precision))::integer, 2)
- -> Result
-(6 rows)
-
-select generate_series(0,2) as s1, generate_series((random()*.1)::int,2) as s2
-order by s2 desc;
- s1 | s2
-----+----
- 2 | 2
- 1 | 1
- 0 | 0
-(3 rows)
-
--- test for failure to set all aggregates' aggtranstype
-explain (verbose, costs off)
-select sum(tenthous) as s1, sum(tenthous) + random()*0 as s2
- from tenk1 group by thousand order by thousand limit 3;
- QUERY PLAN
-------------------------------------------------------------------------------------------------------------------
- Limit
- Output: (sum(tenthous)), (((sum(tenthous))::double precision + (random() * '0'::double precision))), thousand
- -> GroupAggregate
- Output: sum(tenthous), ((sum(tenthous))::double precision + (random() * '0'::double precision)), thousand
- Group Key: tenk1.thousand
- -> Index Only Scan using tenk1_thous_tenthous on public.tenk1
- Output: thousand, tenthous
-(7 rows)
-
-select sum(tenthous) as s1, sum(tenthous) + random()*0 as s2
- from tenk1 group by thousand order by thousand limit 3;
- s1 | s2
--------+-------
- 45000 | 45000
- 45010 | 45010
- 45020 | 45020
-(3 rows)
-
---
--- FETCH FIRST
--- Check the WITH TIES clause
---
-SELECT thousand
- FROM onek WHERE thousand < 5
- ORDER BY thousand FETCH FIRST 2 ROW WITH TIES;
- thousand
----------
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
-(10 rows)
-
-SELECT thousand
- FROM onek WHERE thousand < 5
- ORDER BY thousand FETCH FIRST ROWS WITH TIES;
- thousand
----------
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
-(10 rows)
-
-SELECT thousand
- FROM onek WHERE thousand < 5
- ORDER BY thousand FETCH FIRST 1 ROW WITH TIES;
- thousand
----------
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
-(10 rows)
-
-SELECT thousand
- FROM onek WHERE thousand < 5
- ORDER BY thousand FETCH FIRST 2 ROW ONLY;
- thousand
----------
- 0
- 0
-(2 rows)
-
--- SKIP LOCKED and WITH TIES are incompatible
-SELECT thousand
- FROM onek WHERE thousand < 5
- ORDER BY thousand FETCH FIRST 1 ROW WITH TIES FOR UPDATE SKIP LOCKED;
-ERROR: SKIP LOCKED and WITH TIES options cannot be used together
-LINE 3: ORDER BY thousand FETCH FIRST 1 ROW WITH TIES FOR UPDATE S...
- ^ --- should fail -SELECT ''::text AS two, unique1, unique2, stringu1 - FROM onek WHERE unique1 > 50 - FETCH FIRST 2 ROW WITH TIES; -ERROR: WITH TIES cannot be specified without ORDER BY clause -LINE 3: FETCH FIRST 2 ROW WITH TIES; - ^ --- test ruleutils -CREATE VIEW limit_thousand_v_1 AS SELECT thousand FROM onek WHERE thousand < 995 - ORDER BY thousand FETCH FIRST 5 ROWS WITH TIES OFFSET 10; -\d+ limit_thousand_v_1 - View "public.limit_thousand_v_1" - Column | Type | Collation | Nullable | Default | Storage | Description -----------+---------+-----------+----------+---------+---------+------------- - thousand | integer | | | | plain | -View definition: - SELECT thousand - FROM onek - WHERE thousand < 995 - ORDER BY thousand - OFFSET 10 - FETCH FIRST 5 ROWS WITH TIES; - -CREATE VIEW limit_thousand_v_2 AS SELECT thousand FROM onek WHERE thousand < 995 - ORDER BY thousand OFFSET 10 FETCH FIRST 5 ROWS ONLY; -\d+ limit_thousand_v_2 - View "public.limit_thousand_v_2" - Column | Type | Collation | Nullable | Default | Storage | Description -----------+---------+-----------+----------+---------+---------+------------- - thousand | integer | | | | plain | -View definition: - SELECT thousand - FROM onek - WHERE thousand < 995 - ORDER BY thousand - OFFSET 10 - LIMIT 5; - -CREATE VIEW limit_thousand_v_3 AS SELECT thousand FROM onek WHERE thousand < 995 - ORDER BY thousand FETCH FIRST NULL ROWS WITH TIES; -- fails -ERROR: row count cannot be null in FETCH FIRST ... WITH TIES clause -CREATE VIEW limit_thousand_v_3 AS SELECT thousand FROM onek WHERE thousand < 995 - ORDER BY thousand FETCH FIRST (NULL+1) ROWS WITH TIES; -\d+ limit_thousand_v_3 - View "public.limit_thousand_v_3" - Column | Type | Collation | Nullable | Default | Storage | Description -----------+---------+-----------+----------+---------+---------+------------- - thousand | integer | | | | plain | -View definition: - SELECT thousand - FROM onek - WHERE thousand < 995 - ORDER BY thousand - FETCH FIRST (NULL::integer + 1) ROWS WITH TIES; - -CREATE VIEW limit_thousand_v_4 AS SELECT thousand FROM onek WHERE thousand < 995 - ORDER BY thousand FETCH FIRST NULL ROWS ONLY; -\d+ limit_thousand_v_4 - View "public.limit_thousand_v_4" - Column | Type | Collation | Nullable | Default | Storage | Description -----------+---------+-----------+----------+---------+---------+------------- - thousand | integer | | | | plain | -View definition: - SELECT thousand - FROM onek - WHERE thousand < 995 - ORDER BY thousand - LIMIT ALL; - --- leave these views +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/plpgsql.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/plpgsql.out --- /Users/admin/pgsql/src/test/regress/expected/plpgsql.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/plpgsql.out 2024-12-13 13:20:12 @@ -1,5854 +1,2 @@ --- --- PLPGSQL --- --- Scenario: --- --- A building with a modern TP cable installation where any --- of the wall connectors can be used to plug in phones, --- ethernet interfaces or local office hubs. The backside --- of the wall connectors is wired to one of several patch- --- fields in the building. --- --- In the patchfields, there are hubs and all the slots --- representing the wall connectors. 
In addition there are --- slots that can represent a phone line from the central --- phone system. --- --- Triggers ensure consistency of the patching information. --- --- Functions are used to build up powerful views that let --- you look behind the wall when looking at a patchfield --- or into a room. --- -create table Room ( - roomno char(8), - comment text -); -create unique index Room_rno on Room using btree (roomno bpchar_ops); -create table WSlot ( - slotname char(20), - roomno char(8), - slotlink char(20), - backlink char(20) -); -create unique index WSlot_name on WSlot using btree (slotname bpchar_ops); -create table PField ( - name text, - comment text -); -create unique index PField_name on PField using btree (name text_ops); -create table PSlot ( - slotname char(20), - pfname text, - slotlink char(20), - backlink char(20) -); -create unique index PSlot_name on PSlot using btree (slotname bpchar_ops); -create table PLine ( - slotname char(20), - phonenumber char(20), - comment text, - backlink char(20) -); -create unique index PLine_name on PLine using btree (slotname bpchar_ops); -create table Hub ( - name char(14), - comment text, - nslots integer -); -create unique index Hub_name on Hub using btree (name bpchar_ops); -create table HSlot ( - slotname char(20), - hubname char(14), - slotno integer, - slotlink char(20) -); -create unique index HSlot_name on HSlot using btree (slotname bpchar_ops); -create index HSlot_hubname on HSlot using btree (hubname bpchar_ops); -create table System ( - name text, - comment text -); -create unique index System_name on System using btree (name text_ops); -create table IFace ( - slotname char(20), - sysname text, - ifname text, - slotlink char(20) -); -create unique index IFace_name on IFace using btree (slotname bpchar_ops); -create table PHone ( - slotname char(20), - comment text, - slotlink char(20) -); -create unique index PHone_name on PHone using btree (slotname bpchar_ops); --- ************************************************************ --- * --- * Trigger procedures and functions for the patchfield --- * test of PL/pgSQL --- * --- ************************************************************ --- ************************************************************ --- * AFTER UPDATE on Room --- * - If room no changes let wall slots follow --- ************************************************************ -create function tg_room_au() returns trigger as ' -begin - if new.roomno != old.roomno then - update WSlot set roomno = new.roomno where roomno = old.roomno; - end if; - return new; -end; -' language plpgsql; -create trigger tg_room_au after update - on Room for each row execute procedure tg_room_au(); --- ************************************************************ --- * AFTER DELETE on Room --- * - delete wall slots in this room --- ************************************************************ -create function tg_room_ad() returns trigger as ' -begin - delete from WSlot where roomno = old.roomno; - return old; -end; -' language plpgsql; -create trigger tg_room_ad after delete - on Room for each row execute procedure tg_room_ad(); --- ************************************************************ --- * BEFORE INSERT or UPDATE on WSlot --- * - Check that room exists --- ************************************************************ -create function tg_wslot_biu() returns trigger as $$ -begin - if count(*) = 0 from Room where roomno = new.roomno then - raise exception 'Room % does not exist', new.roomno; - end if; - return new; -end; -$$ language 
plpgsql; -create trigger tg_wslot_biu before insert or update - on WSlot for each row execute procedure tg_wslot_biu(); --- ************************************************************ --- * AFTER UPDATE on PField --- * - Let PSlots of this field follow --- ************************************************************ -create function tg_pfield_au() returns trigger as ' -begin - if new.name != old.name then - update PSlot set pfname = new.name where pfname = old.name; - end if; - return new; -end; -' language plpgsql; -create trigger tg_pfield_au after update - on PField for each row execute procedure tg_pfield_au(); --- ************************************************************ --- * AFTER DELETE on PField --- * - Remove all slots of this patchfield --- ************************************************************ -create function tg_pfield_ad() returns trigger as ' -begin - delete from PSlot where pfname = old.name; - return old; -end; -' language plpgsql; -create trigger tg_pfield_ad after delete - on PField for each row execute procedure tg_pfield_ad(); --- ************************************************************ --- * BEFORE INSERT or UPDATE on PSlot --- * - Ensure that our patchfield does exist --- ************************************************************ -create function tg_pslot_biu() returns trigger as $proc$ -declare - pfrec record; - ps alias for new; -begin - select into pfrec * from PField where name = ps.pfname; - if not found then - raise exception $$Patchfield "%" does not exist$$, ps.pfname; - end if; - return ps; -end; -$proc$ language plpgsql; -create trigger tg_pslot_biu before insert or update - on PSlot for each row execute procedure tg_pslot_biu(); --- ************************************************************ --- * AFTER UPDATE on System --- * - If system name changes let interfaces follow --- ************************************************************ -create function tg_system_au() returns trigger as ' -begin - if new.name != old.name then - update IFace set sysname = new.name where sysname = old.name; - end if; - return new; -end; -' language plpgsql; -create trigger tg_system_au after update - on System for each row execute procedure tg_system_au(); --- ************************************************************ --- * BEFORE INSERT or UPDATE on IFace --- * - set the slotname to IF.sysname.ifname --- ************************************************************ -create function tg_iface_biu() returns trigger as $$ -declare - sname text; - sysrec record; -begin - select into sysrec * from system where name = new.sysname; - if not found then - raise exception $q$system "%" does not exist$q$, new.sysname; - end if; - sname := 'IF.' 
|| new.sysname; - sname := sname || '.'; - sname := sname || new.ifname; - if length(sname) > 20 then - raise exception 'IFace slotname "%" too long (20 char max)', sname; - end if; - new.slotname := sname; - return new; -end; -$$ language plpgsql; -create trigger tg_iface_biu before insert or update - on IFace for each row execute procedure tg_iface_biu(); --- ************************************************************ --- * AFTER INSERT or UPDATE or DELETE on Hub --- * - insert/delete/rename slots as required --- ************************************************************ -create function tg_hub_a() returns trigger as ' -declare - hname text; - dummy integer; -begin - if tg_op = ''INSERT'' then - dummy := tg_hub_adjustslots(new.name, 0, new.nslots); - return new; - end if; - if tg_op = ''UPDATE'' then - if new.name != old.name then - update HSlot set hubname = new.name where hubname = old.name; - end if; - dummy := tg_hub_adjustslots(new.name, old.nslots, new.nslots); - return new; - end if; - if tg_op = ''DELETE'' then - dummy := tg_hub_adjustslots(old.name, old.nslots, 0); - return old; - end if; -end; -' language plpgsql; -create trigger tg_hub_a after insert or update or delete - on Hub for each row execute procedure tg_hub_a(); --- ************************************************************ --- * Support function to add/remove slots of Hub --- ************************************************************ -create function tg_hub_adjustslots(hname bpchar, - oldnslots integer, - newnslots integer) -returns integer as ' -begin - if newnslots = oldnslots then - return 0; - end if; - if newnslots < oldnslots then - delete from HSlot where hubname = hname and slotno > newnslots; - return 0; - end if; - for i in oldnslots + 1 .. newnslots loop - insert into HSlot (slotname, hubname, slotno, slotlink) - values (''HS.dummy'', hname, i, ''''); - end loop; - return 0; -end -' language plpgsql; --- Test comments -COMMENT ON FUNCTION tg_hub_adjustslots_wrong(bpchar, integer, integer) IS 'function with args'; -ERROR: function tg_hub_adjustslots_wrong(character, integer, integer) does not exist -COMMENT ON FUNCTION tg_hub_adjustslots(bpchar, integer, integer) IS 'function with args'; -COMMENT ON FUNCTION tg_hub_adjustslots(bpchar, integer, integer) IS NULL; --- ************************************************************ --- * BEFORE INSERT or UPDATE on HSlot --- * - prevent from manual manipulation --- * - set the slotname to HS.hubname.slotno --- ************************************************************ -create function tg_hslot_biu() returns trigger as ' -declare - sname text; - xname HSlot.slotname%TYPE; - hubrec record; -begin - select into hubrec * from Hub where name = new.hubname; - if not found then - raise exception ''no manual manipulation of HSlot''; - end if; - if new.slotno < 1 or new.slotno > hubrec.nslots then - raise exception ''no manual manipulation of HSlot''; - end if; - if tg_op = ''UPDATE'' and new.hubname != old.hubname then - if count(*) > 0 from Hub where name = old.hubname then - raise exception ''no manual manipulation of HSlot''; - end if; - end if; - sname := ''HS.'' || trim(new.hubname); - sname := sname || ''.''; - sname := sname || new.slotno::text; - if length(sname) > 20 then - raise exception ''HSlot slotname "%" too long (20 char max)'', sname; - end if; - new.slotname := sname; - return new; -end; -' language plpgsql; -create trigger tg_hslot_biu before insert or update - on HSlot for each row execute procedure tg_hslot_biu(); --- 
************************************************************ --- * BEFORE DELETE on HSlot --- * - prevent from manual manipulation --- ************************************************************ -create function tg_hslot_bd() returns trigger as ' -declare - hubrec record; -begin - select into hubrec * from Hub where name = old.hubname; - if not found then - return old; - end if; - if old.slotno > hubrec.nslots then - return old; - end if; - raise exception ''no manual manipulation of HSlot''; -end; -' language plpgsql; -create trigger tg_hslot_bd before delete - on HSlot for each row execute procedure tg_hslot_bd(); --- ************************************************************ --- * BEFORE INSERT on all slots --- * - Check name prefix --- ************************************************************ -create function tg_chkslotname() returns trigger as ' -begin - if substr(new.slotname, 1, 2) != tg_argv[0] then - raise exception ''slotname must begin with %'', tg_argv[0]; - end if; - return new; -end; -' language plpgsql; -create trigger tg_chkslotname before insert - on PSlot for each row execute procedure tg_chkslotname('PS'); -create trigger tg_chkslotname before insert - on WSlot for each row execute procedure tg_chkslotname('WS'); -create trigger tg_chkslotname before insert - on PLine for each row execute procedure tg_chkslotname('PL'); -create trigger tg_chkslotname before insert - on IFace for each row execute procedure tg_chkslotname('IF'); -create trigger tg_chkslotname before insert - on PHone for each row execute procedure tg_chkslotname('PH'); --- ************************************************************ --- * BEFORE INSERT or UPDATE on all slots with slotlink --- * - Set slotlink to empty string if NULL value given --- ************************************************************ -create function tg_chkslotlink() returns trigger as ' -begin - if new.slotlink isnull then - new.slotlink := ''''; - end if; - return new; -end; -' language plpgsql; -create trigger tg_chkslotlink before insert or update - on PSlot for each row execute procedure tg_chkslotlink(); -create trigger tg_chkslotlink before insert or update - on WSlot for each row execute procedure tg_chkslotlink(); -create trigger tg_chkslotlink before insert or update - on IFace for each row execute procedure tg_chkslotlink(); -create trigger tg_chkslotlink before insert or update - on HSlot for each row execute procedure tg_chkslotlink(); -create trigger tg_chkslotlink before insert or update - on PHone for each row execute procedure tg_chkslotlink(); --- ************************************************************ --- * BEFORE INSERT or UPDATE on all slots with backlink --- * - Set backlink to empty string if NULL value given --- ************************************************************ -create function tg_chkbacklink() returns trigger as ' -begin - if new.backlink isnull then - new.backlink := ''''; - end if; - return new; -end; -' language plpgsql; -create trigger tg_chkbacklink before insert or update - on PSlot for each row execute procedure tg_chkbacklink(); -create trigger tg_chkbacklink before insert or update - on WSlot for each row execute procedure tg_chkbacklink(); -create trigger tg_chkbacklink before insert or update - on PLine for each row execute procedure tg_chkbacklink(); --- ************************************************************ --- * BEFORE UPDATE on PSlot --- * - do delete/insert instead of update if name changes --- ************************************************************ -create 
function tg_pslot_bu() returns trigger as ' -begin - if new.slotname != old.slotname then - delete from PSlot where slotname = old.slotname; - insert into PSlot ( - slotname, - pfname, - slotlink, - backlink - ) values ( - new.slotname, - new.pfname, - new.slotlink, - new.backlink - ); - return null; - end if; - return new; -end; -' language plpgsql; -create trigger tg_pslot_bu before update - on PSlot for each row execute procedure tg_pslot_bu(); --- ************************************************************ --- * BEFORE UPDATE on WSlot --- * - do delete/insert instead of update if name changes --- ************************************************************ -create function tg_wslot_bu() returns trigger as ' -begin - if new.slotname != old.slotname then - delete from WSlot where slotname = old.slotname; - insert into WSlot ( - slotname, - roomno, - slotlink, - backlink - ) values ( - new.slotname, - new.roomno, - new.slotlink, - new.backlink - ); - return null; - end if; - return new; -end; -' language plpgsql; -create trigger tg_wslot_bu before update - on WSlot for each row execute procedure tg_Wslot_bu(); --- ************************************************************ --- * BEFORE UPDATE on PLine --- * - do delete/insert instead of update if name changes --- ************************************************************ -create function tg_pline_bu() returns trigger as ' -begin - if new.slotname != old.slotname then - delete from PLine where slotname = old.slotname; - insert into PLine ( - slotname, - phonenumber, - comment, - backlink - ) values ( - new.slotname, - new.phonenumber, - new.comment, - new.backlink - ); - return null; - end if; - return new; -end; -' language plpgsql; -create trigger tg_pline_bu before update - on PLine for each row execute procedure tg_pline_bu(); --- ************************************************************ --- * BEFORE UPDATE on IFace --- * - do delete/insert instead of update if name changes --- ************************************************************ -create function tg_iface_bu() returns trigger as ' -begin - if new.slotname != old.slotname then - delete from IFace where slotname = old.slotname; - insert into IFace ( - slotname, - sysname, - ifname, - slotlink - ) values ( - new.slotname, - new.sysname, - new.ifname, - new.slotlink - ); - return null; - end if; - return new; -end; -' language plpgsql; -create trigger tg_iface_bu before update - on IFace for each row execute procedure tg_iface_bu(); --- ************************************************************ --- * BEFORE UPDATE on HSlot --- * - do delete/insert instead of update if name changes --- ************************************************************ -create function tg_hslot_bu() returns trigger as ' -begin - if new.slotname != old.slotname or new.hubname != old.hubname then - delete from HSlot where slotname = old.slotname; - insert into HSlot ( - slotname, - hubname, - slotno, - slotlink - ) values ( - new.slotname, - new.hubname, - new.slotno, - new.slotlink - ); - return null; - end if; - return new; -end; -' language plpgsql; -create trigger tg_hslot_bu before update - on HSlot for each row execute procedure tg_hslot_bu(); --- ************************************************************ --- * BEFORE UPDATE on PHone --- * - do delete/insert instead of update if name changes --- ************************************************************ -create function tg_phone_bu() returns trigger as ' -begin - if new.slotname != old.slotname then - delete from PHone where slotname = 
old.slotname; - insert into PHone ( - slotname, - comment, - slotlink - ) values ( - new.slotname, - new.comment, - new.slotlink - ); - return null; - end if; - return new; -end; -' language plpgsql; -create trigger tg_phone_bu before update - on PHone for each row execute procedure tg_phone_bu(); --- ************************************************************ --- * AFTER INSERT or UPDATE or DELETE on slot with backlink --- * - Ensure that the opponent correctly points back to us --- ************************************************************ -create function tg_backlink_a() returns trigger as ' -declare - dummy integer; -begin - if tg_op = ''INSERT'' then - if new.backlink != '''' then - dummy := tg_backlink_set(new.backlink, new.slotname); - end if; - return new; - end if; - if tg_op = ''UPDATE'' then - if new.backlink != old.backlink then - if old.backlink != '''' then - dummy := tg_backlink_unset(old.backlink, old.slotname); - end if; - if new.backlink != '''' then - dummy := tg_backlink_set(new.backlink, new.slotname); - end if; - else - if new.slotname != old.slotname and new.backlink != '''' then - dummy := tg_slotlink_set(new.backlink, new.slotname); - end if; - end if; - return new; - end if; - if tg_op = ''DELETE'' then - if old.backlink != '''' then - dummy := tg_backlink_unset(old.backlink, old.slotname); - end if; - return old; - end if; -end; -' language plpgsql; -create trigger tg_backlink_a after insert or update or delete - on PSlot for each row execute procedure tg_backlink_a('PS'); -create trigger tg_backlink_a after insert or update or delete - on WSlot for each row execute procedure tg_backlink_a('WS'); -create trigger tg_backlink_a after insert or update or delete - on PLine for each row execute procedure tg_backlink_a('PL'); --- ************************************************************ --- * Support function to set the opponents backlink field --- * if it does not already point to the requested slot --- ************************************************************ -create function tg_backlink_set(myname bpchar, blname bpchar) -returns integer as ' -declare - mytype char(2); - link char(4); - rec record; -begin - mytype := substr(myname, 1, 2); - link := mytype || substr(blname, 1, 2); - if link = ''PLPL'' then - raise exception - ''backlink between two phone lines does not make sense''; - end if; - if link in (''PLWS'', ''WSPL'') then - raise exception - ''direct link of phone line to wall slot not permitted''; - end if; - if mytype = ''PS'' then - select into rec * from PSlot where slotname = myname; - if not found then - raise exception ''% does not exist'', myname; - end if; - if rec.backlink != blname then - update PSlot set backlink = blname where slotname = myname; - end if; - return 0; - end if; - if mytype = ''WS'' then - select into rec * from WSlot where slotname = myname; - if not found then - raise exception ''% does not exist'', myname; - end if; - if rec.backlink != blname then - update WSlot set backlink = blname where slotname = myname; - end if; - return 0; - end if; - if mytype = ''PL'' then - select into rec * from PLine where slotname = myname; - if not found then - raise exception ''% does not exist'', myname; - end if; - if rec.backlink != blname then - update PLine set backlink = blname where slotname = myname; - end if; - return 0; - end if; - raise exception ''illegal backlink beginning with %'', mytype; -end; -' language plpgsql; --- ************************************************************ --- * Support function to clear out the 
backlink field if --- * it still points to specific slot --- ************************************************************ -create function tg_backlink_unset(bpchar, bpchar) -returns integer as ' -declare - myname alias for $1; - blname alias for $2; - mytype char(2); - rec record; -begin - mytype := substr(myname, 1, 2); - if mytype = ''PS'' then - select into rec * from PSlot where slotname = myname; - if not found then - return 0; - end if; - if rec.backlink = blname then - update PSlot set backlink = '''' where slotname = myname; - end if; - return 0; - end if; - if mytype = ''WS'' then - select into rec * from WSlot where slotname = myname; - if not found then - return 0; - end if; - if rec.backlink = blname then - update WSlot set backlink = '''' where slotname = myname; - end if; - return 0; - end if; - if mytype = ''PL'' then - select into rec * from PLine where slotname = myname; - if not found then - return 0; - end if; - if rec.backlink = blname then - update PLine set backlink = '''' where slotname = myname; - end if; - return 0; - end if; -end -' language plpgsql; --- ************************************************************ --- * AFTER INSERT or UPDATE or DELETE on slot with slotlink --- * - Ensure that the opponent correctly points back to us --- ************************************************************ -create function tg_slotlink_a() returns trigger as ' -declare - dummy integer; -begin - if tg_op = ''INSERT'' then - if new.slotlink != '''' then - dummy := tg_slotlink_set(new.slotlink, new.slotname); - end if; - return new; - end if; - if tg_op = ''UPDATE'' then - if new.slotlink != old.slotlink then - if old.slotlink != '''' then - dummy := tg_slotlink_unset(old.slotlink, old.slotname); - end if; - if new.slotlink != '''' then - dummy := tg_slotlink_set(new.slotlink, new.slotname); - end if; - else - if new.slotname != old.slotname and new.slotlink != '''' then - dummy := tg_slotlink_set(new.slotlink, new.slotname); - end if; - end if; - return new; - end if; - if tg_op = ''DELETE'' then - if old.slotlink != '''' then - dummy := tg_slotlink_unset(old.slotlink, old.slotname); - end if; - return old; - end if; -end; -' language plpgsql; -create trigger tg_slotlink_a after insert or update or delete - on PSlot for each row execute procedure tg_slotlink_a('PS'); -create trigger tg_slotlink_a after insert or update or delete - on WSlot for each row execute procedure tg_slotlink_a('WS'); -create trigger tg_slotlink_a after insert or update or delete - on IFace for each row execute procedure tg_slotlink_a('IF'); -create trigger tg_slotlink_a after insert or update or delete - on HSlot for each row execute procedure tg_slotlink_a('HS'); -create trigger tg_slotlink_a after insert or update or delete - on PHone for each row execute procedure tg_slotlink_a('PH'); --- ************************************************************ --- * Support function to set the opponents slotlink field --- * if it does not already point to the requested slot --- ************************************************************ -create function tg_slotlink_set(bpchar, bpchar) -returns integer as ' -declare - myname alias for $1; - blname alias for $2; - mytype char(2); - link char(4); - rec record; -begin - mytype := substr(myname, 1, 2); - link := mytype || substr(blname, 1, 2); - if link = ''PHPH'' then - raise exception - ''slotlink between two phones does not make sense''; - end if; - if link in (''PHHS'', ''HSPH'') then - raise exception - ''link of phone to hub does not make sense''; - end if; - 
if link in (''PHIF'', ''IFPH'') then - raise exception - ''link of phone to hub does not make sense''; - end if; - if link in (''PSWS'', ''WSPS'') then - raise exception - ''slotlink from patchslot to wallslot not permitted''; - end if; - if mytype = ''PS'' then - select into rec * from PSlot where slotname = myname; - if not found then - raise exception ''% does not exist'', myname; - end if; - if rec.slotlink != blname then - update PSlot set slotlink = blname where slotname = myname; - end if; - return 0; - end if; - if mytype = ''WS'' then - select into rec * from WSlot where slotname = myname; - if not found then - raise exception ''% does not exist'', myname; - end if; - if rec.slotlink != blname then - update WSlot set slotlink = blname where slotname = myname; - end if; - return 0; - end if; - if mytype = ''IF'' then - select into rec * from IFace where slotname = myname; - if not found then - raise exception ''% does not exist'', myname; - end if; - if rec.slotlink != blname then - update IFace set slotlink = blname where slotname = myname; - end if; - return 0; - end if; - if mytype = ''HS'' then - select into rec * from HSlot where slotname = myname; - if not found then - raise exception ''% does not exist'', myname; - end if; - if rec.slotlink != blname then - update HSlot set slotlink = blname where slotname = myname; - end if; - return 0; - end if; - if mytype = ''PH'' then - select into rec * from PHone where slotname = myname; - if not found then - raise exception ''% does not exist'', myname; - end if; - if rec.slotlink != blname then - update PHone set slotlink = blname where slotname = myname; - end if; - return 0; - end if; - raise exception ''illegal slotlink beginning with %'', mytype; -end; -' language plpgsql; --- ************************************************************ --- * Support function to clear out the slotlink field if --- * it still points to specific slot --- ************************************************************ -create function tg_slotlink_unset(bpchar, bpchar) -returns integer as ' -declare - myname alias for $1; - blname alias for $2; - mytype char(2); - rec record; -begin - mytype := substr(myname, 1, 2); - if mytype = ''PS'' then - select into rec * from PSlot where slotname = myname; - if not found then - return 0; - end if; - if rec.slotlink = blname then - update PSlot set slotlink = '''' where slotname = myname; - end if; - return 0; - end if; - if mytype = ''WS'' then - select into rec * from WSlot where slotname = myname; - if not found then - return 0; - end if; - if rec.slotlink = blname then - update WSlot set slotlink = '''' where slotname = myname; - end if; - return 0; - end if; - if mytype = ''IF'' then - select into rec * from IFace where slotname = myname; - if not found then - return 0; - end if; - if rec.slotlink = blname then - update IFace set slotlink = '''' where slotname = myname; - end if; - return 0; - end if; - if mytype = ''HS'' then - select into rec * from HSlot where slotname = myname; - if not found then - return 0; - end if; - if rec.slotlink = blname then - update HSlot set slotlink = '''' where slotname = myname; - end if; - return 0; - end if; - if mytype = ''PH'' then - select into rec * from PHone where slotname = myname; - if not found then - return 0; - end if; - if rec.slotlink = blname then - update PHone set slotlink = '''' where slotname = myname; - end if; - return 0; - end if; -end; -' language plpgsql; --- ************************************************************ --- * Describe the backside of 
a patchfield slot
--- ************************************************************
-create function pslot_backlink_view(bpchar)
-returns text as '
-<<outer>>
-declare
-    rec record;
-    bltype char(2);
-    retval text;
-begin
-    select into rec * from PSlot where slotname = $1;
-    if not found then
-        return '''';
-    end if;
-    if rec.backlink = '''' then
-        return ''-'';
-    end if;
-    bltype := substr(rec.backlink, 1, 2);
-    if bltype = ''PL'' then
-        declare
-            rec record;
-        begin
-            select into rec * from PLine where slotname = "outer".rec.backlink;
-            retval := ''Phone line '' || trim(rec.phonenumber);
-            if rec.comment != '''' then
-                retval := retval || '' ('';
-                retval := retval || rec.comment;
-                retval := retval || '')'';
-            end if;
-            return retval;
-        end;
-    end if;
-    if bltype = ''WS'' then
-        select into rec * from WSlot where slotname = rec.backlink;
-        retval := trim(rec.slotname) || '' in room '';
-        retval := retval || trim(rec.roomno);
-        retval := retval || '' -> '';
-        return retval || wslot_slotlink_view(rec.slotname);
-    end if;
-    return rec.backlink;
-end;
-' language plpgsql;
--- ************************************************************
--- * Describe the front of a patchfield slot
--- ************************************************************
-create function pslot_slotlink_view(bpchar)
-returns text as '
-declare
-    psrec record;
-    sltype char(2);
-    retval text;
-begin
-    select into psrec * from PSlot where slotname = $1;
-    if not found then
-        return '''';
-    end if;
-    if psrec.slotlink = '''' then
-        return ''-'';
-    end if;
-    sltype := substr(psrec.slotlink, 1, 2);
-    if sltype = ''PS'' then
-        retval := trim(psrec.slotlink) || '' -> '';
-        return retval || pslot_backlink_view(psrec.slotlink);
-    end if;
-    if sltype = ''HS'' then
-        retval := comment from Hub H, HSlot HS
-            where HS.slotname = psrec.slotlink
-            and H.name = HS.hubname;
-        retval := retval || '' slot '';
-        retval := retval || slotno::text from HSlot
-            where slotname = psrec.slotlink;
-        return retval;
-    end if;
-    return psrec.slotlink;
-end;
-' language plpgsql;
--- ************************************************************
--- * Describe the front of a wall connector slot
--- ************************************************************
-create function wslot_slotlink_view(bpchar)
-returns text as '
-declare
-    rec record;
-    sltype char(2);
-    retval text;
-begin
-    select into rec * from WSlot where slotname = $1;
-    if not found then
-        return '''';
-    end if;
-    if rec.slotlink = '''' then
-        return ''-'';
-    end if;
-    sltype := substr(rec.slotlink, 1, 2);
-    if sltype = ''PH'' then
-        select into rec * from PHone where slotname = rec.slotlink;
-        retval := ''Phone '' || trim(rec.slotname);
-        if rec.comment != '''' then
-            retval := retval || '' ('';
-            retval := retval || rec.comment;
-            retval := retval || '')'';
-        end if;
-        return retval;
-    end if;
-    if sltype = ''IF'' then
-        declare
-            syrow System%RowType;
-            ifrow IFace%ROWTYPE;
-        begin
-            select into ifrow * from IFace where slotname = rec.slotlink;
-            select into syrow * from System where name = ifrow.sysname;
-            retval := syrow.name || '' IF '';
-            retval := retval || ifrow.ifname;
-            if syrow.comment != '''' then
-                retval := retval || '' ('';
-                retval := retval || syrow.comment;
-                retval := retval || '')'';
-            end if;
-            return retval;
-        end;
-    end if;
-    return rec.slotlink;
-end;
-' language plpgsql;
--- ************************************************************
--- * View of a patchfield describing backside and patches
--- ************************************************************
-create view
Pfield_v1 as select PF.pfname, PF.slotname, - pslot_backlink_view(PF.slotname) as backside, - pslot_slotlink_view(PF.slotname) as patch - from PSlot PF; --- --- First we build the house - so we create the rooms --- -insert into Room values ('001', 'Entrance'); -insert into Room values ('002', 'Office'); -insert into Room values ('003', 'Office'); -insert into Room values ('004', 'Technical'); -insert into Room values ('101', 'Office'); -insert into Room values ('102', 'Conference'); -insert into Room values ('103', 'Restroom'); -insert into Room values ('104', 'Technical'); -insert into Room values ('105', 'Office'); -insert into Room values ('106', 'Office'); --- --- Second we install the wall connectors --- -insert into WSlot values ('WS.001.1a', '001', '', ''); -insert into WSlot values ('WS.001.1b', '001', '', ''); -insert into WSlot values ('WS.001.2a', '001', '', ''); -insert into WSlot values ('WS.001.2b', '001', '', ''); -insert into WSlot values ('WS.001.3a', '001', '', ''); -insert into WSlot values ('WS.001.3b', '001', '', ''); -insert into WSlot values ('WS.002.1a', '002', '', ''); -insert into WSlot values ('WS.002.1b', '002', '', ''); -insert into WSlot values ('WS.002.2a', '002', '', ''); -insert into WSlot values ('WS.002.2b', '002', '', ''); -insert into WSlot values ('WS.002.3a', '002', '', ''); -insert into WSlot values ('WS.002.3b', '002', '', ''); -insert into WSlot values ('WS.003.1a', '003', '', ''); -insert into WSlot values ('WS.003.1b', '003', '', ''); -insert into WSlot values ('WS.003.2a', '003', '', ''); -insert into WSlot values ('WS.003.2b', '003', '', ''); -insert into WSlot values ('WS.003.3a', '003', '', ''); -insert into WSlot values ('WS.003.3b', '003', '', ''); -insert into WSlot values ('WS.101.1a', '101', '', ''); -insert into WSlot values ('WS.101.1b', '101', '', ''); -insert into WSlot values ('WS.101.2a', '101', '', ''); -insert into WSlot values ('WS.101.2b', '101', '', ''); -insert into WSlot values ('WS.101.3a', '101', '', ''); -insert into WSlot values ('WS.101.3b', '101', '', ''); -insert into WSlot values ('WS.102.1a', '102', '', ''); -insert into WSlot values ('WS.102.1b', '102', '', ''); -insert into WSlot values ('WS.102.2a', '102', '', ''); -insert into WSlot values ('WS.102.2b', '102', '', ''); -insert into WSlot values ('WS.102.3a', '102', '', ''); -insert into WSlot values ('WS.102.3b', '102', '', ''); -insert into WSlot values ('WS.105.1a', '105', '', ''); -insert into WSlot values ('WS.105.1b', '105', '', ''); -insert into WSlot values ('WS.105.2a', '105', '', ''); -insert into WSlot values ('WS.105.2b', '105', '', ''); -insert into WSlot values ('WS.105.3a', '105', '', ''); -insert into WSlot values ('WS.105.3b', '105', '', ''); -insert into WSlot values ('WS.106.1a', '106', '', ''); -insert into WSlot values ('WS.106.1b', '106', '', ''); -insert into WSlot values ('WS.106.2a', '106', '', ''); -insert into WSlot values ('WS.106.2b', '106', '', ''); -insert into WSlot values ('WS.106.3a', '106', '', ''); -insert into WSlot values ('WS.106.3b', '106', '', ''); --- --- Now create the patch fields and their slots --- -insert into PField values ('PF0_1', 'Wallslots basement'); --- --- The cables for these will be made later, so they are unconnected for now --- -insert into PSlot values ('PS.base.a1', 'PF0_1', '', ''); -insert into PSlot values ('PS.base.a2', 'PF0_1', '', ''); -insert into PSlot values ('PS.base.a3', 'PF0_1', '', ''); -insert into PSlot values ('PS.base.a4', 'PF0_1', '', ''); -insert into PSlot values ('PS.base.a5', 
'PF0_1', '', ''); -insert into PSlot values ('PS.base.a6', 'PF0_1', '', ''); --- --- These are already wired to the wall connectors --- -insert into PSlot values ('PS.base.b1', 'PF0_1', '', 'WS.002.1a'); -insert into PSlot values ('PS.base.b2', 'PF0_1', '', 'WS.002.1b'); -insert into PSlot values ('PS.base.b3', 'PF0_1', '', 'WS.002.2a'); -insert into PSlot values ('PS.base.b4', 'PF0_1', '', 'WS.002.2b'); -insert into PSlot values ('PS.base.b5', 'PF0_1', '', 'WS.002.3a'); -insert into PSlot values ('PS.base.b6', 'PF0_1', '', 'WS.002.3b'); -insert into PSlot values ('PS.base.c1', 'PF0_1', '', 'WS.003.1a'); -insert into PSlot values ('PS.base.c2', 'PF0_1', '', 'WS.003.1b'); -insert into PSlot values ('PS.base.c3', 'PF0_1', '', 'WS.003.2a'); -insert into PSlot values ('PS.base.c4', 'PF0_1', '', 'WS.003.2b'); -insert into PSlot values ('PS.base.c5', 'PF0_1', '', 'WS.003.3a'); -insert into PSlot values ('PS.base.c6', 'PF0_1', '', 'WS.003.3b'); --- --- This patchfield will be renamed later into PF0_2 - so its --- slots references in pfname should follow --- -insert into PField values ('PF0_X', 'Phonelines basement'); -insert into PSlot values ('PS.base.ta1', 'PF0_X', '', ''); -insert into PSlot values ('PS.base.ta2', 'PF0_X', '', ''); -insert into PSlot values ('PS.base.ta3', 'PF0_X', '', ''); -insert into PSlot values ('PS.base.ta4', 'PF0_X', '', ''); -insert into PSlot values ('PS.base.ta5', 'PF0_X', '', ''); -insert into PSlot values ('PS.base.ta6', 'PF0_X', '', ''); -insert into PSlot values ('PS.base.tb1', 'PF0_X', '', ''); -insert into PSlot values ('PS.base.tb2', 'PF0_X', '', ''); -insert into PSlot values ('PS.base.tb3', 'PF0_X', '', ''); -insert into PSlot values ('PS.base.tb4', 'PF0_X', '', ''); -insert into PSlot values ('PS.base.tb5', 'PF0_X', '', ''); -insert into PSlot values ('PS.base.tb6', 'PF0_X', '', ''); -insert into PField values ('PF1_1', 'Wallslots first floor'); -insert into PSlot values ('PS.first.a1', 'PF1_1', '', 'WS.101.1a'); -insert into PSlot values ('PS.first.a2', 'PF1_1', '', 'WS.101.1b'); -insert into PSlot values ('PS.first.a3', 'PF1_1', '', 'WS.101.2a'); -insert into PSlot values ('PS.first.a4', 'PF1_1', '', 'WS.101.2b'); -insert into PSlot values ('PS.first.a5', 'PF1_1', '', 'WS.101.3a'); -insert into PSlot values ('PS.first.a6', 'PF1_1', '', 'WS.101.3b'); -insert into PSlot values ('PS.first.b1', 'PF1_1', '', 'WS.102.1a'); -insert into PSlot values ('PS.first.b2', 'PF1_1', '', 'WS.102.1b'); -insert into PSlot values ('PS.first.b3', 'PF1_1', '', 'WS.102.2a'); -insert into PSlot values ('PS.first.b4', 'PF1_1', '', 'WS.102.2b'); -insert into PSlot values ('PS.first.b5', 'PF1_1', '', 'WS.102.3a'); -insert into PSlot values ('PS.first.b6', 'PF1_1', '', 'WS.102.3b'); -insert into PSlot values ('PS.first.c1', 'PF1_1', '', 'WS.105.1a'); -insert into PSlot values ('PS.first.c2', 'PF1_1', '', 'WS.105.1b'); -insert into PSlot values ('PS.first.c3', 'PF1_1', '', 'WS.105.2a'); -insert into PSlot values ('PS.first.c4', 'PF1_1', '', 'WS.105.2b'); -insert into PSlot values ('PS.first.c5', 'PF1_1', '', 'WS.105.3a'); -insert into PSlot values ('PS.first.c6', 'PF1_1', '', 'WS.105.3b'); -insert into PSlot values ('PS.first.d1', 'PF1_1', '', 'WS.106.1a'); -insert into PSlot values ('PS.first.d2', 'PF1_1', '', 'WS.106.1b'); -insert into PSlot values ('PS.first.d3', 'PF1_1', '', 'WS.106.2a'); -insert into PSlot values ('PS.first.d4', 'PF1_1', '', 'WS.106.2b'); -insert into PSlot values ('PS.first.d5', 'PF1_1', '', 'WS.106.3a'); -insert into PSlot values ('PS.first.d6', 'PF1_1', '', 
'WS.106.3b'); --- --- Now we wire the wall connectors 1a-2a in room 001 to the --- patchfield. In the second update we make an error, and --- correct it after --- -update PSlot set backlink = 'WS.001.1a' where slotname = 'PS.base.a1'; -update PSlot set backlink = 'WS.001.1b' where slotname = 'PS.base.a3'; -select * from WSlot where roomno = '001' order by slotname; - slotname | roomno | slotlink | backlink -----------------------+----------+----------------------+---------------------- - WS.001.1a | 001 | | PS.base.a1 - WS.001.1b | 001 | | PS.base.a3 - WS.001.2a | 001 | | - WS.001.2b | 001 | | - WS.001.3a | 001 | | - WS.001.3b | 001 | | -(6 rows) - -select * from PSlot where slotname ~ 'PS.base.a' order by slotname; - slotname | pfname | slotlink | backlink -----------------------+--------+----------------------+---------------------- - PS.base.a1 | PF0_1 | | WS.001.1a - PS.base.a2 | PF0_1 | | - PS.base.a3 | PF0_1 | | WS.001.1b - PS.base.a4 | PF0_1 | | - PS.base.a5 | PF0_1 | | - PS.base.a6 | PF0_1 | | -(6 rows) - -update PSlot set backlink = 'WS.001.2a' where slotname = 'PS.base.a3'; -select * from WSlot where roomno = '001' order by slotname; - slotname | roomno | slotlink | backlink -----------------------+----------+----------------------+---------------------- - WS.001.1a | 001 | | PS.base.a1 - WS.001.1b | 001 | | - WS.001.2a | 001 | | PS.base.a3 - WS.001.2b | 001 | | - WS.001.3a | 001 | | - WS.001.3b | 001 | | -(6 rows) - -select * from PSlot where slotname ~ 'PS.base.a' order by slotname; - slotname | pfname | slotlink | backlink -----------------------+--------+----------------------+---------------------- - PS.base.a1 | PF0_1 | | WS.001.1a - PS.base.a2 | PF0_1 | | - PS.base.a3 | PF0_1 | | WS.001.2a - PS.base.a4 | PF0_1 | | - PS.base.a5 | PF0_1 | | - PS.base.a6 | PF0_1 | | -(6 rows) - -update PSlot set backlink = 'WS.001.1b' where slotname = 'PS.base.a2'; -select * from WSlot where roomno = '001' order by slotname; - slotname | roomno | slotlink | backlink -----------------------+----------+----------------------+---------------------- - WS.001.1a | 001 | | PS.base.a1 - WS.001.1b | 001 | | PS.base.a2 - WS.001.2a | 001 | | PS.base.a3 - WS.001.2b | 001 | | - WS.001.3a | 001 | | - WS.001.3b | 001 | | -(6 rows) - -select * from PSlot where slotname ~ 'PS.base.a' order by slotname; - slotname | pfname | slotlink | backlink -----------------------+--------+----------------------+---------------------- - PS.base.a1 | PF0_1 | | WS.001.1a - PS.base.a2 | PF0_1 | | WS.001.1b - PS.base.a3 | PF0_1 | | WS.001.2a - PS.base.a4 | PF0_1 | | - PS.base.a5 | PF0_1 | | - PS.base.a6 | PF0_1 | | -(6 rows) - --- --- Same procedure for 2b-3b but this time updating the WSlot instead --- of the PSlot. Due to the triggers the result is the same: --- WSlot and corresponding PSlot point to each other. 
--- -update WSlot set backlink = 'PS.base.a4' where slotname = 'WS.001.2b'; -update WSlot set backlink = 'PS.base.a6' where slotname = 'WS.001.3a'; -select * from WSlot where roomno = '001' order by slotname; - slotname | roomno | slotlink | backlink -----------------------+----------+----------------------+---------------------- - WS.001.1a | 001 | | PS.base.a1 - WS.001.1b | 001 | | PS.base.a2 - WS.001.2a | 001 | | PS.base.a3 - WS.001.2b | 001 | | PS.base.a4 - WS.001.3a | 001 | | PS.base.a6 - WS.001.3b | 001 | | -(6 rows) - -select * from PSlot where slotname ~ 'PS.base.a' order by slotname; - slotname | pfname | slotlink | backlink -----------------------+--------+----------------------+---------------------- - PS.base.a1 | PF0_1 | | WS.001.1a - PS.base.a2 | PF0_1 | | WS.001.1b - PS.base.a3 | PF0_1 | | WS.001.2a - PS.base.a4 | PF0_1 | | WS.001.2b - PS.base.a5 | PF0_1 | | - PS.base.a6 | PF0_1 | | WS.001.3a -(6 rows) - -update WSlot set backlink = 'PS.base.a6' where slotname = 'WS.001.3b'; -select * from WSlot where roomno = '001' order by slotname; - slotname | roomno | slotlink | backlink -----------------------+----------+----------------------+---------------------- - WS.001.1a | 001 | | PS.base.a1 - WS.001.1b | 001 | | PS.base.a2 - WS.001.2a | 001 | | PS.base.a3 - WS.001.2b | 001 | | PS.base.a4 - WS.001.3a | 001 | | - WS.001.3b | 001 | | PS.base.a6 -(6 rows) - -select * from PSlot where slotname ~ 'PS.base.a' order by slotname; - slotname | pfname | slotlink | backlink -----------------------+--------+----------------------+---------------------- - PS.base.a1 | PF0_1 | | WS.001.1a - PS.base.a2 | PF0_1 | | WS.001.1b - PS.base.a3 | PF0_1 | | WS.001.2a - PS.base.a4 | PF0_1 | | WS.001.2b - PS.base.a5 | PF0_1 | | - PS.base.a6 | PF0_1 | | WS.001.3b -(6 rows) - -update WSlot set backlink = 'PS.base.a5' where slotname = 'WS.001.3a'; -select * from WSlot where roomno = '001' order by slotname; - slotname | roomno | slotlink | backlink -----------------------+----------+----------------------+---------------------- - WS.001.1a | 001 | | PS.base.a1 - WS.001.1b | 001 | | PS.base.a2 - WS.001.2a | 001 | | PS.base.a3 - WS.001.2b | 001 | | PS.base.a4 - WS.001.3a | 001 | | PS.base.a5 - WS.001.3b | 001 | | PS.base.a6 -(6 rows) - -select * from PSlot where slotname ~ 'PS.base.a' order by slotname; - slotname | pfname | slotlink | backlink -----------------------+--------+----------------------+---------------------- - PS.base.a1 | PF0_1 | | WS.001.1a - PS.base.a2 | PF0_1 | | WS.001.1b - PS.base.a3 | PF0_1 | | WS.001.2a - PS.base.a4 | PF0_1 | | WS.001.2b - PS.base.a5 | PF0_1 | | WS.001.3a - PS.base.a6 | PF0_1 | | WS.001.3b -(6 rows) - -insert into PField values ('PF1_2', 'Phonelines first floor'); -insert into PSlot values ('PS.first.ta1', 'PF1_2', '', ''); -insert into PSlot values ('PS.first.ta2', 'PF1_2', '', ''); -insert into PSlot values ('PS.first.ta3', 'PF1_2', '', ''); -insert into PSlot values ('PS.first.ta4', 'PF1_2', '', ''); -insert into PSlot values ('PS.first.ta5', 'PF1_2', '', ''); -insert into PSlot values ('PS.first.ta6', 'PF1_2', '', ''); -insert into PSlot values ('PS.first.tb1', 'PF1_2', '', ''); -insert into PSlot values ('PS.first.tb2', 'PF1_2', '', ''); -insert into PSlot values ('PS.first.tb3', 'PF1_2', '', ''); -insert into PSlot values ('PS.first.tb4', 'PF1_2', '', ''); -insert into PSlot values ('PS.first.tb5', 'PF1_2', '', ''); -insert into PSlot values ('PS.first.tb6', 'PF1_2', '', ''); --- --- Fix the wrong name for patchfield PF0_2 --- -update PField set name = 'PF0_2' 
where name = 'PF0_X'; -select * from PSlot order by slotname; - slotname | pfname | slotlink | backlink -----------------------+--------+----------------------+---------------------- - PS.base.a1 | PF0_1 | | WS.001.1a - PS.base.a2 | PF0_1 | | WS.001.1b - PS.base.a3 | PF0_1 | | WS.001.2a - PS.base.a4 | PF0_1 | | WS.001.2b - PS.base.a5 | PF0_1 | | WS.001.3a - PS.base.a6 | PF0_1 | | WS.001.3b - PS.base.b1 | PF0_1 | | WS.002.1a - PS.base.b2 | PF0_1 | | WS.002.1b - PS.base.b3 | PF0_1 | | WS.002.2a - PS.base.b4 | PF0_1 | | WS.002.2b - PS.base.b5 | PF0_1 | | WS.002.3a - PS.base.b6 | PF0_1 | | WS.002.3b - PS.base.c1 | PF0_1 | | WS.003.1a - PS.base.c2 | PF0_1 | | WS.003.1b - PS.base.c3 | PF0_1 | | WS.003.2a - PS.base.c4 | PF0_1 | | WS.003.2b - PS.base.c5 | PF0_1 | | WS.003.3a - PS.base.c6 | PF0_1 | | WS.003.3b - PS.base.ta1 | PF0_2 | | - PS.base.ta2 | PF0_2 | | - PS.base.ta3 | PF0_2 | | - PS.base.ta4 | PF0_2 | | - PS.base.ta5 | PF0_2 | | - PS.base.ta6 | PF0_2 | | - PS.base.tb1 | PF0_2 | | - PS.base.tb2 | PF0_2 | | - PS.base.tb3 | PF0_2 | | - PS.base.tb4 | PF0_2 | | - PS.base.tb5 | PF0_2 | | - PS.base.tb6 | PF0_2 | | - PS.first.a1 | PF1_1 | | WS.101.1a - PS.first.a2 | PF1_1 | | WS.101.1b - PS.first.a3 | PF1_1 | | WS.101.2a - PS.first.a4 | PF1_1 | | WS.101.2b - PS.first.a5 | PF1_1 | | WS.101.3a - PS.first.a6 | PF1_1 | | WS.101.3b - PS.first.b1 | PF1_1 | | WS.102.1a - PS.first.b2 | PF1_1 | | WS.102.1b - PS.first.b3 | PF1_1 | | WS.102.2a - PS.first.b4 | PF1_1 | | WS.102.2b - PS.first.b5 | PF1_1 | | WS.102.3a - PS.first.b6 | PF1_1 | | WS.102.3b - PS.first.c1 | PF1_1 | | WS.105.1a - PS.first.c2 | PF1_1 | | WS.105.1b - PS.first.c3 | PF1_1 | | WS.105.2a - PS.first.c4 | PF1_1 | | WS.105.2b - PS.first.c5 | PF1_1 | | WS.105.3a - PS.first.c6 | PF1_1 | | WS.105.3b - PS.first.d1 | PF1_1 | | WS.106.1a - PS.first.d2 | PF1_1 | | WS.106.1b - PS.first.d3 | PF1_1 | | WS.106.2a - PS.first.d4 | PF1_1 | | WS.106.2b - PS.first.d5 | PF1_1 | | WS.106.3a - PS.first.d6 | PF1_1 | | WS.106.3b - PS.first.ta1 | PF1_2 | | - PS.first.ta2 | PF1_2 | | - PS.first.ta3 | PF1_2 | | - PS.first.ta4 | PF1_2 | | - PS.first.ta5 | PF1_2 | | - PS.first.ta6 | PF1_2 | | - PS.first.tb1 | PF1_2 | | - PS.first.tb2 | PF1_2 | | - PS.first.tb3 | PF1_2 | | - PS.first.tb4 | PF1_2 | | - PS.first.tb5 | PF1_2 | | - PS.first.tb6 | PF1_2 | | -(66 rows) - -select * from WSlot order by slotname; - slotname | roomno | slotlink | backlink -----------------------+----------+----------------------+---------------------- - WS.001.1a | 001 | | PS.base.a1 - WS.001.1b | 001 | | PS.base.a2 - WS.001.2a | 001 | | PS.base.a3 - WS.001.2b | 001 | | PS.base.a4 - WS.001.3a | 001 | | PS.base.a5 - WS.001.3b | 001 | | PS.base.a6 - WS.002.1a | 002 | | PS.base.b1 - WS.002.1b | 002 | | PS.base.b2 - WS.002.2a | 002 | | PS.base.b3 - WS.002.2b | 002 | | PS.base.b4 - WS.002.3a | 002 | | PS.base.b5 - WS.002.3b | 002 | | PS.base.b6 - WS.003.1a | 003 | | PS.base.c1 - WS.003.1b | 003 | | PS.base.c2 - WS.003.2a | 003 | | PS.base.c3 - WS.003.2b | 003 | | PS.base.c4 - WS.003.3a | 003 | | PS.base.c5 - WS.003.3b | 003 | | PS.base.c6 - WS.101.1a | 101 | | PS.first.a1 - WS.101.1b | 101 | | PS.first.a2 - WS.101.2a | 101 | | PS.first.a3 - WS.101.2b | 101 | | PS.first.a4 - WS.101.3a | 101 | | PS.first.a5 - WS.101.3b | 101 | | PS.first.a6 - WS.102.1a | 102 | | PS.first.b1 - WS.102.1b | 102 | | PS.first.b2 - WS.102.2a | 102 | | PS.first.b3 - WS.102.2b | 102 | | PS.first.b4 - WS.102.3a | 102 | | PS.first.b5 - WS.102.3b | 102 | | PS.first.b6 - WS.105.1a | 105 | | PS.first.c1 - WS.105.1b | 105 | | 
PS.first.c2 - WS.105.2a | 105 | | PS.first.c3 - WS.105.2b | 105 | | PS.first.c4 - WS.105.3a | 105 | | PS.first.c5 - WS.105.3b | 105 | | PS.first.c6 - WS.106.1a | 106 | | PS.first.d1 - WS.106.1b | 106 | | PS.first.d2 - WS.106.2a | 106 | | PS.first.d3 - WS.106.2b | 106 | | PS.first.d4 - WS.106.3a | 106 | | PS.first.d5 - WS.106.3b | 106 | | PS.first.d6 -(42 rows) - --- --- Install the central phone system and create the phone numbers. --- They are wired on insert to the patchfields. Again the --- triggers automatically tell the PSlots to update their --- backlink field. --- -insert into PLine values ('PL.001', '-0', 'Central call', 'PS.base.ta1'); -insert into PLine values ('PL.002', '-101', '', 'PS.base.ta2'); -insert into PLine values ('PL.003', '-102', '', 'PS.base.ta3'); -insert into PLine values ('PL.004', '-103', '', 'PS.base.ta5'); -insert into PLine values ('PL.005', '-104', '', 'PS.base.ta6'); -insert into PLine values ('PL.006', '-106', '', 'PS.base.tb2'); -insert into PLine values ('PL.007', '-108', '', 'PS.base.tb3'); -insert into PLine values ('PL.008', '-109', '', 'PS.base.tb4'); -insert into PLine values ('PL.009', '-121', '', 'PS.base.tb5'); -insert into PLine values ('PL.010', '-122', '', 'PS.base.tb6'); -insert into PLine values ('PL.015', '-134', '', 'PS.first.ta1'); -insert into PLine values ('PL.016', '-137', '', 'PS.first.ta3'); -insert into PLine values ('PL.017', '-139', '', 'PS.first.ta4'); -insert into PLine values ('PL.018', '-362', '', 'PS.first.tb1'); -insert into PLine values ('PL.019', '-363', '', 'PS.first.tb2'); -insert into PLine values ('PL.020', '-364', '', 'PS.first.tb3'); -insert into PLine values ('PL.021', '-365', '', 'PS.first.tb5'); -insert into PLine values ('PL.022', '-367', '', 'PS.first.tb6'); -insert into PLine values ('PL.028', '-501', 'Fax entrance', 'PS.base.ta2'); -insert into PLine values ('PL.029', '-502', 'Fax first floor', 'PS.first.ta1'); --- --- Buy some phones, plug them into the wall and patch the --- phone lines to the corresponding patchfield slots. --- -insert into PHone values ('PH.hc001', 'Hicom standard', 'WS.001.1a'); -update PSlot set slotlink = 'PS.base.ta1' where slotname = 'PS.base.a1'; -insert into PHone values ('PH.hc002', 'Hicom standard', 'WS.002.1a'); -update PSlot set slotlink = 'PS.base.ta5' where slotname = 'PS.base.b1'; -insert into PHone values ('PH.hc003', 'Hicom standard', 'WS.002.2a'); -update PSlot set slotlink = 'PS.base.tb2' where slotname = 'PS.base.b3'; -insert into PHone values ('PH.fax001', 'Canon fax', 'WS.001.2a'); -update PSlot set slotlink = 'PS.base.ta2' where slotname = 'PS.base.a3'; --- --- Install a hub at one of the patchfields, plug a computers --- ethernet interface into the wall and patch it to the hub. 
--- -insert into Hub values ('base.hub1', 'Patchfield PF0_1 hub', 16); -insert into System values ('orion', 'PC'); -insert into IFace values ('IF', 'orion', 'eth0', 'WS.002.1b'); -update PSlot set slotlink = 'HS.base.hub1.1' where slotname = 'PS.base.b2'; --- --- Now we take a look at the patchfield --- -select * from PField_v1 where pfname = 'PF0_1' order by slotname; - pfname | slotname | backside | patch ---------+----------------------+----------------------------------------------------------+----------------------------------------------- - PF0_1 | PS.base.a1 | WS.001.1a in room 001 -> Phone PH.hc001 (Hicom standard) | PS.base.ta1 -> Phone line -0 (Central call) - PF0_1 | PS.base.a2 | WS.001.1b in room 001 -> - | - - PF0_1 | PS.base.a3 | WS.001.2a in room 001 -> Phone PH.fax001 (Canon fax) | PS.base.ta2 -> Phone line -501 (Fax entrance) - PF0_1 | PS.base.a4 | WS.001.2b in room 001 -> - | - - PF0_1 | PS.base.a5 | WS.001.3a in room 001 -> - | - - PF0_1 | PS.base.a6 | WS.001.3b in room 001 -> - | - - PF0_1 | PS.base.b1 | WS.002.1a in room 002 -> Phone PH.hc002 (Hicom standard) | PS.base.ta5 -> Phone line -103 - PF0_1 | PS.base.b2 | WS.002.1b in room 002 -> orion IF eth0 (PC) | Patchfield PF0_1 hub slot 1 - PF0_1 | PS.base.b3 | WS.002.2a in room 002 -> Phone PH.hc003 (Hicom standard) | PS.base.tb2 -> Phone line -106 - PF0_1 | PS.base.b4 | WS.002.2b in room 002 -> - | - - PF0_1 | PS.base.b5 | WS.002.3a in room 002 -> - | - - PF0_1 | PS.base.b6 | WS.002.3b in room 002 -> - | - - PF0_1 | PS.base.c1 | WS.003.1a in room 003 -> - | - - PF0_1 | PS.base.c2 | WS.003.1b in room 003 -> - | - - PF0_1 | PS.base.c3 | WS.003.2a in room 003 -> - | - - PF0_1 | PS.base.c4 | WS.003.2b in room 003 -> - | - - PF0_1 | PS.base.c5 | WS.003.3a in room 003 -> - | - - PF0_1 | PS.base.c6 | WS.003.3b in room 003 -> - | - -(18 rows) - -select * from PField_v1 where pfname = 'PF0_2' order by slotname; - pfname | slotname | backside | patch ---------+----------------------+--------------------------------+------------------------------------------------------------------------ - PF0_2 | PS.base.ta1 | Phone line -0 (Central call) | PS.base.a1 -> WS.001.1a in room 001 -> Phone PH.hc001 (Hicom standard) - PF0_2 | PS.base.ta2 | Phone line -501 (Fax entrance) | PS.base.a3 -> WS.001.2a in room 001 -> Phone PH.fax001 (Canon fax) - PF0_2 | PS.base.ta3 | Phone line -102 | - - PF0_2 | PS.base.ta4 | - | - - PF0_2 | PS.base.ta5 | Phone line -103 | PS.base.b1 -> WS.002.1a in room 002 -> Phone PH.hc002 (Hicom standard) - PF0_2 | PS.base.ta6 | Phone line -104 | - - PF0_2 | PS.base.tb1 | - | - - PF0_2 | PS.base.tb2 | Phone line -106 | PS.base.b3 -> WS.002.2a in room 002 -> Phone PH.hc003 (Hicom standard) - PF0_2 | PS.base.tb3 | Phone line -108 | - - PF0_2 | PS.base.tb4 | Phone line -109 | - - PF0_2 | PS.base.tb5 | Phone line -121 | - - PF0_2 | PS.base.tb6 | Phone line -122 | - -(12 rows) - --- --- Finally we want errors --- -insert into PField values ('PF1_1', 'should fail due to unique index'); -ERROR: duplicate key value violates unique constraint "pfield_name" -DETAIL: Key (name)=(PF1_1) already exists. 
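The scenario above leans on the tg_backlink_* trigger family to keep each PSlot/WSlot pair pointing at each other. As a rough illustration of that pattern only — the table, trigger, and column names below are hypothetical, not part of the regression test — the same bookkeeping can be compressed onto a single table:

create table node (name text primary key, peer text not null default '');

create function tg_node_peer() returns trigger as $$
begin
    if new.peer <> '' then
        -- Point the opposite row back at us unless it already does;
        -- the guard also keeps the trigger from recursing endlessly.
        update node set peer = new.name
            where name = new.peer and peer <> new.name;
    end if;
    return new;
end;
$$ language plpgsql;

create trigger node_peer after insert or update on node
    for each row execute procedure tg_node_peer();

insert into node values ('a', ''), ('b', '');
update node set peer = 'b' where name = 'a';
-- Both rows now reference each other: a -> b and b -> a.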
-update PSlot set backlink = 'WS.not.there' where slotname = 'PS.base.a1';
-ERROR: WS.not.there does not exist
-CONTEXT: PL/pgSQL function tg_backlink_set(character,character) line 30 at RAISE
-PL/pgSQL function tg_backlink_a() line 17 at assignment
-update PSlot set backlink = 'XX.illegal' where slotname = 'PS.base.a1';
-ERROR: illegal backlink beginning with XX
-CONTEXT: PL/pgSQL function tg_backlink_set(character,character) line 47 at RAISE
-PL/pgSQL function tg_backlink_a() line 17 at assignment
-update PSlot set slotlink = 'PS.not.there' where slotname = 'PS.base.a1';
-ERROR: PS.not.there does not exist
-CONTEXT: PL/pgSQL function tg_slotlink_set(character,character) line 30 at RAISE
-PL/pgSQL function tg_slotlink_a() line 17 at assignment
-update PSlot set slotlink = 'XX.illegal' where slotname = 'PS.base.a1';
-ERROR: illegal slotlink beginning with XX
-CONTEXT: PL/pgSQL function tg_slotlink_set(character,character) line 77 at RAISE
-PL/pgSQL function tg_slotlink_a() line 17 at assignment
-insert into HSlot values ('HS', 'base.hub1', 1, '');
-ERROR: duplicate key value violates unique constraint "hslot_name"
-DETAIL: Key (slotname)=(HS.base.hub1.1 ) already exists.
-insert into HSlot values ('HS', 'base.hub1', 20, '');
-ERROR: no manual manipulation of HSlot
-CONTEXT: PL/pgSQL function tg_hslot_biu() line 12 at RAISE
-delete from HSlot;
-ERROR: no manual manipulation of HSlot
-CONTEXT: PL/pgSQL function tg_hslot_bd() line 12 at RAISE
-insert into IFace values ('IF', 'notthere', 'eth0', '');
-ERROR: system "notthere" does not exist
-CONTEXT: PL/pgSQL function tg_iface_biu() line 8 at RAISE
-insert into IFace values ('IF', 'orion', 'ethernet_interface_name_too_long', '');
-ERROR: IFace slotname "IF.orion.ethernet_interface_name_too_long" too long (20 char max)
-CONTEXT: PL/pgSQL function tg_iface_biu() line 14 at RAISE
---
--- The following tests are unrelated to the scenario outlined above;
--- they merely exercise specific parts of PL/pgSQL
---
---
--- Test recursion, per bug report 7-Sep-01
---
-CREATE FUNCTION recursion_test(int,int) RETURNS text AS '
-DECLARE rslt text;
-BEGIN
-    IF $1 <= 0 THEN
-        rslt = CAST($2 AS TEXT);
-    ELSE
-        rslt = CAST($1 AS TEXT) || '','' || recursion_test($1 - 1, $2);
-    END IF;
-    RETURN rslt;
-END;' LANGUAGE plpgsql;
-SELECT recursion_test(4,3);
- recursion_test
-----------------
- 4,3,2,1,3
-(1 row)
-
---
--- Test the FOUND magic variable
---
-CREATE TABLE found_test_tbl (a int);
-create function test_found()
-    returns boolean as '
-    declare
-    begin
-    insert into found_test_tbl values (1);
-    if FOUND then
-        insert into found_test_tbl values (2);
-    end if;
-
-    update found_test_tbl set a = 100 where a = 1;
-    if FOUND then
-        insert into found_test_tbl values (3);
-    end if;
-
-    delete from found_test_tbl where a = 9999; -- matches no rows
-    if not FOUND then
-        insert into found_test_tbl values (4);
-    end if;
-
-    for i in 1 .. 10 loop
-        -- no need to do anything
-    end loop;
-    if FOUND then
-        insert into found_test_tbl values (5);
-    end if;
-
-    -- never executes the loop
-    for i in 2 .. 1 loop
-        -- no need to do anything
-    end loop;
-    if not FOUND then
-        insert into found_test_tbl values (6);
-    end if;
-    return true;
-    end;' language plpgsql;
-select test_found();
- test_found
------------
- t
-(1 row)
-
-select * from found_test_tbl;
- a
------
- 2
- 100
- 3
- 4
- 5
- 6
-(6 rows)
-
---
--- Test set-returning functions for PL/pgSQL
---
-create function test_table_func_rec() returns setof found_test_tbl as '
-DECLARE
-    rec RECORD;
-BEGIN
-    FOR rec IN select * from found_test_tbl LOOP
-        RETURN NEXT rec;
-    END LOOP;
-    RETURN;
-END;' language plpgsql;
-select * from test_table_func_rec();
- a
------
- 2
- 100
- 3
- 4
- 5
- 6
-(6 rows)
-
-create function test_table_func_row() returns setof found_test_tbl as '
-DECLARE
-    row found_test_tbl%ROWTYPE;
-BEGIN
-    FOR row IN select * from found_test_tbl LOOP
-        RETURN NEXT row;
-    END LOOP;
-    RETURN;
-END;' language plpgsql;
-select * from test_table_func_row();
- a
------
- 2
- 100
- 3
- 4
- 5
- 6
-(6 rows)
-
-create function test_ret_set_scalar(int,int) returns setof int as '
-DECLARE
-    i int;
-BEGIN
-    FOR i IN $1 .. $2 LOOP
-        RETURN NEXT i + 1;
-    END LOOP;
-    RETURN;
-END;' language plpgsql;
-select * from test_ret_set_scalar(1,10);
- test_ret_set_scalar
---------------------
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
-(10 rows)
-
-create function test_ret_set_rec_dyn(int) returns setof record as '
-DECLARE
-    retval RECORD;
-BEGIN
-    IF $1 > 10 THEN
-        SELECT INTO retval 5, 10, 15;
-        RETURN NEXT retval;
-        RETURN NEXT retval;
-    ELSE
-        SELECT INTO retval 50, 5::numeric, ''xxx''::text;
-        RETURN NEXT retval;
-        RETURN NEXT retval;
-    END IF;
-    RETURN;
-END;' language plpgsql;
-SELECT * FROM test_ret_set_rec_dyn(1500) AS (a int, b int, c int);
- a | b  | c
----+----+----
- 5 | 10 | 15
- 5 | 10 | 15
-(2 rows)
-
-SELECT * FROM test_ret_set_rec_dyn(5) AS (a int, b numeric, c text);
- a  | b | c
-----+---+-----
- 50 | 5 | xxx
- 50 | 5 | xxx
-(2 rows)
-
-create function test_ret_rec_dyn(int) returns record as '
-DECLARE
-    retval RECORD;
-BEGIN
-    IF $1 > 10 THEN
-        SELECT INTO retval 5, 10, 15;
-        RETURN retval;
-    ELSE
-        SELECT INTO retval 50, 5::numeric, ''xxx''::text;
-        RETURN retval;
-    END IF;
-END;' language plpgsql;
-SELECT * FROM test_ret_rec_dyn(1500) AS (a int, b int, c int);
- a | b  | c
----+----+----
- 5 | 10 | 15
-(1 row)
-
-SELECT * FROM test_ret_rec_dyn(5) AS (a int, b numeric, c text);
- a  | b | c
-----+---+-----
- 50 | 5 | xxx
-(1 row)
-
---
--- Test some simple polymorphism cases.
---
-create function f1(x anyelement) returns anyelement as $$
-begin
-    return x + 1;
-end$$ language plpgsql;
-select f1(42) as int, f1(4.5) as num;
- int | num
------+-----
- 43 | 5.5
-(1 row)
-
-select f1(point(3,4)); -- fail for lack of + operator
-ERROR: operator does not exist: point + integer
-LINE 1: x + 1
-        ^
-HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
-QUERY: x + 1
-CONTEXT: PL/pgSQL function f1(anyelement) line 3 at RETURN
-drop function f1(x anyelement);
-create function f1(x anyelement) returns anyarray as $$
-begin
-    return array[x + 1, x + 2];
-end$$ language plpgsql;
-select f1(42) as int, f1(4.5) as num;
-   int   |    num
----------+-----------
- {43,44} | {5.5,6.5}
-(1 row)
-
-drop function f1(x anyelement);
-create function f1(x anyarray) returns anyelement as $$
-begin
-    return x[1];
-end$$ language plpgsql;
-select f1(array[2,4]) as int, f1(array[4.5, 7.7]) as num;
- int | num
------+-----
- 2 | 4.5
-(1 row)
-
-select f1(stavalues1) from pg_statistic; -- fail, can't infer element type
-ERROR: cannot determine element type of "anyarray" argument
-drop function f1(x anyarray);
-create function f1(x anyarray) returns anyarray as $$
-begin
-    return x;
-end$$ language plpgsql;
-select f1(array[2,4]) as int, f1(array[4.5, 7.7]) as num;
-  int  |    num
--------+-----------
- {2,4} | {4.5,7.7}
-(1 row)
-
-select f1(stavalues1) from pg_statistic; -- fail, can't infer element type
-ERROR: PL/pgSQL functions cannot accept type anyarray
-CONTEXT: compilation of PL/pgSQL function "f1" near line 1
-drop function f1(x anyarray);
--- fail, can't infer type:
-create function f1(x anyelement) returns anyrange as $$
-begin
-    return array[x + 1, x + 2];
-end$$ language plpgsql;
-ERROR: cannot determine result data type
-DETAIL: A result of type anyrange requires at least one input of type anyrange or anymultirange.
-create function f1(x anyrange) returns anyarray as $$
-begin
-    return array[lower(x), upper(x)];
-end$$ language plpgsql;
-select f1(int4range(42, 49)) as int, f1(float8range(4.5, 7.8)) as num;
-   int   |    num
----------+-----------
- {42,49} | {4.5,7.8}
-(1 row)
-
-drop function f1(x anyrange);
-create function f1(x anycompatible, y anycompatible) returns anycompatiblearray as $$
-begin
-    return array[x, y];
-end$$ language plpgsql;
-select f1(2, 4) as int, f1(2, 4.5) as num;
-  int  |   num
--------+---------
- {2,4} | {2,4.5}
-(1 row)
-
-drop function f1(x anycompatible, y anycompatible);
-create function f1(x anycompatiblerange, y anycompatible, z anycompatible) returns anycompatiblearray as $$
-begin
-    return array[lower(x), upper(x), y, z];
-end$$ language plpgsql;
-select f1(int4range(42, 49), 11, 2::smallint) as int, f1(float8range(4.5, 7.8), 7.8, 11::real) as num;
-     int      |       num
---------------+------------------
- {42,49,11,2} | {4.5,7.8,7.8,11}
-(1 row)
-
-select f1(int4range(42, 49), 11, 4.5) as fail; -- range type doesn't fit
-ERROR: function f1(int4range, integer, numeric) does not exist
-LINE 1: select f1(int4range(42, 49), 11, 4.5) as fail;
-               ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-drop function f1(x anycompatiblerange, y anycompatible, z anycompatible);
--- fail, can't infer type:
-create function f1(x anycompatible) returns anycompatiblerange as $$
-begin
-    return array[x + 1, x + 2];
-end$$ language plpgsql;
-ERROR: cannot determine result data type
-DETAIL: A result of type anycompatiblerange requires at least one input of type anycompatiblerange or anycompatiblemultirange.
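The anycompatible tests above all hinge on one rule: every anycompatible-family argument is coerced to a single common type before the function body runs, and that common type also fixes the result type. A minimal sketch of the same rule, assuming a hypothetical midpoint() helper that is not in the test file:

create function midpoint(lo anycompatible, hi anycompatible)
returns anycompatible as $$
begin
    return (lo + hi) / 2;
end;
$$ language plpgsql;

select midpoint(2, 5);    -- both integer: integer division, yields 3
select midpoint(2, 5.0);  -- unified to numeric, yields 3.5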
-create function f1(x anycompatiblerange, y anycompatiblearray) returns anycompatiblerange as $$ -begin - return x; -end$$ language plpgsql; -select f1(int4range(42, 49), array[11]) as int, f1(float8range(4.5, 7.8), array[7]) as num; - int | num ----------+----------- - [42,49) | [4.5,7.8) -(1 row) - -drop function f1(x anycompatiblerange, y anycompatiblearray); -create function f1(a anyelement, b anyarray, - c anycompatible, d anycompatible, - OUT x anyarray, OUT y anycompatiblearray) -as $$ -begin - x := a || b; - y := array[c, d]; -end$$ language plpgsql; -select x, pg_typeof(x), y, pg_typeof(y) - from f1(11, array[1, 2], 42, 34.5); - x | pg_typeof | y | pg_typeof -----------+-----------+-----------+----------- - {11,1,2} | integer[] | {42,34.5} | numeric[] -(1 row) - -select x, pg_typeof(x), y, pg_typeof(y) - from f1(11, array[1, 2], point(1,2), point(3,4)); - x | pg_typeof | y | pg_typeof -----------+-----------+-------------------+----------- - {11,1,2} | integer[] | {"(1,2)","(3,4)"} | point[] -(1 row) - -select x, pg_typeof(x), y, pg_typeof(y) - from f1(11, '{1,2}', point(1,2), '(3,4)'); - x | pg_typeof | y | pg_typeof -----------+-----------+-------------------+----------- - {11,1,2} | integer[] | {"(1,2)","(3,4)"} | point[] -(1 row) - -select x, pg_typeof(x), y, pg_typeof(y) - from f1(11, array[1, 2.2], 42, 34.5); -- fail -ERROR: function f1(integer, numeric[], integer, numeric) does not exist -LINE 2: from f1(11, array[1, 2.2], 42, 34.5); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -drop function f1(a anyelement, b anyarray, - c anycompatible, d anycompatible); --- --- Test handling of OUT parameters, including polymorphic cases. --- Note that RETURN is optional with OUT params; we try both ways. 
--- --- wrong way to do it: -create function f1(in i int, out j int) returns int as $$ -begin - return i+1; -end$$ language plpgsql; -ERROR: RETURN cannot have a parameter in function with OUT parameters -LINE 3: return i+1; - ^ -create function f1(in i int, out j int) as $$ -begin - j := i+1; - return; -end$$ language plpgsql; -select f1(42); - f1 ----- - 43 -(1 row) - -select * from f1(42); - j ----- - 43 -(1 row) - -create or replace function f1(inout i int) as $$ -begin - i := i+1; -end$$ language plpgsql; -select f1(42); - f1 ----- - 43 -(1 row) - -select * from f1(42); - i ----- - 43 -(1 row) - -drop function f1(int); -create function f1(in i int, out j int) returns setof int as $$ -begin - j := i+1; - return next; - j := i+2; - return next; - return; -end$$ language plpgsql; -select * from f1(42); - j ----- - 43 - 44 -(2 rows) - -drop function f1(int); -create function f1(in i int, out j int, out k text) as $$ -begin - j := i; - j := j+1; - k := 'foo'; -end$$ language plpgsql; -select f1(42); - f1 ----------- - (43,foo) -(1 row) - -select * from f1(42); - j | k -----+----- - 43 | foo -(1 row) - -drop function f1(int); -create function f1(in i int, out j int, out k text) returns setof record as $$ -begin - j := i+1; - k := 'foo'; - return next; - j := j+1; - k := 'foot'; - return next; -end$$ language plpgsql; -select * from f1(42); - j | k -----+------ - 43 | foo - 44 | foot -(2 rows) - -drop function f1(int); -create function duplic(in i anyelement, out j anyelement, out k anyarray) as $$ -begin - j := i; - k := array[j,j]; - return; -end$$ language plpgsql; -select * from duplic(42); - j | k -----+--------- - 42 | {42,42} -(1 row) - -select * from duplic('foo'::text); - j | k ------+----------- - foo | {foo,foo} -(1 row) - -drop function duplic(anyelement); -create function duplic(in i anycompatiblerange, out j anycompatible, out k anycompatiblearray) as $$ -begin - j := lower(i); - k := array[lower(i),upper(i)]; - return; -end$$ language plpgsql; -select * from duplic(int4range(42,49)); - j | k -----+--------- - 42 | {42,49} -(1 row) - -select * from duplic(textrange('aaa', 'bbb')); - j | k ------+----------- - aaa | {aaa,bbb} -(1 row) - -drop function duplic(anycompatiblerange); --- --- test PERFORM --- -create table perform_test ( - a INT, - b INT -); -create function perform_simple_func(int) returns boolean as ' -BEGIN - IF $1 < 20 THEN - INSERT INTO perform_test VALUES ($1, $1 + 10); - RETURN TRUE; - ELSE - RETURN FALSE; - END IF; -END;' language plpgsql; -create function perform_test_func() returns void as ' -BEGIN - IF FOUND then - INSERT INTO perform_test VALUES (100, 100); - END IF; - - PERFORM perform_simple_func(5); - - IF FOUND then - INSERT INTO perform_test VALUES (100, 100); - END IF; - - PERFORM perform_simple_func(50); - - IF FOUND then - INSERT INTO perform_test VALUES (100, 100); - END IF; - - RETURN; -END;' language plpgsql; -SELECT perform_test_func(); - perform_test_func -------------------- - -(1 row) - -SELECT * FROM perform_test; - a | b ------+----- - 5 | 15 - 100 | 100 - 100 | 100 -(3 rows) - -drop table perform_test; --- --- Test proper snapshot handling in simple expressions --- -create temp table users(login text, id serial); -create function sp_id_user(a_login text) returns int as $$ -declare x int; -begin - select into x id from users where login = a_login; - if found then return x; end if; - return 0; -end$$ language plpgsql stable; -insert into users values('user1'); -select sp_id_user('user1'); - sp_id_user ------------- - 1 -(1 row) - -select 
sp_id_user('userx'); - sp_id_user ------------- - 0 -(1 row) - -create function sp_add_user(a_login text) returns int as $$ -declare my_id_user int; -begin - my_id_user = sp_id_user( a_login ); - IF my_id_user > 0 THEN - RETURN -1; -- error code for existing user - END IF; - INSERT INTO users ( login ) VALUES ( a_login ); - my_id_user = sp_id_user( a_login ); - IF my_id_user = 0 THEN - RETURN -2; -- error code for insertion failure - END IF; - RETURN my_id_user; -end$$ language plpgsql; -select sp_add_user('user1'); - sp_add_user -------------- - -1 -(1 row) - -select sp_add_user('user2'); - sp_add_user -------------- - 2 -(1 row) - -select sp_add_user('user2'); - sp_add_user -------------- - -1 -(1 row) - -select sp_add_user('user3'); - sp_add_user -------------- - 3 -(1 row) - -select sp_add_user('user3'); - sp_add_user -------------- - -1 -(1 row) - -drop function sp_add_user(text); -drop function sp_id_user(text); --- --- tests for refcursors --- -create table rc_test (a int, b int); -copy rc_test from stdin; -create function return_unnamed_refcursor() returns refcursor as $$ -declare - rc refcursor; -begin - open rc for select a from rc_test; - return rc; -end -$$ language plpgsql; -create function use_refcursor(rc refcursor) returns int as $$ -declare - rc refcursor; - x record; -begin - rc := return_unnamed_refcursor(); - fetch next from rc into x; - return x.a; -end -$$ language plpgsql; -select use_refcursor(return_unnamed_refcursor()); - use_refcursor ---------------- - 5 -(1 row) - -create function return_refcursor(rc refcursor) returns refcursor as $$ -begin - open rc for select a from rc_test; - return rc; -end -$$ language plpgsql; -create function refcursor_test1(refcursor) returns refcursor as $$ -begin - perform return_refcursor($1); - return $1; -end -$$ language plpgsql; -begin; -select refcursor_test1('test1'); - refcursor_test1 ------------------ - test1 -(1 row) - -fetch next in test1; - a ---- - 5 -(1 row) - -select refcursor_test1('test2'); - refcursor_test1 ------------------ - test2 -(1 row) - -fetch all from test2; - a ------ - 5 - 50 - 500 -(3 rows) - -commit; --- should fail -fetch next from test1; -ERROR: cursor "test1" does not exist -create function refcursor_test2(int, int) returns boolean as $$ -declare - c1 cursor (param1 int, param2 int) for select * from rc_test where a > param1 and b > param2; - nonsense record; -begin - open c1($1, $2); - fetch c1 into nonsense; - close c1; - if found then - return true; - else - return false; - end if; -end -$$ language plpgsql; -select refcursor_test2(20000, 20000) as "Should be false", - refcursor_test2(20, 20) as "Should be true"; - Should be false | Should be true ------------------+---------------- - f | t -(1 row) - --- should fail -create function constant_refcursor() returns refcursor as $$ -declare - rc constant refcursor; -begin - open rc for select a from rc_test; - return rc; -end -$$ language plpgsql; -select constant_refcursor(); -ERROR: variable "rc" is declared CONSTANT -CONTEXT: PL/pgSQL function constant_refcursor() line 5 at OPEN --- but it's okay like this -create or replace function constant_refcursor() returns refcursor as $$ -declare - rc constant refcursor := 'my_cursor_name'; -begin - open rc for select a from rc_test; - return rc; -end -$$ language plpgsql; -select constant_refcursor(); - constant_refcursor --------------------- - my_cursor_name -(1 row) - --- --- tests for cursors with named parameter arguments --- -create function namedparmcursor_test1(int, int) returns boolean as $$ 
-declare - c1 cursor (param1 int, param12 int) for select * from rc_test where a > param1 and b > param12; - nonsense record; -begin - open c1(param12 := $2, param1 := $1); - fetch c1 into nonsense; - close c1; - if found then - return true; - else - return false; - end if; -end -$$ language plpgsql; -select namedparmcursor_test1(20000, 20000) as "Should be false", - namedparmcursor_test1(20, 20) as "Should be true"; - Should be false | Should be true ------------------+---------------- - f | t -(1 row) - --- mixing named and positional argument notations -create function namedparmcursor_test2(int, int) returns boolean as $$ -declare - c1 cursor (param1 int, param2 int) for select * from rc_test where a > param1 and b > param2; - nonsense record; -begin - open c1(param1 := $1, $2); - fetch c1 into nonsense; - close c1; - if found then - return true; - else - return false; - end if; -end -$$ language plpgsql; -select namedparmcursor_test2(20, 20); - namedparmcursor_test2 ------------------------ - t -(1 row) - --- mixing named and positional: param2 is given twice, once in named notation --- and second time in positional notation. Should throw an error at parse time -create function namedparmcursor_test3() returns void as $$ -declare - c1 cursor (param1 int, param2 int) for select * from rc_test where a > param1 and b > param2; -begin - open c1(param2 := 20, 21); -end -$$ language plpgsql; -ERROR: value for parameter "param2" of cursor "c1" specified more than once -LINE 5: open c1(param2 := 20, 21); - ^ --- mixing named and positional: same as previous test, but param1 is duplicated -create function namedparmcursor_test4() returns void as $$ -declare - c1 cursor (param1 int, param2 int) for select * from rc_test where a > param1 and b > param2; -begin - open c1(20, param1 := 21); -end -$$ language plpgsql; -ERROR: value for parameter "param1" of cursor "c1" specified more than once -LINE 5: open c1(20, param1 := 21); - ^ --- duplicate named parameter, should throw an error at parse time -create function namedparmcursor_test5() returns void as $$ -declare - c1 cursor (p1 int, p2 int) for - select * from tenk1 where thousand = p1 and tenthous = p2; -begin - open c1 (p2 := 77, p2 := 42); -end -$$ language plpgsql; -ERROR: value for parameter "p2" of cursor "c1" specified more than once -LINE 6: open c1 (p2 := 77, p2 := 42); - ^ --- not enough parameters, should throw an error at parse time -create function namedparmcursor_test6() returns void as $$ -declare - c1 cursor (p1 int, p2 int) for - select * from tenk1 where thousand = p1 and tenthous = p2; -begin - open c1 (p2 := 77); -end -$$ language plpgsql; -ERROR: not enough arguments for cursor "c1" -LINE 6: open c1 (p2 := 77); - ^ --- division by zero runtime error, the context given in the error message --- should be sensible -create function namedparmcursor_test7() returns void as $$ -declare - c1 cursor (p1 int, p2 int) for - select * from tenk1 where thousand = p1 and tenthous = p2; -begin - open c1 (p2 := 77, p1 := 42/0); -end $$ language plpgsql; -select namedparmcursor_test7(); -ERROR: division by zero -CONTEXT: PL/pgSQL expression "42/0 AS p1, 77 AS p2" -PL/pgSQL function namedparmcursor_test7() line 6 at OPEN --- check that line comments work correctly within the argument list --- (this used to require a special hack in the code; it no longer does, --- but let's keep the test anyway) -create function namedparmcursor_test8() returns int4 as $$ -declare - c1 cursor (p1 int, p2 int) for - select count(*) from tenk1 where thousand = p1 
and tenthous = p2; - n int4; -begin - open c1 (77 -- test - , 42); - fetch c1 into n; - return n; -end $$ language plpgsql; -select namedparmcursor_test8(); - namedparmcursor_test8 ------------------------ - 0 -(1 row) - --- cursor parameter name can match plpgsql variable or unreserved keyword -create function namedparmcursor_test9(p1 int) returns int4 as $$ -declare - c1 cursor (p1 int, p2 int, debug int) for - select count(*) from tenk1 where thousand = p1 and tenthous = p2 - and four = debug; - p2 int4 := 1006; - n int4; -begin - open c1 (p1 := p1, p2 := p2, debug := 2); - fetch c1 into n; - return n; -end $$ language plpgsql; -select namedparmcursor_test9(6); - namedparmcursor_test9 ------------------------ - 1 -(1 row) - --- --- tests for "raise" processing --- -create function raise_test1(int) returns int as $$ -begin - raise notice 'This message has too many parameters!', $1; - return $1; -end; -$$ language plpgsql; -ERROR: too many parameters specified for RAISE -CONTEXT: compilation of PL/pgSQL function "raise_test1" near line 3 -create function raise_test2(int) returns int as $$ -begin - raise notice 'This message has too few parameters: %, %, %', $1, $1; - return $1; -end; -$$ language plpgsql; -ERROR: too few parameters specified for RAISE -CONTEXT: compilation of PL/pgSQL function "raise_test2" near line 3 -create function raise_test3(int) returns int as $$ -begin - raise notice 'This message has no parameters (despite having %% signs in it)!'; - return $1; -end; -$$ language plpgsql; -select raise_test3(1); -NOTICE: This message has no parameters (despite having % signs in it)! - raise_test3 -------------- - 1 -(1 row) - --- Test re-RAISE inside a nested exception block. This case is allowed --- by Oracle's PL/SQL but was handled differently by PG before 9.1. 
-CREATE FUNCTION reraise_test() RETURNS void AS $$ -BEGIN - BEGIN - RAISE syntax_error; - EXCEPTION - WHEN syntax_error THEN - BEGIN - raise notice 'exception % thrown in inner block, reraising', sqlerrm; - RAISE; - EXCEPTION - WHEN OTHERS THEN - raise notice 'RIGHT - exception % caught in inner block', sqlerrm; - END; - END; -EXCEPTION - WHEN OTHERS THEN - raise notice 'WRONG - exception % caught in outer block', sqlerrm; -END; -$$ LANGUAGE plpgsql; -SELECT reraise_test(); -NOTICE: exception syntax_error thrown in inner block, reraising -NOTICE: RIGHT - exception syntax_error caught in inner block - reraise_test --------------- - -(1 row) - --- --- reject function definitions that contain malformed SQL queries at --- compile-time, where possible --- -create function bad_sql1() returns int as $$ -declare a int; -begin - a := 5; - Johnny Yuma; - a := 10; - return a; -end$$ language plpgsql; -ERROR: syntax error at or near "Johnny" -LINE 5: Johnny Yuma; - ^ -create function bad_sql2() returns int as $$ -declare r record; -begin - for r in select I fought the law, the law won LOOP - raise notice 'in loop'; - end loop; - return 5; -end;$$ language plpgsql; -ERROR: syntax error at or near "the" -LINE 4: for r in select I fought the law, the law won LOOP - ^ --- a RETURN expression is mandatory, except for void-returning --- functions, where it is not allowed -create function missing_return_expr() returns int as $$ -begin - return ; -end;$$ language plpgsql; -ERROR: missing expression at or near ";" -LINE 3: return ; - ^ -create function void_return_expr() returns void as $$ -begin - return 5; -end;$$ language plpgsql; -ERROR: RETURN cannot have a parameter in function returning void -LINE 3: return 5; - ^ --- VOID functions are allowed to omit RETURN -create function void_return_expr() returns void as $$ -begin - perform 2+2; -end;$$ language plpgsql; -select void_return_expr(); - void_return_expr ------------------- - -(1 row) - --- but ordinary functions are not -create function missing_return_expr() returns int as $$ -begin - perform 2+2; -end;$$ language plpgsql; -select missing_return_expr(); -ERROR: control reached end of function without RETURN -CONTEXT: PL/pgSQL function missing_return_expr() -drop function void_return_expr(); -drop function missing_return_expr(); --- --- EXECUTE ... 
INTO test --- -create table eifoo (i integer, y integer); -create type eitype as (i integer, y integer); -create or replace function execute_into_test(varchar) returns record as $$ -declare - _r record; - _rt eifoo%rowtype; - _v eitype; - i int; - j int; - k int; -begin - execute 'insert into '||$1||' values(10,15)'; - execute 'select (row).* from (select row(10,1)::eifoo) s' into _r; - raise notice '% %', _r.i, _r.y; - execute 'select * from '||$1||' limit 1' into _rt; - raise notice '% %', _rt.i, _rt.y; - execute 'select *, 20 from '||$1||' limit 1' into i, j, k; - raise notice '% % %', i, j, k; - execute 'select 1,2' into _v; - return _v; -end; $$ language plpgsql; -select execute_into_test('eifoo'); -NOTICE: 10 1 -NOTICE: 10 15 -NOTICE: 10 15 20 - execute_into_test -------------------- - (1,2) -(1 row) - -drop table eifoo cascade; -drop type eitype cascade; --- --- SQLSTATE and SQLERRM test --- -create function excpt_test1() returns void as $$ -begin - raise notice '% %', sqlstate, sqlerrm; -end; $$ language plpgsql; --- should fail: SQLSTATE and SQLERRM are only defined in EXCEPTION --- blocks -select excpt_test1(); -ERROR: column "sqlstate" does not exist -LINE 1: sqlstate - ^ -QUERY: sqlstate -CONTEXT: PL/pgSQL function excpt_test1() line 3 at RAISE -create function excpt_test2() returns void as $$ -begin - begin - begin - raise notice '% %', sqlstate, sqlerrm; - end; - end; -end; $$ language plpgsql; --- should fail -select excpt_test2(); -ERROR: column "sqlstate" does not exist -LINE 1: sqlstate - ^ -QUERY: sqlstate -CONTEXT: PL/pgSQL function excpt_test2() line 5 at RAISE -create function excpt_test3() returns void as $$ -begin - begin - raise exception 'user exception'; - exception when others then - raise notice 'caught exception % %', sqlstate, sqlerrm; - begin - raise notice '% %', sqlstate, sqlerrm; - perform 10/0; - exception - when substring_error then - -- this exception handler shouldn't be invoked - raise notice 'unexpected exception: % %', sqlstate, sqlerrm; - when division_by_zero then - raise notice 'caught exception % %', sqlstate, sqlerrm; - end; - raise notice '% %', sqlstate, sqlerrm; - end; -end; $$ language plpgsql; -select excpt_test3(); -NOTICE: caught exception P0001 user exception -NOTICE: P0001 user exception -NOTICE: caught exception 22012 division by zero -NOTICE: P0001 user exception - excpt_test3 -------------- - -(1 row) - -create function excpt_test4() returns text as $$ -begin - begin perform 1/0; - exception when others then return sqlerrm; end; -end; $$ language plpgsql; -select excpt_test4(); - excpt_test4 ------------------- - division by zero -(1 row) - -drop function excpt_test1(); -drop function excpt_test2(); -drop function excpt_test3(); -drop function excpt_test4(); --- parameters of raise stmt can be expressions -create function raise_exprs() returns void as $$ -declare - a integer[] = '{10,20,30}'; - c varchar = 'xyz'; - i integer; -begin - i := 2; - raise notice '%; %; %; %; %; %', a, a[i], c, (select c || 'abc'), row(10,'aaa',NULL,30), NULL; -end;$$ language plpgsql; -select raise_exprs(); -NOTICE: {10,20,30}; 20; xyz; xyzabc; (10,aaa,,30); - raise_exprs -------------- - -(1 row) - -drop function raise_exprs(); --- regression test: verify that multiple uses of same plpgsql datum within --- a SQL command all get mapped to the same $n parameter. The return value --- of the SELECT is not important, we only care that it doesn't fail with --- a complaint about an ungrouped column reference.
-create function multi_datum_use(p1 int) returns bool as $$ -declare - x int; - y int; -begin - select into x,y unique1/p1, unique1/$1 from tenk1 group by unique1/p1; - return x = y; -end$$ language plpgsql; -select multi_datum_use(42); - multi_datum_use ------------------ - t -(1 row) - --- --- Test STRICT limiter in both planned and EXECUTE invocations. --- Note that a data-modifying query is quasi strict (disallow multi rows) --- by default in the planned case, but not in EXECUTE. --- -create temp table foo (f1 int, f2 int); -insert into foo values (1,2), (3,4); -create or replace function stricttest() returns void as $$ -declare x record; -begin - -- should work - insert into foo values(5,6) returning * into x; - raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; -end$$ language plpgsql; -select stricttest(); -NOTICE: x.f1 = 5, x.f2 = 6 - stricttest ------------- - -(1 row) - -create or replace function stricttest() returns void as $$ -declare x record; -begin - -- should fail due to implicit strict - insert into foo values(7,8),(9,10) returning * into x; - raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; -end$$ language plpgsql; -select stricttest(); -ERROR: query returned more than one row -HINT: Make sure the query returns a single row, or use LIMIT 1. -CONTEXT: PL/pgSQL function stricttest() line 5 at SQL statement -create or replace function stricttest() returns void as $$ -declare x record; -begin - -- should work - execute 'insert into foo values(5,6) returning *' into x; - raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; -end$$ language plpgsql; -select stricttest(); -NOTICE: x.f1 = 5, x.f2 = 6 - stricttest ------------- - -(1 row) - -create or replace function stricttest() returns void as $$ -declare x record; -begin - -- this should work since EXECUTE isn't as picky - execute 'insert into foo values(7,8),(9,10) returning *' into x; - raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; -end$$ language plpgsql; -select stricttest(); -NOTICE: x.f1 = 7, x.f2 = 8 - stricttest ------------- - -(1 row) - -select * from foo; - f1 | f2 -----+---- - 1 | 2 - 3 | 4 - 5 | 6 - 5 | 6 - 7 | 8 - 9 | 10 -(6 rows) - -create or replace function stricttest() returns void as $$ -declare x record; -begin - -- should work - select * from foo where f1 = 3 into strict x; - raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; -end$$ language plpgsql; -select stricttest(); -NOTICE: x.f1 = 3, x.f2 = 4 - stricttest ------------- - -(1 row) - -create or replace function stricttest() returns void as $$ -declare x record; -begin - -- should fail, no rows - select * from foo where f1 = 0 into strict x; - raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; -end$$ language plpgsql; -select stricttest(); -ERROR: query returned no rows -CONTEXT: PL/pgSQL function stricttest() line 5 at SQL statement -create or replace function stricttest() returns void as $$ -declare x record; -begin - -- should fail, too many rows - select * from foo where f1 > 3 into strict x; - raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; -end$$ language plpgsql; -select stricttest(); -ERROR: query returned more than one row -HINT: Make sure the query returns a single row, or use LIMIT 1. 
-CONTEXT: PL/pgSQL function stricttest() line 5 at SQL statement -create or replace function stricttest() returns void as $$ -declare x record; -begin - -- should work - execute 'select * from foo where f1 = 3' into strict x; - raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; -end$$ language plpgsql; -select stricttest(); -NOTICE: x.f1 = 3, x.f2 = 4 - stricttest ------------- - -(1 row) - -create or replace function stricttest() returns void as $$ -declare x record; -begin - -- should fail, no rows - execute 'select * from foo where f1 = 0' into strict x; - raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; -end$$ language plpgsql; -select stricttest(); -ERROR: query returned no rows -CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE -create or replace function stricttest() returns void as $$ -declare x record; -begin - -- should fail, too many rows - execute 'select * from foo where f1 > 3' into strict x; - raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; -end$$ language plpgsql; -select stricttest(); -ERROR: query returned more than one row -CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE -drop function stricttest(); --- test printing parameters after failure due to STRICT -set plpgsql.print_strict_params to true; -create or replace function stricttest() returns void as $$ -declare -x record; -p1 int := 2; -p3 text := 'foo'; -begin - -- no rows - select * from foo where f1 = p1 and f1::text = p3 into strict x; - raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; -end$$ language plpgsql; -select stricttest(); -ERROR: query returned no rows -DETAIL: parameters: p1 = '2', p3 = 'foo' -CONTEXT: PL/pgSQL function stricttest() line 8 at SQL statement -create or replace function stricttest() returns void as $$ -declare -x record; -p1 int := 2; -p3 text := $a$'Valame Dios!' dijo Sancho; 'no le dije yo a vuestra merced que mirase bien lo que hacia?'$a$; -begin - -- no rows - select * from foo where f1 = p1 and f1::text = p3 into strict x; - raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; -end$$ language plpgsql; -select stricttest(); -ERROR: query returned no rows -DETAIL: parameters: p1 = '2', p3 = '''Valame Dios!'' dijo Sancho; ''no le dije yo a vuestra merced que mirase bien lo que hacia?''' -CONTEXT: PL/pgSQL function stricttest() line 8 at SQL statement -create or replace function stricttest() returns void as $$ -declare -x record; -p1 int := 2; -p3 text := 'foo'; -begin - -- too many rows - select * from foo where f1 > p1 or f1::text = p3 into strict x; - raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; -end$$ language plpgsql; -select stricttest(); -ERROR: query returned more than one row -DETAIL: parameters: p1 = '2', p3 = 'foo' -HINT: Make sure the query returns a single row, or use LIMIT 1. -CONTEXT: PL/pgSQL function stricttest() line 8 at SQL statement -create or replace function stricttest() returns void as $$ -declare x record; -begin - -- too many rows, no params - select * from foo where f1 > 3 into strict x; - raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; -end$$ language plpgsql; -select stricttest(); -ERROR: query returned more than one row -HINT: Make sure the query returns a single row, or use LIMIT 1. 
-CONTEXT: PL/pgSQL function stricttest() line 5 at SQL statement -create or replace function stricttest() returns void as $$ -declare x record; -begin - -- no rows - execute 'select * from foo where f1 = $1 or f1::text = $2' using 0, 'foo' into strict x; - raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; -end$$ language plpgsql; -select stricttest(); -ERROR: query returned no rows -DETAIL: parameters: $1 = '0', $2 = 'foo' -CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE -create or replace function stricttest() returns void as $$ -declare x record; -begin - -- too many rows - execute 'select * from foo where f1 > $1' using 1 into strict x; - raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; -end$$ language plpgsql; -select stricttest(); -ERROR: query returned more than one row -DETAIL: parameters: $1 = '1' -CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE -create or replace function stricttest() returns void as $$ -declare x record; -begin - -- too many rows, no parameters - execute 'select * from foo where f1 > 3' into strict x; - raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; -end$$ language plpgsql; -select stricttest(); -ERROR: query returned more than one row -CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE -create or replace function stricttest() returns void as $$ --- override the global -#print_strict_params off -declare -x record; -p1 int := 2; -p3 text := 'foo'; -begin - -- too many rows - select * from foo where f1 > p1 or f1::text = p3 into strict x; - raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; -end$$ language plpgsql; -select stricttest(); -ERROR: query returned more than one row -HINT: Make sure the query returns a single row, or use LIMIT 1. -CONTEXT: PL/pgSQL function stricttest() line 10 at SQL statement -reset plpgsql.print_strict_params; -create or replace function stricttest() returns void as $$ --- override the global -#print_strict_params on -declare -x record; -p1 int := 2; -p3 text := 'foo'; -begin - -- too many rows - select * from foo where f1 > p1 or f1::text = p3 into strict x; - raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; -end$$ language plpgsql; -select stricttest(); -ERROR: query returned more than one row -DETAIL: parameters: p1 = '2', p3 = 'foo' -HINT: Make sure the query returns a single row, or use LIMIT 1. 
-CONTEXT: PL/pgSQL function stricttest() line 10 at SQL statement --- test warnings and errors -set plpgsql.extra_warnings to 'all'; -set plpgsql.extra_warnings to 'none'; -set plpgsql.extra_errors to 'all'; -set plpgsql.extra_errors to 'none'; --- test warnings when shadowing a variable -set plpgsql.extra_warnings to 'shadowed_variables'; --- simple shadowing of input and output parameters -create or replace function shadowtest(in1 int) - returns table (out1 int) as $$ -declare -in1 int; -out1 int; -begin -end -$$ language plpgsql; -WARNING: variable "in1" shadows a previously defined variable -LINE 4: in1 int; - ^ -WARNING: variable "out1" shadows a previously defined variable -LINE 5: out1 int; - ^ -select shadowtest(1); - shadowtest ------------- -(0 rows) - -set plpgsql.extra_warnings to 'shadowed_variables'; -select shadowtest(1); - shadowtest ------------- -(0 rows) - -create or replace function shadowtest(in1 int) - returns table (out1 int) as $$ -declare -in1 int; -out1 int; -begin -end -$$ language plpgsql; -WARNING: variable "in1" shadows a previously defined variable -LINE 4: in1 int; - ^ -WARNING: variable "out1" shadows a previously defined variable -LINE 5: out1 int; - ^ -select shadowtest(1); - shadowtest ------------- -(0 rows) - -drop function shadowtest(int); --- shadowing in a second DECLARE block -create or replace function shadowtest() - returns void as $$ -declare -f1 int; -begin - declare - f1 int; - begin - end; -end$$ language plpgsql; -WARNING: variable "f1" shadows a previously defined variable -LINE 7: f1 int; - ^ -drop function shadowtest(); --- several levels of shadowing -create or replace function shadowtest(in1 int) - returns void as $$ -declare -in1 int; -begin - declare - in1 int; - begin - end; -end$$ language plpgsql; -WARNING: variable "in1" shadows a previously defined variable -LINE 4: in1 int; - ^ -WARNING: variable "in1" shadows a previously defined variable -LINE 7: in1 int; - ^ -drop function shadowtest(int); --- shadowing in cursor definitions -create or replace function shadowtest() - returns void as $$ -declare -f1 int; -c1 cursor (f1 int) for select 1; -begin -end$$ language plpgsql; -WARNING: variable "f1" shadows a previously defined variable -LINE 5: c1 cursor (f1 int) for select 1; - ^ -drop function shadowtest(); --- test errors when shadowing a variable -set plpgsql.extra_errors to 'shadowed_variables'; -create or replace function shadowtest(f1 int) - returns boolean as $$ -declare f1 int; begin return 1; end $$ language plpgsql; -ERROR: variable "f1" shadows a previously defined variable -LINE 3: declare f1 int; begin return 1; end $$ language plpgsql; - ^ -select shadowtest(1); -ERROR: function shadowtest(integer) does not exist -LINE 1: select shadowtest(1); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -reset plpgsql.extra_errors; -reset plpgsql.extra_warnings; -create or replace function shadowtest(f1 int) - returns boolean as $$ -declare f1 int; begin return 1; end $$ language plpgsql; -select shadowtest(1); - shadowtest ------------- - t -(1 row) - --- runtime extra checks -set plpgsql.extra_warnings to 'too_many_rows'; -do $$ -declare x int; -begin - select v from generate_series(1,2) g(v) into x; -end; -$$; -WARNING: query returned more than one row -HINT: Make sure the query returns a single row, or use LIMIT 1. 
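The HINT emitted above already names the two standard ways to keep a single-row assignment from tripping the too_many_rows check. A minimal sketch of both, using only the stock generate_series() function; this block is an illustration, not part of the expected output:

do $$
declare x int;
begin
  -- INTO STRICT raises an error unless the query yields exactly one row
  select v into strict x from generate_series(1,1) g(v);
  -- alternatively, pin the row count explicitly with ORDER BY ... LIMIT 1
  select v into x from generate_series(1,2) g(v) order by v limit 1;
  raise notice 'x = %', x;
end;
$$;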
-set plpgsql.extra_errors to 'too_many_rows'; -do $$ -declare x int; -begin - select v from generate_series(1,2) g(v) into x; -end; -$$; -ERROR: query returned more than one row -HINT: Make sure the query returns a single row, or use LIMIT 1. -CONTEXT: PL/pgSQL function inline_code_block line 4 at SQL statement -reset plpgsql.extra_errors; -reset plpgsql.extra_warnings; -set plpgsql.extra_warnings to 'strict_multi_assignment'; -do $$ -declare - x int; - y int; -begin - select 1 into x, y; - select 1,2 into x, y; - select 1,2,3 into x, y; -end -$$; -WARNING: number of source and target fields in assignment does not match -DETAIL: strict_multi_assignment check of extra_warnings is active. -HINT: Make sure the query returns the exact list of columns. -WARNING: number of source and target fields in assignment does not match -DETAIL: strict_multi_assignment check of extra_warnings is active. -HINT: Make sure the query returns the exact list of columns. -set plpgsql.extra_errors to 'strict_multi_assignment'; -do $$ -declare - x int; - y int; -begin - select 1 into x, y; - select 1,2 into x, y; - select 1,2,3 into x, y; -end -$$; -ERROR: number of source and target fields in assignment does not match -DETAIL: strict_multi_assignment check of extra_errors is active. -HINT: Make sure the query returns the exact list of columns. -CONTEXT: PL/pgSQL function inline_code_block line 6 at SQL statement -create table test_01(a int, b int, c int); -alter table test_01 drop column a; --- the check is active only when the source table is not empty -insert into test_01 values(10,20); -do $$ -declare - x int; - y int; -begin - select * from test_01 into x, y; -- should be ok - raise notice 'ok'; - select * from test_01 into x; -- should fail -end; -$$; -NOTICE: ok -ERROR: number of source and target fields in assignment does not match -DETAIL: strict_multi_assignment check of extra_errors is active. -HINT: Make sure the query returns the exact list of columns. -CONTEXT: PL/pgSQL function inline_code_block line 8 at SQL statement -do $$ -declare - t test_01; -begin - select 1, 2 into t; -- should be ok - raise notice 'ok'; - select 1, 2, 3 into t; -- should fail -end; -$$; -NOTICE: ok -ERROR: number of source and target fields in assignment does not match -DETAIL: strict_multi_assignment check of extra_errors is active. -HINT: Make sure the query returns the exact list of columns. -CONTEXT: PL/pgSQL function inline_code_block line 7 at SQL statement -do $$ -declare - t test_01; -begin - select 1 into t; -- should fail -end; -$$; -ERROR: number of source and target fields in assignment does not match -DETAIL: strict_multi_assignment check of extra_errors is active. -HINT: Make sure the query returns the exact list of columns.
-CONTEXT: PL/pgSQL function inline_code_block line 5 at SQL statement -drop table test_01; -reset plpgsql.extra_errors; -reset plpgsql.extra_warnings; --- test scrollable cursor support -create function sc_test() returns setof integer as $$ -declare - c scroll cursor for select f1 from int4_tbl; - x integer; -begin - open c; - fetch last from c into x; - while found loop - return next x; - fetch prior from c into x; - end loop; - close c; -end; -$$ language plpgsql; -select * from sc_test(); - sc_test -------------- - -2147483647 - 2147483647 - -123456 - 123456 - 0 -(5 rows) - -create or replace function sc_test() returns setof integer as $$ -declare - c no scroll cursor for select f1 from int4_tbl; - x integer; -begin - open c; - fetch last from c into x; - while found loop - return next x; - fetch prior from c into x; - end loop; - close c; -end; -$$ language plpgsql; -select * from sc_test(); -- fails because of NO SCROLL specification -ERROR: cursor can only scan forward -HINT: Declare it with SCROLL option to enable backward scan. -CONTEXT: PL/pgSQL function sc_test() line 7 at FETCH -create or replace function sc_test() returns setof integer as $$ -declare - c refcursor; - x integer; -begin - open c scroll for select f1 from int4_tbl; - fetch last from c into x; - while found loop - return next x; - fetch prior from c into x; - end loop; - close c; -end; -$$ language plpgsql; -select * from sc_test(); - sc_test -------------- - -2147483647 - 2147483647 - -123456 - 123456 - 0 -(5 rows) - -create or replace function sc_test() returns setof integer as $$ -declare - c refcursor; - x integer; -begin - open c scroll for execute 'select f1 from int4_tbl'; - fetch last from c into x; - while found loop - return next x; - fetch relative -2 from c into x; - end loop; - close c; -end; -$$ language plpgsql; -select * from sc_test(); - sc_test -------------- - -2147483647 - -123456 - 0 -(3 rows) - -create or replace function sc_test() returns setof integer as $$ -declare - c refcursor; - x integer; -begin - open c scroll for execute 'select f1 from int4_tbl'; - fetch last from c into x; - while found loop - return next x; - move backward 2 from c; - fetch relative -1 from c into x; - end loop; - close c; -end; -$$ language plpgsql; -select * from sc_test(); - sc_test -------------- - -2147483647 - 123456 -(2 rows) - -create or replace function sc_test() returns setof integer as $$ -declare - c cursor for select * from generate_series(1, 10); - x integer; -begin - open c; - loop - move relative 2 in c; - if not found then - exit; - end if; - fetch next from c into x; - if found then - return next x; - end if; - end loop; - close c; -end; -$$ language plpgsql; -select * from sc_test(); - sc_test ---------- - 3 - 6 - 9 -(3 rows) - -create or replace function sc_test() returns setof integer as $$ -declare - c cursor for select * from generate_series(1, 10); - x integer; -begin - open c; - move forward all in c; - fetch backward from c into x; - if found then - return next x; - end if; - close c; -end; -$$ language plpgsql; -select * from sc_test(); - sc_test ---------- - 10 -(1 row) - -drop function sc_test(); --- test qualified variable names -create function pl_qual_names (param1 int) returns void as $$ -<<outerblock>> -declare - param1 int := 1; -begin - <<innerblock>> - declare - param1 int := 2; - begin - raise notice 'param1 = %', param1; - raise notice 'pl_qual_names.param1 = %', pl_qual_names.param1; - raise notice 'outerblock.param1 = %', outerblock.param1; - raise notice 'innerblock.param1 = %', innerblock.param1;
- end; -end; -$$ language plpgsql; -select pl_qual_names(42); -NOTICE: param1 = 2 -NOTICE: pl_qual_names.param1 = 42 -NOTICE: outerblock.param1 = 1 -NOTICE: innerblock.param1 = 2 - pl_qual_names ---------------- - -(1 row) - -drop function pl_qual_names(int); --- tests for RETURN QUERY -create function ret_query1(out int, out int) returns setof record as $$ -begin - $1 := -1; - $2 := -2; - return next; - return query select x + 1, x * 10 from generate_series(0, 10) s (x); - return next; -end; -$$ language plpgsql; -select * from ret_query1(); - column1 | column2 ----------+--------- - -1 | -2 - 1 | 0 - 2 | 10 - 3 | 20 - 4 | 30 - 5 | 40 - 6 | 50 - 7 | 60 - 8 | 70 - 9 | 80 - 10 | 90 - 11 | 100 - -1 | -2 -(13 rows) - -create type record_type as (x text, y int, z boolean); -create or replace function ret_query2(lim int) returns setof record_type as $$ -begin - return query select fipshash(s.x::text), s.x, s.x > 0 - from generate_series(-8, lim) s (x) where s.x % 2 = 0; -end; -$$ language plpgsql; -select * from ret_query2(8); - x | y | z -----------------------------------+----+--- - e91592205d3881e3ea35d66973bb4898 | -8 | f - 03b26944890929ff751653acb2f2af79 | -6 | f - e5e0093f285a4fb94c3fcc2ad7fd04ed | -4 | f - cf3bae39dd692048a8bf961182e6a34d | -2 | f - 5feceb66ffc86f38d952786c6d696c79 | 0 | f - d4735e3a265e16eee03f59718b9b5d03 | 2 | t - 4b227777d4dd1fc61c6f884f48641d02 | 4 | t - e7f6c011776e8db7cd330b54174fd76f | 6 | t - 2c624232cdd221771294dfbb310aca00 | 8 | t -(9 rows) - --- test EXECUTE USING -create function exc_using(int, text) returns int as $$ -declare i int; -begin - for i in execute 'select * from generate_series(1,$1)' using $1+1 loop - raise notice '%', i; - end loop; - execute 'select $2 + $2*3 + length($1)' into i using $2,$1; - return i; -end -$$ language plpgsql; -select exc_using(5, 'foobar'); -NOTICE: 1 -NOTICE: 2 -NOTICE: 3 -NOTICE: 4 -NOTICE: 5 -NOTICE: 6 - exc_using ------------ - 26 -(1 row) - -drop function exc_using(int, text); -create or replace function exc_using(int) returns void as $$ -declare - c refcursor; - i int; -begin - open c for execute 'select * from generate_series(1,$1)' using $1+1; - loop - fetch c into i; - exit when not found; - raise notice '%', i; - end loop; - close c; - return; -end; -$$ language plpgsql; -select exc_using(5); -NOTICE: 1 -NOTICE: 2 -NOTICE: 3 -NOTICE: 4 -NOTICE: 5 -NOTICE: 6 - exc_using ------------ - -(1 row) - -drop function exc_using(int); --- test FOR-over-cursor -create or replace function forc01() returns void as $$ -declare - c cursor(r1 integer, r2 integer) - for select * from generate_series(r1,r2) i; - c2 cursor - for select * from generate_series(41,43) i; -begin - -- assign portal names to cursors to get stable output - c := 'c'; - c2 := 'c2'; - for r in c(5,7) loop - raise notice '% from %', r.i, c; - end loop; - -- again, to test if cursor was closed properly - for r in c(9,10) loop - raise notice '% from %', r.i, c; - end loop; - -- and test a parameterless cursor - for r in c2 loop - raise notice '% from %', r.i, c2; - end loop; - -- and try it with a hand-assigned name - raise notice 'after loop, c2 = %', c2; - c2 := 'special_name'; - for r in c2 loop - raise notice '% from %', r.i, c2; - end loop; - raise notice 'after loop, c2 = %', c2; - -- and try it with a generated name - -- (which we can't show in the output because it's variable) - c2 := null; - for r in c2 loop - raise notice '%', r.i; - end loop; - raise notice 'after loop, c2 = %', c2; - return; -end; -$$ language plpgsql; -select forc01(); -NOTICE: 5 
from c -NOTICE: 6 from c -NOTICE: 7 from c -NOTICE: 9 from c -NOTICE: 10 from c -NOTICE: 41 from c2 -NOTICE: 42 from c2 -NOTICE: 43 from c2 -NOTICE: after loop, c2 = c2 -NOTICE: 41 from special_name -NOTICE: 42 from special_name -NOTICE: 43 from special_name -NOTICE: after loop, c2 = special_name -NOTICE: 41 -NOTICE: 42 -NOTICE: 43 -NOTICE: after loop, c2 = - forc01 --------- - -(1 row) - --- try updating the cursor's current row -create temp table forc_test as - select n as i, n as j from generate_series(1,10) n; -create or replace function forc01() returns void as $$ -declare - c cursor for select * from forc_test; -begin - for r in c loop - raise notice '%, %', r.i, r.j; - update forc_test set i = i * 100, j = r.j * 2 where current of c; - end loop; -end; -$$ language plpgsql; -select forc01(); -NOTICE: 1, 1 -NOTICE: 2, 2 -NOTICE: 3, 3 -NOTICE: 4, 4 -NOTICE: 5, 5 -NOTICE: 6, 6 -NOTICE: 7, 7 -NOTICE: 8, 8 -NOTICE: 9, 9 -NOTICE: 10, 10 - forc01 --------- - -(1 row) - -select * from forc_test; - i | j -------+---- - 100 | 2 - 200 | 4 - 300 | 6 - 400 | 8 - 500 | 10 - 600 | 12 - 700 | 14 - 800 | 16 - 900 | 18 - 1000 | 20 -(10 rows) - --- same, with a cursor whose portal name doesn't match variable name -create or replace function forc01() returns void as $$ -declare - c refcursor := 'fooled_ya'; - r record; -begin - open c for select * from forc_test; - loop - fetch c into r; - exit when not found; - raise notice '%, %', r.i, r.j; - update forc_test set i = i * 100, j = r.j * 2 where current of c; - end loop; -end; -$$ language plpgsql; -select forc01(); -NOTICE: 100, 2 -NOTICE: 200, 4 -NOTICE: 300, 6 -NOTICE: 400, 8 -NOTICE: 500, 10 -NOTICE: 600, 12 -NOTICE: 700, 14 -NOTICE: 800, 16 -NOTICE: 900, 18 -NOTICE: 1000, 20 - forc01 --------- - -(1 row) - -select * from forc_test; - i | j ---------+---- - 10000 | 4 - 20000 | 8 - 30000 | 12 - 40000 | 16 - 50000 | 20 - 60000 | 24 - 70000 | 28 - 80000 | 32 - 90000 | 36 - 100000 | 40 -(10 rows) - -drop function forc01(); --- it's okay to re-use a cursor variable name, even when bound -do $$ -declare cnt int := 0; - c1 cursor for select * from forc_test; -begin - for r1 in c1 loop - declare c1 cursor for select * from forc_test; - begin - for r2 in c1 loop - cnt := cnt + 1; - end loop; - end; - end loop; - raise notice 'cnt = %', cnt; -end $$; -NOTICE: cnt = 100 --- fail because cursor has no query bound to it -create or replace function forc_bad() returns void as $$ -declare - c refcursor; -begin - for r in c loop - raise notice '%', r.i; - end loop; -end; -$$ language plpgsql; -ERROR: cursor FOR loop must use a bound cursor variable -LINE 5: for r in c loop - ^ --- test RETURN QUERY EXECUTE -create or replace function return_dquery() -returns setof int as $$ -begin - return query execute 'select * from (values(10),(20)) f'; - return query execute 'select * from (values($1),($2)) f' using 40,50; -end; -$$ language plpgsql; -select * from return_dquery(); - return_dquery ---------------- - 10 - 20 - 40 - 50 -(4 rows) - -drop function return_dquery(); --- test RETURN QUERY with dropped columns -create table tabwithcols(a int, b int, c int, d int); -insert into tabwithcols values(10,20,30,40),(50,60,70,80); -create or replace function returnqueryf() -returns setof tabwithcols as $$ -begin - return query select * from tabwithcols; - return query execute 'select * from tabwithcols'; -end; -$$ language plpgsql; -select * from returnqueryf(); - a | b | c | d -----+----+----+---- - 10 | 20 | 30 | 40 - 50 | 60 | 70 | 80 - 10 | 20 | 30 | 40 - 50 | 60 | 70 | 80 
-(4 rows) - -alter table tabwithcols drop column b; -select * from returnqueryf(); - a | c | d -----+----+---- - 10 | 30 | 40 - 50 | 70 | 80 - 10 | 30 | 40 - 50 | 70 | 80 -(4 rows) - -alter table tabwithcols drop column d; -select * from returnqueryf(); - a | c -----+---- - 10 | 30 - 50 | 70 - 10 | 30 - 50 | 70 -(4 rows) - -alter table tabwithcols add column d int; -select * from returnqueryf(); - a | c | d -----+----+--- - 10 | 30 | - 50 | 70 | - 10 | 30 | - 50 | 70 | -(4 rows) - -drop function returnqueryf(); -drop table tabwithcols; --- --- Tests for composite-type results --- -create type compostype as (x int, y varchar); --- test: use of variable of composite type in return statement -create or replace function compos() returns compostype as $$ -declare - v compostype; -begin - v := (1, 'hello'); - return v; -end; -$$ language plpgsql; -select compos(); - compos ------------ - (1,hello) -(1 row) - --- test: use of variable of record type in return statement -create or replace function compos() returns compostype as $$ -declare - v record; -begin - v := (1, 'hello'::varchar); - return v; -end; -$$ language plpgsql; -select compos(); - compos ------------ - (1,hello) -(1 row) - --- test: use of row expr in return statement -create or replace function compos() returns compostype as $$ -begin - return (1, 'hello'::varchar); -end; -$$ language plpgsql; -select compos(); - compos ------------ - (1,hello) -(1 row) - --- this does not work currently (no implicit casting) -create or replace function compos() returns compostype as $$ -begin - return (1, 'hello'); -end; -$$ language plpgsql; -select compos(); -ERROR: returned record type does not match expected record type -DETAIL: Returned type unknown does not match expected type character varying in column 2. -CONTEXT: PL/pgSQL function compos() while casting return value to function's return type --- ... but this does -create or replace function compos() returns compostype as $$ -begin - return (1, 'hello')::compostype; -end; -$$ language plpgsql; -select compos(); - compos ------------ - (1,hello) -(1 row) - -drop function compos(); --- test: return a row expr as record. -create or replace function composrec() returns record as $$ -declare - v record; -begin - v := (1, 'hello'); - return v; -end; -$$ language plpgsql; -select composrec(); - composrec ------------ - (1,hello) -(1 row) - --- test: return row expr in return statement. -create or replace function composrec() returns record as $$ -begin - return (1, 'hello'); -end; -$$ language plpgsql; -select composrec(); - composrec ------------ - (1,hello) -(1 row) - -drop function composrec(); --- test: row expr in RETURN NEXT statement. -create or replace function compos() returns setof compostype as $$ -begin - for i in 1..3 - loop - return next (1, 'hello'::varchar); - end loop; - return next null::compostype; - return next (2, 'goodbye')::compostype; -end; -$$ language plpgsql; -select * from compos(); - x | y ----+--------- - 1 | hello - 1 | hello - 1 | hello - | - 2 | goodbye -(5 rows) - -drop function compos(); --- test: use invalid expr in return statement. -create or replace function compos() returns compostype as $$ -begin - return 1 + 1; -end; -$$ language plpgsql; -select compos(); -ERROR: cannot return non-composite value from function returning composite type -CONTEXT: PL/pgSQL function compos() line 3 at RETURN --- RETURN variable is a different code path ... 
-create or replace function compos() returns compostype as $$ -declare x int := 42; -begin - return x; -end; -$$ language plpgsql; -select * from compos(); -ERROR: cannot return non-composite value from function returning composite type -CONTEXT: PL/pgSQL function compos() line 4 at RETURN -drop function compos(); --- test: invalid use of composite variable in scalar-returning function -create or replace function compos() returns int as $$ -declare - v compostype; -begin - v := (1, 'hello'); - return v; -end; -$$ language plpgsql; -select compos(); -ERROR: invalid input syntax for type integer: "(1,hello)" -CONTEXT: PL/pgSQL function compos() while casting return value to function's return type --- test: invalid use of composite expression in scalar-returning function -create or replace function compos() returns int as $$ -begin - return (1, 'hello')::compostype; -end; -$$ language plpgsql; -select compos(); -ERROR: invalid input syntax for type integer: "(1,hello)" -CONTEXT: PL/pgSQL function compos() while casting return value to function's return type -drop function compos(); -drop type compostype; --- --- Tests for 8.4's new RAISE features --- -create or replace function raise_test() returns void as $$ -begin - raise notice '% % %', 1, 2, 3 - using errcode = '55001', detail = 'some detail info', hint = 'some hint'; - raise '% % %', 1, 2, 3 - using errcode = 'division_by_zero', detail = 'some detail info'; -end; -$$ language plpgsql; -select raise_test(); -NOTICE: 1 2 3 -DETAIL: some detail info -HINT: some hint -ERROR: 1 2 3 -DETAIL: some detail info -CONTEXT: PL/pgSQL function raise_test() line 5 at RAISE --- Since we can't actually see the thrown SQLSTATE in default psql output, --- test it like this; this also tests re-RAISE -create or replace function raise_test() returns void as $$ -begin - raise 'check me' - using errcode = 'division_by_zero', detail = 'some detail info'; - exception - when others then - raise notice 'SQLSTATE: % SQLERRM: %', sqlstate, sqlerrm; - raise; -end; -$$ language plpgsql; -select raise_test(); -NOTICE: SQLSTATE: 22012 SQLERRM: check me -ERROR: check me -DETAIL: some detail info -CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE -create or replace function raise_test() returns void as $$ -begin - raise 'check me' - using errcode = '1234F', detail = 'some detail info'; - exception - when others then - raise notice 'SQLSTATE: % SQLERRM: %', sqlstate, sqlerrm; - raise; -end; -$$ language plpgsql; -select raise_test(); -NOTICE: SQLSTATE: 1234F SQLERRM: check me -ERROR: check me -DETAIL: some detail info -CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE --- SQLSTATE specification in WHEN -create or replace function raise_test() returns void as $$ -begin - raise 'check me' - using errcode = '1234F', detail = 'some detail info'; - exception - when sqlstate '1234F' then - raise notice 'SQLSTATE: % SQLERRM: %', sqlstate, sqlerrm; - raise; -end; -$$ language plpgsql; -select raise_test(); -NOTICE: SQLSTATE: 1234F SQLERRM: check me -ERROR: check me -DETAIL: some detail info -CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE -create or replace function raise_test() returns void as $$ -begin - raise division_by_zero using detail = 'some detail info'; - exception - when others then - raise notice 'SQLSTATE: % SQLERRM: %', sqlstate, sqlerrm; - raise; -end; -$$ language plpgsql; -select raise_test(); -NOTICE: SQLSTATE: 22012 SQLERRM: division_by_zero -ERROR: division_by_zero -DETAIL: some detail info -CONTEXT: PL/pgSQL function raise_test() line 3 at 
RAISE -create or replace function raise_test() returns void as $$ -begin - raise division_by_zero; -end; -$$ language plpgsql; -select raise_test(); -ERROR: division_by_zero -CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE -create or replace function raise_test() returns void as $$ -begin - raise sqlstate '1234F'; -end; -$$ language plpgsql; -select raise_test(); -ERROR: 1234F -CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE -create or replace function raise_test() returns void as $$ -begin - raise division_by_zero using message = 'custom' || ' message'; -end; -$$ language plpgsql; -select raise_test(); -ERROR: custom message -CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE -create or replace function raise_test() returns void as $$ -begin - raise using message = 'custom' || ' message', errcode = '22012'; -end; -$$ language plpgsql; -select raise_test(); -ERROR: custom message -CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE --- conflict on message -create or replace function raise_test() returns void as $$ -begin - raise notice 'some message' using message = 'custom' || ' message', errcode = '22012'; -end; -$$ language plpgsql; -select raise_test(); -ERROR: RAISE option already specified: MESSAGE -CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE --- conflict on errcode -create or replace function raise_test() returns void as $$ -begin - raise division_by_zero using message = 'custom' || ' message', errcode = '22012'; -end; -$$ language plpgsql; -select raise_test(); -ERROR: RAISE option already specified: ERRCODE -CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE --- nothing to re-RAISE -create or replace function raise_test() returns void as $$ -begin - raise; -end; -$$ language plpgsql; -select raise_test(); -ERROR: RAISE without parameters cannot be used outside an exception handler -CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE --- test access to exception data -create function zero_divide() returns int as $$ -declare v int := 0; -begin - return 10 / v; -end; -$$ language plpgsql parallel safe; -create or replace function raise_test() returns void as $$ -begin - raise exception 'custom exception' - using detail = 'some detail of custom exception', - hint = 'some hint related to custom exception'; -end; -$$ language plpgsql; -create function stacked_diagnostics_test() returns void as $$ -declare _sqlstate text; - _message text; - _context text; -begin - perform zero_divide(); -exception when others then - get stacked diagnostics - _sqlstate = returned_sqlstate, - _message = message_text, - _context = pg_exception_context; - raise notice 'sqlstate: %, message: %, context: [%]', - _sqlstate, _message, replace(_context, E'\n', ' <- '); -end; -$$ language plpgsql; -select stacked_diagnostics_test(); -NOTICE: sqlstate: 22012, message: division by zero, context: [PL/pgSQL function zero_divide() line 4 at RETURN <- SQL statement "SELECT zero_divide()" <- PL/pgSQL function stacked_diagnostics_test() line 6 at PERFORM] - stacked_diagnostics_test --------------------------- - -(1 row) - -create or replace function stacked_diagnostics_test() returns void as $$ -declare _detail text; - _hint text; - _message text; -begin - perform raise_test(); -exception when others then - get stacked diagnostics - _message = message_text, - _detail = pg_exception_detail, - _hint = pg_exception_hint; - raise notice 'message: %, detail: %, hint: %', _message, _detail, _hint; -end; -$$ language plpgsql; -select stacked_diagnostics_test(); -NOTICE: message: 
custom exception, detail: some detail of custom exception, hint: some hint related to custom exception - stacked_diagnostics_test --------------------------- - -(1 row) - --- fail, cannot use stacked diagnostics statement outside handler -create or replace function stacked_diagnostics_test() returns void as $$ -declare _detail text; - _hint text; - _message text; -begin - get stacked diagnostics - _message = message_text, - _detail = pg_exception_detail, - _hint = pg_exception_hint; - raise notice 'message: %, detail: %, hint: %', _message, _detail, _hint; -end; -$$ language plpgsql; -select stacked_diagnostics_test(); -ERROR: GET STACKED DIAGNOSTICS cannot be used outside an exception handler -CONTEXT: PL/pgSQL function stacked_diagnostics_test() line 6 at GET STACKED DIAGNOSTICS -drop function stacked_diagnostics_test(); --- Test that an error recovery subtransaction is parallel safe -create function error_trap_test() returns text as $$ -begin - perform zero_divide(); - return 'no error detected!'; -exception when division_by_zero then - return 'division_by_zero detected'; -end; -$$ language plpgsql parallel safe; -set debug_parallel_query to on; -explain (verbose, costs off) select error_trap_test(); - QUERY PLAN ------------------------------------ - Gather - Output: (error_trap_test()) - Workers Planned: 1 - Single Copy: true - -> Result - Output: error_trap_test() -(6 rows) - -select error_trap_test(); - error_trap_test ---------------------------- - division_by_zero detected -(1 row) - -reset debug_parallel_query; -drop function error_trap_test(); -drop function zero_divide(); --- check cases where implicit SQLSTATE variable could be confused with --- SQLSTATE as a keyword, cf bug #5524 -create or replace function raise_test() returns void as $$ -begin - perform 1/0; -exception - when sqlstate '22012' then - raise notice using message = sqlstate; - raise sqlstate '22012' using message = 'substitute message'; -end; -$$ language plpgsql; -select raise_test(); -NOTICE: 22012 -ERROR: substitute message -CONTEXT: PL/pgSQL function raise_test() line 7 at RAISE -drop function raise_test(); --- test passing column_name, constraint_name, datatype_name, table_name --- and schema_name error fields -create or replace function stacked_diagnostics_test() returns void as $$ -declare _column_name text; - _constraint_name text; - _datatype_name text; - _table_name text; - _schema_name text; -begin - raise exception using - column = '>>some column name<<', - constraint = '>>some constraint name<<', - datatype = '>>some datatype name<<', - table = '>>some table name<<', - schema = '>>some schema name<<'; -exception when others then - get stacked diagnostics - _column_name = column_name, - _constraint_name = constraint_name, - _datatype_name = pg_datatype_name, - _table_name = table_name, - _schema_name = schema_name; - raise notice 'column %, constraint %, type %, table %, schema %', - _column_name, _constraint_name, _datatype_name, _table_name, _schema_name; -end; -$$ language plpgsql; -select stacked_diagnostics_test(); -NOTICE: column >>some column name<<, constraint >>some constraint name<<, type >>some datatype name<<, table >>some table name<<, schema >>some schema name<< - stacked_diagnostics_test --------------------------- - -(1 row) - -drop function stacked_diagnostics_test(); --- test variadic functions -create or replace function vari(variadic int[]) -returns void as $$ -begin - for i in array_lower($1,1)..array_upper($1,1) loop - raise notice '%', $1[i]; - end loop; end; -$$ language 
plpgsql; -select vari(1,2,3,4,5); -NOTICE: 1 -NOTICE: 2 -NOTICE: 3 -NOTICE: 4 -NOTICE: 5 - vari ------- - -(1 row) - -select vari(3,4,5); -NOTICE: 3 -NOTICE: 4 -NOTICE: 5 - vari ------- - -(1 row) - -select vari(variadic array[5,6,7]); -NOTICE: 5 -NOTICE: 6 -NOTICE: 7 - vari ------- - -(1 row) - -drop function vari(int[]); --- coercion test -create or replace function pleast(variadic numeric[]) -returns numeric as $$ -declare aux numeric = $1[array_lower($1,1)]; -begin - for i in array_lower($1,1)+1..array_upper($1,1) loop - if $1[i] < aux then aux := $1[i]; end if; - end loop; - return aux; -end; -$$ language plpgsql immutable strict; -select pleast(10,1,2,3,-16); - pleast --------- - -16 -(1 row) - -select pleast(10.2,2.2,-1.1); - pleast --------- - -1.1 -(1 row) - -select pleast(10.2,10, -20); - pleast --------- - -20 -(1 row) - -select pleast(10,20, -1.0); - pleast --------- - -1.0 -(1 row) - --- in case of conflict, non-variadic version is preferred -create or replace function pleast(numeric) -returns numeric as $$ -begin - raise notice 'non-variadic function called'; - return $1; -end; -$$ language plpgsql immutable strict; -select pleast(10); -NOTICE: non-variadic function called - pleast --------- - 10 -(1 row) - -drop function pleast(numeric[]); -drop function pleast(numeric); --- test table functions -create function tftest(int) returns table(a int, b int) as $$ -begin - return query select $1, $1+i from generate_series(1,5) g(i); -end; -$$ language plpgsql immutable strict; -select * from tftest(10); - a | b -----+---- - 10 | 11 - 10 | 12 - 10 | 13 - 10 | 14 - 10 | 15 -(5 rows) - -create or replace function tftest(a1 int) returns table(a int, b int) as $$ -begin - a := a1; b := a1 + 1; - return next; - a := a1 * 10; b := a1 * 10 + 1; - return next; -end; -$$ language plpgsql immutable strict; -select * from tftest(10); - a | b ------+----- - 10 | 11 - 100 | 101 -(2 rows) - -drop function tftest(int); -create function rttest() -returns setof int as $$ -declare rc int; -begin - return query values(10),(20); - get diagnostics rc = row_count; - raise notice '% %', found, rc; - return query select * from (values(10),(20)) f(a) where false; - get diagnostics rc = row_count; - raise notice '% %', found, rc; - return query execute 'values(10),(20)'; - get diagnostics rc = row_count; - raise notice '% %', found, rc; - return query execute 'select * from (values(10),(20)) f(a) where false'; - get diagnostics rc = row_count; - raise notice '% %', found, rc; -end; -$$ language plpgsql; -select * from rttest(); -NOTICE: t 2 -NOTICE: f 0 -NOTICE: t 2 -NOTICE: f 0 - rttest --------- - 10 - 20 - 10 - 20 -(4 rows) - --- check some error cases, too -create or replace function rttest() -returns setof int as $$ -begin - return query select 10 into no_such_table; -end; -$$ language plpgsql; -select * from rttest(); -ERROR: SELECT INTO query does not return tuples -CONTEXT: SQL statement "select 10 into no_such_table" -PL/pgSQL function rttest() line 3 at RETURN QUERY -create or replace function rttest() -returns setof int as $$ -begin - return query execute 'select 10 into no_such_table'; -end; -$$ language plpgsql; -select * from rttest(); -ERROR: SELECT INTO query does not return tuples -CONTEXT: SQL statement "select 10 into no_such_table" -PL/pgSQL function rttest() line 3 at RETURN QUERY -select * from no_such_table; -ERROR: relation "no_such_table" does not exist -LINE 1: select * from no_such_table; - ^ -drop function rttest(); --- Test for proper cleanup at subtransaction exit. 
This example --- exposed a bug in PG 8.2. -CREATE FUNCTION leaker_1(fail BOOL) RETURNS INTEGER AS $$ -DECLARE - v_var INTEGER; -BEGIN - BEGIN - v_var := (leaker_2(fail)).error_code; - EXCEPTION - WHEN others THEN RETURN 0; - END; - RETURN 1; -END; -$$ LANGUAGE plpgsql; -CREATE FUNCTION leaker_2(fail BOOL, OUT error_code INTEGER, OUT new_id INTEGER) - RETURNS RECORD AS $$ -BEGIN - IF fail THEN - RAISE EXCEPTION 'fail ...'; - END IF; - error_code := 1; - new_id := 1; - RETURN; -END; -$$ LANGUAGE plpgsql; -SELECT * FROM leaker_1(false); - leaker_1 ----------- - 1 -(1 row) - -SELECT * FROM leaker_1(true); - leaker_1 ----------- - 0 -(1 row) - -DROP FUNCTION leaker_1(bool); -DROP FUNCTION leaker_2(bool); --- Test for appropriate cleanup of non-simple expression evaluations --- (bug in all versions prior to August 2010) -CREATE FUNCTION nonsimple_expr_test() RETURNS text[] AS $$ -DECLARE - arr text[]; - lr text; - i integer; -BEGIN - arr := array[array['foo','bar'], array['baz', 'quux']]; - lr := 'fool'; - i := 1; - -- use sub-SELECTs to make expressions non-simple - arr[(SELECT i)][(SELECT i+1)] := (SELECT lr); - RETURN arr; -END; -$$ LANGUAGE plpgsql; -SELECT nonsimple_expr_test(); - nonsimple_expr_test -------------------------- - {{foo,fool},{baz,quux}} -(1 row) - -DROP FUNCTION nonsimple_expr_test(); -CREATE FUNCTION nonsimple_expr_test() RETURNS integer AS $$ -declare - i integer NOT NULL := 0; -begin - begin - i := (SELECT NULL::integer); -- should throw error - exception - WHEN OTHERS THEN - i := (SELECT 1::integer); - end; - return i; -end; -$$ LANGUAGE plpgsql; -SELECT nonsimple_expr_test(); - nonsimple_expr_test ---------------------- - 1 -(1 row) - -DROP FUNCTION nonsimple_expr_test(); --- --- Test cases involving recursion and error recovery in simple expressions --- (bugs in all versions before October 2010). The problems are most --- easily exposed by mutual recursion between plpgsql and sql functions. 
--- -create function recurse(float8) returns float8 as -$$ -begin - if ($1 > 0) then - return sql_recurse($1 - 1); - else - return $1; - end if; -end; -$$ language plpgsql; --- "limit" is to prevent this from being inlined -create function sql_recurse(float8) returns float8 as -$$ select recurse($1) limit 1; $$ language sql; -select recurse(10); - recurse ---------- - 0 -(1 row) - -create function error1(text) returns text language sql as -$$ SELECT relname::text FROM pg_class c WHERE c.oid = $1::regclass $$; -create function error2(p_name_table text) returns text language plpgsql as $$ -begin - return error1(p_name_table); -end$$; -BEGIN; -create table public.stuffs (stuff text); -SAVEPOINT a; -select error2('nonexistent.stuffs'); -ERROR: schema "nonexistent" does not exist -CONTEXT: SQL function "error1" statement 1 -PL/pgSQL function error2(text) line 3 at RETURN -ROLLBACK TO a; -select error2('public.stuffs'); - error2 --------- - stuffs -(1 row) - -rollback; -drop function error2(p_name_table text); -drop function error1(text); --- Test for proper handling of cast-expression caching -create function sql_to_date(integer) returns date as $$ -select $1::text::date -$$ language sql immutable strict; -create cast (integer as date) with function sql_to_date(integer) as assignment; -create function cast_invoker(integer) returns date as $$ -begin - return $1; -end$$ language plpgsql; -select cast_invoker(20150717); - cast_invoker --------------- - 07-17-2015 -(1 row) - -select cast_invoker(20150718); -- second call crashed in pre-release 9.5 - cast_invoker --------------- - 07-18-2015 -(1 row) - -begin; -select cast_invoker(20150717); - cast_invoker --------------- - 07-17-2015 -(1 row) - -select cast_invoker(20150718); - cast_invoker --------------- - 07-18-2015 -(1 row) - -savepoint s1; -select cast_invoker(20150718); - cast_invoker --------------- - 07-18-2015 -(1 row) - -select cast_invoker(-1); -- fails -ERROR: invalid input syntax for type date: "-1" -CONTEXT: SQL function "sql_to_date" statement 1 -PL/pgSQL function cast_invoker(integer) while casting return value to function's return type -rollback to savepoint s1; -select cast_invoker(20150719); - cast_invoker --------------- - 07-19-2015 -(1 row) - -select cast_invoker(20150720); - cast_invoker --------------- - 07-20-2015 -(1 row) - -commit; -drop function cast_invoker(integer); -drop function sql_to_date(integer) cascade; -NOTICE: drop cascades to cast from integer to date --- Test handling of cast cache inside DO blocks --- (to check the original crash case, this must be a cast not previously --- used in this session) -begin; -do $$ declare x text[]; begin x := '{1.23, 4.56}'::numeric[]; end $$; -do $$ declare x text[]; begin x := '{1.23, 4.56}'::numeric[]; end $$; -end; --- Test for consistent reporting of error context -create function fail() returns int language plpgsql as $$ -begin - return 1/0; -end -$$; -select fail(); -ERROR: division by zero -CONTEXT: PL/pgSQL expression "1/0" -PL/pgSQL function fail() line 3 at RETURN -select fail(); -ERROR: division by zero -CONTEXT: PL/pgSQL expression "1/0" -PL/pgSQL function fail() line 3 at RETURN -drop function fail(); --- Test handling of string literals. 
-set standard_conforming_strings = off; -create or replace function strtest() returns text as $$ -begin - raise notice 'foo\\bar\041baz'; - return 'foo\\bar\041baz'; -end -$$ language plpgsql; -WARNING: nonstandard use of \\ in a string literal -LINE 3: raise notice 'foo\\bar\041baz'; - ^ -HINT: Use the escape string syntax for backslashes, e.g., E'\\'. -WARNING: nonstandard use of \\ in a string literal -LINE 4: return 'foo\\bar\041baz'; - ^ -HINT: Use the escape string syntax for backslashes, e.g., E'\\'. -WARNING: nonstandard use of \\ in a string literal -LINE 4: return 'foo\\bar\041baz'; - ^ -HINT: Use the escape string syntax for backslashes, e.g., E'\\'. -select strtest(); -NOTICE: foo\bar!baz -WARNING: nonstandard use of \\ in a string literal -LINE 1: 'foo\\bar\041baz' - ^ -HINT: Use the escape string syntax for backslashes, e.g., E'\\'. -QUERY: 'foo\\bar\041baz' - strtest -------------- - foo\bar!baz -(1 row) - -create or replace function strtest() returns text as $$ -begin - raise notice E'foo\\bar\041baz'; - return E'foo\\bar\041baz'; -end -$$ language plpgsql; -select strtest(); -NOTICE: foo\bar!baz - strtest -------------- - foo\bar!baz -(1 row) - -set standard_conforming_strings = on; -create or replace function strtest() returns text as $$ -begin - raise notice 'foo\\bar\041baz\'; - return 'foo\\bar\041baz\'; -end -$$ language plpgsql; -select strtest(); -NOTICE: foo\\bar\041baz\ - strtest ------------------- - foo\\bar\041baz\ -(1 row) - -create or replace function strtest() returns text as $$ -begin - raise notice E'foo\\bar\041baz'; - return E'foo\\bar\041baz'; -end -$$ language plpgsql; -select strtest(); -NOTICE: foo\bar!baz - strtest -------------- - foo\bar!baz -(1 row) - -drop function strtest(); --- Test anonymous code blocks. -DO $$ -DECLARE r record; -BEGIN - FOR r IN SELECT rtrim(roomno) AS roomno, comment FROM Room ORDER BY roomno - LOOP - RAISE NOTICE '%, %', r.roomno, r.comment; - END LOOP; -END$$; -NOTICE: 001, Entrance -NOTICE: 002, Office -NOTICE: 003, Office -NOTICE: 004, Technical -NOTICE: 101, Office -NOTICE: 102, Conference -NOTICE: 103, Restroom -NOTICE: 104, Technical -NOTICE: 105, Office -NOTICE: 106, Office --- these are to check syntax error reporting -DO LANGUAGE plpgsql $$begin return 1; end$$; -ERROR: RETURN cannot have a parameter in function returning void -LINE 1: DO LANGUAGE plpgsql $$begin return 1; end$$; - ^ -DO $$ -DECLARE r record; -BEGIN - FOR r IN SELECT rtrim(roomno) AS roomno, foo FROM Room ORDER BY roomno - LOOP - RAISE NOTICE '%, %', r.roomno, r.comment; - END LOOP; -END$$; -ERROR: column "foo" does not exist -LINE 1: SELECT rtrim(roomno) AS roomno, foo FROM Room ORDER BY roomn... - ^ -QUERY: SELECT rtrim(roomno) AS roomno, foo FROM Room ORDER BY roomno -CONTEXT: PL/pgSQL function inline_code_block line 4 at FOR over SELECT rows --- Check handling of errors thrown from/into anonymous code blocks. 
-do $outer$ -begin - for i in 1..10 loop - begin - execute $ex$ - do $$ - declare x int = 0; - begin - x := 1 / x; - end; - $$; - $ex$; - exception when division_by_zero then - raise notice 'caught division by zero'; - end; - end loop; -end; -$outer$; -NOTICE: caught division by zero -NOTICE: caught division by zero -NOTICE: caught division by zero -NOTICE: caught division by zero -NOTICE: caught division by zero -NOTICE: caught division by zero -NOTICE: caught division by zero -NOTICE: caught division by zero -NOTICE: caught division by zero -NOTICE: caught division by zero --- Check variable scoping -- a var is not available in its own or prior --- default expressions, but it is available in later ones. -do $$ -declare x int := x + 1; -- error -begin - raise notice 'x = %', x; -end; -$$; -ERROR: column "x" does not exist -LINE 1: x + 1 - ^ -QUERY: x + 1 -CONTEXT: PL/pgSQL function inline_code_block line 2 during statement block local variable initialization -do $$ -declare y int := x + 1; -- error - x int := 42; -begin - raise notice 'x = %, y = %', x, y; -end; -$$; -ERROR: column "x" does not exist -LINE 1: x + 1 - ^ -QUERY: x + 1 -CONTEXT: PL/pgSQL function inline_code_block line 2 during statement block local variable initialization -do $$ -declare x int := 42; - y int := x + 1; -begin - raise notice 'x = %, y = %', x, y; -end; -$$; -NOTICE: x = 42, y = 43 -do $$ -declare x int := 42; -begin - declare y int := x + 1; - x int := x + 2; - z int := x * 10; - begin - raise notice 'x = %, y = %, z = %', x, y, z; - end; -end; -$$; -NOTICE: x = 44, y = 43, z = 440 --- Check handling of conflicts between plpgsql vars and table columns. -set plpgsql.variable_conflict = error; -create function conflict_test() returns setof int8_tbl as $$ -declare r record; - q1 bigint := 42; -begin - for r in select q1,q2 from int8_tbl loop - return next r; - end loop; -end; -$$ language plpgsql; -select * from conflict_test(); -ERROR: column reference "q1" is ambiguous -LINE 1: select q1,q2 from int8_tbl - ^ -DETAIL: It could refer to either a PL/pgSQL variable or a table column. 
-QUERY: select q1,q2 from int8_tbl -CONTEXT: PL/pgSQL function conflict_test() line 5 at FOR over SELECT rows -create or replace function conflict_test() returns setof int8_tbl as $$ -#variable_conflict use_variable -declare r record; - q1 bigint := 42; -begin - for r in select q1,q2 from int8_tbl loop - return next r; - end loop; -end; -$$ language plpgsql; -select * from conflict_test(); - q1 | q2 -----+------------------- - 42 | 456 - 42 | 4567890123456789 - 42 | 123 - 42 | 4567890123456789 - 42 | -4567890123456789 -(5 rows) - -create or replace function conflict_test() returns setof int8_tbl as $$ -#variable_conflict use_column -declare r record; - q1 bigint := 42; -begin - for r in select q1,q2 from int8_tbl loop - return next r; - end loop; -end; -$$ language plpgsql; -select * from conflict_test(); - q1 | q2 -------------------+------------------- - 123 | 456 - 123 | 4567890123456789 - 4567890123456789 | 123 - 4567890123456789 | 4567890123456789 - 4567890123456789 | -4567890123456789 -(5 rows) - -drop function conflict_test(); --- Check that an unreserved keyword can be used as a variable name -create function unreserved_test() returns int as $$ -declare - forward int := 21; -begin - forward := forward * 2; - return forward; -end -$$ language plpgsql; -select unreserved_test(); - unreserved_test ------------------ - 42 -(1 row) - -create or replace function unreserved_test() returns int as $$ -declare - return int := 42; -begin - return := return + 1; - return return; -end -$$ language plpgsql; -select unreserved_test(); - unreserved_test ------------------ - 43 -(1 row) - -create or replace function unreserved_test() returns int as $$ -declare - comment int := 21; -begin - comment := comment * 2; - comment on function unreserved_test() is 'this is a test'; - return comment; -end -$$ language plpgsql; -select unreserved_test(); - unreserved_test ------------------ - 42 -(1 row) - -select obj_description('unreserved_test()'::regprocedure, 'pg_proc'); - obj_description ------------------ - this is a test -(1 row) - -drop function unreserved_test(); --- --- Test FOREACH over arrays --- -create function foreach_test(anyarray) -returns void as $$ -declare x int; -begin - foreach x in array $1 - loop - raise notice '%', x; - end loop; - end; -$$ language plpgsql; -select foreach_test(ARRAY[1,2,3,4]); -NOTICE: 1 -NOTICE: 2 -NOTICE: 3 -NOTICE: 4 - foreach_test --------------- - -(1 row) - -select foreach_test(ARRAY[[1,2],[3,4]]); -NOTICE: 1 -NOTICE: 2 -NOTICE: 3 -NOTICE: 4 - foreach_test --------------- - -(1 row) - -create or replace function foreach_test(anyarray) -returns void as $$ -declare x int; -begin - foreach x slice 1 in array $1 - loop - raise notice '%', x; - end loop; - end; -$$ language plpgsql; --- should fail -select foreach_test(ARRAY[1,2,3,4]); -ERROR: FOREACH ... SLICE loop variable must be of an array type -CONTEXT: PL/pgSQL function foreach_test(anyarray) line 4 at FOREACH over array -select foreach_test(ARRAY[[1,2],[3,4]]); -ERROR: FOREACH ... 
SLICE loop variable must be of an array type -CONTEXT: PL/pgSQL function foreach_test(anyarray) line 4 at FOREACH over array -create or replace function foreach_test(anyarray) -returns void as $$ -declare x int[]; -begin - foreach x slice 1 in array $1 - loop - raise notice '%', x; - end loop; - end; -$$ language plpgsql; -select foreach_test(ARRAY[1,2,3,4]); -NOTICE: {1,2,3,4} - foreach_test --------------- - -(1 row) - -select foreach_test(ARRAY[[1,2],[3,4]]); -NOTICE: {1,2} -NOTICE: {3,4} - foreach_test --------------- - -(1 row) - --- higher level of slicing -create or replace function foreach_test(anyarray) -returns void as $$ -declare x int[]; -begin - foreach x slice 2 in array $1 - loop - raise notice '%', x; - end loop; - end; -$$ language plpgsql; --- should fail -select foreach_test(ARRAY[1,2,3,4]); -ERROR: slice dimension (2) is out of the valid range 0..1 -CONTEXT: PL/pgSQL function foreach_test(anyarray) line 4 at FOREACH over array --- ok -select foreach_test(ARRAY[[1,2],[3,4]]); -NOTICE: {{1,2},{3,4}} - foreach_test --------------- - -(1 row) - -select foreach_test(ARRAY[[[1,2]],[[3,4]]]); -NOTICE: {{1,2}} -NOTICE: {{3,4}} - foreach_test --------------- - -(1 row) - -create type xy_tuple AS (x int, y int); --- iteration over array of records -create or replace function foreach_test(anyarray) -returns void as $$ -declare r record; -begin - foreach r in array $1 - loop - raise notice '%', r; - end loop; - end; -$$ language plpgsql; -select foreach_test(ARRAY[(10,20),(40,69),(35,78)]::xy_tuple[]); -NOTICE: (10,20) -NOTICE: (40,69) -NOTICE: (35,78) - foreach_test --------------- - -(1 row) - -select foreach_test(ARRAY[[(10,20),(40,69)],[(35,78),(88,76)]]::xy_tuple[]); -NOTICE: (10,20) -NOTICE: (40,69) -NOTICE: (35,78) -NOTICE: (88,76) - foreach_test --------------- - -(1 row) - -create or replace function foreach_test(anyarray) -returns void as $$ -declare x int; y int; -begin - foreach x, y in array $1 - loop - raise notice 'x = %, y = %', x, y; - end loop; - end; -$$ language plpgsql; -select foreach_test(ARRAY[(10,20),(40,69),(35,78)]::xy_tuple[]); -NOTICE: x = 10, y = 20 -NOTICE: x = 40, y = 69 -NOTICE: x = 35, y = 78 - foreach_test --------------- - -(1 row) - -select foreach_test(ARRAY[[(10,20),(40,69)],[(35,78),(88,76)]]::xy_tuple[]); -NOTICE: x = 10, y = 20 -NOTICE: x = 40, y = 69 -NOTICE: x = 35, y = 78 -NOTICE: x = 88, y = 76 - foreach_test --------------- - -(1 row) - --- slicing over array of composite types -create or replace function foreach_test(anyarray) -returns void as $$ -declare x xy_tuple[]; -begin - foreach x slice 1 in array $1 - loop - raise notice '%', x; - end loop; - end; -$$ language plpgsql; -select foreach_test(ARRAY[(10,20),(40,69),(35,78)]::xy_tuple[]); -NOTICE: {"(10,20)","(40,69)","(35,78)"} - foreach_test --------------- - -(1 row) - -select foreach_test(ARRAY[[(10,20),(40,69)],[(35,78),(88,76)]]::xy_tuple[]); -NOTICE: {"(10,20)","(40,69)"} -NOTICE: {"(35,78)","(88,76)"} - foreach_test --------------- - -(1 row) - -drop function foreach_test(anyarray); -drop type xy_tuple; --- --- Assorted tests for array subscript assignment --- -create temp table rtype (id int, ar text[]); -create function arrayassign1() returns text[] language plpgsql as $$ -declare - r record; -begin - r := row(12, '{foo,bar,baz}')::rtype; - r.ar[2] := 'replace'; - return r.ar; -end$$; -select arrayassign1(); - arrayassign1 -------------------- - {foo,replace,baz} -(1 row) - -select arrayassign1(); -- try again to exercise internal caching - arrayassign1 
-------------------- - {foo,replace,baz} -(1 row) - -create domain orderedarray as int[2] - constraint sorted check (value[1] < value[2]); -select '{1,2}'::orderedarray; - orderedarray --------------- - {1,2} -(1 row) - -select '{2,1}'::orderedarray; -- fail -ERROR: value for domain orderedarray violates check constraint "sorted" -create function testoa(x1 int, x2 int, x3 int) returns orderedarray -language plpgsql as $$ -declare res orderedarray; -begin - res := array[x1, x2]; - res[2] := x3; - return res; -end$$; -select testoa(1,2,3); - testoa --------- - {1,3} -(1 row) - -select testoa(1,2,3); -- try again to exercise internal caching - testoa --------- - {1,3} -(1 row) - -select testoa(2,1,3); -- fail at initial assign -ERROR: value for domain orderedarray violates check constraint "sorted" -CONTEXT: PL/pgSQL function testoa(integer,integer,integer) line 4 at assignment -select testoa(1,2,1); -- fail at update -ERROR: value for domain orderedarray violates check constraint "sorted" -CONTEXT: PL/pgSQL function testoa(integer,integer,integer) line 5 at assignment -drop function arrayassign1(); -drop function testoa(x1 int, x2 int, x3 int); --- --- Test handling of expanded arrays --- -create function returns_rw_array(int) returns int[] -language plpgsql as $$ - declare r int[]; - begin r := array[$1, $1]; return r; end; -$$ stable; -create function consumes_rw_array(int[]) returns int -language plpgsql as $$ - begin return $1[1]; end; -$$ stable; -select consumes_rw_array(returns_rw_array(42)); - consumes_rw_array -------------------- - 42 -(1 row) - --- bug #14174 -explain (verbose, costs off) -select i, a from - (select returns_rw_array(1) as a offset 0) ss, - lateral consumes_rw_array(a) i; - QUERY PLAN ------------------------------------------------------------------ - Nested Loop - Output: i.i, (returns_rw_array(1)) - -> Result - Output: returns_rw_array(1) - -> Function Scan on public.consumes_rw_array i - Output: i.i - Function Call: consumes_rw_array((returns_rw_array(1))) -(7 rows) - -select i, a from - (select returns_rw_array(1) as a offset 0) ss, - lateral consumes_rw_array(a) i; - i | a ----+------- - 1 | {1,1} -(1 row) - -explain (verbose, costs off) -select consumes_rw_array(a), a from returns_rw_array(1) a; - QUERY PLAN --------------------------------------------- - Function Scan on public.returns_rw_array a - Output: consumes_rw_array(a), a - Function Call: returns_rw_array(1) -(3 rows) - -select consumes_rw_array(a), a from returns_rw_array(1) a; - consumes_rw_array | a --------------------+------- - 1 | {1,1} -(1 row) - -explain (verbose, costs off) -select consumes_rw_array(a), a from - (values (returns_rw_array(1)), (returns_rw_array(2))) v(a); - QUERY PLAN ---------------------------------------------------------------------- - Values Scan on "*VALUES*" - Output: consumes_rw_array("*VALUES*".column1), "*VALUES*".column1 -(2 rows) - -select consumes_rw_array(a), a from - (values (returns_rw_array(1)), (returns_rw_array(2))) v(a); - consumes_rw_array | a --------------------+------- - 1 | {1,1} - 2 | {2,2} -(2 rows) - -do $$ -declare a int[] := array[1,2]; -begin - a := a || 3; - raise notice 'a = %', a; -end$$; -NOTICE: a = {1,2,3} --- --- Test access to call stack --- -create function inner_func(int) -returns int as $$ -declare _context text; -begin - get diagnostics _context = pg_context; - raise notice '***%***', _context; - -- lets do it again, just for fun.. 
- get diagnostics _context = pg_context; - raise notice '***%***', _context; - raise notice 'lets make sure we didnt break anything'; - return 2 * $1; -end; -$$ language plpgsql; -create or replace function outer_func(int) -returns int as $$ -declare - myresult int; -begin - raise notice 'calling down into inner_func()'; - myresult := inner_func($1); - raise notice 'inner_func() done'; - return myresult; -end; -$$ language plpgsql; -create or replace function outer_outer_func(int) -returns int as $$ -declare - myresult int; -begin - raise notice 'calling down into outer_func()'; - myresult := outer_func($1); - raise notice 'outer_func() done'; - return myresult; -end; -$$ language plpgsql; -select outer_outer_func(10); -NOTICE: calling down into outer_func() -NOTICE: calling down into inner_func() -NOTICE: ***PL/pgSQL function inner_func(integer) line 4 at GET DIAGNOSTICS -PL/pgSQL function outer_func(integer) line 6 at assignment -PL/pgSQL function outer_outer_func(integer) line 6 at assignment*** -NOTICE: ***PL/pgSQL function inner_func(integer) line 7 at GET DIAGNOSTICS -PL/pgSQL function outer_func(integer) line 6 at assignment -PL/pgSQL function outer_outer_func(integer) line 6 at assignment*** -NOTICE: lets make sure we didnt break anything -NOTICE: inner_func() done -NOTICE: outer_func() done - outer_outer_func ------------------- - 20 -(1 row) - --- repeated call should work -select outer_outer_func(20); -NOTICE: calling down into outer_func() -NOTICE: calling down into inner_func() -NOTICE: ***PL/pgSQL function inner_func(integer) line 4 at GET DIAGNOSTICS -PL/pgSQL function outer_func(integer) line 6 at assignment -PL/pgSQL function outer_outer_func(integer) line 6 at assignment*** -NOTICE: ***PL/pgSQL function inner_func(integer) line 7 at GET DIAGNOSTICS -PL/pgSQL function outer_func(integer) line 6 at assignment -PL/pgSQL function outer_outer_func(integer) line 6 at assignment*** -NOTICE: lets make sure we didnt break anything -NOTICE: inner_func() done -NOTICE: outer_func() done - outer_outer_func ------------------- - 40 -(1 row) - -drop function outer_outer_func(int); -drop function outer_func(int); -drop function inner_func(int); --- access to call stack from exception -create function inner_func(int) -returns int as $$ -declare - _context text; - sx int := 5; -begin - begin - perform sx / 0; - exception - when division_by_zero then - get diagnostics _context = pg_context; - raise notice '***%***', _context; - end; - - -- lets do it again, just for fun.. 
- get diagnostics _context = pg_context; - raise notice '***%***', _context; - raise notice 'lets make sure we didnt break anything'; - return 2 * $1; -end; -$$ language plpgsql; -create or replace function outer_func(int) -returns int as $$ -declare - myresult int; -begin - raise notice 'calling down into inner_func()'; - myresult := inner_func($1); - raise notice 'inner_func() done'; - return myresult; -end; -$$ language plpgsql; -create or replace function outer_outer_func(int) -returns int as $$ -declare - myresult int; -begin - raise notice 'calling down into outer_func()'; - myresult := outer_func($1); - raise notice 'outer_func() done'; - return myresult; -end; -$$ language plpgsql; -select outer_outer_func(10); -NOTICE: calling down into outer_func() -NOTICE: calling down into inner_func() -NOTICE: ***PL/pgSQL function inner_func(integer) line 10 at GET DIAGNOSTICS -PL/pgSQL function outer_func(integer) line 6 at assignment -PL/pgSQL function outer_outer_func(integer) line 6 at assignment*** -NOTICE: ***PL/pgSQL function inner_func(integer) line 15 at GET DIAGNOSTICS -PL/pgSQL function outer_func(integer) line 6 at assignment -PL/pgSQL function outer_outer_func(integer) line 6 at assignment*** -NOTICE: lets make sure we didnt break anything -NOTICE: inner_func() done -NOTICE: outer_func() done - outer_outer_func ------------------- - 20 -(1 row) - --- repeated call should work -select outer_outer_func(20); -NOTICE: calling down into outer_func() -NOTICE: calling down into inner_func() -NOTICE: ***PL/pgSQL function inner_func(integer) line 10 at GET DIAGNOSTICS -PL/pgSQL function outer_func(integer) line 6 at assignment -PL/pgSQL function outer_outer_func(integer) line 6 at assignment*** -NOTICE: ***PL/pgSQL function inner_func(integer) line 15 at GET DIAGNOSTICS -PL/pgSQL function outer_func(integer) line 6 at assignment -PL/pgSQL function outer_outer_func(integer) line 6 at assignment*** -NOTICE: lets make sure we didnt break anything -NOTICE: inner_func() done -NOTICE: outer_func() done - outer_outer_func ------------------- - 40 -(1 row) - -drop function outer_outer_func(int); -drop function outer_func(int); -drop function inner_func(int); --- Test pg_routine_oid -create function current_function(text) -returns regprocedure as $$ -declare - fn_oid regprocedure; -begin - get diagnostics fn_oid = pg_routine_oid; - return fn_oid; -end; -$$ language plpgsql; -select current_function('foo'); - current_function ------------------------- - current_function(text) -(1 row) - -drop function current_function(text); --- shouldn't fail in DO, even though there's no useful data -do $$ -declare - fn_oid oid; -begin - get diagnostics fn_oid = pg_routine_oid; - raise notice 'pg_routine_oid = %', fn_oid; -end; -$$; -NOTICE: pg_routine_oid = 0 --- --- Test ASSERT --- -do $$ -begin - assert 1=1; -- should succeed -end; -$$; -do $$ -begin - assert 1=0; -- should fail -end; -$$; -ERROR: assertion failed -CONTEXT: PL/pgSQL function inline_code_block line 3 at ASSERT -do $$ -begin - assert NULL; -- should fail -end; -$$; -ERROR: assertion failed -CONTEXT: PL/pgSQL function inline_code_block line 3 at ASSERT --- check controlling GUC -set plpgsql.check_asserts = off; -do $$ -begin - assert 1=0; -- won't be tested -end; -$$; -reset plpgsql.check_asserts; --- test custom message -do $$ -declare var text := 'some value'; -begin - assert 1=0, format('assertion failed, var = "%s"', var); -end; -$$; -ERROR: assertion failed, var = "some value" -CONTEXT: PL/pgSQL function inline_code_block line 4 at ASSERT --- 
ensure assertions are not trapped by 'others' -do $$ -begin - assert 1=0, 'unhandled assertion'; -exception when others then - null; -- do nothing -end; -$$; -ERROR: unhandled assertion -CONTEXT: PL/pgSQL function inline_code_block line 3 at ASSERT --- Test use of plpgsql in a domain check constraint (cf. bug #14414) -create function plpgsql_domain_check(val int) returns boolean as $$ -begin return val > 0; end -$$ language plpgsql immutable; -create domain plpgsql_domain as integer check(plpgsql_domain_check(value)); -do $$ -declare v_test plpgsql_domain; -begin - v_test := 1; -end; -$$; -do $$ -declare v_test plpgsql_domain := 1; -begin - v_test := 0; -- fail -end; -$$; -ERROR: value for domain plpgsql_domain violates check constraint "plpgsql_domain_check" -CONTEXT: PL/pgSQL function inline_code_block line 4 at assignment --- Test handling of expanded array passed to a domain constraint (bug #14472) -create function plpgsql_arr_domain_check(val int[]) returns boolean as $$ -begin return val[1] > 0; end -$$ language plpgsql immutable; -create domain plpgsql_arr_domain as int[] check(plpgsql_arr_domain_check(value)); -do $$ -declare v_test plpgsql_arr_domain; -begin - v_test := array[1]; - v_test := v_test || 2; -end; -$$; -do $$ -declare v_test plpgsql_arr_domain := array[1]; -begin - v_test := 0 || v_test; -- fail -end; -$$; -ERROR: value for domain plpgsql_arr_domain violates check constraint "plpgsql_arr_domain_check" -CONTEXT: PL/pgSQL function inline_code_block line 4 at assignment --- --- test usage of transition tables in AFTER triggers --- -CREATE TABLE transition_table_base (id int PRIMARY KEY, val text); -CREATE FUNCTION transition_table_base_ins_func() - RETURNS trigger - LANGUAGE plpgsql -AS $$ -DECLARE - t text; - l text; -BEGIN - t = ''; - FOR l IN EXECUTE - $q$ - EXPLAIN (TIMING off, COSTS off, VERBOSE on) - SELECT * FROM newtable - $q$ LOOP - t = t || l || E'\n'; - END LOOP; - - RAISE INFO '%', t; - RETURN new; -END; -$$; -CREATE TRIGGER transition_table_base_ins_trig - AFTER INSERT ON transition_table_base - REFERENCING OLD TABLE AS oldtable NEW TABLE AS newtable - FOR EACH STATEMENT - EXECUTE PROCEDURE transition_table_base_ins_func(); -ERROR: OLD TABLE can only be specified for a DELETE or UPDATE trigger -CREATE TRIGGER transition_table_base_ins_trig - AFTER INSERT ON transition_table_base - REFERENCING NEW TABLE AS newtable - FOR EACH STATEMENT - EXECUTE PROCEDURE transition_table_base_ins_func(); -INSERT INTO transition_table_base VALUES (1, 'One'), (2, 'Two'); -INFO: Named Tuplestore Scan - Output: id, val - -INSERT INTO transition_table_base VALUES (3, 'Three'), (4, 'Four'); -INFO: Named Tuplestore Scan - Output: id, val - -CREATE OR REPLACE FUNCTION transition_table_base_upd_func() - RETURNS trigger - LANGUAGE plpgsql -AS $$ -DECLARE - t text; - l text; -BEGIN - t = ''; - FOR l IN EXECUTE - $q$ - EXPLAIN (TIMING off, COSTS off, VERBOSE on) - SELECT * FROM oldtable ot FULL JOIN newtable nt USING (id) - $q$ LOOP - t = t || l || E'\n'; - END LOOP; - - RAISE INFO '%', t; - RETURN new; -END; -$$; -CREATE TRIGGER transition_table_base_upd_trig - AFTER UPDATE ON transition_table_base - REFERENCING OLD TABLE AS oldtable NEW TABLE AS newtable - FOR EACH STATEMENT - EXECUTE PROCEDURE transition_table_base_upd_func(); -UPDATE transition_table_base - SET val = '*' || val || '*' - WHERE id BETWEEN 2 AND 3; -INFO: Hash Full Join - Output: COALESCE(ot.id, nt.id), ot.val, nt.val - Hash Cond: (ot.id = nt.id) - -> Named Tuplestore Scan - Output: ot.id, ot.val - -> Hash - Output: 
nt.id, nt.val - -> Named Tuplestore Scan - Output: nt.id, nt.val - -CREATE TABLE transition_table_level1 -( - level1_no serial NOT NULL , - level1_node_name varchar(255), - PRIMARY KEY (level1_no) -) WITHOUT OIDS; -CREATE TABLE transition_table_level2 -( - level2_no serial NOT NULL , - parent_no int NOT NULL, - level1_node_name varchar(255), - PRIMARY KEY (level2_no) -) WITHOUT OIDS; -CREATE TABLE transition_table_status -( - level int NOT NULL, - node_no int NOT NULL, - status int, - PRIMARY KEY (level, node_no) -) WITHOUT OIDS; -CREATE FUNCTION transition_table_level1_ri_parent_del_func() - RETURNS TRIGGER - LANGUAGE plpgsql -AS $$ - DECLARE n bigint; - BEGIN - PERFORM FROM p JOIN transition_table_level2 c ON c.parent_no = p.level1_no; - IF FOUND THEN - RAISE EXCEPTION 'RI error'; - END IF; - RETURN NULL; - END; -$$; -CREATE TRIGGER transition_table_level1_ri_parent_del_trigger - AFTER DELETE ON transition_table_level1 - REFERENCING OLD TABLE AS p - FOR EACH STATEMENT EXECUTE PROCEDURE - transition_table_level1_ri_parent_del_func(); -CREATE FUNCTION transition_table_level1_ri_parent_upd_func() - RETURNS TRIGGER - LANGUAGE plpgsql -AS $$ - DECLARE - x int; - BEGIN - WITH p AS (SELECT level1_no, sum(delta) cnt - FROM (SELECT level1_no, 1 AS delta FROM i - UNION ALL - SELECT level1_no, -1 AS delta FROM d) w - GROUP BY level1_no - HAVING sum(delta) < 0) - SELECT level1_no - FROM p JOIN transition_table_level2 c ON c.parent_no = p.level1_no - INTO x; - IF FOUND THEN - RAISE EXCEPTION 'RI error'; - END IF; - RETURN NULL; - END; -$$; -CREATE TRIGGER transition_table_level1_ri_parent_upd_trigger - AFTER UPDATE ON transition_table_level1 - REFERENCING OLD TABLE AS d NEW TABLE AS i - FOR EACH STATEMENT EXECUTE PROCEDURE - transition_table_level1_ri_parent_upd_func(); -CREATE FUNCTION transition_table_level2_ri_child_insupd_func() - RETURNS TRIGGER - LANGUAGE plpgsql -AS $$ - BEGIN - PERFORM FROM i - LEFT JOIN transition_table_level1 p - ON p.level1_no IS NOT NULL AND p.level1_no = i.parent_no - WHERE p.level1_no IS NULL; - IF FOUND THEN - RAISE EXCEPTION 'RI error'; - END IF; - RETURN NULL; - END; -$$; -CREATE TRIGGER transition_table_level2_ri_child_ins_trigger - AFTER INSERT ON transition_table_level2 - REFERENCING NEW TABLE AS i - FOR EACH STATEMENT EXECUTE PROCEDURE - transition_table_level2_ri_child_insupd_func(); -CREATE TRIGGER transition_table_level2_ri_child_upd_trigger - AFTER UPDATE ON transition_table_level2 - REFERENCING NEW TABLE AS i - FOR EACH STATEMENT EXECUTE PROCEDURE - transition_table_level2_ri_child_insupd_func(); --- create initial test data -INSERT INTO transition_table_level1 (level1_no) - SELECT generate_series(1,200); -ANALYZE transition_table_level1; -INSERT INTO transition_table_level2 (level2_no, parent_no) - SELECT level2_no, level2_no / 50 + 1 AS parent_no - FROM generate_series(1,9999) level2_no; -ANALYZE transition_table_level2; -INSERT INTO transition_table_status (level, node_no, status) - SELECT 1, level1_no, 0 FROM transition_table_level1; -INSERT INTO transition_table_status (level, node_no, status) - SELECT 2, level2_no, 0 FROM transition_table_level2; -ANALYZE transition_table_status; -INSERT INTO transition_table_level1(level1_no) - SELECT generate_series(201,1000); -ANALYZE transition_table_level1; --- behave reasonably if someone tries to modify a transition table -CREATE FUNCTION transition_table_level2_bad_usage_func() - RETURNS TRIGGER - LANGUAGE plpgsql -AS $$ - BEGIN - INSERT INTO dx VALUES (1000000, 1000000, 'x'); - RETURN NULL; - END; -$$; -CREATE 
TRIGGER transition_table_level2_bad_usage_trigger - AFTER DELETE ON transition_table_level2 - REFERENCING OLD TABLE AS dx - FOR EACH STATEMENT EXECUTE PROCEDURE - transition_table_level2_bad_usage_func(); -DELETE FROM transition_table_level2 - WHERE level2_no BETWEEN 301 AND 305; -ERROR: relation "dx" cannot be the target of a modifying statement -CONTEXT: SQL statement "INSERT INTO dx VALUES (1000000, 1000000, 'x')" -PL/pgSQL function transition_table_level2_bad_usage_func() line 3 at SQL statement -DROP TRIGGER transition_table_level2_bad_usage_trigger - ON transition_table_level2; --- attempt modifications which would break RI (should all fail) -DELETE FROM transition_table_level1 - WHERE level1_no = 25; -ERROR: RI error -CONTEXT: PL/pgSQL function transition_table_level1_ri_parent_del_func() line 6 at RAISE -UPDATE transition_table_level1 SET level1_no = -1 - WHERE level1_no = 30; -ERROR: RI error -CONTEXT: PL/pgSQL function transition_table_level1_ri_parent_upd_func() line 15 at RAISE -INSERT INTO transition_table_level2 (level2_no, parent_no) - VALUES (10000, 10000); -ERROR: RI error -CONTEXT: PL/pgSQL function transition_table_level2_ri_child_insupd_func() line 8 at RAISE -UPDATE transition_table_level2 SET parent_no = 2000 - WHERE level2_no = 40; -ERROR: RI error -CONTEXT: PL/pgSQL function transition_table_level2_ri_child_insupd_func() line 8 at RAISE --- attempt modifications which would not break RI (should all succeed) -DELETE FROM transition_table_level1 - WHERE level1_no BETWEEN 201 AND 1000; -DELETE FROM transition_table_level1 - WHERE level1_no BETWEEN 100000000 AND 100000010; -SELECT count(*) FROM transition_table_level1; - count -------- - 200 -(1 row) - -DELETE FROM transition_table_level2 - WHERE level2_no BETWEEN 211 AND 220; -SELECT count(*) FROM transition_table_level2; - count -------- - 9989 -(1 row) - -CREATE TABLE alter_table_under_transition_tables -( - id int PRIMARY KEY, - name text -); -CREATE FUNCTION alter_table_under_transition_tables_upd_func() - RETURNS TRIGGER - LANGUAGE plpgsql -AS $$ -BEGIN - RAISE WARNING 'old table = %, new table = %', - (SELECT string_agg(id || '=' || name, ',') FROM d), - (SELECT string_agg(id || '=' || name, ',') FROM i); - RAISE NOTICE 'one = %', (SELECT 1 FROM alter_table_under_transition_tables LIMIT 1); - RETURN NULL; -END; -$$; --- should fail, TRUNCATE is not compatible with transition tables -CREATE TRIGGER alter_table_under_transition_tables_upd_trigger - AFTER TRUNCATE OR UPDATE ON alter_table_under_transition_tables - REFERENCING OLD TABLE AS d NEW TABLE AS i - FOR EACH STATEMENT EXECUTE PROCEDURE - alter_table_under_transition_tables_upd_func(); -ERROR: TRUNCATE triggers with transition tables are not supported --- should work -CREATE TRIGGER alter_table_under_transition_tables_upd_trigger - AFTER UPDATE ON alter_table_under_transition_tables - REFERENCING OLD TABLE AS d NEW TABLE AS i - FOR EACH STATEMENT EXECUTE PROCEDURE - alter_table_under_transition_tables_upd_func(); -INSERT INTO alter_table_under_transition_tables - VALUES (1, '1'), (2, '2'), (3, '3'); -UPDATE alter_table_under_transition_tables - SET name = name || name; -WARNING: old table = 1=1,2=2,3=3, new table = 1=11,2=22,3=33 -NOTICE: one = 1 --- now change 'name' to an integer to see what happens... 
-ALTER TABLE alter_table_under_transition_tables - ALTER COLUMN name TYPE int USING name::integer; -UPDATE alter_table_under_transition_tables - SET name = (name::text || name::text)::integer; -WARNING: old table = 1=11,2=22,3=33, new table = 1=1111,2=2222,3=3333 -NOTICE: one = 1 --- now drop column 'name' -ALTER TABLE alter_table_under_transition_tables - DROP column name; -UPDATE alter_table_under_transition_tables - SET id = id; -ERROR: column "name" does not exist -LINE 1: (SELECT string_agg(id || '=' || name, ',') FROM d) - ^ -QUERY: (SELECT string_agg(id || '=' || name, ',') FROM d) -CONTEXT: PL/pgSQL function alter_table_under_transition_tables_upd_func() line 3 at RAISE --- --- Test multiple reference to a transition table --- -CREATE TABLE multi_test (i int); -INSERT INTO multi_test VALUES (1); -CREATE OR REPLACE FUNCTION multi_test_trig() RETURNS trigger -LANGUAGE plpgsql AS $$ -BEGIN - RAISE NOTICE 'count = %', (SELECT COUNT(*) FROM new_test); - RAISE NOTICE 'count union = %', - (SELECT COUNT(*) - FROM (SELECT * FROM new_test UNION ALL SELECT * FROM new_test) ss); - RETURN NULL; -END$$; -CREATE TRIGGER my_trigger AFTER UPDATE ON multi_test - REFERENCING NEW TABLE AS new_test OLD TABLE as old_test - FOR EACH STATEMENT EXECUTE PROCEDURE multi_test_trig(); -UPDATE multi_test SET i = i; -NOTICE: count = 1 -NOTICE: count union = 2 -DROP TABLE multi_test; -DROP FUNCTION multi_test_trig(); --- --- Check type parsing and record fetching from partitioned tables --- -CREATE TABLE partitioned_table (a int, b text) PARTITION BY LIST (a); -CREATE TABLE pt_part1 PARTITION OF partitioned_table FOR VALUES IN (1); -CREATE TABLE pt_part2 PARTITION OF partitioned_table FOR VALUES IN (2); -INSERT INTO partitioned_table VALUES (1, 'Row 1'); -INSERT INTO partitioned_table VALUES (2, 'Row 2'); -CREATE OR REPLACE FUNCTION get_from_partitioned_table(partitioned_table.a%type) -RETURNS partitioned_table AS $$ -DECLARE - a_val partitioned_table.a%TYPE; - result partitioned_table%ROWTYPE; -BEGIN - a_val := $1; - SELECT * INTO result FROM partitioned_table WHERE a = a_val; - RETURN result; -END; $$ LANGUAGE plpgsql; -NOTICE: type reference partitioned_table.a%TYPE converted to integer -SELECT * FROM get_from_partitioned_table(1) AS t; - a | b ----+------- - 1 | Row 1 -(1 row) - -CREATE OR REPLACE FUNCTION list_partitioned_table() -RETURNS SETOF public.partitioned_table.a%TYPE AS $$ -DECLARE - row public.partitioned_table%ROWTYPE; - a_val public.partitioned_table.a%TYPE; -BEGIN - FOR row IN SELECT * FROM public.partitioned_table ORDER BY a LOOP - a_val := row.a; - RETURN NEXT a_val; - END LOOP; - RETURN; -END; $$ LANGUAGE plpgsql; -NOTICE: type reference public.partitioned_table.a%TYPE converted to integer -SELECT * FROM list_partitioned_table() AS t; - t ---- - 1 - 2 -(2 rows) - --- --- Check argument name is used instead of $n in error message --- -CREATE FUNCTION fx(x WSlot) RETURNS void AS $$ -BEGIN - GET DIAGNOSTICS x = ROW_COUNT; - RETURN; -END; $$ LANGUAGE plpgsql; -ERROR: "x" is not a scalar variable -LINE 3: GET DIAGNOSTICS x = ROW_COUNT; - ^ +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? 
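The crash cut plpgsql.out short right in the middle of its GET DIAGNOSTICS coverage. A minimal, self-contained sketch of the PG_CONTEXT call-stack inspection those tests exercise (available in PostgreSQL 9.4 and later); the function name here is illustrative, not part of the suite:

create function ctx_demo() returns void language plpgsql as $$
declare
  stack text;
begin
  get diagnostics stack = pg_context;  -- textual call stack, current frame first
  raise notice 'call stack: %', stack;
end;
$$;
select ctx_demo();  -- the NOTICE lists ctx_demo() plus any outer frames
drop function ctx_demo();

The lost ASSERT coverage is similarly compact; the plpgsql.check_asserts GUC (on by default, as the tests show) controls whether assertions are evaluated at all:

do $$
begin
  assert 2 + 2 = 4, 'arithmetic is broken';  -- holds, so no output
end;
$$;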
diff -U3 /Users/admin/pgsql/src/test/regress/expected/copy2.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/copy2.out --- /Users/admin/pgsql/src/test/regress/expected/copy2.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/copy2.out 2024-12-13 13:20:12 @@ -1,931 +1,2 @@ -CREATE TEMP TABLE x ( - a serial, - b int, - c text not null default 'stuff', - d text, - e text -) ; -CREATE FUNCTION fn_x_before () RETURNS TRIGGER AS ' - BEGIN - NEW.e := ''before trigger fired''::text; - return NEW; - END; -' LANGUAGE plpgsql; -CREATE FUNCTION fn_x_after () RETURNS TRIGGER AS ' - BEGIN - UPDATE x set e=''after trigger fired'' where c=''stuff''; - return NULL; - END; -' LANGUAGE plpgsql; -CREATE TRIGGER trg_x_after AFTER INSERT ON x -FOR EACH ROW EXECUTE PROCEDURE fn_x_after(); -CREATE TRIGGER trg_x_before BEFORE INSERT ON x -FOR EACH ROW EXECUTE PROCEDURE fn_x_before(); -COPY x (a, b, c, d, e) from stdin; -COPY x (b, d) from stdin; -COPY x (b, d) from stdin; -COPY x (a, b, c, d, e) from stdin; --- non-existent column in column list: should fail -COPY x (xyz) from stdin; -ERROR: column "xyz" of relation "x" does not exist --- redundant options -COPY x from stdin (format CSV, FORMAT CSV); -ERROR: conflicting or redundant options -LINE 1: COPY x from stdin (format CSV, FORMAT CSV); - ^ -COPY x from stdin (freeze off, freeze on); -ERROR: conflicting or redundant options -LINE 1: COPY x from stdin (freeze off, freeze on); - ^ -COPY x from stdin (delimiter ',', delimiter ','); -ERROR: conflicting or redundant options -LINE 1: COPY x from stdin (delimiter ',', delimiter ','); - ^ -COPY x from stdin (null ' ', null ' '); -ERROR: conflicting or redundant options -LINE 1: COPY x from stdin (null ' ', null ' '); - ^ -COPY x from stdin (header off, header on); -ERROR: conflicting or redundant options -LINE 1: COPY x from stdin (header off, header on); - ^ -COPY x from stdin (quote ':', quote ':'); -ERROR: conflicting or redundant options -LINE 1: COPY x from stdin (quote ':', quote ':'); - ^ -COPY x from stdin (escape ':', escape ':'); -ERROR: conflicting or redundant options -LINE 1: COPY x from stdin (escape ':', escape ':'); - ^ -COPY x from stdin (force_quote (a), force_quote *); -ERROR: conflicting or redundant options -LINE 1: COPY x from stdin (force_quote (a), force_quote *); - ^ -COPY x from stdin (force_not_null (a), force_not_null (b)); -ERROR: conflicting or redundant options -LINE 1: COPY x from stdin (force_not_null (a), force_not_null (b)); - ^ -COPY x from stdin (force_null (a), force_null (b)); -ERROR: conflicting or redundant options -LINE 1: COPY x from stdin (force_null (a), force_null (b)); - ^ -COPY x from stdin (convert_selectively (a), convert_selectively (b)); -ERROR: conflicting or redundant options -LINE 1: COPY x from stdin (convert_selectively (a), convert_selectiv... - ^ -COPY x from stdin (encoding 'sql_ascii', encoding 'sql_ascii'); -ERROR: conflicting or redundant options -LINE 1: COPY x from stdin (encoding 'sql_ascii', encoding 'sql_ascii... - ^ -COPY x from stdin (on_error ignore, on_error ignore); -ERROR: conflicting or redundant options -LINE 1: COPY x from stdin (on_error ignore, on_error ignore); - ^ -COPY x from stdin (log_verbosity default, log_verbosity verbose); -ERROR: conflicting or redundant options -LINE 1: COPY x from stdin (log_verbosity default, log_verbosity verb... 
- ^ --- incorrect options -COPY x from stdin (format BINARY, delimiter ','); -ERROR: cannot specify DELIMITER in BINARY mode -COPY x from stdin (format BINARY, null 'x'); -ERROR: cannot specify NULL in BINARY mode -COPY x from stdin (format BINARY, on_error ignore); -ERROR: only ON_ERROR STOP is allowed in BINARY mode -COPY x from stdin (on_error unsupported); -ERROR: COPY ON_ERROR "unsupported" not recognized -LINE 1: COPY x from stdin (on_error unsupported); - ^ -COPY x from stdin (format TEXT, force_quote(a)); -ERROR: COPY FORCE_QUOTE requires CSV mode -COPY x from stdin (format TEXT, force_quote *); -ERROR: COPY FORCE_QUOTE requires CSV mode -COPY x from stdin (format CSV, force_quote(a)); -ERROR: COPY FORCE_QUOTE cannot be used with COPY FROM -COPY x from stdin (format CSV, force_quote *); -ERROR: COPY FORCE_QUOTE cannot be used with COPY FROM -COPY x from stdin (format TEXT, force_not_null(a)); -ERROR: COPY FORCE_NOT_NULL requires CSV mode -COPY x from stdin (format TEXT, force_not_null *); -ERROR: COPY FORCE_NOT_NULL requires CSV mode -COPY x to stdout (format CSV, force_not_null(a)); -ERROR: COPY FORCE_NOT_NULL cannot be used with COPY TO -COPY x to stdout (format CSV, force_not_null *); -ERROR: COPY FORCE_NOT_NULL cannot be used with COPY TO -COPY x from stdin (format TEXT, force_null(a)); -ERROR: COPY FORCE_NULL requires CSV mode -COPY x from stdin (format TEXT, force_null *); -ERROR: COPY FORCE_NULL requires CSV mode -COPY x to stdout (format CSV, force_null(a)); -ERROR: COPY FORCE_NULL cannot be used with COPY TO -COPY x to stdout (format CSV, force_null *); -ERROR: COPY FORCE_NULL cannot be used with COPY TO -COPY x to stdout (format BINARY, on_error unsupported); -ERROR: COPY ON_ERROR cannot be used with COPY TO -LINE 1: COPY x to stdout (format BINARY, on_error unsupported); - ^ -COPY x from stdin (log_verbosity unsupported); -ERROR: COPY LOG_VERBOSITY "unsupported" not recognized -LINE 1: COPY x from stdin (log_verbosity unsupported); - ^ -COPY x from stdin with (reject_limit 1); -ERROR: COPY REJECT_LIMIT requires ON_ERROR to be set to IGNORE -COPY x from stdin with (on_error ignore, reject_limit 0); -ERROR: REJECT_LIMIT (0) must be greater than zero --- too many columns in column list: should fail -COPY x (a, b, c, d, e, d, c) from stdin; -ERROR: column "d" specified more than once --- missing data: should fail -COPY x from stdin; -ERROR: invalid input syntax for type integer: "" -CONTEXT: COPY x, line 1, column a: "" -COPY x from stdin; -ERROR: missing data for column "e" -CONTEXT: COPY x, line 1: "2000 230 23 23" -COPY x from stdin; -ERROR: missing data for column "e" -CONTEXT: COPY x, line 1: "2001 231 \N \N" --- extra data: should fail -COPY x from stdin; -ERROR: extra data after last expected column -CONTEXT: COPY x, line 1: "2002 232 40 50 60 70 80" --- various COPY options: delimiters, oids, NULL string, encoding -COPY x (b, c, d, e) from stdin delimiter ',' null 'x'; -COPY x from stdin WITH DELIMITER AS ';' NULL AS ''; -COPY x from stdin WITH DELIMITER AS ':' NULL AS E'\\X' ENCODING 'sql_ascii'; -COPY x TO stdout WHERE a = 1; -ERROR: WHERE clause not allowed with COPY TO -LINE 1: COPY x TO stdout WHERE a = 1; - ^ -COPY x from stdin WHERE a = 50004; -COPY x from stdin WHERE a > 60003; -COPY x from stdin WHERE f > 60003; -ERROR: column "f" does not exist -LINE 1: COPY x from stdin WHERE f > 60003; - ^ -COPY x from stdin WHERE a = max(x.b); -ERROR: aggregate functions are not allowed in COPY FROM WHERE conditions -LINE 1: COPY x from stdin WHERE a = max(x.b); - ^ 
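For contrast with the expressions rejected in these tests, a tiny sketch of a filter COPY FROM does accept (PostgreSQL 12 and later): a plain predicate over the incoming row. The table name is illustrative and the data columns are tab-separated.

create temp table copy_where_demo (a int, b text);
copy copy_where_demo from stdin where a > 1;
1	skipped
2	loaded
\.
select * from copy_where_demo;  -- only the a = 2 row was kept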
-COPY x from stdin WHERE a IN (SELECT 1 FROM x); -ERROR: cannot use subquery in COPY FROM WHERE condition -LINE 1: COPY x from stdin WHERE a IN (SELECT 1 FROM x); - ^ -COPY x from stdin WHERE a IN (generate_series(1,5)); -ERROR: set-returning functions are not allowed in COPY FROM WHERE conditions -LINE 1: COPY x from stdin WHERE a IN (generate_series(1,5)); - ^ -COPY x from stdin WHERE a = row_number() over(b); -ERROR: window functions are not allowed in COPY FROM WHERE conditions -LINE 1: COPY x from stdin WHERE a = row_number() over(b); - ^ --- check results of copy in -SELECT * FROM x; - a | b | c | d | e --------+----+------------+--------+---------------------- - 9999 | | \N | NN | before trigger fired - 10000 | 21 | 31 | 41 | before trigger fired - 10001 | 22 | 32 | 42 | before trigger fired - 10002 | 23 | 33 | 43 | before trigger fired - 10003 | 24 | 34 | 44 | before trigger fired - 10004 | 25 | 35 | 45 | before trigger fired - 10005 | 26 | 36 | 46 | before trigger fired - 6 | | 45 | 80 | before trigger fired - 7 | | x | \x | before trigger fired - 8 | | , | \, | before trigger fired - 3000 | | c | | before trigger fired - 4000 | | C | | before trigger fired - 4001 | 1 | empty | | before trigger fired - 4002 | 2 | null | | before trigger fired - 4003 | 3 | Backslash | \ | before trigger fired - 4004 | 4 | BackslashX | \X | before trigger fired - 4005 | 5 | N | N | before trigger fired - 4006 | 6 | BackslashN | \N | before trigger fired - 4007 | 7 | XX | XX | before trigger fired - 4008 | 8 | Delimiter | : | before trigger fired - 50004 | 25 | 35 | 45 | before trigger fired - 60004 | 25 | 35 | 45 | before trigger fired - 60005 | 26 | 36 | 46 | before trigger fired - 1 | 1 | stuff | test_1 | after trigger fired - 2 | 2 | stuff | test_2 | after trigger fired - 3 | 3 | stuff | test_3 | after trigger fired - 4 | 4 | stuff | test_4 | after trigger fired - 5 | 5 | stuff | test_5 | after trigger fired -(28 rows) - --- check copy out -COPY x TO stdout; -9999 \N \\N NN before trigger fired -10000 21 31 41 before trigger fired -10001 22 32 42 before trigger fired -10002 23 33 43 before trigger fired -10003 24 34 44 before trigger fired -10004 25 35 45 before trigger fired -10005 26 36 46 before trigger fired -6 \N 45 80 before trigger fired -7 \N x \\x before trigger fired -8 \N , \\, before trigger fired -3000 \N c \N before trigger fired -4000 \N C \N before trigger fired -4001 1 empty before trigger fired -4002 2 null \N before trigger fired -4003 3 Backslash \\ before trigger fired -4004 4 BackslashX \\X before trigger fired -4005 5 N N before trigger fired -4006 6 BackslashN \\N before trigger fired -4007 7 XX XX before trigger fired -4008 8 Delimiter : before trigger fired -50004 25 35 45 before trigger fired -60004 25 35 45 before trigger fired -60005 26 36 46 before trigger fired -1 1 stuff test_1 after trigger fired -2 2 stuff test_2 after trigger fired -3 3 stuff test_3 after trigger fired -4 4 stuff test_4 after trigger fired -5 5 stuff test_5 after trigger fired -COPY x (c, e) TO stdout; -\\N before trigger fired -31 before trigger fired -32 before trigger fired -33 before trigger fired -34 before trigger fired -35 before trigger fired -36 before trigger fired -45 before trigger fired -x before trigger fired -, before trigger fired -c before trigger fired -C before trigger fired -empty before trigger fired -null before trigger fired -Backslash before trigger fired -BackslashX before trigger fired -N before trigger fired -BackslashN before trigger fired -XX before trigger fired 
-Delimiter before trigger fired -35 before trigger fired -35 before trigger fired -36 before trigger fired -stuff after trigger fired -stuff after trigger fired -stuff after trigger fired -stuff after trigger fired -stuff after trigger fired -COPY x (b, e) TO stdout WITH NULL 'I''m null'; -I'm null before trigger fired -21 before trigger fired -22 before trigger fired -23 before trigger fired -24 before trigger fired -25 before trigger fired -26 before trigger fired -I'm null before trigger fired -I'm null before trigger fired -I'm null before trigger fired -I'm null before trigger fired -I'm null before trigger fired -1 before trigger fired -2 before trigger fired -3 before trigger fired -4 before trigger fired -5 before trigger fired -6 before trigger fired -7 before trigger fired -8 before trigger fired -25 before trigger fired -25 before trigger fired -26 before trigger fired -1 after trigger fired -2 after trigger fired -3 after trigger fired -4 after trigger fired -5 after trigger fired -CREATE TEMP TABLE y ( - col1 text, - col2 text -); -INSERT INTO y VALUES ('Jackson, Sam', E'\\h'); -INSERT INTO y VALUES ('It is "perfect".',E'\t'); -INSERT INTO y VALUES ('', NULL); -COPY y TO stdout WITH CSV; -"Jackson, Sam",\h -"It is ""perfect"".", -"", -COPY y TO stdout WITH CSV QUOTE '''' DELIMITER '|'; -Jackson, Sam|\h -It is "perfect".| -''| -COPY y TO stdout WITH CSV FORCE QUOTE col2 ESCAPE E'\\' ENCODING 'sql_ascii'; -"Jackson, Sam","\\h" -"It is \"perfect\"."," " -"", -COPY y TO stdout WITH CSV FORCE QUOTE *; -"Jackson, Sam","\h" -"It is ""perfect""."," " -"", --- Repeat above tests with new 9.0 option syntax -COPY y TO stdout (FORMAT CSV); -"Jackson, Sam",\h -"It is ""perfect"".", -"", -COPY y TO stdout (FORMAT CSV, QUOTE '''', DELIMITER '|'); -Jackson, Sam|\h -It is "perfect".| -''| -COPY y TO stdout (FORMAT CSV, FORCE_QUOTE (col2), ESCAPE E'\\'); -"Jackson, Sam","\\h" -"It is \"perfect\"."," " -"", -COPY y TO stdout (FORMAT CSV, FORCE_QUOTE *); -"Jackson, Sam","\h" -"It is ""perfect""."," " -"", -\copy y TO stdout (FORMAT CSV) -"Jackson, Sam",\h -"It is ""perfect"".", -"", -\copy y TO stdout (FORMAT CSV, QUOTE '''', DELIMITER '|') -Jackson, Sam|\h -It is "perfect".| -''| -\copy y TO stdout (FORMAT CSV, FORCE_QUOTE (col2), ESCAPE E'\\') -"Jackson, Sam","\\h" -"It is \"perfect\"."," " -"", -\copy y TO stdout (FORMAT CSV, FORCE_QUOTE *) -"Jackson, Sam","\h" -"It is ""perfect""."," " -"", ---test that we read consecutive LFs properly -CREATE TEMP TABLE testnl (a int, b text, c int); -COPY testnl FROM stdin CSV; --- test end of copy marker -CREATE TEMP TABLE testeoc (a text); -COPY testeoc FROM stdin CSV; -COPY testeoc TO stdout CSV; -a\. -\.b -c\.d -"\." 
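The testeoc rows just above show why COPY quotes a bare \. in CSV output: left unquoted on a line of its own, it would be taken as the end-of-data marker on reload. A small illustrative sketch (table name not from the suite):

create temp table eoc_demo (t text);
insert into eoc_demo values ('\.');
copy eoc_demo to stdout csv;  -- prints "\." with quotes, so a round trip
                              -- through COPY FROM is not cut short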
--- test handling of nonstandard null marker that violates escaping rules -CREATE TEMP TABLE testnull(a int, b text); -INSERT INTO testnull VALUES (1, E'\\0'), (NULL, NULL); -COPY testnull TO stdout WITH NULL AS E'\\0'; -1 \\0 -\0 \0 -COPY testnull FROM stdin WITH NULL AS E'\\0'; -SELECT * FROM testnull; - a | b -----+---- - 1 | \0 - | - 42 | \0 - | -(4 rows) - -BEGIN; -CREATE TABLE vistest (LIKE testeoc); -COPY vistest FROM stdin CSV; -COMMIT; -SELECT * FROM vistest; - a ----- - a0 - b -(2 rows) - -BEGIN; -TRUNCATE vistest; -COPY vistest FROM stdin CSV; -SELECT * FROM vistest; - a ----- - a1 - b -(2 rows) - -SAVEPOINT s1; -TRUNCATE vistest; -COPY vistest FROM stdin CSV; -SELECT * FROM vistest; - a ----- - d1 - e -(2 rows) - -COMMIT; -SELECT * FROM vistest; - a ----- - d1 - e -(2 rows) - -BEGIN; -TRUNCATE vistest; -COPY vistest FROM stdin CSV FREEZE; -SELECT * FROM vistest; - a ----- - a2 - b -(2 rows) - -SAVEPOINT s1; -TRUNCATE vistest; -COPY vistest FROM stdin CSV FREEZE; -SELECT * FROM vistest; - a ----- - d2 - e -(2 rows) - -COMMIT; -SELECT * FROM vistest; - a ----- - d2 - e -(2 rows) - -BEGIN; -TRUNCATE vistest; -COPY vistest FROM stdin CSV FREEZE; -SELECT * FROM vistest; - a ---- - x - y -(2 rows) - -COMMIT; -TRUNCATE vistest; -COPY vistest FROM stdin CSV FREEZE; -ERROR: cannot perform COPY FREEZE because the table was not created or truncated in the current subtransaction -BEGIN; -TRUNCATE vistest; -SAVEPOINT s1; -COPY vistest FROM stdin CSV FREEZE; -ERROR: cannot perform COPY FREEZE because the table was not created or truncated in the current subtransaction -COMMIT; -BEGIN; -INSERT INTO vistest VALUES ('z'); -SAVEPOINT s1; -TRUNCATE vistest; -ROLLBACK TO SAVEPOINT s1; -COPY vistest FROM stdin CSV FREEZE; -ERROR: cannot perform COPY FREEZE because the table was not created or truncated in the current subtransaction -COMMIT; -CREATE FUNCTION truncate_in_subxact() RETURNS VOID AS -$$ -BEGIN - TRUNCATE vistest; -EXCEPTION - WHEN OTHERS THEN - INSERT INTO vistest VALUES ('subxact failure'); -END; -$$ language plpgsql; -BEGIN; -INSERT INTO vistest VALUES ('z'); -SELECT truncate_in_subxact(); - truncate_in_subxact ---------------------- - -(1 row) - -COPY vistest FROM stdin CSV FREEZE; -SELECT * FROM vistest; - a ----- - d4 - e -(2 rows) - -COMMIT; -SELECT * FROM vistest; - a ----- - d4 - e -(2 rows) - --- Test FORCE_NOT_NULL and FORCE_NULL options -CREATE TEMP TABLE forcetest ( - a INT NOT NULL, - b TEXT NOT NULL, - c TEXT, - d TEXT, - e TEXT -); -\pset null NULL --- should succeed with no effect ("b" remains an empty string, "c" remains NULL) -BEGIN; -COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL(b), FORCE_NULL(c)); -COMMIT; -SELECT b, c FROM forcetest WHERE a = 1; - b | c ----+------ - | NULL -(1 row) - --- should succeed, FORCE_NULL and FORCE_NOT_NULL can be both specified -BEGIN; -COPY forcetest (a, b, c, d) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL(c,d), FORCE_NULL(c,d)); -COMMIT; -SELECT c, d FROM forcetest WHERE a = 2; - c | d ----+------ - | NULL -(1 row) - --- should fail with not-null constraint violation -BEGIN; -COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NULL(b), FORCE_NOT_NULL(c)); -ERROR: null value in column "b" of relation "forcetest" violates not-null constraint -DETAIL: Failing row contains (3, null, , null, null). 
-CONTEXT: COPY forcetest, line 1: "3,,""" -ROLLBACK; --- should fail with "not referenced by COPY" error -BEGIN; -COPY forcetest (d, e) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL(b)); -ERROR: FORCE_NOT_NULL column "b" not referenced by COPY -ROLLBACK; --- should fail with "not referenced by COPY" error -BEGIN; -COPY forcetest (d, e) FROM STDIN WITH (FORMAT csv, FORCE_NULL(b)); -ERROR: FORCE_NULL column "b" not referenced by COPY -ROLLBACK; --- should succeed with no effect ("b" remains an empty string, "c" remains NULL) -BEGIN; -COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL *, FORCE_NULL *); -COMMIT; -SELECT b, c FROM forcetest WHERE a = 4; - b | c ----+------ - | NULL -(1 row) - --- should succeed with effect ("b" remains an empty string) -BEGIN; -COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL *); -COMMIT; -SELECT b, c FROM forcetest WHERE a = 5; - b | c ----+--- - | -(1 row) - --- should succeed with effect ("c" remains NULL) -BEGIN; -COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NULL *); -COMMIT; -SELECT b, c FROM forcetest WHERE a = 6; - b | c ----+------ - b | NULL -(1 row) - --- should fail with "conflicting or redundant options" error -BEGIN; -COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL *, FORCE_NOT_NULL(b)); -ERROR: conflicting or redundant options -LINE 1: ...c) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL *, FORCE_NOT_... - ^ -ROLLBACK; --- should fail with "conflicting or redundant options" error -BEGIN; -COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NULL *, FORCE_NULL(b)); -ERROR: conflicting or redundant options -LINE 1: ... b, c) FROM STDIN WITH (FORMAT csv, FORCE_NULL *, FORCE_NULL... - ^ -ROLLBACK; -\pset null '' --- test case with whole-row Var in a check constraint -create table check_con_tbl (f1 int); -create function check_con_function(check_con_tbl) returns bool as $$ -begin - raise notice 'input = %', row_to_json($1); - return $1.f1 > 0; -end $$ language plpgsql immutable; -alter table check_con_tbl add check (check_con_function(check_con_tbl.*)); -\d+ check_con_tbl - Table "public.check_con_tbl" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - f1 | integer | | | | plain | | -Check constraints: - "check_con_tbl_check" CHECK (check_con_function(check_con_tbl.*)) - -copy check_con_tbl from stdin; -NOTICE: input = {"f1":1} -NOTICE: input = {"f1":null} -copy check_con_tbl from stdin; -NOTICE: input = {"f1":0} -ERROR: new row for relation "check_con_tbl" violates check constraint "check_con_tbl_check" -DETAIL: Failing row contains (0). -CONTEXT: COPY check_con_tbl, line 1: "0" -select * from check_con_tbl; - f1 ----- - 1 - -(2 rows) - --- test with RLS enabled. 
-CREATE ROLE regress_rls_copy_user; -CREATE ROLE regress_rls_copy_user_colperms; -CREATE TABLE rls_t1 (a int, b int, c int); -COPY rls_t1 (a, b, c) from stdin; -CREATE POLICY p1 ON rls_t1 FOR SELECT USING (a % 2 = 0); -ALTER TABLE rls_t1 ENABLE ROW LEVEL SECURITY; -ALTER TABLE rls_t1 FORCE ROW LEVEL SECURITY; -GRANT SELECT ON TABLE rls_t1 TO regress_rls_copy_user; -GRANT SELECT (a, b) ON TABLE rls_t1 TO regress_rls_copy_user_colperms; --- all columns -COPY rls_t1 TO stdout; -1 4 1 -2 3 2 -3 2 3 -4 1 4 -COPY rls_t1 (a, b, c) TO stdout; -1 4 1 -2 3 2 -3 2 3 -4 1 4 --- subset of columns -COPY rls_t1 (a) TO stdout; -1 -2 -3 -4 -COPY rls_t1 (a, b) TO stdout; -1 4 -2 3 -3 2 -4 1 --- column reordering -COPY rls_t1 (b, a) TO stdout; -4 1 -3 2 -2 3 -1 4 -SET SESSION AUTHORIZATION regress_rls_copy_user; --- all columns -COPY rls_t1 TO stdout; -2 3 2 -4 1 4 -COPY rls_t1 (a, b, c) TO stdout; -2 3 2 -4 1 4 --- subset of columns -COPY rls_t1 (a) TO stdout; -2 -4 -COPY rls_t1 (a, b) TO stdout; -2 3 -4 1 --- column reordering -COPY rls_t1 (b, a) TO stdout; -3 2 -1 4 -RESET SESSION AUTHORIZATION; -SET SESSION AUTHORIZATION regress_rls_copy_user_colperms; --- attempt all columns (should fail) -COPY rls_t1 TO stdout; -ERROR: permission denied for table rls_t1 -COPY rls_t1 (a, b, c) TO stdout; -ERROR: permission denied for table rls_t1 --- try to copy column with no privileges (should fail) -COPY rls_t1 (c) TO stdout; -ERROR: permission denied for table rls_t1 --- subset of columns (should succeed) -COPY rls_t1 (a) TO stdout; -2 -4 -COPY rls_t1 (a, b) TO stdout; -2 3 -4 1 -RESET SESSION AUTHORIZATION; --- test with INSTEAD OF INSERT trigger on a view -CREATE TABLE instead_of_insert_tbl(id serial, name text); -CREATE VIEW instead_of_insert_tbl_view AS SELECT ''::text AS str; -COPY instead_of_insert_tbl_view FROM stdin; -- fail -ERROR: cannot copy to view "instead_of_insert_tbl_view" -HINT: To enable copying to a view, provide an INSTEAD OF INSERT trigger. -CREATE FUNCTION fun_instead_of_insert_tbl() RETURNS trigger AS $$ -BEGIN - INSERT INTO instead_of_insert_tbl (name) VALUES (NEW.str); - RETURN NULL; -END; -$$ LANGUAGE plpgsql; -CREATE TRIGGER trig_instead_of_insert_tbl_view - INSTEAD OF INSERT ON instead_of_insert_tbl_view - FOR EACH ROW EXECUTE PROCEDURE fun_instead_of_insert_tbl(); -COPY instead_of_insert_tbl_view FROM stdin; -SELECT * FROM instead_of_insert_tbl; - id | name -----+------- - 1 | test1 -(1 row) - --- Test of COPY optimization with view using INSTEAD OF INSERT --- trigger when relation is created in the same transaction as --- when COPY is executed. 
-BEGIN;
-CREATE VIEW instead_of_insert_tbl_view_2 as select ''::text as str;
-CREATE TRIGGER trig_instead_of_insert_tbl_view_2
-  INSTEAD OF INSERT ON instead_of_insert_tbl_view_2
-  FOR EACH ROW EXECUTE PROCEDURE fun_instead_of_insert_tbl();
-COPY instead_of_insert_tbl_view_2 FROM stdin;
-SELECT * FROM instead_of_insert_tbl;
- id | name
-----+-------
-  1 | test1
-  2 | test1
-(2 rows)
-
-COMMIT;
--- tests for on_error option
-CREATE TABLE check_ign_err (n int, m int[], k int);
-COPY check_ign_err FROM STDIN WITH (on_error stop);
-ERROR:  invalid input syntax for type integer: "a"
-CONTEXT:  COPY check_ign_err, line 2, column n: "a"
--- want context for notices
-\set SHOW_CONTEXT always
-COPY check_ign_err FROM STDIN WITH (on_error ignore, log_verbosity verbose);
-NOTICE:  skipping row due to data type incompatibility at line 2 for column "n": "a"
-CONTEXT:  COPY check_ign_err
-NOTICE:  skipping row due to data type incompatibility at line 3 for column "k": "3333333333"
-CONTEXT:  COPY check_ign_err
-NOTICE:  skipping row due to data type incompatibility at line 4 for column "m": "{a, 4}"
-CONTEXT:  COPY check_ign_err
-NOTICE:  skipping row due to data type incompatibility at line 5 for column "n": ""
-CONTEXT:  COPY check_ign_err
-NOTICE:  skipping row due to data type incompatibility at line 7 for column "m": "a"
-CONTEXT:  COPY check_ign_err
-NOTICE:  skipping row due to data type incompatibility at line 8 for column "k": "a"
-CONTEXT:  COPY check_ign_err
-NOTICE:  6 rows were skipped due to data type incompatibility
--- tests for on_error option with log_verbosity and null constraint via domain
-CREATE DOMAIN dcheck_ign_err2 varchar(15) NOT NULL;
-CREATE TABLE check_ign_err2 (n int, m int[], k int, l dcheck_ign_err2);
-COPY check_ign_err2 FROM STDIN WITH (on_error ignore, log_verbosity verbose);
-NOTICE:  skipping row due to data type incompatibility at line 2 for column "l": null input
-CONTEXT:  COPY check_ign_err2
-NOTICE:  1 row was skipped due to data type incompatibility
-COPY check_ign_err2 FROM STDIN WITH (on_error ignore, log_verbosity silent);
--- reset context choice
-\set SHOW_CONTEXT errors
-SELECT * FROM check_ign_err;
- n |  m  | k
----+-----+---
- 1 | {1} | 1
- 5 | {5} | 5
- 8 | {8} | 8
-(3 rows)
-
-SELECT * FROM check_ign_err2;
- n |  m  | k |   l
----+-----+---+-------
- 1 | {1} | 1 | 'foo'
- 3 | {3} | 3 | 'bar'
-(2 rows)
-
--- test datatype error that can't be handled as soft: should fail
-CREATE TABLE hard_err(foo widget);
-COPY hard_err FROM STDIN WITH (on_error ignore);
-ERROR:  invalid input syntax for type widget: "1"
-CONTEXT:  COPY hard_err, line 1, column foo: "1"
--- test missing data: should fail
-COPY check_ign_err FROM STDIN WITH (on_error ignore);
-ERROR:  missing data for column "k"
-CONTEXT:  COPY check_ign_err, line 1: "1	{1}"
--- test extra data: should fail
-COPY check_ign_err FROM STDIN WITH (on_error ignore);
-ERROR:  extra data after last expected column
-CONTEXT:  COPY check_ign_err, line 1: "1	{1}	3	abc"
--- tests for reject_limit option
-COPY check_ign_err FROM STDIN WITH (on_error ignore, reject_limit 3);
-ERROR:  skipped more than REJECT_LIMIT (3) rows due to data type incompatibility
-CONTEXT:  COPY check_ign_err, line 5, column n: ""
-COPY check_ign_err FROM STDIN WITH (on_error ignore, reject_limit 4);
-NOTICE:  4 rows were skipped due to data type incompatibility
--- clean up
-DROP TABLE forcetest;
-DROP TABLE vistest;
-DROP FUNCTION truncate_in_subxact();
-DROP TABLE x, y;
-DROP TABLE rls_t1 CASCADE;
-DROP ROLE regress_rls_copy_user;
-DROP ROLE regress_rls_copy_user_colperms;
-DROP FUNCTION fn_x_before();
-DROP FUNCTION fn_x_after();
-DROP TABLE instead_of_insert_tbl;
-DROP VIEW instead_of_insert_tbl_view;
-DROP VIEW instead_of_insert_tbl_view_2;
-DROP FUNCTION fun_instead_of_insert_tbl();
-DROP TABLE check_ign_err;
-DROP TABLE check_ign_err2;
-DROP DOMAIN dcheck_ign_err2;
-DROP TABLE hard_err;
---
--- COPY FROM ... DEFAULT
---
-create temp table copy_default (
-  id integer primary key,
-  text_value text not null default 'test',
-  ts_value timestamp without time zone not null default '2022-07-05'
-);
--- if DEFAULT is not specified, then the marker will be regular data
-copy copy_default from stdin;
-select id, text_value, ts_value from copy_default;
- id | text_value |         ts_value
-----+------------+--------------------------
-  1 | value      | Mon Jul 04 00:00:00 2022
-  2 | D          | Tue Jul 05 00:00:00 2022
-(2 rows)
-
-truncate copy_default;
-copy copy_default from stdin with (format csv);
-select id, text_value, ts_value from copy_default;
- id | text_value |         ts_value
-----+------------+--------------------------
-  1 | value      | Mon Jul 04 00:00:00 2022
-  2 | \D         | Tue Jul 05 00:00:00 2022
-(2 rows)
-
-truncate copy_default;
--- DEFAULT cannot be used in binary mode
-copy copy_default from stdin with (format binary, default '\D');
-ERROR:  cannot specify DEFAULT in BINARY mode
--- DEFAULT cannot be new line nor carriage return
-copy copy_default from stdin with (default E'\n');
-ERROR:  COPY default representation cannot use newline or carriage return
-copy copy_default from stdin with (default E'\r');
-ERROR:  COPY default representation cannot use newline or carriage return
--- DELIMITER cannot appear in DEFAULT spec
-copy copy_default from stdin with (delimiter ';', default 'test;test');
-ERROR:  COPY delimiter character must not appear in the DEFAULT specification
--- CSV quote cannot appear in DEFAULT spec
-copy copy_default from stdin with (format csv, quote '"', default 'test"test');
-ERROR:  CSV quote character must not appear in the DEFAULT specification
--- NULL and DEFAULT spec must be different
-copy copy_default from stdin with (default '\N');
-ERROR:  NULL specification and DEFAULT specification cannot be the same
--- cannot use DEFAULT marker in column that has no DEFAULT value
-copy copy_default from stdin with (default '\D');
-ERROR:  unexpected default marker in COPY data
-DETAIL:  Column "id" has no default value.
-CONTEXT:  COPY copy_default, line 1: "\D	value	'2022-07-04'"
-copy copy_default from stdin with (format csv, default '\D');
-ERROR:  unexpected default marker in COPY data
-DETAIL:  Column "id" has no default value.
-CONTEXT:  COPY copy_default, line 1: "\D,value,2022-07-04"
--- The DEFAULT marker must be unquoted and unescaped or it's not recognized
-copy copy_default from stdin with (default '\D');
-select id, text_value, ts_value from copy_default;
- id | text_value |         ts_value
-----+------------+--------------------------
-  1 | test       | Mon Jul 04 00:00:00 2022
-  2 | \D         | Mon Jul 04 00:00:00 2022
-  3 | "D"        | Mon Jul 04 00:00:00 2022
-(3 rows)
-
-truncate copy_default;
-copy copy_default from stdin with (format csv, default '\D');
-select id, text_value, ts_value from copy_default;
- id | text_value |         ts_value
-----+------------+--------------------------
-  1 | test       | Mon Jul 04 00:00:00 2022
-  2 | \\D        | Mon Jul 04 00:00:00 2022
-  3 | \D         | Mon Jul 04 00:00:00 2022
-(3 rows)
-
-truncate copy_default;
--- successful usage of DEFAULT option in COPY
-copy copy_default from stdin with (default '\D');
-select id, text_value, ts_value from copy_default;
- id | text_value |         ts_value
-----+------------+--------------------------
-  1 | value      | Mon Jul 04 00:00:00 2022
-  2 | test       | Sun Jul 03 00:00:00 2022
-  3 | test       | Tue Jul 05 00:00:00 2022
-(3 rows)
-
-truncate copy_default;
-copy copy_default from stdin with (format csv, default '\D');
-select id, text_value, ts_value from copy_default;
- id | text_value |         ts_value
-----+------------+--------------------------
-  1 | value      | Mon Jul 04 00:00:00 2022
-  2 | test       | Sun Jul 03 00:00:00 2022
-  3 | test       | Tue Jul 05 00:00:00 2022
-(3 rows)
-
-truncate copy_default;
--- DEFAULT cannot be used in COPY TO
-copy (select 1 as test) TO stdout with (default '\D');
-ERROR:  COPY DEFAULT cannot be used with COPY TO
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+	Is the server running locally and accepting connections on that socket?
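Taken together, the tests above pin down the semantics of COPY's DEFAULT option. As a compact, hypothetical end-to-end sketch (table t and marker '\D' are illustrative, not part of the test suite): a cell equal to the unquoted marker takes the column default, anything else is regular data.

    CREATE TEMP TABLE t (id int, v text DEFAULT 'dv');
    COPY t FROM stdin WITH (default '\D');  -- columns are tab-separated
    1	\D
    2	explicit
    \.
    SELECT * FROM t;  -- row 1 gets v = 'dv' (the column default), row 2 keeps 'explicit'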
diff -U3 /Users/admin/pgsql/src/test/regress/expected/temp.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/temp.out
--- /Users/admin/pgsql/src/test/regress/expected/temp.out	2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/temp.out	2024-12-13 13:20:12
@@ -1,412 +1,2 @@
---
--- TEMP
--- Test temp relations and indexes
---
--- test temp table/index masking
-CREATE TABLE temptest(col int);
-CREATE INDEX i_temptest ON temptest(col);
-CREATE TEMP TABLE temptest(tcol int);
-CREATE INDEX i_temptest ON temptest(tcol);
-SELECT * FROM temptest;
- tcol
-------
-(0 rows)
-
-DROP INDEX i_temptest;
-DROP TABLE temptest;
-SELECT * FROM temptest;
- col
------
-(0 rows)
-
-DROP INDEX i_temptest;
-DROP TABLE temptest;
--- test temp table selects
-CREATE TABLE temptest(col int);
-INSERT INTO temptest VALUES (1);
-CREATE TEMP TABLE temptest(tcol float);
-INSERT INTO temptest VALUES (2.1);
-SELECT * FROM temptest;
- tcol
-------
-  2.1
-(1 row)
-
-DROP TABLE temptest;
-SELECT * FROM temptest;
- col
------
-   1
-(1 row)
-
-DROP TABLE temptest;
--- test temp table deletion
-CREATE TEMP TABLE temptest(col int);
-\c
-SELECT * FROM temptest;
-ERROR:  relation "temptest" does not exist
-LINE 1: SELECT * FROM temptest;
-                      ^
--- Test ON COMMIT DELETE ROWS
-CREATE TEMP TABLE temptest(col int) ON COMMIT DELETE ROWS;
--- while we're here, verify successful truncation of index with SQL function
-CREATE INDEX ON temptest(bit_length(''));
-BEGIN;
-INSERT INTO temptest VALUES (1);
-INSERT INTO temptest VALUES (2);
-SELECT * FROM temptest;
- col
------
-   1
-   2
-(2 rows)
-
-COMMIT;
-SELECT * FROM temptest;
- col
------
-(0 rows)
-
-DROP TABLE temptest;
-BEGIN;
-CREATE TEMP TABLE temptest(col) ON COMMIT DELETE ROWS AS SELECT 1;
-SELECT * FROM temptest;
- col
------
-   1
-(1 row)
-
-COMMIT;
-SELECT * FROM temptest;
- col
------
-(0 rows)
-
-DROP TABLE temptest;
--- Test ON COMMIT DROP
-BEGIN;
-CREATE TEMP TABLE temptest(col int) ON COMMIT DROP;
-INSERT INTO temptest VALUES (1);
-INSERT INTO temptest VALUES (2);
-SELECT * FROM temptest;
- col
------
-   1
-   2
-(2 rows)
-
-COMMIT;
-SELECT * FROM temptest;
-ERROR:  relation "temptest" does not exist
-LINE 1: SELECT * FROM temptest;
-                      ^
-BEGIN;
-CREATE TEMP TABLE temptest(col) ON COMMIT DROP AS SELECT 1;
-SELECT * FROM temptest;
- col
------
-   1
-(1 row)
-
-COMMIT;
-SELECT * FROM temptest;
-ERROR:  relation "temptest" does not exist
-LINE 1: SELECT * FROM temptest;
-                      ^
--- Test it with a CHECK condition that produces a toasted pg_constraint entry
-BEGIN;
-do $$
-begin
-  execute format($cmd$
-    CREATE TEMP TABLE temptest (col text CHECK (col < %L)) ON COMMIT DROP
-  $cmd$,
-    (SELECT string_agg(g.i::text || ':' || random()::text, '|')
-     FROM generate_series(1, 100) g(i)));
-end$$;
-SELECT * FROM temptest;
- col
------
-(0 rows)
-
-COMMIT;
-SELECT * FROM temptest;
-ERROR:  relation "temptest" does not exist
-LINE 1: SELECT * FROM temptest;
-                      ^
--- ON COMMIT is only allowed for TEMP
-CREATE TABLE temptest(col int) ON COMMIT DELETE ROWS;
-ERROR:  ON COMMIT can only be used on temporary tables
-CREATE TABLE temptest(col) ON COMMIT DELETE ROWS AS SELECT 1;
-ERROR:  ON COMMIT can only be used on temporary tables
--- Test foreign keys
-BEGIN;
-CREATE TEMP TABLE temptest1(col int PRIMARY KEY);
-CREATE TEMP TABLE temptest2(col int REFERENCES temptest1)
-  ON COMMIT DELETE ROWS;
-INSERT INTO temptest1 VALUES (1);
-INSERT INTO temptest2 VALUES (1);
-COMMIT;
-SELECT * FROM temptest1;
- col
------
-   1
-(1 row)
-
-SELECT * FROM temptest2;
- col
------
-(0 rows)
-
-BEGIN;
-CREATE TEMP TABLE temptest3(col int PRIMARY KEY) ON COMMIT DELETE ROWS;
-CREATE TEMP TABLE temptest4(col int REFERENCES temptest3);
-COMMIT;
-ERROR:  unsupported ON COMMIT and foreign key combination
-DETAIL:  Table "temptest4" references "temptest3", but they do not have the same ON COMMIT setting.
--- Test manipulation of temp schema's placement in search path
-create table public.whereami (f1 text);
-insert into public.whereami values ('public');
-create temp table whereami (f1 text);
-insert into whereami values ('temp');
-create function public.whoami() returns text
-  as $$select 'public'::text$$ language sql;
-create function pg_temp.whoami() returns text
-  as $$select 'temp'::text$$ language sql;
--- default should have pg_temp implicitly first, but only for tables
-select * from whereami;
-  f1
-------
- temp
-(1 row)
-
-select whoami();
- whoami
---------
- public
-(1 row)
-
--- can list temp first explicitly, but it still doesn't affect functions
-set search_path = pg_temp, public;
-select * from whereami;
-  f1
-------
- temp
-(1 row)
-
-select whoami();
- whoami
---------
- public
-(1 row)
-
--- or put it last for security
-set search_path = public, pg_temp;
-select * from whereami;
-   f1
---------
- public
-(1 row)
-
-select whoami();
- whoami
---------
- public
-(1 row)
-
--- you can invoke a temp function explicitly, though
-select pg_temp.whoami();
- whoami
---------
- temp
-(1 row)
-
-drop table public.whereami;
--- types in temp schema
-set search_path = pg_temp, public;
-create domain pg_temp.nonempty as text check (value <> '');
--- function-syntax invocation of types matches rules for functions
-select nonempty('');
-ERROR:  function nonempty(unknown) does not exist
-LINE 1: select nonempty('');
-               ^
-HINT:  No function matches the given name and argument types. You might need to add explicit type casts.
-select pg_temp.nonempty('');
-ERROR:  value for domain nonempty violates check constraint "nonempty_check"
--- other syntax matches rules for tables
-select ''::nonempty;
-ERROR:  value for domain nonempty violates check constraint "nonempty_check"
-reset search_path;
--- For partitioned temp tables, ON COMMIT actions ignore storage-less
--- partitioned tables.
-begin;
-create temp table temp_parted_oncommit (a int)
-  partition by list (a) on commit delete rows;
-create temp table temp_parted_oncommit_1
-  partition of temp_parted_oncommit
-  for values in (1) on commit delete rows;
-insert into temp_parted_oncommit values (1);
-commit;
--- partitions are emptied by the previous commit
-select * from temp_parted_oncommit;
- a
----
-(0 rows)
-
-drop table temp_parted_oncommit;
--- Check dependencies between ON COMMIT actions with a partitioned
--- table and its partitions.  Using ON COMMIT DROP on a parent removes
--- the whole set.
-begin;
-create temp table temp_parted_oncommit_test (a int)
-  partition by list (a) on commit drop;
-create temp table temp_parted_oncommit_test1
-  partition of temp_parted_oncommit_test
-  for values in (1) on commit delete rows;
-create temp table temp_parted_oncommit_test2
-  partition of temp_parted_oncommit_test
-  for values in (2) on commit drop;
-insert into temp_parted_oncommit_test values (1), (2);
-commit;
--- no relations remain in this case.
-select relname from pg_class where relname ~ '^temp_parted_oncommit_test';
- relname
----------
-(0 rows)
-
--- Using ON COMMIT DELETE on a partitioned table does not remove
--- all rows if partitions preserve their data.
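As a sketch of the claim in the comment above (pp and pp1 are hypothetical names, not the test's objects): a partition declared ON COMMIT PRESERVE ROWS keeps its contents even when the partitioned parent says ON COMMIT DELETE ROWS, because the parent itself has no storage to truncate.

    begin;
    create temp table pp (a int) partition by list (a) on commit delete rows;
    create temp table pp1 partition of pp for values in (1) on commit preserve rows;
    insert into pp values (1);
    commit;
    select * from pp;  -- still returns the row; only storage-bearing relations are truncated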
-begin;
-create temp table temp_parted_oncommit_test (a int)
-  partition by list (a) on commit delete rows;
-create temp table temp_parted_oncommit_test1
-  partition of temp_parted_oncommit_test
-  for values in (1) on commit preserve rows;
-create temp table temp_parted_oncommit_test2
-  partition of temp_parted_oncommit_test
-  for values in (2) on commit drop;
-insert into temp_parted_oncommit_test values (1), (2);
-commit;
--- Data from the remaining partition is still here as its rows are
--- preserved.
-select * from temp_parted_oncommit_test;
- a
----
- 1
-(1 row)
-
--- two relations remain in this case.
-select relname from pg_class where relname ~ '^temp_parted_oncommit_test'
-  order by relname;
-          relname
-----------------------------
- temp_parted_oncommit_test
- temp_parted_oncommit_test1
-(2 rows)
-
-drop table temp_parted_oncommit_test;
--- Check dependencies between ON COMMIT actions with inheritance trees.
--- Using ON COMMIT DROP on a parent removes the whole set.
-begin;
-create temp table temp_inh_oncommit_test (a int) on commit drop;
-create temp table temp_inh_oncommit_test1 ()
-  inherits(temp_inh_oncommit_test) on commit delete rows;
-insert into temp_inh_oncommit_test1 values (1);
-commit;
--- no relations remain in this case
-select relname from pg_class where relname ~ '^temp_inh_oncommit_test';
- relname
----------
-(0 rows)
-
--- Data on the parent is removed, and the child goes away.
-begin;
-create temp table temp_inh_oncommit_test (a int) on commit delete rows;
-create temp table temp_inh_oncommit_test1 ()
-  inherits(temp_inh_oncommit_test) on commit drop;
-insert into temp_inh_oncommit_test1 values (1);
-insert into temp_inh_oncommit_test values (1);
-commit;
-select * from temp_inh_oncommit_test;
- a
----
-(0 rows)
-
--- one relation remains
-select relname from pg_class where relname ~ '^temp_inh_oncommit_test';
-        relname
-------------------------
- temp_inh_oncommit_test
-(1 row)
-
-drop table temp_inh_oncommit_test;
--- Tests with two-phase commit
--- Transactions creating objects in a temporary namespace cannot be used
--- with two-phase commit.
--- These cases generate errors about temporary namespace.
--- Function creation
-begin;
-create function pg_temp.twophase_func() returns void as
-  $$ select '2pc_func'::text $$ language sql;
-prepare transaction 'twophase_func';
-ERROR:  cannot PREPARE a transaction that has operated on temporary objects
--- Function drop
-create function pg_temp.twophase_func() returns void as
-  $$ select '2pc_func'::text $$ language sql;
-begin;
-drop function pg_temp.twophase_func();
-prepare transaction 'twophase_func';
-ERROR:  cannot PREPARE a transaction that has operated on temporary objects
--- Operator creation
-begin;
-create operator pg_temp.@@ (leftarg = int4, rightarg = int4, procedure = int4mi);
-prepare transaction 'twophase_operator';
-ERROR:  cannot PREPARE a transaction that has operated on temporary objects
--- These generate errors about temporary tables.
-begin;
-create type pg_temp.twophase_type as (a int);
-prepare transaction 'twophase_type';
-ERROR:  cannot PREPARE a transaction that has operated on temporary objects
-begin;
-create view pg_temp.twophase_view as select 1;
-prepare transaction 'twophase_view';
-ERROR:  cannot PREPARE a transaction that has operated on temporary objects
-begin;
-create sequence pg_temp.twophase_seq;
-prepare transaction 'twophase_sequence';
-ERROR:  cannot PREPARE a transaction that has operated on temporary objects
--- Temporary tables cannot be used with two-phase commit.
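For contrast with the failures tested below, PREPARE TRANSACTION itself works fine as long as no temporary object is touched. A hypothetical sketch (perm_tab is illustrative; assumes max_prepared_transactions > 0):

    create table perm_tab (a int);
    begin;
    insert into perm_tab values (1);
    prepare transaction 'ok_2pc';  -- succeeds: only permanent objects involved
    commit prepared 'ok_2pc';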
-create temp table twophase_tab (a int);
-begin;
-select a from twophase_tab;
- a
----
-(0 rows)
-
-prepare transaction 'twophase_tab';
-ERROR:  cannot PREPARE a transaction that has operated on temporary objects
-begin;
-insert into twophase_tab values (1);
-prepare transaction 'twophase_tab';
-ERROR:  cannot PREPARE a transaction that has operated on temporary objects
-begin;
-lock twophase_tab in access exclusive mode;
-prepare transaction 'twophase_tab';
-ERROR:  cannot PREPARE a transaction that has operated on temporary objects
-begin;
-drop table twophase_tab;
-prepare transaction 'twophase_tab';
-ERROR:  cannot PREPARE a transaction that has operated on temporary objects
--- Corner case: current_schema may create a temporary schema if namespace
--- creation is pending, so check after that.  First reset the connection
--- to remove the temporary namespace.
-\c -
-SET search_path TO 'pg_temp';
-BEGIN;
-SELECT current_schema() ~ 'pg_temp' AS is_temp_schema;
- is_temp_schema
-----------------
- t
-(1 row)
-
-PREPARE TRANSACTION 'twophase_search';
-ERROR:  cannot PREPARE a transaction that has operated on temporary objects
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+	Is the server running locally and accepting connections on that socket?
diff -U3 /Users/admin/pgsql/src/test/regress/expected/domain.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/domain.out
--- /Users/admin/pgsql/src/test/regress/expected/domain.out	2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/domain.out	2024-12-13 13:20:12
@@ -1,1374 +1,2 @@
---
--- Test domains.
---
--- Test Comment / Drop
-create domain domaindroptest int4;
-comment on domain domaindroptest is 'About to drop this..';
-create domain dependenttypetest domaindroptest;
--- fail because of dependent type
-drop domain domaindroptest;
-ERROR:  cannot drop type domaindroptest because other objects depend on it
-DETAIL:  type dependenttypetest depends on type domaindroptest
-HINT:  Use DROP ... CASCADE to drop the dependent objects too.
-drop domain domaindroptest cascade;
-NOTICE:  drop cascades to type dependenttypetest
--- this should fail because already gone
-drop domain domaindroptest cascade;
-ERROR:  type "domaindroptest" does not exist
--- some error cases
-create domain d_fail as no_such_type;
-ERROR:  type "no_such_type" does not exist
-create domain d_fail as int constraint cc REFERENCES this_table_not_exists(i);
-ERROR:  foreign key constraints not possible for domains
-create domain d_fail as int4 not null no inherit;
-ERROR:  not-null constraints for domains cannot be marked NO INHERIT
-create domain d_fail as int4 not null null;
-ERROR:  conflicting NULL/NOT NULL constraints
-create domain d_fail as int4 not null default 3 default 3;
-ERROR:  multiple default expressions
-create domain d_fail int4 DEFAULT 3 + 'h';
-ERROR:  invalid input syntax for type integer: "h"
-create domain d_fail int4 collate "C";
-ERROR:  collations are not supported by type integer
-create domain d_fail as anyelement;
-ERROR:  "anyelement" is not a valid base type for a domain
-create domain d_fail as int4 unique;
-ERROR:  unique constraints not possible for domains
-create domain d_fail as int4 PRIMARY key;
-ERROR:  primary key constraints not possible for domains
-create domain d_fail as int4 constraint cc generated by default as identity;
-ERROR:  specifying GENERATED not supported for domains
-create domain d_fail as int4 constraint cc check (values > 1) no inherit;
-ERROR:  check constraints for domains cannot be marked NO INHERIT
-create domain d_fail as int4 constraint cc check (values > 1) deferrable;
-ERROR:  specifying constraint deferrability not supported for domains
--- Test domain input.
--- Note: the point of checking both INSERT and COPY FROM is that INSERT
--- exercises CoerceToDomain while COPY exercises domain_in.
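To make the CoerceToDomain/domain_in distinction concrete, a hedged sketch (d_pos and tt are illustrative names): the same CHECK constraint fires on both code paths.

    create domain d_pos as int4 check (value > 0);
    create temp table tt (x d_pos);
    insert into tt values (-1);  -- rejected via CoerceToDomain
    copy tt from stdin;          -- rejected via domain_in
    -1
    \.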
-create domain domainvarchar varchar(5);
-create domain domainnumeric numeric(8,2);
-create domain domainint4 int4;
-create domain domaintext text;
--- Test explicit coercions
--- these should succeed (and truncate)
-SELECT cast('123456' as domainvarchar);
- domainvarchar
----------------
- 12345
-(1 row)
-
-SELECT cast('12345' as domainvarchar);
- domainvarchar
----------------
- 12345
-(1 row)
-
--- Test tables using domains
-create table basictest
-  ( testint4 domainint4
-  , testtext domaintext
-  , testvarchar domainvarchar
-  , testnumeric domainnumeric
-  );
-INSERT INTO basictest values ('88', 'haha', 'short', '123.12');      -- Good
-INSERT INTO basictest values ('88', 'haha', 'short text', '123.12'); -- Bad varchar
-ERROR:  value too long for type character varying(5)
-INSERT INTO basictest values ('88', 'haha', 'short', '123.1212');    -- Truncate numeric
--- Test copy
-COPY basictest (testvarchar) FROM stdin; -- fail
-ERROR:  value too long for type character varying(5)
-CONTEXT:  COPY basictest, line 1, column testvarchar: "notsoshorttext"
-COPY basictest (testvarchar) FROM stdin;
-select * from basictest;
- testint4 | testtext | testvarchar | testnumeric
-----------+----------+-------------+-------------
- 88       | haha     | short       |      123.12
- 88       | haha     | short       |      123.12
-          |          | short       |
-(3 rows)
-
--- check that domains inherit operations from base types
-select testtext || testvarchar as concat, testnumeric + 42 as sum
-from basictest;
-  concat   |  sum
------------+--------
- hahashort | 165.12
- hahashort | 165.12
-           |
-(3 rows)
-
--- check that union/case/coalesce type resolution handles domains properly
-select pg_typeof(coalesce(4::domainint4, 7));
- pg_typeof
------------
- integer
-(1 row)
-
-select pg_typeof(coalesce(4::domainint4, 7::domainint4));
- pg_typeof
-------------
- domainint4
-(1 row)
-
-drop table basictest;
-drop domain domainvarchar restrict;
-drop domain domainnumeric restrict;
-drop domain domainint4 restrict;
-drop domain domaintext;
--- Test non-error-throwing input
-create domain positiveint int4 check(value > 0);
-create domain weirdfloat float8 check((1 / value) < 10);
-select pg_input_is_valid('1', 'positiveint');
- pg_input_is_valid
--------------------
- t
-(1 row)
-
-select pg_input_is_valid('junk', 'positiveint');
- pg_input_is_valid
--------------------
- f
-(1 row)
-
-select pg_input_is_valid('-1', 'positiveint');
- pg_input_is_valid
--------------------
- f
-(1 row)
-
-select * from pg_input_error_info('junk', 'positiveint');
-                    message                    | detail | hint | sql_error_code
------------------------------------------------+--------+------+----------------
- invalid input syntax for type integer: "junk" |        |      | 22P02
-(1 row)
-
-select * from pg_input_error_info('-1', 'positiveint');
-                                   message                                   | detail | hint | sql_error_code
------------------------------------------------------------------------------+--------+------+----------------
- value for domain positiveint violates check constraint "positiveint_check"  |        |      | 23514
-(1 row)
-
-select * from pg_input_error_info('junk', 'weirdfloat');
-                         message                         | detail | hint | sql_error_code
----------------------------------------------------------+--------+------+----------------
- invalid input syntax for type double precision: "junk"  |        |      | 22P02
-(1 row)
-
-select * from pg_input_error_info('0.01', 'weirdfloat');
-                                  message                                  | detail | hint | sql_error_code
----------------------------------------------------------------------------+--------+------+----------------
- value for domain weirdfloat violates check constraint "weirdfloat_check"  |        |      | 23514
-(1 row)
-
--- We currently can't trap errors raised in the CHECK expression itself
-select * from pg_input_error_info('0', 'weirdfloat');
-ERROR:  division by zero
-drop domain positiveint;
-drop domain weirdfloat;
--- Test domains over array types
-create domain domainint4arr int4[1];
-create domain domainchar4arr varchar(4)[2][3];
-create table domarrtest
-  ( testint4arr domainint4arr
-  , testchar4arr domainchar4arr
-  );
-INSERT INTO domarrtest values ('{2,2}', '{{"a","b"},{"c","d"}}');
-INSERT INTO domarrtest values ('{{2,2},{2,2}}', '{{"a","b"}}');
-INSERT INTO domarrtest values ('{2,2}', '{{"a","b"},{"c","d"},{"e","f"}}');
-INSERT INTO domarrtest values ('{2,2}', '{{"a"},{"c"}}');
-INSERT INTO domarrtest values (NULL, '{{"a","b","c"},{"d","e","f"}}');
-INSERT INTO domarrtest values (NULL, '{{"toolong","b","c"},{"d","e","f"}}');
-ERROR:  value too long for type character varying(4)
-INSERT INTO domarrtest (testint4arr[1], testint4arr[3]) values (11,22);
-select * from domarrtest;
-  testint4arr  |    testchar4arr
----------------+---------------------
- {2,2}         | {{a,b},{c,d}}
- {{2,2},{2,2}} | {{a,b}}
- {2,2}         | {{a,b},{c,d},{e,f}}
- {2,2}         | {{a},{c}}
-               | {{a,b,c},{d,e,f}}
- {11,NULL,22}  |
-(6 rows)
-
-select testint4arr[1], testchar4arr[2:2] from domarrtest;
- testint4arr | testchar4arr
--------------+--------------
-           2 | {{c,d}}
-             | {}
-           2 | {{c,d}}
-           2 | {{c}}
-             | {{d,e,f}}
-          11 |
-(6 rows)
-
-select array_dims(testint4arr), array_dims(testchar4arr) from domarrtest;
- array_dims | array_dims
-------------+------------
- [1:2]      | [1:2][1:2]
- [1:2][1:2] | [1:1][1:2]
- [1:2]      | [1:3][1:2]
- [1:2]      | [1:2][1:1]
-            | [1:2][1:3]
- [1:3]      |
-(6 rows)
-
-COPY domarrtest FROM stdin;
-COPY domarrtest FROM stdin; -- fail
-ERROR:  value too long for type character varying(4)
-CONTEXT:  COPY domarrtest, line 1, column testchar4arr: "{qwerty,w,e}"
-select * from domarrtest;
-  testint4arr  |    testchar4arr
----------------+---------------------
- {2,2}         | {{a,b},{c,d}}
- {{2,2},{2,2}} | {{a,b}}
- {2,2}         | {{a,b},{c,d},{e,f}}
- {2,2}         | {{a},{c}}
-               | {{a,b,c},{d,e,f}}
- {11,NULL,22}  |
- {3,4}         | {q,w,e}
-               |
-(8 rows)
-
-update domarrtest set
-  testint4arr[1] = testint4arr[1] + 1,
-  testint4arr[3] = testint4arr[3] - 1
-where testchar4arr is null;
-select * from domarrtest where testchar4arr is null;
-   testint4arr    | testchar4arr
-------------------+--------------
- {12,NULL,21}     |
- {NULL,NULL,NULL} |
-(2 rows)
-
-drop table domarrtest;
-drop domain domainint4arr restrict;
-drop domain domainchar4arr restrict;
-create domain dia as int[];
-select '{1,2,3}'::dia;
-   dia
----------
- {1,2,3}
-(1 row)
-
-select array_dims('{1,2,3}'::dia);
- array_dims
-------------
- [1:3]
-(1 row)
-
-select pg_typeof('{1,2,3}'::dia);
- pg_typeof
------------
- dia
-(1 row)
-
-select pg_typeof('{1,2,3}'::dia || 42); -- should be int[] not dia
- pg_typeof
------------
- integer[]
-(1 row)
-
-drop domain dia;
--- Test domains over composites
-create type comptype as (r float8, i float8);
-create domain dcomptype as comptype;
-create table dcomptable (d1 dcomptype unique);
-insert into dcomptable values (row(1,2)::dcomptype);
-insert into dcomptable values (row(3,4)::comptype);
-insert into dcomptable values (row(1,2)::dcomptype); -- fail on uniqueness
-ERROR:  duplicate key value violates unique constraint "dcomptable_d1_key"
-DETAIL:  Key (d1)=((1,2)) already exists.
-insert into dcomptable (d1.r) values(11);
-select * from dcomptable;
-  d1
--------
- (1,2)
- (3,4)
- (11,)
-(3 rows)
-
-select (d1).r, (d1).i, (d1).* from dcomptable;
- r  | i | r  | i
-----+---+----+---
-  1 | 2 |  1 | 2
-  3 | 4 |  3 | 4
- 11 |   | 11 |
-(3 rows)
-
-update dcomptable set d1.r = (d1).r + 1 where (d1).i > 0;
-select * from dcomptable;
-  d1
--------
- (11,)
- (2,2)
- (4,4)
-(3 rows)
-
-alter domain dcomptype add constraint c1 check ((value).r <= (value).i);
-alter domain dcomptype add constraint c2 check ((value).r > (value).i); -- fail
-ERROR:  column "d1" of table "dcomptable" contains values that violate the new constraint
-select row(2,1)::dcomptype; -- fail
-ERROR:  value for domain dcomptype violates check constraint "c1"
-insert into dcomptable values (row(1,2)::comptype);
-insert into dcomptable values (row(2,1)::comptype); -- fail
-ERROR:  value for domain dcomptype violates check constraint "c1"
-insert into dcomptable (d1.r) values(99);
-insert into dcomptable (d1.r, d1.i) values(99, 100);
-insert into dcomptable (d1.r, d1.i) values(100, 99); -- fail
-ERROR:  value for domain dcomptype violates check constraint "c1"
-update dcomptable set d1.r = (d1).r + 1 where (d1).i > 0; -- fail
-ERROR:  value for domain dcomptype violates check constraint "c1"
-update dcomptable set d1.r = (d1).r - 1, d1.i = (d1).i + 1 where (d1).i > 0;
-select * from dcomptable;
-    d1
-----------
- (11,)
- (99,)
- (1,3)
- (3,5)
- (0,3)
- (98,101)
-(6 rows)
-
-explain (verbose, costs off)
-  update dcomptable set d1.r = (d1).r - 1, d1.i = (d1).i + 1 where (d1).i > 0;
-                                          QUERY PLAN
------------------------------------------------------------------------------------------------
- Update on public.dcomptable
-   ->  Seq Scan on public.dcomptable
-         Output: ROW(((d1).r - '1'::double precision), ((d1).i + '1'::double precision)), ctid
-         Filter: ((dcomptable.d1).i > '0'::double precision)
-(4 rows)
-
-create rule silly as on delete to dcomptable do instead
-  update dcomptable set d1.r = (d1).r - 1, d1.i = (d1).i + 1 where (d1).i > 0;
-\d+ dcomptable
-                                  Table "public.dcomptable"
- Column |   Type    | Collation | Nullable | Default | Storage  | Stats target | Description
---------+-----------+-----------+----------+---------+----------+--------------+-------------
- d1     | dcomptype |           |          |         | extended |              |
-Indexes:
-    "dcomptable_d1_key" UNIQUE CONSTRAINT, btree (d1)
-Rules:
-    silly AS
-    ON DELETE TO dcomptable DO INSTEAD  UPDATE dcomptable SET d1.r = (dcomptable.d1).r - 1::double precision, d1.i = (dcomptable.d1).i + 1::double precision
-  WHERE (dcomptable.d1).i > 0::double precision
-
-create function makedcomp(r float8, i float8) returns dcomptype
-as 'select row(r, i)' language sql;
-select makedcomp(1,2);
- makedcomp
------------
- (1,2)
-(1 row)
-
-select makedcomp(2,1); -- fail
-ERROR:  value for domain dcomptype violates check constraint "c1"
-select * from makedcomp(1,2) m;
- r | i
----+---
- 1 | 2
-(1 row)
-
-select m, m is not null from makedcomp(1,2) m;
-   m   | ?column?
--------+----------
- (1,2) | t
-(1 row)
-
-drop function makedcomp(float8, float8);
-drop table dcomptable;
-drop type comptype cascade;
-NOTICE:  drop cascades to type dcomptype
--- check altering and dropping columns used by domain constraints
-create type comptype as (r float8, i float8);
-create domain dcomptype as comptype;
-alter domain dcomptype add constraint c1 check ((value).r > 0);
-comment on constraint c1 on domain dcomptype is 'random commentary';
-select row(0,1)::dcomptype; -- fail
-ERROR:  value for domain dcomptype violates check constraint "c1"
-alter type comptype alter attribute r type varchar; -- fail
-ERROR:  operator does not exist: character varying > double precision
-HINT:  No operator matches the given name and argument types. You might need to add explicit type casts.
-alter type comptype alter attribute r type bigint;
-alter type comptype drop attribute r; -- fail
-ERROR:  cannot drop column r of composite type comptype because other objects depend on it
-DETAIL:  constraint c1 depends on column r of composite type comptype
-HINT:  Use DROP ... CASCADE to drop the dependent objects too.
-alter type comptype drop attribute i;
-select conname, obj_description(oid, 'pg_constraint') from pg_constraint
-  where contypid = 'dcomptype'::regtype;  -- check comment is still there
- conname |  obj_description
----------+-------------------
- c1      | random commentary
-(1 row)
-
-drop type comptype cascade;
-NOTICE:  drop cascades to type dcomptype
--- Test domains over arrays of composite
-create type comptype as (r float8, i float8);
-create domain dcomptypea as comptype[];
-create table dcomptable (d1 dcomptypea unique);
-insert into dcomptable values (array[row(1,2)]::dcomptypea);
-insert into dcomptable values (array[row(3,4), row(5,6)]::comptype[]);
-insert into dcomptable values (array[row(7,8)::comptype, row(9,10)::comptype]);
-insert into dcomptable values (array[row(1,2)]::dcomptypea); -- fail on uniqueness
-ERROR:  duplicate key value violates unique constraint "dcomptable_d1_key"
-DETAIL:  Key (d1)=({"(1,2)"}) already exists.
-insert into dcomptable (d1[1]) values(row(9,10));
-insert into dcomptable (d1[1].r) values(11);
-select * from dcomptable;
-         d1
---------------------
- {"(1,2)"}
- {"(3,4)","(5,6)"}
- {"(7,8)","(9,10)"}
- {"(9,10)"}
- {"(11,)"}
-(5 rows)
-
-select d1[2], d1[1].r, d1[1].i from dcomptable;
-   d1   | r  | i
---------+----+----
-        |  1 |  2
- (5,6)  |  3 |  4
- (9,10) |  7 |  8
-        |  9 | 10
-        | 11 |
-(5 rows)
-
-update dcomptable set d1[2] = row(d1[2].i, d1[2].r);
-select * from dcomptable;
-         d1
---------------------
- {"(1,2)","(,)"}
- {"(3,4)","(6,5)"}
- {"(7,8)","(10,9)"}
- {"(9,10)","(,)"}
- {"(11,)","(,)"}
-(5 rows)
-
-update dcomptable set d1[1].r = d1[1].r + 1 where d1[1].i > 0;
-select * from dcomptable;
-         d1
---------------------
- {"(11,)","(,)"}
- {"(2,2)","(,)"}
- {"(4,4)","(6,5)"}
- {"(8,8)","(10,9)"}
- {"(10,10)","(,)"}
-(5 rows)
-
-alter domain dcomptypea add constraint c1 check (value[1].r <= value[1].i);
-alter domain dcomptypea add constraint c2 check (value[1].r > value[1].i); -- fail
-ERROR:  column "d1" of table "dcomptable" contains values that violate the new constraint
-select array[row(2,1)]::dcomptypea; -- fail
-ERROR:  value for domain dcomptypea violates check constraint "c1"
-insert into dcomptable values (array[row(1,2)]::comptype[]);
-insert into dcomptable values (array[row(2,1)]::comptype[]); -- fail
-ERROR:  value for domain dcomptypea violates check constraint "c1"
-insert into dcomptable (d1[1].r) values(99);
-insert into dcomptable (d1[1].r, d1[1].i) values(99, 100);
-insert into dcomptable (d1[1].r, d1[1].i) values(100, 99); -- fail
-ERROR:  value for domain dcomptypea violates check constraint "c1"
-update dcomptable set d1[1].r = d1[1].r + 1 where d1[1].i > 0; -- fail
-ERROR:  value for domain dcomptypea violates check constraint "c1"
-update dcomptable set d1[1].r = d1[1].r - 1, d1[1].i = d1[1].i + 1
-  where d1[1].i > 0;
-select * from dcomptable;
-         d1
---------------------
- {"(11,)","(,)"}
- {"(99,)"}
- {"(1,3)","(,)"}
- {"(3,5)","(6,5)"}
- {"(7,9)","(10,9)"}
- {"(9,11)","(,)"}
- {"(0,3)"}
- {"(98,101)"}
-(8 rows)
-
-explain (verbose, costs off)
-  update dcomptable set d1[1].r = d1[1].r - 1, d1[1].i = d1[1].i + 1
-  where d1[1].i > 0;
-                                                   QUERY PLAN
-----------------------------------------------------------------------------------------------------------------
- Update on public.dcomptable
-   ->  Seq Scan on public.dcomptable
-         Output: (d1[1].r := (d1[1].r - '1'::double precision))[1].i := (d1[1].i + '1'::double precision), ctid
-         Filter: (dcomptable.d1[1].i > '0'::double precision)
-(4 rows)
-
-create rule silly as on delete to dcomptable do instead
-  update dcomptable set d1[1].r = d1[1].r - 1, d1[1].i = d1[1].i + 1
-  where d1[1].i > 0;
-\d+ dcomptable
-                                  Table "public.dcomptable"
- Column |    Type    | Collation | Nullable | Default | Storage  | Stats target | Description
---------+------------+-----------+----------+---------+----------+--------------+-------------
- d1     | dcomptypea |           |          |         | extended |              |
-Indexes:
-    "dcomptable_d1_key" UNIQUE CONSTRAINT, btree (d1)
-Rules:
-    silly AS
-    ON DELETE TO dcomptable DO INSTEAD  UPDATE dcomptable SET d1[1].r = dcomptable.d1[1].r - 1::double precision, d1[1].i = dcomptable.d1[1].i + 1::double precision
-  WHERE dcomptable.d1[1].i > 0::double precision
-
-drop table dcomptable;
-drop type comptype cascade;
-NOTICE:  drop cascades to type dcomptypea
--- Test arrays over domains
-create domain posint as int check (value > 0);
-create table pitable (f1 posint[]);
-insert into pitable values(array[42]);
-insert into pitable values(array[-1]); -- fail
-ERROR:  value for domain posint violates check constraint "posint_check"
-insert into pitable values('{0}'); -- fail
-ERROR:  value for domain posint violates check constraint "posint_check"
-LINE 1: insert into pitable values('{0}');
-                                   ^
-update pitable set f1[1] = f1[1] + 1;
-update pitable set f1[1] = 0; -- fail
-ERROR:  value for domain posint violates check constraint "posint_check"
-select * from pitable;
-  f1
-------
- {43}
-(1 row)
-
-drop table pitable;
-create domain vc4 as varchar(4);
-create table vc4table (f1 vc4[]);
-insert into vc4table values(array['too long']); -- fail
-ERROR:  value too long for type character varying(4)
-insert into vc4table values(array['too long']::vc4[]); -- cast truncates
-select * from vc4table;
-    f1
-----------
- {"too "}
-(1 row)
-
-drop table vc4table;
-drop type vc4;
--- You can sort of fake arrays-of-arrays by putting a domain in between
-create domain dposinta as posint[];
-create table dposintatable (f1 dposinta[]);
-insert into dposintatable values(array[array[42]]); -- fail
-ERROR:  column "f1" is of type dposinta[] but expression is of type integer[]
-LINE 1: insert into dposintatable values(array[array[42]]);
-                                         ^
-HINT:  You will need to rewrite or cast the expression.
-insert into dposintatable values(array[array[42]::posint[]]); -- still fail
-ERROR:  column "f1" is of type dposinta[] but expression is of type posint[]
-LINE 1: insert into dposintatable values(array[array[42]::posint[]])...
-                                         ^
-HINT:  You will need to rewrite or cast the expression.
-insert into dposintatable values(array[array[42]::dposinta]); -- but this works
-select f1, f1[1], (f1[1])[1] from dposintatable;
-    f1    |  f1  | f1
-----------+------+----
- {"{42}"} | {42} | 42
-(1 row)
-
-select pg_typeof(f1) from dposintatable;
- pg_typeof
-------------
- dposinta[]
-(1 row)
-
-select pg_typeof(f1[1]) from dposintatable;
- pg_typeof
------------
- dposinta
-(1 row)
-
-select pg_typeof(f1[1][1]) from dposintatable;
- pg_typeof
------------
- dposinta
-(1 row)
-
-select pg_typeof((f1[1])[1]) from dposintatable;
- pg_typeof
------------
- posint
-(1 row)
-
-update dposintatable set f1[2] = array[99];
-select f1, f1[1], (f1[2])[1] from dposintatable;
-       f1        |  f1  | f1
------------------+------+----
- {"{42}","{99}"} | {42} | 99
-(1 row)
-
--- it'd be nice if you could do something like this, but for now you can't:
-update dposintatable set f1[2][1] = array[97];
-ERROR:  wrong number of array subscripts
--- maybe someday we can make this syntax work:
-update dposintatable set (f1[2])[1] = array[98];
-ERROR:  syntax error at or near "["
-LINE 1: update dposintatable set (f1[2])[1] = array[98];
-                                        ^
-drop table dposintatable;
-drop domain posint cascade;
-NOTICE:  drop cascades to type dposinta
--- Test arrays over domains of composite
-create type comptype as (cf1 int, cf2 int);
-create domain dcomptype as comptype check ((value).cf1 > 0);
-create table dcomptable (f1 dcomptype[]);
-insert into dcomptable values (null);
-update dcomptable set f1[1].cf2 = 5;
-table dcomptable;
-    f1
-----------
- {"(,5)"}
-(1 row)
-
-update dcomptable set f1[1].cf1 = -1; -- fail
-ERROR:  value for domain dcomptype violates check constraint "dcomptype_check"
-update dcomptable set f1[1].cf1 = 1;
-table dcomptable;
-    f1
------------
- {"(1,5)"}
-(1 row)
-
--- if there's no constraints, a different code path is taken:
-alter domain dcomptype drop constraint dcomptype_check;
-update dcomptable set f1[1].cf1 = -1; -- now ok
-table dcomptable;
-     f1
-------------
- {"(-1,5)"}
-(1 row)
-
-drop table dcomptable;
-drop type comptype cascade;
-NOTICE:  drop cascades to type dcomptype
--- Test not-null restrictions
-create domain dnotnull varchar(15) NOT NULL;
-create domain dnull varchar(15);
-create domain dcheck varchar(15) NOT NULL CHECK (VALUE = 'a' OR VALUE = 'c' OR VALUE = 'd');
-create table nulltest
-  ( col1 dnotnull
-  , col2 dnotnull NULL -- NOT NULL in the domain cannot be overridden
-  , col3 dnull NOT NULL
-  , col4 dnull
-  , col5 dcheck CHECK (col5 IN ('c', 'd'))
-  );
-INSERT INTO nulltest DEFAULT VALUES;
-ERROR:  domain dnotnull does not allow null values
-INSERT INTO nulltest values ('a', 'b', 'c', 'd', 'c');  -- Good
-insert into nulltest values ('a', 'b', 'c', 'd', NULL);
-ERROR:  domain dcheck does not allow null values
-insert into nulltest values ('a', 'b', 'c', 'd', 'a');
-ERROR:  new row for relation "nulltest" violates check constraint "nulltest_col5_check"
-DETAIL:  Failing row contains (a, b, c, d, a).
-INSERT INTO nulltest values (NULL, 'b', 'c', 'd', 'd');
-ERROR:  domain dnotnull does not allow null values
-INSERT INTO nulltest values ('a', NULL, 'c', 'd', 'c');
-ERROR:  domain dnotnull does not allow null values
-INSERT INTO nulltest values ('a', 'b', NULL, 'd', 'c');
-ERROR:  null value in column "col3" of relation "nulltest" violates not-null constraint
-DETAIL:  Failing row contains (a, b, null, d, c).
-INSERT INTO nulltest values ('a', 'b', 'c', NULL, 'd'); -- Good
--- Test copy
-COPY nulltest FROM stdin; --fail
-ERROR:  null value in column "col3" of relation "nulltest" violates not-null constraint
-DETAIL:  Failing row contains (a, b, null, d, d).
-CONTEXT:  COPY nulltest, line 1: "a	b	\N	d	d"
-COPY nulltest FROM stdin; --fail
-ERROR:  domain dcheck does not allow null values
-CONTEXT:  COPY nulltest, line 1, column col5: null input
--- Last row is bad
-COPY nulltest FROM stdin;
-ERROR:  new row for relation "nulltest" violates check constraint "nulltest_col5_check"
-DETAIL:  Failing row contains (a, b, c, null, a).
-CONTEXT:  COPY nulltest, line 3: "a	b	c	\N	a"
-select * from nulltest;
- col1 | col2 | col3 | col4 | col5
-------+------+------+------+------
- a    | b    | c    | d    | c
- a    | b    | c    |      | d
-(2 rows)
-
--- Test out coerced (casted) constraints
-SELECT cast('1' as dnotnull);
- dnotnull
-----------
- 1
-(1 row)
-
-SELECT cast(NULL as dnotnull); -- fail
-ERROR:  domain dnotnull does not allow null values
-SELECT cast(cast(NULL as dnull) as dnotnull); -- fail
-ERROR:  domain dnotnull does not allow null values
-SELECT cast(col4 as dnotnull) from nulltest; -- fail
-ERROR:  domain dnotnull does not allow null values
--- cleanup
-drop table nulltest;
-drop domain dnotnull restrict;
-drop domain dnull restrict;
-drop domain dcheck restrict;
-create domain ddef1 int4 DEFAULT 3;
-create domain ddef2 oid DEFAULT '12';
--- Type mixing, function returns int8
-create domain ddef3 text DEFAULT 5;
-create sequence ddef4_seq;
-create domain ddef4 int4 DEFAULT nextval('ddef4_seq');
-create domain ddef5 numeric(8,2) NOT NULL DEFAULT '12.12';
-create table defaulttest
-  ( col1 ddef1
-  , col2 ddef2
-  , col3 ddef3
-  , col4 ddef4 PRIMARY KEY
-  , col5 ddef1 NOT NULL DEFAULT NULL
-  , col6 ddef2 DEFAULT '88'
-  , col7 ddef4 DEFAULT 8000
-  , col8 ddef5
-  );
-insert into defaulttest(col4) values(0); -- fails, col5 defaults to null
-ERROR:  null value in column "col5" of relation "defaulttest" violates not-null constraint
-DETAIL:  Failing row contains (3, 12, 5, 0, null, 88, 8000, 12.12).
-alter table defaulttest alter column col5 drop default;
-insert into defaulttest default values; -- succeeds, inserts domain default
--- We used to treat SET DEFAULT NULL as equivalent to DROP DEFAULT; wrong
-alter table defaulttest alter column col5 set default null;
-insert into defaulttest(col4) values(0); -- fails
-ERROR:  null value in column "col5" of relation "defaulttest" violates not-null constraint
-DETAIL:  Failing row contains (3, 12, 5, 0, null, 88, 8000, 12.12).
-alter table defaulttest alter column col5 drop default;
-insert into defaulttest default values;
-insert into defaulttest default values;
--- Test defaults with copy
-COPY defaulttest(col5) FROM stdin;
-select * from defaulttest;
- col1 | col2 | col3 | col4 | col5 | col6 | col7 | col8
-------+------+------+------+------+------+------+-------
-    3 |   12 | 5    |    1 |    3 |   88 | 8000 | 12.12
-    3 |   12 | 5    |    2 |    3 |   88 | 8000 | 12.12
-    3 |   12 | 5    |    3 |    3 |   88 | 8000 | 12.12
-    3 |   12 | 5    |    4 |   42 |   88 | 8000 | 12.12
-(4 rows)
-
-drop table defaulttest cascade;
--- Test ALTER DOMAIN .. NOT NULL
-create domain dnotnulltest integer;
-create table domnotnull
-( col1 dnotnulltest
-, col2 dnotnulltest
-);
-insert into domnotnull default values;
-alter domain dnotnulltest set not null; -- fails
-ERROR:  column "col1" of table "domnotnull" contains null values
-update domnotnull set col1 = 5;
-alter domain dnotnulltest set not null; -- fails
-ERROR:  column "col2" of table "domnotnull" contains null values
-update domnotnull set col2 = 6;
-alter domain dnotnulltest set not null;
-update domnotnull set col1 = null; -- fails
-ERROR:  domain dnotnulltest does not allow null values
-alter domain dnotnulltest drop not null;
-update domnotnull set col1 = null;
-update domnotnull set col1 = 5;
--- these constraints can also be added and removed by name
-alter domain dnotnulltest add constraint dnotnulltest_notnull not null;
-update domnotnull set col1 = null; -- fails
-ERROR:  domain dnotnulltest does not allow null values
-select conname, pg_get_constraintdef(oid) from pg_constraint
-  where contypid = 'dnotnulltest'::regtype;
-       conname        | pg_get_constraintdef
-----------------------+----------------------
- dnotnulltest_notnull | NOT NULL
-(1 row)
-
-alter domain dnotnulltest drop constraint dnotnulltest_notnull;
-update domnotnull set col1 = null;
-drop domain dnotnulltest cascade;
-NOTICE:  drop cascades to 2 other objects
-DETAIL:  drop cascades to column col2 of table domnotnull
-drop cascades to column col1 of table domnotnull
--- Test ALTER DOMAIN .. DEFAULT ..
-create table domdeftest (col1 ddef1);
-insert into domdeftest default values;
-select * from domdeftest;
- col1
------
-    3
-(1 row)
-
-alter domain ddef1 set default '42';
-insert into domdeftest default values;
-select * from domdeftest;
- col1
------
-    3
-   42
-(2 rows)
-
-alter domain ddef1 drop default;
-insert into domdeftest default values;
-select * from domdeftest;
- col1
------
-    3
-   42
-
-(3 rows)
-
-drop table domdeftest;
--- Test ALTER DOMAIN .. CONSTRAINT ..
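For orientation before the tests that follow, a minimal sketch of the ALTER DOMAIN ... CONSTRAINT shapes being exercised (d_even is a hypothetical name):

    create domain d_even as int;
    alter domain d_even add constraint even_chk check (value % 2 = 0);
    alter domain d_even drop constraint even_chk;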
-create domain con as integer;
-create table domcontest (col1 con);
-insert into domcontest values (1);
-insert into domcontest values (2);
-alter domain con add constraint t check (VALUE < 1); -- fails
-ERROR:  column "col1" of table "domcontest" contains values that violate the new constraint
-alter domain con add constraint t check (VALUE < 34);
-alter domain con add check (VALUE > 0);
-\dD con
-                                   List of domains
- Schema | Name |  Type   | Collation | Nullable | Default |                Check
---------+------+---------+-----------+----------+---------+--------------------------------------
- public | con  | integer |           |          |         | CHECK (VALUE > 0) CHECK (VALUE < 34)
-(1 row)
-
-insert into domcontest values (-5); -- fails
-ERROR:  value for domain con violates check constraint "con_check"
-insert into domcontest values (42); -- fails
-ERROR:  value for domain con violates check constraint "t"
-insert into domcontest values (5);
-alter domain con drop constraint t;
-insert into domcontest values (-5); --fails
-ERROR:  value for domain con violates check constraint "con_check"
-insert into domcontest values (42);
-alter domain con drop constraint nonexistent;
-ERROR:  constraint "nonexistent" of domain "con" does not exist
-alter domain con drop constraint if exists nonexistent;
-NOTICE:  constraint "nonexistent" of domain "con" does not exist, skipping
--- not-null constraints
-create domain connotnull integer;
-create table domconnotnulltest
-( col1 connotnull
-, col2 connotnull
-);
-insert into domconnotnulltest default values;
-alter domain connotnull add not null; -- fails
-ERROR:  column "col1" of table "domconnotnulltest" contains null values
-update domconnotnulltest set col1 = 5;
-alter domain connotnull add not null; -- fails
-ERROR:  column "col2" of table "domconnotnulltest" contains null values
-update domconnotnulltest set col2 = 6;
-alter domain connotnull add constraint constr1 not null;
-select count(*) from pg_constraint where contypid = 'connotnull'::regtype and contype = 'n';
- count
--------
-     1
-(1 row)
-
-alter domain connotnull add constraint constr1bis not null; -- redundant
-select count(*) from pg_constraint where contypid = 'connotnull'::regtype and contype = 'n';
- count
--------
-     1
-(1 row)
-
-\dD connotnull
-                           List of domains
- Schema |    Name    |  Type   | Collation | Nullable | Default | Check
---------+------------+---------+-----------+----------+---------+-------
- public | connotnull | integer |           | not null |         |
-(1 row)
-
-update domconnotnulltest set col1 = null; -- fails
-ERROR:  domain connotnull does not allow null values
-alter domain connotnull drop constraint constr1;
-update domconnotnulltest set col1 = null;
-drop domain connotnull cascade;
-NOTICE:  drop cascades to 2 other objects
-DETAIL:  drop cascades to column col2 of table domconnotnulltest
-drop cascades to column col1 of table domconnotnulltest
-drop table domconnotnulltest;
--- Test ALTER DOMAIN .. CONSTRAINT .. NOT VALID
-create domain things AS INT;
-CREATE TABLE thethings (stuff things);
-INSERT INTO thethings (stuff) VALUES (55);
-ALTER DOMAIN things ADD CONSTRAINT meow CHECK (VALUE < 11);
-ERROR:  column "stuff" of table "thethings" contains values that violate the new constraint
-ALTER DOMAIN things ADD CONSTRAINT meow CHECK (VALUE < 11) NOT VALID;
-ALTER DOMAIN things VALIDATE CONSTRAINT meow;
-ERROR:  column "stuff" of table "thethings" contains values that violate the new constraint
-UPDATE thethings SET stuff = 10;
-ALTER DOMAIN things VALIDATE CONSTRAINT meow;
--- Confirm ALTER DOMAIN with RULES.
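Views are implemented as rewrite rules, so a domain cast stored in a view body is re-evaluated against the domain's current constraints at query time. A hypothetical sketch of what the next block verifies (src, dnum, vsrc are illustrative names):

    create table src (n int);
    create domain dnum as integer;
    create view vsrc as select n::dnum from src;
    insert into src values (null);
    alter domain dnum set not null;  -- allowed: no stored column uses dnum
    select * from vsrc;              -- now fails: the rule's cast applies the new NOT NULL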
-create table domtab (col1 integer);
-create domain dom as integer;
-create view domview as select cast(col1 as dom) from domtab;
-insert into domtab (col1) values (null);
-insert into domtab (col1) values (5);
-select * from domview;
- col1
------
-
-    5
-(2 rows)
-
-alter domain dom set not null;
-select * from domview; -- fail
-ERROR:  domain dom does not allow null values
-alter domain dom drop not null;
-select * from domview;
- col1
------
-
-    5
-(2 rows)
-
-alter domain dom add constraint domchkgt6 check(value > 6);
-select * from domview; --fail
-ERROR:  value for domain dom violates check constraint "domchkgt6"
-alter domain dom drop constraint domchkgt6 restrict;
-select * from domview;
- col1
------
-
-    5
-(2 rows)
-
--- cleanup
-drop domain ddef1 restrict;
-drop domain ddef2 restrict;
-drop domain ddef3 restrict;
-drop domain ddef4 restrict;
-drop domain ddef5 restrict;
-drop sequence ddef4_seq;
--- Test domains over domains
-create domain vchar4 varchar(4);
-create domain dinter vchar4 check (substring(VALUE, 1, 1) = 'x');
-create domain dtop dinter check (substring(VALUE, 2, 1) = '1');
-select 'x123'::dtop;
- dtop
-------
- x123
-(1 row)
-
-select 'x1234'::dtop; -- explicit coercion should truncate
- dtop
-------
- x123
-(1 row)
-
-select 'y1234'::dtop; -- fail
-ERROR:  value for domain dtop violates check constraint "dinter_check"
-select 'y123'::dtop; -- fail
-ERROR:  value for domain dtop violates check constraint "dinter_check"
-select 'yz23'::dtop; -- fail
-ERROR:  value for domain dtop violates check constraint "dinter_check"
-select 'xz23'::dtop; -- fail
-ERROR:  value for domain dtop violates check constraint "dtop_check"
-create temp table dtest(f1 dtop);
-insert into dtest values('x123');
-insert into dtest values('x1234'); -- fail, implicit coercion
-ERROR:  value too long for type character varying(4)
-insert into dtest values('y1234'); -- fail, implicit coercion
-ERROR:  value too long for type character varying(4)
-insert into dtest values('y123'); -- fail
-ERROR:  value for domain dtop violates check constraint "dinter_check"
-insert into dtest values('yz23'); -- fail
-ERROR:  value for domain dtop violates check constraint "dinter_check"
-insert into dtest values('xz23'); -- fail
-ERROR:  value for domain dtop violates check constraint "dtop_check"
-drop table dtest;
-drop domain vchar4 cascade;
-NOTICE:  drop cascades to 2 other objects
-DETAIL:  drop cascades to type dinter
-drop cascades to type dtop
--- Make sure that constraints of newly-added domain columns are
--- enforced correctly, even if there's no default value for the new
--- column.  Per bug #1433
-create domain str_domain as text not null;
-create table domain_test (a int, b int);
-insert into domain_test values (1, 2);
-insert into domain_test values (1, 2);
--- should fail
-alter table domain_test add column c str_domain;
-ERROR:  domain str_domain does not allow null values
-create domain str_domain2 as text check (value <> 'foo') default 'foo';
--- should fail
-alter table domain_test add column d str_domain2;
-ERROR:  value for domain str_domain2 violates check constraint "str_domain2_check"
--- Check that domain constraints on prepared statement parameters of
--- unknown type are enforced correctly.
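Sketch of the prepared-statement case the comment above describes (hypothetical names): the parameter starts out with unknown type, and the domain's constraints are applied when it is coerced at execution time.

    create domain d_pos2 as int4 check (value > 0) not null;
    prepare chk as select $1::d_pos2;
    execute chk(1);     -- ok
    execute chk(-1);    -- fails the CHECK constraint
    execute chk(null);  -- fails the NOT NULL constraint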
-create domain pos_int as int4 check (value > 0) not null;
-prepare s1 as select $1::pos_int = 10 as "is_ten";
-execute s1(10);
- is_ten
---------
- t
-(1 row)
-
-execute s1(0); -- should fail
-ERROR:  value for domain pos_int violates check constraint "pos_int_check"
-execute s1(NULL); -- should fail
-ERROR:  domain pos_int does not allow null values
--- Check that domain constraints on plpgsql function parameters, results,
--- and local variables are enforced correctly.
-create function doubledecrement(p1 pos_int) returns pos_int as $$
-declare v pos_int;
-begin
-  return p1;
-end$$ language plpgsql;
-select doubledecrement(3); -- fail because of implicit null assignment
-ERROR:  domain pos_int does not allow null values
-CONTEXT:  PL/pgSQL function doubledecrement(pos_int) line 2 during statement block local variable initialization
-create or replace function doubledecrement(p1 pos_int) returns pos_int as $$
-declare v pos_int := 0;
-begin
-  return p1;
-end$$ language plpgsql;
-select doubledecrement(3); -- fail at initialization assignment
-ERROR:  value for domain pos_int violates check constraint "pos_int_check"
-CONTEXT:  PL/pgSQL function doubledecrement(pos_int) line 2 during statement block local variable initialization
-create or replace function doubledecrement(p1 pos_int) returns pos_int as $$
-declare v pos_int := 1;
-begin
-  v := p1 - 1;
-  return v - 1;
-end$$ language plpgsql;
-select doubledecrement(null); -- fail before call
-ERROR:  domain pos_int does not allow null values
-select doubledecrement(0); -- fail before call
-ERROR:  value for domain pos_int violates check constraint "pos_int_check"
-select doubledecrement(1); -- fail at assignment to v
-ERROR:  value for domain pos_int violates check constraint "pos_int_check"
-CONTEXT:  PL/pgSQL function doubledecrement(pos_int) line 4 at assignment
-select doubledecrement(2); -- fail at return
-ERROR:  value for domain pos_int violates check constraint "pos_int_check"
-CONTEXT:  PL/pgSQL function doubledecrement(pos_int) while casting return value to function's return type
-select doubledecrement(3); -- good
- doubledecrement
------------------
-               1
-(1 row)
-
--- Check that ALTER DOMAIN tests columns of derived types
-create domain posint as int4;
--- Currently, this doesn't work for composite types, but verify it complains
-create type ddtest1 as (f1 posint);
-create table ddtest2(f1 ddtest1);
-insert into ddtest2 values(row(-1));
-alter domain posint add constraint c1 check(value >= 0);
-ERROR:  cannot alter type "posint" because column "ddtest2.f1" uses it
-drop table ddtest2;
--- Likewise for domains within arrays of composite
-create table ddtest2(f1 ddtest1[]);
-insert into ddtest2 values('{(-1)}');
-alter domain posint add constraint c1 check(value >= 0);
-ERROR:  cannot alter type "posint" because column "ddtest2.f1" uses it
-drop table ddtest2;
--- Likewise for domains within domains over composite
-create domain ddtest1d as ddtest1;
-create table ddtest2(f1 ddtest1d);
-insert into ddtest2 values('(-1)');
-alter domain posint add constraint c1 check(value >= 0);
-ERROR:  cannot alter type "posint" because column "ddtest2.f1" uses it
-drop table ddtest2;
-drop domain ddtest1d;
--- Likewise for domains within domains over array of composite
-create domain ddtest1d as ddtest1[];
-create table ddtest2(f1 ddtest1d);
-insert into ddtest2 values('{(-1)}');
-alter domain posint add constraint c1 check(value >= 0);
-ERROR:  cannot alter type "posint" because column "ddtest2.f1" uses it
-drop table ddtest2;
-drop domain ddtest1d;
--- Doesn't work for ranges, either
-create type rposint as range (subtype = posint);
-create table ddtest2(f1 rposint);
-insert into ddtest2 values('(-1,3]');
-alter domain posint add constraint c1 check(value >= 0);
-ERROR:  cannot alter type "posint" because column "ddtest2.f1" uses it
-drop table ddtest2;
-drop type rposint;
-alter domain posint add constraint c1 check(value >= 0);
-create domain posint2 as posint check (value % 2 = 0);
-create table ddtest2(f1 posint2);
-insert into ddtest2 values(11); -- fail
-ERROR:  value for domain posint2 violates check constraint "posint2_check"
-insert into ddtest2 values(-2); -- fail
-ERROR:  value for domain posint2 violates check constraint "c1"
-insert into ddtest2 values(2);
-alter domain posint add constraint c2 check(value >= 10); -- fail
-ERROR:  column "f1" of table "ddtest2" contains values that violate the new constraint
-alter domain posint add constraint c2 check(value > 0); -- OK
-drop table ddtest2;
-drop type ddtest1;
-drop domain posint cascade;
-NOTICE:  drop cascades to type posint2
---
--- Check enforcement of domain-related typmod in plpgsql (bug #5717)
---
-create or replace function array_elem_check(numeric) returns numeric as $$
-declare
-  x numeric(4,2)[1];
-begin
-  x[1] := $1;
-  return x[1];
-end$$ language plpgsql;
-select array_elem_check(121.00);
-ERROR:  numeric field overflow
-DETAIL:  A field with precision 4, scale 2 must round to an absolute value less than 10^2.
-CONTEXT:  PL/pgSQL function array_elem_check(numeric) line 5 at assignment
-select array_elem_check(1.23456);
- array_elem_check
-------------------
-             1.23
-(1 row)
-
-create domain mynums as numeric(4,2)[1];
-create or replace function array_elem_check(numeric) returns numeric as $$
-declare
-  x mynums;
-begin
-  x[1] := $1;
-  return x[1];
-end$$ language plpgsql;
-select array_elem_check(121.00);
-ERROR:  numeric field overflow
-DETAIL:  A field with precision 4, scale 2 must round to an absolute value less than 10^2.
-CONTEXT:  PL/pgSQL function array_elem_check(numeric) line 5 at assignment
-select array_elem_check(1.23456);
- array_elem_check
-------------------
-             1.23
-(1 row)
-
-create domain mynums2 as mynums;
-create or replace function array_elem_check(numeric) returns numeric as $$
-declare
-  x mynums2;
-begin
-  x[1] := $1;
-  return x[1];
-end$$ language plpgsql;
-select array_elem_check(121.00);
-ERROR:  numeric field overflow
-DETAIL:  A field with precision 4, scale 2 must round to an absolute value less than 10^2.
-CONTEXT: PL/pgSQL function array_elem_check(numeric) line 5 at assignment -select array_elem_check(1.23456); - array_elem_check ------------------- - 1.23 -(1 row) - -drop function array_elem_check(numeric); --- --- Check enforcement of array-level domain constraints --- -create domain orderedpair as int[2] check (value[1] < value[2]); -select array[1,2]::orderedpair; - array -------- - {1,2} -(1 row) - -select array[2,1]::orderedpair; -- fail -ERROR: value for domain orderedpair violates check constraint "orderedpair_check" -create temp table op (f1 orderedpair); -insert into op values (array[1,2]); -insert into op values (array[2,1]); -- fail -ERROR: value for domain orderedpair violates check constraint "orderedpair_check" -update op set f1[2] = 3; -update op set f1[2] = 0; -- fail -ERROR: value for domain orderedpair violates check constraint "orderedpair_check" -select * from op; - f1 -------- - {1,3} -(1 row) - -create or replace function array_elem_check(int) returns int as $$ -declare - x orderedpair := '{1,2}'; -begin - x[2] := $1; - return x[2]; -end$$ language plpgsql; -select array_elem_check(3); - array_elem_check ------------------- - 3 -(1 row) - -select array_elem_check(-1); -ERROR: value for domain orderedpair violates check constraint "orderedpair_check" -CONTEXT: PL/pgSQL function array_elem_check(integer) line 5 at assignment -drop function array_elem_check(int); --- --- Check enforcement of changing constraints in plpgsql --- -create domain di as int; -create function dom_check(int) returns di as $$ -declare d di; -begin - d := $1::di; - return d; -end -$$ language plpgsql immutable; -select dom_check(0); - dom_check ------------ - 0 -(1 row) - -alter domain di add constraint pos check (value > 0); -select dom_check(0); -- fail -ERROR: value for domain di violates check constraint "pos" -CONTEXT: PL/pgSQL function dom_check(integer) line 4 at assignment -alter domain di drop constraint pos; -select dom_check(0); - dom_check ------------ - 0 -(1 row) - --- implicit cast during assignment is a separate code path, test that too -create or replace function dom_check(int) returns di as $$ -declare d di; -begin - d := $1; - return d; -end -$$ language plpgsql immutable; -select dom_check(0); - dom_check ------------ - 0 -(1 row) - -alter domain di add constraint pos check (value > 0); -select dom_check(0); -- fail -ERROR: value for domain di violates check constraint "pos" -CONTEXT: PL/pgSQL function dom_check(integer) line 4 at assignment -alter domain di drop constraint pos; -select dom_check(0); - dom_check ------------ - 0 -(1 row) - -drop function dom_check(int); -drop domain di; --- --- Check use of a (non-inline-able) SQL function in a domain constraint; --- this has caused issues in the past --- -create function sql_is_distinct_from(anyelement, anyelement) -returns boolean language sql -as 'select $1 is distinct from $2 limit 1'; -create domain inotnull int - check (sql_is_distinct_from(value, null)); -select 1::inotnull; - inotnull ----------- - 1 -(1 row) - -select null::inotnull; -ERROR: value for domain inotnull violates check constraint "inotnull_check" -create table dom_table (x inotnull); -insert into dom_table values ('1'); -insert into dom_table values (1); -insert into dom_table values (null); -ERROR: value for domain inotnull violates check constraint "inotnull_check" -drop table dom_table; -drop domain inotnull; -drop function sql_is_distinct_from(anyelement, anyelement); --- --- Renaming --- -create domain testdomain1 as int; -alter domain testdomain1 
rename to testdomain2; -alter type testdomain2 rename to testdomain3; -- alter type also works -drop domain testdomain3; --- --- Renaming domain constraints --- -create domain testdomain1 as int constraint unsigned check (value > 0); -alter domain testdomain1 rename constraint unsigned to unsigned_foo; -alter domain testdomain1 drop constraint unsigned_foo; -drop domain testdomain1; --- --- Get the base type of a domain --- -create domain mytext as text; -create domain mytext_child_1 as mytext; -select pg_basetype('mytext'::regtype); - pg_basetype -------------- - text -(1 row) - -select pg_basetype('mytext_child_1'::regtype); - pg_basetype -------------- - text -(1 row) - -select pg_basetype(1); -- expect NULL not error - pg_basetype -------------- - -(1 row) - -drop domain mytext cascade; -NOTICE: drop cascades to type mytext_child_1 --- --- Information schema --- -SELECT * FROM information_schema.column_domain_usage - WHERE domain_name IN ('con', 'dom', 'pos_int', 'things') - ORDER BY domain_name; - domain_catalog | domain_schema | domain_name | table_catalog | table_schema | table_name | column_name -----------------+---------------+-------------+---------------+--------------+------------+------------- - regression | public | con | regression | public | domcontest | col1 - regression | public | dom | regression | public | domview | col1 - regression | public | things | regression | public | thethings | stuff -(3 rows) - -SELECT * FROM information_schema.domain_constraints - WHERE domain_name IN ('con', 'dom', 'pos_int', 'things') - ORDER BY constraint_name; - constraint_catalog | constraint_schema | constraint_name | domain_catalog | domain_schema | domain_name | is_deferrable | initially_deferred ---------------------+-------------------+------------------+----------------+---------------+-------------+---------------+-------------------- - regression | public | con_check | regression | public | con | NO | NO - regression | public | meow | regression | public | things | NO | NO - regression | public | pos_int_check | regression | public | pos_int | NO | NO - regression | public | pos_int_not_null | regression | public | pos_int | NO | NO -(4 rows) - -SELECT * FROM information_schema.domains - WHERE domain_name IN ('con', 'dom', 'pos_int', 'things') - ORDER BY domain_name; - domain_catalog | domain_schema | domain_name | data_type | character_maximum_length | character_octet_length | character_set_catalog | character_set_schema | character_set_name | collation_catalog | collation_schema | collation_name | numeric_precision | numeric_precision_radix | numeric_scale | datetime_precision | interval_type | interval_precision | domain_default | udt_catalog | udt_schema | udt_name | scope_catalog | scope_schema | scope_name | maximum_cardinality | dtd_identifier -----------------+---------------+-------------+-----------+--------------------------+------------------------+-----------------------+----------------------+--------------------+-------------------+------------------+----------------+-------------------+-------------------------+---------------+--------------------+---------------+--------------------+----------------+-------------+------------+----------+---------------+--------------+------------+---------------------+---------------- - regression | public | con | integer | | | | | | | | | 32 | 2 | 0 | | | | | regression | pg_catalog | int4 | | | | | 1 - regression | public | dom | integer | | | | | | | | | 32 | 2 | 0 | | | | | regression | pg_catalog | int4 | | | | | 1 - 
regression | public | pos_int | integer | | | | | | | | | 32 | 2 | 0 | | | | | regression | pg_catalog | int4 | | | | | 1 - regression | public | things | integer | | | | | | | | | 32 | 2 | 0 | | | | | regression | pg_catalog | int4 | | | | | 1 -(4 rows) - -SELECT * FROM information_schema.check_constraints - WHERE (constraint_schema, constraint_name) - IN (SELECT constraint_schema, constraint_name - FROM information_schema.domain_constraints - WHERE domain_name IN ('con', 'dom', 'pos_int', 'things')) - ORDER BY constraint_name; - constraint_catalog | constraint_schema | constraint_name | check_clause ---------------------+-------------------+------------------+------------------- - regression | public | con_check | (VALUE > 0) - regression | public | meow | (VALUE < 11) - regression | public | pos_int_check | (VALUE > 0) - regression | public | pos_int_not_null | VALUE IS NOT NULL -(4 rows) - +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/rangefuncs.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/rangefuncs.out --- /Users/admin/pgsql/src/test/regress/expected/rangefuncs.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/rangefuncs.out 2024-12-13 13:20:12 @@ -1,2503 +1,2 @@ -CREATE TABLE rngfunc2(rngfuncid int, f2 int); -INSERT INTO rngfunc2 VALUES(1, 11); -INSERT INTO rngfunc2 VALUES(2, 22); -INSERT INTO rngfunc2 VALUES(1, 111); -CREATE FUNCTION rngfunct(int) returns setof rngfunc2 as 'SELECT * FROM rngfunc2 WHERE rngfuncid = $1 ORDER BY f2;' LANGUAGE SQL; --- function with ORDINALITY -select * from rngfunct(1) with ordinality as z(a,b,ord); - a | b | ord ----+-----+----- - 1 | 11 | 1 - 1 | 111 | 2 -(2 rows) - -select * from rngfunct(1) with ordinality as z(a,b,ord) where b > 100; -- ordinal 2, not 1 - a | b | ord ----+-----+----- - 1 | 111 | 2 -(1 row) - --- ordinality vs. column names and types -select a,b,ord from rngfunct(1) with ordinality as z(a,b,ord); - a | b | ord ----+-----+----- - 1 | 11 | 1 - 1 | 111 | 2 -(2 rows) - -select a,ord from unnest(array['a','b']) with ordinality as z(a,ord); - a | ord ----+----- - a | 1 - b | 2 -(2 rows) - -select * from unnest(array['a','b']) with ordinality as z(a,ord); - a | ord ----+----- - a | 1 - b | 2 -(2 rows) - -select a,ord from unnest(array[1.0::float8]) with ordinality as z(a,ord); - a | ord ----+----- - 1 | 1 -(1 row) - -select * from unnest(array[1.0::float8]) with ordinality as z(a,ord); - a | ord ----+----- - 1 | 1 -(1 row) - -select row_to_json(s.*) from generate_series(11,14) with ordinality s; - row_to_json -------------------------- - {"s":11,"ordinality":1} - {"s":12,"ordinality":2} - {"s":13,"ordinality":3} - {"s":14,"ordinality":4} -(4 rows) - --- ordinality vs. 
views -create temporary view vw_ord as select * from (values (1)) v(n) join rngfunct(1) with ordinality as z(a,b,ord) on (n=ord); -select * from vw_ord; - n | a | b | ord ----+---+----+----- - 1 | 1 | 11 | 1 -(1 row) - -select definition from pg_views where viewname='vw_ord'; - definition -------------------------------------------------------------------------- - SELECT v.n, + - z.a, + - z.b, + - z.ord + - FROM (( VALUES (1)) v(n) + - JOIN rngfunct(1) WITH ORDINALITY z(a, b, ord) ON ((v.n = z.ord))); -(1 row) - -drop view vw_ord; --- multiple functions -select * from rows from(rngfunct(1),rngfunct(2)) with ordinality as z(a,b,c,d,ord); - a | b | c | d | ord ----+-----+---+----+----- - 1 | 11 | 2 | 22 | 1 - 1 | 111 | | | 2 -(2 rows) - -create temporary view vw_ord as select * from (values (1)) v(n) join rows from(rngfunct(1),rngfunct(2)) with ordinality as z(a,b,c,d,ord) on (n=ord); -select * from vw_ord; - n | a | b | c | d | ord ----+---+----+---+----+----- - 1 | 1 | 11 | 2 | 22 | 1 -(1 row) - -select definition from pg_views where viewname='vw_ord'; - definition -------------------------------------------------------------------------------------------------------- - SELECT v.n, + - z.a, + - z.b, + - z.c, + - z.d, + - z.ord + - FROM (( VALUES (1)) v(n) + - JOIN ROWS FROM(rngfunct(1), rngfunct(2)) WITH ORDINALITY z(a, b, c, d, ord) ON ((v.n = z.ord))); -(1 row) - -drop view vw_ord; --- expansions of unnest() -select * from unnest(array[10,20],array['foo','bar'],array[1.0]); - unnest | unnest | unnest ---------+--------+-------- - 10 | foo | 1.0 - 20 | bar | -(2 rows) - -select * from unnest(array[10,20],array['foo','bar'],array[1.0]) with ordinality as z(a,b,c,ord); - a | b | c | ord -----+-----+-----+----- - 10 | foo | 1.0 | 1 - 20 | bar | | 2 -(2 rows) - -select * from rows from(unnest(array[10,20],array['foo','bar'],array[1.0])) with ordinality as z(a,b,c,ord); - a | b | c | ord -----+-----+-----+----- - 10 | foo | 1.0 | 1 - 20 | bar | | 2 -(2 rows) - -select * from rows from(unnest(array[10,20],array['foo','bar']), generate_series(101,102)) with ordinality as z(a,b,c,ord); - a | b | c | ord -----+-----+-----+----- - 10 | foo | 101 | 1 - 20 | bar | 102 | 2 -(2 rows) - -create temporary view vw_ord as select * from unnest(array[10,20],array['foo','bar'],array[1.0]) as z(a,b,c); -select * from vw_ord; - a | b | c -----+-----+----- - 10 | foo | 1.0 - 20 | bar | -(2 rows) - -select definition from pg_views where viewname='vw_ord'; - definition ----------------------------------------------------------------------------------------- - SELECT a, + - b, + - c + - FROM UNNEST(ARRAY[10, 20], ARRAY['foo'::text, 'bar'::text], ARRAY[1.0]) z(a, b, c); -(1 row) - -drop view vw_ord; -create temporary view vw_ord as select * from rows from(unnest(array[10,20],array['foo','bar'],array[1.0])) as z(a,b,c); -select * from vw_ord; - a | b | c -----+-----+----- - 10 | foo | 1.0 - 20 | bar | -(2 rows) - -select definition from pg_views where viewname='vw_ord'; - definition ----------------------------------------------------------------------------------------- - SELECT a, + - b, + - c + - FROM UNNEST(ARRAY[10, 20], ARRAY['foo'::text, 'bar'::text], ARRAY[1.0]) z(a, b, c); -(1 row) - -drop view vw_ord; -create temporary view vw_ord as select * from rows from(unnest(array[10,20],array['foo','bar']), generate_series(1,2)) as z(a,b,c); -select * from vw_ord; - a | b | c -----+-----+--- - 10 | foo | 1 - 20 | bar | 2 -(2 rows) - -select definition from pg_views where viewname='vw_ord'; - definition 
----------------------------------------------------------------------------------------------------------------------- - SELECT a, + - b, + - c + - FROM ROWS FROM(unnest(ARRAY[10, 20]), unnest(ARRAY['foo'::text, 'bar'::text]), generate_series(1, 2)) z(a, b, c); -(1 row) - -drop view vw_ord; --- ordinality and multiple functions vs. rewind and reverse scan -begin; -declare rf_cur scroll cursor for select * from rows from(generate_series(1,5),generate_series(1,2)) with ordinality as g(i,j,o); -fetch all from rf_cur; - i | j | o ----+---+--- - 1 | 1 | 1 - 2 | 2 | 2 - 3 | | 3 - 4 | | 4 - 5 | | 5 -(5 rows) - -fetch backward all from rf_cur; - i | j | o ----+---+--- - 5 | | 5 - 4 | | 4 - 3 | | 3 - 2 | 2 | 2 - 1 | 1 | 1 -(5 rows) - -fetch all from rf_cur; - i | j | o ----+---+--- - 1 | 1 | 1 - 2 | 2 | 2 - 3 | | 3 - 4 | | 4 - 5 | | 5 -(5 rows) - -fetch next from rf_cur; - i | j | o ----+---+--- -(0 rows) - -fetch next from rf_cur; - i | j | o ----+---+--- -(0 rows) - -fetch prior from rf_cur; - i | j | o ----+---+--- - 5 | | 5 -(1 row) - -fetch absolute 1 from rf_cur; - i | j | o ----+---+--- - 1 | 1 | 1 -(1 row) - -fetch next from rf_cur; - i | j | o ----+---+--- - 2 | 2 | 2 -(1 row) - -fetch next from rf_cur; - i | j | o ----+---+--- - 3 | | 3 -(1 row) - -fetch next from rf_cur; - i | j | o ----+---+--- - 4 | | 4 -(1 row) - -fetch prior from rf_cur; - i | j | o ----+---+--- - 3 | | 3 -(1 row) - -fetch prior from rf_cur; - i | j | o ----+---+--- - 2 | 2 | 2 -(1 row) - -fetch prior from rf_cur; - i | j | o ----+---+--- - 1 | 1 | 1 -(1 row) - -commit; --- function with implicit LATERAL -select * from rngfunc2, rngfunct(rngfunc2.rngfuncid) z where rngfunc2.f2 = z.f2; - rngfuncid | f2 | rngfuncid | f2 ------------+-----+-----------+----- - 1 | 11 | 1 | 11 - 2 | 22 | 2 | 22 - 1 | 111 | 1 | 111 -(3 rows) - --- function with implicit LATERAL and explicit ORDINALITY -select * from rngfunc2, rngfunct(rngfunc2.rngfuncid) with ordinality as z(rngfuncid,f2,ord) where rngfunc2.f2 = z.f2; - rngfuncid | f2 | rngfuncid | f2 | ord ------------+-----+-----------+-----+----- - 1 | 11 | 1 | 11 | 1 - 2 | 22 | 2 | 22 | 1 - 1 | 111 | 1 | 111 | 2 -(3 rows) - --- function in subselect -select * from rngfunc2 where f2 in (select f2 from rngfunct(rngfunc2.rngfuncid) z where z.rngfuncid = rngfunc2.rngfuncid) ORDER BY 1,2; - rngfuncid | f2 ------------+----- - 1 | 11 - 1 | 111 - 2 | 22 -(3 rows) - --- function in subselect -select * from rngfunc2 where f2 in (select f2 from rngfunct(1) z where z.rngfuncid = rngfunc2.rngfuncid) ORDER BY 1,2; - rngfuncid | f2 ------------+----- - 1 | 11 - 1 | 111 -(2 rows) - --- function in subselect -select * from rngfunc2 where f2 in (select f2 from rngfunct(rngfunc2.rngfuncid) z where z.rngfuncid = 1) ORDER BY 1,2; - rngfuncid | f2 ------------+----- - 1 | 11 - 1 | 111 -(2 rows) - --- nested functions -select rngfunct.rngfuncid, rngfunct.f2 from rngfunct(sin(pi()/2)::int) ORDER BY 1,2; - rngfuncid | f2 ------------+----- - 1 | 11 - 1 | 111 -(2 rows) - -CREATE TABLE rngfunc (rngfuncid int, rngfuncsubid int, rngfuncname text, primary key(rngfuncid,rngfuncsubid)); -INSERT INTO rngfunc VALUES(1,1,'Joe'); -INSERT INTO rngfunc VALUES(1,2,'Ed'); -INSERT INTO rngfunc VALUES(2,1,'Mary'); --- sql, proretset = f, prorettype = b -CREATE FUNCTION getrngfunc1(int) RETURNS int AS 'SELECT $1;' LANGUAGE SQL; -SELECT * FROM getrngfunc1(1) AS t1; - t1 ----- - 1 -(1 row) - -SELECT * FROM getrngfunc1(1) WITH ORDINALITY AS t1(v,o); - v | o ----+--- - 1 | 1 -(1 row) - -CREATE VIEW vw_getrngfunc AS SELECT * FROM 
getrngfunc1(1); -SELECT * FROM vw_getrngfunc; - getrngfunc1 -------------- - 1 -(1 row) - -DROP VIEW vw_getrngfunc; -CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc1(1) WITH ORDINALITY as t1(v,o); -SELECT * FROM vw_getrngfunc; - v | o ----+--- - 1 | 1 -(1 row) - -DROP VIEW vw_getrngfunc; --- sql, proretset = t, prorettype = b -CREATE FUNCTION getrngfunc2(int) RETURNS setof int AS 'SELECT rngfuncid FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; -SELECT * FROM getrngfunc2(1) AS t1; - t1 ----- - 1 - 1 -(2 rows) - -SELECT * FROM getrngfunc2(1) WITH ORDINALITY AS t1(v,o); - v | o ----+--- - 1 | 1 - 1 | 2 -(2 rows) - -CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc2(1); -SELECT * FROM vw_getrngfunc; - getrngfunc2 -------------- - 1 - 1 -(2 rows) - -DROP VIEW vw_getrngfunc; -CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc2(1) WITH ORDINALITY AS t1(v,o); -SELECT * FROM vw_getrngfunc; - v | o ----+--- - 1 | 1 - 1 | 2 -(2 rows) - -DROP VIEW vw_getrngfunc; --- sql, proretset = t, prorettype = b -CREATE FUNCTION getrngfunc3(int) RETURNS setof text AS 'SELECT rngfuncname FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; -SELECT * FROM getrngfunc3(1) AS t1; - t1 ------ - Joe - Ed -(2 rows) - -SELECT * FROM getrngfunc3(1) WITH ORDINALITY AS t1(v,o); - v | o ------+--- - Joe | 1 - Ed | 2 -(2 rows) - -CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc3(1); -SELECT * FROM vw_getrngfunc; - getrngfunc3 -------------- - Joe - Ed -(2 rows) - -DROP VIEW vw_getrngfunc; -CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc3(1) WITH ORDINALITY AS t1(v,o); -SELECT * FROM vw_getrngfunc; - v | o ------+--- - Joe | 1 - Ed | 2 -(2 rows) - -DROP VIEW vw_getrngfunc; --- sql, proretset = f, prorettype = c -CREATE FUNCTION getrngfunc4(int) RETURNS rngfunc AS 'SELECT * FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; -SELECT * FROM getrngfunc4(1) AS t1; - rngfuncid | rngfuncsubid | rngfuncname ------------+--------------+------------- - 1 | 1 | Joe -(1 row) - -SELECT * FROM getrngfunc4(1) WITH ORDINALITY AS t1(a,b,c,o); - a | b | c | o ----+---+-----+--- - 1 | 1 | Joe | 1 -(1 row) - -CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc4(1); -SELECT * FROM vw_getrngfunc; - rngfuncid | rngfuncsubid | rngfuncname ------------+--------------+------------- - 1 | 1 | Joe -(1 row) - -DROP VIEW vw_getrngfunc; -CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc4(1) WITH ORDINALITY AS t1(a,b,c,o); -SELECT * FROM vw_getrngfunc; - a | b | c | o ----+---+-----+--- - 1 | 1 | Joe | 1 -(1 row) - -DROP VIEW vw_getrngfunc; --- sql, proretset = t, prorettype = c -CREATE FUNCTION getrngfunc5(int) RETURNS setof rngfunc AS 'SELECT * FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; -SELECT * FROM getrngfunc5(1) AS t1; - rngfuncid | rngfuncsubid | rngfuncname ------------+--------------+------------- - 1 | 1 | Joe - 1 | 2 | Ed -(2 rows) - -SELECT * FROM getrngfunc5(1) WITH ORDINALITY AS t1(a,b,c,o); - a | b | c | o ----+---+-----+--- - 1 | 1 | Joe | 1 - 1 | 2 | Ed | 2 -(2 rows) - -CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc5(1); -SELECT * FROM vw_getrngfunc; - rngfuncid | rngfuncsubid | rngfuncname ------------+--------------+------------- - 1 | 1 | Joe - 1 | 2 | Ed -(2 rows) - -DROP VIEW vw_getrngfunc; -CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc5(1) WITH ORDINALITY AS t1(a,b,c,o); -SELECT * FROM vw_getrngfunc; - a | b | c | o ----+---+-----+--- - 1 | 1 | Joe | 1 - 1 | 2 | Ed | 2 -(2 rows) - -DROP VIEW vw_getrngfunc; --- sql, proretset = f, prorettype = record -CREATE FUNCTION getrngfunc6(int) 
RETURNS RECORD AS 'SELECT * FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; -SELECT * FROM getrngfunc6(1) AS t1(rngfuncid int, rngfuncsubid int, rngfuncname text); - rngfuncid | rngfuncsubid | rngfuncname ------------+--------------+------------- - 1 | 1 | Joe -(1 row) - -SELECT * FROM ROWS FROM( getrngfunc6(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text) ) WITH ORDINALITY; - rngfuncid | rngfuncsubid | rngfuncname | ordinality ------------+--------------+-------------+------------ - 1 | 1 | Joe | 1 -(1 row) - -CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc6(1) AS -(rngfuncid int, rngfuncsubid int, rngfuncname text); -SELECT * FROM vw_getrngfunc; - rngfuncid | rngfuncsubid | rngfuncname ------------+--------------+------------- - 1 | 1 | Joe -(1 row) - -DROP VIEW vw_getrngfunc; -CREATE VIEW vw_getrngfunc AS - SELECT * FROM ROWS FROM( getrngfunc6(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text) ) - WITH ORDINALITY; -SELECT * FROM vw_getrngfunc; - rngfuncid | rngfuncsubid | rngfuncname | ordinality ------------+--------------+-------------+------------ - 1 | 1 | Joe | 1 -(1 row) - -DROP VIEW vw_getrngfunc; --- sql, proretset = t, prorettype = record -CREATE FUNCTION getrngfunc7(int) RETURNS setof record AS 'SELECT * FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; -SELECT * FROM getrngfunc7(1) AS t1(rngfuncid int, rngfuncsubid int, rngfuncname text); - rngfuncid | rngfuncsubid | rngfuncname ------------+--------------+------------- - 1 | 1 | Joe - 1 | 2 | Ed -(2 rows) - -SELECT * FROM ROWS FROM( getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text) ) WITH ORDINALITY; - rngfuncid | rngfuncsubid | rngfuncname | ordinality ------------+--------------+-------------+------------ - 1 | 1 | Joe | 1 - 1 | 2 | Ed | 2 -(2 rows) - -CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc7(1) AS -(rngfuncid int, rngfuncsubid int, rngfuncname text); -SELECT * FROM vw_getrngfunc; - rngfuncid | rngfuncsubid | rngfuncname ------------+--------------+------------- - 1 | 1 | Joe - 1 | 2 | Ed -(2 rows) - -DROP VIEW vw_getrngfunc; -CREATE VIEW vw_getrngfunc AS - SELECT * FROM ROWS FROM( getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text) ) - WITH ORDINALITY; -SELECT * FROM vw_getrngfunc; - rngfuncid | rngfuncsubid | rngfuncname | ordinality ------------+--------------+-------------+------------ - 1 | 1 | Joe | 1 - 1 | 2 | Ed | 2 -(2 rows) - -DROP VIEW vw_getrngfunc; --- plpgsql, proretset = f, prorettype = b -CREATE FUNCTION getrngfunc8(int) RETURNS int AS 'DECLARE rngfuncint int; BEGIN SELECT rngfuncid into rngfuncint FROM rngfunc WHERE rngfuncid = $1; RETURN rngfuncint; END;' LANGUAGE plpgsql; -SELECT * FROM getrngfunc8(1) AS t1; - t1 ----- - 1 -(1 row) - -SELECT * FROM getrngfunc8(1) WITH ORDINALITY AS t1(v,o); - v | o ----+--- - 1 | 1 -(1 row) - -CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc8(1); -SELECT * FROM vw_getrngfunc; - getrngfunc8 -------------- - 1 -(1 row) - -DROP VIEW vw_getrngfunc; -CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc8(1) WITH ORDINALITY AS t1(v,o); -SELECT * FROM vw_getrngfunc; - v | o ----+--- - 1 | 1 -(1 row) - -DROP VIEW vw_getrngfunc; --- plpgsql, proretset = f, prorettype = c -CREATE FUNCTION getrngfunc9(int) RETURNS rngfunc AS 'DECLARE rngfunctup rngfunc%ROWTYPE; BEGIN SELECT * into rngfunctup FROM rngfunc WHERE rngfuncid = $1; RETURN rngfunctup; END;' LANGUAGE plpgsql; -SELECT * FROM getrngfunc9(1) AS t1; - rngfuncid | rngfuncsubid | rngfuncname ------------+--------------+------------- - 1 | 1 | Joe 
-(1 row) - -SELECT * FROM getrngfunc9(1) WITH ORDINALITY AS t1(a,b,c,o); - a | b | c | o ----+---+-----+--- - 1 | 1 | Joe | 1 -(1 row) - -CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc9(1); -SELECT * FROM vw_getrngfunc; - rngfuncid | rngfuncsubid | rngfuncname ------------+--------------+------------- - 1 | 1 | Joe -(1 row) - -DROP VIEW vw_getrngfunc; -CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc9(1) WITH ORDINALITY AS t1(a,b,c,o); -SELECT * FROM vw_getrngfunc; - a | b | c | o ----+---+-----+--- - 1 | 1 | Joe | 1 -(1 row) - -DROP VIEW vw_getrngfunc; --- mix 'n match kinds, to exercise expandRTE and related logic -select * from rows from(getrngfunc1(1),getrngfunc2(1),getrngfunc3(1),getrngfunc4(1),getrngfunc5(1), - getrngfunc6(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text), - getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text), - getrngfunc8(1),getrngfunc9(1)) - with ordinality as t1(a,b,c,d,e,f,g,h,i,j,k,l,m,o,p,q,r,s,t,u); - a | b | c | d | e | f | g | h | i | j | k | l | m | o | p | q | r | s | t | u ----+---+-----+---+---+-----+---+---+-----+---+---+-----+---+---+-----+---+---+---+-----+--- - 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 | 1 | Joe | 1 - | 1 | Ed | | | | 1 | 2 | Ed | | | | 1 | 2 | Ed | | | | | 2 -(2 rows) - -select * from rows from(getrngfunc9(1),getrngfunc8(1), - getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text), - getrngfunc6(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text), - getrngfunc5(1),getrngfunc4(1),getrngfunc3(1),getrngfunc2(1),getrngfunc1(1)) - with ordinality as t1(a,b,c,d,e,f,g,h,i,j,k,l,m,o,p,q,r,s,t,u); - a | b | c | d | e | f | g | h | i | j | k | l | m | o | p | q | r | s | t | u ----+---+-----+---+---+---+-----+---+---+-----+---+---+-----+---+---+-----+-----+---+---+--- - 1 | 1 | Joe | 1 | 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 | Joe | Joe | 1 | 1 | 1 - | | | | 1 | 2 | Ed | | | | 1 | 2 | Ed | | | | Ed | 1 | | 2 -(2 rows) - -create temporary view vw_rngfunc as - select * from rows from(getrngfunc9(1), - getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text), - getrngfunc1(1)) - with ordinality as t1(a,b,c,d,e,f,g,n); -select * from vw_rngfunc; - a | b | c | d | e | f | g | n ----+---+-----+---+---+-----+---+--- - 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 - | | | 1 | 2 | Ed | | 2 -(2 rows) - -select pg_get_viewdef('vw_rngfunc'); - pg_get_viewdef ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - SELECT a, + - b, + - c, + - d, + - e, + - f, + - g, + - n + - FROM ROWS FROM(getrngfunc9(1), getrngfunc7(1) AS (rngfuncid integer, rngfuncsubid integer, rngfuncname text), getrngfunc1(1)) WITH ORDINALITY t1(a, b, c, d, e, f, g, n); -(1 row) - -drop view vw_rngfunc; -DROP FUNCTION getrngfunc1(int); -DROP FUNCTION getrngfunc2(int); -DROP FUNCTION getrngfunc3(int); -DROP FUNCTION getrngfunc4(int); -DROP FUNCTION getrngfunc5(int); -DROP FUNCTION getrngfunc6(int); -DROP FUNCTION getrngfunc7(int); -DROP FUNCTION getrngfunc8(int); -DROP FUNCTION getrngfunc9(int); -DROP FUNCTION rngfunct(int); -DROP TABLE rngfunc2; -DROP TABLE rngfunc; --- Rescan tests -- -CREATE TEMPORARY SEQUENCE rngfunc_rescan_seq1; -CREATE TEMPORARY SEQUENCE rngfunc_rescan_seq2; -CREATE TYPE rngfunc_rescan_t AS (i integer, s bigint); -CREATE FUNCTION rngfunc_sql(int,int) RETURNS setof rngfunc_rescan_t AS 'SELECT i, nextval(''rngfunc_rescan_seq1'') FROM 
generate_series($1,$2) i;' LANGUAGE SQL; --- plpgsql functions use materialize mode -CREATE FUNCTION rngfunc_mat(int,int) RETURNS setof rngfunc_rescan_t AS 'begin for i in $1..$2 loop return next (i, nextval(''rngfunc_rescan_seq2'')); end loop; end;' LANGUAGE plpgsql; ---invokes ExecReScanFunctionScan - all these cases should materialize the function only once --- LEFT JOIN on a condition that the planner can't prove to be true is used to ensure the function --- is on the inner path of a nestloop join -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN rngfunc_sql(11,13) ON (r+i)<100; - r | i | s ----+----+--- - 1 | 11 | 1 - 1 | 12 | 2 - 1 | 13 | 3 - 2 | 11 | 1 - 2 | 12 | 2 - 2 | 13 | 3 - 3 | 11 | 1 - 3 | 12 | 2 - 3 | 13 | 3 -(9 rows) - -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN rngfunc_sql(11,13) WITH ORDINALITY AS f(i,s,o) ON (r+i)<100; - r | i | s | o ----+----+---+--- - 1 | 11 | 1 | 1 - 1 | 12 | 2 | 2 - 1 | 13 | 3 | 3 - 2 | 11 | 1 | 1 - 2 | 12 | 2 | 2 - 2 | 13 | 3 | 3 - 3 | 11 | 1 | 1 - 3 | 12 | 2 | 2 - 3 | 13 | 3 | 3 -(9 rows) - -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN rngfunc_mat(11,13) ON (r+i)<100; - r | i | s ----+----+--- - 1 | 11 | 1 - 1 | 12 | 2 - 1 | 13 | 3 - 2 | 11 | 1 - 2 | 12 | 2 - 2 | 13 | 3 - 3 | 11 | 1 - 3 | 12 | 2 - 3 | 13 | 3 -(9 rows) - -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN rngfunc_mat(11,13) WITH ORDINALITY AS f(i,s,o) ON (r+i)<100; - r | i | s | o ----+----+---+--- - 1 | 11 | 1 | 1 - 1 | 12 | 2 | 2 - 1 | 13 | 3 | 3 - 2 | 11 | 1 | 1 - 2 | 12 | 2 | 2 - 2 | 13 | 3 | 3 - 3 | 11 | 1 | 1 - 3 | 12 | 2 | 2 - 3 | 13 | 3 | 3 -(9 rows) - -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN ROWS FROM( rngfunc_sql(11,13), rngfunc_mat(11,13) ) WITH ORDINALITY AS f(i1,s1,i2,s2,o) ON (r+i1+i2)<100; - r | i1 | s1 | i2 | s2 | o ----+----+----+----+----+--- - 1 | 11 | 1 | 11 | 1 | 1 - 1 | 12 | 2 | 12 | 2 | 2 - 1 | 13 | 3 | 13 | 3 | 3 - 2 | 11 | 1 | 11 | 1 | 1 - 2 | 12 | 2 | 12 | 2 | 2 - 2 | 13 | 3 | 13 | 3 | 3 - 3 | 11 | 1 | 11 | 1 | 1 - 3 | 12 | 2 | 12 | 2 | 2 - 3 | 13 | 3 | 13 | 3 | 3 -(9 rows) - -SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN generate_series(11,13) f(i) ON (r+i)<100; - r | i ----+---- - 1 | 11 - 1 | 12 - 1 | 13 - 2 | 11 - 2 | 12 - 2 | 13 - 3 | 11 - 3 | 12 - 3 | 13 -(9 rows) - -SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN generate_series(11,13) WITH ORDINALITY AS f(i,o) ON (r+i)<100; - r | i | o ----+----+--- - 1 | 11 | 1 - 1 | 12 | 2 - 1 | 13 | 3 - 2 | 11 | 1 - 2 | 12 | 2 - 2 | 13 | 3 - 3 | 11 | 1 - 3 | 12 | 2 - 3 | 13 | 3 -(9 rows) - -SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN unnest(array[10,20,30]) f(i) ON (r+i)<100; - r | i ----+---- - 1 | 10 - 1 | 20 - 1 | 30 - 2 | 10 - 2 | 20 - 2 | 30 - 3 | 10 - 3 | 20 - 3 | 30 -(9 rows) - -SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN unnest(array[10,20,30]) WITH ORDINALITY AS f(i,o) ON 
(r+i)<100; - r | i | o ----+----+--- - 1 | 10 | 1 - 1 | 20 | 2 - 1 | 30 | 3 - 2 | 10 | 1 - 2 | 20 | 2 - 2 | 30 | 3 - 3 | 10 | 1 - 3 | 20 | 2 - 3 | 30 | 3 -(9 rows) - ---invokes ExecReScanFunctionScan with chgParam != NULL (using implied LATERAL) -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_sql(10+r,13); - r | i | s ----+----+--- - 1 | 11 | 1 - 1 | 12 | 2 - 1 | 13 | 3 - 2 | 12 | 4 - 2 | 13 | 5 - 3 | 13 | 6 -(6 rows) - -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_sql(10+r,13) WITH ORDINALITY AS f(i,s,o); - r | i | s | o ----+----+---+--- - 1 | 11 | 1 | 1 - 1 | 12 | 2 | 2 - 1 | 13 | 3 | 3 - 2 | 12 | 4 | 1 - 2 | 13 | 5 | 2 - 3 | 13 | 6 | 1 -(6 rows) - -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_sql(11,10+r); - r | i | s ----+----+--- - 1 | 11 | 1 - 2 | 11 | 2 - 2 | 12 | 3 - 3 | 11 | 4 - 3 | 12 | 5 - 3 | 13 | 6 -(6 rows) - -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_sql(11,10+r) WITH ORDINALITY AS f(i,s,o); - r | i | s | o ----+----+---+--- - 1 | 11 | 1 | 1 - 2 | 11 | 2 | 1 - 2 | 12 | 3 | 2 - 3 | 11 | 4 | 1 - 3 | 12 | 5 | 2 - 3 | 13 | 6 | 3 -(6 rows) - -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), rngfunc_sql(r1,r2); - r1 | r2 | i | s -----+----+----+---- - 11 | 12 | 11 | 1 - 11 | 12 | 12 | 2 - 13 | 15 | 13 | 3 - 13 | 15 | 14 | 4 - 13 | 15 | 15 | 5 - 16 | 20 | 16 | 6 - 16 | 20 | 17 | 7 - 16 | 20 | 18 | 8 - 16 | 20 | 19 | 9 - 16 | 20 | 20 | 10 -(10 rows) - -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), rngfunc_sql(r1,r2) WITH ORDINALITY AS f(i,s,o); - r1 | r2 | i | s | o -----+----+----+----+--- - 11 | 12 | 11 | 1 | 1 - 11 | 12 | 12 | 2 | 2 - 13 | 15 | 13 | 3 | 1 - 13 | 15 | 14 | 4 | 2 - 13 | 15 | 15 | 5 | 3 - 16 | 20 | 16 | 6 | 1 - 16 | 20 | 17 | 7 | 2 - 16 | 20 | 18 | 8 | 3 - 16 | 20 | 19 | 9 | 4 - 16 | 20 | 20 | 10 | 5 -(10 rows) - -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_mat(10+r,13); - r | i | s ----+----+--- - 1 | 11 | 1 - 1 | 12 | 2 - 1 | 13 | 3 - 2 | 12 | 4 - 2 | 13 | 5 - 3 | 13 | 6 -(6 rows) - -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_mat(10+r,13) WITH ORDINALITY AS f(i,s,o); - r | i | s | o ----+----+---+--- - 1 | 11 | 1 | 1 - 1 | 12 | 2 | 2 - 1 | 13 | 3 | 3 - 2 | 12 | 4 | 1 - 2 | 13 | 5 | 2 - 3 | 13 | 6 | 1 -(6 rows) - -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM (VALUES (1),(2),(3)) v(r), 
rngfunc_mat(11,10+r); - r | i | s ----+----+--- - 1 | 11 | 1 - 2 | 11 | 2 - 2 | 12 | 3 - 3 | 11 | 4 - 3 | 12 | 5 - 3 | 13 | 6 -(6 rows) - -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_mat(11,10+r) WITH ORDINALITY AS f(i,s,o); - r | i | s | o ----+----+---+--- - 1 | 11 | 1 | 1 - 2 | 11 | 2 | 1 - 2 | 12 | 3 | 2 - 3 | 11 | 4 | 1 - 3 | 12 | 5 | 2 - 3 | 13 | 6 | 3 -(6 rows) - -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), rngfunc_mat(r1,r2); - r1 | r2 | i | s -----+----+----+---- - 11 | 12 | 11 | 1 - 11 | 12 | 12 | 2 - 13 | 15 | 13 | 3 - 13 | 15 | 14 | 4 - 13 | 15 | 15 | 5 - 16 | 20 | 16 | 6 - 16 | 20 | 17 | 7 - 16 | 20 | 18 | 8 - 16 | 20 | 19 | 9 - 16 | 20 | 20 | 10 -(10 rows) - -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), rngfunc_mat(r1,r2) WITH ORDINALITY AS f(i,s,o); - r1 | r2 | i | s | o -----+----+----+----+--- - 11 | 12 | 11 | 1 | 1 - 11 | 12 | 12 | 2 | 2 - 13 | 15 | 13 | 3 | 1 - 13 | 15 | 14 | 4 | 2 - 13 | 15 | 15 | 5 | 3 - 16 | 20 | 16 | 6 | 1 - 16 | 20 | 17 | 7 | 2 - 16 | 20 | 18 | 8 | 3 - 16 | 20 | 19 | 9 | 4 - 16 | 20 | 20 | 10 | 5 -(10 rows) - --- selective rescan of multiple functions: -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( rngfunc_sql(11,11), rngfunc_mat(10+r,13) ); - r | i | s | i | s ----+----+---+----+--- - 1 | 11 | 1 | 11 | 1 - 1 | | | 12 | 2 - 1 | | | 13 | 3 - 2 | 11 | 1 | 12 | 4 - 2 | | | 13 | 5 - 3 | 11 | 1 | 13 | 6 -(6 rows) - -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( rngfunc_sql(10+r,13), rngfunc_mat(11,11) ); - r | i | s | i | s ----+----+---+----+--- - 1 | 11 | 1 | 11 | 1 - 1 | 12 | 2 | | - 1 | 13 | 3 | | - 2 | 12 | 4 | 11 | 1 - 2 | 13 | 5 | | - 3 | 13 | 6 | 11 | 1 -(6 rows) - -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( rngfunc_sql(10+r,13), rngfunc_mat(10+r,13) ); - r | i | s | i | s ----+----+---+----+--- - 1 | 11 | 1 | 11 | 1 - 1 | 12 | 2 | 12 | 2 - 1 | 13 | 3 | 13 | 3 - 2 | 12 | 4 | 12 | 4 - 2 | 13 | 5 | 13 | 5 - 3 | 13 | 6 | 13 | 6 -(6 rows) - -SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); - setval | setval ---------+-------- - 1 | 1 -(1 row) - -SELECT * FROM generate_series(1,2) r1, generate_series(r1,3) r2, ROWS FROM( rngfunc_sql(10+r1,13), rngfunc_mat(10+r2,13) ); - r1 | r2 | i | s | i | s -----+----+----+----+----+--- - 1 | 1 | 11 | 1 | 11 | 1 - 1 | 1 | 12 | 2 | 12 | 2 - 1 | 1 | 13 | 3 | 13 | 3 - 1 | 2 | 11 | 4 | 12 | 4 - 1 | 2 | 12 | 5 | 13 | 5 - 1 | 2 | 13 | 6 | | - 1 | 3 | 11 | 7 | 13 | 6 - 1 | 3 | 12 | 8 | | - 1 | 3 | 13 | 9 | | - 2 | 2 | 12 | 10 | 12 | 7 - 2 | 2 | 13 | 11 | 13 | 8 - 2 | 3 | 12 | 12 | 13 | 9 - 2 | 3 | 13 | 13 | | -(13 rows) - -SELECT * FROM (VALUES (1),(2),(3)) v(r), generate_series(10+r,20-r) f(i); - r | i 
----+---- - 1 | 11 - 1 | 12 - 1 | 13 - 1 | 14 - 1 | 15 - 1 | 16 - 1 | 17 - 1 | 18 - 1 | 19 - 2 | 12 - 2 | 13 - 2 | 14 - 2 | 15 - 2 | 16 - 2 | 17 - 2 | 18 - 3 | 13 - 3 | 14 - 3 | 15 - 3 | 16 - 3 | 17 -(21 rows) - -SELECT * FROM (VALUES (1),(2),(3)) v(r), generate_series(10+r,20-r) WITH ORDINALITY AS f(i,o); - r | i | o ----+----+--- - 1 | 11 | 1 - 1 | 12 | 2 - 1 | 13 | 3 - 1 | 14 | 4 - 1 | 15 | 5 - 1 | 16 | 6 - 1 | 17 | 7 - 1 | 18 | 8 - 1 | 19 | 9 - 2 | 12 | 1 - 2 | 13 | 2 - 2 | 14 | 3 - 2 | 15 | 4 - 2 | 16 | 5 - 2 | 17 | 6 - 2 | 18 | 7 - 3 | 13 | 1 - 3 | 14 | 2 - 3 | 15 | 3 - 3 | 16 | 4 - 3 | 17 | 5 -(21 rows) - -SELECT * FROM (VALUES (1),(2),(3)) v(r), unnest(array[r*10,r*20,r*30]) f(i); - r | i ----+---- - 1 | 10 - 1 | 20 - 1 | 30 - 2 | 20 - 2 | 40 - 2 | 60 - 3 | 30 - 3 | 60 - 3 | 90 -(9 rows) - -SELECT * FROM (VALUES (1),(2),(3)) v(r), unnest(array[r*10,r*20,r*30]) WITH ORDINALITY AS f(i,o); - r | i | o ----+----+--- - 1 | 10 | 1 - 1 | 20 | 2 - 1 | 30 | 3 - 2 | 20 | 1 - 2 | 40 | 2 - 2 | 60 | 3 - 3 | 30 | 1 - 3 | 60 | 2 - 3 | 90 | 3 -(9 rows) - --- deep nesting -SELECT * FROM (VALUES (1),(2),(3)) v1(r1), - LATERAL (SELECT r1, * FROM (VALUES (10),(20),(30)) v2(r2) - LEFT JOIN generate_series(21,23) f(i) ON ((r2+i)<100) OFFSET 0) s1; - r1 | r1 | r2 | i -----+----+----+---- - 1 | 1 | 10 | 21 - 1 | 1 | 10 | 22 - 1 | 1 | 10 | 23 - 1 | 1 | 20 | 21 - 1 | 1 | 20 | 22 - 1 | 1 | 20 | 23 - 1 | 1 | 30 | 21 - 1 | 1 | 30 | 22 - 1 | 1 | 30 | 23 - 2 | 2 | 10 | 21 - 2 | 2 | 10 | 22 - 2 | 2 | 10 | 23 - 2 | 2 | 20 | 21 - 2 | 2 | 20 | 22 - 2 | 2 | 20 | 23 - 2 | 2 | 30 | 21 - 2 | 2 | 30 | 22 - 2 | 2 | 30 | 23 - 3 | 3 | 10 | 21 - 3 | 3 | 10 | 22 - 3 | 3 | 10 | 23 - 3 | 3 | 20 | 21 - 3 | 3 | 20 | 22 - 3 | 3 | 20 | 23 - 3 | 3 | 30 | 21 - 3 | 3 | 30 | 22 - 3 | 3 | 30 | 23 -(27 rows) - -SELECT * FROM (VALUES (1),(2),(3)) v1(r1), - LATERAL (SELECT r1, * FROM (VALUES (10),(20),(30)) v2(r2) - LEFT JOIN generate_series(20+r1,23) f(i) ON ((r2+i)<100) OFFSET 0) s1; - r1 | r1 | r2 | i -----+----+----+---- - 1 | 1 | 10 | 21 - 1 | 1 | 10 | 22 - 1 | 1 | 10 | 23 - 1 | 1 | 20 | 21 - 1 | 1 | 20 | 22 - 1 | 1 | 20 | 23 - 1 | 1 | 30 | 21 - 1 | 1 | 30 | 22 - 1 | 1 | 30 | 23 - 2 | 2 | 10 | 22 - 2 | 2 | 10 | 23 - 2 | 2 | 20 | 22 - 2 | 2 | 20 | 23 - 2 | 2 | 30 | 22 - 2 | 2 | 30 | 23 - 3 | 3 | 10 | 23 - 3 | 3 | 20 | 23 - 3 | 3 | 30 | 23 -(18 rows) - -SELECT * FROM (VALUES (1),(2),(3)) v1(r1), - LATERAL (SELECT r1, * FROM (VALUES (10),(20),(30)) v2(r2) - LEFT JOIN generate_series(r2,r2+3) f(i) ON ((r2+i)<100) OFFSET 0) s1; - r1 | r1 | r2 | i -----+----+----+---- - 1 | 1 | 10 | 10 - 1 | 1 | 10 | 11 - 1 | 1 | 10 | 12 - 1 | 1 | 10 | 13 - 1 | 1 | 20 | 20 - 1 | 1 | 20 | 21 - 1 | 1 | 20 | 22 - 1 | 1 | 20 | 23 - 1 | 1 | 30 | 30 - 1 | 1 | 30 | 31 - 1 | 1 | 30 | 32 - 1 | 1 | 30 | 33 - 2 | 2 | 10 | 10 - 2 | 2 | 10 | 11 - 2 | 2 | 10 | 12 - 2 | 2 | 10 | 13 - 2 | 2 | 20 | 20 - 2 | 2 | 20 | 21 - 2 | 2 | 20 | 22 - 2 | 2 | 20 | 23 - 2 | 2 | 30 | 30 - 2 | 2 | 30 | 31 - 2 | 2 | 30 | 32 - 2 | 2 | 30 | 33 - 3 | 3 | 10 | 10 - 3 | 3 | 10 | 11 - 3 | 3 | 10 | 12 - 3 | 3 | 10 | 13 - 3 | 3 | 20 | 20 - 3 | 3 | 20 | 21 - 3 | 3 | 20 | 22 - 3 | 3 | 20 | 23 - 3 | 3 | 30 | 30 - 3 | 3 | 30 | 31 - 3 | 3 | 30 | 32 - 3 | 3 | 30 | 33 -(36 rows) - -SELECT * FROM (VALUES (1),(2),(3)) v1(r1), - LATERAL (SELECT r1, * FROM (VALUES (10),(20),(30)) v2(r2) - LEFT JOIN generate_series(r1,2+r2/5) f(i) ON ((r2+i)<100) OFFSET 0) s1; - r1 | r1 | r2 | i -----+----+----+--- - 1 | 1 | 10 | 1 - 1 | 1 | 10 | 2 - 1 | 1 | 10 | 3 - 1 | 1 | 10 | 4 - 1 | 1 | 20 | 1 - 1 | 1 | 20 | 2 - 1 | 1 | 
20 | 3 - 1 | 1 | 20 | 4 - 1 | 1 | 20 | 5 - 1 | 1 | 20 | 6 - 1 | 1 | 30 | 1 - 1 | 1 | 30 | 2 - 1 | 1 | 30 | 3 - 1 | 1 | 30 | 4 - 1 | 1 | 30 | 5 - 1 | 1 | 30 | 6 - 1 | 1 | 30 | 7 - 1 | 1 | 30 | 8 - 2 | 2 | 10 | 2 - 2 | 2 | 10 | 3 - 2 | 2 | 10 | 4 - 2 | 2 | 20 | 2 - 2 | 2 | 20 | 3 - 2 | 2 | 20 | 4 - 2 | 2 | 20 | 5 - 2 | 2 | 20 | 6 - 2 | 2 | 30 | 2 - 2 | 2 | 30 | 3 - 2 | 2 | 30 | 4 - 2 | 2 | 30 | 5 - 2 | 2 | 30 | 6 - 2 | 2 | 30 | 7 - 2 | 2 | 30 | 8 - 3 | 3 | 10 | 3 - 3 | 3 | 10 | 4 - 3 | 3 | 20 | 3 - 3 | 3 | 20 | 4 - 3 | 3 | 20 | 5 - 3 | 3 | 20 | 6 - 3 | 3 | 30 | 3 - 3 | 3 | 30 | 4 - 3 | 3 | 30 | 5 - 3 | 3 | 30 | 6 - 3 | 3 | 30 | 7 - 3 | 3 | 30 | 8 -(45 rows) - --- check handling of FULL JOIN with multiple lateral references (bug #15741) -SELECT * -FROM (VALUES (1),(2)) v1(r1) - LEFT JOIN LATERAL ( - SELECT * - FROM generate_series(1, v1.r1) AS gs1 - LEFT JOIN LATERAL ( - SELECT * - FROM generate_series(1, gs1) AS gs2 - LEFT JOIN generate_series(1, gs2) AS gs3 ON TRUE - ) AS ss1 ON TRUE - FULL JOIN generate_series(1, v1.r1) AS gs4 ON FALSE - ) AS ss0 ON TRUE; - r1 | gs1 | gs2 | gs3 | gs4 -----+-----+-----+-----+----- - 1 | | | | 1 - 1 | 1 | 1 | 1 | - 2 | | | | 1 - 2 | | | | 2 - 2 | 1 | 1 | 1 | - 2 | 2 | 1 | 1 | - 2 | 2 | 2 | 1 | - 2 | 2 | 2 | 2 | -(8 rows) - -DROP FUNCTION rngfunc_sql(int,int); -DROP FUNCTION rngfunc_mat(int,int); -DROP SEQUENCE rngfunc_rescan_seq1; -DROP SEQUENCE rngfunc_rescan_seq2; --- --- Test cases involving OUT parameters --- -CREATE FUNCTION rngfunc(in f1 int, out f2 int) -AS 'select $1+1' LANGUAGE sql; -SELECT rngfunc(42); - rngfunc ---------- - 43 -(1 row) - -SELECT * FROM rngfunc(42); - f2 ----- - 43 -(1 row) - -SELECT * FROM rngfunc(42) AS p(x); - x ----- - 43 -(1 row) - --- explicit spec of return type is OK -CREATE OR REPLACE FUNCTION rngfunc(in f1 int, out f2 int) RETURNS int -AS 'select $1+1' LANGUAGE sql; --- error, wrong result type -CREATE OR REPLACE FUNCTION rngfunc(in f1 int, out f2 int) RETURNS float -AS 'select $1+1' LANGUAGE sql; -ERROR: function result type must be integer because of OUT parameters --- with multiple OUT params you must get a RECORD result -CREATE OR REPLACE FUNCTION rngfunc(in f1 int, out f2 int, out f3 text) RETURNS int -AS 'select $1+1' LANGUAGE sql; -ERROR: function result type must be record because of OUT parameters -CREATE OR REPLACE FUNCTION rngfunc(in f1 int, out f2 int, out f3 text) -RETURNS record -AS 'select $1+1' LANGUAGE sql; -ERROR: cannot change return type of existing function -HINT: Use DROP FUNCTION rngfunc(integer) first. 
-CREATE OR REPLACE FUNCTION rngfuncr(in f1 int, out f2 int, out text) -AS $$select $1-1, $1::text || 'z'$$ LANGUAGE sql; -SELECT f1, rngfuncr(f1) FROM int4_tbl; - f1 | rngfuncr --------------+---------------------------- - 0 | (-1,0z) - 123456 | (123455,123456z) - -123456 | (-123457,-123456z) - 2147483647 | (2147483646,2147483647z) - -2147483647 | (-2147483648,-2147483647z) -(5 rows) - -SELECT * FROM rngfuncr(42); - f2 | column2 -----+--------- - 41 | 42z -(1 row) - -SELECT * FROM rngfuncr(42) AS p(a,b); - a | b -----+----- - 41 | 42z -(1 row) - -CREATE OR REPLACE FUNCTION rngfuncb(in f1 int, inout f2 int, out text) -AS $$select $2-1, $1::text || 'z'$$ LANGUAGE sql; -SELECT f1, rngfuncb(f1, f1/2) FROM int4_tbl; - f1 | rngfuncb --------------+---------------------------- - 0 | (-1,0z) - 123456 | (61727,123456z) - -123456 | (-61729,-123456z) - 2147483647 | (1073741822,2147483647z) - -2147483647 | (-1073741824,-2147483647z) -(5 rows) - -SELECT * FROM rngfuncb(42, 99); - f2 | column2 -----+--------- - 98 | 42z -(1 row) - -SELECT * FROM rngfuncb(42, 99) AS p(a,b); - a | b -----+----- - 98 | 42z -(1 row) - --- Can reference function with or without OUT params for DROP, etc -DROP FUNCTION rngfunc(int); -DROP FUNCTION rngfuncr(in f2 int, out f1 int, out text); -DROP FUNCTION rngfuncb(in f1 int, inout f2 int); --- --- For my next trick, polymorphic OUT parameters --- -CREATE FUNCTION dup (f1 anyelement, f2 out anyelement, f3 out anyarray) -AS 'select $1, array[$1,$1]' LANGUAGE sql; -SELECT dup(22); - dup ----------------- - (22,"{22,22}") -(1 row) - -SELECT dup('xyz'); -- fails -ERROR: could not determine polymorphic type because input has type unknown -SELECT dup('xyz'::text); - dup -------------------- - (xyz,"{xyz,xyz}") -(1 row) - -SELECT * FROM dup('xyz'::text); - f2 | f3 ------+----------- - xyz | {xyz,xyz} -(1 row) - --- fails, as we are attempting to rename first argument -CREATE OR REPLACE FUNCTION dup (inout f2 anyelement, out f3 anyarray) -AS 'select $1, array[$1,$1]' LANGUAGE sql; -ERROR: cannot change name of input parameter "f1" -HINT: Use DROP FUNCTION dup(anyelement) first. -DROP FUNCTION dup(anyelement); --- equivalent behavior, though different name exposed for input arg -CREATE OR REPLACE FUNCTION dup (inout f2 anyelement, out f3 anyarray) -AS 'select $1, array[$1,$1]' LANGUAGE sql; -SELECT dup(22); - dup ----------------- - (22,"{22,22}") -(1 row) - -DROP FUNCTION dup(anyelement); --- fails, no way to deduce outputs -CREATE FUNCTION bad (f1 int, out f2 anyelement, out f3 anyarray) -AS 'select $1, array[$1,$1]' LANGUAGE sql; -ERROR: cannot determine result data type -DETAIL: A result of type anyelement requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. 
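
For the polymorphic cases tested above, the rule is that every polymorphic output must be deducible from at least one polymorphic input. A sketch with invented names, assuming nothing beyond the behavior the hunk demonstrates:

    CREATE FUNCTION wrap(x anyelement, OUT v anyelement, OUT arr anyarray)
      AS 'SELECT $1, ARRAY[$1, $1]' LANGUAGE sql;
    SELECT * FROM wrap(7);          -- v is integer, arr is integer[]
    SELECT * FROM wrap('a'::text);  -- v is text, arr is text[]
    -- With no polymorphic input (e.g. only "f1 int"), the same outputs are
    -- rejected with "cannot determine result data type", as shown above.
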
-CREATE FUNCTION dup (f1 anycompatible, f2 anycompatiblearray, f3 out anycompatible, f4 out anycompatiblearray) -AS 'select $1, $2' LANGUAGE sql; -SELECT dup(22, array[44]); - dup ------------ - (22,{44}) -(1 row) - -SELECT dup(4.5, array[44]); - dup ------------- - (4.5,{44}) -(1 row) - -SELECT dup(22, array[44::bigint]); - dup ------------ - (22,{44}) -(1 row) - -SELECT *, pg_typeof(f3), pg_typeof(f4) FROM dup(22, array[44::bigint]); - f3 | f4 | pg_typeof | pg_typeof -----+------+-----------+----------- - 22 | {44} | bigint | bigint[] -(1 row) - -DROP FUNCTION dup(f1 anycompatible, f2 anycompatiblearray); -CREATE FUNCTION dup (f1 anycompatiblerange, f2 out anycompatible, f3 out anycompatiblearray, f4 out anycompatiblerange) -AS 'select lower($1), array[lower($1), upper($1)], $1' LANGUAGE sql; -SELECT dup(int4range(4,7)); - dup ---------------------- - (4,"{4,7}","[4,7)") -(1 row) - -SELECT dup(numrange(4,7)); - dup ---------------------- - (4,"{4,7}","[4,7)") -(1 row) - -SELECT dup(textrange('aaa', 'bbb')); - dup -------------------------------- - (aaa,"{aaa,bbb}","[aaa,bbb)") -(1 row) - -DROP FUNCTION dup(f1 anycompatiblerange); --- fails, no way to deduce outputs -CREATE FUNCTION bad (f1 anyarray, out f2 anycompatible, out f3 anycompatiblearray) -AS 'select $1, array[$1,$1]' LANGUAGE sql; -ERROR: cannot determine result data type -DETAIL: A result of type anycompatible requires at least one input of type anycompatible, anycompatiblearray, anycompatiblenonarray, anycompatiblerange, or anycompatiblemultirange. --- --- table functions --- -CREATE OR REPLACE FUNCTION rngfunc() -RETURNS TABLE(a int) -AS $$ SELECT a FROM generate_series(1,5) a(a) $$ LANGUAGE sql; -SELECT * FROM rngfunc(); - a ---- - 1 - 2 - 3 - 4 - 5 -(5 rows) - -DROP FUNCTION rngfunc(); -CREATE OR REPLACE FUNCTION rngfunc(int) -RETURNS TABLE(a int, b int) -AS $$ SELECT a, b - FROM generate_series(1,$1) a(a), - generate_series(1,$1) b(b) $$ LANGUAGE sql; -SELECT * FROM rngfunc(3); - a | b ----+--- - 1 | 1 - 1 | 2 - 1 | 3 - 2 | 1 - 2 | 2 - 2 | 3 - 3 | 1 - 3 | 2 - 3 | 3 -(9 rows) - -DROP FUNCTION rngfunc(int); --- case that causes change of typmod knowledge during inlining -CREATE OR REPLACE FUNCTION rngfunc() -RETURNS TABLE(a varchar(5)) -AS $$ SELECT 'hello'::varchar(5) $$ LANGUAGE sql STABLE; -SELECT * FROM rngfunc() GROUP BY 1; - a -------- - hello -(1 row) - -DROP FUNCTION rngfunc(); --- --- some tests on SQL functions with RETURNING --- -create temp table tt(f1 serial, data text); -create function insert_tt(text) returns int as -$$ insert into tt(data) values($1) returning f1 $$ -language sql; -select insert_tt('foo'); - insert_tt ------------ - 1 -(1 row) - -select insert_tt('bar'); - insert_tt ------------ - 2 -(1 row) - -select * from tt; - f1 | data -----+------ - 1 | foo - 2 | bar -(2 rows) - --- insert will execute to completion even if function needs just 1 row -create or replace function insert_tt(text) returns int as -$$ insert into tt(data) values($1),($1||$1) returning f1 $$ -language sql; -select insert_tt('fool'); - insert_tt ------------ - 3 -(1 row) - -select * from tt; - f1 | data -----+---------- - 1 | foo - 2 | bar - 3 | fool - 4 | foolfool -(4 rows) - --- setof does what's expected -create or replace function insert_tt2(text,text) returns setof int as -$$ insert into tt(data) values($1),($2) returning f1 $$ -language sql; -select insert_tt2('foolish','barrish'); - insert_tt2 ------------- - 5 - 6 -(2 rows) - -select * from insert_tt2('baz','quux'); - insert_tt2 ------------- - 7 - 8 -(2 rows) - -select 
* from tt; - f1 | data -----+---------- - 1 | foo - 2 | bar - 3 | fool - 4 | foolfool - 5 | foolish - 6 | barrish - 7 | baz - 8 | quux -(8 rows) - --- limit doesn't prevent execution to completion -select insert_tt2('foolish','barrish') limit 1; - insert_tt2 ------------- - 9 -(1 row) - -select * from tt; - f1 | data -----+---------- - 1 | foo - 2 | bar - 3 | fool - 4 | foolfool - 5 | foolish - 6 | barrish - 7 | baz - 8 | quux - 9 | foolish - 10 | barrish -(10 rows) - --- triggers will fire, too -create function noticetrigger() returns trigger as $$ -begin - raise notice 'noticetrigger % %', new.f1, new.data; - return null; -end $$ language plpgsql; -create trigger tnoticetrigger after insert on tt for each row -execute procedure noticetrigger(); -select insert_tt2('foolme','barme') limit 1; -NOTICE: noticetrigger 11 foolme -NOTICE: noticetrigger 12 barme - insert_tt2 ------------- - 11 -(1 row) - -select * from tt; - f1 | data -----+---------- - 1 | foo - 2 | bar - 3 | fool - 4 | foolfool - 5 | foolish - 6 | barrish - 7 | baz - 8 | quux - 9 | foolish - 10 | barrish - 11 | foolme - 12 | barme -(12 rows) - --- and rules work -create temp table tt_log(f1 int, data text); -create rule insert_tt_rule as on insert to tt do also - insert into tt_log values(new.*); -select insert_tt2('foollog','barlog') limit 1; -NOTICE: noticetrigger 13 foollog -NOTICE: noticetrigger 14 barlog - insert_tt2 ------------- - 13 -(1 row) - -select * from tt; - f1 | data -----+---------- - 1 | foo - 2 | bar - 3 | fool - 4 | foolfool - 5 | foolish - 6 | barrish - 7 | baz - 8 | quux - 9 | foolish - 10 | barrish - 11 | foolme - 12 | barme - 13 | foollog - 14 | barlog -(14 rows) - --- note that nextval() gets executed a second time in the rule expansion, --- which is expected. -select * from tt_log; - f1 | data -----+--------- - 15 | foollog - 16 | barlog -(2 rows) - --- test case for a whole-row-variable bug -create function rngfunc1(n integer, out a text, out b text) - returns setof record - language sql - as $$ select 'foo ' || i, 'bar ' || i from generate_series(1,$1) i $$; -set work_mem='64kB'; -select t.a, t, t.a from rngfunc1(10000) t limit 1; - a | t | a --------+-------------------+------- - foo 1 | ("foo 1","bar 1") | foo 1 -(1 row) - -reset work_mem; -select t.a, t, t.a from rngfunc1(10000) t limit 1; - a | t | a --------+-------------------+------- - foo 1 | ("foo 1","bar 1") | foo 1 -(1 row) - -drop function rngfunc1(n integer); --- test use of SQL functions returning record --- this is supported in some cases where the query doesn't specify --- the actual record type ... 
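
In outline (a sketch only; two_cols is an invented name): a function returning bare record has no fixed output shape, so a FROM-clause call normally needs a column definition list, while a select-list call yields a single composite column:

    CREATE FUNCTION two_cols() RETURNS record
      AS $$ SELECT 1, 'one'::text $$ LANGUAGE sql;
    SELECT two_cols();                             -- one composite column: (1,one)
    SELECT * FROM two_cols() AS t(n int, s text);  -- works: caller supplies the shape
    SELECT * FROM two_cols();                      -- fails: column definition list required
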
-create function array_to_set(anyarray) returns setof record as $$ - select i AS "index", $1[i] AS "value" from generate_subscripts($1, 1) i -$$ language sql strict immutable; -select array_to_set(array['one', 'two']); - array_to_set --------------- - (1,one) - (2,two) -(2 rows) - -select * from array_to_set(array['one', 'two']) as t(f1 int,f2 text); - f1 | f2 -----+----- - 1 | one - 2 | two -(2 rows) - -select * from array_to_set(array['one', 'two']); -- fail -ERROR: a column definition list is required for functions returning "record" -LINE 1: select * from array_to_set(array['one', 'two']); - ^ --- after-the-fact coercion of the columns is now possible, too -select * from array_to_set(array['one', 'two']) as t(f1 numeric(4,2),f2 text); - f1 | f2 -------+----- - 1.00 | one - 2.00 | two -(2 rows) - --- and if it doesn't work, you get a compile-time not run-time error -select * from array_to_set(array['one', 'two']) as t(f1 point,f2 text); -ERROR: return type mismatch in function declared to return record -DETAIL: Final statement returns integer instead of point at column 1. -CONTEXT: SQL function "array_to_set" during startup --- with "strict", this function can't be inlined in FROM -explain (verbose, costs off) - select * from array_to_set(array['one', 'two']) as t(f1 numeric(4,2),f2 text); - QUERY PLAN ----------------------------------------------------- - Function Scan on public.array_to_set t - Output: f1, f2 - Function Call: array_to_set('{one,two}'::text[]) -(3 rows) - --- but without, it can be: -create or replace function array_to_set(anyarray) returns setof record as $$ - select i AS "index", $1[i] AS "value" from generate_subscripts($1, 1) i -$$ language sql immutable; -select array_to_set(array['one', 'two']); - array_to_set --------------- - (1,one) - (2,two) -(2 rows) - -select * from array_to_set(array['one', 'two']) as t(f1 int,f2 text); - f1 | f2 -----+----- - 1 | one - 2 | two -(2 rows) - -select * from array_to_set(array['one', 'two']) as t(f1 numeric(4,2),f2 text); - f1 | f2 -------+----- - 1.00 | one - 2.00 | two -(2 rows) - -select * from array_to_set(array['one', 'two']) as t(f1 point,f2 text); -ERROR: return type mismatch in function declared to return record -DETAIL: Final statement returns integer instead of point at column 1. 
-CONTEXT:  SQL function "array_to_set" during inlining
-explain (verbose, costs off)
-  select * from array_to_set(array['one', 'two']) as t(f1 numeric(4,2),f2 text);
-                          QUERY PLAN
---------------------------------------------------------------
- Function Scan on pg_catalog.generate_subscripts i
-   Output: i.i, ('{one,two}'::text[])[i.i]
-   Function Call: generate_subscripts('{one,two}'::text[], 1)
-(3 rows)
-
-create temp table rngfunc(f1 int8, f2 int8);
-create function testrngfunc() returns record as $$
-  insert into rngfunc values (1,2) returning *;
-$$ language sql;
-select testrngfunc();
- testrngfunc 
--------------
- (1,2)
-(1 row)
-
-select * from testrngfunc() as t(f1 int8,f2 int8);
- f1 | f2 
-----+----
-  1 |  2
-(1 row)
-
-select * from testrngfunc(); -- fail
-ERROR:  a column definition list is required for functions returning "record"
-LINE 1: select * from testrngfunc();
-                      ^
-drop function testrngfunc();
-create function testrngfunc() returns setof record as $$
-  insert into rngfunc values (1,2), (3,4) returning *;
-$$ language sql;
-select testrngfunc();
- testrngfunc 
--------------
- (1,2)
- (3,4)
-(2 rows)
-
-select * from testrngfunc() as t(f1 int8,f2 int8);
- f1 | f2 
-----+----
-  1 |  2
-  3 |  4
-(2 rows)
-
-select * from testrngfunc(); -- fail
-ERROR:  a column definition list is required for functions returning "record"
-LINE 1: select * from testrngfunc();
-                      ^
-drop function testrngfunc();
--- Check that typmod imposed by a composite type is honored
-create type rngfunc_type as (f1 numeric(35,6), f2 numeric(35,2));
-create function testrngfunc() returns rngfunc_type as $$
-  select 7.136178319899999964, 7.136178319899999964;
-$$ language sql immutable;
-explain (verbose, costs off)
-select testrngfunc();
-                 QUERY PLAN
---------------------------------------------
- Result
-   Output: '(7.136178,7.14)'::rngfunc_type
-(2 rows)
-
-select testrngfunc();
-   testrngfunc
------------------
- (7.136178,7.14)
-(1 row)
-
-explain (verbose, costs off)
-select * from testrngfunc();
-                    QUERY PLAN
----------------------------------------------------
- Function Scan on testrngfunc
-   Output: f1, f2
-   Function Call: '(7.136178,7.14)'::rngfunc_type
-(3 rows)
-
-select * from testrngfunc();
-    f1    |  f2  
-----------+------
- 7.136178 | 7.14
-(1 row)
-
-create or replace function testrngfunc() returns rngfunc_type as $$
-  select 7.136178319899999964, 7.136178319899999964;
-$$ language sql volatile;
-explain (verbose, costs off)
-select testrngfunc();
-       QUERY PLAN
--------------------------
- Result
-   Output: testrngfunc()
-(2 rows)
-
-select testrngfunc();
-   testrngfunc
------------------
- (7.136178,7.14)
-(1 row)
-
-explain (verbose, costs off)
-select * from testrngfunc();
-             QUERY PLAN
--------------------------------------
- Function Scan on public.testrngfunc
-   Output: f1, f2
-   Function Call: testrngfunc()
-(3 rows)
-
-select * from testrngfunc();
-    f1    |  f2  
-----------+------
- 7.136178 | 7.14
-(1 row)
-
-drop function testrngfunc();
-create function testrngfunc() returns setof rngfunc_type as $$
-  select 7.136178319899999964, 7.136178319899999964;
-$$ language sql immutable;
-explain (verbose, costs off)
-select testrngfunc();
-       QUERY PLAN
--------------------------
- ProjectSet
-   Output: testrngfunc()
-   ->  Result
-(3 rows)
-
-select testrngfunc();
-   testrngfunc
------------------
- (7.136178,7.14)
-(1 row)
-
-explain (verbose, costs off)
-select * from testrngfunc();
-                       QUERY PLAN
---------------------------------------------------------
- Result
-   Output: 7.136178::numeric(35,6), 7.14::numeric(35,2)
-(2 rows)
-
-select * from testrngfunc();
-    f1    |  f2  
-----------+------
- 7.136178 | 7.14
-(1 row)
-
-create or replace function testrngfunc() returns setof rngfunc_type as $$
-  select 7.136178319899999964, 7.136178319899999964;
-$$ language sql volatile;
-explain (verbose, costs off)
-select testrngfunc();
-       QUERY PLAN
--------------------------
- ProjectSet
-   Output: testrngfunc()
-   ->  Result
-(3 rows)
-
-select testrngfunc();
-   testrngfunc
------------------
- (7.136178,7.14)
-(1 row)
-
-explain (verbose, costs off)
-select * from testrngfunc();
-             QUERY PLAN
--------------------------------------
- Function Scan on public.testrngfunc
-   Output: f1, f2
-   Function Call: testrngfunc()
-(3 rows)
-
-select * from testrngfunc();
-    f1    |  f2  
-----------+------
- 7.136178 | 7.14
-(1 row)
-
-create or replace function testrngfunc() returns setof rngfunc_type as $$
-  select 1, 2 union select 3, 4 order by 1;
-$$ language sql immutable;
-explain (verbose, costs off)
-select testrngfunc();
-       QUERY PLAN
--------------------------
- ProjectSet
-   Output: testrngfunc()
-   ->  Result
-(3 rows)
-
-select testrngfunc();
-   testrngfunc
------------------
- (1.000000,2.00)
- (3.000000,4.00)
-(2 rows)
-
-explain (verbose, costs off)
-select * from testrngfunc();
-                        QUERY PLAN
-----------------------------------------------------------
- Subquery Scan on "*SELECT*"
-   Output: "*SELECT*"."?column?", "*SELECT*"."?column?_1"
-   ->  Unique
-         Output: (1), (2)
-         ->  Sort
-               Output: (1), (2)
-               Sort Key: (1), (2)
-               ->  Append
-                     ->  Result
-                           Output: 1, 2
-                     ->  Result
-                           Output: 3, 4
-(12 rows)
-
-select * from testrngfunc();
-    f1    |  f2  
-----------+------
- 1.000000 | 2.00
- 3.000000 | 4.00
-(2 rows)
-
--- Check a couple of error cases while we're here
-select * from testrngfunc() as t(f1 int8,f2 int8); -- fail, composite result
-ERROR:  a column definition list is redundant for a function returning a named composite type
-LINE 1: select * from testrngfunc() as t(f1 int8,f2 int8);
-                      ^
-select * from pg_get_keywords() as t(f1 int8,f2 int8); -- fail, OUT params
-ERROR:  a column definition list is redundant for a function with OUT parameters
-LINE 1: select * from pg_get_keywords() as t(f1 int8,f2 int8);
-                      ^
-select * from sin(3) as t(f1 int8,f2 int8); -- fail, scalar result type
-ERROR:  a column definition list is only allowed for functions returning "record"
-LINE 1: select * from sin(3) as t(f1 int8,f2 int8);
-                      ^
-drop type rngfunc_type cascade;
-NOTICE:  drop cascades to function testrngfunc()
---
--- Check some cases involving added/dropped columns in a rowtype result
---
-create temp table users (userid text, seq int, email text, todrop bool, moredrop int, enabled bool);
-insert into users values ('id',1,'email',true,11,true);
-insert into users values ('id2',2,'email2',true,12,true);
-alter table users drop column todrop;
-create or replace function get_first_user() returns users as
-$$ SELECT * FROM users ORDER BY userid LIMIT 1; $$
-language sql stable;
-SELECT get_first_user();
-  get_first_user
--------------------
- (id,1,email,11,t)
-(1 row)
-
-SELECT * FROM get_first_user();
- userid | seq | email | moredrop | enabled 
---------+-----+-------+----------+---------
- id     |   1 | email |       11 | t
-(1 row)
-
-create or replace function get_users() returns setof users as
-$$ SELECT * FROM users ORDER BY userid; $$
-language sql stable;
-SELECT get_users();
-      get_users
----------------------
- (id,1,email,11,t)
- (id2,2,email2,12,t)
-(2 rows)
-
-SELECT * FROM get_users();
- userid | seq | email  | moredrop | enabled 
---------+-----+--------+----------+---------
- id     |   1 | email  |       11 | t
- id2    |   2 | email2 |       12 | t
-(2 rows)
-
-SELECT * FROM get_users() WITH ORDINALITY; -- make sure ordinality copes
- userid | seq | email  | moredrop | enabled | ordinality 
---------+-----+--------+----------+---------+------------
- id     |   1 | email  |       11 | t       |          1
- id2    |   2 | email2 |       12 | t       |          2
-(2 rows)
-
--- multiple functions vs. dropped columns
-SELECT * FROM ROWS FROM(generate_series(10,11), get_users()) WITH ORDINALITY;
- generate_series | userid | seq | email  | moredrop | enabled | ordinality 
------------------+--------+-----+--------+----------+---------+------------
-              10 | id     |   1 | email  |       11 | t       |          1
-              11 | id2    |   2 | email2 |       12 | t       |          2
-(2 rows)
-
-SELECT * FROM ROWS FROM(get_users(), generate_series(10,11)) WITH ORDINALITY;
- userid | seq | email  | moredrop | enabled | generate_series | ordinality 
---------+-----+--------+----------+---------+-----------------+------------
- id     |   1 | email  |       11 | t       |              10 |          1
- id2    |   2 | email2 |       12 | t       |              11 |          2
-(2 rows)
-
--- check that we can cope with post-parsing changes in rowtypes
-create temp view usersview as
-SELECT * FROM ROWS FROM(get_users(), generate_series(10,11)) WITH ORDINALITY;
-select * from usersview;
- userid | seq | email  | moredrop | enabled | generate_series | ordinality 
---------+-----+--------+----------+---------+-----------------+------------
- id     |   1 | email  |       11 | t       |              10 |          1
- id2    |   2 | email2 |       12 | t       |              11 |          2
-(2 rows)
-
-alter table users add column junk text;
-select * from usersview;
- userid | seq | email  | moredrop | enabled | generate_series | ordinality 
---------+-----+--------+----------+---------+-----------------+------------
- id     |   1 | email  |       11 | t       |              10 |          1
- id2    |   2 | email2 |       12 | t       |              11 |          2
-(2 rows)
-
-alter table users drop column moredrop; -- fail, view has reference
-ERROR:  cannot drop column moredrop of table users because other objects depend on it
-DETAIL:  view usersview depends on column moredrop of table users
-HINT:  Use DROP ... CASCADE to drop the dependent objects too.
--- We used to have a bug that would allow the above to succeed, posing
--- hazards for later execution of the view.  Check that the internal
--- defenses for those hazards haven't bit-rotted, in case some other
--- bug with similar symptoms emerges.
-begin;
--- destroy the dependency entry that prevents the DROP:
-delete from pg_depend where
-  objid = (select oid from pg_rewrite
-           where ev_class = 'usersview'::regclass and rulename = '_RETURN')
-  and refobjsubid = 5
-returning pg_describe_object(classid, objid, objsubid) as obj,
-  pg_describe_object(refclassid, refobjid, refobjsubid) as ref,
-  deptype;
-              obj               |              ref               | deptype 
---------------------------------+--------------------------------+---------
- rule _RETURN on view usersview | column moredrop of table users | n
-(1 row)
-
-alter table users drop column moredrop;
-select * from usersview; -- expect clean failure
-ERROR:  attribute 5 of type record has been dropped
-rollback;
-alter table users alter column seq type numeric; -- fail, view has reference
-ERROR:  cannot alter type of a column used by a view or rule
-DETAIL:  rule _RETURN on view usersview depends on column "seq"
--- likewise, check we don't crash if the dependency goes wrong
-begin;
--- destroy the dependency entry that prevents the ALTER:
-delete from pg_depend where
-  objid = (select oid from pg_rewrite
-           where ev_class = 'usersview'::regclass and rulename = '_RETURN')
-  and refobjsubid = 2
-returning pg_describe_object(classid, objid, objsubid) as obj,
-  pg_describe_object(refclassid, refobjid, refobjsubid) as ref,
-  deptype;
-              obj               |            ref            | deptype 
---------------------------------+---------------------------+---------
- rule _RETURN on view usersview | column seq of table users | n
-(1 row)
-
-alter table users alter column seq type numeric;
-select * from usersview; -- expect clean failure
-ERROR:  attribute 2 of type record has wrong type
-DETAIL:  Table has type numeric, but query expects integer.
-rollback;
-drop view usersview;
-drop function get_first_user();
-drop function get_users();
-drop table users;
--- check behavior with type coercion required for a set-op
-create or replace function rngfuncbar() returns setof text as
-$$ select 'foo'::varchar union all select 'bar'::varchar ; $$
-language sql stable;
-select rngfuncbar();
- rngfuncbar 
-------------
- foo
- bar
-(2 rows)
-
-select * from rngfuncbar();
- rngfuncbar 
-------------
- foo
- bar
-(2 rows)
-
--- this function is now inlinable, too:
-explain (verbose, costs off) select * from rngfuncbar();
-                   QUERY PLAN
-------------------------------------------------
- Result
-   Output: ('foo'::character varying)
-   ->  Append
-         ->  Result
-               Output: 'foo'::character varying
-         ->  Result
-               Output: 'bar'::character varying
-(7 rows)
-
-drop function rngfuncbar();
--- check handling of a SQL function with multiple OUT params (bug #5777)
-create or replace function rngfuncbar(out integer, out numeric) as
-$$ select (1, 2.1) $$ language sql;
-select * from rngfuncbar();
- column1 | column2 
----------+---------
-       1 |     2.1
-(1 row)
-
-create or replace function rngfuncbar(out integer, out numeric) as
-$$ select (1, 2) $$ language sql;
-select * from rngfuncbar(); -- fail
-ERROR:  function return row and query-specified return row do not match
-DETAIL:  Returned type integer at ordinal position 2, but query expects numeric.
-create or replace function rngfuncbar(out integer, out numeric) as
-$$ select (1, 2.1, 3) $$ language sql;
-select * from rngfuncbar(); -- fail
-ERROR:  function return row and query-specified return row do not match
-DETAIL:  Returned row contains 3 attributes, but query expects 2.
-drop function rngfuncbar();
--- check whole-row-Var handling in nested lateral functions (bug #11703)
-create function extractq2(t int8_tbl) returns int8 as $$
-  select t.q2
-$$ language sql immutable;
-explain (verbose, costs off)
-select x from int8_tbl, extractq2(int8_tbl) f(x);
-                QUERY PLAN
-------------------------------------------
- Nested Loop
-   Output: f.x
-   ->  Seq Scan on public.int8_tbl
-         Output: int8_tbl.q1, int8_tbl.q2
-   ->  Function Scan on f
-         Output: f.x
-         Function Call: int8_tbl.q2
-(7 rows)
-
-select x from int8_tbl, extractq2(int8_tbl) f(x);
-         x
--------------------
-               456
-  4567890123456789
-               123
-  4567890123456789
- -4567890123456789
-(5 rows)
-
-create function extractq2_2(t int8_tbl) returns table(ret1 int8) as $$
-  select extractq2(t) offset 0
-$$ language sql immutable;
-explain (verbose, costs off)
-select x from int8_tbl, extractq2_2(int8_tbl) f(x);
-            QUERY PLAN
------------------------------------
- Nested Loop
-   Output: ((int8_tbl.*).q2)
-   ->  Seq Scan on public.int8_tbl
-         Output: int8_tbl.*
-   ->  Result
-         Output: (int8_tbl.*).q2
-(6 rows)
-
-select x from int8_tbl, extractq2_2(int8_tbl) f(x);
-         x
--------------------
-               456
-  4567890123456789
-               123
-  4567890123456789
- -4567890123456789
-(5 rows)
-
--- without the "offset 0", this function gets optimized quite differently
-create function extractq2_2_opt(t int8_tbl) returns table(ret1 int8) as $$
-  select extractq2(t)
-$$ language sql immutable;
-explain (verbose, costs off)
-select x from int8_tbl, extractq2_2_opt(int8_tbl) f(x);
-          QUERY PLAN
------------------------------
- Seq Scan on public.int8_tbl
-   Output: int8_tbl.q2
-(2 rows)
-
-select x from int8_tbl, extractq2_2_opt(int8_tbl) f(x);
-         x
--------------------
-               456
-  4567890123456789
-               123
-  4567890123456789
- -4567890123456789
-(5 rows)
-
--- check handling of nulls in SRF results (bug #7808)
-create type rngfunc2 as (a integer, b text);
-select *, row_to_json(u) from unnest(array[(1,'foo')::rngfunc2, null::rngfunc2]) u;
- a |  b  |     row_to_json
----+-----+---------------------
- 1 | foo | {"a":1,"b":"foo"}
-   |     | {"a":null,"b":null}
-(2 rows)
-
-select *, row_to_json(u) from unnest(array[null::rngfunc2, null::rngfunc2]) u;
- a | b |     row_to_json
----+---+---------------------
-   |   | {"a":null,"b":null}
-   |   | {"a":null,"b":null}
-(2 rows)
-
-select *, row_to_json(u) from unnest(array[null::rngfunc2, (1,'foo')::rngfunc2, null::rngfunc2]) u;
- a |  b  |     row_to_json
----+-----+---------------------
-   |     | {"a":null,"b":null}
- 1 | foo | {"a":1,"b":"foo"}
-   |     | {"a":null,"b":null}
-(3 rows)
-
-select *, row_to_json(u) from unnest(array[]::rngfunc2[]) u;
- a | b | row_to_json 
----+---+-------------
-(0 rows)
-
-drop type rngfunc2;
--- check handling of functions pulled up into function RTEs (bug #17227)
-explain (verbose, costs off)
-select * from
-  (select jsonb_path_query_array(module->'lectures', '$[*]') as lecture
-   from unnest(array['{"lectures": [{"id": "1"}]}'::jsonb])
-     as unnested_modules(module)) as ss,
-  jsonb_to_recordset(ss.lecture) as j (id text);
-                                                                        QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------
- Nested Loop
-   Output: jsonb_path_query_array((unnested_modules.module -> 'lectures'::text), '$[*]'::jsonpath, '{}'::jsonb, false), j.id
-   ->  Function Scan on pg_catalog.unnest unnested_modules
-         Output: unnested_modules.module
-         Function Call: unnest('{"{\"lectures\": [{\"id\": \"1\"}]}"}'::jsonb[])
-   ->  Function Scan on pg_catalog.jsonb_to_recordset j
-         Output: j.id
-         Function Call: jsonb_to_recordset(jsonb_path_query_array((unnested_modules.module -> 'lectures'::text), '$[*]'::jsonpath, '{}'::jsonb, false))
-(8 rows)
-
-select * from
-  (select jsonb_path_query_array(module->'lectures', '$[*]') as lecture
-   from unnest(array['{"lectures": [{"id": "1"}]}'::jsonb])
-     as unnested_modules(module)) as ss,
-  jsonb_to_recordset(ss.lecture) as j (id text);
-    lecture    | id 
----------------+----
- [{"id": "1"}] | 1
-(1 row)
-
--- check detection of mismatching record types with a const-folded expression
-with a(b) as (values (row(1,2,3)))
-select * from a, coalesce(b) as c(d int, e int); -- fail
-ERROR:  function return row and query-specified return row do not match
-DETAIL:  Returned row contains 3 attributes, but query expects 2.
-with a(b) as (values (row(1,2,3)))
-select * from a, coalesce(b) as c(d int, e int, f int, g int); -- fail
-ERROR:  function return row and query-specified return row do not match
-DETAIL:  Returned row contains 3 attributes, but query expects 4.
-with a(b) as (values (row(1,2,3)))
-select * from a, coalesce(b) as c(d int, e int, f float); -- fail
-ERROR:  function return row and query-specified return row do not match
-DETAIL:  Returned type integer at ordinal position 3, but query expects double precision.
-select * from int8_tbl, coalesce(row(1)) as (a int, b int); -- fail
-ERROR:  function return row and query-specified return row do not match
-DETAIL:  Returned row contains 1 attribute, but query expects 2.
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+	Is the server running locally and accepting connections on that socket?
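The rangefuncs hunk above was cut short by the lost connection while it was exercising the column-definition-list rules for functions returning "record". For reference, the rule itself is easy to reproduce by hand; a minimal sketch, with hypothetical names that are not part of the test suite:

    -- RETURNS record gives the parser no row type until the call site
    -- supplies one, so a column definition list is mandatory there.
    create function two_ints() returns record
      language sql as $$ select 1, 2 $$;

    select * from two_ints() as t(a int, b int);  -- OK: definition list supplies the row type
    select * from two_ints();                     -- ERROR: a column definition list is required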
diff -U3 /Users/admin/pgsql/src/test/regress/expected/prepare.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/prepare.out
--- /Users/admin/pgsql/src/test/regress/expected/prepare.out	2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/prepare.out	2024-12-13 13:20:12
@@ -1,194 +1,2 @@
--- Regression tests for prepareable statements. We query the content
--- of the pg_prepared_statements view as prepared statements are
--- created and removed.
-SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements;
- name | statement | parameter_types | result_types 
-------+-----------+-----------------+--------------
-(0 rows)
-
-PREPARE q1 AS SELECT 1 AS a;
-EXECUTE q1;
- a 
----
- 1
-(1 row)
-
-SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements;
- name |          statement           | parameter_types | result_types 
-------+------------------------------+-----------------+--------------
- q1   | PREPARE q1 AS SELECT 1 AS a; | {}              | {integer}
-(1 row)
-
--- should fail
-PREPARE q1 AS SELECT 2;
-ERROR:  prepared statement "q1" already exists
--- should succeed
-DEALLOCATE q1;
-PREPARE q1 AS SELECT 2;
-EXECUTE q1;
- ?column? 
-----------
-        2
-(1 row)
-
-PREPARE q2 AS SELECT 2 AS b;
-SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements;
- name |          statement           | parameter_types | result_types 
-------+------------------------------+-----------------+--------------
- q1   | PREPARE q1 AS SELECT 2;      | {}              | {integer}
- q2   | PREPARE q2 AS SELECT 2 AS b; | {}              | {integer}
-(2 rows)
-
--- sql92 syntax
-DEALLOCATE PREPARE q1;
-SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements;
- name |          statement           | parameter_types | result_types 
-------+------------------------------+-----------------+--------------
- q2   | PREPARE q2 AS SELECT 2 AS b; | {}              | {integer}
-(1 row)
-
-DEALLOCATE PREPARE q2;
--- the view should return the empty set again
-SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements;
- name | statement | parameter_types | result_types 
-------+-----------+-----------------+--------------
-(0 rows)
-
--- parameterized queries
-PREPARE q2(text) AS
-	SELECT datname, datistemplate, datallowconn
-	FROM pg_database WHERE datname = $1;
-EXECUTE q2('postgres');
- datname  | datistemplate | datallowconn 
-----------+---------------+--------------
- postgres | f             | t
-(1 row)
-
-PREPARE q3(text, int, float, boolean, smallint) AS
-	SELECT * FROM tenk1 WHERE string4 = $1 AND (four = $2 OR
-	ten = $3::bigint OR true = $4 OR odd = $5::int)
-	ORDER BY unique1;
-EXECUTE q3('AAAAxx', 5::smallint, 10.5::float, false, 4::bigint);
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
- 2 | 2716 | 0 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 4 | 5 | CAAAAA | MAEAAA | AAAAxx
- 102 | 612 | 0 | 2 | 2 | 2 | 2 | 102 | 102 | 102 | 102 | 4 | 5 | YDAAAA | OXAAAA | AAAAxx
- 802 | 2908 | 0 | 2 | 2 | 2 | 2 | 802 | 802 | 802 | 802 | 4 | 5 | WEAAAA | WHEAAA | AAAAxx
- 902 | 1104 | 0 | 2 | 2 | 2 | 2 | 902 | 902 | 902 | 902 | 4 | 5 | SIAAAA | MQBAAA | AAAAxx
- 1002 | 2580 | 0 | 2 | 2 | 2 | 2 | 2 | 1002 | 1002 | 1002 | 4 | 5 | OMAAAA | GVDAAA | AAAAxx
- 1602 | 8148 | 0 | 2 | 2 | 2 | 2 | 602 | 1602 | 1602 | 1602 | 4 | 5 | QJAAAA | KBMAAA | AAAAxx
- 1702 | 7940 | 0 | 2 | 2 | 2 | 2 | 702 | 1702 | 1702 | 1702 | 4 | 5 | MNAAAA | KTLAAA | AAAAxx
- 2102 | 6184 | 0 | 2 | 2 | 2 | 2 | 102 | 102 | 2102 | 2102 | 4 | 5 | WCAAAA | WDJAAA | AAAAxx
- 2202 | 8028 | 0 | 2 | 2 | 2 | 2 | 202 | 202 | 2202 | 2202 | 4 | 5 | SGAAAA | UWLAAA | AAAAxx
- 2302 | 7112 | 0 | 2 | 2 | 2 | 2 | 302 | 302 | 2302 | 2302 | 4 | 5 | OKAAAA | ONKAAA | AAAAxx
- 2902 | 6816 | 0 | 2 | 2 | 2 | 2 | 902 | 902 | 2902 | 2902 | 4 | 5 | QHAAAA | ECKAAA | AAAAxx
- 3202 | 7128 | 0 | 2 | 2 | 2 | 2 | 202 | 1202 | 3202 | 3202 | 4 | 5 | ETAAAA | EOKAAA | AAAAxx
- 3902 | 9224 | 0 | 2 | 2 | 2 | 2 | 902 | 1902 | 3902 | 3902 | 4 | 5 | CUAAAA | UQNAAA | AAAAxx
- 4102 | 7676 | 0 | 2 | 2 | 2 | 2 | 102 | 102 | 4102 | 4102 | 4 | 5 | UBAAAA | GJLAAA | AAAAxx
- 4202 | 6628 | 0 | 2 | 2 | 2 | 2 | 202 | 202 | 4202 | 4202 | 4 | 5 | QFAAAA | YUJAAA | AAAAxx
- 4502 | 412 | 0 | 2 | 2 | 2 | 2 | 502 | 502 | 4502 | 4502 | 4 | 5 | ERAAAA | WPAAAA | AAAAxx
- 4702 | 2520 | 0 | 2 | 2 | 2 | 2 | 702 | 702 | 4702 | 4702 | 4 | 5 | WYAAAA | YSDAAA | AAAAxx
- 4902 | 1600 | 0 | 2 | 2 | 2 | 2 | 902 | 902 | 4902 | 4902 | 4 | 5 | OGAAAA | OJCAAA | AAAAxx
- 5602 | 8796 | 0 | 2 | 2 | 2 | 2 | 602 | 1602 | 602 | 5602 | 4 | 5 | MHAAAA | IANAAA | AAAAxx
- 6002 | 8932 | 0 | 2 | 2 | 2 | 2 | 2 | 2 | 1002 | 6002 | 4 | 5 | WWAAAA | OFNAAA | AAAAxx
- 6402 | 3808 | 0 | 2 | 2 | 2 | 2 | 402 | 402 | 1402 | 6402 | 4 | 5 | GMAAAA | MQFAAA | AAAAxx
- 7602 | 1040 | 0 | 2 | 2 | 2 | 2 | 602 | 1602 | 2602 | 7602 | 4 | 5 | KGAAAA | AOBAAA | AAAAxx
- 7802 | 7508 | 0 | 2 | 2 | 2 | 2 | 802 | 1802 | 2802 | 7802 | 4 | 5 | COAAAA | UCLAAA | AAAAxx
- 8002 | 9980 | 0 | 2 | 2 | 2 | 2 | 2 | 2 | 3002 | 8002 | 4 | 5 | UVAAAA | WTOAAA | AAAAxx
- 8302 | 7800 | 0 | 2 | 2 | 2 | 2 | 302 | 302 | 3302 | 8302 | 4 | 5 | IHAAAA | AOLAAA | AAAAxx
- 8402 | 5708 | 0 | 2 | 2 | 2 | 2 | 402 | 402 | 3402 | 8402 | 4 | 5 | ELAAAA | OLIAAA | AAAAxx
- 8602 | 5440 | 0 | 2 | 2 | 2 | 2 | 602 | 602 | 3602 | 8602 | 4 | 5 | WSAAAA | GBIAAA | AAAAxx
- 9502 | 1812 | 0 | 2 | 2 | 2 | 2 | 502 | 1502 | 4502 | 9502 | 4 | 5 | MBAAAA | SRCAAA | AAAAxx
- 9602 | 9972 | 0 | 2 | 2 | 2 | 2 | 602 | 1602 | 4602 | 9602 | 4 | 5 | IFAAAA | OTOAAA | AAAAxx
-(29 rows)
-
--- too few params
-EXECUTE q3('bool');
-ERROR:  wrong number of parameters for prepared statement "q3"
-DETAIL:  Expected 5 parameters but got 1.
--- too many params
-EXECUTE q3('bytea', 5::smallint, 10.5::float, false, 4::bigint, true);
-ERROR:  wrong number of parameters for prepared statement "q3"
-DETAIL:  Expected 5 parameters but got 6.
--- wrong param types
-EXECUTE q3(5::smallint, 10.5::float, false, 4::bigint, 'bytea');
-ERROR:  parameter $3 of type boolean cannot be coerced to the expected type double precision
-LINE 1: EXECUTE q3(5::smallint, 10.5::float, false, 4::bigint, 'byte...
-                                             ^
-HINT:  You will need to rewrite or cast the expression.
--- invalid type
-PREPARE q4(nonexistenttype) AS SELECT $1;
-ERROR:  type "nonexistenttype" does not exist
-LINE 1: PREPARE q4(nonexistenttype) AS SELECT $1;
-                   ^
--- create table as execute
-PREPARE q5(int, text) AS
-	SELECT * FROM tenk1 WHERE unique1 = $1 OR stringu1 = $2
-	ORDER BY unique1;
-CREATE TEMPORARY TABLE q5_prep_results AS EXECUTE q5(200, 'DTAAAA');
-SELECT * FROM q5_prep_results;
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
- 200 | 9441 | 0 | 0 | 0 | 0 | 0 | 200 | 200 | 200 | 200 | 0 | 1 | SHAAAA | DZNAAA | HHHHxx
- 497 | 9092 | 1 | 1 | 7 | 17 | 97 | 497 | 497 | 497 | 497 | 194 | 195 | DTAAAA | SLNAAA | AAAAxx
- 1173 | 6699 | 1 | 1 | 3 | 13 | 73 | 173 | 1173 | 1173 | 1173 | 146 | 147 | DTAAAA | RXJAAA | VVVVxx
- 1849 | 8143 | 1 | 1 | 9 | 9 | 49 | 849 | 1849 | 1849 | 1849 | 98 | 99 | DTAAAA | FBMAAA | VVVVxx
- 2525 | 64 | 1 | 1 | 5 | 5 | 25 | 525 | 525 | 2525 | 2525 | 50 | 51 | DTAAAA | MCAAAA | AAAAxx
- 3201 | 7309 | 1 | 1 | 1 | 1 | 1 | 201 | 1201 | 3201 | 3201 | 2 | 3 | DTAAAA | DVKAAA | HHHHxx
- 3877 | 4060 | 1 | 1 | 7 | 17 | 77 | 877 | 1877 | 3877 | 3877 | 154 | 155 | DTAAAA | EAGAAA | AAAAxx
- 4553 | 4113 | 1 | 1 | 3 | 13 | 53 | 553 | 553 | 4553 | 4553 | 106 | 107 | DTAAAA | FCGAAA | HHHHxx
- 5229 | 6407 | 1 | 1 | 9 | 9 | 29 | 229 | 1229 | 229 | 5229 | 58 | 59 | DTAAAA | LMJAAA | VVVVxx
- 5905 | 9537 | 1 | 1 | 5 | 5 | 5 | 905 | 1905 | 905 | 5905 | 10 | 11 | DTAAAA | VCOAAA | HHHHxx
- 6581 | 4686 | 1 | 1 | 1 | 1 | 81 | 581 | 581 | 1581 | 6581 | 162 | 163 | DTAAAA | GYGAAA | OOOOxx
- 7257 | 1895 | 1 | 1 | 7 | 17 | 57 | 257 | 1257 | 2257 | 7257 | 114 | 115 | DTAAAA | XUCAAA | VVVVxx
- 7933 | 4514 | 1 | 1 | 3 | 13 | 33 | 933 | 1933 | 2933 | 7933 | 66 | 67 | DTAAAA | QRGAAA | OOOOxx
- 8609 | 5918 | 1 | 1 | 9 | 9 | 9 | 609 | 609 | 3609 | 8609 | 18 | 19 | DTAAAA | QTIAAA | OOOOxx
- 9285 | 8469 | 1 | 1 | 5 | 5 | 85 | 285 | 1285 | 4285 | 9285 | 170 | 171 | DTAAAA | TNMAAA | HHHHxx
- 9961 | 2058 | 1 | 1 | 1 | 1 | 61 | 961 | 1961 | 4961 | 9961 | 122 | 123 | DTAAAA | EBDAAA | OOOOxx
-(16 rows)
-
-CREATE TEMPORARY TABLE q5_prep_nodata AS EXECUTE q5(200, 'DTAAAA')
-  WITH NO DATA;
-SELECT * FROM q5_prep_nodata;
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
-(0 rows)
-
--- unknown or unspecified parameter types: should succeed
-PREPARE q6 AS
-    SELECT * FROM tenk1 WHERE unique1 = $1 AND stringu1 = $2;
-PREPARE q7(unknown) AS
-    SELECT * FROM road WHERE thepath = $1;
--- DML statements
-PREPARE q8 AS
-    UPDATE tenk1 SET stringu1 = $2 WHERE unique1 = $1;
-SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements
-    ORDER BY name;
- name | statement | parameter_types | result_types 
-------+------------------------------------------------------------------+----------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------
- q2 | PREPARE q2(text) AS                                             +| {text} | {name,boolean,boolean}
- | 	SELECT datname, datistemplate, datallowconn                     +| | 
- | 	FROM pg_database WHERE datname = $1;                             | | 
- q3 | PREPARE q3(text, int, float, boolean, smallint) AS              +| {text,integer,"double precision",boolean,smallint} | {integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,name,name,name}
- | 	SELECT * FROM tenk1 WHERE string4 = $1 AND (four = $2 OR        +| | 
- | 	ten = $3::bigint OR true = $4 OR odd = $5::int)                  +| | 
- | 	ORDER BY unique1;                                                | | 
- q5 | PREPARE q5(int, text) AS                                        +| {integer,text} | {integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,name,name,name}
- | 	SELECT * FROM tenk1 WHERE unique1 = $1 OR stringu1 = $2         +| | 
- | 	ORDER BY unique1;                                                | | 
- q6 | PREPARE q6 AS                                                   +| {integer,name} | {integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,name,name,name}
- | 	SELECT * FROM tenk1 WHERE unique1 = $1 AND stringu1 = $2;        | | 
- q7 | PREPARE q7(unknown) AS                                          +| {path} | {text,path}
- | 	SELECT * FROM road WHERE thepath = $1;                           | | 
- q8 | PREPARE q8 AS                                                   +| {integer,name} | 
- | 	UPDATE tenk1 SET stringu1 = $2 WHERE unique1 = $1;               | | 
-(6 rows)
-
--- test DEALLOCATE ALL;
-DEALLOCATE ALL;
-SELECT name, statement, parameter_types FROM pg_prepared_statements
-    ORDER BY name;
- name | statement | parameter_types 
-------+-----------+-----------------
-(0 rows)
-
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+	Is the server running locally and accepting connections on that socket?
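The prepare.out chunk shows the same failure mode: the entire expected output is gone and only the connection error remains. The lifecycle being tested is compact enough to sketch by hand (the statement name q_demo is hypothetical, not from the test):

    PREPARE q_demo(int) AS SELECT $1 + 1;
    EXECUTE q_demo(41);                          -- returns 42
    SELECT name, parameter_types, result_types
      FROM pg_prepared_statements;               -- q_demo | {integer} | {integer}
    DEALLOCATE q_demo;                           -- DEALLOCATE ALL would also clear it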
diff -U3 /Users/admin/pgsql/src/test/regress/expected/conversion.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/conversion.out
--- /Users/admin/pgsql/src/test/regress/expected/conversion.out	2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/conversion.out	2024-12-13 13:20:12
@@ -1,734 +1,2 @@
---
--- create user defined conversion
---
--- directory paths and dlsuffix are passed to us in environment variables
-\getenv libdir PG_LIBDIR
-\getenv dlsuffix PG_DLSUFFIX
-\set regresslib :libdir '/regress' :dlsuffix
-CREATE FUNCTION test_enc_conversion(bytea, name, name, bool, validlen OUT int, result OUT bytea)
-    AS :'regresslib', 'test_enc_conversion'
-    LANGUAGE C STRICT;
-CREATE USER regress_conversion_user WITH NOCREATEDB NOCREATEROLE;
-SET SESSION AUTHORIZATION regress_conversion_user;
-CREATE CONVERSION myconv FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;
---
--- cannot make same name conversion in same schema
---
-CREATE CONVERSION myconv FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;
-ERROR:  conversion "myconv" already exists
---
--- create default conversion with qualified name
---
-CREATE DEFAULT CONVERSION public.mydef FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;
---
--- cannot make default conversion with same schema/for_encoding/to_encoding
---
-CREATE DEFAULT CONVERSION public.mydef2 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;
-ERROR:  default conversion for LATIN1 to UTF8 already exists
--- test comments
-COMMENT ON CONVERSION myconv_bad IS 'foo';
-ERROR:  conversion "myconv_bad" does not exist
-COMMENT ON CONVERSION myconv IS 'bar';
-COMMENT ON CONVERSION myconv IS NULL;
---
--- drop user defined conversion
---
-DROP CONVERSION myconv;
-DROP CONVERSION mydef;
---
--- Note: the built-in conversions are exercised in opr_sanity.sql,
--- so there's no need to do that here.
---
---
--- return to the superuser
---
-RESET SESSION AUTHORIZATION;
-DROP USER regress_conversion_user;
---
--- Test built-in conversion functions.
---
--- Helper function to test a conversion. Uses the test_enc_conversion function
--- that was created in the create_function_0 test.
-create or replace function test_conv(
-  input IN bytea,
-  src_encoding IN text,
-  dst_encoding IN text,
-  result OUT bytea,
-  errorat OUT bytea,
-  error OUT text)
-language plpgsql as
-$$
-declare
-  validlen int;
-begin
-  -- First try to perform the conversion with noError = false. If that errors out,
-  -- capture the error message, and try again with noError = true. The second call
-  -- should succeed and return the position of the error, return that too.
-  begin
-    select * into validlen, result from test_enc_conversion(input, src_encoding, dst_encoding, false);
-    errorat = NULL;
-    error := NULL;
-  exception when others then
-    error := sqlerrm;
-    select * into validlen, result from test_enc_conversion(input, src_encoding, dst_encoding, true);
-    errorat = substr(input, validlen + 1);
-  end;
-  return;
-end;
-$$;
---
--- UTF-8
---
--- The description column must be unique.
-CREATE TABLE utf8_verification_inputs (inbytes bytea, description text PRIMARY KEY);
-insert into utf8_verification_inputs values
-  ('\x66006f', 'NUL byte'),
-  ('\xaf', 'bare continuation'),
-  ('\xc5', 'missing second byte in 2-byte char'),
-  ('\xc080', 'smallest 2-byte overlong'),
-  ('\xc1bf', 'largest 2-byte overlong'),
-  ('\xc280', 'next 2-byte after overlongs'),
-  ('\xdfbf', 'largest 2-byte'),
-  ('\xe9af', 'missing third byte in 3-byte char'),
-  ('\xe08080', 'smallest 3-byte overlong'),
-  ('\xe09fbf', 'largest 3-byte overlong'),
-  ('\xe0a080', 'next 3-byte after overlong'),
-  ('\xed9fbf', 'last before surrogates'),
-  ('\xeda080', 'smallest surrogate'),
-  ('\xedbfbf', 'largest surrogate'),
-  ('\xee8080', 'next after surrogates'),
-  ('\xefbfbf', 'largest 3-byte'),
-  ('\xf1afbf', 'missing fourth byte in 4-byte char'),
-  ('\xf0808080', 'smallest 4-byte overlong'),
-  ('\xf08fbfbf', 'largest 4-byte overlong'),
-  ('\xf0908080', 'next 4-byte after overlong'),
-  ('\xf48fbfbf', 'largest 4-byte'),
-  ('\xf4908080', 'smallest too large'),
-  ('\xfa9a9a8a8a', '5-byte');
--- Test UTF-8 verification slow path
-select description, (test_conv(inbytes, 'utf8', 'utf8')).* from utf8_verification_inputs;
- description | result | errorat | error 
-------------------------------------+------------+--------------+----------------------------------------------------------------
- NUL byte | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00
- bare continuation | \x | \xaf | invalid byte sequence for encoding "UTF8": 0xaf
- missing second byte in 2-byte char | \x | \xc5 | invalid byte sequence for encoding "UTF8": 0xc5
- smallest 2-byte overlong | \x | \xc080 | invalid byte sequence for encoding "UTF8": 0xc0 0x80
- largest 2-byte overlong | \x | \xc1bf | invalid byte sequence for encoding "UTF8": 0xc1 0xbf
- next 2-byte after overlongs | \xc280 | | 
- largest 2-byte | \xdfbf | | 
- missing third byte in 3-byte char | \x | \xe9af | invalid byte sequence for encoding "UTF8": 0xe9 0xaf
- smallest 3-byte overlong | \x | \xe08080 | invalid byte sequence for encoding "UTF8": 0xe0 0x80 0x80
- largest 3-byte overlong | \x | \xe09fbf | invalid byte sequence for encoding "UTF8": 0xe0 0x9f 0xbf
- next 3-byte after overlong | \xe0a080 | | 
- last before surrogates | \xed9fbf | | 
- smallest surrogate | \x | \xeda080 | invalid byte sequence for encoding "UTF8": 0xed 0xa0 0x80
- largest surrogate | \x | \xedbfbf | invalid byte sequence for encoding "UTF8": 0xed 0xbf 0xbf
- next after surrogates | \xee8080 | | 
- largest 3-byte | \xefbfbf | | 
- missing fourth byte in 4-byte char | \x | \xf1afbf | invalid byte sequence for encoding "UTF8": 0xf1 0xaf 0xbf
- smallest 4-byte overlong | \x | \xf0808080 | invalid byte sequence for encoding "UTF8": 0xf0 0x80 0x80 0x80
- largest 4-byte overlong | \x | \xf08fbfbf | invalid byte sequence for encoding "UTF8": 0xf0 0x8f 0xbf 0xbf
- next 4-byte after overlong | \xf0908080 | | 
- largest 4-byte | \xf48fbfbf | | 
- smallest too large | \x | \xf4908080 | invalid byte sequence for encoding "UTF8": 0xf4 0x90 0x80 0x80
- 5-byte | \x | \xfa9a9a8a8a | invalid byte sequence for encoding "UTF8": 0xfa
-(23 rows)
-
--- Test UTF-8 verification with ASCII padding appended to provide
--- coverage for algorithms that work on multiple bytes at a time.
--- The error message for a sequence starting with a 4-byte lead
--- will contain all 4 bytes if they are present, so various
--- expressions below add 3 ASCII bytes to the end to ensure
--- consistent error messages.
--- The number 64 below needs to be at least the value of STRIDE_LENGTH in wchar.c.
--- Test multibyte verification in fast path
-with test_bytes as (
-  select
-    inbytes,
-    description,
-    (test_conv(inbytes || repeat('.', 3)::bytea, 'utf8', 'utf8')).error
-  from utf8_verification_inputs
-), test_padded as (
-  select
-    description,
-    (test_conv(inbytes || repeat('.', 64)::bytea, 'utf8', 'utf8')).error
-  from test_bytes
-)
-select
-  description,
-  b.error as orig_error,
-  p.error as error_after_padding
-from test_padded p
-join test_bytes b
-using (description)
-where p.error is distinct from b.error
-order by description;
- description | orig_error | error_after_padding 
--------------+------------+---------------------
-(0 rows)
-
--- Test ASCII verification in fast path where incomplete
--- UTF-8 sequences fall at the end of the preceding chunk.
-with test_bytes as (
-  select
-    inbytes,
-    description,
-    (test_conv(inbytes || repeat('.', 3)::bytea, 'utf8', 'utf8')).error
-  from utf8_verification_inputs
-), test_padded as (
-  select
-    description,
-    (test_conv(repeat('.', 64 - length(inbytes))::bytea || inbytes || repeat('.', 64)::bytea, 'utf8', 'utf8')).error
-  from test_bytes
-)
-select
-  description,
-  b.error as orig_error,
-  p.error as error_after_padding
-from test_padded p
-join test_bytes b
-using (description)
-where p.error is distinct from b.error
-order by description;
- description | orig_error | error_after_padding 
--------------+------------+---------------------
-(0 rows)
-
--- Test cases where UTF-8 sequences within short text
--- come after the fast path returns.
-with test_bytes as (
-  select
-    inbytes,
-    description,
-    (test_conv(inbytes || repeat('.', 3)::bytea, 'utf8', 'utf8')).error
-  from utf8_verification_inputs
-), test_padded as (
-  select
-    description,
-    (test_conv(repeat('.', 64)::bytea || inbytes || repeat('.', 3)::bytea, 'utf8', 'utf8')).error
-  from test_bytes
-)
-select
-  description,
-  b.error as orig_error,
-  p.error as error_after_padding
-from test_padded p
-join test_bytes b
-using (description)
-where p.error is distinct from b.error
-order by description;
- description | orig_error | error_after_padding 
--------------+------------+---------------------
-(0 rows)
-
--- Test cases where incomplete UTF-8 sequences fall at the
--- end of the part checked by the fast path.
-with test_bytes as (
-  select
-    inbytes,
-    description,
-    (test_conv(inbytes || repeat('.', 3)::bytea, 'utf8', 'utf8')).error
-  from utf8_verification_inputs
-), test_padded as (
-  select
-    description,
-    (test_conv(repeat('.', 64 - length(inbytes))::bytea || inbytes || repeat('.', 3)::bytea, 'utf8', 'utf8')).error
-  from test_bytes
-)
-select
-  description,
-  b.error as orig_error,
-  p.error as error_after_padding
-from test_padded p
-join test_bytes b
-using (description)
-where p.error is distinct from b.error
-order by description;
- description | orig_error | error_after_padding 
--------------+------------+---------------------
-(0 rows)
-
-CREATE TABLE utf8_inputs (inbytes bytea, description text);
-insert into utf8_inputs values
-  ('\x666f6f', 'valid, pure ASCII'),
-  ('\xc3a4c3b6', 'valid, extra latin chars'),
-  ('\xd184d0bed0be', 'valid, cyrillic'),
-  ('\x666f6fe8b1a1', 'valid, kanji/Chinese'),
-  ('\xe382abe3829a', 'valid, two chars that combine to one in EUC_JIS_2004'),
-  ('\xe382ab', 'only first half of combined char in EUC_JIS_2004'),
-  ('\xe382abe382', 'incomplete combination when converted EUC_JIS_2004'),
-  ('\xecbd94eb81bceba6ac', 'valid, Hangul, Korean'),
-  ('\x666f6fefa8aa', 'valid, needs mapping function to convert to GB18030'),
-  ('\x66e8b1ff6f6f', 'invalid byte sequence'),
-  ('\x66006f', 'invalid, NUL byte'),
-  ('\x666f6fe8b100', 'invalid, NUL byte'),
-  ('\x666f6fe8b1', 'incomplete character at end');
--- Test UTF-8 verification
-select description, (test_conv(inbytes, 'utf8', 'utf8')).* from utf8_inputs;
- description | result | errorat | error 
-------------------------------------------------------+----------------------+--------------+-----------------------------------------------------------
- valid, pure ASCII | \x666f6f | | 
- valid, extra latin chars | \xc3a4c3b6 | | 
- valid, cyrillic | \xd184d0bed0be | | 
- valid, kanji/Chinese | \x666f6fe8b1a1 | | 
- valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | | 
- only first half of combined char in EUC_JIS_2004 | \xe382ab | | 
- incomplete combination when converted EUC_JIS_2004 | \xe382ab | \xe382 | invalid byte sequence for encoding "UTF8": 0xe3 0x82
- valid, Hangul, Korean | \xecbd94eb81bceba6ac | | 
- valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | | 
- invalid byte sequence | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff
- invalid, NUL byte | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00
- invalid, NUL byte | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00
- incomplete character at end | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1
-(13 rows)
-
--- Test conversions from UTF-8
-select description, inbytes, (test_conv(inbytes, 'utf8', 'euc_jis_2004')).* from utf8_inputs;
- description | inbytes | result | errorat | error 
-------------------------------------------------------+----------------------+----------------+----------------------+-------------------------------------------------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | | 
- valid, extra latin chars | \xc3a4c3b6 | \xa9daa9ec | | 
- valid, cyrillic | \xd184d0bed0be | \xa7e6a7e0a7e0 | | 
- valid, kanji/Chinese | \x666f6fe8b1a1 | \x666f6fbedd | | 
- valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | \xa5f7 | | 
- only first half of combined char in EUC_JIS_2004 | \xe382ab | \xa5ab | | 
- incomplete combination when converted EUC_JIS_2004 | \xe382abe382 | \x | \xe382abe382 | invalid byte sequence for encoding "UTF8": 0xe3 0x82
- valid, Hangul, Korean | \xecbd94eb81bceba6ac | \x | \xecbd94eb81bceba6ac | character with byte sequence 0xec 0xbd 0x94 in encoding "UTF8" has no equivalent in encoding "EUC_JIS_2004"
- valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | \x666f6f | \xefa8aa | character with byte sequence 0xef 0xa8 0xaa in encoding "UTF8" has no equivalent in encoding "EUC_JIS_2004"
- invalid byte sequence | \x66e8b1ff6f6f | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff
- invalid, NUL byte | \x66006f | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00
- invalid, NUL byte | \x666f6fe8b100 | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00
- incomplete character at end | \x666f6fe8b1 | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1
-(13 rows)
-
-select description, inbytes, (test_conv(inbytes, 'utf8', 'latin1')).* from utf8_inputs;
- description | inbytes | result | errorat | error 
-------------------------------------------------------+----------------------+----------+----------------------+-------------------------------------------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | | 
- valid, extra latin chars | \xc3a4c3b6 | \xe4f6 | | 
- valid, cyrillic | \xd184d0bed0be | \x | \xd184d0bed0be | character with byte sequence 0xd1 0x84 in encoding "UTF8" has no equivalent in encoding "LATIN1"
- valid, kanji/Chinese | \x666f6fe8b1a1 | \x666f6f | \xe8b1a1 | character with byte sequence 0xe8 0xb1 0xa1 in encoding "UTF8" has no equivalent in encoding "LATIN1"
- valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | \x | \xe382abe3829a | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN1"
- only first half of combined char in EUC_JIS_2004 | \xe382ab | \x | \xe382ab | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN1"
- incomplete combination when converted EUC_JIS_2004 | \xe382abe382 | \x | \xe382abe382 | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN1"
- valid, Hangul, Korean | \xecbd94eb81bceba6ac | \x | \xecbd94eb81bceba6ac | character with byte sequence 0xec 0xbd 0x94 in encoding "UTF8" has no equivalent in encoding "LATIN1"
- valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | \x666f6f | \xefa8aa | character with byte sequence 0xef 0xa8 0xaa in encoding "UTF8" has no equivalent in encoding "LATIN1"
- invalid byte sequence | \x66e8b1ff6f6f | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff
- invalid, NUL byte | \x66006f | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00
- invalid, NUL byte | \x666f6fe8b100 | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00
- incomplete character at end | \x666f6fe8b1 | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1
-(13 rows)
-
-select description, inbytes, (test_conv(inbytes, 'utf8', 'latin2')).* from utf8_inputs;
- description | inbytes | result | errorat | error 
-------------------------------------------------------+----------------------+----------+----------------------+-------------------------------------------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | | 
- valid, extra latin chars | \xc3a4c3b6 | \xe4f6 | | 
- valid, cyrillic | \xd184d0bed0be | \x | \xd184d0bed0be | character with byte sequence 0xd1 0x84 in encoding "UTF8" has no equivalent in encoding "LATIN2"
- valid, kanji/Chinese | \x666f6fe8b1a1 | \x666f6f | \xe8b1a1 | character with byte sequence 0xe8 0xb1 0xa1 in encoding "UTF8" has no equivalent in encoding "LATIN2"
- valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | \x | \xe382abe3829a | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN2"
- only first half of combined char in EUC_JIS_2004 | \xe382ab | \x | \xe382ab | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN2"
- incomplete combination when converted EUC_JIS_2004 | \xe382abe382 | \x | \xe382abe382 | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN2"
- valid, Hangul, Korean | \xecbd94eb81bceba6ac | \x | \xecbd94eb81bceba6ac | character with byte sequence 0xec 0xbd 0x94 in encoding "UTF8" has no equivalent in encoding "LATIN2"
- valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | \x666f6f | \xefa8aa | character with byte sequence 0xef 0xa8 0xaa in encoding "UTF8" has no equivalent in encoding "LATIN2"
- invalid byte sequence | \x66e8b1ff6f6f | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff
- invalid, NUL byte | \x66006f | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00
- invalid, NUL byte | \x666f6fe8b100 | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00
- incomplete character at end | \x666f6fe8b1 | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1
-(13 rows)
-
-select description, inbytes, (test_conv(inbytes, 'utf8', 'latin5')).* from utf8_inputs;
- description | inbytes | result | errorat | error 
-------------------------------------------------------+----------------------+----------+----------------------+-------------------------------------------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | | 
- valid, extra latin chars | \xc3a4c3b6 | \xe4f6 | | 
- valid, cyrillic | \xd184d0bed0be | \x | \xd184d0bed0be | character with byte sequence 0xd1 0x84 in encoding "UTF8" has no equivalent in encoding "LATIN5"
- valid, kanji/Chinese | \x666f6fe8b1a1 | \x666f6f | \xe8b1a1 | character with byte sequence 0xe8 0xb1 0xa1 in encoding "UTF8" has no equivalent in encoding "LATIN5"
- valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | \x | \xe382abe3829a | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN5"
- only first half of combined char in EUC_JIS_2004 | \xe382ab | \x | \xe382ab | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN5"
- incomplete combination when converted EUC_JIS_2004 | \xe382abe382 | \x | \xe382abe382 | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN5"
- valid, Hangul, Korean | \xecbd94eb81bceba6ac | \x | \xecbd94eb81bceba6ac | character with byte sequence 0xec 0xbd 0x94 in encoding "UTF8" has no equivalent in encoding "LATIN5"
- valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | \x666f6f | \xefa8aa | character with byte sequence 0xef 0xa8 0xaa in encoding "UTF8" has no equivalent in encoding "LATIN5"
- invalid byte sequence | \x66e8b1ff6f6f | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff
- invalid, NUL byte | \x66006f | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00
- invalid, NUL byte | \x666f6fe8b100 | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00
- incomplete character at end | \x666f6fe8b1 | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1
-(13 rows)
-
-select description, inbytes, (test_conv(inbytes, 'utf8', 'koi8r')).* from utf8_inputs;
- description | inbytes | result | errorat | error 
-------------------------------------------------------+----------------------+----------+----------------------+------------------------------------------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | | 
- valid, extra latin chars | \xc3a4c3b6 | \x | \xc3a4c3b6 | character with byte sequence 0xc3 0xa4 in encoding "UTF8" has no equivalent in encoding "KOI8R"
- valid, cyrillic | \xd184d0bed0be | \xc6cfcf | | 
- valid, kanji/Chinese | \x666f6fe8b1a1 | \x666f6f | \xe8b1a1 | character with byte sequence 0xe8 0xb1 0xa1 in encoding "UTF8" has no equivalent in encoding "KOI8R"
- valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | \x | \xe382abe3829a | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "KOI8R"
- only first half of combined char in EUC_JIS_2004 | \xe382ab | \x | \xe382ab | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "KOI8R"
- incomplete combination when converted EUC_JIS_2004 | \xe382abe382 | \x | \xe382abe382 | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "KOI8R"
- valid, Hangul, Korean | \xecbd94eb81bceba6ac | \x | \xecbd94eb81bceba6ac | character with byte sequence 0xec 0xbd 0x94 in encoding "UTF8" has no equivalent in encoding "KOI8R"
- valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | \x666f6f | \xefa8aa | character with byte sequence 0xef 0xa8 0xaa in encoding "UTF8" has no equivalent in encoding "KOI8R"
- invalid byte sequence | \x66e8b1ff6f6f | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff
- invalid, NUL byte | \x66006f | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00
- invalid, NUL byte | \x666f6fe8b100 | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00
- incomplete character at end | \x666f6fe8b1 | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1
-(13 rows)
-
-select description, inbytes, (test_conv(inbytes, 'utf8', 'gb18030')).* from utf8_inputs;
- description | inbytes | result | errorat | error 
-------------------------------------------------------+----------------------+----------------------------+--------------+-----------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | | 
- valid, extra latin chars | \xc3a4c3b6 | \x81308a3181308b32 | | 
- valid, cyrillic | \xd184d0bed0be | \xa7e6a7e0a7e0 | | 
- valid, kanji/Chinese | \x666f6fe8b1a1 | \x666f6fcff3 | | 
- valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | \xa5ab8139a732 | | 
- only first half of combined char in EUC_JIS_2004 | \xe382ab | \xa5ab | | 
- incomplete combination when converted EUC_JIS_2004 | \xe382abe382 | \xa5ab | \xe382 | invalid byte sequence for encoding "UTF8": 0xe3 0x82
- valid, Hangul, Korean | \xecbd94eb81bceba6ac | \x8334e5398238c4338330b335 | | 
- valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | \x666f6f84309c38 | | 
- invalid byte sequence | \x66e8b1ff6f6f | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff
- invalid, NUL byte | \x66006f | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00
- invalid, NUL byte | \x666f6fe8b100 | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00
- incomplete character at end | \x666f6fe8b1 | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1
-(13 rows)
-
---
--- EUC_JIS_2004
---
-CREATE TABLE euc_jis_2004_inputs (inbytes bytea, description text);
-insert into euc_jis_2004_inputs values
-  ('\x666f6f', 'valid, pure ASCII'),
-  ('\x666f6fbedd', 'valid'),
-  ('\xa5f7', 'valid, translates to two UTF-8 chars '),
-  ('\xbeddbe', 'incomplete char '),
-  ('\x666f6f00bedd', 'invalid, NUL byte'),
-  ('\x666f6fbe00dd', 'invalid, NUL byte'),
-  ('\x666f6fbedd00', 'invalid, NUL byte'),
-  ('\xbe04', 'invalid byte sequence');
--- Test EUC_JIS_2004 verification
-select description, inbytes, (test_conv(inbytes, 'euc_jis_2004', 'euc_jis_2004')).* from euc_jis_2004_inputs;
- description | inbytes | result | errorat | error 
----------------------------------------+----------------+--------------+----------+--------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | | 
- valid | \x666f6fbedd | \x666f6fbedd | | 
- valid, translates to two UTF-8 chars | \xa5f7 | \xa5f7 | | 
- incomplete char | \xbeddbe | \xbedd | \xbe | invalid byte sequence for encoding "EUC_JIS_2004": 0xbe
- invalid, NUL byte | \x666f6f00bedd | \x666f6f | \x00bedd | invalid byte sequence for encoding "EUC_JIS_2004": 0x00
- invalid, NUL byte | \x666f6fbe00dd | \x666f6f | \xbe00dd | invalid byte sequence for encoding "EUC_JIS_2004": 0xbe 0x00
- invalid, NUL byte | \x666f6fbedd00 | \x666f6fbedd | \x00 | invalid byte sequence for encoding "EUC_JIS_2004": 0x00
- invalid byte sequence | \xbe04 | \x | \xbe04 | invalid byte sequence for encoding "EUC_JIS_2004": 0xbe 0x04
-(8 rows)
-
--- Test conversions from EUC_JIS_2004
-select description, inbytes, (test_conv(inbytes, 'euc_jis_2004', 'utf8')).* from euc_jis_2004_inputs;
- description | inbytes | result | errorat | error 
----------------------------------------+----------------+----------------+----------+--------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | | 
- valid | \x666f6fbedd | \x666f6fe8b1a1 | | 
- valid, translates to two UTF-8 chars | \xa5f7 | \xe382abe3829a | | 
- incomplete char | \xbeddbe | \xe8b1a1 | \xbe | invalid byte sequence for encoding "EUC_JIS_2004": 0xbe
- invalid, NUL byte | \x666f6f00bedd | \x666f6f | \x00bedd | invalid byte sequence for encoding "EUC_JIS_2004": 0x00
- invalid, NUL byte | \x666f6fbe00dd | \x666f6f | \xbe00dd | invalid byte sequence for encoding "EUC_JIS_2004": 0xbe 0x00
- invalid, NUL byte | \x666f6fbedd00 | \x666f6fe8b1a1 | \x00 | invalid byte sequence for encoding "EUC_JIS_2004": 0x00
- invalid byte sequence | \xbe04 | \x | \xbe04 | invalid byte sequence for encoding "EUC_JIS_2004": 0xbe 0x04
-(8 rows)
-
---
--- SHIFT-JIS-2004
---
-CREATE TABLE shiftjis2004_inputs (inbytes bytea, description text);
-insert into shiftjis2004_inputs values
-  ('\x666f6f', 'valid, pure ASCII'),
-  ('\x666f6f8fdb', 'valid'),
-  ('\x666f6f81c0', 'valid, no translation to UTF-8'),
-  ('\x666f6f82f5', 'valid, translates to two UTF-8 chars '),
-  ('\x666f6f8fdb8f', 'incomplete char '),
-  ('\x666f6f820a', 'incomplete char, followed by newline '),
-  ('\x666f6f008fdb', 'invalid, NUL byte'),
-  ('\x666f6f8f00db', 'invalid, NUL byte'),
-  ('\x666f6f8fdb00', 'invalid, NUL byte');
--- Test SHIFT-JIS-2004 verification
-select description, inbytes, (test_conv(inbytes, 'shiftjis2004', 'shiftjis2004')).* from shiftjis2004_inputs;
- description | inbytes | result | errorat | error 
----------------------------------------+----------------+--------------+----------+----------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | | 
- valid | \x666f6f8fdb | \x666f6f8fdb | | 
- valid, no translation to UTF-8 | \x666f6f81c0 | \x666f6f81c0 | | 
- valid, translates to two UTF-8 chars | \x666f6f82f5 | \x666f6f82f5 | | 
- incomplete char | \x666f6f8fdb8f | \x666f6f8fdb | \x8f | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x8f
- incomplete char, followed by newline | \x666f6f820a | \x666f6f | \x820a | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x82 0x0a
- invalid, NUL byte | \x666f6f008fdb | \x666f6f | \x008fdb | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x00
- invalid, NUL byte | \x666f6f8f00db | \x666f6f | \x8f00db | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x8f 0x00
- invalid, NUL byte | \x666f6f8fdb00 | \x666f6f8fdb | \x00 | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x00
-(9 rows)
-
--- Test conversions from SHIFT-JIS-2004
-select description, inbytes, (test_conv(inbytes, 'shiftjis2004', 'utf8')).* from shiftjis2004_inputs;
- description | inbytes | result | errorat | error 
----------------------------------------+----------------+----------------------+----------+----------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | | 
- valid | \x666f6f8fdb | \x666f6fe8b1a1 | | 
- valid, no translation to UTF-8 | \x666f6f81c0 | \x666f6fe28a84 | | 
- valid, translates to two UTF-8 chars | \x666f6f82f5 | \x666f6fe3818be3829a | | 
- incomplete char | \x666f6f8fdb8f | \x666f6fe8b1a1 | \x8f | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x8f
- incomplete char, followed by newline | \x666f6f820a | \x666f6f | \x820a | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x82 0x0a
- invalid, NUL byte | \x666f6f008fdb | \x666f6f | \x008fdb | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x00
- invalid, NUL byte | \x666f6f8f00db | \x666f6f | \x8f00db | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x8f 0x00
- invalid, NUL byte | \x666f6f8fdb00 | \x666f6fe8b1a1 | \x00 | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x00
-(9 rows)
-
-select description, inbytes, (test_conv(inbytes, 'shiftjis2004', 'euc_jis_2004')).* from shiftjis2004_inputs;
- description | inbytes | result | errorat | error 
----------------------------------------+----------------+--------------+----------+----------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | | 
- valid | \x666f6f8fdb | \x666f6fbedd | | 
- valid, no translation to UTF-8 | \x666f6f81c0 | \x666f6fa2c2 | | 
- valid, translates to two UTF-8 chars | \x666f6f82f5 | \x666f6fa4f7 | | 
- incomplete char | \x666f6f8fdb8f | \x666f6fbedd | \x8f | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x8f
- incomplete char, followed by newline | \x666f6f820a | \x666f6f | \x820a | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x82 0x0a
- invalid, NUL byte | \x666f6f008fdb | \x666f6f | \x008fdb | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x00
- invalid, NUL byte | \x666f6f8f00db | \x666f6f | \x8f00db | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x8f 0x00
- invalid, NUL byte | \x666f6f8fdb00 | \x666f6fbedd | \x00 | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x00
-(9 rows)
-
---
--- GB18030
---
-CREATE TABLE gb18030_inputs (inbytes bytea, description text);
-insert into gb18030_inputs values
-  ('\x666f6f', 'valid, pure ASCII'),
-  ('\x666f6fcff3', 'valid'),
-  ('\x666f6f8431a530', 'valid, no translation to UTF-8'),
-  ('\x666f6f84309c38', 'valid, translates to UTF-8 by mapping function'),
-  ('\x666f6f84309c', 'incomplete char '),
-  ('\x666f6f84309c0a', 'incomplete char, followed by newline '),
-  ('\x666f6f84309c3800', 'invalid, NUL byte'),
-  ('\x666f6f84309c0038', 'invalid, NUL byte');
--- Test GB18030 verification
-select description, inbytes, (test_conv(inbytes, 'gb18030', 'gb18030')).* from gb18030_inputs;
- description | inbytes | result | errorat | error 
-------------------------------------------------+--------------------+------------------+--------------+-------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | | 
- valid | \x666f6fcff3 | \x666f6fcff3 | | 
- valid, no translation to UTF-8 | \x666f6f8431a530 | \x666f6f8431a530 | | 
- valid, translates to UTF-8 by mapping function | \x666f6f84309c38 | \x666f6f84309c38 | | 
- incomplete char | \x666f6f84309c | \x666f6f | \x84309c | invalid byte sequence for encoding "GB18030": 0x84 0x30 0x9c
- incomplete char, followed by newline | \x666f6f84309c0a | \x666f6f | \x84309c0a | invalid byte sequence for encoding "GB18030": 0x84 0x30 0x9c 0x0a
- invalid, NUL byte | \x666f6f84309c3800 | \x666f6f84309c38 | \x00 | invalid byte sequence for encoding "GB18030": 0x00
- invalid, NUL byte | \x666f6f84309c0038 | \x666f6f | \x84309c0038 | invalid byte sequence for encoding "GB18030": 0x84 0x30 0x9c 0x00
-(8 rows)
-
--- Test conversions from GB18030
-select description, inbytes, (test_conv(inbytes, 'gb18030', 'utf8')).* from gb18030_inputs;
- description | inbytes | result | errorat | error 
-------------------------------------------------+--------------------+----------------+--------------+-------------------------------------------------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | | 
- valid | \x666f6fcff3 | \x666f6fe8b1a1 | | 
- valid, no translation to UTF-8 | \x666f6f8431a530 | \x666f6f | \x8431a530 | character with byte sequence 0x84 0x31 0xa5 0x30 in encoding "GB18030" has no equivalent in encoding "UTF8"
- valid, translates to UTF-8 by mapping function | \x666f6f84309c38 | \x666f6fefa8aa | | 
- incomplete char | \x666f6f84309c | \x666f6f | \x84309c | invalid byte sequence for encoding "GB18030": 0x84 0x30 0x9c
- incomplete char, followed by newline | \x666f6f84309c0a | \x666f6f | \x84309c0a | invalid byte sequence for encoding "GB18030": 0x84 0x30 0x9c 0x0a
- invalid, NUL byte | \x666f6f84309c3800 | \x666f6fefa8aa | \x00 | invalid byte sequence for encoding "GB18030": 0x00
- invalid, NUL byte | \x666f6f84309c0038 | \x666f6f | \x84309c0038 | invalid byte sequence for encoding "GB18030": 0x84 0x30 0x9c 0x00
-(8 rows)
-
---
--- ISO-8859-5
---
-CREATE TABLE iso8859_5_inputs (inbytes bytea, description text);
-insert into iso8859_5_inputs values
-  ('\x666f6f', 'valid, pure ASCII'),
-  ('\xe4dede', 'valid'),
-  ('\x00', 'invalid, NUL byte'),
-  ('\xe400dede', 'invalid, NUL byte'),
-  ('\xe4dede00', 'invalid, NUL byte');
--- Test ISO-8859-5 verification
-select description, inbytes, (test_conv(inbytes, 'iso8859-5', 'iso8859-5')).* from iso8859_5_inputs;
- description | inbytes | result | errorat | error 
--------------------+------------+----------+----------+-------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | | 
- valid | \xe4dede | \xe4dede | | 
- invalid, NUL byte | \x00 | \x | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00
- invalid, NUL byte | \xe400dede | \xe4 | \x00dede | invalid byte sequence for encoding "ISO_8859_5": 0x00
- invalid, NUL byte | \xe4dede00 | \xe4dede | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00
-(5 rows)
-
--- Test conversions from ISO-8859-5
-select description, inbytes, (test_conv(inbytes, 'iso8859-5', 'utf8')).* from iso8859_5_inputs;
- description | inbytes | result | errorat | error 
--------------------+------------+----------------+----------+-------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | | 
- valid | \xe4dede | \xd184d0bed0be | | 
- invalid, NUL byte | \x00 | \x | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00
- invalid, NUL byte | \xe400dede | \xd184 | \x00dede | invalid byte sequence for encoding "ISO_8859_5": 0x00
- invalid, NUL byte | \xe4dede00 | \xd184d0bed0be | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00
-(5 rows)
-
-select description, inbytes, (test_conv(inbytes, 'iso8859-5', 'koi8r')).* from iso8859_5_inputs;
- description | inbytes | result | errorat | error 
--------------------+------------+----------+----------+-------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | | 
- valid | \xe4dede | \xc6cfcf | | 
- invalid, NUL byte | \x00 | \x | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00
- invalid, NUL byte | \xe400dede | \xc6 | \x00dede | invalid byte sequence for encoding "ISO_8859_5": 0x00
- invalid, NUL byte | \xe4dede00 | \xc6cfcf | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00
-(5 rows)
-
-select description, inbytes, (test_conv(inbytes, 'iso8859_5', 'mule_internal')).* from iso8859_5_inputs;
- description | inbytes | result | errorat | error 
--------------------+------------+----------------+----------+-------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | | 
- valid | \xe4dede | \x8bc68bcf8bcf | | 
- invalid, NUL byte | \x00 | \x | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00
- invalid, NUL byte | \xe400dede | \x8bc6 | \x00dede | invalid byte sequence for encoding "ISO_8859_5": 0x00
- invalid, NUL byte | \xe4dede00 | \x8bc68bcf8bcf | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00
-(5 rows)
-
---
--- Big5
---
-CREATE TABLE big5_inputs (inbytes bytea, description text);
-insert into big5_inputs values
-  ('\x666f6f', 'valid, pure ASCII'),
-  ('\x666f6fb648', 'valid'),
-  ('\x666f6fa27f', 'valid, no translation to UTF-8'),
-  ('\x666f6fb60048', 'invalid, NUL byte'),
-  ('\x666f6fb64800', 'invalid, NUL byte');
--- Test Big5 verification
-select description, inbytes, (test_conv(inbytes, 'big5', 'big5')).* from big5_inputs;
- description | inbytes | result | errorat | error 
---------------------------------+----------------+--------------+----------+------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | | 
- valid | \x666f6fb648 | \x666f6fb648 | | 
- valid, no translation to UTF-8 | \x666f6fa27f | \x666f6fa27f | | 
- invalid, NUL byte | \x666f6fb60048 | \x666f6f | \xb60048
| invalid byte sequence for encoding "BIG5": 0xb6 0x00 - invalid, NUL byte | \x666f6fb64800 | \x666f6fb648 | \x00 | invalid byte sequence for encoding "BIG5": 0x00 -(5 rows) - --- Test conversions from Big5 -select description, inbytes, (test_conv(inbytes, 'big5', 'utf8')).* from big5_inputs; - description | inbytes | result | errorat | error ---------------------------------+----------------+----------------+----------+------------------------------------------------------------------------------------------------ - valid, pure ASCII | \x666f6f | \x666f6f | | - valid | \x666f6fb648 | \x666f6fe8b1a1 | | - valid, no translation to UTF-8 | \x666f6fa27f | \x666f6f | \xa27f | character with byte sequence 0xa2 0x7f in encoding "BIG5" has no equivalent in encoding "UTF8" - invalid, NUL byte | \x666f6fb60048 | \x666f6f | \xb60048 | invalid byte sequence for encoding "BIG5": 0xb6 0x00 - invalid, NUL byte | \x666f6fb64800 | \x666f6fe8b1a1 | \x00 | invalid byte sequence for encoding "BIG5": 0x00 -(5 rows) - -select description, inbytes, (test_conv(inbytes, 'big5', 'mule_internal')).* from big5_inputs; - description | inbytes | result | errorat | error ---------------------------------+----------------+----------------+----------+------------------------------------------------------ - valid, pure ASCII | \x666f6f | \x666f6f | | - valid | \x666f6fb648 | \x666f6f95e2af | | - valid, no translation to UTF-8 | \x666f6fa27f | \x666f6f95a3c1 | | - invalid, NUL byte | \x666f6fb60048 | \x666f6f | \xb60048 | invalid byte sequence for encoding "BIG5": 0xb6 0x00 - invalid, NUL byte | \x666f6fb64800 | \x666f6f95e2af | \x00 | invalid byte sequence for encoding "BIG5": 0x00 -(5 rows) - --- --- MULE_INTERNAL --- -CREATE TABLE mic_inputs (inbytes bytea, description text); -insert into mic_inputs values - ('\x666f6f', 'valid, pure ASCII'), - ('\x8bc68bcf8bcf', 'valid (in KOI8R)'), - ('\x8bc68bcf8b', 'invalid,incomplete char'), - ('\x92bedd', 'valid (in SHIFT_JIS)'), - ('\x92be', 'invalid, incomplete char)'), - ('\x666f6f95a3c1', 'valid (in Big5)'), - ('\x666f6f95a3', 'invalid, incomplete char'), - ('\x9200bedd', 'invalid, NUL byte'), - ('\x92bedd00', 'invalid, NUL byte'), - ('\x8b00c68bcf8bcf', 'invalid, NUL byte'); --- Test MULE_INTERNAL verification -select description, inbytes, (test_conv(inbytes, 'mule_internal', 'mule_internal')).* from mic_inputs; - description | inbytes | result | errorat | error ----------------------------+------------------+----------------+------------------+-------------------------------------------------------------------- - valid, pure ASCII | \x666f6f | \x666f6f | | - valid (in KOI8R) | \x8bc68bcf8bcf | \x8bc68bcf8bcf | | - invalid,incomplete char | \x8bc68bcf8b | \x8bc68bcf | \x8b | invalid byte sequence for encoding "MULE_INTERNAL": 0x8b - valid (in SHIFT_JIS) | \x92bedd | \x92bedd | | - invalid, incomplete char) | \x92be | \x | \x92be | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0xbe - valid (in Big5) | \x666f6f95a3c1 | \x666f6f95a3c1 | | - invalid, incomplete char | \x666f6f95a3 | \x666f6f | \x95a3 | invalid byte sequence for encoding "MULE_INTERNAL": 0x95 0xa3 - invalid, NUL byte | \x9200bedd | \x | \x9200bedd | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0x00 0xbe - invalid, NUL byte | \x92bedd00 | \x92bedd | \x00 | invalid byte sequence for encoding "MULE_INTERNAL": 0x00 - invalid, NUL byte | \x8b00c68bcf8bcf | \x | \x8b00c68bcf8bcf | invalid byte sequence for encoding "MULE_INTERNAL": 0x8b 0x00 -(10 rows) - --- Test conversions from MULE_INTERNAL 
-select description, inbytes, (test_conv(inbytes, 'mule_internal', 'koi8r')).* from mic_inputs; - description | inbytes | result | errorat | error ----------------------------+------------------+----------+------------------+--------------------------------------------------------------------------------------------------------------- - valid, pure ASCII | \x666f6f | \x666f6f | | - valid (in KOI8R) | \x8bc68bcf8bcf | \xc6cfcf | | - invalid,incomplete char | \x8bc68bcf8b | \xc6cf | \x8b | invalid byte sequence for encoding "MULE_INTERNAL": 0x8b - valid (in SHIFT_JIS) | \x92bedd | \x | \x92bedd | character with byte sequence 0x92 0xbe 0xdd in encoding "MULE_INTERNAL" has no equivalent in encoding "KOI8R" - invalid, incomplete char) | \x92be | \x | \x92be | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0xbe - valid (in Big5) | \x666f6f95a3c1 | \x666f6f | \x95a3c1 | character with byte sequence 0x95 0xa3 0xc1 in encoding "MULE_INTERNAL" has no equivalent in encoding "KOI8R" - invalid, incomplete char | \x666f6f95a3 | \x666f6f | \x95a3 | invalid byte sequence for encoding "MULE_INTERNAL": 0x95 0xa3 - invalid, NUL byte | \x9200bedd | \x | \x9200bedd | character with byte sequence 0x92 0x00 0xbe in encoding "MULE_INTERNAL" has no equivalent in encoding "KOI8R" - invalid, NUL byte | \x92bedd00 | \x | \x92bedd00 | character with byte sequence 0x92 0xbe 0xdd in encoding "MULE_INTERNAL" has no equivalent in encoding "KOI8R" - invalid, NUL byte | \x8b00c68bcf8bcf | \x | \x8b00c68bcf8bcf | character with byte sequence 0x8b 0x00 in encoding "MULE_INTERNAL" has no equivalent in encoding "KOI8R" -(10 rows) - -select description, inbytes, (test_conv(inbytes, 'mule_internal', 'iso8859-5')).* from mic_inputs; - description | inbytes | result | errorat | error ----------------------------+------------------+----------+------------------+-------------------------------------------------------------------------------------------------------------------- - valid, pure ASCII | \x666f6f | \x666f6f | | - valid (in KOI8R) | \x8bc68bcf8bcf | \xe4dede | | - invalid,incomplete char | \x8bc68bcf8b | \xe4de | \x8b | invalid byte sequence for encoding "MULE_INTERNAL": 0x8b - valid (in SHIFT_JIS) | \x92bedd | \x | \x92bedd | character with byte sequence 0x92 0xbe 0xdd in encoding "MULE_INTERNAL" has no equivalent in encoding "ISO_8859_5" - invalid, incomplete char) | \x92be | \x | \x92be | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0xbe - valid (in Big5) | \x666f6f95a3c1 | \x666f6f | \x95a3c1 | character with byte sequence 0x95 0xa3 0xc1 in encoding "MULE_INTERNAL" has no equivalent in encoding "ISO_8859_5" - invalid, incomplete char | \x666f6f95a3 | \x666f6f | \x95a3 | invalid byte sequence for encoding "MULE_INTERNAL": 0x95 0xa3 - invalid, NUL byte | \x9200bedd | \x | \x9200bedd | character with byte sequence 0x92 0x00 0xbe in encoding "MULE_INTERNAL" has no equivalent in encoding "ISO_8859_5" - invalid, NUL byte | \x92bedd00 | \x | \x92bedd00 | character with byte sequence 0x92 0xbe 0xdd in encoding "MULE_INTERNAL" has no equivalent in encoding "ISO_8859_5" - invalid, NUL byte | \x8b00c68bcf8bcf | \x | \x8b00c68bcf8bcf | character with byte sequence 0x8b 0x00 in encoding "MULE_INTERNAL" has no equivalent in encoding "ISO_8859_5" -(10 rows) - -select description, inbytes, (test_conv(inbytes, 'mule_internal', 'sjis')).* from mic_inputs; - description | inbytes | result | errorat | error 
----------------------------+------------------+----------+------------------+-------------------------------------------------------------------------------------------------------------- - valid, pure ASCII | \x666f6f | \x666f6f | | - valid (in KOI8R) | \x8bc68bcf8bcf | \x | \x8bc68bcf8bcf | character with byte sequence 0x8b 0xc6 in encoding "MULE_INTERNAL" has no equivalent in encoding "SJIS" - invalid,incomplete char | \x8bc68bcf8b | \x | \x8bc68bcf8b | character with byte sequence 0x8b 0xc6 in encoding "MULE_INTERNAL" has no equivalent in encoding "SJIS" - valid (in SHIFT_JIS) | \x92bedd | \x8fdb | | - invalid, incomplete char) | \x92be | \x | \x92be | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0xbe - valid (in Big5) | \x666f6f95a3c1 | \x666f6f | \x95a3c1 | character with byte sequence 0x95 0xa3 0xc1 in encoding "MULE_INTERNAL" has no equivalent in encoding "SJIS" - invalid, incomplete char | \x666f6f95a3 | \x666f6f | \x95a3 | invalid byte sequence for encoding "MULE_INTERNAL": 0x95 0xa3 - invalid, NUL byte | \x9200bedd | \x | \x9200bedd | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0x00 0xbe - invalid, NUL byte | \x92bedd00 | \x8fdb | \x00 | invalid byte sequence for encoding "MULE_INTERNAL": 0x00 - invalid, NUL byte | \x8b00c68bcf8bcf | \x | \x8b00c68bcf8bcf | invalid byte sequence for encoding "MULE_INTERNAL": 0x8b 0x00 -(10 rows) - -select description, inbytes, (test_conv(inbytes, 'mule_internal', 'big5')).* from mic_inputs; - description | inbytes | result | errorat | error ----------------------------+------------------+--------------+------------------+-------------------------------------------------------------------------------------------------------------- - valid, pure ASCII | \x666f6f | \x666f6f | | - valid (in KOI8R) | \x8bc68bcf8bcf | \x | \x8bc68bcf8bcf | character with byte sequence 0x8b 0xc6 in encoding "MULE_INTERNAL" has no equivalent in encoding "BIG5" - invalid,incomplete char | \x8bc68bcf8b | \x | \x8bc68bcf8b | character with byte sequence 0x8b 0xc6 in encoding "MULE_INTERNAL" has no equivalent in encoding "BIG5" - valid (in SHIFT_JIS) | \x92bedd | \x | \x92bedd | character with byte sequence 0x92 0xbe 0xdd in encoding "MULE_INTERNAL" has no equivalent in encoding "BIG5" - invalid, incomplete char) | \x92be | \x | \x92be | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0xbe - valid (in Big5) | \x666f6f95a3c1 | \x666f6fa2a1 | | - invalid, incomplete char | \x666f6f95a3 | \x666f6f | \x95a3 | invalid byte sequence for encoding "MULE_INTERNAL": 0x95 0xa3 - invalid, NUL byte | \x9200bedd | \x | \x9200bedd | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0x00 0xbe - invalid, NUL byte | \x92bedd00 | \x | \x92bedd00 | character with byte sequence 0x92 0xbe 0xdd in encoding "MULE_INTERNAL" has no equivalent in encoding "BIG5" - invalid, NUL byte | \x8b00c68bcf8bcf | \x | \x8b00c68bcf8bcf | invalid byte sequence for encoding "MULE_INTERNAL": 0x8b 0x00 -(10 rows) - -select description, inbytes, (test_conv(inbytes, 'mule_internal', 'euc_jp')).* from mic_inputs; - description | inbytes | result | errorat | error ----------------------------+------------------+----------+------------------+---------------------------------------------------------------------------------------------------------------- - valid, pure ASCII | \x666f6f | \x666f6f | | - valid (in KOI8R) | \x8bc68bcf8bcf | \x | \x8bc68bcf8bcf | character with byte sequence 0x8b 0xc6 in encoding "MULE_INTERNAL" has no equivalent in encoding "EUC_JP" - 
invalid,incomplete char | \x8bc68bcf8b | \x | \x8bc68bcf8b | character with byte sequence 0x8b 0xc6 in encoding "MULE_INTERNAL" has no equivalent in encoding "EUC_JP" - valid (in SHIFT_JIS) | \x92bedd | \xbedd | | - invalid, incomplete char) | \x92be | \x | \x92be | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0xbe - valid (in Big5) | \x666f6f95a3c1 | \x666f6f | \x95a3c1 | character with byte sequence 0x95 0xa3 0xc1 in encoding "MULE_INTERNAL" has no equivalent in encoding "EUC_JP" - invalid, incomplete char | \x666f6f95a3 | \x666f6f | \x95a3 | invalid byte sequence for encoding "MULE_INTERNAL": 0x95 0xa3 - invalid, NUL byte | \x9200bedd | \x | \x9200bedd | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0x00 0xbe - invalid, NUL byte | \x92bedd00 | \xbedd | \x00 | invalid byte sequence for encoding "MULE_INTERNAL": 0x00 - invalid, NUL byte | \x8b00c68bcf8bcf | \x | \x8b00c68bcf8bcf | invalid byte sequence for encoding "MULE_INTERNAL": 0x8b 0x00 -(10 rows) - +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/truncate.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/truncate.out --- /Users/admin/pgsql/src/test/regress/expected/truncate.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/truncate.out 2024-12-13 13:20:12 @@ -1,594 +1,2 @@ --- Test basic TRUNCATE functionality. -CREATE TABLE truncate_a (col1 integer primary key); -INSERT INTO truncate_a VALUES (1); -INSERT INTO truncate_a VALUES (2); -SELECT * FROM truncate_a; - col1 ------- - 1 - 2 -(2 rows) - --- Roll truncate back -BEGIN; -TRUNCATE truncate_a; -ROLLBACK; -SELECT * FROM truncate_a; - col1 ------- - 1 - 2 -(2 rows) - --- Commit the truncate this time -BEGIN; -TRUNCATE truncate_a; -COMMIT; -SELECT * FROM truncate_a; - col1 ------- -(0 rows) - --- Test foreign-key checks -CREATE TABLE trunc_b (a int REFERENCES truncate_a); -CREATE TABLE trunc_c (a serial PRIMARY KEY); -CREATE TABLE trunc_d (a int REFERENCES trunc_c); -CREATE TABLE trunc_e (a int REFERENCES truncate_a, b int REFERENCES trunc_c); -TRUNCATE TABLE truncate_a; -- fail -ERROR: cannot truncate a table referenced in a foreign key constraint -DETAIL: Table "trunc_b" references "truncate_a". -HINT: Truncate table "trunc_b" at the same time, or use TRUNCATE ... CASCADE. -TRUNCATE TABLE truncate_a,trunc_b; -- fail -ERROR: cannot truncate a table referenced in a foreign key constraint -DETAIL: Table "trunc_e" references "truncate_a". -HINT: Truncate table "trunc_e" at the same time, or use TRUNCATE ... CASCADE. -TRUNCATE TABLE truncate_a,trunc_b,trunc_e; -- ok -TRUNCATE TABLE truncate_a,trunc_e; -- fail -ERROR: cannot truncate a table referenced in a foreign key constraint -DETAIL: Table "trunc_b" references "truncate_a". -HINT: Truncate table "trunc_b" at the same time, or use TRUNCATE ... CASCADE. -TRUNCATE TABLE trunc_c; -- fail -ERROR: cannot truncate a table referenced in a foreign key constraint -DETAIL: Table "trunc_d" references "trunc_c". -HINT: Truncate table "trunc_d" at the same time, or use TRUNCATE ... CASCADE. -TRUNCATE TABLE trunc_c,trunc_d; -- fail -ERROR: cannot truncate a table referenced in a foreign key constraint -DETAIL: Table "trunc_e" references "trunc_c". -HINT: Truncate table "trunc_e" at the same time, or use TRUNCATE ... 
CASCADE. -TRUNCATE TABLE trunc_c,trunc_d,trunc_e; -- ok -TRUNCATE TABLE trunc_c,trunc_d,trunc_e,truncate_a; -- fail -ERROR: cannot truncate a table referenced in a foreign key constraint -DETAIL: Table "trunc_b" references "truncate_a". -HINT: Truncate table "trunc_b" at the same time, or use TRUNCATE ... CASCADE. -TRUNCATE TABLE trunc_c,trunc_d,trunc_e,truncate_a,trunc_b; -- ok -TRUNCATE TABLE truncate_a RESTRICT; -- fail -ERROR: cannot truncate a table referenced in a foreign key constraint -DETAIL: Table "trunc_b" references "truncate_a". -HINT: Truncate table "trunc_b" at the same time, or use TRUNCATE ... CASCADE. -TRUNCATE TABLE truncate_a CASCADE; -- ok -NOTICE: truncate cascades to table "trunc_b" -NOTICE: truncate cascades to table "trunc_e" --- circular references -ALTER TABLE truncate_a ADD FOREIGN KEY (col1) REFERENCES trunc_c; --- Add some data to verify that truncating actually works ... -INSERT INTO trunc_c VALUES (1); -INSERT INTO truncate_a VALUES (1); -INSERT INTO trunc_b VALUES (1); -INSERT INTO trunc_d VALUES (1); -INSERT INTO trunc_e VALUES (1,1); -TRUNCATE TABLE trunc_c; -ERROR: cannot truncate a table referenced in a foreign key constraint -DETAIL: Table "truncate_a" references "trunc_c". -HINT: Truncate table "truncate_a" at the same time, or use TRUNCATE ... CASCADE. -TRUNCATE TABLE trunc_c,truncate_a; -ERROR: cannot truncate a table referenced in a foreign key constraint -DETAIL: Table "trunc_d" references "trunc_c". -HINT: Truncate table "trunc_d" at the same time, or use TRUNCATE ... CASCADE. -TRUNCATE TABLE trunc_c,truncate_a,trunc_d; -ERROR: cannot truncate a table referenced in a foreign key constraint -DETAIL: Table "trunc_e" references "trunc_c". -HINT: Truncate table "trunc_e" at the same time, or use TRUNCATE ... CASCADE. -TRUNCATE TABLE trunc_c,truncate_a,trunc_d,trunc_e; -ERROR: cannot truncate a table referenced in a foreign key constraint -DETAIL: Table "trunc_b" references "truncate_a". -HINT: Truncate table "trunc_b" at the same time, or use TRUNCATE ... CASCADE. -TRUNCATE TABLE trunc_c,truncate_a,trunc_d,trunc_e,trunc_b; --- Verify that truncating did actually work -SELECT * FROM truncate_a - UNION ALL - SELECT * FROM trunc_c - UNION ALL - SELECT * FROM trunc_b - UNION ALL - SELECT * FROM trunc_d; - col1 ------- -(0 rows) - -SELECT * FROM trunc_e; - a | b ----+--- -(0 rows) - --- Add data again to test TRUNCATE ... 
CASCADE -INSERT INTO trunc_c VALUES (1); -INSERT INTO truncate_a VALUES (1); -INSERT INTO trunc_b VALUES (1); -INSERT INTO trunc_d VALUES (1); -INSERT INTO trunc_e VALUES (1,1); -TRUNCATE TABLE trunc_c CASCADE; -- ok -NOTICE: truncate cascades to table "truncate_a" -NOTICE: truncate cascades to table "trunc_d" -NOTICE: truncate cascades to table "trunc_e" -NOTICE: truncate cascades to table "trunc_b" -SELECT * FROM truncate_a - UNION ALL - SELECT * FROM trunc_c - UNION ALL - SELECT * FROM trunc_b - UNION ALL - SELECT * FROM trunc_d; - col1 ------- -(0 rows) - -SELECT * FROM trunc_e; - a | b ----+--- -(0 rows) - -DROP TABLE truncate_a,trunc_c,trunc_b,trunc_d,trunc_e CASCADE; --- Test TRUNCATE with inheritance -CREATE TABLE trunc_f (col1 integer primary key); -INSERT INTO trunc_f VALUES (1); -INSERT INTO trunc_f VALUES (2); -CREATE TABLE trunc_fa (col2a text) INHERITS (trunc_f); -INSERT INTO trunc_fa VALUES (3, 'three'); -CREATE TABLE trunc_fb (col2b int) INHERITS (trunc_f); -INSERT INTO trunc_fb VALUES (4, 444); -CREATE TABLE trunc_faa (col3 text) INHERITS (trunc_fa); -INSERT INTO trunc_faa VALUES (5, 'five', 'FIVE'); -BEGIN; -SELECT * FROM trunc_f; - col1 ------- - 1 - 2 - 3 - 4 - 5 -(5 rows) - -TRUNCATE trunc_f; -SELECT * FROM trunc_f; - col1 ------- -(0 rows) - -ROLLBACK; -BEGIN; -SELECT * FROM trunc_f; - col1 ------- - 1 - 2 - 3 - 4 - 5 -(5 rows) - -TRUNCATE ONLY trunc_f; -SELECT * FROM trunc_f; - col1 ------- - 3 - 4 - 5 -(3 rows) - -ROLLBACK; -BEGIN; -SELECT * FROM trunc_f; - col1 ------- - 1 - 2 - 3 - 4 - 5 -(5 rows) - -SELECT * FROM trunc_fa; - col1 | col2a -------+------- - 3 | three - 5 | five -(2 rows) - -SELECT * FROM trunc_faa; - col1 | col2a | col3 -------+-------+------ - 5 | five | FIVE -(1 row) - -TRUNCATE ONLY trunc_fb, ONLY trunc_fa; -SELECT * FROM trunc_f; - col1 ------- - 1 - 2 - 5 -(3 rows) - -SELECT * FROM trunc_fa; - col1 | col2a -------+------- - 5 | five -(1 row) - -SELECT * FROM trunc_faa; - col1 | col2a | col3 -------+-------+------ - 5 | five | FIVE -(1 row) - -ROLLBACK; -BEGIN; -SELECT * FROM trunc_f; - col1 ------- - 1 - 2 - 3 - 4 - 5 -(5 rows) - -SELECT * FROM trunc_fa; - col1 | col2a -------+------- - 3 | three - 5 | five -(2 rows) - -SELECT * FROM trunc_faa; - col1 | col2a | col3 -------+-------+------ - 5 | five | FIVE -(1 row) - -TRUNCATE ONLY trunc_fb, trunc_fa; -SELECT * FROM trunc_f; - col1 ------- - 1 - 2 -(2 rows) - -SELECT * FROM trunc_fa; - col1 | col2a -------+------- -(0 rows) - -SELECT * FROM trunc_faa; - col1 | col2a | col3 -------+-------+------ -(0 rows) - -ROLLBACK; -DROP TABLE trunc_f CASCADE; -NOTICE: drop cascades to 3 other objects -DETAIL: drop cascades to table trunc_fa -drop cascades to table trunc_faa -drop cascades to table trunc_fb --- Test ON TRUNCATE triggers -CREATE TABLE trunc_trigger_test (f1 int, f2 text, f3 text); -CREATE TABLE trunc_trigger_log (tgop text, tglevel text, tgwhen text, - tgargv text, tgtable name, rowcount bigint); -CREATE FUNCTION trunctrigger() RETURNS trigger as $$ -declare c bigint; -begin - execute 'select count(*) from ' || quote_ident(tg_table_name) into c; - insert into trunc_trigger_log values - (TG_OP, TG_LEVEL, TG_WHEN, TG_ARGV[0], tg_table_name, c); - return null; -end; -$$ LANGUAGE plpgsql; --- basic before trigger -INSERT INTO trunc_trigger_test VALUES(1, 'foo', 'bar'), (2, 'baz', 'quux'); -CREATE TRIGGER t -BEFORE TRUNCATE ON trunc_trigger_test -FOR EACH STATEMENT -EXECUTE PROCEDURE trunctrigger('before trigger truncate'); -SELECT count(*) as "Row count in test table" FROM trunc_trigger_test; - 
Row count in test table -------------------------- - 2 -(1 row) - -SELECT * FROM trunc_trigger_log; - tgop | tglevel | tgwhen | tgargv | tgtable | rowcount -------+---------+--------+--------+---------+---------- -(0 rows) - -TRUNCATE trunc_trigger_test; -SELECT count(*) as "Row count in test table" FROM trunc_trigger_test; - Row count in test table -------------------------- - 0 -(1 row) - -SELECT * FROM trunc_trigger_log; - tgop | tglevel | tgwhen | tgargv | tgtable | rowcount -----------+-----------+--------+-------------------------+--------------------+---------- - TRUNCATE | STATEMENT | BEFORE | before trigger truncate | trunc_trigger_test | 2 -(1 row) - -DROP TRIGGER t ON trunc_trigger_test; -truncate trunc_trigger_log; --- same test with an after trigger -INSERT INTO trunc_trigger_test VALUES(1, 'foo', 'bar'), (2, 'baz', 'quux'); -CREATE TRIGGER tt -AFTER TRUNCATE ON trunc_trigger_test -FOR EACH STATEMENT -EXECUTE PROCEDURE trunctrigger('after trigger truncate'); -SELECT count(*) as "Row count in test table" FROM trunc_trigger_test; - Row count in test table -------------------------- - 2 -(1 row) - -SELECT * FROM trunc_trigger_log; - tgop | tglevel | tgwhen | tgargv | tgtable | rowcount -------+---------+--------+--------+---------+---------- -(0 rows) - -TRUNCATE trunc_trigger_test; -SELECT count(*) as "Row count in test table" FROM trunc_trigger_test; - Row count in test table -------------------------- - 0 -(1 row) - -SELECT * FROM trunc_trigger_log; - tgop | tglevel | tgwhen | tgargv | tgtable | rowcount -----------+-----------+--------+------------------------+--------------------+---------- - TRUNCATE | STATEMENT | AFTER | after trigger truncate | trunc_trigger_test | 0 -(1 row) - -DROP TABLE trunc_trigger_test; -DROP TABLE trunc_trigger_log; -DROP FUNCTION trunctrigger(); --- test TRUNCATE ... 
RESTART IDENTITY -CREATE SEQUENCE truncate_a_id1 START WITH 33; -CREATE TABLE truncate_a (id serial, - id1 integer default nextval('truncate_a_id1')); -ALTER SEQUENCE truncate_a_id1 OWNED BY truncate_a.id1; -INSERT INTO truncate_a DEFAULT VALUES; -INSERT INTO truncate_a DEFAULT VALUES; -SELECT * FROM truncate_a; - id | id1 -----+----- - 1 | 33 - 2 | 34 -(2 rows) - -TRUNCATE truncate_a; -INSERT INTO truncate_a DEFAULT VALUES; -INSERT INTO truncate_a DEFAULT VALUES; -SELECT * FROM truncate_a; - id | id1 -----+----- - 3 | 35 - 4 | 36 -(2 rows) - -TRUNCATE truncate_a RESTART IDENTITY; -INSERT INTO truncate_a DEFAULT VALUES; -INSERT INTO truncate_a DEFAULT VALUES; -SELECT * FROM truncate_a; - id | id1 -----+----- - 1 | 33 - 2 | 34 -(2 rows) - -CREATE TABLE truncate_b (id int GENERATED ALWAYS AS IDENTITY (START WITH 44)); -INSERT INTO truncate_b DEFAULT VALUES; -INSERT INTO truncate_b DEFAULT VALUES; -SELECT * FROM truncate_b; - id ----- - 44 - 45 -(2 rows) - -TRUNCATE truncate_b; -INSERT INTO truncate_b DEFAULT VALUES; -INSERT INTO truncate_b DEFAULT VALUES; -SELECT * FROM truncate_b; - id ----- - 46 - 47 -(2 rows) - -TRUNCATE truncate_b RESTART IDENTITY; -INSERT INTO truncate_b DEFAULT VALUES; -INSERT INTO truncate_b DEFAULT VALUES; -SELECT * FROM truncate_b; - id ----- - 44 - 45 -(2 rows) - --- check rollback of a RESTART IDENTITY operation -BEGIN; -TRUNCATE truncate_a RESTART IDENTITY; -INSERT INTO truncate_a DEFAULT VALUES; -SELECT * FROM truncate_a; - id | id1 -----+----- - 1 | 33 -(1 row) - -ROLLBACK; -INSERT INTO truncate_a DEFAULT VALUES; -INSERT INTO truncate_a DEFAULT VALUES; -SELECT * FROM truncate_a; - id | id1 -----+----- - 1 | 33 - 2 | 34 - 3 | 35 - 4 | 36 -(4 rows) - -DROP TABLE truncate_a; -SELECT nextval('truncate_a_id1'); -- fail, seq should have been dropped -ERROR: relation "truncate_a_id1" does not exist -LINE 1: SELECT nextval('truncate_a_id1'); - ^ --- partitioned table -CREATE TABLE truncparted (a int, b char) PARTITION BY LIST (a); --- error, can't truncate a partitioned table -TRUNCATE ONLY truncparted; -ERROR: cannot truncate only a partitioned table -HINT: Do not specify the ONLY keyword, or use TRUNCATE ONLY on the partitions directly. -CREATE TABLE truncparted1 PARTITION OF truncparted FOR VALUES IN (1); -INSERT INTO truncparted VALUES (1, 'a'); --- error, must truncate partitions -TRUNCATE ONLY truncparted; -ERROR: cannot truncate only a partitioned table -HINT: Do not specify the ONLY keyword, or use TRUNCATE ONLY on the partitions directly. -TRUNCATE truncparted; -DROP TABLE truncparted; --- foreign key on partitioned table: partition key is referencing column. 
--- Make sure truncate did execute on all tables -CREATE FUNCTION tp_ins_data() RETURNS void LANGUAGE plpgsql AS $$ - BEGIN - INSERT INTO truncprim VALUES (1), (100), (150); - INSERT INTO truncpart VALUES (1), (100), (150); - END -$$; -CREATE FUNCTION tp_chk_data(OUT pktb regclass, OUT pkval int, OUT fktb regclass, OUT fkval int) - RETURNS SETOF record LANGUAGE plpgsql AS $$ - BEGIN - RETURN QUERY SELECT - pk.tableoid::regclass, pk.a, fk.tableoid::regclass, fk.a - FROM truncprim pk FULL JOIN truncpart fk USING (a) - ORDER BY 2, 4; - END -$$; -CREATE TABLE truncprim (a int PRIMARY KEY); -CREATE TABLE truncpart (a int REFERENCES truncprim) - PARTITION BY RANGE (a); -CREATE TABLE truncpart_1 PARTITION OF truncpart FOR VALUES FROM (0) TO (100); -CREATE TABLE truncpart_2 PARTITION OF truncpart FOR VALUES FROM (100) TO (200) - PARTITION BY RANGE (a); -CREATE TABLE truncpart_2_1 PARTITION OF truncpart_2 FOR VALUES FROM (100) TO (150); -CREATE TABLE truncpart_2_d PARTITION OF truncpart_2 DEFAULT; -TRUNCATE TABLE truncprim; -- should fail -ERROR: cannot truncate a table referenced in a foreign key constraint -DETAIL: Table "truncpart" references "truncprim". -HINT: Truncate table "truncpart" at the same time, or use TRUNCATE ... CASCADE. -select tp_ins_data(); - tp_ins_data -------------- - -(1 row) - --- should truncate everything -TRUNCATE TABLE truncprim, truncpart; -select * from tp_chk_data(); - pktb | pkval | fktb | fkval -------+-------+------+------- -(0 rows) - -select tp_ins_data(); - tp_ins_data -------------- - -(1 row) - --- should truncate everything -TRUNCATE TABLE truncprim CASCADE; -NOTICE: truncate cascades to table "truncpart" -NOTICE: truncate cascades to table "truncpart_1" -NOTICE: truncate cascades to table "truncpart_2" -NOTICE: truncate cascades to table "truncpart_2_1" -NOTICE: truncate cascades to table "truncpart_2_d" -SELECT * FROM tp_chk_data(); - pktb | pkval | fktb | fkval -------+-------+------+------- -(0 rows) - -SELECT tp_ins_data(); - tp_ins_data -------------- - -(1 row) - --- should truncate all partitions -TRUNCATE TABLE truncpart; -SELECT * FROM tp_chk_data(); - pktb | pkval | fktb | fkval ------------+-------+------+------- - truncprim | 1 | | - truncprim | 100 | | - truncprim | 150 | | -(3 rows) - -DROP TABLE truncprim, truncpart; -DROP FUNCTION tp_ins_data(), tp_chk_data(); --- test cascade when referencing a partitioned table -CREATE TABLE trunc_a (a INT PRIMARY KEY) PARTITION BY RANGE (a); -CREATE TABLE trunc_a1 PARTITION OF trunc_a FOR VALUES FROM (0) TO (10); -CREATE TABLE trunc_a2 PARTITION OF trunc_a FOR VALUES FROM (10) TO (20) - PARTITION BY RANGE (a); -CREATE TABLE trunc_a21 PARTITION OF trunc_a2 FOR VALUES FROM (10) TO (12); -CREATE TABLE trunc_a22 PARTITION OF trunc_a2 FOR VALUES FROM (12) TO (16); -CREATE TABLE trunc_a2d PARTITION OF trunc_a2 DEFAULT; -CREATE TABLE trunc_a3 PARTITION OF trunc_a FOR VALUES FROM (20) TO (30); -INSERT INTO trunc_a VALUES (0), (5), (10), (15), (20), (25); --- truncate a partition cascading to a table -CREATE TABLE ref_b ( - b INT PRIMARY KEY, - a INT REFERENCES trunc_a(a) ON DELETE CASCADE -); -INSERT INTO ref_b VALUES (10, 0), (50, 5), (100, 10), (150, 15); -TRUNCATE TABLE trunc_a1 CASCADE; -NOTICE: truncate cascades to table "ref_b" -SELECT a FROM ref_b; - a ---- -(0 rows) - -DROP TABLE ref_b; --- truncate a partition cascading to a partitioned table -CREATE TABLE ref_c ( - c INT PRIMARY KEY, - a INT REFERENCES trunc_a(a) ON DELETE CASCADE -) PARTITION BY RANGE (c); -CREATE TABLE ref_c1 PARTITION OF ref_c FOR 
VALUES FROM (100) TO (200); -CREATE TABLE ref_c2 PARTITION OF ref_c FOR VALUES FROM (200) TO (300); -INSERT INTO ref_c VALUES (100, 10), (150, 15), (200, 20), (250, 25); -TRUNCATE TABLE trunc_a21 CASCADE; -NOTICE: truncate cascades to table "ref_c" -NOTICE: truncate cascades to table "ref_c1" -NOTICE: truncate cascades to table "ref_c2" -SELECT a as "from table ref_c" FROM ref_c; - from table ref_c ------------------- -(0 rows) - -SELECT a as "from table trunc_a" FROM trunc_a ORDER BY a; - from table trunc_a --------------------- - 15 - 20 - 25 -(3 rows) - -DROP TABLE trunc_a, ref_c; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/alter_table.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/alter_table.out --- /Users/admin/pgsql/src/test/regress/expected/alter_table.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/alter_table.out 2024-12-13 13:20:12 @@ -1,4740 +1,2 @@ --- --- ALTER_TABLE --- --- Clean up in case a prior regression run failed -SET client_min_messages TO 'warning'; -DROP ROLE IF EXISTS regress_alter_table_user1; -RESET client_min_messages; -CREATE USER regress_alter_table_user1; --- --- add attribute --- -CREATE TABLE attmp (initial int4); -COMMENT ON TABLE attmp_wrong IS 'table comment'; -ERROR: relation "attmp_wrong" does not exist -COMMENT ON TABLE attmp IS 'table comment'; -COMMENT ON TABLE attmp IS NULL; -ALTER TABLE attmp ADD COLUMN xmin integer; -- fails -ERROR: column name "xmin" conflicts with a system column name -ALTER TABLE attmp ADD COLUMN a int4 default 3; -ALTER TABLE attmp ADD COLUMN b name; -ALTER TABLE attmp ADD COLUMN c text; -ALTER TABLE attmp ADD COLUMN d float8; -ALTER TABLE attmp ADD COLUMN e float4; -ALTER TABLE attmp ADD COLUMN f int2; -ALTER TABLE attmp ADD COLUMN g polygon; -ALTER TABLE attmp ADD COLUMN i char; -ALTER TABLE attmp ADD COLUMN k int4; -ALTER TABLE attmp ADD COLUMN l tid; -ALTER TABLE attmp ADD COLUMN m xid; -ALTER TABLE attmp ADD COLUMN n oidvector; ---ALTER TABLE attmp ADD COLUMN o lock; -ALTER TABLE attmp ADD COLUMN p boolean; -ALTER TABLE attmp ADD COLUMN q point; -ALTER TABLE attmp ADD COLUMN r lseg; -ALTER TABLE attmp ADD COLUMN s path; -ALTER TABLE attmp ADD COLUMN t box; -ALTER TABLE attmp ADD COLUMN v timestamp; -ALTER TABLE attmp ADD COLUMN w interval; -ALTER TABLE attmp ADD COLUMN x float8[]; -ALTER TABLE attmp ADD COLUMN y float4[]; -ALTER TABLE attmp ADD COLUMN z int2[]; -INSERT INTO attmp (a, b, c, d, e, f, g, i, k, l, m, n, p, q, r, s, t, - v, w, x, y, z) - VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', - 'c', - 314159, '(1,1)', '512', - '1 2 3 4 5 6 7 8', true, '(1.1,1.1)', '(4.1,4.1,3.1,3.1)', - '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', - 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}'); -SELECT * FROM attmp; - initial | a | b | c | d | e | f | g | i | k | l | m | n | p | q | r | s | t | v | w | x | y | z ----------+---+------+------+-----+-----+---+-----------------------+---+--------+-------+-----+-----------------+---+-----------+-----------------------+-----------------------------+---------------------+--------------------------+------------------+-----------+-----------+----------- - | 4 | name | text | 4.1 | 4.1 | 2 | ((4.1,4.1),(3.1,3.1)) | c | 314159 | (1,1) 
| 512 | 1 2 3 4 5 6 7 8 | t | (1.1,1.1) | [(4.1,4.1),(3.1,3.1)] | ((0,2),(4.1,4.1),(3.1,3.1)) | (4.1,4.1),(3.1,3.1) | Thu Jan 01 00:00:00 1970 | @ 1 hour 10 secs | {1,2,3,4} | {1,2,3,4} | {1,2,3,4} -(1 row) - -DROP TABLE attmp; --- the wolf bug - schema mods caused inconsistent row descriptors -CREATE TABLE attmp ( - initial int4 -); -ALTER TABLE attmp ADD COLUMN a int4; -ALTER TABLE attmp ADD COLUMN b name; -ALTER TABLE attmp ADD COLUMN c text; -ALTER TABLE attmp ADD COLUMN d float8; -ALTER TABLE attmp ADD COLUMN e float4; -ALTER TABLE attmp ADD COLUMN f int2; -ALTER TABLE attmp ADD COLUMN g polygon; -ALTER TABLE attmp ADD COLUMN i char; -ALTER TABLE attmp ADD COLUMN k int4; -ALTER TABLE attmp ADD COLUMN l tid; -ALTER TABLE attmp ADD COLUMN m xid; -ALTER TABLE attmp ADD COLUMN n oidvector; ---ALTER TABLE attmp ADD COLUMN o lock; -ALTER TABLE attmp ADD COLUMN p boolean; -ALTER TABLE attmp ADD COLUMN q point; -ALTER TABLE attmp ADD COLUMN r lseg; -ALTER TABLE attmp ADD COLUMN s path; -ALTER TABLE attmp ADD COLUMN t box; -ALTER TABLE attmp ADD COLUMN v timestamp; -ALTER TABLE attmp ADD COLUMN w interval; -ALTER TABLE attmp ADD COLUMN x float8[]; -ALTER TABLE attmp ADD COLUMN y float4[]; -ALTER TABLE attmp ADD COLUMN z int2[]; -INSERT INTO attmp (a, b, c, d, e, f, g, i, k, l, m, n, p, q, r, s, t, - v, w, x, y, z) - VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', - 'c', - 314159, '(1,1)', '512', - '1 2 3 4 5 6 7 8', true, '(1.1,1.1)', '(4.1,4.1,3.1,3.1)', - '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', - 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}'); -SELECT * FROM attmp; - initial | a | b | c | d | e | f | g | i | k | l | m | n | p | q | r | s | t | v | w | x | y | z ----------+---+------+------+-----+-----+---+-----------------------+---+--------+-------+-----+-----------------+---+-----------+-----------------------+-----------------------------+---------------------+--------------------------+------------------+-----------+-----------+----------- - | 4 | name | text | 4.1 | 4.1 | 2 | ((4.1,4.1),(3.1,3.1)) | c | 314159 | (1,1) | 512 | 1 2 3 4 5 6 7 8 | t | (1.1,1.1) | [(4.1,4.1),(3.1,3.1)] | ((0,2),(4.1,4.1),(3.1,3.1)) | (4.1,4.1),(3.1,3.1) | Thu Jan 01 00:00:00 1970 | @ 1 hour 10 secs | {1,2,3,4} | {1,2,3,4} | {1,2,3,4} -(1 row) - -CREATE INDEX attmp_idx ON attmp (a, (d + e), b); -ALTER INDEX attmp_idx ALTER COLUMN 0 SET STATISTICS 1000; -ERROR: column number must be in range from 1 to 32767 -LINE 1: ALTER INDEX attmp_idx ALTER COLUMN 0 SET STATISTICS 1000; - ^ -ALTER INDEX attmp_idx ALTER COLUMN 1 SET STATISTICS 1000; -ERROR: cannot alter statistics on non-expression column "a" of index "attmp_idx" -HINT: Alter statistics on table column instead. -ALTER INDEX attmp_idx ALTER COLUMN 2 SET STATISTICS 1000; -\d+ attmp_idx - Index "public.attmp_idx" - Column | Type | Key? | Definition | Storage | Stats target ---------+------------------+------+------------+---------+-------------- - a | integer | yes | a | plain | - expr | double precision | yes | (d + e) | plain | 1000 - b | cstring | yes | b | plain | -btree, for table "public.attmp" - -ALTER INDEX attmp_idx ALTER COLUMN 3 SET STATISTICS 1000; -ERROR: cannot alter statistics on non-expression column "b" of index "attmp_idx" -HINT: Alter statistics on table column instead. 
-ALTER INDEX attmp_idx ALTER COLUMN 4 SET STATISTICS 1000; -ERROR: column number 4 of relation "attmp_idx" does not exist -ALTER INDEX attmp_idx ALTER COLUMN 2 SET STATISTICS -1; -DROP TABLE attmp; --- --- rename - check on both non-temp and temp tables --- -CREATE TABLE attmp (regtable int); -CREATE TEMP TABLE attmp (attmptable int); -ALTER TABLE attmp RENAME TO attmp_new; -SELECT * FROM attmp; - regtable ----------- -(0 rows) - -SELECT * FROM attmp_new; - attmptable ------------- -(0 rows) - -ALTER TABLE attmp RENAME TO attmp_new2; -SELECT * FROM attmp; -- should fail -ERROR: relation "attmp" does not exist -LINE 1: SELECT * FROM attmp; - ^ -SELECT * FROM attmp_new; - attmptable ------------- -(0 rows) - -SELECT * FROM attmp_new2; - regtable ----------- -(0 rows) - -DROP TABLE attmp_new; -DROP TABLE attmp_new2; --- check rename of partitioned tables and indexes also -CREATE TABLE part_attmp (a int primary key) partition by range (a); -CREATE TABLE part_attmp1 PARTITION OF part_attmp FOR VALUES FROM (0) TO (100); -ALTER INDEX part_attmp_pkey RENAME TO part_attmp_index; -ALTER INDEX part_attmp1_pkey RENAME TO part_attmp1_index; -ALTER TABLE part_attmp RENAME TO part_at2tmp; -ALTER TABLE part_attmp1 RENAME TO part_at2tmp1; -SET ROLE regress_alter_table_user1; -ALTER INDEX part_attmp_index RENAME TO fail; -ERROR: must be owner of index part_attmp_index -ALTER INDEX part_attmp1_index RENAME TO fail; -ERROR: must be owner of index part_attmp1_index -ALTER TABLE part_at2tmp RENAME TO fail; -ERROR: must be owner of table part_at2tmp -ALTER TABLE part_at2tmp1 RENAME TO fail; -ERROR: must be owner of table part_at2tmp1 -RESET ROLE; -DROP TABLE part_at2tmp; --- --- check renaming to a table's array type's autogenerated name --- (the array type's name should get out of the way) --- -CREATE TABLE attmp_array (id int); -CREATE TABLE attmp_array2 (id int); -SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype; - typname --------------- - _attmp_array -(1 row) - -SELECT typname FROM pg_type WHERE oid = 'attmp_array2[]'::regtype; - typname ---------------- - _attmp_array2 -(1 row) - -ALTER TABLE attmp_array2 RENAME TO _attmp_array; -SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype; - typname ---------------- - __attmp_array -(1 row) - -SELECT typname FROM pg_type WHERE oid = '_attmp_array[]'::regtype; - typname ------------------ - __attmp_array_1 -(1 row) - -DROP TABLE _attmp_array; -DROP TABLE attmp_array; --- renaming to table's own array type's name is an interesting corner case -CREATE TABLE attmp_array (id int); -SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype; - typname --------------- - _attmp_array -(1 row) - -ALTER TABLE attmp_array RENAME TO _attmp_array; -SELECT typname FROM pg_type WHERE oid = '_attmp_array[]'::regtype; - typname ---------------- - __attmp_array -(1 row) - -DROP TABLE _attmp_array; --- ALTER TABLE ... 
RENAME on non-table relations --- renaming indexes (FIXME: this should probably test the index's functionality) -ALTER INDEX IF EXISTS __onek_unique1 RENAME TO attmp_onek_unique1; -NOTICE: relation "__onek_unique1" does not exist, skipping -ALTER INDEX IF EXISTS __attmp_onek_unique1 RENAME TO onek_unique1; -NOTICE: relation "__attmp_onek_unique1" does not exist, skipping -ALTER INDEX onek_unique1 RENAME TO attmp_onek_unique1; -ALTER INDEX attmp_onek_unique1 RENAME TO onek_unique1; -SET ROLE regress_alter_table_user1; -ALTER INDEX onek_unique1 RENAME TO fail; -- permission denied -ERROR: must be owner of index onek_unique1 -RESET ROLE; --- rename statements with mismatching statement and object types -CREATE TABLE alter_idx_rename_test (a INT); -CREATE INDEX alter_idx_rename_test_idx ON alter_idx_rename_test (a); -CREATE TABLE alter_idx_rename_test_parted (a INT) PARTITION BY LIST (a); -CREATE INDEX alter_idx_rename_test_parted_idx ON alter_idx_rename_test_parted (a); -BEGIN; -ALTER INDEX alter_idx_rename_test RENAME TO alter_idx_rename_test_2; -ALTER INDEX alter_idx_rename_test_parted RENAME TO alter_idx_rename_test_parted_2; -SELECT relation::regclass, mode FROM pg_locks -WHERE pid = pg_backend_pid() AND locktype = 'relation' - AND relation::regclass::text LIKE 'alter\_idx%' -ORDER BY relation::regclass::text COLLATE "C"; - relation | mode ---------------------------------+--------------------- - alter_idx_rename_test_2 | AccessExclusiveLock - alter_idx_rename_test_parted_2 | AccessExclusiveLock -(2 rows) - -COMMIT; -BEGIN; -ALTER INDEX alter_idx_rename_test_idx RENAME TO alter_idx_rename_test_idx_2; -ALTER INDEX alter_idx_rename_test_parted_idx RENAME TO alter_idx_rename_test_parted_idx_2; -SELECT relation::regclass, mode FROM pg_locks -WHERE pid = pg_backend_pid() AND locktype = 'relation' - AND relation::regclass::text LIKE 'alter\_idx%' -ORDER BY relation::regclass::text COLLATE "C"; - relation | mode -------------------------------------+-------------------------- - alter_idx_rename_test_idx_2 | ShareUpdateExclusiveLock - alter_idx_rename_test_parted_idx_2 | ShareUpdateExclusiveLock -(2 rows) - -COMMIT; -BEGIN; -ALTER TABLE alter_idx_rename_test_idx_2 RENAME TO alter_idx_rename_test_idx_3; -ALTER TABLE alter_idx_rename_test_parted_idx_2 RENAME TO alter_idx_rename_test_parted_idx_3; -SELECT relation::regclass, mode FROM pg_locks -WHERE pid = pg_backend_pid() AND locktype = 'relation' - AND relation::regclass::text LIKE 'alter\_idx%' -ORDER BY relation::regclass::text COLLATE "C"; - relation | mode -------------------------------------+--------------------- - alter_idx_rename_test_idx_3 | AccessExclusiveLock - alter_idx_rename_test_parted_idx_3 | AccessExclusiveLock -(2 rows) - -COMMIT; -DROP TABLE alter_idx_rename_test_2; --- renaming views -CREATE VIEW attmp_view (unique1) AS SELECT unique1 FROM tenk1; -ALTER TABLE attmp_view RENAME TO attmp_view_new; -SET ROLE regress_alter_table_user1; -ALTER VIEW attmp_view_new RENAME TO fail; -- permission denied -ERROR: must be owner of view attmp_view_new -RESET ROLE; --- hack to ensure we get an indexscan here -set enable_seqscan to off; -set enable_bitmapscan to off; --- 5 values, sorted -SELECT unique1 FROM tenk1 WHERE unique1 < 5; - unique1 ---------- - 0 - 1 - 2 - 3 - 4 -(5 rows) - -reset enable_seqscan; -reset enable_bitmapscan; -DROP VIEW attmp_view_new; --- toast-like relation name -alter table stud_emp rename to pg_toast_stud_emp; -alter table pg_toast_stud_emp rename to stud_emp; --- renaming index should rename constraint as well 
-ALTER TABLE onek ADD CONSTRAINT onek_unique1_constraint UNIQUE (unique1); -ALTER INDEX onek_unique1_constraint RENAME TO onek_unique1_constraint_foo; -ALTER TABLE onek DROP CONSTRAINT onek_unique1_constraint_foo; --- renaming constraint -ALTER TABLE onek ADD CONSTRAINT onek_check_constraint CHECK (unique1 >= 0); -ALTER TABLE onek RENAME CONSTRAINT onek_check_constraint TO onek_check_constraint_foo; -ALTER TABLE onek DROP CONSTRAINT onek_check_constraint_foo; --- renaming constraint should rename index as well -ALTER TABLE onek ADD CONSTRAINT onek_unique1_constraint UNIQUE (unique1); -DROP INDEX onek_unique1_constraint; -- to see whether it's there -ERROR: cannot drop index onek_unique1_constraint because constraint onek_unique1_constraint on table onek requires it -HINT: You can drop constraint onek_unique1_constraint on table onek instead. -ALTER TABLE onek RENAME CONSTRAINT onek_unique1_constraint TO onek_unique1_constraint_foo; -DROP INDEX onek_unique1_constraint_foo; -- to see whether it's there -ERROR: cannot drop index onek_unique1_constraint_foo because constraint onek_unique1_constraint_foo on table onek requires it -HINT: You can drop constraint onek_unique1_constraint_foo on table onek instead. -ALTER TABLE onek DROP CONSTRAINT onek_unique1_constraint_foo; --- renaming constraints vs. inheritance -CREATE TABLE constraint_rename_test (a int CONSTRAINT con1 CHECK (a > 0), b int, c int); -\d constraint_rename_test - Table "public.constraint_rename_test" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | - c | integer | | | -Check constraints: - "con1" CHECK (a > 0) - -CREATE TABLE constraint_rename_test2 (a int CONSTRAINT con1 CHECK (a > 0), d int) INHERITS (constraint_rename_test); -NOTICE: merging column "a" with inherited definition -NOTICE: merging constraint "con1" with inherited definition -\d constraint_rename_test2 - Table "public.constraint_rename_test2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | - c | integer | | | - d | integer | | | -Check constraints: - "con1" CHECK (a > 0) -Inherits: constraint_rename_test - -ALTER TABLE constraint_rename_test2 RENAME CONSTRAINT con1 TO con1foo; -- fail -ERROR: cannot rename inherited constraint "con1" -ALTER TABLE ONLY constraint_rename_test RENAME CONSTRAINT con1 TO con1foo; -- fail -ERROR: inherited constraint "con1" must be renamed in child tables too -ALTER TABLE constraint_rename_test RENAME CONSTRAINT con1 TO con1foo; -- ok -\d constraint_rename_test - Table "public.constraint_rename_test" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | - c | integer | | | -Check constraints: - "con1foo" CHECK (a > 0) -Number of child tables: 1 (Use \d+ to list them.) 
- -\d constraint_rename_test2 - Table "public.constraint_rename_test2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | - c | integer | | | - d | integer | | | -Check constraints: - "con1foo" CHECK (a > 0) -Inherits: constraint_rename_test - -ALTER TABLE constraint_rename_test ADD CONSTRAINT con2 CHECK (b > 0) NO INHERIT; -ALTER TABLE ONLY constraint_rename_test RENAME CONSTRAINT con2 TO con2foo; -- ok -ALTER TABLE constraint_rename_test RENAME CONSTRAINT con2foo TO con2bar; -- ok -\d constraint_rename_test - Table "public.constraint_rename_test" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | - c | integer | | | -Check constraints: - "con1foo" CHECK (a > 0) - "con2bar" CHECK (b > 0) NO INHERIT -Number of child tables: 1 (Use \d+ to list them.) - -\d constraint_rename_test2 - Table "public.constraint_rename_test2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | - c | integer | | | - d | integer | | | -Check constraints: - "con1foo" CHECK (a > 0) -Inherits: constraint_rename_test - -ALTER TABLE constraint_rename_test ADD CONSTRAINT con3 PRIMARY KEY (a); -ALTER TABLE constraint_rename_test RENAME CONSTRAINT con3 TO con3foo; -- ok -\d constraint_rename_test - Table "public.constraint_rename_test" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | not null | - b | integer | | | - c | integer | | | -Indexes: - "con3foo" PRIMARY KEY, btree (a) -Check constraints: - "con1foo" CHECK (a > 0) - "con2bar" CHECK (b > 0) NO INHERIT -Number of child tables: 1 (Use \d+ to list them.) 
- -\d constraint_rename_test2 - Table "public.constraint_rename_test2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | not null | - b | integer | | | - c | integer | | | - d | integer | | | -Check constraints: - "con1foo" CHECK (a > 0) -Inherits: constraint_rename_test - -DROP TABLE constraint_rename_test2; -DROP TABLE constraint_rename_test; -ALTER TABLE IF EXISTS constraint_not_exist RENAME CONSTRAINT con3 TO con3foo; -- ok -NOTICE: relation "constraint_not_exist" does not exist, skipping -ALTER TABLE IF EXISTS constraint_rename_test ADD CONSTRAINT con4 UNIQUE (a); -NOTICE: relation "constraint_rename_test" does not exist, skipping --- renaming constraints with cache reset of target relation -CREATE TABLE constraint_rename_cache (a int, - CONSTRAINT chk_a CHECK (a > 0), - PRIMARY KEY (a)); -ALTER TABLE constraint_rename_cache - RENAME CONSTRAINT chk_a TO chk_a_new; -ALTER TABLE constraint_rename_cache - RENAME CONSTRAINT constraint_rename_cache_pkey TO constraint_rename_pkey_new; -CREATE TABLE like_constraint_rename_cache - (LIKE constraint_rename_cache INCLUDING ALL); -\d like_constraint_rename_cache - Table "public.like_constraint_rename_cache" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | not null | -Indexes: - "like_constraint_rename_cache_pkey" PRIMARY KEY, btree (a) -Check constraints: - "chk_a_new" CHECK (a > 0) - -DROP TABLE constraint_rename_cache; -DROP TABLE like_constraint_rename_cache; --- FOREIGN KEY CONSTRAINT adding TEST -CREATE TABLE attmp2 (a int primary key); -CREATE TABLE attmp3 (a int, b int); -CREATE TABLE attmp4 (a int, b int, unique(a,b)); -CREATE TABLE attmp5 (a int, b int); --- Insert rows into attmp2 (pktable) -INSERT INTO attmp2 values (1); -INSERT INTO attmp2 values (2); -INSERT INTO attmp2 values (3); -INSERT INTO attmp2 values (4); --- Insert rows into attmp3 -INSERT INTO attmp3 values (1,10); -INSERT INTO attmp3 values (1,20); -INSERT INTO attmp3 values (5,50); --- Try (and fail) to add constraint due to invalid source columns -ALTER TABLE attmp3 add constraint attmpconstr foreign key(c) references attmp2 match full; -ERROR: column "c" referenced in foreign key constraint does not exist --- Try (and fail) to add constraint due to invalid destination columns explicitly given -ALTER TABLE attmp3 add constraint attmpconstr foreign key(a) references attmp2(b) match full; -ERROR: column "b" referenced in foreign key constraint does not exist --- Try (and fail) to add constraint due to invalid data -ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full; -ERROR: insert or update on table "attmp3" violates foreign key constraint "attmpconstr" -DETAIL: Key (a)=(5) is not present in table "attmp2". --- Delete failing row -DELETE FROM attmp3 where a=5; --- Try (and succeed) -ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full; -ALTER TABLE attmp3 drop constraint attmpconstr; -INSERT INTO attmp3 values (5,50); --- Try NOT VALID and then VALIDATE CONSTRAINT, but fails. Delete failure then re-validate -ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full NOT VALID; -ALTER TABLE attmp3 validate constraint attmpconstr; -ERROR: insert or update on table "attmp3" violates foreign key constraint "attmpconstr" -DETAIL: Key (a)=(5) is not present in table "attmp2". 
--- Delete failing row -DELETE FROM attmp3 where a=5; --- Try (and succeed) and repeat to show it works on already valid constraint -ALTER TABLE attmp3 validate constraint attmpconstr; -ALTER TABLE attmp3 validate constraint attmpconstr; --- Try a non-verified CHECK constraint -ALTER TABLE attmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10); -- fail -ERROR: check constraint "b_greater_than_ten" of relation "attmp3" is violated by some row -ALTER TABLE attmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10) NOT VALID; -- succeeds -ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- fails -ERROR: check constraint "b_greater_than_ten" of relation "attmp3" is violated by some row -DELETE FROM attmp3 WHERE NOT b > 10; -ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds -ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds --- Test inherited NOT VALID CHECK constraints -select * from attmp3; - a | b ----+---- - 1 | 20 -(1 row) - -CREATE TABLE attmp6 () INHERITS (attmp3); -CREATE TABLE attmp7 () INHERITS (attmp3); -INSERT INTO attmp6 VALUES (6, 30), (7, 16); -ALTER TABLE attmp3 ADD CONSTRAINT b_le_20 CHECK (b <= 20) NOT VALID; -ALTER TABLE attmp3 VALIDATE CONSTRAINT b_le_20; -- fails -ERROR: check constraint "b_le_20" of relation "attmp6" is violated by some row -DELETE FROM attmp6 WHERE b > 20; -ALTER TABLE attmp3 VALIDATE CONSTRAINT b_le_20; -- succeeds --- An already validated constraint must not be revalidated -CREATE FUNCTION boo(int) RETURNS int IMMUTABLE STRICT LANGUAGE plpgsql AS $$ BEGIN RAISE NOTICE 'boo: %', $1; RETURN $1; END; $$; -INSERT INTO attmp7 VALUES (8, 18); -ALTER TABLE attmp7 ADD CONSTRAINT identity CHECK (b = boo(b)); -NOTICE: boo: 18 -ALTER TABLE attmp3 ADD CONSTRAINT IDENTITY check (b = boo(b)) NOT VALID; -NOTICE: merging constraint "identity" with inherited definition -ALTER TABLE attmp3 VALIDATE CONSTRAINT identity; -NOTICE: boo: 20 -NOTICE: boo: 16 --- A NO INHERIT constraint should not be looked for in children during VALIDATE CONSTRAINT -create table parent_noinh_convalid (a int); -create table child_noinh_convalid () inherits (parent_noinh_convalid); -insert into parent_noinh_convalid values (1); -insert into child_noinh_convalid values (1); -alter table parent_noinh_convalid add constraint check_a_is_2 check (a = 2) no inherit not valid; --- fail, because of the row in parent -alter table parent_noinh_convalid validate constraint check_a_is_2; -ERROR: check constraint "check_a_is_2" of relation "parent_noinh_convalid" is violated by some row -delete from only parent_noinh_convalid; --- ok (parent itself contains no violating rows) -alter table parent_noinh_convalid validate constraint check_a_is_2; -select convalidated from pg_constraint where conrelid = 'parent_noinh_convalid'::regclass and conname = 'check_a_is_2'; - convalidated --------------- - t -(1 row) - --- cleanup -drop table parent_noinh_convalid, child_noinh_convalid; --- Try (and fail) to create constraint from attmp5(a) to attmp4(a) - unique constraint on --- attmp4 is a,b -ALTER TABLE attmp5 add constraint attmpconstr foreign key(a) references attmp4(a) match full; -ERROR: there is no unique constraint matching given keys for referenced table "attmp4" -DROP TABLE attmp7; -DROP TABLE attmp6; -DROP TABLE attmp5; -DROP TABLE attmp4; -DROP TABLE attmp3; -DROP TABLE attmp2; --- NOT VALID with plan invalidation -- ensure we don't use a constraint for --- exclusion until validated -set constraint_exclusion TO 'partition'; -create table nv_parent (d date, 
check (false) no inherit not valid); --- not valid constraint added at creation time should automatically become valid -\d nv_parent - Table "public.nv_parent" - Column | Type | Collation | Nullable | Default ---------+------+-----------+----------+--------- - d | date | | | -Check constraints: - "nv_parent_check" CHECK (false) NO INHERIT - -create table nv_child_2010 () inherits (nv_parent); -create table nv_child_2011 () inherits (nv_parent); -alter table nv_child_2010 add check (d between '2010-01-01'::date and '2010-12-31'::date) not valid; -alter table nv_child_2011 add check (d between '2011-01-01'::date and '2011-12-31'::date) not valid; -explain (costs off) select * from nv_parent where d between '2011-08-01' and '2011-08-31'; - QUERY PLAN ---------------------------------------------------------------------------- - Append - -> Seq Scan on nv_parent nv_parent_1 - Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date)) - -> Seq Scan on nv_child_2010 nv_parent_2 - Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date)) - -> Seq Scan on nv_child_2011 nv_parent_3 - Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date)) -(7 rows) - -create table nv_child_2009 (check (d between '2009-01-01'::date and '2009-12-31'::date)) inherits (nv_parent); -explain (costs off) select * from nv_parent where d between '2011-08-01'::date and '2011-08-31'::date; - QUERY PLAN ---------------------------------------------------------------------------- - Append - -> Seq Scan on nv_parent nv_parent_1 - Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date)) - -> Seq Scan on nv_child_2010 nv_parent_2 - Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date)) - -> Seq Scan on nv_child_2011 nv_parent_3 - Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date)) -(7 rows) - -explain (costs off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date; - QUERY PLAN ---------------------------------------------------------------------------- - Append - -> Seq Scan on nv_parent nv_parent_1 - Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date)) - -> Seq Scan on nv_child_2010 nv_parent_2 - Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date)) - -> Seq Scan on nv_child_2011 nv_parent_3 - Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date)) - -> Seq Scan on nv_child_2009 nv_parent_4 - Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date)) -(9 rows) - --- after validation, the constraint should be used -alter table nv_child_2011 VALIDATE CONSTRAINT nv_child_2011_d_check; -explain (costs off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date; - QUERY PLAN ---------------------------------------------------------------------------- - Append - -> Seq Scan on nv_parent nv_parent_1 - Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date)) - -> Seq Scan on nv_child_2010 nv_parent_2 - Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date)) - -> Seq Scan on nv_child_2009 nv_parent_3 - Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date)) -(7 rows) - --- add an inherited NOT VALID constraint -alter table nv_parent add check (d between '2001-01-01'::date and '2099-12-31'::date) not valid; -\d nv_child_2009 - Table "public.nv_child_2009" - Column | Type | Collation | Nullable | Default ---------+------+-----------+----------+--------- - d | date | | | -Check constraints: - "nv_child_2009_d_check" CHECK (d >= '01-01-2009'::date AND d <= 
'12-31-2009'::date) - "nv_parent_d_check" CHECK (d >= '01-01-2001'::date AND d <= '12-31-2099'::date) NOT VALID -Inherits: nv_parent - --- we leave nv_parent and children around to help test pg_dump logic --- Foreign key adding test with mixed types --- Note: these tables are TEMP to avoid name conflicts when this test --- is run in parallel with foreign_key.sql. -CREATE TEMP TABLE PKTABLE (ptest1 int PRIMARY KEY); -INSERT INTO PKTABLE VALUES(42); -CREATE TEMP TABLE FKTABLE (ftest1 inet); --- This next should fail, because int=inet does not exist -ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable; -ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented -DETAIL: Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: inet and integer. --- This should also fail for the same reason, but here we --- give the column name -ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable(ptest1); -ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented -DETAIL: Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: inet and integer. -DROP TABLE FKTABLE; --- This should succeed, even though they are different types, --- because int=int8 exists and is a member of the integer opfamily -CREATE TEMP TABLE FKTABLE (ftest1 int8); -ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable; --- Check it actually works -INSERT INTO FKTABLE VALUES(42); -- should succeed -INSERT INTO FKTABLE VALUES(43); -- should fail -ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey" -DETAIL: Key (ftest1)=(43) is not present in table "pktable". -DROP TABLE FKTABLE; --- This should fail, because we'd have to cast numeric to int which is --- not an implicit coercion (or use numeric=numeric, but that's not part --- of the integer opfamily) -CREATE TEMP TABLE FKTABLE (ftest1 numeric); -ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable; -ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented -DETAIL: Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: numeric and integer. -DROP TABLE FKTABLE; -DROP TABLE PKTABLE; --- On the other hand, this should work because int implicitly promotes to --- numeric, and we allow promotion on the FK side -CREATE TEMP TABLE PKTABLE (ptest1 numeric PRIMARY KEY); -INSERT INTO PKTABLE VALUES(42); -CREATE TEMP TABLE FKTABLE (ftest1 int); -ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable; --- Check it actually works -INSERT INTO FKTABLE VALUES(42); -- should succeed -INSERT INTO FKTABLE VALUES(43); -- should fail -ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey" -DETAIL: Key (ftest1)=(43) is not present in table "pktable". -DROP TABLE FKTABLE; -DROP TABLE PKTABLE; -CREATE TEMP TABLE PKTABLE (ptest1 int, ptest2 inet, - PRIMARY KEY(ptest1, ptest2)); --- This should fail, because we just chose really odd types -CREATE TEMP TABLE FKTABLE (ftest1 cidr, ftest2 timestamp); -ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2) references pktable; -ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented -DETAIL: Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: cidr and integer. -DROP TABLE FKTABLE; --- Again, so should this... 
-CREATE TEMP TABLE FKTABLE (ftest1 cidr, ftest2 timestamp); -ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2) - references pktable(ptest1, ptest2); -ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented -DETAIL: Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: cidr and integer. -DROP TABLE FKTABLE; --- This fails because we mixed up the column ordering -CREATE TEMP TABLE FKTABLE (ftest1 int, ftest2 inet); -ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2) - references pktable(ptest2, ptest1); -ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented -DETAIL: Key columns "ftest1" of the referencing table and "ptest2" of the referenced table are of incompatible types: integer and inet. --- As does this... -ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest2, ftest1) - references pktable(ptest1, ptest2); -ERROR: foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented -DETAIL: Key columns "ftest2" of the referencing table and "ptest1" of the referenced table are of incompatible types: inet and integer. -DROP TABLE FKTABLE; -DROP TABLE PKTABLE; --- Test that ALTER CONSTRAINT updates trigger deferrability properly -CREATE TEMP TABLE PKTABLE (ptest1 int primary key); -CREATE TEMP TABLE FKTABLE (ftest1 int); -ALTER TABLE FKTABLE ADD CONSTRAINT fknd FOREIGN KEY(ftest1) REFERENCES pktable - ON DELETE CASCADE ON UPDATE NO ACTION NOT DEFERRABLE; -ALTER TABLE FKTABLE ADD CONSTRAINT fkdd FOREIGN KEY(ftest1) REFERENCES pktable - ON DELETE CASCADE ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED; -ALTER TABLE FKTABLE ADD CONSTRAINT fkdi FOREIGN KEY(ftest1) REFERENCES pktable - ON DELETE CASCADE ON UPDATE NO ACTION DEFERRABLE INITIALLY IMMEDIATE; -ALTER TABLE FKTABLE ADD CONSTRAINT fknd2 FOREIGN KEY(ftest1) REFERENCES pktable - ON DELETE CASCADE ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED; -ALTER TABLE FKTABLE ALTER CONSTRAINT fknd2 NOT DEFERRABLE; -ALTER TABLE FKTABLE ADD CONSTRAINT fkdd2 FOREIGN KEY(ftest1) REFERENCES pktable - ON DELETE CASCADE ON UPDATE NO ACTION NOT DEFERRABLE; -ALTER TABLE FKTABLE ALTER CONSTRAINT fkdd2 DEFERRABLE INITIALLY DEFERRED; -ALTER TABLE FKTABLE ADD CONSTRAINT fkdi2 FOREIGN KEY(ftest1) REFERENCES pktable - ON DELETE CASCADE ON UPDATE NO ACTION NOT DEFERRABLE; -ALTER TABLE FKTABLE ALTER CONSTRAINT fkdi2 DEFERRABLE INITIALLY IMMEDIATE; -SELECT conname, tgfoid::regproc, tgtype, tgdeferrable, tginitdeferred -FROM pg_trigger JOIN pg_constraint con ON con.oid = tgconstraint -WHERE tgrelid = 'pktable'::regclass -ORDER BY 1,2,3; - conname | tgfoid | tgtype | tgdeferrable | tginitdeferred ----------+------------------------+--------+--------------+---------------- - fkdd | "RI_FKey_cascade_del" | 9 | f | f - fkdd | "RI_FKey_noaction_upd" | 17 | t | t - fkdd2 | "RI_FKey_cascade_del" | 9 | f | f - fkdd2 | "RI_FKey_noaction_upd" | 17 | t | t - fkdi | "RI_FKey_cascade_del" | 9 | f | f - fkdi | "RI_FKey_noaction_upd" | 17 | t | f - fkdi2 | "RI_FKey_cascade_del" | 9 | f | f - fkdi2 | "RI_FKey_noaction_upd" | 17 | t | f - fknd | "RI_FKey_cascade_del" | 9 | f | f - fknd | "RI_FKey_noaction_upd" | 17 | f | f - fknd2 | "RI_FKey_cascade_del" | 9 | f | f - fknd2 | "RI_FKey_noaction_upd" | 17 | f | f -(12 rows) - -SELECT conname, tgfoid::regproc, tgtype, tgdeferrable, tginitdeferred -FROM pg_trigger JOIN pg_constraint con ON con.oid = tgconstraint -WHERE tgrelid = 'fktable'::regclass -ORDER BY 1,2,3; - conname | tgfoid | tgtype | tgdeferrable | tginitdeferred 
----------+---------------------+--------+--------------+---------------- - fkdd | "RI_FKey_check_ins" | 5 | t | t - fkdd | "RI_FKey_check_upd" | 17 | t | t - fkdd2 | "RI_FKey_check_ins" | 5 | t | t - fkdd2 | "RI_FKey_check_upd" | 17 | t | t - fkdi | "RI_FKey_check_ins" | 5 | t | f - fkdi | "RI_FKey_check_upd" | 17 | t | f - fkdi2 | "RI_FKey_check_ins" | 5 | t | f - fkdi2 | "RI_FKey_check_upd" | 17 | t | f - fknd | "RI_FKey_check_ins" | 5 | f | f - fknd | "RI_FKey_check_upd" | 17 | f | f - fknd2 | "RI_FKey_check_ins" | 5 | f | f - fknd2 | "RI_FKey_check_upd" | 17 | f | f -(12 rows) - --- temp tables should go away by themselves, need not drop them. --- test check constraint adding -create table atacc1 ( test int ); --- add a check constraint -alter table atacc1 add constraint atacc_test1 check (test>3); --- should fail -insert into atacc1 (test) values (2); -ERROR: new row for relation "atacc1" violates check constraint "atacc_test1" -DETAIL: Failing row contains (2). --- should succeed -insert into atacc1 (test) values (4); -drop table atacc1; --- let's do one where the check fails when added -create table atacc1 ( test int ); --- insert a soon to be failing row -insert into atacc1 (test) values (2); --- add a check constraint (fails) -alter table atacc1 add constraint atacc_test1 check (test>3); -ERROR: check constraint "atacc_test1" of relation "atacc1" is violated by some row -insert into atacc1 (test) values (4); -drop table atacc1; --- let's do one where the check fails because the column doesn't exist -create table atacc1 ( test int ); --- add a check constraint (fails) -alter table atacc1 add constraint atacc_test1 check (test1>3); -ERROR: column "test1" does not exist -HINT: Perhaps you meant to reference the column "atacc1.test". -drop table atacc1; --- something a little more complicated -create table atacc1 ( test int, test2 int, test3 int); --- add a check constraint (fails) -alter table atacc1 add constraint atacc_test1 check (test+test2<test3*4); --- should fail -insert into atacc1 (test,test2,test3) values (4,4,2); -ERROR: new row for relation "atacc1" violates check constraint "atacc_test1" -DETAIL: Failing row contains (4, 4, 2). --- should succeed -insert into atacc1 (test,test2,test3) values (4,4,5); -drop table atacc1; --- lets do some naming tests -create table atacc1 (test int check (test>3), test2 int); -alter table atacc1 add check (test2>test); --- should fail for $2 -insert into atacc1 (test2, test) values (3, 4); -ERROR: new row for relation "atacc1" violates check constraint "atacc1_check" -DETAIL: Failing row contains (4, 3). -drop table atacc1; --- inheritance related tests -create table atacc1 (test int); -create table atacc2 (test2 int); -create table atacc3 (test3 int) inherits (atacc1, atacc2); -alter table atacc2 add constraint foo check (test2>0); --- fail and then succeed on atacc2 -insert into atacc2 (test2) values (-3); -ERROR: new row for relation "atacc2" violates check constraint "foo" -DETAIL: Failing row contains (-3). -insert into atacc2 (test2) values (3); --- fail and then succeed on atacc3 -insert into atacc3 (test2) values (-3); -ERROR: new row for relation "atacc3" violates check constraint "foo" -DETAIL: Failing row contains (null, -3, null).
-insert into atacc3 (test2) values (3); -drop table atacc3; -drop table atacc2; -drop table atacc1; --- same things with one created with INHERIT -create table atacc1 (test int); -create table atacc2 (test2 int); -create table atacc3 (test3 int) inherits (atacc1, atacc2); -alter table atacc3 no inherit atacc2; --- fail -alter table atacc3 no inherit atacc2; -ERROR: relation "atacc2" is not a parent of relation "atacc3" --- make sure it really isn't a child -insert into atacc3 (test2) values (3); -select test2 from atacc2; - test2 -------- -(0 rows) - --- fail due to missing constraint -alter table atacc2 add constraint foo check (test2>0); -alter table atacc3 inherit atacc2; -ERROR: child table is missing constraint "foo" --- fail due to missing column -alter table atacc3 rename test2 to testx; -alter table atacc3 inherit atacc2; -ERROR: child table is missing column "test2" --- fail due to mismatched data type -alter table atacc3 add test2 bool; -alter table atacc3 inherit atacc2; -ERROR: child table "atacc3" has different type for column "test2" -alter table atacc3 drop test2; --- succeed -alter table atacc3 add test2 int; -update atacc3 set test2 = 4 where test2 is null; -alter table atacc3 add constraint foo check (test2>0); -alter table atacc3 inherit atacc2; --- fail due to duplicates and circular inheritance -alter table atacc3 inherit atacc2; -ERROR: relation "atacc2" would be inherited from more than once -alter table atacc2 inherit atacc3; -ERROR: circular inheritance not allowed -DETAIL: "atacc3" is already a child of "atacc2". -alter table atacc2 inherit atacc2; -ERROR: circular inheritance not allowed -DETAIL: "atacc2" is already a child of "atacc2". --- test that we really are a child now (should see 4 not 3 and cascade should go through) -select test2 from atacc2; - test2 -------- - 4 -(1 row) - -drop table atacc2 cascade; -NOTICE: drop cascades to table atacc3 -drop table atacc1; --- adding only to a parent is allowed as of 9.2 -create table atacc1 (test int); -create table atacc2 (test2 int) inherits (atacc1); --- ok: -alter table atacc1 add constraint foo check (test>0) no inherit; --- check constraint is not there on child -insert into atacc2 (test) values (-3); --- check constraint is there on parent -insert into atacc1 (test) values (-3); -ERROR: new row for relation "atacc1" violates check constraint "foo" -DETAIL: Failing row contains (-3). -insert into atacc1 (test) values (3); --- fail, violating row: -alter table atacc2 add constraint foo check (test>0) no inherit; -ERROR: check constraint "foo" of relation "atacc2" is violated by some row -drop table atacc2; -drop table atacc1; --- test unique constraint adding -create table atacc1 ( test int ) ; --- add a unique constraint -alter table atacc1 add constraint atacc_test1 unique (test); --- insert first value -insert into atacc1 (test) values (2); --- should fail -insert into atacc1 (test) values (2); -ERROR: duplicate key value violates unique constraint "atacc_test1" -DETAIL: Key (test)=(2) already exists. --- should succeed -insert into atacc1 (test) values (4); --- try to create duplicates via alter table using - should fail -alter table atacc1 alter column test type integer using 0; -ERROR: could not create unique index "atacc_test1" -DETAIL: Key (test)=(0) is duplicated. 
-drop table atacc1; --- let's do one where the unique constraint fails when added -create table atacc1 ( test int ); --- insert soon to be failing rows -insert into atacc1 (test) values (2); -insert into atacc1 (test) values (2); --- add a unique constraint (fails) -alter table atacc1 add constraint atacc_test1 unique (test); -ERROR: could not create unique index "atacc_test1" -DETAIL: Key (test)=(2) is duplicated. -insert into atacc1 (test) values (3); -drop table atacc1; --- let's do one where the unique constraint fails --- because the column doesn't exist -create table atacc1 ( test int ); --- add a unique constraint (fails) -alter table atacc1 add constraint atacc_test1 unique (test1); -ERROR: column "test1" named in key does not exist -drop table atacc1; --- something a little more complicated -create table atacc1 ( test int, test2 int); --- add a unique constraint -alter table atacc1 add constraint atacc_test1 unique (test, test2); --- insert initial value -insert into atacc1 (test,test2) values (4,4); --- should fail -insert into atacc1 (test,test2) values (4,4); -ERROR: duplicate key value violates unique constraint "atacc_test1" -DETAIL: Key (test, test2)=(4, 4) already exists. --- should all succeed -insert into atacc1 (test,test2) values (4,5); -insert into atacc1 (test,test2) values (5,4); -insert into atacc1 (test,test2) values (5,5); -drop table atacc1; --- lets do some naming tests -create table atacc1 (test int, test2 int, unique(test)); -alter table atacc1 add unique (test2); --- should fail for @@ second one @@ -insert into atacc1 (test2, test) values (3, 3); -insert into atacc1 (test2, test) values (2, 3); -ERROR: duplicate key value violates unique constraint "atacc1_test_key" -DETAIL: Key (test)=(3) already exists. -drop table atacc1; --- test primary key constraint adding -create table atacc1 ( id serial, test int) ; --- add a primary key constraint -alter table atacc1 add constraint atacc_test1 primary key (test); --- insert first value -insert into atacc1 (test) values (2); --- should fail -insert into atacc1 (test) values (2); -ERROR: duplicate key value violates unique constraint "atacc_test1" -DETAIL: Key (test)=(2) already exists. --- should succeed -insert into atacc1 (test) values (4); --- inserting NULL should fail -insert into atacc1 (test) values(NULL); -ERROR: null value in column "test" of relation "atacc1" violates not-null constraint -DETAIL: Failing row contains (4, null). --- try adding a second primary key (should fail) -alter table atacc1 add constraint atacc_oid1 primary key(id); -ERROR: multiple primary keys for table "atacc1" are not allowed --- drop first primary key constraint -alter table atacc1 drop constraint atacc_test1 restrict; --- try adding a primary key on oid (should succeed) -alter table atacc1 add constraint atacc_oid1 primary key(id); -drop table atacc1; --- let's do one where the primary key constraint fails when added -create table atacc1 ( test int ); --- insert soon to be failing rows -insert into atacc1 (test) values (2); -insert into atacc1 (test) values (2); --- add a primary key (fails) -alter table atacc1 add constraint atacc_test1 primary key (test); -ERROR: could not create unique index "atacc_test1" -DETAIL: Key (test)=(2) is duplicated. 
-insert into atacc1 (test) values (3); -drop table atacc1; --- let's do another one where the primary key constraint fails when added -create table atacc1 ( test int ); --- insert soon to be failing row -insert into atacc1 (test) values (NULL); --- add a primary key (fails) -alter table atacc1 add constraint atacc_test1 primary key (test); -ERROR: column "test" of relation "atacc1" contains null values -insert into atacc1 (test) values (3); -drop table atacc1; --- let's do one where the primary key constraint fails --- because the column doesn't exist -create table atacc1 ( test int ); --- add a primary key constraint (fails) -alter table atacc1 add constraint atacc_test1 primary key (test1); -ERROR: column "test1" of relation "atacc1" does not exist -drop table atacc1; --- adding a new column as primary key to a non-empty table. --- should fail unless the column has a non-null default value. -create table atacc1 ( test int ); -insert into atacc1 (test) values (0); --- add a primary key column without a default (fails). -alter table atacc1 add column test2 int primary key; -ERROR: column "test2" of relation "atacc1" contains null values --- now add a primary key column with a default (succeeds). -alter table atacc1 add column test2 int default 0 primary key; -drop table atacc1; --- this combination used to have order-of-execution problems (bug #15580) -create table atacc1 (a int); -insert into atacc1 values(1); -alter table atacc1 - add column b float8 not null default random(), - add primary key(a); -drop table atacc1; --- additionally, we've seen issues with foreign key validation not being --- properly delayed until after a table rewrite. Check that works ok. -create table atacc1 (a int primary key); -alter table atacc1 add constraint atacc1_fkey foreign key (a) references atacc1 (a) not valid; -alter table atacc1 validate constraint atacc1_fkey, alter a type bigint; -drop table atacc1; --- we've also seen issues with check constraints being validated at the wrong --- time when there's a pending table rewrite. -create table atacc1 (a bigint, b int); -insert into atacc1 values(1,1); -alter table atacc1 add constraint atacc1_chk check(b = 1) not valid; -alter table atacc1 validate constraint atacc1_chk, alter a type int; -drop table atacc1; --- same as above, but ensure the constraint violation is detected -create table atacc1 (a bigint, b int); -insert into atacc1 values(1,2); -alter table atacc1 add constraint atacc1_chk check(b = 1) not valid; -alter table atacc1 validate constraint atacc1_chk, alter a type int; -ERROR: check constraint "atacc1_chk" of relation "atacc1" is violated by some row -drop table atacc1; --- something a little more complicated -create table atacc1 ( test int, test2 int); --- add a primary key constraint -alter table atacc1 add constraint atacc_test1 primary key (test, test2); --- try adding a second primary key - should fail -alter table atacc1 add constraint atacc_test2 primary key (test); -ERROR: multiple primary keys for table "atacc1" are not allowed --- insert initial value -insert into atacc1 (test,test2) values (4,4); --- should fail -insert into atacc1 (test,test2) values (4,4); -ERROR: duplicate key value violates unique constraint "atacc_test1" -DETAIL: Key (test, test2)=(4, 4) already exists. -insert into atacc1 (test,test2) values (NULL,3); -ERROR: null value in column "test" of relation "atacc1" violates not-null constraint -DETAIL: Failing row contains (null, 3). 
-insert into atacc1 (test,test2) values (3, NULL); -ERROR: null value in column "test2" of relation "atacc1" violates not-null constraint -DETAIL: Failing row contains (3, null). -insert into atacc1 (test,test2) values (NULL,NULL); -ERROR: null value in column "test" of relation "atacc1" violates not-null constraint -DETAIL: Failing row contains (null, null). --- should all succeed -insert into atacc1 (test,test2) values (4,5); -insert into atacc1 (test,test2) values (5,4); -insert into atacc1 (test,test2) values (5,5); -drop table atacc1; --- lets do some naming tests -create table atacc1 (test int, test2 int, primary key(test)); --- only first should succeed -insert into atacc1 (test2, test) values (3, 3); -insert into atacc1 (test2, test) values (2, 3); -ERROR: duplicate key value violates unique constraint "atacc1_pkey" -DETAIL: Key (test)=(3) already exists. -insert into atacc1 (test2, test) values (1, NULL); -ERROR: null value in column "test" of relation "atacc1" violates not-null constraint -DETAIL: Failing row contains (null, 1). -drop table atacc1; --- alter table / alter column [set/drop] not null tests --- try altering system catalogs, should fail -alter table pg_class alter column relname drop not null; -ERROR: permission denied: "pg_class" is a system catalog -alter table pg_class alter relname set not null; -ERROR: permission denied: "pg_class" is a system catalog --- try altering non-existent table, should fail -alter table non_existent alter column bar set not null; -ERROR: relation "non_existent" does not exist -alter table non_existent alter column bar drop not null; -ERROR: relation "non_existent" does not exist --- test setting columns to null and not null and vice versa --- test checking for null values and primary key -create table atacc1 (test int not null); -alter table atacc1 add constraint "atacc1_pkey" primary key (test); -\d atacc1 - Table "public.atacc1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - test | integer | | not null | -Indexes: - "atacc1_pkey" PRIMARY KEY, btree (test) - -alter table atacc1 alter column test drop not null; -ERROR: column "test" is in a primary key -\d atacc1 - Table "public.atacc1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - test | integer | | not null | -Indexes: - "atacc1_pkey" PRIMARY KEY, btree (test) - -alter table atacc1 drop constraint "atacc1_pkey"; -alter table atacc1 alter column test drop not null; -\d atacc1 - Table "public.atacc1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - test | integer | | | - -insert into atacc1 values (null); -alter table atacc1 alter test set not null; -ERROR: column "test" of relation "atacc1" contains null values -delete from atacc1; -alter table atacc1 alter test set not null; --- try altering a non-existent column, should fail -alter table atacc1 alter bar set not null; -ERROR: column "bar" of relation "atacc1" does not exist -alter table atacc1 alter bar drop not null; -ERROR: column "bar" of relation "atacc1" does not exist --- try creating a view and altering that, should fail -create view myview as select * from atacc1; -alter table myview alter column test drop not null; -ERROR: ALTER action ALTER COLUMN ... DROP NOT NULL cannot be performed on relation "myview" -DETAIL: This operation is not supported for views. -alter table myview alter column test set not null; -ERROR: ALTER action ALTER COLUMN ... 
SET NOT NULL cannot be performed on relation "myview" -DETAIL: This operation is not supported for views. -drop view myview; -drop table atacc1; --- set not null verified by constraints -create table atacc1 (test_a int, test_b int); -insert into atacc1 values (null, 1); --- constraint not cover all values, should fail -alter table atacc1 add constraint atacc1_constr_or check(test_a is not null or test_b < 10); -alter table atacc1 alter test_a set not null; -ERROR: column "test_a" of relation "atacc1" contains null values -alter table atacc1 drop constraint atacc1_constr_or; --- not valid constraint, should fail -alter table atacc1 add constraint atacc1_constr_invalid check(test_a is not null) not valid; -alter table atacc1 alter test_a set not null; -ERROR: column "test_a" of relation "atacc1" contains null values -alter table atacc1 drop constraint atacc1_constr_invalid; --- with valid constraint -update atacc1 set test_a = 1; -alter table atacc1 add constraint atacc1_constr_a_valid check(test_a is not null); -alter table atacc1 alter test_a set not null; -delete from atacc1; -insert into atacc1 values (2, null); -alter table atacc1 alter test_a drop not null; --- test multiple set not null at same time --- test_a checked by atacc1_constr_a_valid, test_b should fail by table scan -alter table atacc1 alter test_a set not null, alter test_b set not null; -ERROR: column "test_b" of relation "atacc1" contains null values --- commands order has no importance -alter table atacc1 alter test_b set not null, alter test_a set not null; -ERROR: column "test_b" of relation "atacc1" contains null values --- valid one by table scan, one by check constraints -update atacc1 set test_b = 1; -alter table atacc1 alter test_b set not null, alter test_a set not null; -alter table atacc1 alter test_a drop not null, alter test_b drop not null; --- both column has check constraints -alter table atacc1 add constraint atacc1_constr_b_valid check(test_b is not null); -alter table atacc1 alter test_b set not null, alter test_a set not null; -drop table atacc1; --- test inheritance -create table parent (a int); -create table child (b varchar(255)) inherits (parent); -alter table parent alter a set not null; -insert into parent values (NULL); -ERROR: null value in column "a" of relation "parent" violates not-null constraint -DETAIL: Failing row contains (null). -insert into child (a, b) values (NULL, 'foo'); -ERROR: null value in column "a" of relation "child" violates not-null constraint -DETAIL: Failing row contains (null, foo). 
-alter table parent alter a drop not null; -insert into parent values (NULL); -insert into child (a, b) values (NULL, 'foo'); -alter table only parent alter a set not null; -ERROR: column "a" of relation "parent" contains null values -alter table child alter a set not null; -ERROR: column "a" of relation "child" contains null values -drop table child; -drop table parent; --- test setting and removing default values -create table def_test ( - c1 int4 default 5, - c2 text default 'initial_default' -); -insert into def_test default values; -alter table def_test alter column c1 drop default; -insert into def_test default values; -alter table def_test alter column c2 drop default; -insert into def_test default values; -alter table def_test alter column c1 set default 10; -alter table def_test alter column c2 set default 'new_default'; -insert into def_test default values; -select * from def_test; - c1 | c2 -----+----------------- - 5 | initial_default - | initial_default - | - 10 | new_default -(4 rows) - --- set defaults to an incorrect type: this should fail -alter table def_test alter column c1 set default 'wrong_datatype'; -ERROR: invalid input syntax for type integer: "wrong_datatype" -alter table def_test alter column c2 set default 20; --- set defaults on a non-existent column: this should fail -alter table def_test alter column c3 set default 30; -ERROR: column "c3" of relation "def_test" does not exist --- set defaults on views: we need to create a view, add a rule --- to allow insertions into it, and then alter the view to add --- a default -create view def_view_test as select * from def_test; -create rule def_view_test_ins as - on insert to def_view_test - do instead insert into def_test select new.*; -insert into def_view_test default values; -alter table def_view_test alter column c1 set default 45; -insert into def_view_test default values; -alter table def_view_test alter column c2 set default 'view_default'; -insert into def_view_test default values; -select * from def_view_test; - c1 | c2 -----+----------------- - 5 | initial_default - | initial_default - | - 10 | new_default - | - 45 | - 45 | view_default -(7 rows) - -drop rule def_view_test_ins on def_view_test; -drop view def_view_test; -drop table def_test; --- alter table / drop column tests --- try altering system catalogs, should fail -alter table pg_class drop column relname; -ERROR: permission denied: "pg_class" is a system catalog --- try altering non-existent table, should fail -alter table nosuchtable drop column bar; -ERROR: relation "nosuchtable" does not exist --- test dropping columns -create table atacc1 (a int4 not null, b int4, c int4 not null, d int4); -insert into atacc1 values (1, 2, 3, 4); -alter table atacc1 drop a; -alter table atacc1 drop a; -ERROR: column "a" of relation "atacc1" does not exist --- SELECTs -select * from atacc1; - b | c | d ----+---+--- - 2 | 3 | 4 -(1 row) - -select * from atacc1 order by a; -ERROR: column "a" does not exist -LINE 1: select * from atacc1 order by a; - ^ -select * from atacc1 order by "........pg.dropped.1........"; -ERROR: column "........pg.dropped.1........" does not exist -LINE 1: select * from atacc1 order by "........pg.dropped.1........"... - ^ -select * from atacc1 group by a; -ERROR: column "a" does not exist -LINE 1: select * from atacc1 group by a; - ^ -select * from atacc1 group by "........pg.dropped.1........"; -ERROR: column "........pg.dropped.1........" does not exist -LINE 1: select * from atacc1 group by "........pg.dropped.1........"... 
- ^ -select atacc1.* from atacc1; - b | c | d ----+---+--- - 2 | 3 | 4 -(1 row) - -select a from atacc1; -ERROR: column "a" does not exist -LINE 1: select a from atacc1; - ^ -select atacc1.a from atacc1; -ERROR: column atacc1.a does not exist -LINE 1: select atacc1.a from atacc1; - ^ -select b,c,d from atacc1; - b | c | d ----+---+--- - 2 | 3 | 4 -(1 row) - -select a,b,c,d from atacc1; -ERROR: column "a" does not exist -LINE 1: select a,b,c,d from atacc1; - ^ -select * from atacc1 where a = 1; -ERROR: column "a" does not exist -LINE 1: select * from atacc1 where a = 1; - ^ -select "........pg.dropped.1........" from atacc1; -ERROR: column "........pg.dropped.1........" does not exist -LINE 1: select "........pg.dropped.1........" from atacc1; - ^ -select atacc1."........pg.dropped.1........" from atacc1; -ERROR: column atacc1.........pg.dropped.1........ does not exist -LINE 1: select atacc1."........pg.dropped.1........" from atacc1; - ^ -select "........pg.dropped.1........",b,c,d from atacc1; -ERROR: column "........pg.dropped.1........" does not exist -LINE 1: select "........pg.dropped.1........",b,c,d from atacc1; - ^ -select * from atacc1 where "........pg.dropped.1........" = 1; -ERROR: column "........pg.dropped.1........" does not exist -LINE 1: select * from atacc1 where "........pg.dropped.1........" = ... - ^ --- UPDATEs -update atacc1 set a = 3; -ERROR: column "a" of relation "atacc1" does not exist -LINE 1: update atacc1 set a = 3; - ^ -update atacc1 set b = 2 where a = 3; -ERROR: column "a" does not exist -LINE 1: update atacc1 set b = 2 where a = 3; - ^ -update atacc1 set "........pg.dropped.1........" = 3; -ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist -LINE 1: update atacc1 set "........pg.dropped.1........" = 3; - ^ -update atacc1 set b = 2 where "........pg.dropped.1........" = 3; -ERROR: column "........pg.dropped.1........" does not exist -LINE 1: update atacc1 set b = 2 where "........pg.dropped.1........"... - ^ --- INSERTs -insert into atacc1 values (10, 11, 12, 13); -ERROR: INSERT has more expressions than target columns -LINE 1: insert into atacc1 values (10, 11, 12, 13); - ^ -insert into atacc1 values (default, 11, 12, 13); -ERROR: INSERT has more expressions than target columns -LINE 1: insert into atacc1 values (default, 11, 12, 13); - ^ -insert into atacc1 values (11, 12, 13); -insert into atacc1 (a) values (10); -ERROR: column "a" of relation "atacc1" does not exist -LINE 1: insert into atacc1 (a) values (10); - ^ -insert into atacc1 (a) values (default); -ERROR: column "a" of relation "atacc1" does not exist -LINE 1: insert into atacc1 (a) values (default); - ^ -insert into atacc1 (a,b,c,d) values (10,11,12,13); -ERROR: column "a" of relation "atacc1" does not exist -LINE 1: insert into atacc1 (a,b,c,d) values (10,11,12,13); - ^ -insert into atacc1 (a,b,c,d) values (default,11,12,13); -ERROR: column "a" of relation "atacc1" does not exist -LINE 1: insert into atacc1 (a,b,c,d) values (default,11,12,13); - ^ -insert into atacc1 (b,c,d) values (11,12,13); -insert into atacc1 ("........pg.dropped.1........") values (10); -ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist -LINE 1: insert into atacc1 ("........pg.dropped.1........") values (... - ^ -insert into atacc1 ("........pg.dropped.1........") values (default); -ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist -LINE 1: insert into atacc1 ("........pg.dropped.1........") values (... 
- ^ -insert into atacc1 ("........pg.dropped.1........",b,c,d) values (10,11,12,13); -ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist -LINE 1: insert into atacc1 ("........pg.dropped.1........",b,c,d) va... - ^ -insert into atacc1 ("........pg.dropped.1........",b,c,d) values (default,11,12,13); -ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist -LINE 1: insert into atacc1 ("........pg.dropped.1........",b,c,d) va... - ^ --- DELETEs -delete from atacc1 where a = 3; -ERROR: column "a" does not exist -LINE 1: delete from atacc1 where a = 3; - ^ -delete from atacc1 where "........pg.dropped.1........" = 3; -ERROR: column "........pg.dropped.1........" does not exist -LINE 1: delete from atacc1 where "........pg.dropped.1........" = 3; - ^ -delete from atacc1; --- try dropping a non-existent column, should fail -alter table atacc1 drop bar; -ERROR: column "bar" of relation "atacc1" does not exist --- try removing an oid column, should succeed (as it's nonexistent) -alter table atacc1 SET WITHOUT OIDS; --- try adding an oid column, should fail (not supported) -alter table atacc1 SET WITH OIDS; -ERROR: syntax error at or near "WITH" -LINE 1: alter table atacc1 SET WITH OIDS; - ^ --- try dropping the xmin column, should fail -alter table atacc1 drop xmin; -ERROR: cannot drop system column "xmin" --- try creating a view and altering that, should fail -create view myview as select * from atacc1; -select * from myview; - b | c | d ----+---+--- -(0 rows) - -alter table myview drop d; -ERROR: ALTER action DROP COLUMN cannot be performed on relation "myview" -DETAIL: This operation is not supported for views. -drop view myview; --- test some commands to make sure they fail on the dropped column -analyze atacc1(a); -ERROR: column "a" of relation "atacc1" does not exist -analyze atacc1("........pg.dropped.1........"); -ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist -vacuum analyze atacc1(a); -ERROR: column "a" of relation "atacc1" does not exist -vacuum analyze atacc1("........pg.dropped.1........"); -ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist -comment on column atacc1.a is 'testing'; -ERROR: column "a" of relation "atacc1" does not exist -comment on column atacc1."........pg.dropped.1........" is 'testing'; -ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist -alter table atacc1 alter a set storage plain; -ERROR: column "a" of relation "atacc1" does not exist -alter table atacc1 alter "........pg.dropped.1........" set storage plain; -ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist -alter table atacc1 alter a set statistics 0; -ERROR: column "a" of relation "atacc1" does not exist -alter table atacc1 alter "........pg.dropped.1........" set statistics 0; -ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist -alter table atacc1 alter a set default 3; -ERROR: column "a" of relation "atacc1" does not exist -alter table atacc1 alter "........pg.dropped.1........" set default 3; -ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist -alter table atacc1 alter a drop default; -ERROR: column "a" of relation "atacc1" does not exist -alter table atacc1 alter "........pg.dropped.1........" drop default; -ERROR: column "........pg.dropped.1........" 
of relation "atacc1" does not exist -alter table atacc1 alter a set not null; -ERROR: column "a" of relation "atacc1" does not exist -alter table atacc1 alter "........pg.dropped.1........" set not null; -ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist -alter table atacc1 alter a drop not null; -ERROR: column "a" of relation "atacc1" does not exist -alter table atacc1 alter "........pg.dropped.1........" drop not null; -ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist -alter table atacc1 rename a to x; -ERROR: column "a" does not exist -alter table atacc1 rename "........pg.dropped.1........" to x; -ERROR: column "........pg.dropped.1........" does not exist -alter table atacc1 add primary key(a); -ERROR: column "a" of relation "atacc1" does not exist -alter table atacc1 add primary key("........pg.dropped.1........"); -ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist -alter table atacc1 add unique(a); -ERROR: column "a" named in key does not exist -alter table atacc1 add unique("........pg.dropped.1........"); -ERROR: column "........pg.dropped.1........" named in key does not exist -alter table atacc1 add check (a > 3); -ERROR: column "a" does not exist -alter table atacc1 add check ("........pg.dropped.1........" > 3); -ERROR: column "........pg.dropped.1........" does not exist -create table atacc2 (id int4 unique); -alter table atacc1 add foreign key (a) references atacc2(id); -ERROR: column "a" referenced in foreign key constraint does not exist -alter table atacc1 add foreign key ("........pg.dropped.1........") references atacc2(id); -ERROR: column "........pg.dropped.1........" referenced in foreign key constraint does not exist -alter table atacc2 add foreign key (id) references atacc1(a); -ERROR: column "a" referenced in foreign key constraint does not exist -alter table atacc2 add foreign key (id) references atacc1("........pg.dropped.1........"); -ERROR: column "........pg.dropped.1........" referenced in foreign key constraint does not exist -drop table atacc2; -create index "testing_idx" on atacc1(a); -ERROR: column "a" does not exist -create index "testing_idx" on atacc1("........pg.dropped.1........"); -ERROR: column "........pg.dropped.1........" does not exist --- test create as and select into -insert into atacc1 values (21, 22, 23); -create table attest1 as select * from atacc1; -select * from attest1; - b | c | d -----+----+---- - 21 | 22 | 23 -(1 row) - -drop table attest1; -select * into attest2 from atacc1; -select * from attest2; - b | c | d -----+----+---- - 21 | 22 | 23 -(1 row) - -drop table attest2; --- try dropping all columns -alter table atacc1 drop c; -alter table atacc1 drop d; -alter table atacc1 drop b; -select * from atacc1; --- -(1 row) - -drop table atacc1; --- test constraint error reporting in presence of dropped columns -create table atacc1 (id serial primary key, value int check (value < 10)); -insert into atacc1(value) values (100); -ERROR: new row for relation "atacc1" violates check constraint "atacc1_value_check" -DETAIL: Failing row contains (1, 100). -alter table atacc1 drop column value; -alter table atacc1 add column value int check (value < 10); -insert into atacc1(value) values (100); -ERROR: new row for relation "atacc1" violates check constraint "atacc1_value_check" -DETAIL: Failing row contains (2, 100). 
-insert into atacc1(id, value) values (null, 0); -ERROR: null value in column "id" of relation "atacc1" violates not-null constraint -DETAIL: Failing row contains (null, 0). -drop table atacc1; --- test inheritance -create table parent (a int, b int, c int); -insert into parent values (1, 2, 3); -alter table parent drop a; -create table child (d varchar(255)) inherits (parent); -insert into child values (12, 13, 'testing'); -select * from parent; - b | c -----+---- - 2 | 3 - 12 | 13 -(2 rows) - -select * from child; - b | c | d -----+----+--------- - 12 | 13 | testing -(1 row) - -alter table parent drop c; -select * from parent; - b ----- - 2 - 12 -(2 rows) - -select * from child; - b | d -----+--------- - 12 | testing -(1 row) - -drop table child; -drop table parent; --- check error cases for inheritance column merging -create table parent (a float8, b numeric(10,4), c text collate "C"); -create table child (a float4) inherits (parent); -- fail -NOTICE: merging column "a" with inherited definition -ERROR: column "a" has a type conflict -DETAIL: double precision versus real -create table child (b decimal(10,7)) inherits (parent); -- fail -NOTICE: moving and merging column "b" with inherited definition -DETAIL: User-specified column moved to the position of the inherited column. -ERROR: column "b" has a type conflict -DETAIL: numeric(10,4) versus numeric(10,7) -create table child (c text collate "POSIX") inherits (parent); -- fail -NOTICE: moving and merging column "c" with inherited definition -DETAIL: User-specified column moved to the position of the inherited column. -ERROR: column "c" has a collation conflict -DETAIL: "C" versus "POSIX" -create table child (a double precision, b decimal(10,4)) inherits (parent); -NOTICE: merging column "a" with inherited definition -NOTICE: merging column "b" with inherited definition -drop table child; -drop table parent; --- test copy in/out -create table attest (a int4, b int4, c int4); -insert into attest values (1,2,3); -alter table attest drop a; -copy attest to stdout; -2 3 -copy attest(a) to stdout; -ERROR: column "a" of relation "attest" does not exist -copy attest("........pg.dropped.1........") to stdout; -ERROR: column "........pg.dropped.1........" of relation "attest" does not exist -copy attest from stdin; -ERROR: extra data after last expected column -CONTEXT: COPY attest, line 1: "10 11 12" -select * from attest; - b | c ----+--- - 2 | 3 -(1 row) - -copy attest from stdin; -select * from attest; - b | c -----+---- - 2 | 3 - 21 | 22 -(2 rows) - -copy attest(a) from stdin; -ERROR: column "a" of relation "attest" does not exist -copy attest("........pg.dropped.1........") from stdin; -ERROR: column "........pg.dropped.1........" 
of relation "attest" does not exist -copy attest(b,c) from stdin; -select * from attest; - b | c -----+---- - 2 | 3 - 21 | 22 - 31 | 32 -(3 rows) - -drop table attest; --- test inheritance -create table dropColumn (a int, b int, e int); -create table dropColumnChild (c int) inherits (dropColumn); -create table dropColumnAnother (d int) inherits (dropColumnChild); --- these two should fail -alter table dropColumnchild drop column a; -ERROR: cannot drop inherited column "a" -alter table only dropColumnChild drop column b; -ERROR: cannot drop inherited column "b" --- these three should work -alter table only dropColumn drop column e; -alter table dropColumnChild drop column c; -alter table dropColumn drop column a; -create table renameColumn (a int); -create table renameColumnChild (b int) inherits (renameColumn); -create table renameColumnAnother (c int) inherits (renameColumnChild); --- these three should fail -alter table renameColumnChild rename column a to d; -ERROR: cannot rename inherited column "a" -alter table only renameColumnChild rename column a to d; -ERROR: inherited column "a" must be renamed in child tables too -alter table only renameColumn rename column a to d; -ERROR: inherited column "a" must be renamed in child tables too --- these should work -alter table renameColumn rename column a to d; -alter table renameColumnChild rename column b to a; --- these should work -alter table if exists doesnt_exist_tab rename column a to d; -NOTICE: relation "doesnt_exist_tab" does not exist, skipping -alter table if exists doesnt_exist_tab rename column b to a; -NOTICE: relation "doesnt_exist_tab" does not exist, skipping --- this should work -alter table renameColumn add column w int; --- this should fail -alter table only renameColumn add column x int; -ERROR: column must be added to child tables too --- Test corner cases in dropping of inherited columns -create table p1 (f1 int, f2 int); -create table c1 (f1 int not null) inherits(p1); -NOTICE: merging column "f1" with inherited definition --- should be rejected since c1.f1 is inherited -alter table c1 drop column f1; -ERROR: cannot drop inherited column "f1" --- should work -alter table p1 drop column f1; --- c1.f1 is still there, but no longer inherited -select f1 from c1; - f1 ----- -(0 rows) - -alter table c1 drop column f1; -select f1 from c1; -ERROR: column "f1" does not exist -LINE 1: select f1 from c1; - ^ -HINT: Perhaps you meant to reference the column "c1.f2". -drop table p1 cascade; -NOTICE: drop cascades to table c1 -create table p1 (f1 int, f2 int); -create table c1 () inherits(p1); --- should be rejected since c1.f1 is inherited -alter table c1 drop column f1; -ERROR: cannot drop inherited column "f1" -alter table p1 drop column f1; --- c1.f1 is dropped now, since there is no local definition for it -select f1 from c1; -ERROR: column "f1" does not exist -LINE 1: select f1 from c1; - ^ -HINT: Perhaps you meant to reference the column "c1.f2". 
-drop table p1 cascade; -NOTICE: drop cascades to table c1 -create table p1 (f1 int, f2 int); -create table c1 () inherits(p1); --- should be rejected since c1.f1 is inherited -alter table c1 drop column f1; -ERROR: cannot drop inherited column "f1" -alter table only p1 drop column f1; --- c1.f1 is NOT dropped, but must now be considered non-inherited -alter table c1 drop column f1; -drop table p1 cascade; -NOTICE: drop cascades to table c1 -create table p1 (f1 int, f2 int); -create table c1 (f1 int not null) inherits(p1); -NOTICE: merging column "f1" with inherited definition --- should be rejected since c1.f1 is inherited -alter table c1 drop column f1; -ERROR: cannot drop inherited column "f1" -alter table only p1 drop column f1; --- c1.f1 is still there, but no longer inherited -alter table c1 drop column f1; -drop table p1 cascade; -NOTICE: drop cascades to table c1 -create table p1(id int, name text); -create table p2(id2 int, name text, height int); -create table c1(age int) inherits(p1,p2); -NOTICE: merging multiple inherited definitions of column "name" -create table gc1() inherits (c1); -select relname, attname, attinhcount, attislocal -from pg_class join pg_attribute on (pg_class.oid = pg_attribute.attrelid) -where relname in ('p1','p2','c1','gc1') and attnum > 0 and not attisdropped -order by relname, attnum; - relname | attname | attinhcount | attislocal ----------+---------+-------------+------------ - c1 | id | 1 | f - c1 | name | 2 | f - c1 | id2 | 1 | f - c1 | height | 1 | f - c1 | age | 0 | t - gc1 | id | 1 | f - gc1 | name | 1 | f - gc1 | id2 | 1 | f - gc1 | height | 1 | f - gc1 | age | 1 | f - p1 | id | 0 | t - p1 | name | 0 | t - p2 | id2 | 0 | t - p2 | name | 0 | t - p2 | height | 0 | t -(15 rows) - --- should work -alter table only p1 drop column name; --- should work. Now c1.name is local and inhcount is 0. 
-alter table p2 drop column name; --- should be rejected since its inherited -alter table gc1 drop column name; -ERROR: cannot drop inherited column "name" --- should work, and drop gc1.name along -alter table c1 drop column name; --- should fail: column does not exist -alter table gc1 drop column name; -ERROR: column "name" of relation "gc1" does not exist --- should work and drop the attribute in all tables -alter table p2 drop column height; --- IF EXISTS test -create table dropColumnExists (); -alter table dropColumnExists drop column non_existing; --fail -ERROR: column "non_existing" of relation "dropcolumnexists" does not exist -alter table dropColumnExists drop column if exists non_existing; --succeed -NOTICE: column "non_existing" of relation "dropcolumnexists" does not exist, skipping -select relname, attname, attinhcount, attislocal -from pg_class join pg_attribute on (pg_class.oid = pg_attribute.attrelid) -where relname in ('p1','p2','c1','gc1') and attnum > 0 and not attisdropped -order by relname, attnum; - relname | attname | attinhcount | attislocal ----------+---------+-------------+------------ - c1 | id | 1 | f - c1 | id2 | 1 | f - c1 | age | 0 | t - gc1 | id | 1 | f - gc1 | id2 | 1 | f - gc1 | age | 1 | f - p1 | id | 0 | t - p2 | id2 | 0 | t -(8 rows) - -drop table p1, p2 cascade; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table c1 -drop cascades to table gc1 --- test attinhcount tracking with merged columns -create table depth0(); -create table depth1(c text) inherits (depth0); -create table depth2() inherits (depth1); -alter table depth0 add c text; -NOTICE: merging definition of column "c" for child "depth1" -select attrelid::regclass, attname, attinhcount, attislocal -from pg_attribute -where attnum > 0 and attrelid::regclass in ('depth0', 'depth1', 'depth2') -order by attrelid::regclass::text, attnum; - attrelid | attname | attinhcount | attislocal -----------+---------+-------------+------------ - depth0 | c | 0 | t - depth1 | c | 1 | t - depth2 | c | 1 | f -(3 rows) - --- test renumbering of child-table columns in inherited operations -create table p1 (f1 int); -create table c1 (f2 text, f3 int) inherits (p1); -alter table p1 add column a1 int check (a1 > 0); -alter table p1 add column f2 text; -NOTICE: merging definition of column "f2" for child "c1" -insert into p1 values (1,2,'abc'); -insert into c1 values(11,'xyz',33,0); -- should fail -ERROR: new row for relation "c1" violates check constraint "p1_a1_check" -DETAIL: Failing row contains (11, xyz, 33, 0). 
-insert into c1 values(11,'xyz',33,22); -select * from p1; - f1 | a1 | f2 -----+----+----- - 1 | 2 | abc - 11 | 22 | xyz -(2 rows) - -update p1 set a1 = a1 + 1, f2 = upper(f2); -select * from p1; - f1 | a1 | f2 -----+----+----- - 1 | 3 | ABC - 11 | 23 | XYZ -(2 rows) - -drop table p1 cascade; -NOTICE: drop cascades to table c1 --- test that operations with a dropped column do not try to reference --- its datatype -create domain mytype as text; -create temp table foo (f1 text, f2 mytype, f3 text); -insert into foo values('bb','cc','dd'); -select * from foo; - f1 | f2 | f3 -----+----+---- - bb | cc | dd -(1 row) - -drop domain mytype cascade; -NOTICE: drop cascades to column f2 of table foo -select * from foo; - f1 | f3 -----+---- - bb | dd -(1 row) - -insert into foo values('qq','rr'); -select * from foo; - f1 | f3 -----+---- - bb | dd - qq | rr -(2 rows) - -update foo set f3 = 'zz'; -select * from foo; - f1 | f3 -----+---- - bb | zz - qq | zz -(2 rows) - -select f3,max(f1) from foo group by f3; - f3 | max -----+----- - zz | qq -(1 row) - --- Simple tests for alter table column type -alter table foo alter f1 TYPE integer; -- fails -ERROR: column "f1" cannot be cast automatically to type integer -HINT: You might need to specify "USING f1::integer". -alter table foo alter f1 TYPE varchar(10); -create table anothertab (atcol1 serial8, atcol2 boolean, - constraint anothertab_chk check (atcol1 <= 3)); -insert into anothertab (atcol1, atcol2) values (default, true); -insert into anothertab (atcol1, atcol2) values (default, false); -select * from anothertab; - atcol1 | atcol2 ---------+-------- - 1 | t - 2 | f -(2 rows) - -alter table anothertab alter column atcol1 type boolean; -- fails -ERROR: column "atcol1" cannot be cast automatically to type boolean -HINT: You might need to specify "USING atcol1::boolean". -alter table anothertab alter column atcol1 type boolean using atcol1::int; -- fails -ERROR: result of USING clause for column "atcol1" cannot be cast automatically to type boolean -HINT: You might need to add an explicit cast. -alter table anothertab alter column atcol1 type integer; -select * from anothertab; - atcol1 | atcol2 ---------+-------- - 1 | t - 2 | f -(2 rows) - -insert into anothertab (atcol1, atcol2) values (45, null); -- fails -ERROR: new row for relation "anothertab" violates check constraint "anothertab_chk" -DETAIL: Failing row contains (45, null). -insert into anothertab (atcol1, atcol2) values (default, null); -select * from anothertab; - atcol1 | atcol2 ---------+-------- - 1 | t - 2 | f - 3 | -(3 rows) - -alter table anothertab alter column atcol2 type text - using case when atcol2 is true then 'IT WAS TRUE' - when atcol2 is false then 'IT WAS FALSE' - else 'IT WAS NULL!' end; -select * from anothertab; - atcol1 | atcol2 ---------+-------------- - 1 | IT WAS TRUE - 2 | IT WAS FALSE - 3 | IT WAS NULL! -(3 rows) - -alter table anothertab alter column atcol1 type boolean - using case when atcol1 % 2 = 0 then true else false end; -- fails -ERROR: default for column "atcol1" cannot be cast automatically to type boolean -alter table anothertab alter column atcol1 drop default; -alter table anothertab alter column atcol1 type boolean - using case when atcol1 % 2 = 0 then true else false end; -- fails -ERROR: operator does not exist: boolean <= integer -HINT: No operator matches the given name and argument types. You might need to add explicit type casts. 
-alter table anothertab drop constraint anothertab_chk; -alter table anothertab drop constraint anothertab_chk; -- fails -ERROR: constraint "anothertab_chk" of relation "anothertab" does not exist -alter table anothertab drop constraint IF EXISTS anothertab_chk; -- succeeds -NOTICE: constraint "anothertab_chk" of relation "anothertab" does not exist, skipping -alter table anothertab alter column atcol1 type boolean - using case when atcol1 % 2 = 0 then true else false end; -select * from anothertab; - atcol1 | atcol2 ---------+-------------- - f | IT WAS TRUE - t | IT WAS FALSE - f | IT WAS NULL! -(3 rows) - -drop table anothertab; --- Test index handling in alter table column type (cf. bugs #15835, #15865) -create table anothertab(f1 int primary key, f2 int unique, - f3 int, f4 int, f5 int); -alter table anothertab - add exclude using btree (f3 with =); -alter table anothertab - add exclude using btree (f4 with =) where (f4 is not null); -alter table anothertab - add exclude using btree (f4 with =) where (f5 > 0); -alter table anothertab - add unique(f1,f4); -create index on anothertab(f2,f3); -create unique index on anothertab(f4); -\d anothertab - Table "public.anothertab" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - f1 | integer | | not null | - f2 | integer | | | - f3 | integer | | | - f4 | integer | | | - f5 | integer | | | -Indexes: - "anothertab_pkey" PRIMARY KEY, btree (f1) - "anothertab_f1_f4_key" UNIQUE CONSTRAINT, btree (f1, f4) - "anothertab_f2_f3_idx" btree (f2, f3) - "anothertab_f2_key" UNIQUE CONSTRAINT, btree (f2) - "anothertab_f3_excl" EXCLUDE USING btree (f3 WITH =) - "anothertab_f4_excl" EXCLUDE USING btree (f4 WITH =) WHERE (f4 IS NOT NULL) - "anothertab_f4_excl1" EXCLUDE USING btree (f4 WITH =) WHERE (f5 > 0) - "anothertab_f4_idx" UNIQUE, btree (f4) - -alter table anothertab alter column f1 type bigint; -alter table anothertab - alter column f2 type bigint, - alter column f3 type bigint, - alter column f4 type bigint; -alter table anothertab alter column f5 type bigint; -\d anothertab - Table "public.anothertab" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+--------- - f1 | bigint | | not null | - f2 | bigint | | | - f3 | bigint | | | - f4 | bigint | | | - f5 | bigint | | | -Indexes: - "anothertab_pkey" PRIMARY KEY, btree (f1) - "anothertab_f1_f4_key" UNIQUE CONSTRAINT, btree (f1, f4) - "anothertab_f2_f3_idx" btree (f2, f3) - "anothertab_f2_key" UNIQUE CONSTRAINT, btree (f2) - "anothertab_f3_excl" EXCLUDE USING btree (f3 WITH =) - "anothertab_f4_excl" EXCLUDE USING btree (f4 WITH =) WHERE (f4 IS NOT NULL) - "anothertab_f4_excl1" EXCLUDE USING btree (f4 WITH =) WHERE (f5 > 0) - "anothertab_f4_idx" UNIQUE, btree (f4) - -drop table anothertab; --- test that USING expressions are parsed before column alter type / drop steps -create table another (f1 int, f2 text, f3 text); -insert into another values(1, 'one', 'uno'); -insert into another values(2, 'two', 'due'); -insert into another values(3, 'three', 'tre'); -select * from another; - f1 | f2 | f3 -----+-------+----- - 1 | one | uno - 2 | two | due - 3 | three | tre -(3 rows) - -alter table another - alter f1 type text using f2 || ' and ' || f3 || ' more', - alter f2 type bigint using f1 * 10, - drop column f3; -select * from another; - f1 | f2 ---------------------+---- - one and uno more | 10 - two and due more | 20 - three and tre more | 30 -(3 rows) - -drop table another; --- Create an index that skips WAL, then 
perform a SET DATA TYPE that skips --- rewriting the index. -begin; -create table skip_wal_skip_rewrite_index (c varchar(10) primary key); -alter table skip_wal_skip_rewrite_index alter c type varchar(20); -commit; --- We disallow changing a table's row type if it's used for storage -create table at_tab1 (a int, b text); -create table at_tab2 (x int, y at_tab1); -alter table at_tab1 alter column b type varchar; -- fails -ERROR: cannot alter table "at_tab1" because column "at_tab2.y" uses its row type -drop table at_tab2; --- Use of row type in an expression is defended differently -create table at_tab2 (x int, y text, check((x,y)::at_tab1 = (1,'42')::at_tab1)); -alter table at_tab1 alter column b type varchar; -- allowed, but ... -insert into at_tab2 values(1,'42'); -- ... this will fail -ERROR: ROW() column has type text instead of type character varying -drop table at_tab1, at_tab2; --- Check it for a partitioned table, too -create table at_tab1 (a int, b text) partition by list(a); -create table at_tab2 (x int, y at_tab1); -alter table at_tab1 alter column b type varchar; -- fails -ERROR: cannot alter table "at_tab1" because column "at_tab2.y" uses its row type -drop table at_tab1, at_tab2; --- Alter column type that's part of a partitioned index -create table at_partitioned (a int, b text) partition by range (a); -create table at_part_1 partition of at_partitioned for values from (0) to (1000); -insert into at_partitioned values (512, '0.123'); -create table at_part_2 (b text, a int); -insert into at_part_2 values ('1.234', 1024); -create index on at_partitioned (b); -create index on at_partitioned (a); -\d at_part_1 - Table "public.at_part_1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | text | | | -Partition of: at_partitioned FOR VALUES FROM (0) TO (1000) -Indexes: - "at_part_1_a_idx" btree (a) - "at_part_1_b_idx" btree (b) - -\d at_part_2 - Table "public.at_part_2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - b | text | | | - a | integer | | | - -alter table at_partitioned attach partition at_part_2 for values from (1000) to (2000); -\d at_part_2 - Table "public.at_part_2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - b | text | | | - a | integer | | | -Partition of: at_partitioned FOR VALUES FROM (1000) TO (2000) -Indexes: - "at_part_2_a_idx" btree (a) - "at_part_2_b_idx" btree (b) - -alter table at_partitioned alter column b type numeric using b::numeric; -\d at_part_1 - Table "public.at_part_1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | numeric | | | -Partition of: at_partitioned FOR VALUES FROM (0) TO (1000) -Indexes: - "at_part_1_a_idx" btree (a) - "at_part_1_b_idx" btree (b) - -\d at_part_2 - Table "public.at_part_2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - b | numeric | | | - a | integer | | | -Partition of: at_partitioned FOR VALUES FROM (1000) TO (2000) -Indexes: - "at_part_2_a_idx" btree (a) - "at_part_2_b_idx" btree (b) - -drop table at_partitioned; --- Alter column type when no table rewrite is required --- Also check that comments are preserved -create table at_partitioned(id int, name varchar(64), unique (id, name)) - partition by hash(id); -comment on constraint at_partitioned_id_name_key on at_partitioned is 'parent
constraint'; -comment on index at_partitioned_id_name_key is 'parent index'; -create table at_partitioned_0 partition of at_partitioned - for values with (modulus 2, remainder 0); -comment on constraint at_partitioned_0_id_name_key on at_partitioned_0 is 'child 0 constraint'; -comment on index at_partitioned_0_id_name_key is 'child 0 index'; -create table at_partitioned_1 partition of at_partitioned - for values with (modulus 2, remainder 1); -comment on constraint at_partitioned_1_id_name_key on at_partitioned_1 is 'child 1 constraint'; -comment on index at_partitioned_1_id_name_key is 'child 1 index'; -insert into at_partitioned values(1, 'foo'); -insert into at_partitioned values(3, 'bar'); -create temp table old_oids as - select relname, oid as oldoid, relfilenode as oldfilenode - from pg_class where relname like 'at_partitioned%'; -select relname, - c.oid = oldoid as orig_oid, - case relfilenode - when 0 then 'none' - when c.oid then 'own' - when oldfilenode then 'orig' - else 'OTHER' - end as storage, - obj_description(c.oid, 'pg_class') as desc - from pg_class c left join old_oids using (relname) - where relname like 'at_partitioned%' - order by relname; - relname | orig_oid | storage | desc -------------------------------+----------+---------+--------------- - at_partitioned | t | none | - at_partitioned_0 | t | own | - at_partitioned_0_id_name_key | t | own | child 0 index - at_partitioned_1 | t | own | - at_partitioned_1_id_name_key | t | own | child 1 index - at_partitioned_id_name_key | t | none | parent index -(6 rows) - -select conname, obj_description(oid, 'pg_constraint') as desc - from pg_constraint where conname like 'at_partitioned%' - order by conname; - conname | desc -------------------------------+-------------------- - at_partitioned_0_id_name_key | child 0 constraint - at_partitioned_1_id_name_key | child 1 constraint - at_partitioned_id_name_key | parent constraint -(3 rows) - -alter table at_partitioned alter column name type varchar(127); -select relname, - c.oid = oldoid as orig_oid, - case relfilenode - when 0 then 'none' - when c.oid then 'own' - when oldfilenode then 'orig' - else 'OTHER' - end as storage, - obj_description(c.oid, 'pg_class') as desc - from pg_class c left join old_oids using (relname) - where relname like 'at_partitioned%' - order by relname; - relname | orig_oid | storage | desc -------------------------------+----------+---------+-------------- - at_partitioned | t | none | - at_partitioned_0 | t | own | - at_partitioned_0_id_name_key | f | own | - at_partitioned_1 | t | own | - at_partitioned_1_id_name_key | f | own | - at_partitioned_id_name_key | f | none | parent index -(6 rows) - -select conname, obj_description(oid, 'pg_constraint') as desc - from pg_constraint where conname like 'at_partitioned%' - order by conname; - conname | desc -------------------------------+------------------- - at_partitioned_0_id_name_key | - at_partitioned_1_id_name_key | - at_partitioned_id_name_key | parent constraint -(3 rows) - --- Don't remove this DROP, it exposes bug #15672 -drop table at_partitioned; --- disallow recursive containment of row types -create temp table recur1 (f1 int); -alter table recur1 add column f2 recur1; -- fails -ERROR: composite type recur1 cannot be made a member of itself -alter table recur1 add column f2 recur1[]; -- fails -ERROR: composite type recur1 cannot be made a member of itself -create domain array_of_recur1 as recur1[]; -alter table recur1 add column f2 array_of_recur1; -- fails -ERROR: composite type recur1 
cannot be made a member of itself -create temp table recur2 (f1 int, f2 recur1); -alter table recur1 add column f2 recur2; -- fails -ERROR: composite type recur1 cannot be made a member of itself -alter table recur1 add column f2 int; -alter table recur1 alter column f2 type recur2; -- fails -ERROR: composite type recur1 cannot be made a member of itself --- SET STORAGE may need to add a TOAST table -create table test_storage (a text, c text storage plain); -select reltoastrelid <> 0 as has_toast_table - from pg_class where oid = 'test_storage'::regclass; - has_toast_table ------------------ - t -(1 row) - -alter table test_storage alter a set storage plain; --- rewrite table to remove its TOAST table; need a non-constant column default -alter table test_storage add b int default random()::int; -select reltoastrelid <> 0 as has_toast_table - from pg_class where oid = 'test_storage'::regclass; - has_toast_table ------------------ - f -(1 row) - -alter table test_storage alter a set storage default; -- re-add TOAST table -select reltoastrelid <> 0 as has_toast_table - from pg_class where oid = 'test_storage'::regclass; - has_toast_table ------------------ - t -(1 row) - --- check STORAGE correctness -create table test_storage_failed (a text, b int storage extended); -ERROR: column data type integer can only have storage PLAIN --- test that SET STORAGE propagates to index correctly -create index test_storage_idx on test_storage (b, a); -alter table test_storage alter column a set storage external; -\d+ test_storage - Table "public.test_storage" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+-------------------+----------+--------------+------------- - a | text | | | | external | | - c | text | | | | plain | | - b | integer | | | random()::integer | plain | | -Indexes: - "test_storage_idx" btree (b, a) - -\d+ test_storage_idx - Index "public.test_storage_idx" - Column | Type | Key? | Definition | Storage | Stats target ---------+---------+------+------------+----------+-------------- - b | integer | yes | b | plain | - a | text | yes | a | external | -btree, for table "public.test_storage" - --- ALTER COLUMN TYPE with a check constraint and a child table (bug #13779) -CREATE TABLE test_inh_check (a float check (a > 10.2), b float); -CREATE TABLE test_inh_check_child() INHERITS(test_inh_check); -\d test_inh_check - Table "public.test_inh_check" - Column | Type | Collation | Nullable | Default ---------+------------------+-----------+----------+--------- - a | double precision | | | - b | double precision | | | -Check constraints: - "test_inh_check_a_check" CHECK (a > 10.2::double precision) -Number of child tables: 1 (Use \d+ to list them.) 
- -\d test_inh_check_child - Table "public.test_inh_check_child" - Column | Type | Collation | Nullable | Default ---------+------------------+-----------+----------+--------- - a | double precision | | | - b | double precision | | | -Check constraints: - "test_inh_check_a_check" CHECK (a > 10.2::double precision) -Inherits: test_inh_check - -select relname, conname, coninhcount, conislocal, connoinherit - from pg_constraint c, pg_class r - where relname like 'test_inh_check%' and c.conrelid = r.oid - order by 1, 2; - relname | conname | coninhcount | conislocal | connoinherit -----------------------+------------------------+-------------+------------+-------------- - test_inh_check | test_inh_check_a_check | 0 | t | f - test_inh_check_child | test_inh_check_a_check | 1 | f | f -(2 rows) - -ALTER TABLE test_inh_check ALTER COLUMN a TYPE numeric; -\d test_inh_check - Table "public.test_inh_check" - Column | Type | Collation | Nullable | Default ---------+------------------+-----------+----------+--------- - a | numeric | | | - b | double precision | | | -Check constraints: - "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision) -Number of child tables: 1 (Use \d+ to list them.) - -\d test_inh_check_child - Table "public.test_inh_check_child" - Column | Type | Collation | Nullable | Default ---------+------------------+-----------+----------+--------- - a | numeric | | | - b | double precision | | | -Check constraints: - "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision) -Inherits: test_inh_check - -select relname, conname, coninhcount, conislocal, connoinherit - from pg_constraint c, pg_class r - where relname like 'test_inh_check%' and c.conrelid = r.oid - order by 1, 2; - relname | conname | coninhcount | conislocal | connoinherit -----------------------+------------------------+-------------+------------+-------------- - test_inh_check | test_inh_check_a_check | 0 | t | f - test_inh_check_child | test_inh_check_a_check | 1 | f | f -(2 rows) - --- also try noinherit, local, and local+inherited cases -ALTER TABLE test_inh_check ADD CONSTRAINT bnoinherit CHECK (b > 100) NO INHERIT; -ALTER TABLE test_inh_check_child ADD CONSTRAINT blocal CHECK (b < 1000); -ALTER TABLE test_inh_check_child ADD CONSTRAINT bmerged CHECK (b > 1); -ALTER TABLE test_inh_check ADD CONSTRAINT bmerged CHECK (b > 1); -NOTICE: merging constraint "bmerged" with inherited definition -\d test_inh_check - Table "public.test_inh_check" - Column | Type | Collation | Nullable | Default ---------+------------------+-----------+----------+--------- - a | numeric | | | - b | double precision | | | -Check constraints: - "bmerged" CHECK (b > 1::double precision) - "bnoinherit" CHECK (b > 100::double precision) NO INHERIT - "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision) -Number of child tables: 1 (Use \d+ to list them.) 
- -\d test_inh_check_child - Table "public.test_inh_check_child" - Column | Type | Collation | Nullable | Default ---------+------------------+-----------+----------+--------- - a | numeric | | | - b | double precision | | | -Check constraints: - "blocal" CHECK (b < 1000::double precision) - "bmerged" CHECK (b > 1::double precision) - "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision) -Inherits: test_inh_check - -select relname, conname, coninhcount, conislocal, connoinherit - from pg_constraint c, pg_class r - where relname like 'test_inh_check%' and c.conrelid = r.oid - order by 1, 2; - relname | conname | coninhcount | conislocal | connoinherit -----------------------+------------------------+-------------+------------+-------------- - test_inh_check | bmerged | 0 | t | f - test_inh_check | bnoinherit | 0 | t | t - test_inh_check | test_inh_check_a_check | 0 | t | f - test_inh_check_child | blocal | 0 | t | f - test_inh_check_child | bmerged | 1 | t | f - test_inh_check_child | test_inh_check_a_check | 1 | f | f -(6 rows) - -ALTER TABLE test_inh_check ALTER COLUMN b TYPE numeric; -NOTICE: merging constraint "bmerged" with inherited definition -\d test_inh_check - Table "public.test_inh_check" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | numeric | | | - b | numeric | | | -Check constraints: - "bmerged" CHECK (b::double precision > 1::double precision) - "bnoinherit" CHECK (b::double precision > 100::double precision) NO INHERIT - "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision) -Number of child tables: 1 (Use \d+ to list them.) - -\d test_inh_check_child - Table "public.test_inh_check_child" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | numeric | | | - b | numeric | | | -Check constraints: - "blocal" CHECK (b::double precision < 1000::double precision) - "bmerged" CHECK (b::double precision > 1::double precision) - "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision) -Inherits: test_inh_check - -select relname, conname, coninhcount, conislocal, connoinherit - from pg_constraint c, pg_class r - where relname like 'test_inh_check%' and c.conrelid = r.oid - order by 1, 2; - relname | conname | coninhcount | conislocal | connoinherit -----------------------+------------------------+-------------+------------+-------------- - test_inh_check | bmerged | 0 | t | f - test_inh_check | bnoinherit | 0 | t | t - test_inh_check | test_inh_check_a_check | 0 | t | f - test_inh_check_child | blocal | 0 | t | f - test_inh_check_child | bmerged | 1 | t | f - test_inh_check_child | test_inh_check_a_check | 1 | f | f -(6 rows) - --- ALTER COLUMN TYPE with different schema in children --- Bug at https://postgr.es/m/20170102225618.GA10071@telsasoft.com -CREATE TABLE test_type_diff (f1 int); -CREATE TABLE test_type_diff_c (extra smallint) INHERITS (test_type_diff); -ALTER TABLE test_type_diff ADD COLUMN f2 int; -INSERT INTO test_type_diff_c VALUES (1, 2, 3); -ALTER TABLE test_type_diff ALTER COLUMN f2 TYPE bigint USING f2::bigint; -CREATE TABLE test_type_diff2 (int_two int2, int_four int4, int_eight int8); -CREATE TABLE test_type_diff2_c1 (int_four int4, int_eight int8, int_two int2); -CREATE TABLE test_type_diff2_c2 (int_eight int8, int_two int2, int_four int4); -CREATE TABLE test_type_diff2_c3 (int_two int2, int_four int4, int_eight int8); -ALTER TABLE test_type_diff2_c1 INHERIT test_type_diff2; -ALTER 
TABLE test_type_diff2_c2 INHERIT test_type_diff2; -ALTER TABLE test_type_diff2_c3 INHERIT test_type_diff2; -INSERT INTO test_type_diff2_c1 VALUES (1, 2, 3); -INSERT INTO test_type_diff2_c2 VALUES (4, 5, 6); -INSERT INTO test_type_diff2_c3 VALUES (7, 8, 9); -ALTER TABLE test_type_diff2 ALTER COLUMN int_four TYPE int8 USING int_four::int8; --- whole-row references are disallowed -ALTER TABLE test_type_diff2 ALTER COLUMN int_four TYPE int4 USING (pg_column_size(test_type_diff2)); -ERROR: cannot convert whole-row table reference -DETAIL: USING expression contains a whole-row table reference. --- check for rollback of ANALYZE corrupting table property flags (bug #11638) -CREATE TABLE check_fk_presence_1 (id int PRIMARY KEY, t text); -CREATE TABLE check_fk_presence_2 (id int REFERENCES check_fk_presence_1, t text); -BEGIN; -ALTER TABLE check_fk_presence_2 DROP CONSTRAINT check_fk_presence_2_id_fkey; -ANALYZE check_fk_presence_2; -ROLLBACK; -\d check_fk_presence_2 - Table "public.check_fk_presence_2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - id | integer | | | - t | text | | | -Foreign-key constraints: - "check_fk_presence_2_id_fkey" FOREIGN KEY (id) REFERENCES check_fk_presence_1(id) - -DROP TABLE check_fk_presence_1, check_fk_presence_2; --- check column addition within a view (bug #14876) -create table at_base_table(id int, stuff text); -insert into at_base_table values (23, 'skidoo'); -create view at_view_1 as select * from at_base_table bt; -create view at_view_2 as select *, to_json(v1) as j from at_view_1 v1; -\d+ at_view_1 - View "public.at_view_1" - Column | Type | Collation | Nullable | Default | Storage | Description ---------+---------+-----------+----------+---------+----------+------------- - id | integer | | | | plain | - stuff | text | | | | extended | -View definition: - SELECT id, - stuff - FROM at_base_table bt; - -\d+ at_view_2 - View "public.at_view_2" - Column | Type | Collation | Nullable | Default | Storage | Description ---------+---------+-----------+----------+---------+----------+------------- - id | integer | | | | plain | - stuff | text | | | | extended | - j | json | | | | extended | -View definition: - SELECT id, - stuff, - to_json(v1.*) AS j - FROM at_view_1 v1; - -explain (verbose, costs off) select * from at_view_2; - QUERY PLAN ----------------------------------------------------------- - Seq Scan on public.at_base_table bt - Output: bt.id, bt.stuff, to_json(ROW(bt.id, bt.stuff)) -(2 rows) - -select * from at_view_2; - id | stuff | j -----+--------+---------------------------- - 23 | skidoo | {"id":23,"stuff":"skidoo"} -(1 row) - -create or replace view at_view_1 as select *, 2+2 as more from at_base_table bt; -\d+ at_view_1 - View "public.at_view_1" - Column | Type | Collation | Nullable | Default | Storage | Description ---------+---------+-----------+----------+---------+----------+------------- - id | integer | | | | plain | - stuff | text | | | | extended | - more | integer | | | | plain | -View definition: - SELECT id, - stuff, - 2 + 2 AS more - FROM at_base_table bt; - -\d+ at_view_2 - View "public.at_view_2" - Column | Type | Collation | Nullable | Default | Storage | Description ---------+---------+-----------+----------+---------+----------+------------- - id | integer | | | | plain | - stuff | text | | | | extended | - j | json | | | | extended | -View definition: - SELECT id, - stuff, - to_json(v1.*) AS j - FROM at_view_1 v1; - -explain (verbose, costs off) select * from at_view_2; - 
QUERY PLAN -------------------------------------------------------------- - Seq Scan on public.at_base_table bt - Output: bt.id, bt.stuff, to_json(ROW(bt.id, bt.stuff, 4)) -(2 rows) - -select * from at_view_2; - id | stuff | j -----+--------+------------------------------------- - 23 | skidoo | {"id":23,"stuff":"skidoo","more":4} -(1 row) - -drop view at_view_2; -drop view at_view_1; -drop table at_base_table; --- related case (bug #17811) -begin; -create temp table t1 as select * from int8_tbl; -create temp view v1 as select 1::int8 as q1; -create temp view v2 as select * from v1; -create or replace temp view v1 with (security_barrier = true) - as select * from t1; -create temp table log (q1 int8, q2 int8); -create rule v1_upd_rule as on update to v1 - do also insert into log values (new.*); -update v2 set q1 = q1 + 1 where q1 = 123; -select * from t1; - q1 | q2 -------------------+------------------- - 4567890123456789 | 123 - 4567890123456789 | 4567890123456789 - 4567890123456789 | -4567890123456789 - 124 | 456 - 124 | 4567890123456789 -(5 rows) - -select * from log; - q1 | q2 ------+------------------ - 124 | 456 - 124 | 4567890123456789 -(2 rows) - -rollback; --- check adding a column not itself requiring a rewrite, together with --- a column requiring a default (bug #16038) --- ensure that rewrites aren't silently optimized away, removing the --- value of the test -CREATE FUNCTION check_ddl_rewrite(p_tablename regclass, p_ddl text) -RETURNS boolean -LANGUAGE plpgsql AS $$ -DECLARE - v_relfilenode oid; -BEGIN - v_relfilenode := relfilenode FROM pg_class WHERE oid = p_tablename; - - EXECUTE p_ddl; - - RETURN v_relfilenode <> (SELECT relfilenode FROM pg_class WHERE oid = p_tablename); -END; -$$; -CREATE TABLE rewrite_test(col text); -INSERT INTO rewrite_test VALUES ('something'); -INSERT INTO rewrite_test VALUES (NULL); --- empty[12] don't need rewrite, but notempty[12]_rewrite will force one -SELECT check_ddl_rewrite('rewrite_test', $$ - ALTER TABLE rewrite_test - ADD COLUMN empty1 text, - ADD COLUMN notempty1_rewrite serial; -$$); - check_ddl_rewrite -------------------- - t -(1 row) - -SELECT check_ddl_rewrite('rewrite_test', $$ - ALTER TABLE rewrite_test - ADD COLUMN notempty2_rewrite serial, - ADD COLUMN empty2 text; -$$); - check_ddl_rewrite -------------------- - t -(1 row) - --- also check that fast defaults cause no problem, first without rewrite -SELECT check_ddl_rewrite('rewrite_test', $$ - ALTER TABLE rewrite_test - ADD COLUMN empty3 text, - ADD COLUMN notempty3_norewrite int default 42; -$$); - check_ddl_rewrite -------------------- - f -(1 row) - -SELECT check_ddl_rewrite('rewrite_test', $$ - ALTER TABLE rewrite_test - ADD COLUMN notempty4_norewrite int default 42, - ADD COLUMN empty4 text; -$$); - check_ddl_rewrite -------------------- - f -(1 row) - --- then with rewrite -SELECT check_ddl_rewrite('rewrite_test', $$ - ALTER TABLE rewrite_test - ADD COLUMN empty5 text, - ADD COLUMN notempty5_norewrite int default 42, - ADD COLUMN notempty5_rewrite serial; -$$); - check_ddl_rewrite -------------------- - t -(1 row) - -SELECT check_ddl_rewrite('rewrite_test', $$ - ALTER TABLE rewrite_test - ADD COLUMN notempty6_rewrite serial, - ADD COLUMN empty6 text, - ADD COLUMN notempty6_norewrite int default 42; -$$); - check_ddl_rewrite -------------------- - t -(1 row) - --- cleanup -DROP FUNCTION check_ddl_rewrite(regclass, text); -DROP TABLE rewrite_test; --- --- lock levels --- -drop type lockmodes; -ERROR: type "lockmodes" does not exist -create type lockmodes as enum ( - 
'SIReadLock' -,'AccessShareLock' -,'RowShareLock' -,'RowExclusiveLock' -,'ShareUpdateExclusiveLock' -,'ShareLock' -,'ShareRowExclusiveLock' -,'ExclusiveLock' -,'AccessExclusiveLock' -); -drop view my_locks; -ERROR: view "my_locks" does not exist -create or replace view my_locks as -select case when c.relname like 'pg_toast%' then 'pg_toast' else c.relname end, max(mode::lockmodes) as max_lockmode -from pg_locks l join pg_class c on l.relation = c.oid -where virtualtransaction = ( - select virtualtransaction - from pg_locks - where transactionid = pg_current_xact_id()::xid) -and locktype = 'relation' -and relnamespace != (select oid from pg_namespace where nspname = 'pg_catalog') -and c.relname != 'my_locks' -group by c.relname; -create table alterlock (f1 int primary key, f2 text); -insert into alterlock values (1, 'foo'); -create table alterlock2 (f3 int primary key, f1 int); -insert into alterlock2 values (1, 1); -begin; alter table alterlock alter column f2 set statistics 150; -select * from my_locks order by 1; - relname | max_lockmode ------------+-------------------------- - alterlock | ShareUpdateExclusiveLock -(1 row) - -rollback; -begin; alter table alterlock cluster on alterlock_pkey; -select * from my_locks order by 1; - relname | max_lockmode -----------------+-------------------------- - alterlock | ShareUpdateExclusiveLock - alterlock_pkey | ShareUpdateExclusiveLock -(2 rows) - -commit; -begin; alter table alterlock set without cluster; -select * from my_locks order by 1; - relname | max_lockmode ------------+-------------------------- - alterlock | ShareUpdateExclusiveLock -(1 row) - -commit; -begin; alter table alterlock set (fillfactor = 100); -select * from my_locks order by 1; - relname | max_lockmode ------------+-------------------------- - alterlock | ShareUpdateExclusiveLock - pg_toast | ShareUpdateExclusiveLock -(2 rows) - -commit; -begin; alter table alterlock reset (fillfactor); -select * from my_locks order by 1; - relname | max_lockmode ------------+-------------------------- - alterlock | ShareUpdateExclusiveLock - pg_toast | ShareUpdateExclusiveLock -(2 rows) - -commit; -begin; alter table alterlock set (toast.autovacuum_enabled = off); -select * from my_locks order by 1; - relname | max_lockmode ------------+-------------------------- - alterlock | ShareUpdateExclusiveLock - pg_toast | ShareUpdateExclusiveLock -(2 rows) - -commit; -begin; alter table alterlock set (autovacuum_enabled = off); -select * from my_locks order by 1; - relname | max_lockmode ------------+-------------------------- - alterlock | ShareUpdateExclusiveLock - pg_toast | ShareUpdateExclusiveLock -(2 rows) - -commit; -begin; alter table alterlock alter column f2 set (n_distinct = 1); -select * from my_locks order by 1; - relname | max_lockmode ------------+-------------------------- - alterlock | ShareUpdateExclusiveLock -(1 row) - -rollback; --- test that mixing options with different lock levels works as expected -begin; alter table alterlock set (autovacuum_enabled = off, fillfactor = 80); -select * from my_locks order by 1; - relname | max_lockmode ------------+-------------------------- - alterlock | ShareUpdateExclusiveLock - pg_toast | ShareUpdateExclusiveLock -(2 rows) - -commit; -begin; alter table alterlock alter column f2 set storage extended; -select * from my_locks order by 1; - relname | max_lockmode ------------+--------------------- - alterlock | AccessExclusiveLock -(1 row) - -rollback; -begin; alter table alterlock alter column f2 set default 'x'; -select * from my_locks 
order by 1; - relname | max_lockmode ------------+--------------------- - alterlock | AccessExclusiveLock -(1 row) - -rollback; -begin; -create trigger ttdummy - before delete or update on alterlock - for each row - execute procedure - ttdummy (1, 1); -select * from my_locks order by 1; - relname | max_lockmode ------------+----------------------- - alterlock | ShareRowExclusiveLock -(1 row) - -rollback; -begin; -select * from my_locks order by 1; - relname | max_lockmode ----------+-------------- -(0 rows) - -alter table alterlock2 add foreign key (f1) references alterlock (f1); -select * from my_locks order by 1; - relname | max_lockmode ------------------+----------------------- - alterlock | ShareRowExclusiveLock - alterlock2 | ShareRowExclusiveLock - alterlock2_pkey | AccessShareLock - alterlock_pkey | AccessShareLock -(4 rows) - -rollback; -begin; -alter table alterlock2 -add constraint alterlock2nv foreign key (f1) references alterlock (f1) NOT VALID; -select * from my_locks order by 1; - relname | max_lockmode -------------+----------------------- - alterlock | ShareRowExclusiveLock - alterlock2 | ShareRowExclusiveLock -(2 rows) - -commit; -begin; -alter table alterlock2 validate constraint alterlock2nv; -select * from my_locks order by 1; - relname | max_lockmode ------------------+-------------------------- - alterlock | RowShareLock - alterlock2 | ShareUpdateExclusiveLock - alterlock2_pkey | AccessShareLock - alterlock_pkey | AccessShareLock -(4 rows) - -rollback; -create or replace view my_locks as -select case when c.relname like 'pg_toast%' then 'pg_toast' else c.relname end, max(mode::lockmodes) as max_lockmode -from pg_locks l join pg_class c on l.relation = c.oid -where virtualtransaction = ( - select virtualtransaction - from pg_locks - where transactionid = pg_current_xact_id()::xid) -and locktype = 'relation' -and relnamespace != (select oid from pg_namespace where nspname = 'pg_catalog') -and c.relname = 'my_locks' -group by c.relname; --- raise exception -alter table my_locks set (autovacuum_enabled = false); -ERROR: unrecognized parameter "autovacuum_enabled" -alter view my_locks set (autovacuum_enabled = false); -ERROR: unrecognized parameter "autovacuum_enabled" -alter table my_locks reset (autovacuum_enabled); -alter view my_locks reset (autovacuum_enabled); -begin; -alter view my_locks set (security_barrier=off); -select * from my_locks order by 1; - relname | max_lockmode -----------+--------------------- - my_locks | AccessExclusiveLock -(1 row) - -alter view my_locks reset (security_barrier); -rollback; --- this test intentionally applies the ALTER TABLE command against a view, but --- uses a view option so we expect this to succeed. 
This form of SQL is --- accepted for historical reasons, as shown in the docs for ALTER VIEW -begin; -alter table my_locks set (security_barrier=off); -select * from my_locks order by 1; - relname | max_lockmode -----------+--------------------- - my_locks | AccessExclusiveLock -(1 row) - -alter table my_locks reset (security_barrier); -rollback; --- cleanup -drop table alterlock2; -drop table alterlock; -drop view my_locks; -drop type lockmodes; --- --- alter function --- -create function test_strict(text) returns text as - 'select coalesce($1, ''got passed a null'');' - language sql returns null on null input; -select test_strict(NULL); - test_strict -------------- - -(1 row) - -alter function test_strict(text) called on null input; -select test_strict(NULL); - test_strict -------------------- - got passed a null -(1 row) - -create function non_strict(text) returns text as - 'select coalesce($1, ''got passed a null'');' - language sql called on null input; -select non_strict(NULL); - non_strict -------------------- - got passed a null -(1 row) - -alter function non_strict(text) returns null on null input; -select non_strict(NULL); - non_strict ------------- - -(1 row) - --- --- alter object set schema --- -create schema alter1; -create schema alter2; -create table alter1.t1(f1 serial primary key, f2 int check (f2 > 0)); -create view alter1.v1 as select * from alter1.t1; -create function alter1.plus1(int) returns int as 'select $1+1' language sql; -create domain alter1.posint integer check (value > 0); -create type alter1.ctype as (f1 int, f2 text); -create function alter1.same(alter1.ctype, alter1.ctype) returns boolean language sql -as 'select $1.f1 is not distinct from $2.f1 and $1.f2 is not distinct from $2.f2'; -create operator alter1.=(procedure = alter1.same, leftarg = alter1.ctype, rightarg = alter1.ctype); -create operator class alter1.ctype_hash_ops default for type alter1.ctype using hash as - operator 1 alter1.=(alter1.ctype, alter1.ctype); -create conversion alter1.latin1_to_utf8 for 'latin1' to 'utf8' from iso8859_1_to_utf8; -create text search parser alter1.prs(start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype); -create text search configuration alter1.cfg(parser = alter1.prs); -create text search template alter1.tmpl(init = dsimple_init, lexize = dsimple_lexize); -create text search dictionary alter1.dict(template = alter1.tmpl); -insert into alter1.t1(f2) values(11); -insert into alter1.t1(f2) values(12); -alter table alter1.t1 set schema alter1; -- no-op, same schema -alter table alter1.t1 set schema alter2; -alter table alter1.v1 set schema alter2; -alter function alter1.plus1(int) set schema alter2; -alter domain alter1.posint set schema alter2; -alter operator class alter1.ctype_hash_ops using hash set schema alter2; -alter operator family alter1.ctype_hash_ops using hash set schema alter2; -alter operator alter1.=(alter1.ctype, alter1.ctype) set schema alter2; -alter function alter1.same(alter1.ctype, alter1.ctype) set schema alter2; -alter type alter1.ctype set schema alter1; -- no-op, same schema -alter type alter1.ctype set schema alter2; -alter conversion alter1.latin1_to_utf8 set schema alter2; -alter text search parser alter1.prs set schema alter2; -alter text search configuration alter1.cfg set schema alter2; -alter text search template alter1.tmpl set schema alter2; -alter text search dictionary alter1.dict set schema alter2; --- this should succeed because nothing is left in alter1 -drop schema alter1; -insert into 
alter2.t1(f2) values(13); -insert into alter2.t1(f2) values(14); -select * from alter2.t1; - f1 | f2 -----+---- - 1 | 11 - 2 | 12 - 3 | 13 - 4 | 14 -(4 rows) - -select * from alter2.v1; - f1 | f2 -----+---- - 1 | 11 - 2 | 12 - 3 | 13 - 4 | 14 -(4 rows) - -select alter2.plus1(41); - plus1 -------- - 42 -(1 row) - --- clean up -drop schema alter2 cascade; -NOTICE: drop cascades to 13 other objects -DETAIL: drop cascades to table alter2.t1 -drop cascades to view alter2.v1 -drop cascades to function alter2.plus1(integer) -drop cascades to type alter2.posint -drop cascades to type alter2.ctype -drop cascades to function alter2.same(alter2.ctype,alter2.ctype) -drop cascades to operator alter2.=(alter2.ctype,alter2.ctype) -drop cascades to operator family alter2.ctype_hash_ops for access method hash -drop cascades to conversion alter2.latin1_to_utf8 -drop cascades to text search parser alter2.prs -drop cascades to text search configuration alter2.cfg -drop cascades to text search template alter2.tmpl -drop cascades to text search dictionary alter2.dict --- --- composite types --- -CREATE TYPE test_type AS (a int); -\d test_type - Composite type "public.test_type" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - -ALTER TYPE nosuchtype ADD ATTRIBUTE b text; -- fails -ERROR: relation "nosuchtype" does not exist -ALTER TYPE test_type ADD ATTRIBUTE b text; -\d test_type - Composite type "public.test_type" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | text | | | - -ALTER TYPE test_type ADD ATTRIBUTE b text; -- fails -ERROR: column "b" of relation "test_type" already exists -ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE varchar; -\d test_type - Composite type "public.test_type" - Column | Type | Collation | Nullable | Default ---------+-------------------+-----------+----------+--------- - a | integer | | | - b | character varying | | | - -ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE integer; -\d test_type - Composite type "public.test_type" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | - -ALTER TYPE test_type DROP ATTRIBUTE b; -\d test_type - Composite type "public.test_type" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - -ALTER TYPE test_type DROP ATTRIBUTE c; -- fails -ERROR: column "c" of relation "test_type" does not exist -ALTER TYPE test_type DROP ATTRIBUTE IF EXISTS c; -NOTICE: column "c" of relation "test_type" does not exist, skipping -ALTER TYPE test_type DROP ATTRIBUTE a, ADD ATTRIBUTE d boolean; -\d test_type - Composite type "public.test_type" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - d | boolean | | | - -ALTER TYPE test_type RENAME ATTRIBUTE a TO aa; -ERROR: column "a" does not exist -ALTER TYPE test_type RENAME ATTRIBUTE d TO dd; -\d test_type - Composite type "public.test_type" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - dd | boolean | | | - -DROP TYPE test_type; -CREATE TYPE test_type1 AS (a int, b text); -CREATE TABLE test_tbl1 (x int, y test_type1); -ALTER TYPE test_type1 ALTER ATTRIBUTE b TYPE varchar; -- fails -ERROR: cannot alter type "test_type1" because column "test_tbl1.y" uses it -DROP TABLE test_tbl1; -CREATE TABLE 
test_tbl1 (x int, y text); -CREATE INDEX test_tbl1_idx ON test_tbl1((row(x,y)::test_type1)); -ALTER TYPE test_type1 ALTER ATTRIBUTE b TYPE varchar; -- fails -ERROR: cannot alter type "test_type1" because column "test_tbl1_idx.row" uses it -DROP TABLE test_tbl1; -DROP TYPE test_type1; -CREATE TYPE test_type2 AS (a int, b text); -CREATE TABLE test_tbl2 OF test_type2; -CREATE TABLE test_tbl2_subclass () INHERITS (test_tbl2); -\d test_type2 - Composite type "public.test_type2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | text | | | - -\d test_tbl2 - Table "public.test_tbl2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | text | | | -Number of child tables: 1 (Use \d+ to list them.) -Typed table of type: test_type2 - -ALTER TYPE test_type2 ADD ATTRIBUTE c text; -- fails -ERROR: cannot alter type "test_type2" because it is the type of a typed table -HINT: Use ALTER ... CASCADE to alter the typed tables too. -ALTER TYPE test_type2 ADD ATTRIBUTE c text CASCADE; -\d test_type2 - Composite type "public.test_type2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | text | | | - c | text | | | - -\d test_tbl2 - Table "public.test_tbl2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | text | | | - c | text | | | -Number of child tables: 1 (Use \d+ to list them.) -Typed table of type: test_type2 - -ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar; -- fails -ERROR: cannot alter type "test_type2" because it is the type of a typed table -HINT: Use ALTER ... CASCADE to alter the typed tables too. -ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar CASCADE; -\d test_type2 - Composite type "public.test_type2" - Column | Type | Collation | Nullable | Default ---------+-------------------+-----------+----------+--------- - a | integer | | | - b | character varying | | | - c | text | | | - -\d test_tbl2 - Table "public.test_tbl2" - Column | Type | Collation | Nullable | Default ---------+-------------------+-----------+----------+--------- - a | integer | | | - b | character varying | | | - c | text | | | -Number of child tables: 1 (Use \d+ to list them.) -Typed table of type: test_type2 - -ALTER TYPE test_type2 DROP ATTRIBUTE b; -- fails -ERROR: cannot alter type "test_type2" because it is the type of a typed table -HINT: Use ALTER ... CASCADE to alter the typed tables too. -ALTER TYPE test_type2 DROP ATTRIBUTE b CASCADE; -\d test_type2 - Composite type "public.test_type2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - c | text | | | - -\d test_tbl2 - Table "public.test_tbl2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - c | text | | | -Number of child tables: 1 (Use \d+ to list them.) -Typed table of type: test_type2 - -ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa; -- fails -ERROR: cannot alter type "test_type2" because it is the type of a typed table -HINT: Use ALTER ... CASCADE to alter the typed tables too. 
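Each of these failures follows from the same rule: once a typed table depends on the composite type, any structural change to the type must say CASCADE so the dependent table (and its children) are adjusted in the same command; the CASCADE form of the rename follows below. A minimal sketch of the rule with hypothetical names, not part of the regression test:
-- hypothetical example, not part of the regression test
CREATE TYPE person_t AS (pname text);
CREATE TABLE people OF person_t;
ALTER TYPE person_t ADD ATTRIBUTE age int;          -- fails: a typed table depends on person_t
ALTER TYPE person_t ADD ATTRIBUTE age int CASCADE;  -- propagates the new column to people
DROP TABLE people;
DROP TYPE person_t;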
-ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa CASCADE; -\d test_type2 - Composite type "public.test_type2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - aa | integer | | | - c | text | | | - -\d test_tbl2 - Table "public.test_tbl2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - aa | integer | | | - c | text | | | -Number of child tables: 1 (Use \d+ to list them.) -Typed table of type: test_type2 - -\d test_tbl2_subclass - Table "public.test_tbl2_subclass" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - aa | integer | | | - c | text | | | -Inherits: test_tbl2 - -DROP TABLE test_tbl2_subclass, test_tbl2; -DROP TYPE test_type2; -CREATE TYPE test_typex AS (a int, b text); -CREATE TABLE test_tblx (x int, y test_typex check ((y).a > 0)); -ALTER TYPE test_typex DROP ATTRIBUTE a; -- fails -ERROR: cannot drop column a of composite type test_typex because other objects depend on it -DETAIL: constraint test_tblx_y_check on table test_tblx depends on column a of composite type test_typex -HINT: Use DROP ... CASCADE to drop the dependent objects too. -ALTER TYPE test_typex DROP ATTRIBUTE a CASCADE; -NOTICE: drop cascades to constraint test_tblx_y_check on table test_tblx -\d test_tblx - Table "public.test_tblx" - Column | Type | Collation | Nullable | Default ---------+------------+-----------+----------+--------- - x | integer | | | - y | test_typex | | | - -DROP TABLE test_tblx; -DROP TYPE test_typex; --- This test isn't that interesting on its own, but the purpose is to leave --- behind a table to test pg_upgrade with. The table has a composite type --- column in it, and the composite type has a dropped attribute. 
-CREATE TYPE test_type3 AS (a int); -CREATE TABLE test_tbl3 (c) AS SELECT '(1)'::test_type3; -ALTER TYPE test_type3 DROP ATTRIBUTE a, ADD ATTRIBUTE b int; -CREATE TYPE test_type_empty AS (); -DROP TYPE test_type_empty; --- --- typed tables: OF / NOT OF --- -CREATE TYPE tt_t0 AS (z inet, x int, y numeric(8,2)); -ALTER TYPE tt_t0 DROP ATTRIBUTE z; -CREATE TABLE tt0 (x int NOT NULL, y numeric(8,2)); -- OK -CREATE TABLE tt1 (x int, y bigint); -- wrong base type -CREATE TABLE tt2 (x int, y numeric(9,2)); -- wrong typmod -CREATE TABLE tt3 (y numeric(8,2), x int); -- wrong column order -CREATE TABLE tt4 (x int); -- too few columns -CREATE TABLE tt5 (x int, y numeric(8,2), z int); -- too many columns -CREATE TABLE tt6 () INHERITS (tt0); -- can't have a parent -CREATE TABLE tt7 (x int, q text, y numeric(8,2)); -ALTER TABLE tt7 DROP q; -- OK -ALTER TABLE tt0 OF tt_t0; -ALTER TABLE tt1 OF tt_t0; -ERROR: table "tt1" has different type for column "y" -ALTER TABLE tt2 OF tt_t0; -ERROR: table "tt2" has different type for column "y" -ALTER TABLE tt3 OF tt_t0; -ERROR: table has column "y" where type requires "x" -ALTER TABLE tt4 OF tt_t0; -ERROR: table is missing column "y" -ALTER TABLE tt5 OF tt_t0; -ERROR: table has extra column "z" -ALTER TABLE tt6 OF tt_t0; -ERROR: typed tables cannot inherit -ALTER TABLE tt7 OF tt_t0; -CREATE TYPE tt_t1 AS (x int, y numeric(8,2)); -ALTER TABLE tt7 OF tt_t1; -- reassign an already-typed table -ALTER TABLE tt7 NOT OF; -\d tt7 - Table "public.tt7" - Column | Type | Collation | Nullable | Default ---------+--------------+-----------+----------+--------- - x | integer | | | - y | numeric(8,2) | | | - --- make sure we can drop a constraint on the parent but it remains on the child -CREATE TABLE test_drop_constr_parent (c text CHECK (c IS NOT NULL)); -CREATE TABLE test_drop_constr_child () INHERITS (test_drop_constr_parent); -ALTER TABLE ONLY test_drop_constr_parent DROP CONSTRAINT "test_drop_constr_parent_c_check"; --- should fail -INSERT INTO test_drop_constr_child (c) VALUES (NULL); -ERROR: new row for relation "test_drop_constr_child" violates check constraint "test_drop_constr_parent_c_check" -DETAIL: Failing row contains (null).
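A way to see where the constraint survived is to query pg_constraint directly: after the ONLY drop above, the parent's row is gone and the child's copy stands alone as a local, non-inherited constraint. A sketch of such a probe (it assumes the tables still exist, so it would have to run before the cleanup below):
-- hypothetical probe, not part of the regression test
SELECT conrelid::regclass AS rel, conname, conislocal, coninhcount
  FROM pg_constraint
 WHERE conname = 'test_drop_constr_parent_c_check';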
-DROP TABLE test_drop_constr_parent CASCADE; -NOTICE: drop cascades to table test_drop_constr_child --- --- IF EXISTS test --- -ALTER TABLE IF EXISTS tt8 ADD COLUMN f int; -NOTICE: relation "tt8" does not exist, skipping -ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f); -NOTICE: relation "tt8" does not exist, skipping -ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10); -NOTICE: relation "tt8" does not exist, skipping -ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0; -NOTICE: relation "tt8" does not exist, skipping -ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1; -NOTICE: relation "tt8" does not exist, skipping -ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2; -NOTICE: relation "tt8" does not exist, skipping -CREATE TABLE tt8(a int); -CREATE SCHEMA alter2; -ALTER TABLE IF EXISTS tt8 ADD COLUMN f int; -ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f); -ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10); -ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0; -ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1; -ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2; -\d alter2.tt8 - Table "alter2.tt8" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - f1 | integer | | not null | 0 -Indexes: - "xxx" PRIMARY KEY, btree (f1) -Check constraints: - "tt8_f_check" CHECK (f1 >= 0 AND f1 <= 10) - -DROP TABLE alter2.tt8; -DROP SCHEMA alter2; --- --- Check conflicts between index and CHECK constraint names --- -CREATE TABLE tt9(c integer); -ALTER TABLE tt9 ADD CHECK(c > 1); -ALTER TABLE tt9 ADD CHECK(c > 2); -- picks nonconflicting name -ALTER TABLE tt9 ADD CONSTRAINT foo CHECK(c > 3); -ALTER TABLE tt9 ADD CONSTRAINT foo CHECK(c > 4); -- fail, dup name -ERROR: constraint "foo" for relation "tt9" already exists -ALTER TABLE tt9 ADD UNIQUE(c); -ALTER TABLE tt9 ADD UNIQUE(c); -- picks nonconflicting name -ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key UNIQUE(c); -- fail, dup name -ERROR: relation "tt9_c_key" already exists -ALTER TABLE tt9 ADD CONSTRAINT foo UNIQUE(c); -- fail, dup name -ERROR: constraint "foo" for relation "tt9" already exists -ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key CHECK(c > 5); -- fail, dup name -ERROR: constraint "tt9_c_key" for relation "tt9" already exists -ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key2 CHECK(c > 6); -ALTER TABLE tt9 ADD UNIQUE(c); -- picks nonconflicting name -\d tt9 - Table "public.tt9" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - c | integer | | | -Indexes: - "tt9_c_key" UNIQUE CONSTRAINT, btree (c) - "tt9_c_key1" UNIQUE CONSTRAINT, btree (c) - "tt9_c_key3" UNIQUE CONSTRAINT, btree (c) -Check constraints: - "foo" CHECK (c > 3) - "tt9_c_check" CHECK (c > 1) - "tt9_c_check1" CHECK (c > 2) - "tt9_c_key2" CHECK (c > 6) - -DROP TABLE tt9; --- Check that comments on constraints and indexes are not lost at ALTER TABLE. 
-CREATE TABLE comment_test ( - id int, - constraint id_notnull_constraint not null id, - positive_col int CHECK (positive_col > 0), - indexed_col int, - CONSTRAINT comment_test_pk PRIMARY KEY (id)); -CREATE INDEX comment_test_index ON comment_test(indexed_col); -COMMENT ON COLUMN comment_test.id IS 'Column ''id'' on comment_test'; -COMMENT ON INDEX comment_test_index IS 'Simple index on comment_test'; -COMMENT ON CONSTRAINT comment_test_positive_col_check ON comment_test IS 'CHECK constraint on comment_test.positive_col'; -COMMENT ON CONSTRAINT comment_test_pk ON comment_test IS 'PRIMARY KEY constraint of comment_test'; -COMMENT ON CONSTRAINT id_notnull_constraint ON comment_test IS 'NOT NULL constraint of comment_test'; -COMMENT ON INDEX comment_test_pk IS 'Index backing the PRIMARY KEY of comment_test'; -SELECT col_description('comment_test'::regclass, 1) as comment; - comment ------------------------------ - Column 'id' on comment_test -(1 row) - -SELECT indexrelid::regclass::text as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass ORDER BY 1, 2; - index | comment ---------------------+----------------------------------------------- - comment_test_index | Simple index on comment_test - comment_test_pk | Index backing the PRIMARY KEY of comment_test -(2 rows) - -SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass ORDER BY 1, 2; - constraint | comment ----------------------------------+----------------------------------------------- - comment_test_pk | PRIMARY KEY constraint of comment_test - comment_test_positive_col_check | CHECK constraint on comment_test.positive_col - id_notnull_constraint | NOT NULL constraint of comment_test -(3 rows) - --- Change the datatype of all the columns. ALTER TABLE is optimized to not --- rebuild an index if the new data type is binary compatible with the old --- one. Check by doing a dummy ALTER TABLE that doesn't change the datatype --- first, to test that no-op codepath, and another one that does. -ALTER TABLE comment_test ALTER COLUMN indexed_col SET DATA TYPE int; -ALTER TABLE comment_test ALTER COLUMN indexed_col SET DATA TYPE text; -ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE int; -ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE text; -ALTER TABLE comment_test ALTER COLUMN positive_col SET DATA TYPE int; -ALTER TABLE comment_test ALTER COLUMN positive_col SET DATA TYPE bigint; --- Some error cases. -ALTER TABLE comment_test ALTER COLUMN xmin SET DATA TYPE x; -ERROR: cannot alter system column "xmin" -ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE x; -ERROR: type "x" does not exist -ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE int COLLATE "C"; -ERROR: collations are not supported by type integer --- Check that the comments are intact.
-SELECT col_description('comment_test'::regclass, 1) as comment; - comment ------------------------------ - Column 'id' on comment_test -(1 row) - -SELECT indexrelid::regclass::text as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass ORDER BY 1, 2; - index | comment ---------------------+----------------------------------------------- - comment_test_index | Simple index on comment_test - comment_test_pk | Index backing the PRIMARY KEY of comment_test -(2 rows) - -SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass ORDER BY 1, 2; - constraint | comment ----------------------------------+----------------------------------------------- - comment_test_pk | PRIMARY KEY constraint of comment_test - comment_test_positive_col_check | CHECK constraint on comment_test.positive_col - id_notnull_constraint | NOT NULL constraint of comment_test -(3 rows) - --- Check compatibility for foreign keys and comments. This is done --- separately as rebuilding the column type of the parent leads --- to an error and would reduce the test scope. -CREATE TABLE comment_test_child ( - id text CONSTRAINT comment_test_child_fk REFERENCES comment_test); -CREATE INDEX comment_test_child_fk ON comment_test_child(id); -COMMENT ON COLUMN comment_test_child.id IS 'Column ''id'' on comment_test_child'; -COMMENT ON INDEX comment_test_child_fk IS 'Index backing the FOREIGN KEY of comment_test_child'; -COMMENT ON CONSTRAINT comment_test_child_fk ON comment_test_child IS 'FOREIGN KEY constraint of comment_test_child'; --- Change column type of parent -ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE text; -ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE int USING id::integer; -ERROR: foreign key constraint "comment_test_child_fk" cannot be implemented -DETAIL: Key columns "id" of the referencing table and "id" of the referenced table are of incompatible types: text and integer. --- Comments should be intact -SELECT col_description('comment_test_child'::regclass, 1) as comment; - comment ------------------------------------ - Column 'id' on comment_test_child -(1 row) - -SELECT indexrelid::regclass::text as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test_child'::regclass ORDER BY 1, 2; - index | comment ------------------------+----------------------------------------------------- - comment_test_child_fk | Index backing the FOREIGN KEY of comment_test_child -(1 row) - -SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test_child'::regclass ORDER BY 1, 2; - constraint | comment ------------------------+---------------------------------------------- - comment_test_child_fk | FOREIGN KEY constraint of comment_test_child -(1 row) - --- Check that we map relation oids to filenodes and back correctly. Only --- display bad mappings so the test output doesn't change all the time. A --- filenode function call can return NULL for a relation dropped concurrently --- with the call's surrounding query, so ignore a NULL mapped_oid for --- relations that no longer exist after all calls finish. 
-CREATE TEMP TABLE filenode_mapping AS -SELECT - oid, mapped_oid, reltablespace, relfilenode, relname -FROM pg_class, - pg_filenode_relation(reltablespace, pg_relation_filenode(oid)) AS mapped_oid -WHERE relkind IN ('r', 'i', 'S', 't', 'm') AND mapped_oid IS DISTINCT FROM oid; -SELECT m.* FROM filenode_mapping m LEFT JOIN pg_class c ON c.oid = m.oid -WHERE c.oid IS NOT NULL OR m.mapped_oid IS NOT NULL; - oid | mapped_oid | reltablespace | relfilenode | relname ------+------------+---------------+-------------+--------- -(0 rows) - --- Checks on creating and manipulating user-defined relations in --- pg_catalog. -SHOW allow_system_table_mods; - allow_system_table_mods -------------------------- - off -(1 row) - --- disallowed because of search_path issues with pg_dump -CREATE TABLE pg_catalog.new_system_table(); -ERROR: permission denied to create "pg_catalog.new_system_table" -DETAIL: System catalog modifications are currently disallowed. --- instead create in public first, move to catalog -CREATE TABLE new_system_table(id serial primary key, othercol text); -ALTER TABLE new_system_table SET SCHEMA pg_catalog; -ALTER TABLE new_system_table SET SCHEMA public; -ALTER TABLE new_system_table SET SCHEMA pg_catalog; --- will be ignored -- already there: -ALTER TABLE new_system_table SET SCHEMA pg_catalog; -ALTER TABLE new_system_table RENAME TO old_system_table; -CREATE INDEX old_system_table__othercol ON old_system_table (othercol); -INSERT INTO old_system_table(othercol) VALUES ('somedata'), ('otherdata'); -UPDATE old_system_table SET id = -id; -DELETE FROM old_system_table WHERE othercol = 'somedata'; -TRUNCATE old_system_table; -ALTER TABLE old_system_table DROP CONSTRAINT new_system_table_pkey; -ALTER TABLE old_system_table DROP COLUMN othercol; -DROP TABLE old_system_table; --- set logged -CREATE UNLOGGED TABLE unlogged1(f1 SERIAL PRIMARY KEY, f2 TEXT); -- has sequence, toast --- check relpersistence of an unlogged table -SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged1' -UNION ALL -SELECT r.relname || ' toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^unlogged1' -UNION ALL -SELECT r.relname || ' toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^unlogged1' -ORDER BY relname; - relname | relkind | relpersistence ------------------------+---------+---------------- - unlogged1 | r | u - unlogged1 toast index | i | u - unlogged1 toast table | t | u - unlogged1_f1_seq | S | u - unlogged1_pkey | i | u -(5 rows) - -CREATE UNLOGGED TABLE unlogged2(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES unlogged1); -- foreign key -CREATE UNLOGGED TABLE unlogged3(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES unlogged3); -- self-referencing foreign key -ALTER TABLE unlogged3 SET LOGGED; -- skip self-referencing foreign key -ALTER TABLE unlogged2 SET LOGGED; -- fails because a foreign key to an unlogged table exists -ERROR: could not change table "unlogged2" to logged because it references unlogged table "unlogged1" -ALTER TABLE unlogged1 SET LOGGED; --- check relpersistence of an unlogged table after changing to permanent -SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged1' -UNION ALL -SELECT r.relname || ' toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~
'^unlogged1' -UNION ALL -SELECT r.relname || ' toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^unlogged1' -ORDER BY relname; - relname | relkind | relpersistence ------------------------+---------+---------------- - unlogged1 | r | p - unlogged1 toast index | i | p - unlogged1 toast table | t | p - unlogged1_f1_seq | S | p - unlogged1_pkey | i | p -(5 rows) - -ALTER TABLE unlogged1 SET LOGGED; -- silently do nothing -DROP TABLE unlogged3; -DROP TABLE unlogged2; -DROP TABLE unlogged1; --- set unlogged -CREATE TABLE logged1(f1 SERIAL PRIMARY KEY, f2 TEXT); -- has sequence, toast --- check relpersistence of a permanent table -SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^logged1' -UNION ALL -SELECT r.relname || ' toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^logged1' -UNION ALL -SELECT r.relname ||' toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^logged1' -ORDER BY relname; - relname | relkind | relpersistence ----------------------+---------+---------------- - logged1 | r | p - logged1 toast index | i | p - logged1 toast table | t | p - logged1_f1_seq | S | p - logged1_pkey | i | p -(5 rows) - -CREATE TABLE logged2(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES logged1); -- foreign key -CREATE TABLE logged3(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES logged3); -- self-referencing foreign key -ALTER TABLE logged1 SET UNLOGGED; -- fails because a foreign key from a permanent table exists -ERROR: could not change table "logged1" to unlogged because it references logged table "logged2" -ALTER TABLE logged3 SET UNLOGGED; -- skip self-referencing foreign key -ALTER TABLE logged2 SET UNLOGGED; -ALTER TABLE logged1 SET UNLOGGED; --- check relpersistence of a permanent table after changing to unlogged -SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^logged1' -UNION ALL -SELECT r.relname || ' toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^logged1' -UNION ALL -SELECT r.relname || ' toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^logged1' -ORDER BY relname; - relname | relkind | relpersistence ----------------------+---------+---------------- - logged1 | r | u - logged1 toast index | i | u - logged1 toast table | t | u - logged1_f1_seq | S | u - logged1_pkey | i | u -(5 rows) - -ALTER TABLE logged1 SET UNLOGGED; -- silently do nothing -DROP TABLE logged3; -DROP TABLE logged2; -DROP TABLE logged1; --- test ADD COLUMN IF NOT EXISTS -CREATE TABLE test_add_column(c1 integer); -\d test_add_column - Table "public.test_add_column" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - c1 | integer | | | - -ALTER TABLE test_add_column - ADD COLUMN c2 integer; -\d test_add_column - Table "public.test_add_column" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - c1 | integer | | | - c2 | integer | | | - -ALTER TABLE test_add_column - ADD COLUMN c2 integer; -- 
fail because c2 already exists -ERROR: column "c2" of relation "test_add_column" already exists -ALTER TABLE ONLY test_add_column - ADD COLUMN c2 integer; -- fail because c2 already exists -ERROR: column "c2" of relation "test_add_column" already exists -\d test_add_column - Table "public.test_add_column" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - c1 | integer | | | - c2 | integer | | | - -ALTER TABLE test_add_column - ADD COLUMN IF NOT EXISTS c2 integer; -- skipping because c2 already exists -NOTICE: column "c2" of relation "test_add_column" already exists, skipping -ALTER TABLE ONLY test_add_column - ADD COLUMN IF NOT EXISTS c2 integer; -- skipping because c2 already exists -NOTICE: column "c2" of relation "test_add_column" already exists, skipping -\d test_add_column - Table "public.test_add_column" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - c1 | integer | | | - c2 | integer | | | - -ALTER TABLE test_add_column - ADD COLUMN c2 integer, -- fail because c2 already exists - ADD COLUMN c3 integer primary key; -ERROR: column "c2" of relation "test_add_column" already exists -\d test_add_column - Table "public.test_add_column" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - c1 | integer | | | - c2 | integer | | | - -ALTER TABLE test_add_column - ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists - ADD COLUMN c3 integer primary key; -NOTICE: column "c2" of relation "test_add_column" already exists, skipping -\d test_add_column - Table "public.test_add_column" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - c1 | integer | | | - c2 | integer | | | - c3 | integer | | not null | -Indexes: - "test_add_column_pkey" PRIMARY KEY, btree (c3) - -ALTER TABLE test_add_column - ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists - ADD COLUMN IF NOT EXISTS c3 integer primary key; -- skipping because c3 already exists -NOTICE: column "c2" of relation "test_add_column" already exists, skipping -NOTICE: column "c3" of relation "test_add_column" already exists, skipping -\d test_add_column - Table "public.test_add_column" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - c1 | integer | | | - c2 | integer | | | - c3 | integer | | not null | -Indexes: - "test_add_column_pkey" PRIMARY KEY, btree (c3) - -ALTER TABLE test_add_column - ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists - ADD COLUMN IF NOT EXISTS c3 integer, -- skipping because c3 already exists - ADD COLUMN c4 integer REFERENCES test_add_column; -NOTICE: column "c2" of relation "test_add_column" already exists, skipping -NOTICE: column "c3" of relation "test_add_column" already exists, skipping -\d test_add_column - Table "public.test_add_column" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - c1 | integer | | | - c2 | integer | | | - c3 | integer | | not null | - c4 | integer | | | -Indexes: - "test_add_column_pkey" PRIMARY KEY, btree (c3) -Foreign-key constraints: - "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3) -Referenced by: - TABLE "test_add_column" CONSTRAINT "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3) - -ALTER TABLE test_add_column - ADD COLUMN IF NOT EXISTS c4 integer REFERENCES 
test_add_column; -NOTICE: column "c4" of relation "test_add_column" already exists, skipping -\d test_add_column - Table "public.test_add_column" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - c1 | integer | | | - c2 | integer | | | - c3 | integer | | not null | - c4 | integer | | | -Indexes: - "test_add_column_pkey" PRIMARY KEY, btree (c3) -Foreign-key constraints: - "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3) -Referenced by: - TABLE "test_add_column" CONSTRAINT "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3) - -ALTER TABLE test_add_column - ADD COLUMN IF NOT EXISTS c5 SERIAL CHECK (c5 > 8); -\d test_add_column - Table "public.test_add_column" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------------------------------------------- - c1 | integer | | | - c2 | integer | | | - c3 | integer | | not null | - c4 | integer | | | - c5 | integer | | not null | nextval('test_add_column_c5_seq'::regclass) -Indexes: - "test_add_column_pkey" PRIMARY KEY, btree (c3) -Check constraints: - "test_add_column_c5_check" CHECK (c5 > 8) -Foreign-key constraints: - "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3) -Referenced by: - TABLE "test_add_column" CONSTRAINT "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3) - -ALTER TABLE test_add_column - ADD COLUMN IF NOT EXISTS c5 SERIAL CHECK (c5 > 10); -NOTICE: column "c5" of relation "test_add_column" already exists, skipping -\d test_add_column* - Table "public.test_add_column" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------------------------------------------- - c1 | integer | | | - c2 | integer | | | - c3 | integer | | not null | - c4 | integer | | | - c5 | integer | | not null | nextval('test_add_column_c5_seq'::regclass) -Indexes: - "test_add_column_pkey" PRIMARY KEY, btree (c3) -Check constraints: - "test_add_column_c5_check" CHECK (c5 > 8) -Foreign-key constraints: - "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3) -Referenced by: - TABLE "test_add_column" CONSTRAINT "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3) - - Sequence "public.test_add_column_c5_seq" - Type | Start | Minimum | Maximum | Increment | Cycles? | Cache ----------+-------+---------+------------+-----------+---------+------- - integer | 1 | 1 | 2147483647 | 1 | no | 1 -Owned by: public.test_add_column.c5 - - Index "public.test_add_column_pkey" - Column | Type | Key? 
| Definition ---------+---------+------+------------ - c3 | integer | yes | c3 -primary key, btree, for table "public.test_add_column" - -DROP TABLE test_add_column; -\d test_add_column* --- assorted cases with multiple ALTER TABLE steps -CREATE TABLE ataddindex(f1 INT); -INSERT INTO ataddindex VALUES (42), (43); -CREATE UNIQUE INDEX ataddindexi0 ON ataddindex(f1); -ALTER TABLE ataddindex - ADD PRIMARY KEY USING INDEX ataddindexi0, - ALTER f1 TYPE BIGINT; -\d ataddindex - Table "public.ataddindex" - Column | Type | Collation | Nullable | Default ---------+--------+-----------+----------+--------- - f1 | bigint | | not null | -Indexes: - "ataddindexi0" PRIMARY KEY, btree (f1) - -DROP TABLE ataddindex; -CREATE TABLE ataddindex(f1 VARCHAR(10)); -INSERT INTO ataddindex(f1) VALUES ('foo'), ('a'); -ALTER TABLE ataddindex - ALTER f1 SET DATA TYPE TEXT, - ADD EXCLUDE ((f1 LIKE 'a') WITH =); -\d ataddindex - Table "public.ataddindex" - Column | Type | Collation | Nullable | Default ---------+------+-----------+----------+--------- - f1 | text | | | -Indexes: - "ataddindex_expr_excl" EXCLUDE USING btree ((f1 ~~ 'a'::text) WITH =) - -DROP TABLE ataddindex; -CREATE TABLE ataddindex(id int, ref_id int); -ALTER TABLE ataddindex - ADD PRIMARY KEY (id), - ADD FOREIGN KEY (ref_id) REFERENCES ataddindex; -\d ataddindex - Table "public.ataddindex" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - id | integer | | not null | - ref_id | integer | | | -Indexes: - "ataddindex_pkey" PRIMARY KEY, btree (id) -Foreign-key constraints: - "ataddindex_ref_id_fkey" FOREIGN KEY (ref_id) REFERENCES ataddindex(id) -Referenced by: - TABLE "ataddindex" CONSTRAINT "ataddindex_ref_id_fkey" FOREIGN KEY (ref_id) REFERENCES ataddindex(id) - -DROP TABLE ataddindex; -CREATE TABLE ataddindex(id int, ref_id int); -ALTER TABLE ataddindex - ADD UNIQUE (id), - ADD FOREIGN KEY (ref_id) REFERENCES ataddindex (id); -\d ataddindex - Table "public.ataddindex" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - id | integer | | | - ref_id | integer | | | -Indexes: - "ataddindex_id_key" UNIQUE CONSTRAINT, btree (id) -Foreign-key constraints: - "ataddindex_ref_id_fkey" FOREIGN KEY (ref_id) REFERENCES ataddindex(id) -Referenced by: - TABLE "ataddindex" CONSTRAINT "ataddindex_ref_id_fkey" FOREIGN KEY (ref_id) REFERENCES ataddindex(id) - -DROP TABLE ataddindex; -CREATE TABLE atnotnull1 (); -ALTER TABLE atnotnull1 - ADD COLUMN a INT, - ALTER a SET NOT NULL; -ALTER TABLE atnotnull1 - ADD COLUMN b INT, - ADD NOT NULL b; -ALTER TABLE atnotnull1 - ADD COLUMN c INT, - ADD PRIMARY KEY (c); -\d+ atnotnull1 - Table "public.atnotnull1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - a | integer | | not null | | plain | | - b | integer | | not null | | plain | | - c | integer | | not null | | plain | | -Indexes: - "atnotnull1_pkey" PRIMARY KEY, btree (c) -Not-null constraints: - "atnotnull1_a_not_null" NOT NULL "a" - "atnotnull1_b_not_null" NOT NULL "b" - "atnotnull1_c_not_null" NOT NULL "c" - --- cannot drop column that is part of the partition key -CREATE TABLE partitioned ( - a int, - b int -) PARTITION BY RANGE (a, (a+b+1)); -ALTER TABLE partitioned DROP COLUMN a; -ERROR: cannot drop column "a" because it is part of the partition key of relation "partitioned" -ALTER TABLE partitioned ALTER COLUMN a TYPE char(5); 
-ERROR: cannot alter column "a" because it is part of the partition key of relation "partitioned" -ALTER TABLE partitioned DROP COLUMN b; -ERROR: cannot drop column "b" because it is part of the partition key of relation "partitioned" -ALTER TABLE partitioned ALTER COLUMN b TYPE char(5); -ERROR: cannot alter column "b" because it is part of the partition key of relation "partitioned" --- specifying storage parameters for partitioned tables is not supported -ALTER TABLE partitioned SET (fillfactor=100); -ERROR: cannot specify storage parameters for a partitioned table -HINT: Specify storage parameters for its leaf partitions instead. --- partitioned table cannot participate in regular inheritance -CREATE TABLE nonpartitioned ( - a int, - b int -); -ALTER TABLE partitioned INHERIT nonpartitioned; -ERROR: cannot change inheritance of partitioned table -ALTER TABLE nonpartitioned INHERIT partitioned; -ERROR: cannot inherit from partitioned table "partitioned" --- cannot add NO INHERIT constraint to partitioned tables -ALTER TABLE partitioned ADD CONSTRAINT chk_a CHECK (a > 0) NO INHERIT; -ERROR: cannot add NO INHERIT constraint to partitioned table "partitioned" -DROP TABLE partitioned, nonpartitioned; --- --- ATTACH PARTITION --- --- check that target table is partitioned -CREATE TABLE unparted ( - a int -); -CREATE TABLE fail_part (like unparted); -ALTER TABLE unparted ATTACH PARTITION fail_part FOR VALUES IN ('a'); -ERROR: ALTER action ATTACH PARTITION cannot be performed on relation "unparted" -DETAIL: This operation is not supported for tables. -DROP TABLE unparted, fail_part; --- check that partition bound is compatible -CREATE TABLE list_parted ( - a int NOT NULL, - b char(2) COLLATE "C", - CONSTRAINT check_a CHECK (a > 0) -) PARTITION BY LIST (a); -CREATE TABLE fail_part (LIKE list_parted); -ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES FROM (1) TO (10); -ERROR: invalid bound specification for a list partition -LINE 1: ...list_parted ATTACH PARTITION fail_part FOR VALUES FROM (1) T... 
- ^ -DROP TABLE fail_part; --- check that the table being attached exists -ALTER TABLE list_parted ATTACH PARTITION nonexistent FOR VALUES IN (1); -ERROR: relation "nonexistent" does not exist --- check ownership of the source table -CREATE ROLE regress_test_me; -CREATE ROLE regress_test_not_me; -CREATE TABLE not_owned_by_me (LIKE list_parted); -ALTER TABLE not_owned_by_me OWNER TO regress_test_not_me; -SET SESSION AUTHORIZATION regress_test_me; -CREATE TABLE owned_by_me ( - a int -) PARTITION BY LIST (a); -ALTER TABLE owned_by_me ATTACH PARTITION not_owned_by_me FOR VALUES IN (1); -ERROR: must be owner of table not_owned_by_me -RESET SESSION AUTHORIZATION; -DROP TABLE owned_by_me, not_owned_by_me; -DROP ROLE regress_test_not_me; -DROP ROLE regress_test_me; --- check that the table being attached is not part of regular inheritance -CREATE TABLE parent (LIKE list_parted); -CREATE TABLE child () INHERITS (parent); -ALTER TABLE list_parted ATTACH PARTITION child FOR VALUES IN (1); -ERROR: cannot attach inheritance child as partition -ALTER TABLE list_parted ATTACH PARTITION parent FOR VALUES IN (1); -ERROR: cannot attach inheritance parent as partition -DROP TABLE child; --- now it should work, with a little tweak -ALTER TABLE parent ADD CONSTRAINT check_a CHECK (a > 0); -ALTER TABLE list_parted ATTACH PARTITION parent FOR VALUES IN (1); --- test insert/update, per bug #18550 -INSERT INTO parent VALUES (1); -UPDATE parent SET a = 2 WHERE a = 1; -ERROR: new row for relation "parent" violates partition constraint -DETAIL: Failing row contains (2, null). -DROP TABLE parent CASCADE; --- check any TEMP-ness -CREATE TEMP TABLE temp_parted (a int) PARTITION BY LIST (a); -CREATE TABLE perm_part (a int); -ALTER TABLE temp_parted ATTACH PARTITION perm_part FOR VALUES IN (1); -ERROR: cannot attach a permanent relation as partition of temporary relation "temp_parted" -DROP TABLE temp_parted, perm_part; --- check that the table being attached is not a typed table -CREATE TYPE mytype AS (a int); -CREATE TABLE fail_part OF mytype; -ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); -ERROR: cannot attach a typed table as partition -DROP TYPE mytype CASCADE; -NOTICE: drop cascades to table fail_part --- check that the table being attached has only columns present in the parent -CREATE TABLE fail_part (like list_parted, c int); -ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); -ERROR: table "fail_part" contains column "c" not found in parent "list_parted" -DETAIL: The new partition may contain only the columns present in parent. 
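(Aside, not part of the regression output: a minimal sketch of the usual remedy for the error above, using hypothetical table names. ATTACH PARTITION requires the candidate's columns to match the parent's, so the surplus column is dropped first.)

CREATE TABLE demo_parted (a int) PARTITION BY LIST (a);
CREATE TABLE demo_candidate (a int, extra int);
-- attaching now would fail: column "extra" is not present in the parent
ALTER TABLE demo_candidate DROP COLUMN extra;
ALTER TABLE demo_parted ATTACH PARTITION demo_candidate FOR VALUES IN (1);  -- succeeds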
-DROP TABLE fail_part; --- check that the table being attached has every column of the parent -CREATE TABLE fail_part (a int NOT NULL); -ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); -ERROR: child table is missing column "b" -DROP TABLE fail_part; --- check that columns match in type, collation and NOT NULL status -CREATE TABLE fail_part ( - b char(3), - a int NOT NULL -); -ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); -ERROR: child table "fail_part" has different type for column "b" -ALTER TABLE fail_part ALTER b TYPE char (2) COLLATE "POSIX"; -ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); -ERROR: child table "fail_part" has different collation for column "b" -DROP TABLE fail_part; --- check that the table being attached has all constraints of the parent -CREATE TABLE fail_part ( - b char(2) COLLATE "C", - a int NOT NULL -); -ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); -ERROR: child table is missing constraint "check_a" --- check that the constraint matches in definition with parent's constraint -ALTER TABLE fail_part ADD CONSTRAINT check_a CHECK (a >= 0); -ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); -ERROR: child table "fail_part" has different definition for check constraint "check_a" -DROP TABLE fail_part; --- check the attributes and constraints after partition is attached -CREATE TABLE part_1 ( - a int NOT NULL, - b char(2) COLLATE "C", - CONSTRAINT check_a CHECK (a > 0) -); -ALTER TABLE list_parted ATTACH PARTITION part_1 FOR VALUES IN (1); --- attislocal and conislocal are always false for merged attributes and constraints respectively. -SELECT attislocal, attinhcount FROM pg_attribute WHERE attrelid = 'part_1'::regclass AND attnum > 0; - attislocal | attinhcount -------------+------------- - f | 1 - f | 1 -(2 rows) - -SELECT conislocal, coninhcount FROM pg_constraint WHERE conrelid = 'part_1'::regclass AND conname = 'check_a'; - conislocal | coninhcount -------------+------------- - f | 1 -(1 row) - --- check that NOT NULL NO INHERIT cannot be merged to a normal NOT NULL -CREATE TABLE part_fail (a int NOT NULL NO INHERIT, - b char(2) COLLATE "C", - CONSTRAINT check_a CHECK (a > 0) -); -ALTER TABLE list_parted ATTACH PARTITION part_fail FOR VALUES IN (2); -ERROR: constraint "part_fail_a_not_null" conflicts with non-inherited constraint on child table "part_fail" -DROP TABLE part_fail; --- check that the new partition won't overlap with an existing partition -CREATE TABLE fail_part (LIKE part_1 INCLUDING CONSTRAINTS); -ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); -ERROR: partition "fail_part" would overlap partition "part_1" -LINE 1: ...LE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); - ^ -DROP TABLE fail_part; --- check that an existing table can be attached as a default partition -CREATE TABLE def_part (LIKE list_parted INCLUDING CONSTRAINTS); -ALTER TABLE list_parted ATTACH PARTITION def_part DEFAULT; --- check attaching default partition fails if a default partition already --- exists -CREATE TABLE fail_def_part (LIKE part_1 INCLUDING CONSTRAINTS); -ALTER TABLE list_parted ATTACH PARTITION fail_def_part DEFAULT; -ERROR: partition "fail_def_part" conflicts with existing default partition "def_part" -LINE 1: ...ER TABLE list_parted ATTACH PARTITION fail_def_part DEFAULT; - ^ --- check validation when attaching list partitions -CREATE TABLE list_parted2 ( - a int, - b char -) PARTITION BY LIST (a); --- check that violating 
rows are correctly reported -CREATE TABLE part_2 (LIKE list_parted2); -INSERT INTO part_2 VALUES (3, 'a'); -ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2); -ERROR: partition constraint of relation "part_2" is violated by some row --- should be ok after deleting the bad row -DELETE FROM part_2; -ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2); --- check partition cannot be attached if default has some row for its values -CREATE TABLE list_parted2_def PARTITION OF list_parted2 DEFAULT; -INSERT INTO list_parted2_def VALUES (11, 'z'); -CREATE TABLE part_3 (LIKE list_parted2); -ALTER TABLE list_parted2 ATTACH PARTITION part_3 FOR VALUES IN (11); -ERROR: updated partition constraint for default partition "list_parted2_def" would be violated by some row --- should be ok after deleting the bad row -DELETE FROM list_parted2_def WHERE a = 11; -ALTER TABLE list_parted2 ATTACH PARTITION part_3 FOR VALUES IN (11); --- adding constraints that describe the desired partition constraint --- (or more restrictive) will help skip the validation scan -CREATE TABLE part_3_4 ( - LIKE list_parted2, - CONSTRAINT check_a CHECK (a IN (3)) -); --- however, if a list partition does not accept nulls, there should be --- an explicit NOT NULL constraint on the partition key column for the --- validation scan to be skipped; -ALTER TABLE list_parted2 ATTACH PARTITION part_3_4 FOR VALUES IN (3, 4); --- adding a NOT NULL constraint will cause the scan to be skipped -ALTER TABLE list_parted2 DETACH PARTITION part_3_4; -ALTER TABLE part_3_4 ALTER a SET NOT NULL; -ALTER TABLE list_parted2 ATTACH PARTITION part_3_4 FOR VALUES IN (3, 4); --- check if default partition scan skipped -ALTER TABLE list_parted2_def ADD CONSTRAINT check_a CHECK (a IN (5, 6)); -CREATE TABLE part_55_66 PARTITION OF list_parted2 FOR VALUES IN (55, 66); --- check validation when attaching range partitions -CREATE TABLE range_parted ( - a int, - b int -) PARTITION BY RANGE (a, b); --- check that violating rows are correctly reported -CREATE TABLE part1 ( - a int NOT NULL CHECK (a = 1), - b int NOT NULL CHECK (b >= 1 AND b <= 10) -); -INSERT INTO part1 VALUES (1, 10); --- Remember the TO bound is exclusive -ALTER TABLE range_parted ATTACH PARTITION part1 FOR VALUES FROM (1, 1) TO (1, 10); -ERROR: partition constraint of relation "part1" is violated by some row --- should be ok after deleting the bad row -DELETE FROM part1; -ALTER TABLE range_parted ATTACH PARTITION part1 FOR VALUES FROM (1, 1) TO (1, 10); --- adding constraints that describe the desired partition constraint --- (or more restrictive) will help skip the validation scan -CREATE TABLE part2 ( - a int NOT NULL CHECK (a = 1), - b int NOT NULL CHECK (b >= 10 AND b < 18) -); -ALTER TABLE range_parted ATTACH PARTITION part2 FOR VALUES FROM (1, 10) TO (1, 20); --- Create default partition -CREATE TABLE partr_def1 PARTITION OF range_parted DEFAULT; --- Only one default partition is allowed, hence, following should give error -CREATE TABLE partr_def2 (LIKE part1 INCLUDING CONSTRAINTS); -ALTER TABLE range_parted ATTACH PARTITION partr_def2 DEFAULT; -ERROR: partition "partr_def2" conflicts with existing default partition "partr_def1" -LINE 1: ...LTER TABLE range_parted ATTACH PARTITION partr_def2 DEFAULT; - ^ --- Overlapping partitions cannot be attached, hence, following should give error -INSERT INTO partr_def1 VALUES (2, 10); -CREATE TABLE part3 (LIKE range_parted); -ALTER TABLE range_parted ATTACH partition part3 FOR VALUES FROM (2, 10) TO (2, 20); -ERROR: 
updated partition constraint for default partition "partr_def1" would be violated by some row --- Attaching partitions should be successful when there are no overlapping rows -ALTER TABLE range_parted ATTACH partition part3 FOR VALUES FROM (3, 10) TO (3, 20); --- check that leaf partitions are scanned when attaching a partitioned --- table -CREATE TABLE part_5 ( - LIKE list_parted2 -) PARTITION BY LIST (b); --- check that violating rows are correctly reported -CREATE TABLE part_5_a PARTITION OF part_5 FOR VALUES IN ('a'); -INSERT INTO part_5_a (a, b) VALUES (6, 'a'); -ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5); -ERROR: partition constraint of relation "part_5_a" is violated by some row --- delete the faulting row and also add a constraint to skip the scan -DELETE FROM part_5_a WHERE a NOT IN (3); -ALTER TABLE part_5 ADD CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 5); -ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5); -ALTER TABLE list_parted2 DETACH PARTITION part_5; -ALTER TABLE part_5 DROP CONSTRAINT check_a; --- scan should again be skipped, even though NOT NULL is now a column property -ALTER TABLE part_5 ADD CONSTRAINT check_a CHECK (a IN (5)), ALTER a SET NOT NULL; -ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5); --- Check the case where attnos of the partitioning columns in the table being --- attached differs from the parent. It should not affect the constraint- --- checking logic that allows to skip the scan. -CREATE TABLE part_6 ( - c int, - LIKE list_parted2, - CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 6) -); -ALTER TABLE part_6 DROP c; -ALTER TABLE list_parted2 ATTACH PARTITION part_6 FOR VALUES IN (6); --- Similar to above, but the table being attached is a partitioned table --- whose partition has still different attnos for the root partitioning --- columns. -CREATE TABLE part_7 ( - LIKE list_parted2, - CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 7) -) PARTITION BY LIST (b); -CREATE TABLE part_7_a_null ( - c int, - d int, - e int, - LIKE list_parted2, -- 'a' will have attnum = 4 - CONSTRAINT check_b CHECK (b IS NULL OR b = 'a'), - CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 7) -); -ALTER TABLE part_7_a_null DROP c, DROP d, DROP e; -ALTER TABLE part_7 ATTACH PARTITION part_7_a_null FOR VALUES IN ('a', null); -ALTER TABLE list_parted2 ATTACH PARTITION part_7 FOR VALUES IN (7); --- Same example, but check this time that the constraint correctly detects --- violating rows -ALTER TABLE list_parted2 DETACH PARTITION part_7; -ALTER TABLE part_7 DROP CONSTRAINT check_a; -- thusly, scan won't be skipped -INSERT INTO part_7 (a, b) VALUES (8, null), (9, 'a'); -SELECT tableoid::regclass, a, b FROM part_7 order by a; - tableoid | a | b ----------------+---+--- - part_7_a_null | 8 | - part_7_a_null | 9 | a -(2 rows) - -ALTER TABLE list_parted2 ATTACH PARTITION part_7 FOR VALUES IN (7); -ERROR: partition constraint of relation "part_7_a_null" is violated by some row --- check that leaf partitions of default partition are scanned when --- attaching a partitioned table. 
-ALTER TABLE part_5 DROP CONSTRAINT check_a; -CREATE TABLE part5_def PARTITION OF part_5 DEFAULT PARTITION BY LIST(a); -CREATE TABLE part5_def_p1 PARTITION OF part5_def FOR VALUES IN (5); -INSERT INTO part5_def_p1 VALUES (5, 'y'); -CREATE TABLE part5_p1 (LIKE part_5); -ALTER TABLE part_5 ATTACH PARTITION part5_p1 FOR VALUES IN ('y'); -ERROR: updated partition constraint for default partition "part5_def_p1" would be violated by some row --- should be ok after deleting the bad row -DELETE FROM part5_def_p1 WHERE b = 'y'; -ALTER TABLE part_5 ATTACH PARTITION part5_p1 FOR VALUES IN ('y'); --- check that the table being attached is not already a partition -ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2); -ERROR: "part_2" is already a partition --- check that circular inheritance is not allowed -ALTER TABLE part_5 ATTACH PARTITION list_parted2 FOR VALUES IN ('b'); -ERROR: circular inheritance not allowed -DETAIL: "part_5" is already a child of "list_parted2". -ALTER TABLE list_parted2 ATTACH PARTITION list_parted2 FOR VALUES IN (0); -ERROR: circular inheritance not allowed -DETAIL: "list_parted2" is already a child of "list_parted2". --- If a partitioned table being created or an existing table being attached --- as a partition does not have a constraint that would allow validation scan --- to be skipped, but an individual partition does, then the partition's --- validation scan is skipped. -CREATE TABLE quuux (a int, b text) PARTITION BY LIST (a); -CREATE TABLE quuux_default PARTITION OF quuux DEFAULT PARTITION BY LIST (b); -CREATE TABLE quuux_default1 PARTITION OF quuux_default ( - CONSTRAINT check_1 CHECK (a IS NOT NULL AND a = 1) -) FOR VALUES IN ('b'); -CREATE TABLE quuux1 (a int, b text); -ALTER TABLE quuux ATTACH PARTITION quuux1 FOR VALUES IN (1); -- validate! -CREATE TABLE quuux2 (a int, b text); -ALTER TABLE quuux ATTACH PARTITION quuux2 FOR VALUES IN (2); -- skip validation -DROP TABLE quuux1, quuux2; --- should validate for quuux1, but not for quuux2 -CREATE TABLE quuux1 PARTITION OF quuux FOR VALUES IN (1); -CREATE TABLE quuux2 PARTITION OF quuux FOR VALUES IN (2); -DROP TABLE quuux; --- check validation when attaching hash partitions --- Use hand-rolled hash functions and operator class to get predictable result --- on different machines. part_test_int4_ops is defined in test_setup.sql. --- check that the new partition won't overlap with an existing partition -CREATE TABLE hash_parted ( - a int, - b int -) PARTITION BY HASH (a part_test_int4_ops); -CREATE TABLE hpart_1 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 4, REMAINDER 0); -CREATE TABLE fail_part (LIKE hpart_1); -ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 4); -ERROR: partition "fail_part" would overlap partition "hpart_1" -LINE 1: ...hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODU... - ^ -ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 0); -ERROR: partition "fail_part" would overlap partition "hpart_1" -LINE 1: ...hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODU... 
- ^ -DROP TABLE fail_part; --- check validation when attaching hash partitions --- check that violating rows are correctly reported -CREATE TABLE hpart_2 (LIKE hash_parted); -INSERT INTO hpart_2 VALUES (3, 0); -ALTER TABLE hash_parted ATTACH PARTITION hpart_2 FOR VALUES WITH (MODULUS 4, REMAINDER 1); -ERROR: partition constraint of relation "hpart_2" is violated by some row --- should be ok after deleting the bad row -DELETE FROM hpart_2; -ALTER TABLE hash_parted ATTACH PARTITION hpart_2 FOR VALUES WITH (MODULUS 4, REMAINDER 1); --- check that leaf partitions are scanned when attaching a partitioned --- table -CREATE TABLE hpart_5 ( - LIKE hash_parted -) PARTITION BY LIST (b); --- check that violating rows are correctly reported -CREATE TABLE hpart_5_a PARTITION OF hpart_5 FOR VALUES IN ('1', '2', '3'); -INSERT INTO hpart_5_a (a, b) VALUES (7, 1); -ALTER TABLE hash_parted ATTACH PARTITION hpart_5 FOR VALUES WITH (MODULUS 4, REMAINDER 2); -ERROR: partition constraint of relation "hpart_5_a" is violated by some row --- should be ok after deleting the bad row -DELETE FROM hpart_5_a; -ALTER TABLE hash_parted ATTACH PARTITION hpart_5 FOR VALUES WITH (MODULUS 4, REMAINDER 2); --- check that the table being attached has a valid modulus and remainder value -CREATE TABLE fail_part(LIKE hash_parted); -ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 0, REMAINDER 1); -ERROR: modulus for hash partition must be an integer value greater than zero -ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 8); -ERROR: remainder for hash partition must be less than modulus -ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 3, REMAINDER 2); -ERROR: every hash partition modulus must be a factor of the next larger modulus -DETAIL: The new modulus 3 is not a factor of 4, the modulus of existing partition "hpart_1". -DROP TABLE fail_part; --- --- DETACH PARTITION --- --- check that the table is partitioned at all -CREATE TABLE regular_table (a int); -ALTER TABLE regular_table DETACH PARTITION any_name; -ERROR: ALTER action DETACH PARTITION cannot be performed on relation "regular_table" -DETAIL: This operation is not supported for tables. -ALTER TABLE regular_table DETACH PARTITION any_name CONCURRENTLY; -ERROR: ALTER action DETACH PARTITION cannot be performed on relation "regular_table" -DETAIL: This operation is not supported for tables. -ALTER TABLE regular_table DETACH PARTITION any_name FINALIZE; -ERROR: ALTER action DETACH PARTITION ... FINALIZE cannot be performed on relation "regular_table" -DETAIL: This operation is not supported for tables.
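(Aside, not part of the regression output: a hedged sketch of the modulus rule behind the hash-partition errors above, using hypothetical names. Moduli may differ across partitions as long as every modulus divides the next larger one, so 4 and 8 can coexist while 3 and 4 cannot.)

CREATE TABLE hashdemo (a int) PARTITION BY HASH (a);
CREATE TABLE hashdemo_p0 PARTITION OF hashdemo FOR VALUES WITH (MODULUS 4, REMAINDER 0);
CREATE TABLE hashdemo_p1 PARTITION OF hashdemo FOR VALUES WITH (MODULUS 8, REMAINDER 1);  -- ok: 4 divides 8
-- a MODULUS 3 partition would fail here: 3 is not a factor of the existing modulus 4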
-DROP TABLE regular_table; --- check that the partition being detached exists at all -ALTER TABLE list_parted2 DETACH PARTITION part_4; -ERROR: relation "part_4" does not exist -ALTER TABLE hash_parted DETACH PARTITION hpart_4; -ERROR: relation "hpart_4" does not exist --- check that the partition being detached is actually a partition of the parent -CREATE TABLE not_a_part (a int); -ALTER TABLE list_parted2 DETACH PARTITION not_a_part; -ERROR: relation "not_a_part" is not a partition of relation "list_parted2" -ALTER TABLE list_parted2 DETACH PARTITION part_1; -ERROR: relation "part_1" is not a partition of relation "list_parted2" -ALTER TABLE hash_parted DETACH PARTITION not_a_part; -ERROR: relation "not_a_part" is not a partition of relation "hash_parted" -DROP TABLE not_a_part; --- check that, after being detached, attinhcount/coninhcount is dropped to 0 and --- attislocal/conislocal is set to true -ALTER TABLE list_parted2 DETACH PARTITION part_3_4; -SELECT attinhcount, attislocal FROM pg_attribute WHERE attrelid = 'part_3_4'::regclass AND attnum > 0; - attinhcount | attislocal --------------+------------ - 0 | t - 0 | t -(2 rows) - -SELECT coninhcount, conislocal FROM pg_constraint WHERE conrelid = 'part_3_4'::regclass AND conname = 'check_a'; - coninhcount | conislocal --------------+------------ - 0 | t -(1 row) - -DROP TABLE part_3_4; --- check that a detached partition is not dropped on dropping a partitioned table -CREATE TABLE range_parted2 ( - a int -) PARTITION BY RANGE(a); -CREATE TABLE part_rp PARTITION OF range_parted2 FOR VALUES FROM (0) to (100); -ALTER TABLE range_parted2 DETACH PARTITION part_rp; -DROP TABLE range_parted2; -SELECT * from part_rp; - a ---- -(0 rows) - -DROP TABLE part_rp; --- concurrent detach -CREATE TABLE range_parted2 ( - a int -) PARTITION BY RANGE(a); -CREATE TABLE part_rp PARTITION OF range_parted2 FOR VALUES FROM (0) to (100); -BEGIN; --- doesn't work in a partition block -ALTER TABLE range_parted2 DETACH PARTITION part_rp CONCURRENTLY; -ERROR: ALTER TABLE ... 
DETACH CONCURRENTLY cannot run inside a transaction block -COMMIT; -CREATE TABLE part_rpd PARTITION OF range_parted2 DEFAULT; --- doesn't work if there's a default partition -ALTER TABLE range_parted2 DETACH PARTITION part_rp CONCURRENTLY; -ERROR: cannot detach partitions concurrently when a default partition exists --- doesn't work for the default partition -ALTER TABLE range_parted2 DETACH PARTITION part_rpd CONCURRENTLY; -ERROR: cannot detach partitions concurrently when a default partition exists -DROP TABLE part_rpd; --- works fine -ALTER TABLE range_parted2 DETACH PARTITION part_rp CONCURRENTLY; -\d+ range_parted2 - Partitioned table "public.range_parted2" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - a | integer | | | | plain | | -Partition key: RANGE (a) -Number of partitions: 0 - --- constraint should be created -\d part_rp - Table "public.part_rp" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | -Check constraints: - "part_rp_a_check" CHECK (a IS NOT NULL AND a >= 0 AND a < 100) - -CREATE TABLE part_rp100 PARTITION OF range_parted2 (CHECK (a>=123 AND a<133 AND a IS NOT NULL)) FOR VALUES FROM (100) to (200); -ALTER TABLE range_parted2 DETACH PARTITION part_rp100 CONCURRENTLY; --- redundant constraint should not be created -\d part_rp100 - Table "public.part_rp100" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | -Check constraints: - "part_rp100_a_check" CHECK (a >= 123 AND a < 133 AND a IS NOT NULL) - -DROP TABLE range_parted2; --- Check ALTER TABLE commands for partitioned tables and partitions --- cannot add/drop column to/from *only* the parent -ALTER TABLE ONLY list_parted2 ADD COLUMN c int; -ERROR: column must be added to child tables too -ALTER TABLE ONLY list_parted2 DROP COLUMN b; -ERROR: cannot drop column from only the partitioned table when partitions exist -HINT: Do not specify the ONLY keyword. --- cannot add a column to partition or drop an inherited one -ALTER TABLE part_2 ADD COLUMN c text; -ERROR: cannot add column to a partition -ALTER TABLE part_2 DROP COLUMN b; -ERROR: cannot drop inherited column "b" --- Nor rename, alter type -ALTER TABLE part_2 RENAME COLUMN b to c; -ERROR: cannot rename inherited column "b" -ALTER TABLE part_2 ALTER COLUMN b TYPE text; -ERROR: cannot alter inherited column "b" --- cannot add NOT NULL or check constraints to *only* the parent, when --- partitions exist -ALTER TABLE ONLY list_parted2 ALTER b SET NOT NULL; -ERROR: constraint must be added to child tables too -HINT: Do not specify the ONLY keyword. -ALTER TABLE ONLY list_parted2 ADD CONSTRAINT check_b CHECK (b <> 'zz'); -ERROR: constraint must be added to child tables too --- dropping them is ok though -ALTER TABLE list_parted2 ALTER b SET NOT NULL; -ALTER TABLE ONLY list_parted2 ALTER b DROP NOT NULL; -ALTER TABLE list_parted2 ADD CONSTRAINT check_b CHECK (b <> 'zz'); -ALTER TABLE ONLY list_parted2 DROP CONSTRAINT check_b; --- ... 
and the partitions should still have both -\d+ part_2 - Table "public.part_2" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+--------------+-----------+----------+---------+----------+--------------+------------- - a | integer | | | | plain | | - b | character(1) | | not null | | extended | | -Partition of: list_parted2 FOR VALUES IN (2) -Partition constraint: ((a IS NOT NULL) AND (a = 2)) -Check constraints: - "check_b" CHECK (b <> 'zz'::bpchar) -Not-null constraints: - "list_parted2_b_not_null" NOT NULL "b" - --- It's alright though, if no partitions are yet created -CREATE TABLE parted_no_parts (a int) PARTITION BY LIST (a); -ALTER TABLE ONLY parted_no_parts ALTER a SET NOT NULL; -ALTER TABLE ONLY parted_no_parts ADD CONSTRAINT check_a CHECK (a > 0); -DROP TABLE parted_no_parts; --- cannot drop inherited NOT NULL or check constraints from partition -ALTER TABLE list_parted2 ALTER b SET NOT NULL, ADD CONSTRAINT check_a2 CHECK (a > 0); -ALTER TABLE part_2 ALTER b DROP NOT NULL; -ERROR: column "b" is marked NOT NULL in parent table -ALTER TABLE part_2 DROP CONSTRAINT check_a2; -ERROR: cannot drop inherited constraint "check_a2" of relation "part_2" --- can't drop NOT NULL from under an invalid PK -CREATE TABLE list_parted3 (a int NOT NULL) PARTITION BY LIST (a); -CREATE TABLE list_parted3_1 PARTITION OF list_parted3 FOR VALUES IN (1); -ALTER TABLE ONLY list_parted3 ADD PRIMARY KEY (a); -ALTER TABLE ONLY list_parted3 DROP CONSTRAINT list_parted3_a_not_null; -ERROR: column "a" is in a primary key --- Doesn't make sense to add NO INHERIT constraints on partitioned tables -ALTER TABLE list_parted2 add constraint check_b2 check (b <> 'zz') NO INHERIT; -ERROR: cannot add NO INHERIT constraint to partitioned table "list_parted2" --- check that a partition cannot participate in regular inheritance -CREATE TABLE inh_test () INHERITS (part_2); -ERROR: cannot inherit from partition "part_2" -CREATE TABLE inh_test (LIKE part_2); -ALTER TABLE inh_test INHERIT part_2; -ERROR: cannot inherit from a partition -ALTER TABLE part_2 INHERIT inh_test; -ERROR: cannot change inheritance of a partition --- cannot drop or alter type of partition key columns of lower level --- partitioned tables; for example, part_5, which is list_parted2's --- partition, is partitioned on b; -ALTER TABLE list_parted2 DROP COLUMN b; -ERROR: cannot drop column "b" because it is part of the partition key of relation "part_5" -ALTER TABLE list_parted2 ALTER COLUMN b TYPE text; -ERROR: cannot alter column "b" because it is part of the partition key of relation "part_5" --- dropping non-partition key columns should be allowed on the parent table. 
-ALTER TABLE list_parted DROP COLUMN b; -SELECT * FROM list_parted; - a ---- -(0 rows) - --- cleanup -DROP TABLE list_parted, list_parted2, range_parted, list_parted3; -DROP TABLE fail_def_part; -DROP TABLE hash_parted; --- more tests for certain multi-level partitioning scenarios -create table p (a int, b int) partition by range (a, b); -create table p1 (b int, a int not null) partition by range (b); -create table p11 (like p1); -alter table p11 drop a; -alter table p11 add a int; -alter table p11 drop a; -alter table p11 add a int not null; --- attnum for key attribute 'a' is different in p, p1, and p11 -select attrelid::regclass, attname, attnum -from pg_attribute -where attname = 'a' - and (attrelid = 'p'::regclass - or attrelid = 'p1'::regclass - or attrelid = 'p11'::regclass) -order by attrelid::regclass::text; - attrelid | attname | attnum -----------+---------+-------- - p | a | 1 - p1 | a | 2 - p11 | a | 4 -(3 rows) - -alter table p1 attach partition p11 for values from (2) to (5); -insert into p1 (a, b) values (2, 3); --- check that partition validation scan correctly detects violating rows -alter table p attach partition p1 for values from (1, 2) to (1, 10); -ERROR: partition constraint of relation "p11" is violated by some row --- cleanup -drop table p; -drop table p1; --- validate constraint on partitioned tables should only scan leaf partitions -create table parted_validate_test (a int) partition by list (a); -create table parted_validate_test_1 partition of parted_validate_test for values in (0, 1); -alter table parted_validate_test add constraint parted_validate_test_chka check (a > 0) not valid; -alter table parted_validate_test validate constraint parted_validate_test_chka; -drop table parted_validate_test; --- test alter column options -CREATE TABLE attmp(i integer); -INSERT INTO attmp VALUES (1); -ALTER TABLE attmp ALTER COLUMN i SET (n_distinct = 1, n_distinct_inherited = 2); -ALTER TABLE attmp ALTER COLUMN i RESET (n_distinct_inherited); -ANALYZE attmp; -DROP TABLE attmp; -DROP USER regress_alter_table_user1; --- check that violating rows are correctly reported when attaching as the --- default partition -create table defpart_attach_test (a int) partition by list (a); -create table defpart_attach_test1 partition of defpart_attach_test for values in (1); -create table defpart_attach_test_d (b int, a int); -alter table defpart_attach_test_d drop b; -insert into defpart_attach_test_d values (1), (2); --- error because its constraint as the default partition would be violated --- by the row containing 1 -alter table defpart_attach_test attach partition defpart_attach_test_d default; -ERROR: partition constraint of relation "defpart_attach_test_d" is violated by some row -delete from defpart_attach_test_d where a = 1; -alter table defpart_attach_test_d add check (a > 1); --- should be attached successfully and without needing to be scanned -alter table defpart_attach_test attach partition defpart_attach_test_d default; --- check that attaching a partition correctly reports any rows in the default --- partition that should not be there for the new partition to be attached --- successfully -create table defpart_attach_test_2 (like defpart_attach_test_d); -alter table defpart_attach_test attach partition defpart_attach_test_2 for values in (2); -ERROR: updated partition constraint for default partition "defpart_attach_test_d" would be violated by some row -drop table defpart_attach_test; --- check combinations of temporary and permanent relations when attaching --- 
partitions. -create table perm_part_parent (a int) partition by list (a); -create temp table temp_part_parent (a int) partition by list (a); -create table perm_part_child (a int); -create temp table temp_part_child (a int); -alter table temp_part_parent attach partition perm_part_child default; -- error -ERROR: cannot attach a permanent relation as partition of temporary relation "temp_part_parent" -alter table perm_part_parent attach partition temp_part_child default; -- error -ERROR: cannot attach a temporary relation as partition of permanent relation "perm_part_parent" -alter table temp_part_parent attach partition temp_part_child default; -- ok -drop table perm_part_parent cascade; -drop table temp_part_parent cascade; --- check that attaching partitions to a table while it is being used is --- prevented -create table tab_part_attach (a int) partition by list (a); -create or replace function func_part_attach() returns trigger - language plpgsql as $$ - begin - execute 'create table tab_part_attach_1 (a int)'; - execute 'alter table tab_part_attach attach partition tab_part_attach_1 for values in (1)'; - return null; - end $$; -create trigger trig_part_attach before insert on tab_part_attach - for each statement execute procedure func_part_attach(); -insert into tab_part_attach values (1); -ERROR: cannot ALTER TABLE "tab_part_attach" because it is being used by active queries in this session -CONTEXT: SQL statement "alter table tab_part_attach attach partition tab_part_attach_1 for values in (1)" -PL/pgSQL function func_part_attach() line 4 at EXECUTE -drop table tab_part_attach; -drop function func_part_attach(); --- test case where the partitioning operator is a SQL function whose --- evaluation results in the table's relcache being rebuilt partway through --- the execution of an ATTACH PARTITION command -create function at_test_sql_partop (int4, int4) returns int language sql -as $$ select case when $1 = $2 then 0 when $1 > $2 then 1 else -1 end; $$; -create operator class at_test_sql_partop for type int4 using btree as - operator 1 < (int4, int4), operator 2 <= (int4, int4), - operator 3 = (int4, int4), operator 4 >= (int4, int4), - operator 5 > (int4, int4), function 1 at_test_sql_partop(int4, int4); -create table at_test_sql_partop (a int) partition by range (a at_test_sql_partop); -create table at_test_sql_partop_1 (a int); -alter table at_test_sql_partop attach partition at_test_sql_partop_1 for values from (0) to (10); -drop table at_test_sql_partop; -drop operator class at_test_sql_partop using btree; -drop function at_test_sql_partop; -/* Test case for bug #16242 */ --- We create a parent and child where the child has missing --- non-null attribute values, and arrange to pass them through --- tuple conversion from the child to the parent tupdesc -create table bar1 (a integer, b integer not null default 1) - partition by range (a); -create table bar2 (a integer); -insert into bar2 values (1); -alter table bar2 add column b integer not null default 1; --- (at this point bar2 contains tuple with natts=1) -alter table bar1 attach partition bar2 default; --- this works: -select * from bar1; - a | b ----+--- - 1 | 1 -(1 row) - --- this exercises tuple conversion: -create function xtrig() - returns trigger language plpgsql -as $$ - declare - r record; - begin - for r in select * from old loop - raise info 'a=%, b=%', r.a, r.b; - end loop; - return NULL; - end; -$$; -create trigger xtrig - after update on bar1 - referencing old table as old - for each statement execute procedure 
xtrig(); -update bar1 set a = a + 1; -INFO: a=1, b=1 -/* End test case for bug #16242 */ -/* Test case for bug #17409 */ -create table attbl (p1 int constraint pk_attbl primary key); -create table atref (c1 int references attbl(p1)); -cluster attbl using pk_attbl; -alter table attbl alter column p1 set data type bigint; -alter table atref alter column c1 set data type bigint; -drop table attbl, atref; -create table attbl (p1 int constraint pk_attbl primary key); -alter table attbl replica identity using index pk_attbl; -create table atref (c1 int references attbl(p1)); -alter table attbl alter column p1 set data type bigint; -alter table atref alter column c1 set data type bigint; -drop table attbl, atref; -/* End test case for bug #17409 */ --- Test that ALTER TABLE rewrite preserves a clustered index --- for normal indexes and indexes on constraints. -create table alttype_cluster (a int); -alter table alttype_cluster add primary key (a); -create index alttype_cluster_ind on alttype_cluster (a); -alter table alttype_cluster cluster on alttype_cluster_ind; --- Normal index remains clustered. -select indexrelid::regclass, indisclustered from pg_index - where indrelid = 'alttype_cluster'::regclass - order by indexrelid::regclass::text; - indexrelid | indisclustered -----------------------+---------------- - alttype_cluster_ind | t - alttype_cluster_pkey | f -(2 rows) - -alter table alttype_cluster alter a type bigint; -select indexrelid::regclass, indisclustered from pg_index - where indrelid = 'alttype_cluster'::regclass - order by indexrelid::regclass::text; - indexrelid | indisclustered -----------------------+---------------- - alttype_cluster_ind | t - alttype_cluster_pkey | f -(2 rows) - --- Constraint index remains clustered. -alter table alttype_cluster cluster on alttype_cluster_pkey; -select indexrelid::regclass, indisclustered from pg_index - where indrelid = 'alttype_cluster'::regclass - order by indexrelid::regclass::text; - indexrelid | indisclustered -----------------------+---------------- - alttype_cluster_ind | f - alttype_cluster_pkey | t -(2 rows) - -alter table alttype_cluster alter a type int; -select indexrelid::regclass, indisclustered from pg_index - where indrelid = 'alttype_cluster'::regclass - order by indexrelid::regclass::text; - indexrelid | indisclustered -----------------------+---------------- - alttype_cluster_ind | f - alttype_cluster_pkey | t -(2 rows) - -drop table alttype_cluster; --- --- Check that attaching or detaching a partitioned partition correctly leads --- to its partitions' constraint being updated to reflect the parent's --- newly added/removed constraint -create table target_parted (a int, b int) partition by list (a); -create table attach_parted (a int, b int) partition by list (b); -create table attach_parted_part1 partition of attach_parted for values in (1); --- insert a row directly into the leaf partition so that its partition --- constraint is built and stored in the relcache -insert into attach_parted_part1 values (1, 1); --- the following better invalidate the partition constraint of the leaf --- partition too... -alter table target_parted attach partition attach_parted for values in (1); --- ...such that the following insert fails -insert into attach_parted_part1 values (2, 1); -ERROR: new row for relation "attach_parted_part1" violates partition constraint -DETAIL: Failing row contains (2, 1). 
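(Aside, not part of the test: the combined constraint that rejected the failing row above can be inspected directly. The leaf's own list constraint on b is ANDed with the constraint inherited from the new parent on a.)

SELECT pg_get_partition_constraintdef('attach_parted_part1'::regclass);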
--- ...and doesn't when the partition is detached along with its own partition -alter table target_parted detach partition attach_parted; -insert into attach_parted_part1 values (2, 1); --- Test altering table having publication -create schema alter1; -create schema alter2; -create table alter1.t1 (a int); -set client_min_messages = 'ERROR'; -create publication pub1 for table alter1.t1, tables in schema alter2; -reset client_min_messages; -alter table alter1.t1 set schema alter2; -\d+ alter2.t1 - Table "alter2.t1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - a | integer | | | | plain | | -Publications: - "pub1" - -drop publication pub1; -drop schema alter1 cascade; -drop schema alter2 cascade; -NOTICE: drop cascades to table alter2.t1 +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/sequence.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/sequence.out --- /Users/admin/pgsql/src/test/regress/expected/sequence.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/sequence.out 2024-12-13 13:20:12 @@ -1,849 +1,2 @@ --- --- CREATE SEQUENCE --- --- various error cases -CREATE SEQUENCE sequence_testx INCREMENT BY 0; -ERROR: INCREMENT must not be zero -CREATE SEQUENCE sequence_testx INCREMENT BY -1 MINVALUE 20; -ERROR: MINVALUE (20) must be less than MAXVALUE (-1) -CREATE SEQUENCE sequence_testx INCREMENT BY 1 MAXVALUE -20; -ERROR: MINVALUE (1) must be less than MAXVALUE (-20) -CREATE SEQUENCE sequence_testx INCREMENT BY -1 START 10; -ERROR: START value (10) cannot be greater than MAXVALUE (-1) -CREATE SEQUENCE sequence_testx INCREMENT BY 1 START -10; -ERROR: START value (-10) cannot be less than MINVALUE (1) -CREATE SEQUENCE sequence_testx CACHE 0; -ERROR: CACHE (0) must be greater than zero --- OWNED BY errors -CREATE SEQUENCE sequence_testx OWNED BY nobody; -- nonsense word -ERROR: invalid OWNED BY option -HINT: Specify OWNED BY table.column or OWNED BY NONE. -CREATE SEQUENCE sequence_testx OWNED BY pg_class_oid_index.oid; -- not a table -ERROR: sequence cannot be owned by relation "pg_class_oid_index" -DETAIL: This operation is not supported for indexes. 
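(Aside, not part of the regression output: before the remaining OWNED BY error cases, a sketch of the accepted form with hypothetical names. The owning column must belong to an ordinary table in the same schema, and the sequence's lifetime then follows the table's.)

CREATE TABLE owned_demo (id int);
CREATE SEQUENCE owned_demo_seq OWNED BY owned_demo.id;
DROP TABLE owned_demo;  -- drops owned_demo_seq as well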
-CREATE SEQUENCE sequence_testx OWNED BY pg_class.relname; -- not same schema -ERROR: sequence must be in same schema as table it is linked to -CREATE TABLE sequence_test_table (a int); -CREATE SEQUENCE sequence_testx OWNED BY sequence_test_table.b; -- wrong column -ERROR: column "b" of relation "sequence_test_table" does not exist -DROP TABLE sequence_test_table; --- sequence data types -CREATE SEQUENCE sequence_test5 AS integer; -CREATE SEQUENCE sequence_test6 AS smallint; -CREATE SEQUENCE sequence_test7 AS bigint; -CREATE SEQUENCE sequence_test8 AS integer MAXVALUE 100000; -CREATE SEQUENCE sequence_test9 AS integer INCREMENT BY -1; -CREATE SEQUENCE sequence_test10 AS integer MINVALUE -100000 START 1; -CREATE SEQUENCE sequence_test11 AS smallint; -CREATE SEQUENCE sequence_test12 AS smallint INCREMENT -1; -CREATE SEQUENCE sequence_test13 AS smallint MINVALUE -32768; -CREATE SEQUENCE sequence_test14 AS smallint MAXVALUE 32767 INCREMENT -1; -CREATE SEQUENCE sequence_testx AS text; -ERROR: sequence type must be smallint, integer, or bigint -CREATE SEQUENCE sequence_testx AS nosuchtype; -ERROR: type "nosuchtype" does not exist -LINE 1: CREATE SEQUENCE sequence_testx AS nosuchtype; - ^ -CREATE SEQUENCE sequence_testx AS smallint MAXVALUE 100000; -ERROR: MAXVALUE (100000) is out of range for sequence data type smallint -CREATE SEQUENCE sequence_testx AS smallint MINVALUE -100000; -ERROR: MINVALUE (-100000) is out of range for sequence data type smallint -ALTER SEQUENCE sequence_test5 AS smallint; -- success, max will be adjusted -ALTER SEQUENCE sequence_test8 AS smallint; -- fail, max has to be adjusted -ERROR: MAXVALUE (100000) is out of range for sequence data type smallint -ALTER SEQUENCE sequence_test8 AS smallint MAXVALUE 20000; -- ok now -ALTER SEQUENCE sequence_test9 AS smallint; -- success, min will be adjusted -ALTER SEQUENCE sequence_test10 AS smallint; -- fail, min has to be adjusted -ERROR: MINVALUE (-100000) is out of range for sequence data type smallint -ALTER SEQUENCE sequence_test10 AS smallint MINVALUE -20000; -- ok now -ALTER SEQUENCE sequence_test11 AS int; -- max will be adjusted -ALTER SEQUENCE sequence_test12 AS int; -- min will be adjusted -ALTER SEQUENCE sequence_test13 AS int; -- min and max will be adjusted -ALTER SEQUENCE sequence_test14 AS int; -- min and max will be adjusted ---- ---- test creation of SERIAL column ---- -CREATE TABLE serialTest1 (f1 text, f2 serial); -INSERT INTO serialTest1 VALUES ('foo'); -INSERT INTO serialTest1 VALUES ('bar'); -INSERT INTO serialTest1 VALUES ('force', 100); -INSERT INTO serialTest1 VALUES ('wrong', NULL); -ERROR: null value in column "f2" of relation "serialtest1" violates not-null constraint -DETAIL: Failing row contains (wrong, null). 
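(Aside, not part of the regression output: the serial pseudo-type exercised above is documented shorthand for an integer column with a nextval() default backed by an owned sequence; names here are hypothetical. The implicit NOT NULL is what the failing insert above trips over.)

CREATE SEQUENCE serialdemo_f2_seq;
CREATE TABLE serialdemo (
    f1 text,
    f2 integer NOT NULL DEFAULT nextval('serialdemo_f2_seq')
);
ALTER SEQUENCE serialdemo_f2_seq OWNED BY serialdemo.f2;  -- tie the sequence's lifetime to the column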
-SELECT * FROM serialTest1; - f1 | f2 --------+----- - foo | 1 - bar | 2 - force | 100 -(3 rows) - -SELECT pg_get_serial_sequence('serialTest1', 'f2'); - pg_get_serial_sequence ---------------------------- - public.serialtest1_f2_seq -(1 row) - --- test smallserial / bigserial -CREATE TABLE serialTest2 (f1 text, f2 serial, f3 smallserial, f4 serial2, - f5 bigserial, f6 serial8); -INSERT INTO serialTest2 (f1) - VALUES ('test_defaults'); -INSERT INTO serialTest2 (f1, f2, f3, f4, f5, f6) - VALUES ('test_max_vals', 2147483647, 32767, 32767, 9223372036854775807, - 9223372036854775807), - ('test_min_vals', -2147483648, -32768, -32768, -9223372036854775808, - -9223372036854775808); --- All these INSERTs should fail: -INSERT INTO serialTest2 (f1, f3) - VALUES ('bogus', -32769); -ERROR: smallint out of range -INSERT INTO serialTest2 (f1, f4) - VALUES ('bogus', -32769); -ERROR: smallint out of range -INSERT INTO serialTest2 (f1, f3) - VALUES ('bogus', 32768); -ERROR: smallint out of range -INSERT INTO serialTest2 (f1, f4) - VALUES ('bogus', 32768); -ERROR: smallint out of range -INSERT INTO serialTest2 (f1, f5) - VALUES ('bogus', -9223372036854775809); -ERROR: bigint out of range -INSERT INTO serialTest2 (f1, f6) - VALUES ('bogus', -9223372036854775809); -ERROR: bigint out of range -INSERT INTO serialTest2 (f1, f5) - VALUES ('bogus', 9223372036854775808); -ERROR: bigint out of range -INSERT INTO serialTest2 (f1, f6) - VALUES ('bogus', 9223372036854775808); -ERROR: bigint out of range -SELECT * FROM serialTest2 ORDER BY f2 ASC; - f1 | f2 | f3 | f4 | f5 | f6 ----------------+-------------+--------+--------+----------------------+---------------------- - test_min_vals | -2147483648 | -32768 | -32768 | -9223372036854775808 | -9223372036854775808 - test_defaults | 1 | 1 | 1 | 1 | 1 - test_max_vals | 2147483647 | 32767 | 32767 | 9223372036854775807 | 9223372036854775807 -(3 rows) - -SELECT nextval('serialTest2_f2_seq'); - nextval ---------- - 2 -(1 row) - -SELECT nextval('serialTest2_f3_seq'); - nextval ---------- - 2 -(1 row) - -SELECT nextval('serialTest2_f4_seq'); - nextval ---------- - 2 -(1 row) - -SELECT nextval('serialTest2_f5_seq'); - nextval ---------- - 2 -(1 row) - -SELECT nextval('serialTest2_f6_seq'); - nextval ---------- - 2 -(1 row) - --- basic sequence operations using both text and oid references -CREATE SEQUENCE sequence_test; -CREATE SEQUENCE IF NOT EXISTS sequence_test; -NOTICE: relation "sequence_test" already exists, skipping -SELECT nextval('sequence_test'::text); - nextval ---------- - 1 -(1 row) - -SELECT nextval('sequence_test'::regclass); - nextval ---------- - 2 -(1 row) - -SELECT currval('sequence_test'::text); - currval ---------- - 2 -(1 row) - -SELECT currval('sequence_test'::regclass); - currval ---------- - 2 -(1 row) - -SELECT setval('sequence_test'::text, 32); - setval --------- - 32 -(1 row) - -SELECT nextval('sequence_test'::regclass); - nextval ---------- - 33 -(1 row) - -SELECT setval('sequence_test'::text, 99, false); - setval --------- - 99 -(1 row) - -SELECT nextval('sequence_test'::regclass); - nextval ---------- - 99 -(1 row) - -SELECT setval('sequence_test'::regclass, 32); - setval --------- - 32 -(1 row) - -SELECT nextval('sequence_test'::text); - nextval ---------- - 33 -(1 row) - -SELECT setval('sequence_test'::regclass, 99, false); - setval --------- - 99 -(1 row) - -SELECT nextval('sequence_test'::text); - nextval ---------- - 99 -(1 row) - -DISCARD SEQUENCES; -SELECT currval('sequence_test'::regclass); -ERROR: currval of sequence "sequence_test" is not 
yet defined in this session -DROP SEQUENCE sequence_test; --- renaming sequences -CREATE SEQUENCE foo_seq; -ALTER TABLE foo_seq RENAME TO foo_seq_new; -SELECT * FROM foo_seq_new; - last_value | log_cnt | is_called -------------+---------+----------- - 1 | 0 | f -(1 row) - -SELECT nextval('foo_seq_new'); - nextval ---------- - 1 -(1 row) - -SELECT nextval('foo_seq_new'); - nextval ---------- - 2 -(1 row) - --- log_cnt can be higher if there is a checkpoint just at the right --- time, so just test for the expected range -SELECT last_value, log_cnt IN (31, 32) AS log_cnt_ok, is_called FROM foo_seq_new; - last_value | log_cnt_ok | is_called -------------+------------+----------- - 2 | t | t -(1 row) - -DROP SEQUENCE foo_seq_new; --- renaming serial sequences -ALTER TABLE serialtest1_f2_seq RENAME TO serialtest1_f2_foo; -INSERT INTO serialTest1 VALUES ('more'); -SELECT * FROM serialTest1; - f1 | f2 --------+----- - foo | 1 - bar | 2 - force | 100 - more | 3 -(4 rows) - --- --- Check dependencies of serial and ordinary sequences --- -CREATE TEMP SEQUENCE myseq2; -CREATE TEMP SEQUENCE myseq3; -CREATE TEMP TABLE t1 ( - f1 serial, - f2 int DEFAULT nextval('myseq2'), - f3 int DEFAULT nextval('myseq3'::text) -); --- Both drops should fail, but with different error messages: -DROP SEQUENCE t1_f1_seq; -ERROR: cannot drop sequence t1_f1_seq because other objects depend on it -DETAIL: default value for column f1 of table t1 depends on sequence t1_f1_seq -HINT: Use DROP ... CASCADE to drop the dependent objects too. -DROP SEQUENCE myseq2; -ERROR: cannot drop sequence myseq2 because other objects depend on it -DETAIL: default value for column f2 of table t1 depends on sequence myseq2 -HINT: Use DROP ... CASCADE to drop the dependent objects too. --- This however will work: -DROP SEQUENCE myseq3; -DROP TABLE t1; --- Fails because no longer existent: -DROP SEQUENCE t1_f1_seq; -ERROR: sequence "t1_f1_seq" does not exist --- Now OK: -DROP SEQUENCE myseq2; --- --- Alter sequence --- -ALTER SEQUENCE IF EXISTS sequence_test2 RESTART WITH 24 - INCREMENT BY 4 MAXVALUE 36 MINVALUE 5 CYCLE; -NOTICE: relation "sequence_test2" does not exist, skipping -ALTER SEQUENCE serialTest1 CYCLE; -- error, not a sequence -ERROR: cannot open relation "serialtest1" -DETAIL: This operation is not supported for tables. 
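The dependency errors above come down to how each sequence was attached: a serial column's sequence and a DEFAULT that names a sequence as a regclass constant both record a dependency, while a default written as nextval('name'::text) does not. A small sketch of the dependent case, with hypothetical names:

CREATE SEQUENCE demo_seq2;
CREATE TABLE demo_tab (id int DEFAULT nextval('demo_seq2'));
DROP SEQUENCE demo_seq2;   -- fails: the column default depends on the sequence
DROP TABLE demo_tab;
DROP SEQUENCE demo_seq2;   -- succeeds once the dependent table is gone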
-CREATE SEQUENCE sequence_test2 START WITH 32; -CREATE SEQUENCE sequence_test4 INCREMENT BY -1; -SELECT nextval('sequence_test2'); - nextval ---------- - 32 -(1 row) - -SELECT nextval('sequence_test4'); - nextval ---------- - -1 -(1 row) - -ALTER SEQUENCE sequence_test2 RESTART; -SELECT nextval('sequence_test2'); - nextval ---------- - 32 -(1 row) - -ALTER SEQUENCE sequence_test2 RESTART WITH 0; -- error -ERROR: RESTART value (0) cannot be less than MINVALUE (1) -ALTER SEQUENCE sequence_test4 RESTART WITH 40; -- error -ERROR: RESTART value (40) cannot be greater than MAXVALUE (-1) --- test CYCLE and NO CYCLE -ALTER SEQUENCE sequence_test2 RESTART WITH 24 - INCREMENT BY 4 MAXVALUE 36 MINVALUE 5 CYCLE; -SELECT nextval('sequence_test2'); - nextval ---------- - 24 -(1 row) - -SELECT nextval('sequence_test2'); - nextval ---------- - 28 -(1 row) - -SELECT nextval('sequence_test2'); - nextval ---------- - 32 -(1 row) - -SELECT nextval('sequence_test2'); - nextval ---------- - 36 -(1 row) - -SELECT nextval('sequence_test2'); -- cycled - nextval ---------- - 5 -(1 row) - -ALTER SEQUENCE sequence_test2 RESTART WITH 24 - NO CYCLE; -SELECT nextval('sequence_test2'); - nextval ---------- - 24 -(1 row) - -SELECT nextval('sequence_test2'); - nextval ---------- - 28 -(1 row) - -SELECT nextval('sequence_test2'); - nextval ---------- - 32 -(1 row) - -SELECT nextval('sequence_test2'); - nextval ---------- - 36 -(1 row) - -SELECT nextval('sequence_test2'); -- error -ERROR: nextval: reached maximum value of sequence "sequence_test2" (36) -ALTER SEQUENCE sequence_test2 RESTART WITH -24 START WITH -24 - INCREMENT BY -4 MINVALUE -36 MAXVALUE -5 CYCLE; -SELECT nextval('sequence_test2'); - nextval ---------- - -24 -(1 row) - -SELECT nextval('sequence_test2'); - nextval ---------- - -28 -(1 row) - -SELECT nextval('sequence_test2'); - nextval ---------- - -32 -(1 row) - -SELECT nextval('sequence_test2'); - nextval ---------- - -36 -(1 row) - -SELECT nextval('sequence_test2'); -- cycled - nextval ---------- - -5 -(1 row) - -ALTER SEQUENCE sequence_test2 RESTART WITH -24 - NO CYCLE; -SELECT nextval('sequence_test2'); - nextval ---------- - -24 -(1 row) - -SELECT nextval('sequence_test2'); - nextval ---------- - -28 -(1 row) - -SELECT nextval('sequence_test2'); - nextval ---------- - -32 -(1 row) - -SELECT nextval('sequence_test2'); - nextval ---------- - -36 -(1 row) - -SELECT nextval('sequence_test2'); -- error -ERROR: nextval: reached minimum value of sequence "sequence_test2" (-36) --- reset -ALTER SEQUENCE IF EXISTS sequence_test2 RESTART WITH 32 START WITH 32 - INCREMENT BY 4 MAXVALUE 36 MINVALUE 5 CYCLE; -SELECT setval('sequence_test2', -100); -- error -ERROR: setval: value -100 is out of bounds for sequence "sequence_test2" (5..36) -SELECT setval('sequence_test2', 100); -- error -ERROR: setval: value 100 is out of bounds for sequence "sequence_test2" (5..36) -SELECT setval('sequence_test2', 5); - setval --------- - 5 -(1 row) - -CREATE SEQUENCE sequence_test3; -- not read from, to test is_called --- Information schema -SELECT * FROM information_schema.sequences - WHERE sequence_name ~ ANY(ARRAY['sequence_test', 'serialtest']) - ORDER BY sequence_name ASC; - sequence_catalog | sequence_schema | sequence_name | data_type | numeric_precision | numeric_precision_radix | numeric_scale | start_value | minimum_value | maximum_value | increment | cycle_option 
-------------------+-----------------+--------------------+-----------+-------------------+-------------------------+---------------+-------------+----------------------+---------------------+-----------+-------------- - regression | public | sequence_test10 | smallint | 16 | 2 | 0 | 1 | -20000 | 32767 | 1 | NO - regression | public | sequence_test11 | integer | 32 | 2 | 0 | 1 | 1 | 2147483647 | 1 | NO - regression | public | sequence_test12 | integer | 32 | 2 | 0 | -1 | -2147483648 | -1 | -1 | NO - regression | public | sequence_test13 | integer | 32 | 2 | 0 | -32768 | -2147483648 | 2147483647 | 1 | NO - regression | public | sequence_test14 | integer | 32 | 2 | 0 | 32767 | -2147483648 | 2147483647 | -1 | NO - regression | public | sequence_test2 | bigint | 64 | 2 | 0 | 32 | 5 | 36 | 4 | YES - regression | public | sequence_test3 | bigint | 64 | 2 | 0 | 1 | 1 | 9223372036854775807 | 1 | NO - regression | public | sequence_test4 | bigint | 64 | 2 | 0 | -1 | -9223372036854775808 | -1 | -1 | NO - regression | public | sequence_test5 | smallint | 16 | 2 | 0 | 1 | 1 | 32767 | 1 | NO - regression | public | sequence_test6 | smallint | 16 | 2 | 0 | 1 | 1 | 32767 | 1 | NO - regression | public | sequence_test7 | bigint | 64 | 2 | 0 | 1 | 1 | 9223372036854775807 | 1 | NO - regression | public | sequence_test8 | smallint | 16 | 2 | 0 | 1 | 1 | 20000 | 1 | NO - regression | public | sequence_test9 | smallint | 16 | 2 | 0 | -1 | -32768 | -1 | -1 | NO - regression | public | serialtest1_f2_foo | integer | 32 | 2 | 0 | 1 | 1 | 2147483647 | 1 | NO - regression | public | serialtest2_f2_seq | integer | 32 | 2 | 0 | 1 | 1 | 2147483647 | 1 | NO - regression | public | serialtest2_f3_seq | smallint | 16 | 2 | 0 | 1 | 1 | 32767 | 1 | NO - regression | public | serialtest2_f4_seq | smallint | 16 | 2 | 0 | 1 | 1 | 32767 | 1 | NO - regression | public | serialtest2_f5_seq | bigint | 64 | 2 | 0 | 1 | 1 | 9223372036854775807 | 1 | NO - regression | public | serialtest2_f6_seq | bigint | 64 | 2 | 0 | 1 | 1 | 9223372036854775807 | 1 | NO -(19 rows) - -SELECT schemaname, sequencename, start_value, min_value, max_value, increment_by, cycle, cache_size, last_value -FROM pg_sequences -WHERE sequencename ~ ANY(ARRAY['sequence_test', 'serialtest']) - ORDER BY sequencename ASC; - schemaname | sequencename | start_value | min_value | max_value | increment_by | cycle | cache_size | last_value -------------+--------------------+-------------+----------------------+---------------------+--------------+-------+------------+------------ - public | sequence_test10 | 1 | -20000 | 32767 | 1 | f | 1 | - public | sequence_test11 | 1 | 1 | 2147483647 | 1 | f | 1 | - public | sequence_test12 | -1 | -2147483648 | -1 | -1 | f | 1 | - public | sequence_test13 | -32768 | -2147483648 | 2147483647 | 1 | f | 1 | - public | sequence_test14 | 32767 | -2147483648 | 2147483647 | -1 | f | 1 | - public | sequence_test2 | 32 | 5 | 36 | 4 | t | 1 | 5 - public | sequence_test3 | 1 | 1 | 9223372036854775807 | 1 | f | 1 | - public | sequence_test4 | -1 | -9223372036854775808 | -1 | -1 | f | 1 | -1 - public | sequence_test5 | 1 | 1 | 32767 | 1 | f | 1 | - public | sequence_test6 | 1 | 1 | 32767 | 1 | f | 1 | - public | sequence_test7 | 1 | 1 | 9223372036854775807 | 1 | f | 1 | - public | sequence_test8 | 1 | 1 | 20000 | 1 | f | 1 | - public | sequence_test9 | -1 | -32768 | -1 | -1 | f | 1 | - public | serialtest1_f2_foo | 1 | 1 | 2147483647 | 1 | f | 1 | 3 - public | serialtest2_f2_seq | 1 | 1 | 2147483647 | 1 | f | 1 | 2 - public | serialtest2_f3_seq | 
1 | 1 | 32767 | 1 | f | 1 | 2 - public | serialtest2_f4_seq | 1 | 1 | 32767 | 1 | f | 1 | 2 - public | serialtest2_f5_seq | 1 | 1 | 9223372036854775807 | 1 | f | 1 | 2 - public | serialtest2_f6_seq | 1 | 1 | 9223372036854775807 | 1 | f | 1 | 2 -(19 rows) - -SELECT * FROM pg_sequence_parameters('sequence_test4'::regclass); - start_value | minimum_value | maximum_value | increment | cycle_option | cache_size | data_type --------------+----------------------+---------------+-----------+--------------+------------+----------- - -1 | -9223372036854775808 | -1 | -1 | f | 1 | 20 -(1 row) - -\d sequence_test4 - Sequence "public.sequence_test4" - Type | Start | Minimum | Maximum | Increment | Cycles? | Cache ---------+-------+----------------------+---------+-----------+---------+------- - bigint | -1 | -9223372036854775808 | -1 | -1 | no | 1 - -\d serialtest2_f2_seq - Sequence "public.serialtest2_f2_seq" - Type | Start | Minimum | Maximum | Increment | Cycles? | Cache ----------+-------+---------+------------+-----------+---------+------- - integer | 1 | 1 | 2147483647 | 1 | no | 1 -Owned by: public.serialtest2.f2 - --- Test comments -COMMENT ON SEQUENCE asdf IS 'won''t work'; -ERROR: relation "asdf" does not exist -COMMENT ON SEQUENCE sequence_test2 IS 'will work'; -COMMENT ON SEQUENCE sequence_test2 IS NULL; --- Test lastval() -CREATE SEQUENCE seq; -SELECT nextval('seq'); - nextval ---------- - 1 -(1 row) - -SELECT lastval(); - lastval ---------- - 1 -(1 row) - -SELECT setval('seq', 99); - setval --------- - 99 -(1 row) - -SELECT lastval(); - lastval ---------- - 99 -(1 row) - -DISCARD SEQUENCES; -SELECT lastval(); -ERROR: lastval is not yet defined in this session -CREATE SEQUENCE seq2; -SELECT nextval('seq2'); - nextval ---------- - 1 -(1 row) - -SELECT lastval(); - lastval ---------- - 1 -(1 row) - -DROP SEQUENCE seq2; --- should fail -SELECT lastval(); -ERROR: lastval is not yet defined in this session --- unlogged sequences --- (more tests in src/test/recovery/) -CREATE UNLOGGED SEQUENCE sequence_test_unlogged; -ALTER SEQUENCE sequence_test_unlogged SET LOGGED; -\d sequence_test_unlogged - Sequence "public.sequence_test_unlogged" - Type | Start | Minimum | Maximum | Increment | Cycles? | Cache ---------+-------+---------+---------------------+-----------+---------+------- - bigint | 1 | 1 | 9223372036854775807 | 1 | no | 1 - -ALTER SEQUENCE sequence_test_unlogged SET UNLOGGED; -\d sequence_test_unlogged - Unlogged sequence "public.sequence_test_unlogged" - Type | Start | Minimum | Maximum | Increment | Cycles? 
| Cache ---------+-------+---------+---------------------+-----------+---------+------- - bigint | 1 | 1 | 9223372036854775807 | 1 | no | 1 - -DROP SEQUENCE sequence_test_unlogged; --- Test sequences in read-only transactions -CREATE TEMPORARY SEQUENCE sequence_test_temp1; -START TRANSACTION READ ONLY; -SELECT nextval('sequence_test_temp1'); -- ok - nextval ---------- - 1 -(1 row) - -SELECT nextval('sequence_test2'); -- error -ERROR: cannot execute nextval() in a read-only transaction -ROLLBACK; -START TRANSACTION READ ONLY; -SELECT setval('sequence_test_temp1', 1); -- ok - setval --------- - 1 -(1 row) - -SELECT setval('sequence_test2', 1); -- error -ERROR: cannot execute setval() in a read-only transaction -ROLLBACK; --- privileges tests -CREATE USER regress_seq_user; --- nextval -BEGIN; -SET LOCAL SESSION AUTHORIZATION regress_seq_user; -CREATE SEQUENCE seq3; -REVOKE ALL ON seq3 FROM regress_seq_user; -GRANT SELECT ON seq3 TO regress_seq_user; -SELECT nextval('seq3'); -ERROR: permission denied for sequence seq3 -ROLLBACK; -BEGIN; -SET LOCAL SESSION AUTHORIZATION regress_seq_user; -CREATE SEQUENCE seq3; -REVOKE ALL ON seq3 FROM regress_seq_user; -GRANT UPDATE ON seq3 TO regress_seq_user; -SELECT nextval('seq3'); - nextval ---------- - 1 -(1 row) - -ROLLBACK; -BEGIN; -SET LOCAL SESSION AUTHORIZATION regress_seq_user; -CREATE SEQUENCE seq3; -REVOKE ALL ON seq3 FROM regress_seq_user; -GRANT USAGE ON seq3 TO regress_seq_user; -SELECT nextval('seq3'); - nextval ---------- - 1 -(1 row) - -ROLLBACK; --- currval -BEGIN; -SET LOCAL SESSION AUTHORIZATION regress_seq_user; -CREATE SEQUENCE seq3; -SELECT nextval('seq3'); - nextval ---------- - 1 -(1 row) - -REVOKE ALL ON seq3 FROM regress_seq_user; -GRANT SELECT ON seq3 TO regress_seq_user; -SELECT currval('seq3'); - currval ---------- - 1 -(1 row) - -ROLLBACK; -BEGIN; -SET LOCAL SESSION AUTHORIZATION regress_seq_user; -CREATE SEQUENCE seq3; -SELECT nextval('seq3'); - nextval ---------- - 1 -(1 row) - -REVOKE ALL ON seq3 FROM regress_seq_user; -GRANT UPDATE ON seq3 TO regress_seq_user; -SELECT currval('seq3'); -ERROR: permission denied for sequence seq3 -ROLLBACK; -BEGIN; -SET LOCAL SESSION AUTHORIZATION regress_seq_user; -CREATE SEQUENCE seq3; -SELECT nextval('seq3'); - nextval ---------- - 1 -(1 row) - -REVOKE ALL ON seq3 FROM regress_seq_user; -GRANT USAGE ON seq3 TO regress_seq_user; -SELECT currval('seq3'); - currval ---------- - 1 -(1 row) - -ROLLBACK; --- lastval -BEGIN; -SET LOCAL SESSION AUTHORIZATION regress_seq_user; -CREATE SEQUENCE seq3; -SELECT nextval('seq3'); - nextval ---------- - 1 -(1 row) - -REVOKE ALL ON seq3 FROM regress_seq_user; -GRANT SELECT ON seq3 TO regress_seq_user; -SELECT lastval(); - lastval ---------- - 1 -(1 row) - -ROLLBACK; -BEGIN; -SET LOCAL SESSION AUTHORIZATION regress_seq_user; -CREATE SEQUENCE seq3; -SELECT nextval('seq3'); - nextval ---------- - 1 -(1 row) - -REVOKE ALL ON seq3 FROM regress_seq_user; -GRANT UPDATE ON seq3 TO regress_seq_user; -SELECT lastval(); -ERROR: permission denied for sequence seq3 -ROLLBACK; -BEGIN; -SET LOCAL SESSION AUTHORIZATION regress_seq_user; -CREATE SEQUENCE seq3; -SELECT nextval('seq3'); - nextval ---------- - 1 -(1 row) - -REVOKE ALL ON seq3 FROM regress_seq_user; -GRANT USAGE ON seq3 TO regress_seq_user; -SELECT lastval(); - lastval ---------- - 1 -(1 row) - -ROLLBACK; --- setval -BEGIN; -SET LOCAL SESSION AUTHORIZATION regress_seq_user; -CREATE SEQUENCE seq3; -REVOKE ALL ON seq3 FROM regress_seq_user; -SAVEPOINT save; -SELECT setval('seq3', 5); -ERROR: permission denied 
for sequence seq3 -ROLLBACK TO save; -GRANT UPDATE ON seq3 TO regress_seq_user; -SELECT setval('seq3', 5); - setval --------- - 5 -(1 row) - -SELECT nextval('seq3'); - nextval ---------- - 6 -(1 row) - -ROLLBACK; --- ALTER SEQUENCE -BEGIN; -SET LOCAL SESSION AUTHORIZATION regress_seq_user; -ALTER SEQUENCE sequence_test2 START WITH 1; -ERROR: must be owner of sequence sequence_test2 -ROLLBACK; --- Sequences should get wiped out as well: -DROP TABLE serialTest1, serialTest2; --- Make sure sequences are gone: -SELECT * FROM information_schema.sequences WHERE sequence_name IN - ('sequence_test2', 'serialtest2_f2_seq', 'serialtest2_f3_seq', - 'serialtest2_f4_seq', 'serialtest2_f5_seq', 'serialtest2_f6_seq') - ORDER BY sequence_name ASC; - sequence_catalog | sequence_schema | sequence_name | data_type | numeric_precision | numeric_precision_radix | numeric_scale | start_value | minimum_value | maximum_value | increment | cycle_option -------------------+-----------------+----------------+-----------+-------------------+-------------------------+---------------+-------------+---------------+---------------+-----------+-------------- - regression | public | sequence_test2 | bigint | 64 | 2 | 0 | 32 | 5 | 36 | 4 | YES -(1 row) - -DROP USER regress_seq_user; -DROP SEQUENCE seq; --- cache tests -CREATE SEQUENCE test_seq1 CACHE 10; -SELECT nextval('test_seq1'); - nextval ---------- - 1 -(1 row) - -SELECT nextval('test_seq1'); - nextval ---------- - 2 -(1 row) - -SELECT nextval('test_seq1'); - nextval ---------- - 3 -(1 row) - --- pg_get_sequence_data -SELECT * FROM pg_get_sequence_data('test_seq1'); - last_value | is_called -------------+----------- - 10 | t -(1 row) - -DROP SEQUENCE test_seq1; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/polymorphism.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/polymorphism.out --- /Users/admin/pgsql/src/test/regress/expected/polymorphism.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/polymorphism.out 2024-12-13 13:20:12 @@ -1,2110 +1,2 @@ --- --- Tests for polymorphic SQL functions and aggregates based on them. --- Tests for other features related to function-calling have snuck in, too. --- -create function polyf(x anyelement) returns anyelement as $$ - select x + 1 -$$ language sql; -select polyf(42) as int, polyf(4.5) as num; - int | num ------+----- - 43 | 5.5 -(1 row) - -select polyf(point(3,4)); -- fail for lack of + operator -ERROR: operator does not exist: point + integer -LINE 2: select x + 1 - ^ -HINT: No operator matches the given name and argument types. You might need to add explicit type casts. 
-QUERY: - select x + 1 - -CONTEXT: SQL function "polyf" during inlining -drop function polyf(x anyelement); -create function polyf(x anyelement) returns anyarray as $$ - select array[x + 1, x + 2] -$$ language sql; -select polyf(42) as int, polyf(4.5) as num; - int | num ----------+----------- - {43,44} | {5.5,6.5} -(1 row) - -drop function polyf(x anyelement); -create function polyf(x anyarray) returns anyelement as $$ - select x[1] -$$ language sql; -select polyf(array[2,4]) as int, polyf(array[4.5, 7.7]) as num; - int | num ------+----- - 2 | 4.5 -(1 row) - -select polyf(stavalues1) from pg_statistic; -- fail, can't infer element type -ERROR: cannot determine element type of "anyarray" argument -drop function polyf(x anyarray); -create function polyf(x anyarray) returns anyarray as $$ - select x -$$ language sql; -select polyf(array[2,4]) as int, polyf(array[4.5, 7.7]) as num; - int | num --------+----------- - {2,4} | {4.5,7.7} -(1 row) - -select polyf(stavalues1) from pg_statistic; -- fail, can't infer element type -ERROR: return type anyarray is not supported for SQL functions -CONTEXT: SQL function "polyf" during inlining -drop function polyf(x anyarray); --- fail, can't infer type: -create function polyf(x anyelement) returns anyrange as $$ - select array[x + 1, x + 2] -$$ language sql; -ERROR: cannot determine result data type -DETAIL: A result of type anyrange requires at least one input of type anyrange or anymultirange. -create function polyf(x anyrange) returns anyarray as $$ - select array[lower(x), upper(x)] -$$ language sql; -select polyf(int4range(42, 49)) as int, polyf(float8range(4.5, 7.8)) as num; - int | num ----------+----------- - {42,49} | {4.5,7.8} -(1 row) - -drop function polyf(x anyrange); -create function polyf(x anycompatible, y anycompatible) returns anycompatiblearray as $$ - select array[x, y] -$$ language sql; -select polyf(2, 4) as int, polyf(2, 4.5) as num; - int | num --------+--------- - {2,4} | {2,4.5} -(1 row) - -drop function polyf(x anycompatible, y anycompatible); -create function polyf(x anycompatiblerange, y anycompatible, z anycompatible) returns anycompatiblearray as $$ - select array[lower(x), upper(x), y, z] -$$ language sql; -select polyf(int4range(42, 49), 11, 2::smallint) as int, polyf(float8range(4.5, 7.8), 7.8, 11::real) as num; - int | num ---------------+------------------ - {42,49,11,2} | {4.5,7.8,7.8,11} -(1 row) - -select polyf(int4range(42, 49), 11, 4.5) as fail; -- range type doesn't fit -ERROR: function polyf(int4range, integer, numeric) does not exist -LINE 1: select polyf(int4range(42, 49), 11, 4.5) as fail; - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -drop function polyf(x anycompatiblerange, y anycompatible, z anycompatible); -create function polyf(x anycompatiblemultirange, y anycompatible, z anycompatible) returns anycompatiblearray as $$ - select array[lower(x), upper(x), y, z] -$$ language sql; -select polyf(multirange(int4range(42, 49)), 11, 2::smallint) as int, polyf(multirange(float8range(4.5, 7.8)), 7.8, 11::real) as num; - int | num ---------------+------------------ - {42,49,11,2} | {4.5,7.8,7.8,11} -(1 row) - -select polyf(multirange(int4range(42, 49)), 11, 4.5) as fail; -- range type doesn't fit -ERROR: function polyf(int4multirange, integer, numeric) does not exist -LINE 1: select polyf(multirange(int4range(42, 49)), 11, 4.5) as fail... - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
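The anycompatible family used above differs from plain anyelement in that the actual arguments are coerced to a common type instead of having to match exactly. A minimal sketch with the hypothetical function demo_pair:

CREATE FUNCTION demo_pair(x anycompatible, y anycompatible)
  RETURNS anycompatiblearray
  LANGUAGE sql AS 'SELECT ARRAY[x, y]';
SELECT demo_pair(1, 2.5);   -- {1,2.5}: integer and numeric resolve to numeric[]
DROP FUNCTION demo_pair(anycompatible, anycompatible);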
-drop function polyf(x anycompatiblemultirange, y anycompatible, z anycompatible); --- fail, can't infer type: -create function polyf(x anycompatible) returns anycompatiblerange as $$ - select array[x + 1, x + 2] -$$ language sql; -ERROR: cannot determine result data type -DETAIL: A result of type anycompatiblerange requires at least one input of type anycompatiblerange or anycompatiblemultirange. -create function polyf(x anycompatiblerange, y anycompatiblearray) returns anycompatiblerange as $$ - select x -$$ language sql; -select polyf(int4range(42, 49), array[11]) as int, polyf(float8range(4.5, 7.8), array[7]) as num; - int | num ----------+----------- - [42,49) | [4.5,7.8) -(1 row) - -drop function polyf(x anycompatiblerange, y anycompatiblearray); --- fail, can't infer type: -create function polyf(x anycompatible) returns anycompatiblemultirange as $$ - select array[x + 1, x + 2] -$$ language sql; -ERROR: cannot determine result data type -DETAIL: A result of type anycompatiblemultirange requires at least one input of type anycompatiblerange or anycompatiblemultirange. -create function polyf(x anycompatiblemultirange, y anycompatiblearray) returns anycompatiblemultirange as $$ - select x -$$ language sql; -select polyf(multirange(int4range(42, 49)), array[11]) as int, polyf(multirange(float8range(4.5, 7.8)), array[7]) as num; - int | num ------------+------------- - {[42,49)} | {[4.5,7.8)} -(1 row) - -drop function polyf(x anycompatiblemultirange, y anycompatiblearray); -create function polyf(a anyelement, b anyarray, - c anycompatible, d anycompatible, - OUT x anyarray, OUT y anycompatiblearray) -as $$ - select a || b, array[c, d] -$$ language sql; -select x, pg_typeof(x), y, pg_typeof(y) - from polyf(11, array[1, 2], 42, 34.5); - x | pg_typeof | y | pg_typeof -----------+-----------+-----------+----------- - {11,1,2} | integer[] | {42,34.5} | numeric[] -(1 row) - -select x, pg_typeof(x), y, pg_typeof(y) - from polyf(11, array[1, 2], point(1,2), point(3,4)); - x | pg_typeof | y | pg_typeof -----------+-----------+-------------------+----------- - {11,1,2} | integer[] | {"(1,2)","(3,4)"} | point[] -(1 row) - -select x, pg_typeof(x), y, pg_typeof(y) - from polyf(11, '{1,2}', point(1,2), '(3,4)'); - x | pg_typeof | y | pg_typeof -----------+-----------+-------------------+----------- - {11,1,2} | integer[] | {"(1,2)","(3,4)"} | point[] -(1 row) - -select x, pg_typeof(x), y, pg_typeof(y) - from polyf(11, array[1, 2.2], 42, 34.5); -- fail -ERROR: function polyf(integer, numeric[], integer, numeric) does not exist -LINE 2: from polyf(11, array[1, 2.2], 42, 34.5); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
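anyrange and anymultirange resolve from the concrete range type of the call, so a bare NULL argument (type unknown) cannot pin the result type down, as the polyf(null) cases below show. A sketch with the hypothetical demo_mr:

CREATE FUNCTION demo_mr(r anyrange) RETURNS anymultirange
  LANGUAGE sql AS 'SELECT multirange(r)';
SELECT demo_mr(int4range(1, 10));   -- {[1,10)}
SELECT demo_mr(NULL);               -- fails: could not determine polymorphic type
DROP FUNCTION demo_mr(anyrange);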
-drop function polyf(a anyelement, b anyarray, - c anycompatible, d anycompatible); -create function polyf(anyrange) returns anymultirange -as 'select multirange($1);' language sql; -select polyf(int4range(1,10)); - polyf ----------- - {[1,10)} -(1 row) - -select polyf(null); -ERROR: could not determine polymorphic type because input has type unknown -drop function polyf(anyrange); -create function polyf(anymultirange) returns anyelement -as 'select lower($1);' language sql; -select polyf(int4multirange(int4range(1,10), int4range(20,30))); - polyf -------- - 1 -(1 row) - -select polyf(null); -ERROR: could not determine polymorphic type because input has type unknown -drop function polyf(anymultirange); -create function polyf(anycompatiblerange) returns anycompatiblemultirange -as 'select multirange($1);' language sql; -select polyf(int4range(1,10)); - polyf ----------- - {[1,10)} -(1 row) - -select polyf(null); -ERROR: could not determine polymorphic type anycompatiblerange because input has type unknown -drop function polyf(anycompatiblerange); -create function polyf(anymultirange) returns anyrange -as 'select range_merge($1);' language sql; -select polyf(int4multirange(int4range(1,10), int4range(20,30))); - polyf --------- - [1,30) -(1 row) - -select polyf(null); -ERROR: could not determine polymorphic type because input has type unknown -drop function polyf(anymultirange); -create function polyf(anycompatiblemultirange) returns anycompatiblerange -as 'select range_merge($1);' language sql; -select polyf(int4multirange(int4range(1,10), int4range(20,30))); - polyf --------- - [1,30) -(1 row) - -select polyf(null); -ERROR: could not determine polymorphic type anycompatiblerange because input has type unknown -drop function polyf(anycompatiblemultirange); -create function polyf(anycompatiblemultirange) returns anycompatible -as 'select lower($1);' language sql; -select polyf(int4multirange(int4range(1,10), int4range(20,30))); - polyf -------- - 1 -(1 row) - -select polyf(null); -ERROR: could not determine polymorphic type anycompatiblemultirange because input has type unknown -drop function polyf(anycompatiblemultirange); --- --- Polymorphic aggregate tests --- --- Legend: ------------ --- A = type is ANY --- P = type is polymorphic --- N = type is non-polymorphic --- B = aggregate base type --- S = aggregate state type --- R = aggregate return type --- 1 = arg1 of a function --- 2 = arg2 of a function --- ag = aggregate --- tf = trans (state) function --- ff = final function --- rt = return type of a function --- -> = implies --- => = allowed --- !> = not allowed --- E = exists --- NE = not-exists --- --- Possible states: --- ---------------- --- B = (A || P || N) --- when (B = A) -> (tf2 = NE) --- S = (P || N) --- ff = (E || NE) --- tf1 = (P || N) --- tf2 = (NE || P || N) --- R = (P || N) --- create functions for use as tf and ff with the needed combinations of --- argument polymorphism, but within the constraints of valid aggregate --- functions, i.e. 
tf arg1 and tf return type must match --- polymorphic single arg transfn -CREATE FUNCTION stfp(anyarray) RETURNS anyarray AS -'select $1' LANGUAGE SQL; --- non-polymorphic single arg transfn -CREATE FUNCTION stfnp(int[]) RETURNS int[] AS -'select $1' LANGUAGE SQL; --- dual polymorphic transfn -CREATE FUNCTION tfp(anyarray,anyelement) RETURNS anyarray AS -'select $1 || $2' LANGUAGE SQL; --- dual non-polymorphic transfn -CREATE FUNCTION tfnp(int[],int) RETURNS int[] AS -'select $1 || $2' LANGUAGE SQL; --- arg1 only polymorphic transfn -CREATE FUNCTION tf1p(anyarray,int) RETURNS anyarray AS -'select $1' LANGUAGE SQL; --- arg2 only polymorphic transfn -CREATE FUNCTION tf2p(int[],anyelement) RETURNS int[] AS -'select $1' LANGUAGE SQL; --- multi-arg polymorphic -CREATE FUNCTION sum3(anyelement,anyelement,anyelement) returns anyelement AS -'select $1+$2+$3' language sql strict; --- finalfn polymorphic -CREATE FUNCTION ffp(anyarray) RETURNS anyarray AS -'select $1' LANGUAGE SQL; --- finalfn non-polymorphic -CREATE FUNCTION ffnp(int[]) returns int[] as -'select $1' LANGUAGE SQL; --- Try to cover all the possible states: --- --- Note: in Cases 1 & 2, we are trying to return P. Therefore, if the transfn --- is stfnp, tfnp, or tf2p, we must use ffp as finalfn, because stfnp, tfnp, --- and tf2p do not return P. Conversely, in Cases 3 & 4, we are trying to --- return N. Therefore, if the transfn is stfp, tfp, or tf1p, we must use ffnp --- as finalfn, because stfp, tfp, and tf1p do not return N. --- --- Case1 (R = P) && (B = A) --- ------------------------ --- S tf1 --- ------- --- N N --- should CREATE -CREATE AGGREGATE myaggp01a(*) (SFUNC = stfnp, STYPE = int4[], - FINALFUNC = ffp, INITCOND = '{}'); --- P N --- should ERROR: stfnp(anyarray) not matched by stfnp(int[]) -CREATE AGGREGATE myaggp02a(*) (SFUNC = stfnp, STYPE = anyarray, - FINALFUNC = ffp, INITCOND = '{}'); -ERROR: cannot determine transition data type -DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. --- N P --- should CREATE -CREATE AGGREGATE myaggp03a(*) (SFUNC = stfp, STYPE = int4[], - FINALFUNC = ffp, INITCOND = '{}'); -CREATE AGGREGATE myaggp03b(*) (SFUNC = stfp, STYPE = int4[], - INITCOND = '{}'); --- P P --- should ERROR: we have no way to resolve S -CREATE AGGREGATE myaggp04a(*) (SFUNC = stfp, STYPE = anyarray, - FINALFUNC = ffp, INITCOND = '{}'); -ERROR: cannot determine transition data type -DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. -CREATE AGGREGATE myaggp04b(*) (SFUNC = stfp, STYPE = anyarray, - INITCOND = '{}'); -ERROR: cannot determine transition data type -DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. 
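The case matrix here (Case 1 above, Cases 2-4 below) probes a single rule: a polymorphic state or return type must be deducible from the aggregate's declared input types. For contrast, a combination that is valid, sketched with hypothetical names:

CREATE FUNCTION demo_accum(s anyarray, e anyelement) RETURNS anyarray
  LANGUAGE sql AS 'SELECT s || e';
CREATE AGGREGATE demo_agg(anyelement) (
  SFUNC = demo_accum,
  STYPE = anyarray,
  INITCOND = '{}'
);
SELECT demo_agg(x) FROM generate_series(1, 3) AS g(x);   -- {1,2,3}
DROP AGGREGATE demo_agg(anyelement);
DROP FUNCTION demo_accum(anyarray, anyelement);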
--- Case2 (R = P) && ((B = P) || (B = N)) --- ------------------------------------- --- S tf1 B tf2 --- ----------------------- --- N N N N --- should CREATE -CREATE AGGREGATE myaggp05a(BASETYPE = int, SFUNC = tfnp, STYPE = int[], - FINALFUNC = ffp, INITCOND = '{}'); --- N N N P --- should CREATE -CREATE AGGREGATE myaggp06a(BASETYPE = int, SFUNC = tf2p, STYPE = int[], - FINALFUNC = ffp, INITCOND = '{}'); --- N N P N --- should ERROR: tfnp(int[], anyelement) not matched by tfnp(int[], int) -CREATE AGGREGATE myaggp07a(BASETYPE = anyelement, SFUNC = tfnp, STYPE = int[], - FINALFUNC = ffp, INITCOND = '{}'); -ERROR: function tfnp(integer[], anyelement) does not exist --- N N P P --- should CREATE -CREATE AGGREGATE myaggp08a(BASETYPE = anyelement, SFUNC = tf2p, STYPE = int[], - FINALFUNC = ffp, INITCOND = '{}'); --- N P N N --- should CREATE -CREATE AGGREGATE myaggp09a(BASETYPE = int, SFUNC = tf1p, STYPE = int[], - FINALFUNC = ffp, INITCOND = '{}'); -CREATE AGGREGATE myaggp09b(BASETYPE = int, SFUNC = tf1p, STYPE = int[], - INITCOND = '{}'); --- N P N P --- should CREATE -CREATE AGGREGATE myaggp10a(BASETYPE = int, SFUNC = tfp, STYPE = int[], - FINALFUNC = ffp, INITCOND = '{}'); -CREATE AGGREGATE myaggp10b(BASETYPE = int, SFUNC = tfp, STYPE = int[], - INITCOND = '{}'); --- N P P N --- should ERROR: tf1p(int[],anyelement) not matched by tf1p(anyarray,int) -CREATE AGGREGATE myaggp11a(BASETYPE = anyelement, SFUNC = tf1p, STYPE = int[], - FINALFUNC = ffp, INITCOND = '{}'); -ERROR: function tf1p(integer[], anyelement) does not exist -CREATE AGGREGATE myaggp11b(BASETYPE = anyelement, SFUNC = tf1p, STYPE = int[], - INITCOND = '{}'); -ERROR: function tf1p(integer[], anyelement) does not exist --- N P P P --- should ERROR: tfp(int[],anyelement) not matched by tfp(anyarray,anyelement) -CREATE AGGREGATE myaggp12a(BASETYPE = anyelement, SFUNC = tfp, STYPE = int[], - FINALFUNC = ffp, INITCOND = '{}'); -ERROR: function tfp(integer[], anyelement) does not exist -CREATE AGGREGATE myaggp12b(BASETYPE = anyelement, SFUNC = tfp, STYPE = int[], - INITCOND = '{}'); -ERROR: function tfp(integer[], anyelement) does not exist --- P N N N --- should ERROR: tfnp(anyarray, int) not matched by tfnp(int[],int) -CREATE AGGREGATE myaggp13a(BASETYPE = int, SFUNC = tfnp, STYPE = anyarray, - FINALFUNC = ffp, INITCOND = '{}'); -ERROR: cannot determine transition data type -DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. --- P N N P --- should ERROR: tf2p(anyarray, int) not matched by tf2p(int[],anyelement) -CREATE AGGREGATE myaggp14a(BASETYPE = int, SFUNC = tf2p, STYPE = anyarray, - FINALFUNC = ffp, INITCOND = '{}'); -ERROR: cannot determine transition data type -DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. 
--- P N P N --- should ERROR: tfnp(anyarray, anyelement) not matched by tfnp(int[],int) -CREATE AGGREGATE myaggp15a(BASETYPE = anyelement, SFUNC = tfnp, - STYPE = anyarray, FINALFUNC = ffp, INITCOND = '{}'); -ERROR: function tfnp(anyarray, anyelement) does not exist --- P N P P --- should ERROR: tf2p(anyarray, anyelement) not matched by tf2p(int[],anyelement) -CREATE AGGREGATE myaggp16a(BASETYPE = anyelement, SFUNC = tf2p, - STYPE = anyarray, FINALFUNC = ffp, INITCOND = '{}'); -ERROR: function tf2p(anyarray, anyelement) does not exist --- P P N N --- should ERROR: we have no way to resolve S -CREATE AGGREGATE myaggp17a(BASETYPE = int, SFUNC = tf1p, STYPE = anyarray, - FINALFUNC = ffp, INITCOND = '{}'); -ERROR: cannot determine transition data type -DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. -CREATE AGGREGATE myaggp17b(BASETYPE = int, SFUNC = tf1p, STYPE = anyarray, - INITCOND = '{}'); -ERROR: cannot determine transition data type -DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. --- P P N P --- should ERROR: tfp(anyarray, int) not matched by tfp(anyarray, anyelement) -CREATE AGGREGATE myaggp18a(BASETYPE = int, SFUNC = tfp, STYPE = anyarray, - FINALFUNC = ffp, INITCOND = '{}'); -ERROR: cannot determine transition data type -DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. -CREATE AGGREGATE myaggp18b(BASETYPE = int, SFUNC = tfp, STYPE = anyarray, - INITCOND = '{}'); -ERROR: cannot determine transition data type -DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. --- P P P N --- should ERROR: tf1p(anyarray, anyelement) not matched by tf1p(anyarray, int) -CREATE AGGREGATE myaggp19a(BASETYPE = anyelement, SFUNC = tf1p, - STYPE = anyarray, FINALFUNC = ffp, INITCOND = '{}'); -ERROR: function tf1p(anyarray, anyelement) does not exist -CREATE AGGREGATE myaggp19b(BASETYPE = anyelement, SFUNC = tf1p, - STYPE = anyarray, INITCOND = '{}'); -ERROR: function tf1p(anyarray, anyelement) does not exist --- P P P P --- should CREATE -CREATE AGGREGATE myaggp20a(BASETYPE = anyelement, SFUNC = tfp, - STYPE = anyarray, FINALFUNC = ffp, INITCOND = '{}'); -CREATE AGGREGATE myaggp20b(BASETYPE = anyelement, SFUNC = tfp, - STYPE = anyarray, INITCOND = '{}'); --- Case3 (R = N) && (B = A) --- ------------------------ --- S tf1 --- ------- --- N N --- should CREATE -CREATE AGGREGATE myaggn01a(*) (SFUNC = stfnp, STYPE = int4[], - FINALFUNC = ffnp, INITCOND = '{}'); -CREATE AGGREGATE myaggn01b(*) (SFUNC = stfnp, STYPE = int4[], - INITCOND = '{}'); --- P N --- should ERROR: stfnp(anyarray) not matched by stfnp(int[]) -CREATE AGGREGATE myaggn02a(*) (SFUNC = stfnp, STYPE = anyarray, - FINALFUNC = ffnp, INITCOND = '{}'); -ERROR: cannot determine transition data type -DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. -CREATE AGGREGATE myaggn02b(*) (SFUNC = stfnp, STYPE = anyarray, - INITCOND = '{}'); -ERROR: cannot determine transition data type -DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. 
--- N P --- should CREATE -CREATE AGGREGATE myaggn03a(*) (SFUNC = stfp, STYPE = int4[], - FINALFUNC = ffnp, INITCOND = '{}'); --- P P --- should ERROR: ffnp(anyarray) not matched by ffnp(int[]) -CREATE AGGREGATE myaggn04a(*) (SFUNC = stfp, STYPE = anyarray, - FINALFUNC = ffnp, INITCOND = '{}'); -ERROR: cannot determine transition data type -DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. --- Case4 (R = N) && ((B = P) || (B = N)) --- ------------------------------------- --- S tf1 B tf2 --- ----------------------- --- N N N N --- should CREATE -CREATE AGGREGATE myaggn05a(BASETYPE = int, SFUNC = tfnp, STYPE = int[], - FINALFUNC = ffnp, INITCOND = '{}'); -CREATE AGGREGATE myaggn05b(BASETYPE = int, SFUNC = tfnp, STYPE = int[], - INITCOND = '{}'); --- N N N P --- should CREATE -CREATE AGGREGATE myaggn06a(BASETYPE = int, SFUNC = tf2p, STYPE = int[], - FINALFUNC = ffnp, INITCOND = '{}'); -CREATE AGGREGATE myaggn06b(BASETYPE = int, SFUNC = tf2p, STYPE = int[], - INITCOND = '{}'); --- N N P N --- should ERROR: tfnp(int[], anyelement) not matched by tfnp(int[], int) -CREATE AGGREGATE myaggn07a(BASETYPE = anyelement, SFUNC = tfnp, STYPE = int[], - FINALFUNC = ffnp, INITCOND = '{}'); -ERROR: function tfnp(integer[], anyelement) does not exist -CREATE AGGREGATE myaggn07b(BASETYPE = anyelement, SFUNC = tfnp, STYPE = int[], - INITCOND = '{}'); -ERROR: function tfnp(integer[], anyelement) does not exist --- N N P P --- should CREATE -CREATE AGGREGATE myaggn08a(BASETYPE = anyelement, SFUNC = tf2p, STYPE = int[], - FINALFUNC = ffnp, INITCOND = '{}'); -CREATE AGGREGATE myaggn08b(BASETYPE = anyelement, SFUNC = tf2p, STYPE = int[], - INITCOND = '{}'); --- N P N N --- should CREATE -CREATE AGGREGATE myaggn09a(BASETYPE = int, SFUNC = tf1p, STYPE = int[], - FINALFUNC = ffnp, INITCOND = '{}'); --- N P N P --- should CREATE -CREATE AGGREGATE myaggn10a(BASETYPE = int, SFUNC = tfp, STYPE = int[], - FINALFUNC = ffnp, INITCOND = '{}'); --- N P P N --- should ERROR: tf1p(int[],anyelement) not matched by tf1p(anyarray,int) -CREATE AGGREGATE myaggn11a(BASETYPE = anyelement, SFUNC = tf1p, STYPE = int[], - FINALFUNC = ffnp, INITCOND = '{}'); -ERROR: function tf1p(integer[], anyelement) does not exist --- N P P P --- should ERROR: tfp(int[],anyelement) not matched by tfp(anyarray,anyelement) -CREATE AGGREGATE myaggn12a(BASETYPE = anyelement, SFUNC = tfp, STYPE = int[], - FINALFUNC = ffnp, INITCOND = '{}'); -ERROR: function tfp(integer[], anyelement) does not exist --- P N N N --- should ERROR: tfnp(anyarray, int) not matched by tfnp(int[],int) -CREATE AGGREGATE myaggn13a(BASETYPE = int, SFUNC = tfnp, STYPE = anyarray, - FINALFUNC = ffnp, INITCOND = '{}'); -ERROR: cannot determine transition data type -DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. -CREATE AGGREGATE myaggn13b(BASETYPE = int, SFUNC = tfnp, STYPE = anyarray, - INITCOND = '{}'); -ERROR: cannot determine transition data type -DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. 
--- P N N P --- should ERROR: tf2p(anyarray, int) not matched by tf2p(int[],anyelement) -CREATE AGGREGATE myaggn14a(BASETYPE = int, SFUNC = tf2p, STYPE = anyarray, - FINALFUNC = ffnp, INITCOND = '{}'); -ERROR: cannot determine transition data type -DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. -CREATE AGGREGATE myaggn14b(BASETYPE = int, SFUNC = tf2p, STYPE = anyarray, - INITCOND = '{}'); -ERROR: cannot determine transition data type -DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. --- P N P N --- should ERROR: tfnp(anyarray, anyelement) not matched by tfnp(int[],int) -CREATE AGGREGATE myaggn15a(BASETYPE = anyelement, SFUNC = tfnp, - STYPE = anyarray, FINALFUNC = ffnp, INITCOND = '{}'); -ERROR: function tfnp(anyarray, anyelement) does not exist -CREATE AGGREGATE myaggn15b(BASETYPE = anyelement, SFUNC = tfnp, - STYPE = anyarray, INITCOND = '{}'); -ERROR: function tfnp(anyarray, anyelement) does not exist --- P N P P --- should ERROR: tf2p(anyarray, anyelement) not matched by tf2p(int[],anyelement) -CREATE AGGREGATE myaggn16a(BASETYPE = anyelement, SFUNC = tf2p, - STYPE = anyarray, FINALFUNC = ffnp, INITCOND = '{}'); -ERROR: function tf2p(anyarray, anyelement) does not exist -CREATE AGGREGATE myaggn16b(BASETYPE = anyelement, SFUNC = tf2p, - STYPE = anyarray, INITCOND = '{}'); -ERROR: function tf2p(anyarray, anyelement) does not exist --- P P N N --- should ERROR: ffnp(anyarray) not matched by ffnp(int[]) -CREATE AGGREGATE myaggn17a(BASETYPE = int, SFUNC = tf1p, STYPE = anyarray, - FINALFUNC = ffnp, INITCOND = '{}'); -ERROR: cannot determine transition data type -DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. --- P P N P --- should ERROR: tfp(anyarray, int) not matched by tfp(anyarray, anyelement) -CREATE AGGREGATE myaggn18a(BASETYPE = int, SFUNC = tfp, STYPE = anyarray, - FINALFUNC = ffnp, INITCOND = '{}'); -ERROR: cannot determine transition data type -DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. 
--- P P P N --- should ERROR: tf1p(anyarray, anyelement) not matched by tf1p(anyarray, int) -CREATE AGGREGATE myaggn19a(BASETYPE = anyelement, SFUNC = tf1p, - STYPE = anyarray, FINALFUNC = ffnp, INITCOND = '{}'); -ERROR: function tf1p(anyarray, anyelement) does not exist --- P P P P --- should ERROR: ffnp(anyarray) not matched by ffnp(int[]) -CREATE AGGREGATE myaggn20a(BASETYPE = anyelement, SFUNC = tfp, - STYPE = anyarray, FINALFUNC = ffnp, INITCOND = '{}'); -ERROR: function ffnp(anyarray) does not exist --- multi-arg polymorphic -CREATE AGGREGATE mysum2(anyelement,anyelement) (SFUNC = sum3, - STYPE = anyelement, INITCOND = '0'); --- create test data for polymorphic aggregates -create temp table t(f1 int, f2 int[], f3 text); -insert into t values(1,array[1],'a'); -insert into t values(1,array[11],'b'); -insert into t values(1,array[111],'c'); -insert into t values(2,array[2],'a'); -insert into t values(2,array[22],'b'); -insert into t values(2,array[222],'c'); -insert into t values(3,array[3],'a'); -insert into t values(3,array[3],'b'); --- test the successfully created polymorphic aggregates -select f3, myaggp01a(*) from t group by f3 order by f3; - f3 | myaggp01a -----+----------- - a | {} - b | {} - c | {} -(3 rows) - -select f3, myaggp03a(*) from t group by f3 order by f3; - f3 | myaggp03a -----+----------- - a | {} - b | {} - c | {} -(3 rows) - -select f3, myaggp03b(*) from t group by f3 order by f3; - f3 | myaggp03b -----+----------- - a | {} - b | {} - c | {} -(3 rows) - -select f3, myaggp05a(f1) from t group by f3 order by f3; - f3 | myaggp05a -----+----------- - a | {1,2,3} - b | {1,2,3} - c | {1,2} -(3 rows) - -select f3, myaggp06a(f1) from t group by f3 order by f3; - f3 | myaggp06a -----+----------- - a | {} - b | {} - c | {} -(3 rows) - -select f3, myaggp08a(f1) from t group by f3 order by f3; - f3 | myaggp08a -----+----------- - a | {} - b | {} - c | {} -(3 rows) - -select f3, myaggp09a(f1) from t group by f3 order by f3; - f3 | myaggp09a -----+----------- - a | {} - b | {} - c | {} -(3 rows) - -select f3, myaggp09b(f1) from t group by f3 order by f3; - f3 | myaggp09b -----+----------- - a | {} - b | {} - c | {} -(3 rows) - -select f3, myaggp10a(f1) from t group by f3 order by f3; - f3 | myaggp10a -----+----------- - a | {1,2,3} - b | {1,2,3} - c | {1,2} -(3 rows) - -select f3, myaggp10b(f1) from t group by f3 order by f3; - f3 | myaggp10b -----+----------- - a | {1,2,3} - b | {1,2,3} - c | {1,2} -(3 rows) - -select f3, myaggp20a(f1) from t group by f3 order by f3; - f3 | myaggp20a -----+----------- - a | {1,2,3} - b | {1,2,3} - c | {1,2} -(3 rows) - -select f3, myaggp20b(f1) from t group by f3 order by f3; - f3 | myaggp20b -----+----------- - a | {1,2,3} - b | {1,2,3} - c | {1,2} -(3 rows) - -select f3, myaggn01a(*) from t group by f3 order by f3; - f3 | myaggn01a -----+----------- - a | {} - b | {} - c | {} -(3 rows) - -select f3, myaggn01b(*) from t group by f3 order by f3; - f3 | myaggn01b -----+----------- - a | {} - b | {} - c | {} -(3 rows) - -select f3, myaggn03a(*) from t group by f3 order by f3; - f3 | myaggn03a -----+----------- - a | {} - b | {} - c | {} -(3 rows) - -select f3, myaggn05a(f1) from t group by f3 order by f3; - f3 | myaggn05a -----+----------- - a | {1,2,3} - b | {1,2,3} - c | {1,2} -(3 rows) - -select f3, myaggn05b(f1) from t group by f3 order by f3; - f3 | myaggn05b -----+----------- - a | {1,2,3} - b | {1,2,3} - c | {1,2} -(3 rows) - -select f3, myaggn06a(f1) from t group by f3 order by f3; - f3 | myaggn06a -----+----------- - a | {} - b | {} - c 
| {} -(3 rows) - -select f3, myaggn06b(f1) from t group by f3 order by f3; - f3 | myaggn06b -----+----------- - a | {} - b | {} - c | {} -(3 rows) - -select f3, myaggn08a(f1) from t group by f3 order by f3; - f3 | myaggn08a -----+----------- - a | {} - b | {} - c | {} -(3 rows) - -select f3, myaggn08b(f1) from t group by f3 order by f3; - f3 | myaggn08b -----+----------- - a | {} - b | {} - c | {} -(3 rows) - -select f3, myaggn09a(f1) from t group by f3 order by f3; - f3 | myaggn09a -----+----------- - a | {} - b | {} - c | {} -(3 rows) - -select f3, myaggn10a(f1) from t group by f3 order by f3; - f3 | myaggn10a -----+----------- - a | {1,2,3} - b | {1,2,3} - c | {1,2} -(3 rows) - -select mysum2(f1, f1 + 1) from t; - mysum2 --------- - 38 -(1 row) - --- test inlining of polymorphic SQL functions -create function bleat(int) returns int as $$ -begin - raise notice 'bleat %', $1; - return $1; -end$$ language plpgsql; -create function sql_if(bool, anyelement, anyelement) returns anyelement as $$ -select case when $1 then $2 else $3 end $$ language sql; --- Note this would fail with integer overflow, never mind wrong bleat() output, --- if the CASE expression were not successfully inlined -select f1, sql_if(f1 > 0, bleat(f1), bleat(f1 + 1)) from int4_tbl; -NOTICE: bleat 1 -NOTICE: bleat 123456 -NOTICE: bleat -123455 -NOTICE: bleat 2147483647 -NOTICE: bleat -2147483646 - f1 | sql_if --------------+------------- - 0 | 1 - 123456 | 123456 - -123456 | -123455 - 2147483647 | 2147483647 - -2147483647 | -2147483646 -(5 rows) - -select q2, sql_if(q2 > 0, q2, q2 + 1) from int8_tbl; - q2 | sql_if --------------------+------------------- - 456 | 456 - 4567890123456789 | 4567890123456789 - 123 | 123 - 4567890123456789 | 4567890123456789 - -4567890123456789 | -4567890123456788 -(5 rows) - --- another sort of polymorphic aggregate -CREATE AGGREGATE array_larger_accum (anyarray) -( - sfunc = array_larger, - stype = anyarray, - initcond = '{}' -); -SELECT array_larger_accum(i) -FROM (VALUES (ARRAY[1,2]), (ARRAY[3,4])) as t(i); - array_larger_accum --------------------- - {3,4} -(1 row) - -SELECT array_larger_accum(i) -FROM (VALUES (ARRAY[row(1,2),row(3,4)]), (ARRAY[row(5,6),row(7,8)])) as t(i); - array_larger_accum --------------------- - {"(5,6)","(7,8)"} -(1 row) - --- another kind of polymorphic aggregate -create function add_group(grp anyarray, ad anyelement, size integer) - returns anyarray - as $$ -begin - if grp is null then - return array[ad]; - end if; - if array_upper(grp, 1) < size then - return grp || ad; - end if; - return grp; -end; -$$ - language plpgsql immutable; -create aggregate build_group(anyelement, integer) ( - SFUNC = add_group, - STYPE = anyarray -); -select build_group(q1,3) from int8_tbl; - build_group ----------------------------- - {123,123,4567890123456789} -(1 row) - --- this should fail because stype isn't compatible with arg -create aggregate build_group(int8, integer) ( - SFUNC = add_group, - STYPE = int2[] -); -ERROR: function add_group(smallint[], bigint, integer) does not exist --- but we can make a non-poly agg from a poly sfunc if types are OK -create aggregate build_group(int8, integer) ( - SFUNC = add_group, - STYPE = int8[] -); --- check proper resolution of data types for polymorphic transfn/finalfn -create function first_el_transfn(anyarray, anyelement) returns anyarray as -'select $1 || $2' language sql immutable; -create function first_el(anyarray) returns anyelement as -'select $1[1]' language sql strict immutable; -create aggregate first_el_agg_f8(float8) ( - 
SFUNC = array_append, - STYPE = float8[], - FINALFUNC = first_el -); -create aggregate first_el_agg_any(anyelement) ( - SFUNC = first_el_transfn, - STYPE = anyarray, - FINALFUNC = first_el -); -select first_el_agg_f8(x::float8) from generate_series(1,10) x; - first_el_agg_f8 ------------------ - 1 -(1 row) - -select first_el_agg_any(x) from generate_series(1,10) x; - first_el_agg_any ------------------- - 1 -(1 row) - -select first_el_agg_f8(x::float8) over(order by x) from generate_series(1,10) x; - first_el_agg_f8 ------------------ - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 -(10 rows) - -select first_el_agg_any(x) over(order by x) from generate_series(1,10) x; - first_el_agg_any ------------------- - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 -(10 rows) - --- check that we can apply functions taking ANYARRAY to pg_stats -select distinct array_ndims(histogram_bounds) from pg_stats -where histogram_bounds is not null; - array_ndims -------------- - 1 -(1 row) - --- such functions must protect themselves if varying element type isn't OK --- (WHERE clause here is to avoid possibly getting a collation error instead) -select max(histogram_bounds) from pg_stats where tablename = 'pg_am'; -ERROR: cannot compare arrays of different element types --- another corner case is the input functions for polymorphic pseudotypes -select array_in('{1,2,3}','int4'::regtype,-1); -- this has historically worked - array_in ----------- - {1,2,3} -(1 row) - -select * from array_in('{1,2,3}','int4'::regtype,-1); -- this not -ERROR: function "array_in" in FROM has unsupported return type anyarray -LINE 1: select * from array_in('{1,2,3}','int4'::regtype,-1); - ^ -select anyrange_in('[10,20)','int4range'::regtype,-1); -ERROR: cannot accept a value of type anyrange --- test variadic polymorphic functions -create function myleast(variadic anyarray) returns anyelement as $$ - select min($1[i]) from generate_subscripts($1,1) g(i) -$$ language sql immutable strict; -select myleast(10, 1, 20, 33); - myleast ---------- - 1 -(1 row) - -select myleast(1.1, 0.22, 0.55); - myleast ---------- - 0.22 -(1 row) - -select myleast('z'::text); - myleast ---------- - z -(1 row) - -select myleast(); -- fail -ERROR: function myleast() does not exist -LINE 1: select myleast(); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
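myleast above shows the variadic polymorphic pattern: VARIADIC anyarray collects however many arguments the caller passes into one array whose element type is inferred per call, and the VARIADIC keyword (next hunk) supplies an array directly. A sketch with the hypothetical demo_least:

CREATE FUNCTION demo_least(VARIADIC a anyarray) RETURNS anyelement
  LANGUAGE sql AS 'SELECT min(x) FROM unnest(a) AS u(x)';
SELECT demo_least(3, 1, 2);                    -- 1
SELECT demo_least(VARIADIC ARRAY[3.5, 1.5]);   -- 1.5
DROP FUNCTION demo_least(anyarray);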
--- test with variadic call parameter -select myleast(variadic array[1,2,3,4,-1]); - myleast ---------- - -1 -(1 row) - -select myleast(variadic array[1.1, -5.5]); - myleast ---------- - -5.5 -(1 row) - ---test with empty variadic call parameter -select myleast(variadic array[]::int[]); - myleast ---------- - -(1 row) - --- an example with some ordinary arguments too -create function concat(text, variadic anyarray) returns text as $$ - select array_to_string($2, $1); -$$ language sql immutable strict; -select concat('%', 1, 2, 3, 4, 5); - concat ------------ - 1%2%3%4%5 -(1 row) - -select concat('|', 'a'::text, 'b', 'c'); - concat --------- - a|b|c -(1 row) - -select concat('|', variadic array[1,2,33]); - concat --------- - 1|2|33 -(1 row) - -select concat('|', variadic array[]::int[]); - concat --------- - -(1 row) - -drop function concat(text, anyarray); --- mix variadic with anyelement -create function formarray(anyelement, variadic anyarray) returns anyarray as $$ - select array_prepend($1, $2); -$$ language sql immutable strict; -select formarray(1,2,3,4,5); - formarray -------------- - {1,2,3,4,5} -(1 row) - -select formarray(1.1, variadic array[1.2,55.5]); - formarray ----------------- - {1.1,1.2,55.5} -(1 row) - -select formarray(1.1, array[1.2,55.5]); -- fail without variadic -ERROR: function formarray(numeric, numeric[]) does not exist -LINE 1: select formarray(1.1, array[1.2,55.5]); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -select formarray(1, 'x'::text); -- fail, type mismatch -ERROR: function formarray(integer, text) does not exist -LINE 1: select formarray(1, 'x'::text); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -select formarray(1, variadic array['x'::text]); -- fail, type mismatch -ERROR: function formarray(integer, text[]) does not exist -LINE 1: select formarray(1, variadic array['x'::text]); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -drop function formarray(anyelement, variadic anyarray); --- test pg_typeof() function -select pg_typeof(null); -- unknown - pg_typeof ------------ - unknown -(1 row) - -select pg_typeof(0); -- integer - pg_typeof ------------ - integer -(1 row) - -select pg_typeof(0.0); -- numeric - pg_typeof ------------ - numeric -(1 row) - -select pg_typeof(1+1 = 2); -- boolean - pg_typeof ------------ - boolean -(1 row) - -select pg_typeof('x'); -- unknown - pg_typeof ------------ - unknown -(1 row) - -select pg_typeof('' || ''); -- text - pg_typeof ------------ - text -(1 row) - -select pg_typeof(pg_typeof(0)); -- regtype - pg_typeof ------------ - regtype -(1 row) - -select pg_typeof(array[1.2,55.5]); -- numeric[] - pg_typeof ------------ - numeric[] -(1 row) - -select pg_typeof(myleast(10, 1, 20, 33)); -- polymorphic input - pg_typeof ------------ - integer -(1 row) - --- test functions with default parameters --- test basic functionality -create function dfunc(a int = 1, int = 2) returns int as $$ - select $1 + $2; -$$ language sql; -select dfunc(); - dfunc -------- - 3 -(1 row) - -select dfunc(10); - dfunc -------- - 12 -(1 row) - -select dfunc(10, 20); - dfunc -------- - 30 -(1 row) - -select dfunc(10, 20, 30); -- fail -ERROR: function dfunc(integer, integer, integer) does not exist -LINE 1: select dfunc(10, 20, 30); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
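The dfunc tests around this point exercise parameter defaults: omitted trailing arguments fall back to their declared defaults, which is also why only trailing input parameters may carry them. A compact sketch with the hypothetical demo_add:

CREATE FUNCTION demo_add(a int, b int DEFAULT 10) RETURNS int
  LANGUAGE sql AS 'SELECT a + b';
SELECT demo_add(1);       -- 11: b falls back to its default
SELECT demo_add(1, 2);    -- 3
DROP FUNCTION demo_add(int, int);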
-drop function dfunc(); -- fail -ERROR: function dfunc() does not exist -drop function dfunc(int); -- fail -ERROR: function dfunc(integer) does not exist -drop function dfunc(int, int); -- ok --- fail: defaults must be at end of argument list -create function dfunc(a int = 1, b int) returns int as $$ - select $1 + $2; -$$ language sql; -ERROR: input parameters after one with a default value must also have defaults -LINE 1: create function dfunc(a int = 1, b int) returns int as $$ - ^ --- however, this should work: -create function dfunc(a int = 1, out sum int, b int = 2) as $$ - select $1 + $2; -$$ language sql; -select dfunc(); - dfunc -------- - 3 -(1 row) - --- verify it lists properly -\df dfunc - List of functions - Schema | Name | Result data type | Argument data types | Type ---------+-------+------------------+-----------------------------------------------------------+------ - public | dfunc | integer | a integer DEFAULT 1, OUT sum integer, b integer DEFAULT 2 | func -(1 row) - -drop function dfunc(int, int); --- check implicit coercion -create function dfunc(a int DEFAULT 1.0, int DEFAULT '-1') returns int as $$ - select $1 + $2; -$$ language sql; -select dfunc(); - dfunc -------- - 0 -(1 row) - -create function dfunc(a text DEFAULT 'Hello', b text DEFAULT 'World') returns text as $$ - select $1 || ', ' || $2; -$$ language sql; -select dfunc(); -- fail: which dfunc should be called? int or text -ERROR: function dfunc() is not unique -LINE 1: select dfunc(); - ^ -HINT: Could not choose a best candidate function. You might need to add explicit type casts. -select dfunc('Hi'); -- ok - dfunc ------------ - Hi, World -(1 row) - -select dfunc('Hi', 'City'); -- ok - dfunc ----------- - Hi, City -(1 row) - -select dfunc(0); -- ok - dfunc -------- - -1 -(1 row) - -select dfunc(10, 20); -- ok - dfunc -------- - 30 -(1 row) - -drop function dfunc(int, int); -drop function dfunc(text, text); -create function dfunc(int = 1, int = 2) returns int as $$ - select 2; -$$ language sql; -create function dfunc(int = 1, int = 2, int = 3, int = 4) returns int as $$ - select 4; -$$ language sql; --- Now, dfunc(nargs = 2) and dfunc(nargs = 4) are ambiguous when called --- with 0 to 2 arguments. -select dfunc(); -- fail -ERROR: function dfunc() is not unique -LINE 1: select dfunc(); - ^ -HINT: Could not choose a best candidate function. You might need to add explicit type casts. -select dfunc(1); -- fail -ERROR: function dfunc(integer) is not unique -LINE 1: select dfunc(1); - ^ -HINT: Could not choose a best candidate function. You might need to add explicit type casts. -select dfunc(1, 2); -- fail -ERROR: function dfunc(integer, integer) is not unique -LINE 1: select dfunc(1, 2); - ^ -HINT: Could not choose a best candidate function. You might need to add explicit type casts. 
-select dfunc(1, 2, 3); -- ok - dfunc -------- - 4 -(1 row) - -select dfunc(1, 2, 3, 4); -- ok - dfunc -------- - 4 -(1 row) - -drop function dfunc(int, int); -drop function dfunc(int, int, int, int); --- default values are not allowed for output parameters -create function dfunc(out int = 20) returns int as $$ - select 1; -$$ language sql; -ERROR: only input parameters can have default values -LINE 1: create function dfunc(out int = 20) returns int as $$ - ^ --- polymorphic parameter test -create function dfunc(anyelement = 'World'::text) returns text as $$ - select 'Hello, ' || $1::text; -$$ language sql; -select dfunc(); - dfunc --------------- - Hello, World -(1 row) - -select dfunc(0); - dfunc ----------- - Hello, 0 -(1 row) - -select dfunc(to_date('20081215','YYYYMMDD')); - dfunc -------------------- - Hello, 12-15-2008 -(1 row) - -select dfunc('City'::text); - dfunc -------------- - Hello, City -(1 row) - -drop function dfunc(anyelement); --- check defaults for variadics -create function dfunc(a variadic int[]) returns int as -$$ select array_upper($1, 1) $$ language sql; -select dfunc(); -- fail -ERROR: function dfunc() does not exist -LINE 1: select dfunc(); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -select dfunc(10); - dfunc -------- - 1 -(1 row) - -select dfunc(10,20); - dfunc -------- - 2 -(1 row) - -create or replace function dfunc(a variadic int[] default array[]::int[]) returns int as -$$ select array_upper($1, 1) $$ language sql; -select dfunc(); -- now ok - dfunc -------- - -(1 row) - -select dfunc(10); - dfunc -------- - 1 -(1 row) - -select dfunc(10,20); - dfunc -------- - 2 -(1 row) - --- can't remove the default once it exists -create or replace function dfunc(a variadic int[]) returns int as -$$ select array_upper($1, 1) $$ language sql; -ERROR: cannot remove parameter defaults from existing function -HINT: Use DROP FUNCTION dfunc(integer[]) first. -\df dfunc - List of functions - Schema | Name | Result data type | Argument data types | Type ---------+-------+------------------+-------------------------------------------------+------ - public | dfunc | integer | VARIADIC a integer[] DEFAULT ARRAY[]::integer[] | func -(1 row) - -drop function dfunc(a variadic int[]); --- Ambiguity should be reported only if there's not a better match available -create function dfunc(int = 1, int = 2, int = 3) returns int as $$ - select 3; -$$ language sql; -create function dfunc(int = 1, int = 2) returns int as $$ - select 2; -$$ language sql; -create function dfunc(text) returns text as $$ - select $1; -$$ language sql; --- dfunc(narg=2) and dfunc(narg=3) are ambiguous -select dfunc(1); -- fail -ERROR: function dfunc(integer) is not unique -LINE 1: select dfunc(1); - ^ -HINT: Could not choose a best candidate function. You might need to add explicit type casts. 
--- but this works since the ambiguous functions aren't preferred anyway -select dfunc('Hi'); - dfunc -------- - Hi -(1 row) - -drop function dfunc(int, int, int); -drop function dfunc(int, int); -drop function dfunc(text); --- --- Tests for named- and mixed-notation function calling --- -create function dfunc(a int, b int, c int = 0, d int = 0) - returns table (a int, b int, c int, d int) as $$ - select $1, $2, $3, $4; -$$ language sql; -select (dfunc(10,20,30)).*; - a | b | c | d -----+----+----+--- - 10 | 20 | 30 | 0 -(1 row) - -select (dfunc(a := 10, b := 20, c := 30)).*; - a | b | c | d -----+----+----+--- - 10 | 20 | 30 | 0 -(1 row) - -select * from dfunc(a := 10, b := 20); - a | b | c | d -----+----+---+--- - 10 | 20 | 0 | 0 -(1 row) - -select * from dfunc(b := 10, a := 20); - a | b | c | d -----+----+---+--- - 20 | 10 | 0 | 0 -(1 row) - -select * from dfunc(0); -- fail -ERROR: function dfunc(integer) does not exist -LINE 1: select * from dfunc(0); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -select * from dfunc(1,2); - a | b | c | d ----+---+---+--- - 1 | 2 | 0 | 0 -(1 row) - -select * from dfunc(1,2,c := 3); - a | b | c | d ----+---+---+--- - 1 | 2 | 3 | 0 -(1 row) - -select * from dfunc(1,2,d := 3); - a | b | c | d ----+---+---+--- - 1 | 2 | 0 | 3 -(1 row) - -select * from dfunc(x := 20, b := 10, x := 30); -- fail, duplicate name -ERROR: argument name "x" used more than once -LINE 1: select * from dfunc(x := 20, b := 10, x := 30); - ^ -select * from dfunc(10, b := 20, 30); -- fail, named args must be last -ERROR: positional argument cannot follow named argument -LINE 1: select * from dfunc(10, b := 20, 30); - ^ -select * from dfunc(x := 10, b := 20, c := 30); -- fail, unknown param -ERROR: function dfunc(x => integer, b => integer, c => integer) does not exist -LINE 1: select * from dfunc(x := 10, b := 20, c := 30); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -select * from dfunc(10, 10, a := 20); -- fail, a overlaps positional parameter -ERROR: function dfunc(integer, integer, a => integer) does not exist -LINE 1: select * from dfunc(10, 10, a := 20); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -select * from dfunc(1,c := 2,d := 3); -- fail, no value for b -ERROR: function dfunc(integer, c => integer, d => integer) does not exist -LINE 1: select * from dfunc(1,c := 2,d := 3); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
-drop function dfunc(int, int, int, int); --- test with different parameter types -create function dfunc(a varchar, b numeric, c date = current_date) - returns table (a varchar, b numeric, c date) as $$ - select $1, $2, $3; -$$ language sql; -select (dfunc('Hello World', 20, '2009-07-25'::date)).*; - a | b | c --------------+----+------------ - Hello World | 20 | 07-25-2009 -(1 row) - -select * from dfunc('Hello World', 20, '2009-07-25'::date); - a | b | c --------------+----+------------ - Hello World | 20 | 07-25-2009 -(1 row) - -select * from dfunc(c := '2009-07-25'::date, a := 'Hello World', b := 20); - a | b | c --------------+----+------------ - Hello World | 20 | 07-25-2009 -(1 row) - -select * from dfunc('Hello World', b := 20, c := '2009-07-25'::date); - a | b | c --------------+----+------------ - Hello World | 20 | 07-25-2009 -(1 row) - -select * from dfunc('Hello World', c := '2009-07-25'::date, b := 20); - a | b | c --------------+----+------------ - Hello World | 20 | 07-25-2009 -(1 row) - -select * from dfunc('Hello World', c := 20, b := '2009-07-25'::date); -- fail -ERROR: function dfunc(unknown, c => integer, b => date) does not exist -LINE 1: select * from dfunc('Hello World', c := 20, b := '2009-07-25... - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -drop function dfunc(varchar, numeric, date); --- test out parameters with named params -create function dfunc(a varchar = 'def a', out _a varchar, c numeric = NULL, out _c numeric) -returns record as $$ - select $1, $2; -$$ language sql; -select (dfunc()).*; - _a | _c --------+---- - def a | -(1 row) - -select * from dfunc(); - _a | _c --------+---- - def a | -(1 row) - -select * from dfunc('Hello', 100); - _a | _c --------+----- - Hello | 100 -(1 row) - -select * from dfunc(a := 'Hello', c := 100); - _a | _c --------+----- - Hello | 100 -(1 row) - -select * from dfunc(c := 100, a := 'Hello'); - _a | _c --------+----- - Hello | 100 -(1 row) - -select * from dfunc('Hello'); - _a | _c --------+---- - Hello | -(1 row) - -select * from dfunc('Hello', c := 100); - _a | _c --------+----- - Hello | 100 -(1 row) - -select * from dfunc(c := 100); - _a | _c --------+----- - def a | 100 -(1 row) - --- fail, can no longer change an input parameter's name -create or replace function dfunc(a varchar = 'def a', out _a varchar, x numeric = NULL, out _c numeric) -returns record as $$ - select $1, $2; -$$ language sql; -ERROR: cannot change name of input parameter "c" -HINT: Use DROP FUNCTION dfunc(character varying,numeric) first. -create or replace function dfunc(a varchar = 'def a', out _a varchar, numeric = NULL, out _c numeric) -returns record as $$ - select $1, $2; -$$ language sql; -ERROR: cannot change name of input parameter "c" -HINT: Use DROP FUNCTION dfunc(character varying,numeric) first. -drop function dfunc(varchar, numeric); ---fail, named parameters are not unique -create function testpolym(a int, a int) returns int as $$ select 1;$$ language sql; -ERROR: parameter name "a" used more than once -LINE 1: create function testpolym(a int, a int) returns int as $$ se... - ^ -create function testpolym(int, out a int, out a int) returns int as $$ select 1;$$ language sql; -ERROR: parameter name "a" used more than once -LINE 1: create function testpolym(int, out a int, out a int) returns... 
- ^ -create function testpolym(out a int, inout a int) returns int as $$ select 1;$$ language sql; -ERROR: parameter name "a" used more than once -LINE 1: create function testpolym(out a int, inout a int) returns in... - ^ -create function testpolym(a int, inout a int) returns int as $$ select 1;$$ language sql; -ERROR: parameter name "a" used more than once -LINE 1: create function testpolym(a int, inout a int) returns int as... - ^ --- valid -create function testpolym(a int, out a int) returns int as $$ select $1;$$ language sql; -select testpolym(37); - testpolym ------------ - 37 -(1 row) - -drop function testpolym(int); -create function testpolym(a int) returns table(a int) as $$ select $1;$$ language sql; -select * from testpolym(37); - a ----- - 37 -(1 row) - -drop function testpolym(int); --- test polymorphic params and defaults -create function dfunc(a anyelement, b anyelement = null, flag bool = true) -returns anyelement as $$ - select case when $3 then $1 else $2 end; -$$ language sql; -select dfunc(1,2); - dfunc -------- - 1 -(1 row) - -select dfunc('a'::text, 'b'); -- positional notation with default - dfunc -------- - a -(1 row) - -select dfunc(a := 1, b := 2); - dfunc -------- - 1 -(1 row) - -select dfunc(a := 'a'::text, b := 'b'); - dfunc -------- - a -(1 row) - -select dfunc(a := 'a'::text, b := 'b', flag := false); -- named notation - dfunc -------- - b -(1 row) - -select dfunc(b := 'b'::text, a := 'a'); -- named notation with default - dfunc -------- - a -(1 row) - -select dfunc(a := 'a'::text, flag := true); -- named notation with default - dfunc -------- - a -(1 row) - -select dfunc(a := 'a'::text, flag := false); -- named notation with default - dfunc -------- - -(1 row) - -select dfunc(b := 'b'::text, a := 'a', flag := true); -- named notation - dfunc -------- - a -(1 row) - -select dfunc('a'::text, 'b', false); -- full positional notation - dfunc -------- - b -(1 row) - -select dfunc('a'::text, 'b', flag := false); -- mixed notation - dfunc -------- - b -(1 row) - -select dfunc('a'::text, 'b', true); -- full positional notation - dfunc -------- - a -(1 row) - -select dfunc('a'::text, 'b', flag := true); -- mixed notation - dfunc -------- - a -(1 row) - --- ansi/sql syntax -select dfunc(a => 1, b => 2); - dfunc -------- - 1 -(1 row) - -select dfunc(a => 'a'::text, b => 'b'); - dfunc -------- - a -(1 row) - -select dfunc(a => 'a'::text, b => 'b', flag => false); -- named notation - dfunc -------- - b -(1 row) - -select dfunc(b => 'b'::text, a => 'a'); -- named notation with default - dfunc -------- - a -(1 row) - -select dfunc(a => 'a'::text, flag => true); -- named notation with default - dfunc -------- - a -(1 row) - -select dfunc(a => 'a'::text, flag => false); -- named notation with default - dfunc -------- - -(1 row) - -select dfunc(b => 'b'::text, a => 'a', flag => true); -- named notation - dfunc -------- - a -(1 row) - -select dfunc('a'::text, 'b', false); -- full positional notation - dfunc -------- - b -(1 row) - -select dfunc('a'::text, 'b', flag => false); -- mixed notation - dfunc -------- - b -(1 row) - -select dfunc('a'::text, 'b', true); -- full positional notation - dfunc -------- - a -(1 row) - -select dfunc('a'::text, 'b', flag => true); -- mixed notation - dfunc -------- - a -(1 row) - --- this tests lexer edge cases around => -select dfunc(a =>-1); - dfunc -------- - -1 -(1 row) - -select dfunc(a =>+1); - dfunc -------- - 1 -(1 row) - -select dfunc(a =>/**/1); - dfunc -------- - 1 -(1 row) - -select dfunc(a =>--comment to be removed by psql - 1); 
- dfunc -------- - 1 -(1 row) - --- need DO to protect the -- from psql -do $$ - declare r integer; - begin - select dfunc(a=>-- comment - 1) into r; - raise info 'r = %', r; - end; -$$; -INFO: r = 1 --- check reverse-listing of named-arg calls -CREATE VIEW dfview AS - SELECT q1, q2, - dfunc(q1,q2, flag := q1>q2) as c3, - dfunc(q1, flag := q1<q2, b := q2) as c4 - FROM int8_tbl; -select * from dfview; - q1 | q2 | c3 | c4 -------------------+-------------------+------------------+------------------- - 123 | 456 | 456 | 123 - 123 | 4567890123456789 | 4567890123456789 | 123 - 4567890123456789 | 123 | 4567890123456789 | 123 - 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 - 4567890123456789 | -4567890123456789 | 4567890123456789 | -4567890123456789 -(5 rows) - -\d+ dfview - View "public.dfview" - Column | Type | Collation | Nullable | Default | Storage | Description ---------+--------+-----------+----------+---------+---------+------------- - q1 | bigint | | | | plain | - q2 | bigint | | | | plain | - c3 | bigint | | | | plain | - c4 | bigint | | | | plain | -View definition: - SELECT q1, - q2, - dfunc(q1, q2, flag => q1 > q2) AS c3, - dfunc(q1, flag => q1 < q2, b => q2) AS c4 - FROM int8_tbl; - -drop view dfview; -drop function dfunc(anyelement, anyelement, bool); --- --- Tests for ANYCOMPATIBLE polymorphism family --- -create function anyctest(anycompatible, anycompatible) -returns anycompatible as $$ - select greatest($1, $2) -$$ language sql; -select x, pg_typeof(x) from anyctest(11, 12) x; - x | pg_typeof -----+----------- - 12 | integer -(1 row) - -select x, pg_typeof(x) from anyctest(11, 12.3) x; - x | pg_typeof -------+----------- - 12.3 | numeric -(1 row) - -select x, pg_typeof(x) from anyctest(11, point(1,2)) x; -- fail -ERROR: function anyctest(integer, point) does not exist -LINE 1: select x, pg_typeof(x) from anyctest(11, point(1,2)) x; - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -select x, pg_typeof(x) from anyctest('11', '12.3') x; -- defaults to text - x | pg_typeof -------+----------- - 12.3 | text -(1 row) - -drop function anyctest(anycompatible, anycompatible); -create function anyctest(anycompatible, anycompatible) -returns anycompatiblearray as $$ - select array[$1, $2] -$$ language sql; -select x, pg_typeof(x) from anyctest(11, 12) x; - x | pg_typeof ----------+----------- - {11,12} | integer[] -(1 row) - -select x, pg_typeof(x) from anyctest(11, 12.3) x; - x | pg_typeof ------------+----------- - {11,12.3} | numeric[] -(1 row) - -select x, pg_typeof(x) from anyctest(11, array[1,2]) x; -- fail -ERROR: function anyctest(integer, integer[]) does not exist -LINE 1: select x, pg_typeof(x) from anyctest(11, array[1,2]) x; - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -drop function anyctest(anycompatible, anycompatible); -create function anyctest(anycompatible, anycompatiblearray) -returns anycompatiblearray as $$ - select array[$1] || $2 -$$ language sql; -select x, pg_typeof(x) from anyctest(11, array[12]) x; - x | pg_typeof ----------+----------- - {11,12} | integer[] -(1 row) - -select x, pg_typeof(x) from anyctest(11, array[12.3]) x; - x | pg_typeof ------------+----------- - {11,12.3} | numeric[] -(1 row) - -select x, pg_typeof(x) from anyctest(12.3, array[13]) x; - x | pg_typeof ------------+----------- - {12.3,13} | numeric[] -(1 row) - -select x, pg_typeof(x) from anyctest(12.3, '{13,14.4}') x; - x | pg_typeof -----------------+----------- - {12.3,13,14.4} | numeric[] -(1 row) - -select x, pg_typeof(x) from anyctest(11, array[point(1,2)]) x; -- fail -ERROR: function anyctest(integer, point[]) does not exist -LINE 1: select x, pg_typeof(x) from anyctest(11, array[point(1,2)]) ... - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -select x, pg_typeof(x) from anyctest(11, 12) x; -- fail -ERROR: function anyctest(integer, integer) does not exist -LINE 1: select x, pg_typeof(x) from anyctest(11, 12) x; - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-drop function anyctest(anycompatible, anycompatiblearray); -create function anyctest(anycompatible, anycompatiblerange) -returns anycompatiblerange as $$ - select $2 -$$ language sql; -select x, pg_typeof(x) from anyctest(11, int4range(4,7)) x; - x | pg_typeof --------+----------- - [4,7) | int4range -(1 row) - -select x, pg_typeof(x) from anyctest(11, numrange(4,7)) x; - x | pg_typeof --------+----------- - [4,7) | numrange -(1 row) - -select x, pg_typeof(x) from anyctest(11, 12) x; -- fail -ERROR: function anyctest(integer, integer) does not exist -LINE 1: select x, pg_typeof(x) from anyctest(11, 12) x; - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -select x, pg_typeof(x) from anyctest(11.2, int4range(4,7)) x; -- fail -ERROR: function anyctest(numeric, int4range) does not exist -LINE 1: select x, pg_typeof(x) from anyctest(11.2, int4range(4,7)) x... - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -select x, pg_typeof(x) from anyctest(11.2, '[4,7)') x; -- fail -ERROR: could not determine polymorphic type anycompatiblerange because input has type unknown -drop function anyctest(anycompatible, anycompatiblerange); -create function anyctest(anycompatiblerange, anycompatiblerange) -returns anycompatible as $$ - select lower($1) + upper($2) -$$ language sql; -select x, pg_typeof(x) from anyctest(int4range(11,12), int4range(4,7)) x; - x | pg_typeof -----+----------- - 18 | integer -(1 row) - -select x, pg_typeof(x) from anyctest(int4range(11,12), numrange(4,7)) x; -- fail -ERROR: function anyctest(int4range, numrange) does not exist -LINE 1: select x, pg_typeof(x) from anyctest(int4range(11,12), numra... - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -drop function anyctest(anycompatiblerange, anycompatiblerange); --- fail, can't infer result type: -create function anyctest(anycompatible) -returns anycompatiblerange as $$ - select $1 -$$ language sql; -ERROR: cannot determine result data type -DETAIL: A result of type anycompatiblerange requires at least one input of type anycompatiblerange or anycompatiblemultirange. -create function anyctest(anycompatible, anycompatiblemultirange) -returns anycompatiblemultirange as $$ - select $2 -$$ language sql; -select x, pg_typeof(x) from anyctest(11, multirange(int4range(4,7))) x; - x | pg_typeof ----------+---------------- - {[4,7)} | int4multirange -(1 row) - -select x, pg_typeof(x) from anyctest(11, multirange(numrange(4,7))) x; - x | pg_typeof ----------+--------------- - {[4,7)} | nummultirange -(1 row) - -select x, pg_typeof(x) from anyctest(11, 12) x; -- fail -ERROR: function anyctest(integer, integer) does not exist -LINE 1: select x, pg_typeof(x) from anyctest(11, 12) x; - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -select x, pg_typeof(x) from anyctest(11.2, multirange(int4range(4,7))) x; -- fail -ERROR: function anyctest(numeric, int4multirange) does not exist -LINE 1: select x, pg_typeof(x) from anyctest(11.2, multirange(int4ra... - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
-select x, pg_typeof(x) from anyctest(11.2, '{[4,7)}') x; -- fail -ERROR: could not determine polymorphic type anycompatiblemultirange because input has type unknown -drop function anyctest(anycompatible, anycompatiblemultirange); -create function anyctest(anycompatiblemultirange, anycompatiblemultirange) -returns anycompatible as $$ - select lower($1) + upper($2) -$$ language sql; -select x, pg_typeof(x) from anyctest(multirange(int4range(11,12)), multirange(int4range(4,7))) x; - x | pg_typeof -----+----------- - 18 | integer -(1 row) - -select x, pg_typeof(x) from anyctest(multirange(int4range(11,12)), multirange(numrange(4,7))) x; -- fail -ERROR: function anyctest(int4multirange, nummultirange) does not exist -LINE 1: select x, pg_typeof(x) from anyctest(multirange(int4range(11... - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -drop function anyctest(anycompatiblemultirange, anycompatiblemultirange); --- fail, can't infer result type: -create function anyctest(anycompatible) -returns anycompatiblemultirange as $$ - select $1 -$$ language sql; -ERROR: cannot determine result data type -DETAIL: A result of type anycompatiblemultirange requires at least one input of type anycompatiblerange or anycompatiblemultirange. -create function anyctest(anycompatiblenonarray, anycompatiblenonarray) -returns anycompatiblearray as $$ - select array[$1, $2] -$$ language sql; -select x, pg_typeof(x) from anyctest(11, 12) x; - x | pg_typeof ----------+----------- - {11,12} | integer[] -(1 row) - -select x, pg_typeof(x) from anyctest(11, 12.3) x; - x | pg_typeof ------------+----------- - {11,12.3} | numeric[] -(1 row) - -select x, pg_typeof(x) from anyctest(array[11], array[1,2]) x; -- fail -ERROR: function anyctest(integer[], integer[]) does not exist -LINE 1: select x, pg_typeof(x) from anyctest(array[11], array[1,2]) ... - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -drop function anyctest(anycompatiblenonarray, anycompatiblenonarray); -create function anyctest(a anyelement, b anyarray, - c anycompatible, d anycompatible) -returns anycompatiblearray as $$ - select array[c, d] -$$ language sql; -select x, pg_typeof(x) from anyctest(11, array[1, 2], 42, 34.5) x; - x | pg_typeof ------------+----------- - {42,34.5} | numeric[] -(1 row) - -select x, pg_typeof(x) from anyctest(11, array[1, 2], point(1,2), point(3,4)) x; - x | pg_typeof --------------------+----------- - {"(1,2)","(3,4)"} | point[] -(1 row) - -select x, pg_typeof(x) from anyctest(11, '{1,2}', point(1,2), '(3,4)') x; - x | pg_typeof --------------------+----------- - {"(1,2)","(3,4)"} | point[] -(1 row) - -select x, pg_typeof(x) from anyctest(11, array[1, 2.2], 42, 34.5) x; -- fail -ERROR: function anyctest(integer, numeric[], integer, numeric) does not exist -LINE 1: select x, pg_typeof(x) from anyctest(11, array[1, 2.2], 42, ... - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
-drop function anyctest(a anyelement, b anyarray, - c anycompatible, d anycompatible); -create function anyctest(variadic anycompatiblearray) -returns anycompatiblearray as $$ - select $1 -$$ language sql; -select x, pg_typeof(x) from anyctest(11, 12) x; - x | pg_typeof ----------+----------- - {11,12} | integer[] -(1 row) - -select x, pg_typeof(x) from anyctest(11, 12.2) x; - x | pg_typeof ------------+----------- - {11,12.2} | numeric[] -(1 row) - -select x, pg_typeof(x) from anyctest(11, '12') x; - x | pg_typeof ----------+----------- - {11,12} | integer[] -(1 row) - -select x, pg_typeof(x) from anyctest(11, '12.2') x; -- fail -ERROR: invalid input syntax for type integer: "12.2" -LINE 1: select x, pg_typeof(x) from anyctest(11, '12.2') x; - ^ -select x, pg_typeof(x) from anyctest(variadic array[11, 12]) x; - x | pg_typeof ----------+----------- - {11,12} | integer[] -(1 row) - -select x, pg_typeof(x) from anyctest(variadic array[11, 12.2]) x; - x | pg_typeof ------------+----------- - {11,12.2} | numeric[] -(1 row) - -drop function anyctest(variadic anycompatiblearray); +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/rowtypes.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/rowtypes.out --- /Users/admin/pgsql/src/test/regress/expected/rowtypes.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/rowtypes.out 2024-12-13 13:20:12 @@ -1,1396 +1,2 @@ --- --- ROWTYPES --- --- Make both a standalone composite type and a table rowtype -create type complex as (r float8, i float8); -create temp table fullname (first text, last text); --- Nested composite -create type quad as (c1 complex, c2 complex); --- Some simple tests of I/O conversions and row construction -select (1.1,2.2)::complex, row((3.3,4.4),(5.5,null))::quad; - row | row ------------+------------------------ - (1.1,2.2) | ("(3.3,4.4)","(5.5,)") -(1 row) - -select row('Joe', 'Blow')::fullname, '(Joe,Blow)'::fullname; - row | fullname -------------+------------ - (Joe,Blow) | (Joe,Blow) -(1 row) - -select '(Joe,von Blow)'::fullname, '(Joe,d''Blow)'::fullname; - fullname | fullname -------------------+-------------- - (Joe,"von Blow") | (Joe,d'Blow) -(1 row) - -select '(Joe,"von""Blow")'::fullname, E'(Joe,d\\\\Blow)'::fullname; - fullname | fullname --------------------+----------------- - (Joe,"von""Blow") | (Joe,"d\\Blow") -(1 row) - -select '(Joe,"Blow,Jr")'::fullname; - fullname ------------------ - (Joe,"Blow,Jr") -(1 row) - -select '(Joe,)'::fullname; -- ok, null 2nd column - fullname ----------- - (Joe,) -(1 row) - -select '(Joe)'::fullname; -- bad -ERROR: malformed record literal: "(Joe)" -LINE 1: select '(Joe)'::fullname; - ^ -DETAIL: Too few columns. -select '(Joe,,)'::fullname; -- bad -ERROR: malformed record literal: "(Joe,,)" -LINE 1: select '(Joe,,)'::fullname; - ^ -DETAIL: Too many columns. -select '[]'::fullname; -- bad -ERROR: malformed record literal: "[]" -LINE 1: select '[]'::fullname; - ^ -DETAIL: Missing left parenthesis. -select ' (Joe,Blow) '::fullname; -- ok, extra whitespace - fullname ------------- - (Joe,Blow) -(1 row) - -select '(Joe,Blow) /'::fullname; -- bad -ERROR: malformed record literal: "(Joe,Blow) /" -LINE 1: select '(Joe,Blow) /'::fullname; - ^ -DETAIL: Junk after right parenthesis. 
--- test non-error-throwing API -SELECT pg_input_is_valid('(1,2)', 'complex'); - pg_input_is_valid -------------------- - t -(1 row) - -SELECT pg_input_is_valid('(1,2', 'complex'); - pg_input_is_valid -------------------- - f -(1 row) - -SELECT pg_input_is_valid('(1,zed)', 'complex'); - pg_input_is_valid -------------------- - f -(1 row) - -SELECT * FROM pg_input_error_info('(1,zed)', 'complex'); - message | detail | hint | sql_error_code --------------------------------------------------------+--------+------+---------------- - invalid input syntax for type double precision: "zed" | | | 22P02 -(1 row) - -SELECT * FROM pg_input_error_info('(1,1e400)', 'complex'); - message | detail | hint | sql_error_code ----------------------------------------------------+--------+------+---------------- - "1e400" is out of range for type double precision | | | 22003 -(1 row) - -create temp table quadtable(f1 int, q quad); -insert into quadtable values (1, ((3.3,4.4),(5.5,6.6))); -insert into quadtable values (2, ((null,4.4),(5.5,6.6))); -select * from quadtable; - f1 | q -----+--------------------------- - 1 | ("(3.3,4.4)","(5.5,6.6)") - 2 | ("(,4.4)","(5.5,6.6)") -(2 rows) - -select f1, q.c1 from quadtable; -- fails, q is a table reference -ERROR: missing FROM-clause entry for table "q" -LINE 1: select f1, q.c1 from quadtable; - ^ -select f1, (q).c1, (qq.q).c1.i from quadtable qq; - f1 | c1 | i -----+-----------+----- - 1 | (3.3,4.4) | 4.4 - 2 | (,4.4) | 4.4 -(2 rows) - -create temp table people (fn fullname, bd date); -insert into people values ('(Joe,Blow)', '1984-01-10'); -select * from people; - fn | bd -------------+------------ - (Joe,Blow) | 01-10-1984 -(1 row) - --- at the moment this will not work due to ALTER TABLE inadequacy: -alter table fullname add column suffix text default ''; -ERROR: cannot alter table "fullname" because column "people.fn" uses its row type --- but this should work: -alter table fullname add column suffix text default null; -select * from people; - fn | bd --------------+------------ - (Joe,Blow,) | 01-10-1984 -(1 row) - --- test insertion/updating of subfields -update people set fn.suffix = 'Jr'; -select * from people; - fn | bd ----------------+------------ - (Joe,Blow,Jr) | 01-10-1984 -(1 row) - -insert into quadtable (f1, q.c1.r, q.c2.i) values(44,55,66); -update quadtable set q.c1.r = 12 where f1 = 2; -update quadtable set q.c1 = 12; -- error, type mismatch -ERROR: subfield "c1" is of type complex but expression is of type integer -LINE 1: update quadtable set q.c1 = 12; - ^ -HINT: You will need to rewrite or cast the expression. -select * from quadtable; - f1 | q -----+--------------------------- - 1 | ("(3.3,4.4)","(5.5,6.6)") - 44 | ("(55,)","(,66)") - 2 | ("(12,4.4)","(5.5,6.6)") -(3 rows) - --- The object here is to ensure that toasted references inside --- composite values don't cause problems. The large f1 value will --- be toasted inside pp, it must still work after being copied to people. 
-create temp table pp (f1 text); -insert into pp values (repeat('abcdefghijkl', 100000)); -insert into people select ('Jim', f1, null)::fullname, current_date from pp; -select (fn).first, substr((fn).last, 1, 20), length((fn).last) from people; - first | substr | length --------+----------------------+--------- - Joe | Blow | 4 - Jim | abcdefghijklabcdefgh | 1200000 -(2 rows) - --- try an update on a toasted composite value, too -update people set fn.first = 'Jack'; -select (fn).first, substr((fn).last, 1, 20), length((fn).last) from people; - first | substr | length --------+----------------------+--------- - Jack | Blow | 4 - Jack | abcdefghijklabcdefgh | 1200000 -(2 rows) - --- Test row comparison semantics. Prior to PG 8.2 we did this in a totally --- non-spec-compliant way. -select ROW(1,2) < ROW(1,3) as true; - true ------- - t -(1 row) - -select ROW(1,2) < ROW(1,1) as false; - false -------- - f -(1 row) - -select ROW(1,2) < ROW(1,NULL) as null; - null ------- - -(1 row) - -select ROW(1,2,3) < ROW(1,3,NULL) as true; -- the NULL is not examined - true ------- - t -(1 row) - -select ROW(11,'ABC') < ROW(11,'DEF') as true; - true ------- - t -(1 row) - -select ROW(11,'ABC') > ROW(11,'DEF') as false; - false -------- - f -(1 row) - -select ROW(12,'ABC') > ROW(11,'DEF') as true; - true ------- - t -(1 row) - --- = and <> have different NULL-behavior than < etc -select ROW(1,2,3) < ROW(1,NULL,4) as null; - null ------- - -(1 row) - -select ROW(1,2,3) = ROW(1,NULL,4) as false; - false -------- - f -(1 row) - -select ROW(1,2,3) <> ROW(1,NULL,4) as true; - true ------- - t -(1 row) - --- We allow operators beyond the six standard ones, if they have btree --- operator classes. -select ROW('ABC','DEF') ~<=~ ROW('DEF','ABC') as true; - true ------- - t -(1 row) - -select ROW('ABC','DEF') ~>=~ ROW('DEF','ABC') as false; - false -------- - f -(1 row) - -select ROW('ABC','DEF') ~~ ROW('DEF','ABC') as fail; -ERROR: could not determine interpretation of row comparison operator ~~ -LINE 1: select ROW('ABC','DEF') ~~ ROW('DEF','ABC') as fail; - ^ -HINT: Row comparison operators must be associated with btree operator families. --- Comparisons of ROW() expressions can cope with some type mismatches -select ROW(1,2) = ROW(1,2::int8); - ?column? ----------- - t -(1 row) - -select ROW(1,2) in (ROW(3,4), ROW(1,2)); - ?column? ----------- - t -(1 row) - -select ROW(1,2) in (ROW(3,4), ROW(1,2::int8)); - ?column? 
----------- - t -(1 row) - --- Check row comparison with a subselect -select unique1, unique2 from tenk1 -where (unique1, unique2) < any (select ten, ten from tenk1 where hundred < 3) - and unique1 <= 20 -order by 1; - unique1 | unique2 ----------+--------- - 0 | 9998 - 1 | 2838 -(2 rows) - --- Also check row comparison with an indexable condition -explain (costs off) -select thousand, tenthous from tenk1 -where (thousand, tenthous) >= (997, 5000) -order by thousand, tenthous; - QUERY PLAN ------------------------------------------------------------ - Index Only Scan using tenk1_thous_tenthous on tenk1 - Index Cond: (ROW(thousand, tenthous) >= ROW(997, 5000)) -(2 rows) - -select thousand, tenthous from tenk1 -where (thousand, tenthous) >= (997, 5000) -order by thousand, tenthous; - thousand | tenthous -----------+---------- - 997 | 5997 - 997 | 6997 - 997 | 7997 - 997 | 8997 - 997 | 9997 - 998 | 998 - 998 | 1998 - 998 | 2998 - 998 | 3998 - 998 | 4998 - 998 | 5998 - 998 | 6998 - 998 | 7998 - 998 | 8998 - 998 | 9998 - 999 | 999 - 999 | 1999 - 999 | 2999 - 999 | 3999 - 999 | 4999 - 999 | 5999 - 999 | 6999 - 999 | 7999 - 999 | 8999 - 999 | 9999 -(25 rows) - -explain (costs off) -select thousand, tenthous, four from tenk1 -where (thousand, tenthous, four) > (998, 5000, 3) -order by thousand, tenthous; - QUERY PLAN ------------------------------------------------------------------------ - Sort - Sort Key: thousand, tenthous - -> Bitmap Heap Scan on tenk1 - Filter: (ROW(thousand, tenthous, four) > ROW(998, 5000, 3)) - -> Bitmap Index Scan on tenk1_thous_tenthous - Index Cond: (ROW(thousand, tenthous) >= ROW(998, 5000)) -(6 rows) - -select thousand, tenthous, four from tenk1 -where (thousand, tenthous, four) > (998, 5000, 3) -order by thousand, tenthous; - thousand | tenthous | four -----------+----------+------ - 998 | 5998 | 2 - 998 | 6998 | 2 - 998 | 7998 | 2 - 998 | 8998 | 2 - 998 | 9998 | 2 - 999 | 999 | 3 - 999 | 1999 | 3 - 999 | 2999 | 3 - 999 | 3999 | 3 - 999 | 4999 | 3 - 999 | 5999 | 3 - 999 | 6999 | 3 - 999 | 7999 | 3 - 999 | 8999 | 3 - 999 | 9999 | 3 -(15 rows) - -explain (costs off) -select thousand, tenthous from tenk1 -where (998, 5000) < (thousand, tenthous) -order by thousand, tenthous; - QUERY PLAN ----------------------------------------------------------- - Index Only Scan using tenk1_thous_tenthous on tenk1 - Index Cond: (ROW(thousand, tenthous) > ROW(998, 5000)) -(2 rows) - -select thousand, tenthous from tenk1 -where (998, 5000) < (thousand, tenthous) -order by thousand, tenthous; - thousand | tenthous -----------+---------- - 998 | 5998 - 998 | 6998 - 998 | 7998 - 998 | 8998 - 998 | 9998 - 999 | 999 - 999 | 1999 - 999 | 2999 - 999 | 3999 - 999 | 4999 - 999 | 5999 - 999 | 6999 - 999 | 7999 - 999 | 8999 - 999 | 9999 -(15 rows) - -explain (costs off) -select thousand, hundred from tenk1 -where (998, 5000) < (thousand, hundred) -order by thousand, hundred; - QUERY PLAN ------------------------------------------------------------ - Sort - Sort Key: thousand, hundred - -> Bitmap Heap Scan on tenk1 - Filter: (ROW(998, 5000) < ROW(thousand, hundred)) - -> Bitmap Index Scan on tenk1_thous_tenthous - Index Cond: (thousand >= 998) -(6 rows) - -select thousand, hundred from tenk1 -where (998, 5000) < (thousand, hundred) -order by thousand, hundred; - thousand | hundred -----------+--------- - 999 | 99 - 999 | 99 - 999 | 99 - 999 | 99 - 999 | 99 - 999 | 99 - 999 | 99 - 999 | 99 - 999 | 99 - 999 | 99 -(10 rows) - --- Test case for bug #14010: indexed row comparisons fail with nulls 
-create temp table test_table (a text, b text); -insert into test_table values ('a', 'b'); -insert into test_table select 'a', null from generate_series(1,1000); -insert into test_table values ('b', 'a'); -create index on test_table (a,b); -set enable_sort = off; -explain (costs off) -select a,b from test_table where (a,b) > ('a','a') order by a,b; - QUERY PLAN --------------------------------------------------------- - Index Only Scan using test_table_a_b_idx on test_table - Index Cond: (ROW(a, b) > ROW('a'::text, 'a'::text)) -(2 rows) - -select a,b from test_table where (a,b) > ('a','a') order by a,b; - a | b ----+--- - a | b - b | a -(2 rows) - -reset enable_sort; --- Check row comparisons with IN -select * from int8_tbl i8 where i8 in (row(123,456)); -- fail, type mismatch -ERROR: cannot compare dissimilar column types bigint and integer at record column 1 -explain (costs off) -select * from int8_tbl i8 -where i8 in (row(123,456)::int8_tbl, '(4567890123456789,123)'); - QUERY PLAN -------------------------------------------------------------------------------- - Seq Scan on int8_tbl i8 - Filter: (i8.* = ANY ('{"(123,456)","(4567890123456789,123)"}'::int8_tbl[])) -(2 rows) - -select * from int8_tbl i8 -where i8 in (row(123,456)::int8_tbl, '(4567890123456789,123)'); - q1 | q2 -------------------+----- - 123 | 456 - 4567890123456789 | 123 -(2 rows) - --- Check ability to select columns from an anonymous rowtype -select (row(1, 2.0)).f1; - f1 ----- - 1 -(1 row) - -select (row(1, 2.0)).f2; - f2 ------ - 2.0 -(1 row) - -select (row(1, 2.0)).nosuch; -- fail -ERROR: could not identify column "nosuch" in record data type -LINE 1: select (row(1, 2.0)).nosuch; - ^ -select (row(1, 2.0)).*; - f1 | f2 -----+----- - 1 | 2.0 -(1 row) - -select (r).f1 from (select row(1, 2.0) as r) ss; - f1 ----- - 1 -(1 row) - -select (r).f3 from (select row(1, 2.0) as r) ss; -- fail -ERROR: could not identify column "f3" in record data type -LINE 1: select (r).f3 from (select row(1, 2.0) as r) ss; - ^ -select (r).* from (select row(1, 2.0) as r) ss; - f1 | f2 -----+----- - 1 | 2.0 -(1 row) - --- Check some corner cases involving empty rowtypes -select ROW(); - row ------ - () -(1 row) - -select ROW() IS NULL; - ?column? ----------- - t -(1 row) - -select ROW() = ROW(); -ERROR: cannot compare rows of zero length -LINE 1: select ROW() = ROW(); - ^ --- Check ability to create arrays of anonymous rowtypes -select array[ row(1,2), row(3,4), row(5,6) ]; - array ---------------------------- - {"(1,2)","(3,4)","(5,6)"} -(1 row) - --- Check ability to compare an anonymous row to elements of an array -select row(1,1.1) = any (array[ row(7,7.7), row(1,1.1), row(0,0.0) ]); - ?column? ----------- - t -(1 row) - -select row(1,1.1) = any (array[ row(7,7.7), row(1,1.0), row(0,0.0) ]); - ?column? ----------- - f -(1 row) - --- Check behavior with a non-comparable rowtype -create type cantcompare as (p point, r float8); -create temp table cc (f1 cantcompare); -insert into cc values('("(1,2)",3)'); -insert into cc values('("(4,5)",6)'); -select * from cc order by f1; -- fail, but should complain about cantcompare -ERROR: could not identify an ordering operator for type cantcompare -LINE 1: select * from cc order by f1; - ^ -HINT: Use an explicit ordering operator or modify the query. --- --- Tests for record_{eq,cmp} --- -create type testtype1 as (a int, b int); --- all true -select row(1, 2)::testtype1 < row(1, 3)::testtype1; - ?column? ----------- - t -(1 row) - -select row(1, 2)::testtype1 <= row(1, 3)::testtype1; - ?column? 
----------- - t -(1 row) - -select row(1, 2)::testtype1 = row(1, 2)::testtype1; - ?column? ----------- - t -(1 row) - -select row(1, 2)::testtype1 <> row(1, 3)::testtype1; - ?column? ----------- - t -(1 row) - -select row(1, 3)::testtype1 >= row(1, 2)::testtype1; - ?column? ----------- - t -(1 row) - -select row(1, 3)::testtype1 > row(1, 2)::testtype1; - ?column? ----------- - t -(1 row) - --- all false -select row(1, -2)::testtype1 < row(1, -3)::testtype1; - ?column? ----------- - f -(1 row) - -select row(1, -2)::testtype1 <= row(1, -3)::testtype1; - ?column? ----------- - f -(1 row) - -select row(1, -2)::testtype1 = row(1, -3)::testtype1; - ?column? ----------- - f -(1 row) - -select row(1, -2)::testtype1 <> row(1, -2)::testtype1; - ?column? ----------- - f -(1 row) - -select row(1, -3)::testtype1 >= row(1, -2)::testtype1; - ?column? ----------- - f -(1 row) - -select row(1, -3)::testtype1 > row(1, -2)::testtype1; - ?column? ----------- - f -(1 row) - --- true, but see *< below -select row(1, -2)::testtype1 < row(1, 3)::testtype1; - ?column? ----------- - t -(1 row) - --- mismatches -create type testtype3 as (a int, b text); -select row(1, 2)::testtype1 < row(1, 'abc')::testtype3; -ERROR: cannot compare dissimilar column types integer and text at record column 2 -select row(1, 2)::testtype1 <> row(1, 'abc')::testtype3; -ERROR: cannot compare dissimilar column types integer and text at record column 2 -create type testtype5 as (a int); -select row(1, 2)::testtype1 < row(1)::testtype5; -ERROR: cannot compare record types with different numbers of columns -select row(1, 2)::testtype1 <> row(1)::testtype5; -ERROR: cannot compare record types with different numbers of columns --- non-comparable types -create type testtype6 as (a int, b point); -select row(1, '(1,2)')::testtype6 < row(1, '(1,3)')::testtype6; -ERROR: could not identify a comparison function for type point -select row(1, '(1,2)')::testtype6 <> row(1, '(1,3)')::testtype6; -ERROR: could not identify an equality operator for type point -drop type testtype1, testtype3, testtype5, testtype6; --- --- Tests for record_image_{eq,cmp} --- -create type testtype1 as (a int, b int); --- all true -select row(1, 2)::testtype1 *< row(1, 3)::testtype1; - ?column? ----------- - t -(1 row) - -select row(1, 2)::testtype1 *<= row(1, 3)::testtype1; - ?column? ----------- - t -(1 row) - -select row(1, 2)::testtype1 *= row(1, 2)::testtype1; - ?column? ----------- - t -(1 row) - -select row(1, 2)::testtype1 *<> row(1, 3)::testtype1; - ?column? ----------- - t -(1 row) - -select row(1, 3)::testtype1 *>= row(1, 2)::testtype1; - ?column? ----------- - t -(1 row) - -select row(1, 3)::testtype1 *> row(1, 2)::testtype1; - ?column? ----------- - t -(1 row) - --- all false -select row(1, -2)::testtype1 *< row(1, -3)::testtype1; - ?column? ----------- - f -(1 row) - -select row(1, -2)::testtype1 *<= row(1, -3)::testtype1; - ?column? ----------- - f -(1 row) - -select row(1, -2)::testtype1 *= row(1, -3)::testtype1; - ?column? ----------- - f -(1 row) - -select row(1, -2)::testtype1 *<> row(1, -2)::testtype1; - ?column? ----------- - f -(1 row) - -select row(1, -3)::testtype1 *>= row(1, -2)::testtype1; - ?column? ----------- - f -(1 row) - -select row(1, -3)::testtype1 *> row(1, -2)::testtype1; - ?column? ----------- - f -(1 row) - --- This returns the "wrong" order because record_image_cmp works on --- unsigned datums without knowing about the actual data type. -select row(1, -2)::testtype1 *< row(1, 3)::testtype1; - ?column? 
----------- - f -(1 row) - --- other types -create type testtype2 as (a smallint, b bool); -- byval different sizes -select row(1, true)::testtype2 *< row(2, true)::testtype2; - ?column? ----------- - t -(1 row) - -select row(-2, true)::testtype2 *< row(-1, true)::testtype2; - ?column? ----------- - t -(1 row) - -select row(0, false)::testtype2 *< row(0, true)::testtype2; - ?column? ----------- - t -(1 row) - -select row(0, false)::testtype2 *<> row(0, true)::testtype2; - ?column? ----------- - t -(1 row) - -create type testtype3 as (a int, b text); -- variable length -select row(1, 'abc')::testtype3 *< row(1, 'abd')::testtype3; - ?column? ----------- - t -(1 row) - -select row(1, 'abc')::testtype3 *< row(1, 'abcd')::testtype3; - ?column? ----------- - t -(1 row) - -select row(1, 'abc')::testtype3 *> row(1, 'abd')::testtype3; - ?column? ----------- - f -(1 row) - -select row(1, 'abc')::testtype3 *<> row(1, 'abd')::testtype3; - ?column? ----------- - t -(1 row) - -create type testtype4 as (a int, b point); -- by ref, fixed length -select row(1, '(1,2)')::testtype4 *< row(1, '(1,3)')::testtype4; - ?column? ----------- - t -(1 row) - -select row(1, '(1,2)')::testtype4 *<> row(1, '(1,3)')::testtype4; - ?column? ----------- - t -(1 row) - --- mismatches -select row(1, 2)::testtype1 *< row(1, 'abc')::testtype3; -ERROR: cannot compare dissimilar column types integer and text at record column 2 -select row(1, 2)::testtype1 *<> row(1, 'abc')::testtype3; -ERROR: cannot compare dissimilar column types integer and text at record column 2 -create type testtype5 as (a int); -select row(1, 2)::testtype1 *< row(1)::testtype5; -ERROR: cannot compare record types with different numbers of columns -select row(1, 2)::testtype1 *<> row(1)::testtype5; -ERROR: cannot compare record types with different numbers of columns --- non-comparable types -create type testtype6 as (a int, b point); -select row(1, '(1,2)')::testtype6 *< row(1, '(1,3)')::testtype6; - ?column? ----------- - t -(1 row) - -select row(1, '(1,2)')::testtype6 *>= row(1, '(1,3)')::testtype6; - ?column? ----------- - f -(1 row) - -select row(1, '(1,2)')::testtype6 *<> row(1, '(1,3)')::testtype6; - ?column? ----------- - t -(1 row) - --- anonymous rowtypes in coldeflists -select q.a, q.b = row(2), q.c = array[row(3)], q.d = row(row(4)) from - unnest(array[row(1, row(2), array[row(3)], row(row(4))), - row(2, row(3), array[row(4)], row(row(5)))]) - as q(a int, b record, c record[], d record); - a | ?column? | ?column? | ?column? 
----+----------+----------+---------- - 1 | t | t | t - 2 | f | f | f -(2 rows) - -drop type testtype1, testtype2, testtype3, testtype4, testtype5, testtype6; --- --- Test case derived from bug #5716: check multiple uses of a rowtype result --- -BEGIN; -CREATE TABLE price ( - id SERIAL PRIMARY KEY, - active BOOLEAN NOT NULL, - price NUMERIC -); -CREATE TYPE price_input AS ( - id INTEGER, - price NUMERIC -); -CREATE TYPE price_key AS ( - id INTEGER -); -CREATE FUNCTION price_key_from_table(price) RETURNS price_key AS $$ - SELECT $1.id -$$ LANGUAGE SQL; -CREATE FUNCTION price_key_from_input(price_input) RETURNS price_key AS $$ - SELECT $1.id -$$ LANGUAGE SQL; -insert into price values (1,false,42), (10,false,100), (11,true,17.99); -UPDATE price - SET active = true, price = input_prices.price - FROM unnest(ARRAY[(10, 123.00), (11, 99.99)]::price_input[]) input_prices - WHERE price_key_from_table(price.*) = price_key_from_input(input_prices.*); -select * from price; - id | active | price -----+--------+-------- - 1 | f | 42 - 10 | t | 123.00 - 11 | t | 99.99 -(3 rows) - -rollback; --- --- Test case derived from bug #9085: check * qualification of composite --- parameters for SQL functions --- -create temp table compos (f1 int, f2 text); -create function fcompos1(v compos) returns void as $$ -insert into compos values (v); -- fail -$$ language sql; -ERROR: column "f1" is of type integer but expression is of type compos -LINE 2: insert into compos values (v); -- fail - ^ -HINT: You will need to rewrite or cast the expression. -create function fcompos1(v compos) returns void as $$ -insert into compos values (v.*); -$$ language sql; -create function fcompos2(v compos) returns void as $$ -select fcompos1(v); -$$ language sql; -create function fcompos3(v compos) returns void as $$ -select fcompos1(fcompos3.v.*); -$$ language sql; -select fcompos1(row(1,'one')); - fcompos1 ----------- - -(1 row) - -select fcompos2(row(2,'two')); - fcompos2 ----------- - -(1 row) - -select fcompos3(row(3,'three')); - fcompos3 ----------- - -(1 row) - -select * from compos; - f1 | f2 -----+------- - 1 | one - 2 | two - 3 | three -(3 rows) - --- --- We allow I/O conversion casts from composite types to strings to be --- invoked via cast syntax, but not functional syntax. This is because --- the latter is too prone to be invoked unintentionally. --- -select cast (fullname as text) from fullname; - fullname ----------- -(0 rows) - -select fullname::text from fullname; - fullname ----------- -(0 rows) - -select text(fullname) from fullname; -- error -ERROR: function text(fullname) does not exist -LINE 1: select text(fullname) from fullname; - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -select fullname.text from fullname; -- error -ERROR: column fullname.text does not exist -LINE 1: select fullname.text from fullname; - ^ --- same, but RECORD instead of named composite type: -select cast (row('Jim', 'Beam') as text); - row ------------- - (Jim,Beam) -(1 row) - -select (row('Jim', 'Beam'))::text; - row ------------- - (Jim,Beam) -(1 row) - -select text(row('Jim', 'Beam')); -- error -ERROR: function text(record) does not exist -LINE 1: select text(row('Jim', 'Beam')); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
-select (row('Jim', 'Beam')).text; -- error -ERROR: could not identify column "text" in record data type -LINE 1: select (row('Jim', 'Beam')).text; - ^ --- --- Check the equivalence of functional and column notation --- -insert into fullname values ('Joe', 'Blow'); -select f.last from fullname f; - last ------- - Blow -(1 row) - -select last(f) from fullname f; - last ------- - Blow -(1 row) - -create function longname(fullname) returns text language sql -as $$select $1.first || ' ' || $1.last$$; -select f.longname from fullname f; - longname ----------- - Joe Blow -(1 row) - -select longname(f) from fullname f; - longname ----------- - Joe Blow -(1 row) - --- Starting in v11, the notational form does matter if there's ambiguity -alter table fullname add column longname text; -select f.longname from fullname f; - longname ----------- - -(1 row) - -select longname(f) from fullname f; - longname ----------- - Joe Blow -(1 row) - --- --- Test that composite values are seen to have the correct column names --- (bug #11210 and other reports) --- -select row_to_json(i) from int8_tbl i; - row_to_json ------------------------------------------------- - {"q1":123,"q2":456} - {"q1":123,"q2":4567890123456789} - {"q1":4567890123456789,"q2":123} - {"q1":4567890123456789,"q2":4567890123456789} - {"q1":4567890123456789,"q2":-4567890123456789} -(5 rows) - --- since "i" is of type "int8_tbl", attaching aliases doesn't change anything: -select row_to_json(i) from int8_tbl i(x,y); - row_to_json ------------------------------------------------- - {"q1":123,"q2":456} - {"q1":123,"q2":4567890123456789} - {"q1":4567890123456789,"q2":123} - {"q1":4567890123456789,"q2":4567890123456789} - {"q1":4567890123456789,"q2":-4567890123456789} -(5 rows) - --- in these examples, we'll report the exposed column names of the subselect: -select row_to_json(ss) from - (select q1, q2 from int8_tbl) as ss; - row_to_json ------------------------------------------------- - {"q1":123,"q2":456} - {"q1":123,"q2":4567890123456789} - {"q1":4567890123456789,"q2":123} - {"q1":4567890123456789,"q2":4567890123456789} - {"q1":4567890123456789,"q2":-4567890123456789} -(5 rows) - -select row_to_json(ss) from - (select q1, q2 from int8_tbl offset 0) as ss; - row_to_json ------------------------------------------------- - {"q1":123,"q2":456} - {"q1":123,"q2":4567890123456789} - {"q1":4567890123456789,"q2":123} - {"q1":4567890123456789,"q2":4567890123456789} - {"q1":4567890123456789,"q2":-4567890123456789} -(5 rows) - -select row_to_json(ss) from - (select q1 as a, q2 as b from int8_tbl) as ss; - row_to_json ----------------------------------------------- - {"a":123,"b":456} - {"a":123,"b":4567890123456789} - {"a":4567890123456789,"b":123} - {"a":4567890123456789,"b":4567890123456789} - {"a":4567890123456789,"b":-4567890123456789} -(5 rows) - -select row_to_json(ss) from - (select q1 as a, q2 as b from int8_tbl offset 0) as ss; - row_to_json ----------------------------------------------- - {"a":123,"b":456} - {"a":123,"b":4567890123456789} - {"a":4567890123456789,"b":123} - {"a":4567890123456789,"b":4567890123456789} - {"a":4567890123456789,"b":-4567890123456789} -(5 rows) - -select row_to_json(ss) from - (select q1 as a, q2 as b from int8_tbl) as ss(x,y); - row_to_json ----------------------------------------------- - {"x":123,"y":456} - {"x":123,"y":4567890123456789} - {"x":4567890123456789,"y":123} - {"x":4567890123456789,"y":4567890123456789} - {"x":4567890123456789,"y":-4567890123456789} -(5 rows) - -select row_to_json(ss) from - (select q1 
as a, q2 as b from int8_tbl offset 0) as ss(x,y); - row_to_json ----------------------------------------------- - {"x":123,"y":456} - {"x":123,"y":4567890123456789} - {"x":4567890123456789,"y":123} - {"x":4567890123456789,"y":4567890123456789} - {"x":4567890123456789,"y":-4567890123456789} -(5 rows) - -explain (costs off) -select row_to_json(q) from - (select thousand, tenthous from tenk1 - where thousand = 42 and tenthous < 2000 offset 0) q; - QUERY PLAN -------------------------------------------------------------- - Subquery Scan on q - -> Index Only Scan using tenk1_thous_tenthous on tenk1 - Index Cond: ((thousand = 42) AND (tenthous < 2000)) -(3 rows) - -select row_to_json(q) from - (select thousand, tenthous from tenk1 - where thousand = 42 and tenthous < 2000 offset 0) q; - row_to_json ---------------------------------- - {"thousand":42,"tenthous":42} - {"thousand":42,"tenthous":1042} -(2 rows) - -select row_to_json(q) from - (select thousand as x, tenthous as y from tenk1 - where thousand = 42 and tenthous < 2000 offset 0) q; - row_to_json -------------------- - {"x":42,"y":42} - {"x":42,"y":1042} -(2 rows) - -select row_to_json(q) from - (select thousand as x, tenthous as y from tenk1 - where thousand = 42 and tenthous < 2000 offset 0) q(a,b); - row_to_json -------------------- - {"a":42,"b":42} - {"a":42,"b":1042} -(2 rows) - -create temp table tt1 as select * from int8_tbl limit 2; -create temp table tt2 () inherits(tt1); -insert into tt2 values(0,0); -select row_to_json(r) from (select q2,q1 from tt1 offset 0) r; - row_to_json ----------------------------------- - {"q2":456,"q1":123} - {"q2":4567890123456789,"q1":123} - {"q2":0,"q1":0} -(3 rows) - --- check no-op rowtype conversions -create temp table tt3 () inherits(tt2); -insert into tt3 values(33,44); -select row_to_json(tt3::tt2::tt1) from tt3; - row_to_json -------------------- - {"q1":33,"q2":44} -(1 row) - --- --- IS [NOT] NULL should not recurse into nested composites (bug #14235) --- -explain (verbose, costs off) -select r, r is null as isnull, r is not null as isnotnull -from (values (1,row(1,2)), (1,row(null,null)), (1,null), - (null,row(1,2)), (null,row(null,null)), (null,null) ) r(a,b); - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - Values Scan on "*VALUES*" - Output: ROW("*VALUES*".column1, "*VALUES*".column2), (("*VALUES*".column1 IS NULL) AND ("*VALUES*".column2 IS NOT DISTINCT FROM NULL)), (("*VALUES*".column1 IS NOT NULL) AND ("*VALUES*".column2 IS DISTINCT FROM NULL)) -(2 rows) - -select r, r is null as isnull, r is not null as isnotnull -from (values (1,row(1,2)), (1,row(null,null)), (1,null), - (null,row(1,2)), (null,row(null,null)), (null,null) ) r(a,b); - r | isnull | isnotnull --------------+--------+----------- - (1,"(1,2)") | f | t - (1,"(,)") | f | t - (1,) | f | f - (,"(1,2)") | f | f - (,"(,)") | f | f - (,) | t | f -(6 rows) - -explain (verbose, costs off) -with r(a,b) as materialized - (values (1,row(1,2)), (1,row(null,null)), (1,null), - (null,row(1,2)), (null,row(null,null)), (null,null) ) -select r, r is null as isnull, r is not null as isnotnull from r; - QUERY PLAN ----------------------------------------------------------- - CTE Scan on r - Output: r.*, (r.* IS NULL), (r.* IS NOT NULL) - CTE r - -> Values Scan on "*VALUES*" - Output: "*VALUES*".column1, "*VALUES*".column2 -(5 rows) - -with r(a,b) as 
materialized - (values (1,row(1,2)), (1,row(null,null)), (1,null), - (null,row(1,2)), (null,row(null,null)), (null,null) ) -select r, r is null as isnull, r is not null as isnotnull from r; - r | isnull | isnotnull --------------+--------+----------- - (1,"(1,2)") | f | t - (1,"(,)") | f | t - (1,) | f | f - (,"(1,2)") | f | f - (,"(,)") | f | f - (,) | t | f -(6 rows) - --- --- Check parsing of indirect references to composite values (bug #18077) --- -explain (verbose, costs off) -with cte(c) as materialized (select row(1, 2)), - cte2(c) as (select * from cte) -select * from cte2 as t -where (select * from (select c as c1) s - where (select (c1).f1 > 0)) is not null; - QUERY PLAN ----------------------------------------------- - CTE Scan on cte - Output: cte.c - Filter: ((SubPlan 3) IS NOT NULL) - CTE cte - -> Result - Output: '(1,2)'::record - SubPlan 3 - -> Result - Output: cte.c - One-Time Filter: (InitPlan 2).col1 - InitPlan 2 - -> Result - Output: ((cte.c).f1 > 0) -(13 rows) - -with cte(c) as materialized (select row(1, 2)), - cte2(c) as (select * from cte) -select * from cte2 as t -where (select * from (select c as c1) s - where (select (c1).f1 > 0)) is not null; - c -------- - (1,2) -(1 row) - --- Also check deparsing of such cases -create view composite_v as -with cte(c) as materialized (select row(1, 2)), - cte2(c) as (select * from cte) -select 1 as one from cte2 as t -where (select * from (select c as c1) s - where (select (c1).f1 > 0)) is not null; -select pg_get_viewdef('composite_v', true); - pg_get_viewdef --------------------------------------------------------- - WITH cte(c) AS MATERIALIZED ( + - SELECT ROW(1, 2) AS "row" + - ), cte2(c) AS ( + - SELECT cte.c + - FROM cte + - ) + - SELECT 1 AS one + - FROM cte2 t + - WHERE (( SELECT s.c1 + - FROM ( SELECT t.c AS c1) s + - WHERE ( SELECT (s.c1).f1 > 0))) IS NOT NULL; -(1 row) - -drop view composite_v; --- --- Check cases where the composite comes from a proven-dummy rel (bug #18576) --- -explain (verbose, costs off) -select (ss.a).x, (ss.a).n from - (select information_schema._pg_expandarray(array[1,2]) AS a) ss; - QUERY PLAN ------------------------------------------------------------------------- - Subquery Scan on ss - Output: (ss.a).x, (ss.a).n - -> ProjectSet - Output: information_schema._pg_expandarray('{1,2}'::integer[]) - -> Result -(5 rows) - -explain (verbose, costs off) -select (ss.a).x, (ss.a).n from - (select information_schema._pg_expandarray(array[1,2]) AS a) ss -where false; - QUERY PLAN --------------------------- - Result - Output: (a).f1, (a).f2 - One-Time Filter: false -(3 rows) - -explain (verbose, costs off) -with cte(c) as materialized (select row(1, 2)), - cte2(c) as (select * from cte) -select (c).f1 from cte2 as t; - QUERY PLAN ------------------------------------ - CTE Scan on cte - Output: (cte.c).f1 - CTE cte - -> Result - Output: '(1,2)'::record -(5 rows) - -explain (verbose, costs off) -with cte(c) as materialized (select row(1, 2)), - cte2(c) as (select * from cte) -select (c).f1 from cte2 as t -where false; - QUERY PLAN ------------------------------------ - Result - Output: (cte.c).f1 - One-Time Filter: false - CTE cte - -> Result - Output: '(1,2)'::record -(6 rows) - --- --- Tests for component access / FieldSelect --- -CREATE TABLE compositetable(a text, b text); -INSERT INTO compositetable(a, b) VALUES('fa', 'fb'); --- composite type columns can't directly be accessed (error) -SELECT d.a FROM (SELECT compositetable AS d FROM compositetable) s; -ERROR: missing FROM-clause entry for table 
"d" -LINE 1: SELECT d.a FROM (SELECT compositetable AS d FROM compositeta... - ^ --- but can be accessed with proper parens -SELECT (d).a, (d).b FROM (SELECT compositetable AS d FROM compositetable) s; - a | b -----+---- - fa | fb -(1 row) - --- system columns can't be accessed in composite types (error) -SELECT (d).ctid FROM (SELECT compositetable AS d FROM compositetable) s; -ERROR: column "ctid" not found in data type compositetable -LINE 1: SELECT (d).ctid FROM (SELECT compositetable AS d FROM compos... - ^ --- accessing non-existing column in NULL datum errors out -SELECT (NULL::compositetable).nonexistent; -ERROR: column "nonexistent" not found in data type compositetable -LINE 1: SELECT (NULL::compositetable).nonexistent; - ^ --- existing column in a NULL composite yield NULL -SELECT (NULL::compositetable).a; - a ---- - -(1 row) - --- oids can't be accessed in composite types (error) -SELECT (NULL::compositetable).oid; -ERROR: column "oid" not found in data type compositetable -LINE 1: SELECT (NULL::compositetable).oid; - ^ -DROP TABLE compositetable; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/returning.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/returning.out --- /Users/admin/pgsql/src/test/regress/expected/returning.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/returning.out 2024-12-13 13:20:12 @@ -1,357 +1,2 @@ --- --- Test INSERT/UPDATE/DELETE RETURNING --- --- Simple cases -CREATE TEMP TABLE foo (f1 serial, f2 text, f3 int default 42); -INSERT INTO foo (f2,f3) - VALUES ('test', DEFAULT), ('More', 11), (upper('more'), 7+9) - RETURNING *, f1+f3 AS sum; - f1 | f2 | f3 | sum -----+------+----+----- - 1 | test | 42 | 43 - 2 | More | 11 | 13 - 3 | MORE | 16 | 19 -(3 rows) - -SELECT * FROM foo; - f1 | f2 | f3 -----+------+---- - 1 | test | 42 - 2 | More | 11 - 3 | MORE | 16 -(3 rows) - -UPDATE foo SET f2 = lower(f2), f3 = DEFAULT RETURNING foo.*, f1+f3 AS sum13; - f1 | f2 | f3 | sum13 -----+------+----+------- - 1 | test | 42 | 43 - 2 | more | 42 | 44 - 3 | more | 42 | 45 -(3 rows) - -SELECT * FROM foo; - f1 | f2 | f3 -----+------+---- - 1 | test | 42 - 2 | more | 42 - 3 | more | 42 -(3 rows) - -DELETE FROM foo WHERE f1 > 2 RETURNING f3, f2, f1, least(f1,f3); - f3 | f2 | f1 | least -----+------+----+------- - 42 | more | 3 | 3 -(1 row) - -SELECT * FROM foo; - f1 | f2 | f3 -----+------+---- - 1 | test | 42 - 2 | more | 42 -(2 rows) - --- Subplans and initplans in the RETURNING list -INSERT INTO foo SELECT f1+10, f2, f3+99 FROM foo - RETURNING *, f1+112 IN (SELECT q1 FROM int8_tbl) AS subplan, - EXISTS(SELECT * FROM int4_tbl) AS initplan; - f1 | f2 | f3 | subplan | initplan -----+------+-----+---------+---------- - 11 | test | 141 | t | t - 12 | more | 141 | f | t -(2 rows) - -UPDATE foo SET f3 = f3 * 2 - WHERE f1 > 10 - RETURNING *, f1+112 IN (SELECT q1 FROM int8_tbl) AS subplan, - EXISTS(SELECT * FROM int4_tbl) AS initplan; - f1 | f2 | f3 | subplan | initplan -----+------+-----+---------+---------- - 11 | test | 282 | t | t - 12 | more | 282 | f | t -(2 rows) - -DELETE FROM foo - WHERE f1 > 10 - RETURNING *, f1+112 IN (SELECT q1 FROM int8_tbl) AS subplan, - EXISTS(SELECT * FROM int4_tbl) AS initplan; - f1 | f2 | f3 | subplan | initplan 
-----+------+-----+---------+---------- - 11 | test | 282 | t | t - 12 | more | 282 | f | t -(2 rows) - --- Joins -UPDATE foo SET f3 = f3*2 - FROM int4_tbl i - WHERE foo.f1 + 123455 = i.f1 - RETURNING foo.*, i.f1 as "i.f1"; - f1 | f2 | f3 | i.f1 -----+------+----+-------- - 1 | test | 84 | 123456 -(1 row) - -SELECT * FROM foo; - f1 | f2 | f3 -----+------+---- - 2 | more | 42 - 1 | test | 84 -(2 rows) - -DELETE FROM foo - USING int4_tbl i - WHERE foo.f1 + 123455 = i.f1 - RETURNING foo.*, i.f1 as "i.f1"; - f1 | f2 | f3 | i.f1 -----+------+----+-------- - 1 | test | 84 | 123456 -(1 row) - -SELECT * FROM foo; - f1 | f2 | f3 -----+------+---- - 2 | more | 42 -(1 row) - --- Check inheritance cases -CREATE TEMP TABLE foochild (fc int) INHERITS (foo); -INSERT INTO foochild VALUES(123,'child',999,-123); -ALTER TABLE foo ADD COLUMN f4 int8 DEFAULT 99; -SELECT * FROM foo; - f1 | f2 | f3 | f4 ------+-------+-----+---- - 2 | more | 42 | 99 - 123 | child | 999 | 99 -(2 rows) - -SELECT * FROM foochild; - f1 | f2 | f3 | fc | f4 ------+-------+-----+------+---- - 123 | child | 999 | -123 | 99 -(1 row) - -UPDATE foo SET f4 = f4 + f3 WHERE f4 = 99 RETURNING *; - f1 | f2 | f3 | f4 ------+-------+-----+------ - 2 | more | 42 | 141 - 123 | child | 999 | 1098 -(2 rows) - -SELECT * FROM foo; - f1 | f2 | f3 | f4 ------+-------+-----+------ - 2 | more | 42 | 141 - 123 | child | 999 | 1098 -(2 rows) - -SELECT * FROM foochild; - f1 | f2 | f3 | fc | f4 ------+-------+-----+------+------ - 123 | child | 999 | -123 | 1098 -(1 row) - -UPDATE foo SET f3 = f3*2 - FROM int8_tbl i - WHERE foo.f1 = i.q2 - RETURNING *; - f1 | f2 | f3 | f4 | q1 | q2 ------+-------+------+------+------------------+----- - 123 | child | 1998 | 1098 | 4567890123456789 | 123 -(1 row) - -SELECT * FROM foo; - f1 | f2 | f3 | f4 ------+-------+------+------ - 2 | more | 42 | 141 - 123 | child | 1998 | 1098 -(2 rows) - -SELECT * FROM foochild; - f1 | f2 | f3 | fc | f4 ------+-------+------+------+------ - 123 | child | 1998 | -123 | 1098 -(1 row) - -DELETE FROM foo - USING int8_tbl i - WHERE foo.f1 = i.q2 - RETURNING *; - f1 | f2 | f3 | f4 | q1 | q2 ------+-------+------+------+------------------+----- - 123 | child | 1998 | 1098 | 4567890123456789 | 123 -(1 row) - -SELECT * FROM foo; - f1 | f2 | f3 | f4 -----+------+----+----- - 2 | more | 42 | 141 -(1 row) - -SELECT * FROM foochild; - f1 | f2 | f3 | fc | f4 -----+----+----+----+---- -(0 rows) - -DROP TABLE foochild; --- Rules and views -CREATE TEMP VIEW voo AS SELECT f1, f2 FROM foo; -CREATE RULE voo_i AS ON INSERT TO voo DO INSTEAD - INSERT INTO foo VALUES(new.*, 57); -INSERT INTO voo VALUES(11,'zit'); --- fails: -INSERT INTO voo VALUES(12,'zoo') RETURNING *, f1*2; -ERROR: cannot perform INSERT RETURNING on relation "voo" -HINT: You need an unconditional ON INSERT DO INSTEAD rule with a RETURNING clause. 
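
[Editor's note: the failing INSERT just above, and the HINT with it, state the rule-system contract these tests exercise: a rule-implemented view accepts INSERT ... RETURNING only once its unconditional DO INSTEAD rule itself carries a RETURNING list matching the view's columns. A minimal sketch of that contract, using hypothetical names (vw, base) rather than the test's tables; note a simple view like this would otherwise be auto-updatable with no rule at all:

CREATE TABLE base (a int, b text, hidden int DEFAULT 0);
CREATE VIEW vw AS SELECT a, b FROM base;
CREATE RULE vw_ins AS ON INSERT TO vw DO INSTEAD
    INSERT INTO base VALUES (new.a, new.b, 57)
    RETURNING a, b;   -- must produce exactly the view's column list
INSERT INTO vw VALUES (1, 'x') RETURNING *;   -- accepted once the rule returns rows
]
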
--- fails, incompatible list: -CREATE OR REPLACE RULE voo_i AS ON INSERT TO voo DO INSTEAD - INSERT INTO foo VALUES(new.*, 57) RETURNING *; -ERROR: RETURNING list has too many entries -CREATE OR REPLACE RULE voo_i AS ON INSERT TO voo DO INSTEAD - INSERT INTO foo VALUES(new.*, 57) RETURNING f1, f2; --- should still work -INSERT INTO voo VALUES(13,'zit2'); --- works now -INSERT INTO voo VALUES(14,'zoo2') RETURNING *; - f1 | f2 -----+------ - 14 | zoo2 -(1 row) - -SELECT * FROM foo; - f1 | f2 | f3 | f4 -----+------+----+----- - 2 | more | 42 | 141 - 11 | zit | 57 | 99 - 13 | zit2 | 57 | 99 - 14 | zoo2 | 57 | 99 -(4 rows) - -SELECT * FROM voo; - f1 | f2 -----+------ - 2 | more - 11 | zit - 13 | zit2 - 14 | zoo2 -(4 rows) - -CREATE OR REPLACE RULE voo_u AS ON UPDATE TO voo DO INSTEAD - UPDATE foo SET f1 = new.f1, f2 = new.f2 WHERE f1 = old.f1 - RETURNING f1, f2; -update voo set f1 = f1 + 1 where f2 = 'zoo2'; -update voo set f1 = f1 + 1 where f2 = 'zoo2' RETURNING *, f1*2; - f1 | f2 | ?column? -----+------+---------- - 16 | zoo2 | 32 -(1 row) - -SELECT * FROM foo; - f1 | f2 | f3 | f4 -----+------+----+----- - 2 | more | 42 | 141 - 11 | zit | 57 | 99 - 13 | zit2 | 57 | 99 - 16 | zoo2 | 57 | 99 -(4 rows) - -SELECT * FROM voo; - f1 | f2 -----+------ - 2 | more - 11 | zit - 13 | zit2 - 16 | zoo2 -(4 rows) - -CREATE OR REPLACE RULE voo_d AS ON DELETE TO voo DO INSTEAD - DELETE FROM foo WHERE f1 = old.f1 - RETURNING f1, f2; -DELETE FROM foo WHERE f1 = 13; -DELETE FROM foo WHERE f2 = 'zit' RETURNING *; - f1 | f2 | f3 | f4 -----+-----+----+---- - 11 | zit | 57 | 99 -(1 row) - -SELECT * FROM foo; - f1 | f2 | f3 | f4 -----+------+----+----- - 2 | more | 42 | 141 - 16 | zoo2 | 57 | 99 -(2 rows) - -SELECT * FROM voo; - f1 | f2 -----+------ - 2 | more - 16 | zoo2 -(2 rows) - --- Try a join case -CREATE TEMP TABLE joinme (f2j text, other int); -INSERT INTO joinme VALUES('more', 12345); -INSERT INTO joinme VALUES('zoo2', 54321); -INSERT INTO joinme VALUES('other', 0); -CREATE TEMP VIEW joinview AS - SELECT foo.*, other FROM foo JOIN joinme ON (f2 = f2j); -SELECT * FROM joinview; - f1 | f2 | f3 | f4 | other -----+------+----+-----+------- - 2 | more | 42 | 141 | 12345 - 16 | zoo2 | 57 | 99 | 54321 -(2 rows) - -CREATE RULE joinview_u AS ON UPDATE TO joinview DO INSTEAD - UPDATE foo SET f1 = new.f1, f3 = new.f3 - FROM joinme WHERE f2 = f2j AND f2 = old.f2 - RETURNING foo.*, other; -UPDATE joinview SET f1 = f1 + 1 WHERE f3 = 57 RETURNING *, other + 1; - f1 | f2 | f3 | f4 | other | ?column? -----+------+----+----+-------+---------- - 17 | zoo2 | 57 | 99 | 54321 | 54322 -(1 row) - -SELECT * FROM joinview; - f1 | f2 | f3 | f4 | other -----+------+----+-----+------- - 2 | more | 42 | 141 | 12345 - 17 | zoo2 | 57 | 99 | 54321 -(2 rows) - -SELECT * FROM foo; - f1 | f2 | f3 | f4 -----+------+----+----- - 2 | more | 42 | 141 - 17 | zoo2 | 57 | 99 -(2 rows) - -SELECT * FROM voo; - f1 | f2 -----+------ - 2 | more - 17 | zoo2 -(2 rows) - --- Check aliased target relation -INSERT INTO foo AS bar DEFAULT VALUES RETURNING *; -- ok - f1 | f2 | f3 | f4 -----+----+----+---- - 4 | | 42 | 99 -(1 row) - -INSERT INTO foo AS bar DEFAULT VALUES RETURNING foo.*; -- fails, wrong name -ERROR: invalid reference to FROM-clause entry for table "foo" -LINE 1: INSERT INTO foo AS bar DEFAULT VALUES RETURNING foo.*; - ^ -HINT: Perhaps you meant to reference the table alias "bar". 
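
[Editor's note: the aliased-target error above follows from a simple scoping rule: once the DML target carries an alias, the original table name is no longer visible anywhere in the statement, RETURNING included. A short sketch under hypothetical names (items, i), separate from the test tables:

CREATE TABLE items (id serial PRIMARY KEY, qty int DEFAULT 1);
INSERT INTO items AS i (qty) VALUES (5)
    RETURNING i.id, i.qty;            -- "items.id" would be rejected here
UPDATE items AS i SET qty = i.qty + 1
    RETURNING i.*;
]
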
-INSERT INTO foo AS bar DEFAULT VALUES RETURNING bar.*; -- ok - f1 | f2 | f3 | f4 -----+----+----+---- - 5 | | 42 | 99 -(1 row) - -INSERT INTO foo AS bar DEFAULT VALUES RETURNING bar.f3; -- ok - f3 ----- - 42 -(1 row) - +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/largeobject.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/largeobject.out --- /Users/admin/pgsql/src/test/regress/expected/largeobject.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/largeobject.out 2024-12-13 13:20:12 @@ -1,563 +1,2 @@ --- --- Test large object support --- --- directory paths are passed to us in environment variables -\getenv abs_srcdir PG_ABS_SRCDIR -\getenv abs_builddir PG_ABS_BUILDDIR --- ensure consistent test output regardless of the default bytea format -SET bytea_output TO escape; --- Test ALTER LARGE OBJECT OWNER -CREATE ROLE regress_lo_user; -SELECT lo_create(42); - lo_create ------------ - 42 -(1 row) - -ALTER LARGE OBJECT 42 OWNER TO regress_lo_user; --- Test GRANT, COMMENT as non-superuser -SET SESSION AUTHORIZATION regress_lo_user; -GRANT SELECT ON LARGE OBJECT 42 TO public; -COMMENT ON LARGE OBJECT 42 IS 'the ultimate answer'; -RESET SESSION AUTHORIZATION; --- Test psql's \lo_list et al (we assume no other LOs exist yet) -\lo_list - Large objects - ID | Owner | Description -----+-----------------+--------------------- - 42 | regress_lo_user | the ultimate answer -(1 row) - -\lo_list+ - Large objects - ID | Owner | Access privileges | Description -----+-----------------+------------------------------------+--------------------- - 42 | regress_lo_user | regress_lo_user=rw/regress_lo_user+| the ultimate answer - | | =r/regress_lo_user | -(1 row) - -\lo_unlink 42 -\dl - Large objects - ID | Owner | Description -----+-------+------------- -(0 rows) - --- Load a file -CREATE TABLE lotest_stash_values (loid oid, fd integer); --- lo_creat(mode integer) returns oid --- The mode arg to lo_creat is unused, some vestigal holdover from ancient times --- returns the large object id -INSERT INTO lotest_stash_values (loid) SELECT lo_creat(42); --- NOTE: large objects require transactions -BEGIN; --- lo_open(lobjId oid, mode integer) returns integer --- The mode parameter to lo_open uses two constants: --- INV_WRITE = 0x20000 --- INV_READ = 0x40000 --- The return value is a file descriptor-like value which remains valid for the --- transaction. -UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer)); --- loread/lowrite names are wonky, different from other functions which are lo_* --- lowrite(fd integer, data bytea) returns integer --- the integer is the number of bytes written -SELECT lowrite(fd, ' -I wandered lonely as a cloud -That floats on high o''er vales and hills, -When all at once I saw a crowd, -A host, of golden daffodils; -Beside the lake, beneath the trees, -Fluttering and dancing in the breeze. - -Continuous as the stars that shine -And twinkle on the milky way, -They stretched in never-ending line -Along the margin of a bay: -Ten thousand saw I at a glance, -Tossing their heads in sprightly dance. 
- -The waves beside them danced; but they -Out-did the sparkling waves in glee: -A poet could not but be gay, -In such a jocund company: -I gazed--and gazed--but little thought -What wealth the show to me had brought: - -For oft, when on my couch I lie -In vacant or in pensive mood, -They flash upon that inward eye -Which is the bliss of solitude; -And then my heart with pleasure fills, -And dances with the daffodils. - - -- William Wordsworth -') FROM lotest_stash_values; - lowrite ---------- - 848 -(1 row) - --- lo_close(fd integer) returns integer --- return value is 0 for success, or <0 for error (actually only -1, but...) -SELECT lo_close(fd) FROM lotest_stash_values; - lo_close ----------- - 0 -(1 row) - -END; --- Copy to another large object. --- Note: we intentionally don't remove the object created here; --- it's left behind to help test pg_dump. -SELECT lo_from_bytea(0, lo_get(loid)) AS newloid FROM lotest_stash_values -\gset --- Add a comment to it, as well, for pg_dump/pg_upgrade testing. -COMMENT ON LARGE OBJECT :newloid IS 'I Wandered Lonely as a Cloud'; --- Read out a portion -BEGIN; -UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); --- lo_lseek(fd integer, offset integer, whence integer) returns integer --- offset is in bytes, whence is one of three values: --- SEEK_SET (= 0) meaning relative to beginning --- SEEK_CUR (= 1) meaning relative to current position --- SEEK_END (= 2) meaning relative to end (offset better be negative) --- returns current position in file -SELECT lo_lseek(fd, 104, 0) FROM lotest_stash_values; - lo_lseek ----------- - 104 -(1 row) - --- loread/lowrite names are wonky, different from other functions which are lo_* --- loread(fd integer, len integer) returns bytea -SELECT loread(fd, 28) FROM lotest_stash_values; - loread ------------------------------- - A host, of golden daffodils; -(1 row) - -SELECT lo_lseek(fd, -19, 1) FROM lotest_stash_values; - lo_lseek ----------- - 113 -(1 row) - -SELECT lowrite(fd, 'n') FROM lotest_stash_values; - lowrite ---------- - 1 -(1 row) - -SELECT lo_tell(fd) FROM lotest_stash_values; - lo_tell ---------- - 114 -(1 row) - -SELECT lo_lseek(fd, -744, 2) FROM lotest_stash_values; - lo_lseek ----------- - 104 -(1 row) - -SELECT loread(fd, 28) FROM lotest_stash_values; - loread ------------------------------- - A host, on golden daffodils; -(1 row) - -SELECT lo_close(fd) FROM lotest_stash_values; - lo_close ----------- - 0 -(1 row) - -END; --- Test resource management -BEGIN; -SELECT lo_open(loid, x'40000'::int) from lotest_stash_values; - lo_open ---------- - 0 -(1 row) - -ABORT; -\set filename :abs_builddir '/results/invalid/path' -\set dobody 'DECLARE loid oid; BEGIN ' -\set dobody :dobody 'SELECT tbl.loid INTO loid FROM lotest_stash_values tbl; ' -\set dobody :dobody 'PERFORM lo_export(loid, ' :'filename' '); ' -\set dobody :dobody 'EXCEPTION WHEN UNDEFINED_FILE THEN ' -\set dobody :dobody 'RAISE NOTICE ''could not open file, as expected''; END' -DO :'dobody'; -NOTICE: could not open file, as expected --- Test truncation. 
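
[Editor's note: the session above is a tour of the client-side large-object API: lo_open takes INV_WRITE (x'20000') and/or INV_READ (x'40000') and hands back a descriptor valid for the rest of the transaction, to be used with lo_lseek/loread/lowrite/lo_tell/lo_close. A self-contained sketch of the same calls, using psql's \gset to carry the OID and descriptor; the written text is illustrative, not from the test data:

BEGIN;
SELECT lo_create(0) AS loid \gset
SELECT lo_open(:loid, CAST(x'20000' | x'40000' AS integer)) AS fd \gset
SELECT lowrite(:fd, 'hello, large object');   -- returns bytes written
SELECT lo_lseek(:fd, 0, 0);                   -- SEEK_SET: rewind to byte 0
SELECT loread(:fd, 5);                        -- first five bytes back
SELECT lo_close(:fd);                         -- 0 on success
COMMIT;
SELECT lo_unlink(:loid);                      -- drop the object again
]
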
-BEGIN; -UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); -SELECT lo_truncate(fd, 11) FROM lotest_stash_values; - lo_truncate -------------- - 0 -(1 row) - -SELECT loread(fd, 15) FROM lotest_stash_values; - loread ----------------- - \012I wandered -(1 row) - -SELECT lo_truncate(fd, 10000) FROM lotest_stash_values; - lo_truncate -------------- - 0 -(1 row) - -SELECT loread(fd, 10) FROM lotest_stash_values; - loread ------------------------------------------- - \000\000\000\000\000\000\000\000\000\000 -(1 row) - -SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; - lo_lseek ----------- - 10000 -(1 row) - -SELECT lo_tell(fd) FROM lotest_stash_values; - lo_tell ---------- - 10000 -(1 row) - -SELECT lo_truncate(fd, 5000) FROM lotest_stash_values; - lo_truncate -------------- - 0 -(1 row) - -SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; - lo_lseek ----------- - 5000 -(1 row) - -SELECT lo_tell(fd) FROM lotest_stash_values; - lo_tell ---------- - 5000 -(1 row) - -SELECT lo_close(fd) FROM lotest_stash_values; - lo_close ----------- - 0 -(1 row) - -END; --- Test 64-bit large object functions. -BEGIN; -UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer)); -SELECT lo_lseek64(fd, 4294967296, 0) FROM lotest_stash_values; - lo_lseek64 ------------- - 4294967296 -(1 row) - -SELECT lowrite(fd, 'offset:4GB') FROM lotest_stash_values; - lowrite ---------- - 10 -(1 row) - -SELECT lo_tell64(fd) FROM lotest_stash_values; - lo_tell64 ------------- - 4294967306 -(1 row) - -SELECT lo_lseek64(fd, -10, 1) FROM lotest_stash_values; - lo_lseek64 ------------- - 4294967296 -(1 row) - -SELECT lo_tell64(fd) FROM lotest_stash_values; - lo_tell64 ------------- - 4294967296 -(1 row) - -SELECT loread(fd, 10) FROM lotest_stash_values; - loread ------------- - offset:4GB -(1 row) - -SELECT lo_truncate64(fd, 5000000000) FROM lotest_stash_values; - lo_truncate64 ---------------- - 0 -(1 row) - -SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values; - lo_lseek64 ------------- - 5000000000 -(1 row) - -SELECT lo_tell64(fd) FROM lotest_stash_values; - lo_tell64 ------------- - 5000000000 -(1 row) - -SELECT lo_truncate64(fd, 3000000000) FROM lotest_stash_values; - lo_truncate64 ---------------- - 0 -(1 row) - -SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values; - lo_lseek64 ------------- - 3000000000 -(1 row) - -SELECT lo_tell64(fd) FROM lotest_stash_values; - lo_tell64 ------------- - 3000000000 -(1 row) - -SELECT lo_close(fd) FROM lotest_stash_values; - lo_close ----------- - 0 -(1 row) - -END; --- lo_unlink(lobjId oid) returns integer --- return value appears to always be 1 -SELECT lo_unlink(loid) from lotest_stash_values; - lo_unlink ------------ - 1 -(1 row) - -TRUNCATE lotest_stash_values; -\set filename :abs_srcdir '/data/tenk.data' -INSERT INTO lotest_stash_values (loid) SELECT lo_import(:'filename'); -BEGIN; -UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); --- verify length of large object -SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; - lo_lseek ----------- - 670800 -(1 row) - --- with the default BLCKSZ, LOBLKSIZE = 2048, so this positions us for a block --- edge case -SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; - lo_lseek ----------- - 2030 -(1 row) - --- this should get half of the value from page 0 and half from page 1 of the --- large object -SELECT loread(fd, 36) FROM lotest_stash_values; - loread ------------------------------------------------------------------ - 
AAA\011FBAAAA\011VVVVxx\0122513\01132\0111\0111\0113\01113\0111 -(1 row) - -SELECT lo_tell(fd) FROM lotest_stash_values; - lo_tell ---------- - 2066 -(1 row) - -SELECT lo_lseek(fd, -26, 1) FROM lotest_stash_values; - lo_lseek ----------- - 2040 -(1 row) - -SELECT lowrite(fd, 'abcdefghijklmnop') FROM lotest_stash_values; - lowrite ---------- - 16 -(1 row) - -SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; - lo_lseek ----------- - 2030 -(1 row) - -SELECT loread(fd, 36) FROM lotest_stash_values; - loread ------------------------------------------------------ - AAA\011FBAAAAabcdefghijklmnop1\0111\0113\01113\0111 -(1 row) - -SELECT lo_close(fd) FROM lotest_stash_values; - lo_close ----------- - 0 -(1 row) - -END; -\set filename :abs_builddir '/results/lotest.txt' -SELECT lo_export(loid, :'filename') FROM lotest_stash_values; - lo_export ------------ - 1 -(1 row) - -\lo_import :filename -\set newloid :LASTOID --- just make sure \lo_export does not barf -\set filename :abs_builddir '/results/lotest2.txt' -\lo_export :newloid :filename --- This is a hack to test that export/import are reversible --- This uses knowledge about the inner workings of large object mechanism --- which should not be used outside it. This makes it a HACK -SELECT pageno, data FROM pg_largeobject WHERE loid = (SELECT loid from lotest_stash_values) -EXCEPT -SELECT pageno, data FROM pg_largeobject WHERE loid = :newloid; - pageno | data ---------+------ -(0 rows) - -SELECT lo_unlink(loid) FROM lotest_stash_values; - lo_unlink ------------ - 1 -(1 row) - -TRUNCATE lotest_stash_values; -\lo_unlink :newloid -\set filename :abs_builddir '/results/lotest.txt' -\lo_import :filename -\set newloid_1 :LASTOID -SELECT lo_from_bytea(0, lo_get(:newloid_1)) AS newloid_2 -\gset -SELECT fipshash(lo_get(:newloid_1)) = fipshash(lo_get(:newloid_2)); - ?column? ----------- - t -(1 row) - -SELECT lo_get(:newloid_1, 0, 20); - lo_get -------------------------------------------- - 8800\0110\0110\0110\0110\0110\0110\011800 -(1 row) - -SELECT lo_get(:newloid_1, 10, 20); - lo_get -------------------------------------------- - \0110\0110\0110\011800\011800\0113800\011 -(1 row) - -SELECT lo_put(:newloid_1, 5, decode('afafafaf', 'hex')); - lo_put --------- - -(1 row) - -SELECT lo_get(:newloid_1, 0, 20); - lo_get -------------------------------------------------- - 8800\011\257\257\257\2570\0110\0110\0110\011800 -(1 row) - -SELECT lo_put(:newloid_1, 4294967310, 'foo'); - lo_put --------- - -(1 row) - -SELECT lo_get(:newloid_1); -ERROR: large object read request is too large -SELECT lo_get(:newloid_1, 4294967294, 100); - lo_get ---------------------------------------------------------------------- - \000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000foo -(1 row) - -\lo_unlink :newloid_1 -\lo_unlink :newloid_2 --- This object is left in the database for pg_dump test purposes -SELECT lo_from_bytea(0, E'\\xdeadbeef') AS newloid -\gset -SET bytea_output TO hex; -SELECT lo_get(:newloid); - lo_get ------------- - \xdeadbeef -(1 row) - --- Create one more object that we leave behind for testing pg_dump/pg_upgrade; --- this one intentionally has an OID in the system range -SELECT lo_create(2121); - lo_create ------------ - 2121 -(1 row) - -COMMENT ON LARGE OBJECT 2121 IS 'testing comments'; --- Test writes on large objects in read-only transactions -START TRANSACTION READ ONLY; --- INV_READ ... ok -SELECT lo_open(2121, x'40000'::int); - lo_open ---------- - 0 -(1 row) - --- INV_WRITE ... 
error -SELECT lo_open(2121, x'20000'::int); -ERROR: cannot execute lo_open(INV_WRITE) in a read-only transaction -ROLLBACK; -START TRANSACTION READ ONLY; -SELECT lo_create(42); -ERROR: cannot execute lo_create() in a read-only transaction -ROLLBACK; -START TRANSACTION READ ONLY; -SELECT lo_creat(42); -ERROR: cannot execute lo_creat() in a read-only transaction -ROLLBACK; -START TRANSACTION READ ONLY; -SELECT lo_unlink(42); -ERROR: cannot execute lo_unlink() in a read-only transaction -ROLLBACK; -START TRANSACTION READ ONLY; -SELECT lowrite(42, 'x'); -ERROR: cannot execute lowrite() in a read-only transaction -ROLLBACK; -START TRANSACTION READ ONLY; -SELECT lo_import(:'filename'); -ERROR: cannot execute lo_import() in a read-only transaction -ROLLBACK; -START TRANSACTION READ ONLY; -SELECT lo_truncate(42, 0); -ERROR: cannot execute lo_truncate() in a read-only transaction -ROLLBACK; -START TRANSACTION READ ONLY; -SELECT lo_truncate64(42, 0); -ERROR: cannot execute lo_truncate64() in a read-only transaction -ROLLBACK; -START TRANSACTION READ ONLY; -SELECT lo_from_bytea(0, 'x'); -ERROR: cannot execute lo_from_bytea() in a read-only transaction -ROLLBACK; -START TRANSACTION READ ONLY; -SELECT lo_put(42, 0, 'x'); -ERROR: cannot execute lo_put() in a read-only transaction -ROLLBACK; --- Clean up -DROP TABLE lotest_stash_values; -DROP ROLE regress_lo_user; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/with.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/with.out --- /Users/admin/pgsql/src/test/regress/expected/with.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/with.out 2024-12-13 13:20:12 @@ -1,3635 +1,2 @@ --- --- Tests for common table expressions (WITH query, ... SELECT ...) --- --- Basic WITH -WITH q1(x,y) AS (SELECT 1,2) -SELECT * FROM q1, q1 AS q2; - x | y | x | y ----+---+---+--- - 1 | 2 | 1 | 2 -(1 row) - --- Multiple uses are evaluated only once -SELECT count(*) FROM ( - WITH q1(x) AS (SELECT random() FROM generate_series(1, 5)) - SELECT * FROM q1 - UNION - SELECT * FROM q1 -) ss; - count -------- - 5 -(1 row) - --- WITH RECURSIVE --- sum of 1..100 -WITH RECURSIVE t(n) AS ( - VALUES (1) -UNION ALL - SELECT n+1 FROM t WHERE n < 100 -) -SELECT sum(n) FROM t; - sum ------- - 5050 -(1 row) - -WITH RECURSIVE t(n) AS ( - SELECT (VALUES(1)) -UNION ALL - SELECT n+1 FROM t WHERE n < 5 -) -SELECT * FROM t; - n ---- - 1 - 2 - 3 - 4 - 5 -(5 rows) - --- UNION DISTINCT requires hashable type -WITH RECURSIVE t(n) AS ( - VALUES ('01'::varbit) -UNION - SELECT n || '10'::varbit FROM t WHERE n < '100'::varbit -) -SELECT n FROM t; -ERROR: could not implement recursive UNION -DETAIL: All column datatypes must be hashable. 
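
[Editor's note: the hashability error at the end of that hunk concerns the de-duplication step: a recursive UNION (as opposed to UNION ALL) must hash each row to discard already-seen ones, so every output column type needs hash support. With a hashable type such as integer, the same shape runs; a trivial sketch, not from the test file:

WITH RECURSIVE t(n) AS (
    VALUES (1)
  UNION            -- DISTINCT union: duplicates are discarded, which requires hashing
    SELECT n + 1 FROM t WHERE n < 5
)
SELECT n FROM t;
]
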
--- recursive view -CREATE RECURSIVE VIEW nums (n) AS - VALUES (1) -UNION ALL - SELECT n+1 FROM nums WHERE n < 5; -SELECT * FROM nums; - n ---- - 1 - 2 - 3 - 4 - 5 -(5 rows) - -CREATE OR REPLACE RECURSIVE VIEW nums (n) AS - VALUES (1) -UNION ALL - SELECT n+1 FROM nums WHERE n < 6; -SELECT * FROM nums; - n ---- - 1 - 2 - 3 - 4 - 5 - 6 -(6 rows) - --- This is an infinite loop with UNION ALL, but not with UNION -WITH RECURSIVE t(n) AS ( - SELECT 1 -UNION - SELECT 10-n FROM t) -SELECT * FROM t; - n ---- - 1 - 9 -(2 rows) - --- This'd be an infinite loop, but outside query reads only as much as needed -WITH RECURSIVE t(n) AS ( - VALUES (1) -UNION ALL - SELECT n+1 FROM t) -SELECT * FROM t LIMIT 10; - n ----- - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 -(10 rows) - --- UNION case should have same property -WITH RECURSIVE t(n) AS ( - SELECT 1 -UNION - SELECT n+1 FROM t) -SELECT * FROM t LIMIT 10; - n ----- - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 -(10 rows) - --- Test behavior with an unknown-type literal in the WITH -WITH q AS (SELECT 'foo' AS x) -SELECT x, pg_typeof(x) FROM q; - x | pg_typeof ------+----------- - foo | text -(1 row) - -WITH RECURSIVE t(n) AS ( - SELECT 'foo' -UNION ALL - SELECT n || ' bar' FROM t WHERE length(n) < 20 -) -SELECT n, pg_typeof(n) FROM t; - n | pg_typeof --------------------------+----------- - foo | text - foo bar | text - foo bar bar | text - foo bar bar bar | text - foo bar bar bar bar | text - foo bar bar bar bar bar | text -(6 rows) - --- In a perfect world, this would work and resolve the literal as int ... --- but for now, we have to be content with resolving to text too soon. -WITH RECURSIVE t(n) AS ( - SELECT '7' -UNION ALL - SELECT n+1 FROM t WHERE n < 10 -) -SELECT n, pg_typeof(n) FROM t; -ERROR: operator does not exist: text + integer -LINE 4: SELECT n+1 FROM t WHERE n < 10 - ^ -HINT: No operator matches the given name and argument types. You might need to add explicit type casts. 
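
[Editor's note: the operator error just above comes from resolving the unknown literal '7' to text before the recursive term is analyzed. Spelling the type out in the non-recursive term sidesteps it; a sketch, not part of the test:

WITH RECURSIVE t(n) AS (
    SELECT 7::int    -- resolve the literal before the recursive term is analyzed
  UNION ALL
    SELECT n + 1 FROM t WHERE n < 10
)
SELECT n, pg_typeof(n) FROM t;
]
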
--- Deeply nested WITH caused a list-munging problem in v13 --- Detection of cross-references and self-references -WITH RECURSIVE w1(c1) AS - (WITH w2(c2) AS - (WITH w3(c3) AS - (WITH w4(c4) AS - (WITH w5(c5) AS - (WITH RECURSIVE w6(c6) AS - (WITH w6(c6) AS - (WITH w8(c8) AS - (SELECT 1) - SELECT * FROM w8) - SELECT * FROM w6) - SELECT * FROM w6) - SELECT * FROM w5) - SELECT * FROM w4) - SELECT * FROM w3) - SELECT * FROM w2) -SELECT * FROM w1; - c1 ----- - 1 -(1 row) - --- Detection of invalid self-references -WITH RECURSIVE outermost(x) AS ( - SELECT 1 - UNION (WITH innermost1 AS ( - SELECT 2 - UNION (WITH innermost2 AS ( - SELECT 3 - UNION (WITH innermost3 AS ( - SELECT 4 - UNION (WITH innermost4 AS ( - SELECT 5 - UNION (WITH innermost5 AS ( - SELECT 6 - UNION (WITH innermost6 AS - (SELECT 7) - SELECT * FROM innermost6)) - SELECT * FROM innermost5)) - SELECT * FROM innermost4)) - SELECT * FROM innermost3)) - SELECT * FROM innermost2)) - SELECT * FROM outermost - UNION SELECT * FROM innermost1) - ) - SELECT * FROM outermost ORDER BY 1; - x ---- - 1 - 2 - 3 - 4 - 5 - 6 - 7 -(7 rows) - --- --- Some examples with a tree --- --- department structure represented here is as follows: --- --- ROOT-+->A-+->B-+->C --- | | --- | +->D-+->F --- +->E-+->G -CREATE TEMP TABLE department ( - id INTEGER PRIMARY KEY, -- department ID - parent_department INTEGER REFERENCES department, -- upper department ID - name TEXT -- department name -); -INSERT INTO department VALUES (0, NULL, 'ROOT'); -INSERT INTO department VALUES (1, 0, 'A'); -INSERT INTO department VALUES (2, 1, 'B'); -INSERT INTO department VALUES (3, 2, 'C'); -INSERT INTO department VALUES (4, 2, 'D'); -INSERT INTO department VALUES (5, 0, 'E'); -INSERT INTO department VALUES (6, 4, 'F'); -INSERT INTO department VALUES (7, 5, 'G'); --- extract all departments under 'A'. Result should be A, B, C, D and F -WITH RECURSIVE subdepartment AS -( - -- non recursive term - SELECT name as root_name, * FROM department WHERE name = 'A' - UNION ALL - -- recursive term - SELECT sd.root_name, d.* FROM department AS d, subdepartment AS sd - WHERE d.parent_department = sd.id -) -SELECT * FROM subdepartment ORDER BY name; - root_name | id | parent_department | name ------------+----+-------------------+------ - A | 1 | 0 | A - A | 2 | 1 | B - A | 3 | 2 | C - A | 4 | 2 | D - A | 6 | 4 | F -(5 rows) - --- extract all departments under 'A' with "level" number -WITH RECURSIVE subdepartment(level, id, parent_department, name) AS -( - -- non recursive term - SELECT 1, * FROM department WHERE name = 'A' - UNION ALL - -- recursive term - SELECT sd.level + 1, d.* FROM department AS d, subdepartment AS sd - WHERE d.parent_department = sd.id -) -SELECT * FROM subdepartment ORDER BY name; - level | id | parent_department | name --------+----+-------------------+------ - 1 | 1 | 0 | A - 2 | 2 | 1 | B - 3 | 3 | 2 | C - 3 | 4 | 2 | D - 4 | 6 | 4 | F -(5 rows) - --- extract all departments under 'A' with "level" number. 
--- Only shows level 2 or more -WITH RECURSIVE subdepartment(level, id, parent_department, name) AS -( - -- non recursive term - SELECT 1, * FROM department WHERE name = 'A' - UNION ALL - -- recursive term - SELECT sd.level + 1, d.* FROM department AS d, subdepartment AS sd - WHERE d.parent_department = sd.id -) -SELECT * FROM subdepartment WHERE level >= 2 ORDER BY name; - level | id | parent_department | name --------+----+-------------------+------ - 2 | 2 | 1 | B - 3 | 3 | 2 | C - 3 | 4 | 2 | D - 4 | 6 | 4 | F -(4 rows) - --- "RECURSIVE" is ignored if the query has no self-reference -WITH RECURSIVE subdepartment AS -( - -- note lack of recursive UNION structure - SELECT * FROM department WHERE name = 'A' -) -SELECT * FROM subdepartment ORDER BY name; - id | parent_department | name -----+-------------------+------ - 1 | 0 | A -(1 row) - --- inside subqueries -SELECT count(*) FROM ( - WITH RECURSIVE t(n) AS ( - SELECT 1 UNION ALL SELECT n + 1 FROM t WHERE n < 500 - ) - SELECT * FROM t) AS t WHERE n < ( - SELECT count(*) FROM ( - WITH RECURSIVE t(n) AS ( - SELECT 1 UNION ALL SELECT n + 1 FROM t WHERE n < 100 - ) - SELECT * FROM t WHERE n < 50000 - ) AS t WHERE n < 100); - count -------- - 98 -(1 row) - --- use same CTE twice at different subquery levels -WITH q1(x,y) AS ( - SELECT hundred, sum(ten) FROM tenk1 GROUP BY hundred - ) -SELECT count(*) FROM q1 WHERE y > (SELECT sum(y)/100 FROM q1 qsub); - count -------- - 50 -(1 row) - --- via a VIEW -CREATE TEMPORARY VIEW vsubdepartment AS - WITH RECURSIVE subdepartment AS - ( - -- non recursive term - SELECT * FROM department WHERE name = 'A' - UNION ALL - -- recursive term - SELECT d.* FROM department AS d, subdepartment AS sd - WHERE d.parent_department = sd.id - ) - SELECT * FROM subdepartment; -SELECT * FROM vsubdepartment ORDER BY name; - id | parent_department | name -----+-------------------+------ - 1 | 0 | A - 2 | 1 | B - 3 | 2 | C - 4 | 2 | D - 6 | 4 | F -(5 rows) - --- Check reverse listing -SELECT pg_get_viewdef('vsubdepartment'::regclass); - pg_get_viewdef ------------------------------------------------ - WITH RECURSIVE subdepartment AS ( + - SELECT department.id, + - department.parent_department, + - department.name + - FROM department + - WHERE (department.name = 'A'::text)+ - UNION ALL + - SELECT d.id, + - d.parent_department, + - d.name + - FROM department d, + - subdepartment sd + - WHERE (d.parent_department = sd.id)+ - ) + - SELECT id, + - parent_department, + - name + - FROM subdepartment; -(1 row) - -SELECT pg_get_viewdef('vsubdepartment'::regclass, true); - pg_get_viewdef ---------------------------------------------- - WITH RECURSIVE subdepartment AS ( + - SELECT department.id, + - department.parent_department, + - department.name + - FROM department + - WHERE department.name = 'A'::text+ - UNION ALL + - SELECT d.id, + - d.parent_department, + - d.name + - FROM department d, + - subdepartment sd + - WHERE d.parent_department = sd.id+ - ) + - SELECT id, + - parent_department, + - name + - FROM subdepartment; -(1 row) - --- Another reverse-listing example -CREATE VIEW sums_1_100 AS -WITH RECURSIVE t(n) AS ( - VALUES (1) -UNION ALL - SELECT n+1 FROM t WHERE n < 100 -) -SELECT sum(n) FROM t; -\d+ sums_1_100 - View "public.sums_1_100" - Column | Type | Collation | Nullable | Default | Storage | Description ---------+--------+-----------+----------+---------+---------+------------- - sum | bigint | | | | plain | -View definition: - WITH RECURSIVE t(n) AS ( - VALUES (1) - UNION ALL - SELECT t_1.n + 1 - FROM t t_1 - WHERE 
t_1.n < 100 - ) - SELECT sum(n) AS sum - FROM t; - --- corner case in which sub-WITH gets initialized first -with recursive q as ( - select * from department - union all - (with x as (select * from q) - select * from x) - ) -select * from q limit 24; - id | parent_department | name -----+-------------------+------ - 0 | | ROOT - 1 | 0 | A - 2 | 1 | B - 3 | 2 | C - 4 | 2 | D - 5 | 0 | E - 6 | 4 | F - 7 | 5 | G - 0 | | ROOT - 1 | 0 | A - 2 | 1 | B - 3 | 2 | C - 4 | 2 | D - 5 | 0 | E - 6 | 4 | F - 7 | 5 | G - 0 | | ROOT - 1 | 0 | A - 2 | 1 | B - 3 | 2 | C - 4 | 2 | D - 5 | 0 | E - 6 | 4 | F - 7 | 5 | G -(24 rows) - -with recursive q as ( - select * from department - union all - (with recursive x as ( - select * from department - union all - (select * from q union all select * from x) - ) - select * from x) - ) -select * from q limit 32; - id | parent_department | name -----+-------------------+------ - 0 | | ROOT - 1 | 0 | A - 2 | 1 | B - 3 | 2 | C - 4 | 2 | D - 5 | 0 | E - 6 | 4 | F - 7 | 5 | G - 0 | | ROOT - 1 | 0 | A - 2 | 1 | B - 3 | 2 | C - 4 | 2 | D - 5 | 0 | E - 6 | 4 | F - 7 | 5 | G - 0 | | ROOT - 1 | 0 | A - 2 | 1 | B - 3 | 2 | C - 4 | 2 | D - 5 | 0 | E - 6 | 4 | F - 7 | 5 | G - 0 | | ROOT - 1 | 0 | A - 2 | 1 | B - 3 | 2 | C - 4 | 2 | D - 5 | 0 | E - 6 | 4 | F - 7 | 5 | G -(32 rows) - --- recursive term has sub-UNION -WITH RECURSIVE t(i,j) AS ( - VALUES (1,2) - UNION ALL - SELECT t2.i, t.j+1 FROM - (SELECT 2 AS i UNION ALL SELECT 3 AS i) AS t2 - JOIN t ON (t2.i = t.i+1)) - SELECT * FROM t; - i | j ----+--- - 1 | 2 - 2 | 3 - 3 | 4 -(3 rows) - --- --- different tree example --- -CREATE TEMPORARY TABLE tree( - id INTEGER PRIMARY KEY, - parent_id INTEGER REFERENCES tree(id) -); -INSERT INTO tree -VALUES (1, NULL), (2, 1), (3,1), (4,2), (5,2), (6,2), (7,3), (8,3), - (9,4), (10,4), (11,7), (12,7), (13,7), (14, 9), (15,11), (16,11); --- --- get all paths from "second level" nodes to leaf nodes --- -WITH RECURSIVE t(id, path) AS ( - VALUES(1,ARRAY[]::integer[]) -UNION ALL - SELECT tree.id, t.path || tree.id - FROM tree JOIN t ON (tree.parent_id = t.id) -) -SELECT t1.*, t2.* FROM t AS t1 JOIN t AS t2 ON - (t1.path[1] = t2.path[1] AND - array_upper(t1.path,1) = 1 AND - array_upper(t2.path,1) > 1) - ORDER BY t1.id, t2.id; - id | path | id | path -----+------+----+------------- - 2 | {2} | 4 | {2,4} - 2 | {2} | 5 | {2,5} - 2 | {2} | 6 | {2,6} - 2 | {2} | 9 | {2,4,9} - 2 | {2} | 10 | {2,4,10} - 2 | {2} | 14 | {2,4,9,14} - 3 | {3} | 7 | {3,7} - 3 | {3} | 8 | {3,8} - 3 | {3} | 11 | {3,7,11} - 3 | {3} | 12 | {3,7,12} - 3 | {3} | 13 | {3,7,13} - 3 | {3} | 15 | {3,7,11,15} - 3 | {3} | 16 | {3,7,11,16} -(13 rows) - --- just count 'em -WITH RECURSIVE t(id, path) AS ( - VALUES(1,ARRAY[]::integer[]) -UNION ALL - SELECT tree.id, t.path || tree.id - FROM tree JOIN t ON (tree.parent_id = t.id) -) -SELECT t1.id, count(t2.*) FROM t AS t1 JOIN t AS t2 ON - (t1.path[1] = t2.path[1] AND - array_upper(t1.path,1) = 1 AND - array_upper(t2.path,1) > 1) - GROUP BY t1.id - ORDER BY t1.id; - id | count -----+------- - 2 | 6 - 3 | 7 -(2 rows) - --- this variant tickled a whole-row-variable bug in 8.4devel -WITH RECURSIVE t(id, path) AS ( - VALUES(1,ARRAY[]::integer[]) -UNION ALL - SELECT tree.id, t.path || tree.id - FROM tree JOIN t ON (tree.parent_id = t.id) -) -SELECT t1.id, t2.path, t2 FROM t AS t1 JOIN t AS t2 ON -(t1.id=t2.id); - id | path | t2 -----+-------------+-------------------- - 1 | {} | (1,{}) - 2 | {2} | (2,{2}) - 3 | {3} | (3,{3}) - 4 | {2,4} | (4,"{2,4}") - 5 | {2,5} | (5,"{2,5}") - 6 | {2,6} | 
(6,"{2,6}") - 7 | {3,7} | (7,"{3,7}") - 8 | {3,8} | (8,"{3,8}") - 9 | {2,4,9} | (9,"{2,4,9}") - 10 | {2,4,10} | (10,"{2,4,10}") - 11 | {3,7,11} | (11,"{3,7,11}") - 12 | {3,7,12} | (12,"{3,7,12}") - 13 | {3,7,13} | (13,"{3,7,13}") - 14 | {2,4,9,14} | (14,"{2,4,9,14}") - 15 | {3,7,11,15} | (15,"{3,7,11,15}") - 16 | {3,7,11,16} | (16,"{3,7,11,16}") -(16 rows) - --- test that column statistics from a materialized CTE are available --- to upper planner (otherwise, we'd get a stupider plan) -explain (costs off) -with x as materialized (select unique1 from tenk1 b) -select count(*) from tenk1 a - where unique1 in (select * from x); - QUERY PLAN ------------------------------------------------------------- - Aggregate - CTE x - -> Index Only Scan using tenk1_unique1 on tenk1 b - -> Hash Semi Join - Hash Cond: (a.unique1 = x.unique1) - -> Index Only Scan using tenk1_unique1 on tenk1 a - -> Hash - -> CTE Scan on x -(8 rows) - -explain (costs off) -with x as materialized (insert into tenk1 default values returning unique1) -select count(*) from tenk1 a - where unique1 in (select * from x); - QUERY PLAN ------------------------------------------------------------- - Aggregate - CTE x - -> Insert on tenk1 - -> Result - -> Nested Loop - -> HashAggregate - Group Key: x.unique1 - -> CTE Scan on x - -> Index Only Scan using tenk1_unique1 on tenk1 a - Index Cond: (unique1 = x.unique1) -(10 rows) - --- test that pathkeys from a materialized CTE are propagated up to the --- outer query -explain (costs off) -with x as materialized (select unique1 from tenk1 b order by unique1) -select count(*) from tenk1 a - where unique1 in (select * from x); - QUERY PLAN ------------------------------------------------------------- - Aggregate - CTE x - -> Index Only Scan using tenk1_unique1 on tenk1 b - -> Merge Semi Join - Merge Cond: (a.unique1 = x.unique1) - -> Index Only Scan using tenk1_unique1 on tenk1 a - -> CTE Scan on x -(7 rows) - --- SEARCH clause -create temp table graph0( f int, t int, label text ); -insert into graph0 values - (1, 2, 'arc 1 -> 2'), - (1, 3, 'arc 1 -> 3'), - (2, 3, 'arc 2 -> 3'), - (1, 4, 'arc 1 -> 4'), - (4, 5, 'arc 4 -> 5'); -explain (verbose, costs off) -with recursive search_graph(f, t, label) as ( - select * from graph0 g - union all - select g.* - from graph0 g, search_graph sg - where g.f = sg.t -) search depth first by f, t set seq -select * from search_graph order by seq; - QUERY PLAN ----------------------------------------------------------------------------------------------- - Sort - Output: search_graph.f, search_graph.t, search_graph.label, search_graph.seq - Sort Key: search_graph.seq - CTE search_graph - -> Recursive Union - -> Seq Scan on pg_temp.graph0 g - Output: g.f, g.t, g.label, ARRAY[ROW(g.f, g.t)] - -> Merge Join - Output: g_1.f, g_1.t, g_1.label, array_cat(sg.seq, ARRAY[ROW(g_1.f, g_1.t)]) - Merge Cond: (g_1.f = sg.t) - -> Sort - Output: g_1.f, g_1.t, g_1.label - Sort Key: g_1.f - -> Seq Scan on pg_temp.graph0 g_1 - Output: g_1.f, g_1.t, g_1.label - -> Sort - Output: sg.seq, sg.t - Sort Key: sg.t - -> WorkTable Scan on search_graph sg - Output: sg.seq, sg.t - -> CTE Scan on search_graph - Output: search_graph.f, search_graph.t, search_graph.label, search_graph.seq -(22 rows) - -with recursive search_graph(f, t, label) as ( - select * from graph0 g - union all - select g.* - from graph0 g, search_graph sg - where g.f = sg.t -) search depth first by f, t set seq -select * from search_graph order by seq; - f | t | label | seq ----+---+------------+------------------- - 1 
| 2 | arc 1 -> 2 | {"(1,2)"} - 2 | 3 | arc 2 -> 3 | {"(1,2)","(2,3)"} - 1 | 3 | arc 1 -> 3 | {"(1,3)"} - 1 | 4 | arc 1 -> 4 | {"(1,4)"} - 4 | 5 | arc 4 -> 5 | {"(1,4)","(4,5)"} - 2 | 3 | arc 2 -> 3 | {"(2,3)"} - 4 | 5 | arc 4 -> 5 | {"(4,5)"} -(7 rows) - -with recursive search_graph(f, t, label) as ( - select * from graph0 g - union distinct - select g.* - from graph0 g, search_graph sg - where g.f = sg.t -) search depth first by f, t set seq -select * from search_graph order by seq; - f | t | label | seq ----+---+------------+------------------- - 1 | 2 | arc 1 -> 2 | {"(1,2)"} - 2 | 3 | arc 2 -> 3 | {"(1,2)","(2,3)"} - 1 | 3 | arc 1 -> 3 | {"(1,3)"} - 1 | 4 | arc 1 -> 4 | {"(1,4)"} - 4 | 5 | arc 4 -> 5 | {"(1,4)","(4,5)"} - 2 | 3 | arc 2 -> 3 | {"(2,3)"} - 4 | 5 | arc 4 -> 5 | {"(4,5)"} -(7 rows) - -explain (verbose, costs off) -with recursive search_graph(f, t, label) as ( - select * from graph0 g - union all - select g.* - from graph0 g, search_graph sg - where g.f = sg.t -) search breadth first by f, t set seq -select * from search_graph order by seq; - QUERY PLAN -------------------------------------------------------------------------------------------------- - Sort - Output: search_graph.f, search_graph.t, search_graph.label, search_graph.seq - Sort Key: search_graph.seq - CTE search_graph - -> Recursive Union - -> Seq Scan on pg_temp.graph0 g - Output: g.f, g.t, g.label, ROW('0'::bigint, g.f, g.t) - -> Merge Join - Output: g_1.f, g_1.t, g_1.label, ROW(int8inc((sg.seq)."*DEPTH*"), g_1.f, g_1.t) - Merge Cond: (g_1.f = sg.t) - -> Sort - Output: g_1.f, g_1.t, g_1.label - Sort Key: g_1.f - -> Seq Scan on pg_temp.graph0 g_1 - Output: g_1.f, g_1.t, g_1.label - -> Sort - Output: sg.seq, sg.t - Sort Key: sg.t - -> WorkTable Scan on search_graph sg - Output: sg.seq, sg.t - -> CTE Scan on search_graph - Output: search_graph.f, search_graph.t, search_graph.label, search_graph.seq -(22 rows) - -with recursive search_graph(f, t, label) as ( - select * from graph0 g - union all - select g.* - from graph0 g, search_graph sg - where g.f = sg.t -) search breadth first by f, t set seq -select * from search_graph order by seq; - f | t | label | seq ----+---+------------+--------- - 1 | 2 | arc 1 -> 2 | (0,1,2) - 1 | 3 | arc 1 -> 3 | (0,1,3) - 1 | 4 | arc 1 -> 4 | (0,1,4) - 2 | 3 | arc 2 -> 3 | (0,2,3) - 4 | 5 | arc 4 -> 5 | (0,4,5) - 2 | 3 | arc 2 -> 3 | (1,2,3) - 4 | 5 | arc 4 -> 5 | (1,4,5) -(7 rows) - -with recursive search_graph(f, t, label) as ( - select * from graph0 g - union distinct - select g.* - from graph0 g, search_graph sg - where g.f = sg.t -) search breadth first by f, t set seq -select * from search_graph order by seq; - f | t | label | seq ----+---+------------+--------- - 1 | 2 | arc 1 -> 2 | (0,1,2) - 1 | 3 | arc 1 -> 3 | (0,1,3) - 1 | 4 | arc 1 -> 4 | (0,1,4) - 2 | 3 | arc 2 -> 3 | (0,2,3) - 4 | 5 | arc 4 -> 5 | (0,4,5) - 2 | 3 | arc 2 -> 3 | (1,2,3) - 4 | 5 | arc 4 -> 5 | (1,4,5) -(7 rows) - --- a constant initial value causes issues for EXPLAIN -explain (verbose, costs off) -with recursive test as ( - select 1 as x - union all - select x + 1 - from test -) search depth first by x set y -select * from test limit 5; - QUERY PLAN ------------------------------------------------------------------------------------------ - Limit - Output: test.x, test.y - CTE test - -> Recursive Union - -> Result - Output: 1, '{(1)}'::record[] - -> WorkTable Scan on test test_1 - Output: (test_1.x + 1), array_cat(test_1.y, ARRAY[ROW((test_1.x + 1))]) - -> CTE Scan on test - Output: test.x, test.y 
-(10 rows) - -with recursive test as ( - select 1 as x - union all - select x + 1 - from test -) search depth first by x set y -select * from test limit 5; - x | y ----+----------------------- - 1 | {(1)} - 2 | {(1),(2)} - 3 | {(1),(2),(3)} - 4 | {(1),(2),(3),(4)} - 5 | {(1),(2),(3),(4),(5)} -(5 rows) - -explain (verbose, costs off) -with recursive test as ( - select 1 as x - union all - select x + 1 - from test -) search breadth first by x set y -select * from test limit 5; - QUERY PLAN --------------------------------------------------------------------------------------------- - Limit - Output: test.x, test.y - CTE test - -> Recursive Union - -> Result - Output: 1, '(0,1)'::record - -> WorkTable Scan on test test_1 - Output: (test_1.x + 1), ROW(int8inc((test_1.y)."*DEPTH*"), (test_1.x + 1)) - -> CTE Scan on test - Output: test.x, test.y -(10 rows) - -with recursive test as ( - select 1 as x - union all - select x + 1 - from test -) search breadth first by x set y -select * from test limit 5; - x | y ----+------- - 1 | (0,1) - 2 | (1,2) - 3 | (2,3) - 4 | (3,4) - 5 | (4,5) -(5 rows) - --- various syntax errors -with recursive search_graph(f, t, label) as ( - select * from graph0 g - union all - select g.* - from graph0 g, search_graph sg - where g.f = sg.t -) search depth first by foo, tar set seq -select * from search_graph; -ERROR: search column "foo" not in WITH query column list -LINE 7: ) search depth first by foo, tar set seq - ^ -with recursive search_graph(f, t, label) as ( - select * from graph0 g - union all - select g.* - from graph0 g, search_graph sg - where g.f = sg.t -) search depth first by f, t set label -select * from search_graph; -ERROR: search sequence column name "label" already used in WITH query column list -LINE 7: ) search depth first by f, t set label - ^ -with recursive search_graph(f, t, label) as ( - select * from graph0 g - union all - select g.* - from graph0 g, search_graph sg - where g.f = sg.t -) search depth first by f, t, f set seq -select * from search_graph; -ERROR: search column "f" specified more than once -LINE 7: ) search depth first by f, t, f set seq - ^ -with recursive search_graph(f, t, label) as ( - select * from graph0 g - union all - select * from graph0 g - union all - select g.* - from graph0 g, search_graph sg - where g.f = sg.t -) search depth first by f, t set seq -select * from search_graph order by seq; -ERROR: with a SEARCH or CYCLE clause, the left side of the UNION must be a SELECT -with recursive search_graph(f, t, label) as ( - select * from graph0 g - union all - (select * from graph0 g - union all - select g.* - from graph0 g, search_graph sg - where g.f = sg.t) -) search depth first by f, t set seq -select * from search_graph order by seq; -ERROR: with a SEARCH or CYCLE clause, the right side of the UNION must be a SELECT --- check that we distinguish same CTE name used at different levels --- (this case could be supported, perhaps, but it isn't today) -with recursive x(col) as ( - select 1 - union - (with x as (select * from x) - select * from x) -) search depth first by col set seq -select * from x; -ERROR: with a SEARCH or CYCLE clause, the recursive reference to WITH query "x" must be at the top level of its right-hand SELECT --- test ruleutils and view expansion -create temp view v_search as -with recursive search_graph(f, t, label) as ( - select * from graph0 g - union all - select g.* - from graph0 g, search_graph sg - where g.f = sg.t -) search depth first by f, t set seq -select f, t, label from search_graph; 
-select pg_get_viewdef('v_search'); - pg_get_viewdef ------------------------------------------------- - WITH RECURSIVE search_graph(f, t, label) AS (+ - SELECT g.f, + - g.t, + - g.label + - FROM graph0 g + - UNION ALL + - SELECT g.f, + - g.t, + - g.label + - FROM graph0 g, + - search_graph sg + - WHERE (g.f = sg.t) + - ) SEARCH DEPTH FIRST BY f, t SET seq + - SELECT f, + - t, + - label + - FROM search_graph; -(1 row) - -select * from v_search; - f | t | label ----+---+------------ - 1 | 2 | arc 1 -> 2 - 1 | 3 | arc 1 -> 3 - 2 | 3 | arc 2 -> 3 - 1 | 4 | arc 1 -> 4 - 4 | 5 | arc 4 -> 5 - 2 | 3 | arc 2 -> 3 - 4 | 5 | arc 4 -> 5 -(7 rows) - --- --- test cycle detection --- -create temp table graph( f int, t int, label text ); -insert into graph values - (1, 2, 'arc 1 -> 2'), - (1, 3, 'arc 1 -> 3'), - (2, 3, 'arc 2 -> 3'), - (1, 4, 'arc 1 -> 4'), - (4, 5, 'arc 4 -> 5'), - (5, 1, 'arc 5 -> 1'); -with recursive search_graph(f, t, label, is_cycle, path) as ( - select *, false, array[row(g.f, g.t)] from graph g - union all - select g.*, row(g.f, g.t) = any(path), path || row(g.f, g.t) - from graph g, search_graph sg - where g.f = sg.t and not is_cycle -) -select * from search_graph; - f | t | label | is_cycle | path ----+---+------------+----------+------------------------------------------- - 1 | 2 | arc 1 -> 2 | f | {"(1,2)"} - 1 | 3 | arc 1 -> 3 | f | {"(1,3)"} - 2 | 3 | arc 2 -> 3 | f | {"(2,3)"} - 1 | 4 | arc 1 -> 4 | f | {"(1,4)"} - 4 | 5 | arc 4 -> 5 | f | {"(4,5)"} - 5 | 1 | arc 5 -> 1 | f | {"(5,1)"} - 1 | 2 | arc 1 -> 2 | f | {"(5,1)","(1,2)"} - 1 | 3 | arc 1 -> 3 | f | {"(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | f | {"(5,1)","(1,4)"} - 2 | 3 | arc 2 -> 3 | f | {"(1,2)","(2,3)"} - 4 | 5 | arc 4 -> 5 | f | {"(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | f | {"(4,5)","(5,1)"} - 1 | 2 | arc 1 -> 2 | f | {"(4,5)","(5,1)","(1,2)"} - 1 | 3 | arc 1 -> 3 | f | {"(4,5)","(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | f | {"(4,5)","(5,1)","(1,4)"} - 2 | 3 | arc 2 -> 3 | f | {"(5,1)","(1,2)","(2,3)"} - 4 | 5 | arc 4 -> 5 | f | {"(5,1)","(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | f | {"(1,4)","(4,5)","(5,1)"} - 1 | 2 | arc 1 -> 2 | f | {"(1,4)","(4,5)","(5,1)","(1,2)"} - 1 | 3 | arc 1 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | t | {"(1,4)","(4,5)","(5,1)","(1,4)"} - 2 | 3 | arc 2 -> 3 | f | {"(4,5)","(5,1)","(1,2)","(2,3)"} - 4 | 5 | arc 4 -> 5 | t | {"(4,5)","(5,1)","(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | t | {"(5,1)","(1,4)","(4,5)","(5,1)"} - 2 | 3 | arc 2 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} -(25 rows) - --- UNION DISTINCT exercises row type hashing support -with recursive search_graph(f, t, label, is_cycle, path) as ( - select *, false, array[row(g.f, g.t)] from graph g - union distinct - select g.*, row(g.f, g.t) = any(path), path || row(g.f, g.t) - from graph g, search_graph sg - where g.f = sg.t and not is_cycle -) -select * from search_graph; - f | t | label | is_cycle | path ----+---+------------+----------+------------------------------------------- - 1 | 2 | arc 1 -> 2 | f | {"(1,2)"} - 1 | 3 | arc 1 -> 3 | f | {"(1,3)"} - 2 | 3 | arc 2 -> 3 | f | {"(2,3)"} - 1 | 4 | arc 1 -> 4 | f | {"(1,4)"} - 4 | 5 | arc 4 -> 5 | f | {"(4,5)"} - 5 | 1 | arc 5 -> 1 | f | {"(5,1)"} - 1 | 2 | arc 1 -> 2 | f | {"(5,1)","(1,2)"} - 1 | 3 | arc 1 -> 3 | f | {"(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | f | {"(5,1)","(1,4)"} - 2 | 3 | arc 2 -> 3 | f | {"(1,2)","(2,3)"} - 4 | 5 | arc 4 -> 5 | f | {"(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | f | {"(4,5)","(5,1)"} - 1 | 2 | arc 1 -> 2 | f | 
{"(4,5)","(5,1)","(1,2)"} - 1 | 3 | arc 1 -> 3 | f | {"(4,5)","(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | f | {"(4,5)","(5,1)","(1,4)"} - 2 | 3 | arc 2 -> 3 | f | {"(5,1)","(1,2)","(2,3)"} - 4 | 5 | arc 4 -> 5 | f | {"(5,1)","(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | f | {"(1,4)","(4,5)","(5,1)"} - 1 | 2 | arc 1 -> 2 | f | {"(1,4)","(4,5)","(5,1)","(1,2)"} - 1 | 3 | arc 1 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | t | {"(1,4)","(4,5)","(5,1)","(1,4)"} - 2 | 3 | arc 2 -> 3 | f | {"(4,5)","(5,1)","(1,2)","(2,3)"} - 4 | 5 | arc 4 -> 5 | t | {"(4,5)","(5,1)","(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | t | {"(5,1)","(1,4)","(4,5)","(5,1)"} - 2 | 3 | arc 2 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} -(25 rows) - --- ordering by the path column has same effect as SEARCH DEPTH FIRST -with recursive search_graph(f, t, label, is_cycle, path) as ( - select *, false, array[row(g.f, g.t)] from graph g - union all - select g.*, row(g.f, g.t) = any(path), path || row(g.f, g.t) - from graph g, search_graph sg - where g.f = sg.t and not is_cycle -) -select * from search_graph order by path; - f | t | label | is_cycle | path ----+---+------------+----------+------------------------------------------- - 1 | 2 | arc 1 -> 2 | f | {"(1,2)"} - 2 | 3 | arc 2 -> 3 | f | {"(1,2)","(2,3)"} - 1 | 3 | arc 1 -> 3 | f | {"(1,3)"} - 1 | 4 | arc 1 -> 4 | f | {"(1,4)"} - 4 | 5 | arc 4 -> 5 | f | {"(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | f | {"(1,4)","(4,5)","(5,1)"} - 1 | 2 | arc 1 -> 2 | f | {"(1,4)","(4,5)","(5,1)","(1,2)"} - 2 | 3 | arc 2 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} - 1 | 3 | arc 1 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | t | {"(1,4)","(4,5)","(5,1)","(1,4)"} - 2 | 3 | arc 2 -> 3 | f | {"(2,3)"} - 4 | 5 | arc 4 -> 5 | f | {"(4,5)"} - 5 | 1 | arc 5 -> 1 | f | {"(4,5)","(5,1)"} - 1 | 2 | arc 1 -> 2 | f | {"(4,5)","(5,1)","(1,2)"} - 2 | 3 | arc 2 -> 3 | f | {"(4,5)","(5,1)","(1,2)","(2,3)"} - 1 | 3 | arc 1 -> 3 | f | {"(4,5)","(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | f | {"(4,5)","(5,1)","(1,4)"} - 4 | 5 | arc 4 -> 5 | t | {"(4,5)","(5,1)","(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | f | {"(5,1)"} - 1 | 2 | arc 1 -> 2 | f | {"(5,1)","(1,2)"} - 2 | 3 | arc 2 -> 3 | f | {"(5,1)","(1,2)","(2,3)"} - 1 | 3 | arc 1 -> 3 | f | {"(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | f | {"(5,1)","(1,4)"} - 4 | 5 | arc 4 -> 5 | f | {"(5,1)","(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | t | {"(5,1)","(1,4)","(4,5)","(5,1)"} -(25 rows) - --- CYCLE clause -explain (verbose, costs off) -with recursive search_graph(f, t, label) as ( - select * from graph g - union all - select g.* - from graph g, search_graph sg - where g.f = sg.t -) cycle f, t set is_cycle using path -select * from search_graph; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - CTE Scan on search_graph - Output: search_graph.f, search_graph.t, search_graph.label, search_graph.is_cycle, search_graph.path - CTE search_graph - -> Recursive Union - -> Seq Scan on pg_temp.graph g - Output: g.f, g.t, g.label, false, ARRAY[ROW(g.f, g.t)] - -> Merge Join - Output: g_1.f, g_1.t, g_1.label, CASE WHEN (ROW(g_1.f, g_1.t) = ANY (sg.path)) THEN true ELSE false END, array_cat(sg.path, ARRAY[ROW(g_1.f, g_1.t)]) - Merge Cond: (g_1.f = sg.t) - -> Sort - Output: g_1.f, g_1.t, g_1.label - Sort Key: g_1.f - -> Seq Scan on pg_temp.graph g_1 - Output: g_1.f, g_1.t, g_1.label - -> Sort - Output: sg.path, sg.t - Sort 
Key: sg.t - -> WorkTable Scan on search_graph sg - Output: sg.path, sg.t - Filter: (NOT sg.is_cycle) -(20 rows) - -with recursive search_graph(f, t, label) as ( - select * from graph g - union all - select g.* - from graph g, search_graph sg - where g.f = sg.t -) cycle f, t set is_cycle using path -select * from search_graph; - f | t | label | is_cycle | path ----+---+------------+----------+------------------------------------------- - 1 | 2 | arc 1 -> 2 | f | {"(1,2)"} - 1 | 3 | arc 1 -> 3 | f | {"(1,3)"} - 2 | 3 | arc 2 -> 3 | f | {"(2,3)"} - 1 | 4 | arc 1 -> 4 | f | {"(1,4)"} - 4 | 5 | arc 4 -> 5 | f | {"(4,5)"} - 5 | 1 | arc 5 -> 1 | f | {"(5,1)"} - 1 | 2 | arc 1 -> 2 | f | {"(5,1)","(1,2)"} - 1 | 3 | arc 1 -> 3 | f | {"(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | f | {"(5,1)","(1,4)"} - 2 | 3 | arc 2 -> 3 | f | {"(1,2)","(2,3)"} - 4 | 5 | arc 4 -> 5 | f | {"(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | f | {"(4,5)","(5,1)"} - 1 | 2 | arc 1 -> 2 | f | {"(4,5)","(5,1)","(1,2)"} - 1 | 3 | arc 1 -> 3 | f | {"(4,5)","(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | f | {"(4,5)","(5,1)","(1,4)"} - 2 | 3 | arc 2 -> 3 | f | {"(5,1)","(1,2)","(2,3)"} - 4 | 5 | arc 4 -> 5 | f | {"(5,1)","(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | f | {"(1,4)","(4,5)","(5,1)"} - 1 | 2 | arc 1 -> 2 | f | {"(1,4)","(4,5)","(5,1)","(1,2)"} - 1 | 3 | arc 1 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | t | {"(1,4)","(4,5)","(5,1)","(1,4)"} - 2 | 3 | arc 2 -> 3 | f | {"(4,5)","(5,1)","(1,2)","(2,3)"} - 4 | 5 | arc 4 -> 5 | t | {"(4,5)","(5,1)","(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | t | {"(5,1)","(1,4)","(4,5)","(5,1)"} - 2 | 3 | arc 2 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} -(25 rows) - -with recursive search_graph(f, t, label) as ( - select * from graph g - union distinct - select g.* - from graph g, search_graph sg - where g.f = sg.t -) cycle f, t set is_cycle to 'Y' default 'N' using path -select * from search_graph; - f | t | label | is_cycle | path ----+---+------------+----------+------------------------------------------- - 1 | 2 | arc 1 -> 2 | N | {"(1,2)"} - 1 | 3 | arc 1 -> 3 | N | {"(1,3)"} - 2 | 3 | arc 2 -> 3 | N | {"(2,3)"} - 1 | 4 | arc 1 -> 4 | N | {"(1,4)"} - 4 | 5 | arc 4 -> 5 | N | {"(4,5)"} - 5 | 1 | arc 5 -> 1 | N | {"(5,1)"} - 1 | 2 | arc 1 -> 2 | N | {"(5,1)","(1,2)"} - 1 | 3 | arc 1 -> 3 | N | {"(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | N | {"(5,1)","(1,4)"} - 2 | 3 | arc 2 -> 3 | N | {"(1,2)","(2,3)"} - 4 | 5 | arc 4 -> 5 | N | {"(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | N | {"(4,5)","(5,1)"} - 1 | 2 | arc 1 -> 2 | N | {"(4,5)","(5,1)","(1,2)"} - 1 | 3 | arc 1 -> 3 | N | {"(4,5)","(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | N | {"(4,5)","(5,1)","(1,4)"} - 2 | 3 | arc 2 -> 3 | N | {"(5,1)","(1,2)","(2,3)"} - 4 | 5 | arc 4 -> 5 | N | {"(5,1)","(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | N | {"(1,4)","(4,5)","(5,1)"} - 1 | 2 | arc 1 -> 2 | N | {"(1,4)","(4,5)","(5,1)","(1,2)"} - 1 | 3 | arc 1 -> 3 | N | {"(1,4)","(4,5)","(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | Y | {"(1,4)","(4,5)","(5,1)","(1,4)"} - 2 | 3 | arc 2 -> 3 | N | {"(4,5)","(5,1)","(1,2)","(2,3)"} - 4 | 5 | arc 4 -> 5 | Y | {"(4,5)","(5,1)","(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | Y | {"(5,1)","(1,4)","(4,5)","(5,1)"} - 2 | 3 | arc 2 -> 3 | N | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} -(25 rows) - -explain (verbose, costs off) -with recursive test as ( - select 0 as x - union all - select (x + 1) % 10 - from test -) cycle x set is_cycle using path -select * from test; - QUERY PLAN 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - CTE Scan on test - Output: test.x, test.is_cycle, test.path - CTE test - -> Recursive Union - -> Result - Output: 0, false, '{(0)}'::record[] - -> WorkTable Scan on test test_1 - Output: ((test_1.x + 1) % 10), CASE WHEN (ROW(((test_1.x + 1) % 10)) = ANY (test_1.path)) THEN true ELSE false END, array_cat(test_1.path, ARRAY[ROW(((test_1.x + 1) % 10))]) - Filter: (NOT test_1.is_cycle) -(9 rows) - -with recursive test as ( - select 0 as x - union all - select (x + 1) % 10 - from test -) cycle x set is_cycle using path -select * from test; - x | is_cycle | path ----+----------+----------------------------------------------- - 0 | f | {(0)} - 1 | f | {(0),(1)} - 2 | f | {(0),(1),(2)} - 3 | f | {(0),(1),(2),(3)} - 4 | f | {(0),(1),(2),(3),(4)} - 5 | f | {(0),(1),(2),(3),(4),(5)} - 6 | f | {(0),(1),(2),(3),(4),(5),(6)} - 7 | f | {(0),(1),(2),(3),(4),(5),(6),(7)} - 8 | f | {(0),(1),(2),(3),(4),(5),(6),(7),(8)} - 9 | f | {(0),(1),(2),(3),(4),(5),(6),(7),(8),(9)} - 0 | t | {(0),(1),(2),(3),(4),(5),(6),(7),(8),(9),(0)} -(11 rows) - -with recursive test as ( - select 0 as x - union all - select (x + 1) % 10 - from test - where not is_cycle -- redundant, but legal -) cycle x set is_cycle using path -select * from test; - x | is_cycle | path ----+----------+----------------------------------------------- - 0 | f | {(0)} - 1 | f | {(0),(1)} - 2 | f | {(0),(1),(2)} - 3 | f | {(0),(1),(2),(3)} - 4 | f | {(0),(1),(2),(3),(4)} - 5 | f | {(0),(1),(2),(3),(4),(5)} - 6 | f | {(0),(1),(2),(3),(4),(5),(6)} - 7 | f | {(0),(1),(2),(3),(4),(5),(6),(7)} - 8 | f | {(0),(1),(2),(3),(4),(5),(6),(7),(8)} - 9 | f | {(0),(1),(2),(3),(4),(5),(6),(7),(8),(9)} - 0 | t | {(0),(1),(2),(3),(4),(5),(6),(7),(8),(9),(0)} -(11 rows) - --- multiple CTEs -with recursive -graph(f, t, label) as ( - values (1, 2, 'arc 1 -> 2'), - (1, 3, 'arc 1 -> 3'), - (2, 3, 'arc 2 -> 3'), - (1, 4, 'arc 1 -> 4'), - (4, 5, 'arc 4 -> 5'), - (5, 1, 'arc 5 -> 1') -), -search_graph(f, t, label) as ( - select * from graph g - union all - select g.* - from graph g, search_graph sg - where g.f = sg.t -) cycle f, t set is_cycle to true default false using path -select f, t, label from search_graph; - f | t | label ----+---+------------ - 1 | 2 | arc 1 -> 2 - 1 | 3 | arc 1 -> 3 - 2 | 3 | arc 2 -> 3 - 1 | 4 | arc 1 -> 4 - 4 | 5 | arc 4 -> 5 - 5 | 1 | arc 5 -> 1 - 2 | 3 | arc 2 -> 3 - 4 | 5 | arc 4 -> 5 - 5 | 1 | arc 5 -> 1 - 1 | 4 | arc 1 -> 4 - 1 | 3 | arc 1 -> 3 - 1 | 2 | arc 1 -> 2 - 5 | 1 | arc 5 -> 1 - 1 | 4 | arc 1 -> 4 - 1 | 3 | arc 1 -> 3 - 1 | 2 | arc 1 -> 2 - 4 | 5 | arc 4 -> 5 - 2 | 3 | arc 2 -> 3 - 1 | 4 | arc 1 -> 4 - 1 | 3 | arc 1 -> 3 - 1 | 2 | arc 1 -> 2 - 4 | 5 | arc 4 -> 5 - 2 | 3 | arc 2 -> 3 - 5 | 1 | arc 5 -> 1 - 2 | 3 | arc 2 -> 3 -(25 rows) - --- star expansion -with recursive a as ( - select 1 as b - union all - select * from a -) cycle b set c using p -select * from a; - b | c | p ----+---+----------- - 1 | f | {(1)} - 1 | t | {(1),(1)} -(2 rows) - --- search+cycle -with recursive search_graph(f, t, label) as ( - select * from graph g - union all - select g.* - from graph g, search_graph sg - where g.f = sg.t -) search depth first by f, t set seq - cycle f, t set is_cycle using path -select * from search_graph; - f | t | label | seq | is_cycle | path 
----+---+------------+-------------------------------------------+----------+------------------------------------------- - 1 | 2 | arc 1 -> 2 | {"(1,2)"} | f | {"(1,2)"} - 1 | 3 | arc 1 -> 3 | {"(1,3)"} | f | {"(1,3)"} - 2 | 3 | arc 2 -> 3 | {"(2,3)"} | f | {"(2,3)"} - 1 | 4 | arc 1 -> 4 | {"(1,4)"} | f | {"(1,4)"} - 4 | 5 | arc 4 -> 5 | {"(4,5)"} | f | {"(4,5)"} - 5 | 1 | arc 5 -> 1 | {"(5,1)"} | f | {"(5,1)"} - 1 | 2 | arc 1 -> 2 | {"(5,1)","(1,2)"} | f | {"(5,1)","(1,2)"} - 1 | 3 | arc 1 -> 3 | {"(5,1)","(1,3)"} | f | {"(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | {"(5,1)","(1,4)"} | f | {"(5,1)","(1,4)"} - 2 | 3 | arc 2 -> 3 | {"(1,2)","(2,3)"} | f | {"(1,2)","(2,3)"} - 4 | 5 | arc 4 -> 5 | {"(1,4)","(4,5)"} | f | {"(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | {"(4,5)","(5,1)"} | f | {"(4,5)","(5,1)"} - 1 | 2 | arc 1 -> 2 | {"(4,5)","(5,1)","(1,2)"} | f | {"(4,5)","(5,1)","(1,2)"} - 1 | 3 | arc 1 -> 3 | {"(4,5)","(5,1)","(1,3)"} | f | {"(4,5)","(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | {"(4,5)","(5,1)","(1,4)"} | f | {"(4,5)","(5,1)","(1,4)"} - 2 | 3 | arc 2 -> 3 | {"(5,1)","(1,2)","(2,3)"} | f | {"(5,1)","(1,2)","(2,3)"} - 4 | 5 | arc 4 -> 5 | {"(5,1)","(1,4)","(4,5)"} | f | {"(5,1)","(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | {"(1,4)","(4,5)","(5,1)"} | f | {"(1,4)","(4,5)","(5,1)"} - 1 | 2 | arc 1 -> 2 | {"(1,4)","(4,5)","(5,1)","(1,2)"} | f | {"(1,4)","(4,5)","(5,1)","(1,2)"} - 1 | 3 | arc 1 -> 3 | {"(1,4)","(4,5)","(5,1)","(1,3)"} | f | {"(1,4)","(4,5)","(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | {"(1,4)","(4,5)","(5,1)","(1,4)"} | t | {"(1,4)","(4,5)","(5,1)","(1,4)"} - 2 | 3 | arc 2 -> 3 | {"(4,5)","(5,1)","(1,2)","(2,3)"} | f | {"(4,5)","(5,1)","(1,2)","(2,3)"} - 4 | 5 | arc 4 -> 5 | {"(4,5)","(5,1)","(1,4)","(4,5)"} | t | {"(4,5)","(5,1)","(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | {"(5,1)","(1,4)","(4,5)","(5,1)"} | t | {"(5,1)","(1,4)","(4,5)","(5,1)"} - 2 | 3 | arc 2 -> 3 | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} | f | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} -(25 rows) - -with recursive search_graph(f, t, label) as ( - select * from graph g - union all - select g.* - from graph g, search_graph sg - where g.f = sg.t -) search breadth first by f, t set seq - cycle f, t set is_cycle using path -select * from search_graph; - f | t | label | seq | is_cycle | path ----+---+------------+---------+----------+------------------------------------------- - 1 | 2 | arc 1 -> 2 | (0,1,2) | f | {"(1,2)"} - 1 | 3 | arc 1 -> 3 | (0,1,3) | f | {"(1,3)"} - 2 | 3 | arc 2 -> 3 | (0,2,3) | f | {"(2,3)"} - 1 | 4 | arc 1 -> 4 | (0,1,4) | f | {"(1,4)"} - 4 | 5 | arc 4 -> 5 | (0,4,5) | f | {"(4,5)"} - 5 | 1 | arc 5 -> 1 | (0,5,1) | f | {"(5,1)"} - 1 | 2 | arc 1 -> 2 | (1,1,2) | f | {"(5,1)","(1,2)"} - 1 | 3 | arc 1 -> 3 | (1,1,3) | f | {"(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | (1,1,4) | f | {"(5,1)","(1,4)"} - 2 | 3 | arc 2 -> 3 | (1,2,3) | f | {"(1,2)","(2,3)"} - 4 | 5 | arc 4 -> 5 | (1,4,5) | f | {"(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | (1,5,1) | f | {"(4,5)","(5,1)"} - 1 | 2 | arc 1 -> 2 | (2,1,2) | f | {"(4,5)","(5,1)","(1,2)"} - 1 | 3 | arc 1 -> 3 | (2,1,3) | f | {"(4,5)","(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | (2,1,4) | f | {"(4,5)","(5,1)","(1,4)"} - 2 | 3 | arc 2 -> 3 | (2,2,3) | f | {"(5,1)","(1,2)","(2,3)"} - 4 | 5 | arc 4 -> 5 | (2,4,5) | f | {"(5,1)","(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | (2,5,1) | f | {"(1,4)","(4,5)","(5,1)"} - 1 | 2 | arc 1 -> 2 | (3,1,2) | f | {"(1,4)","(4,5)","(5,1)","(1,2)"} - 1 | 3 | arc 1 -> 3 | (3,1,3) | f | {"(1,4)","(4,5)","(5,1)","(1,3)"} - 1 | 4 | arc 1 -> 4 | (3,1,4) | t | 
{"(1,4)","(4,5)","(5,1)","(1,4)"} - 2 | 3 | arc 2 -> 3 | (3,2,3) | f | {"(4,5)","(5,1)","(1,2)","(2,3)"} - 4 | 5 | arc 4 -> 5 | (3,4,5) | t | {"(4,5)","(5,1)","(1,4)","(4,5)"} - 5 | 1 | arc 5 -> 1 | (3,5,1) | t | {"(5,1)","(1,4)","(4,5)","(5,1)"} - 2 | 3 | arc 2 -> 3 | (4,2,3) | f | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} -(25 rows) - --- various syntax errors -with recursive search_graph(f, t, label) as ( - select * from graph g - union all - select g.* - from graph g, search_graph sg - where g.f = sg.t -) cycle foo, tar set is_cycle using path -select * from search_graph; -ERROR: cycle column "foo" not in WITH query column list -LINE 7: ) cycle foo, tar set is_cycle using path - ^ -with recursive search_graph(f, t, label) as ( - select * from graph g - union all - select g.* - from graph g, search_graph sg - where g.f = sg.t -) cycle f, t set is_cycle to true default 55 using path -select * from search_graph; -ERROR: CYCLE types boolean and integer cannot be matched -LINE 7: ) cycle f, t set is_cycle to true default 55 using path - ^ -with recursive search_graph(f, t, label) as ( - select * from graph g - union all - select g.* - from graph g, search_graph sg - where g.f = sg.t -) cycle f, t set is_cycle to point '(1,1)' default point '(0,0)' using path -select * from search_graph; -ERROR: could not identify an equality operator for type point -with recursive search_graph(f, t, label) as ( - select * from graph g - union all - select g.* - from graph g, search_graph sg - where g.f = sg.t -) cycle f, t set label to true default false using path -select * from search_graph; -ERROR: cycle mark column name "label" already used in WITH query column list -LINE 7: ) cycle f, t set label to true default false using path - ^ -with recursive search_graph(f, t, label) as ( - select * from graph g - union all - select g.* - from graph g, search_graph sg - where g.f = sg.t -) cycle f, t set is_cycle to true default false using label -select * from search_graph; -ERROR: cycle path column name "label" already used in WITH query column list -LINE 7: ) cycle f, t set is_cycle to true default false using label - ^ -with recursive search_graph(f, t, label) as ( - select * from graph g - union all - select g.* - from graph g, search_graph sg - where g.f = sg.t -) cycle f, t set foo to true default false using foo -select * from search_graph; -ERROR: cycle mark column name and cycle path column name are the same -LINE 7: ) cycle f, t set foo to true default false using foo - ^ -with recursive search_graph(f, t, label) as ( - select * from graph g - union all - select g.* - from graph g, search_graph sg - where g.f = sg.t -) cycle f, t, f set is_cycle to true default false using path -select * from search_graph; -ERROR: cycle column "f" specified more than once -LINE 7: ) cycle f, t, f set is_cycle to true default false using pat... 
- ^ -with recursive search_graph(f, t, label) as ( - select * from graph g - union all - select g.* - from graph g, search_graph sg - where g.f = sg.t -) search depth first by f, t set foo - cycle f, t set foo to true default false using path -select * from search_graph; -ERROR: search sequence column name and cycle mark column name are the same -LINE 7: ) search depth first by f, t set foo - ^ -with recursive search_graph(f, t, label) as ( - select * from graph g - union all - select g.* - from graph g, search_graph sg - where g.f = sg.t -) search depth first by f, t set foo - cycle f, t set is_cycle to true default false using foo -select * from search_graph; -ERROR: search sequence column name and cycle path column name are the same -LINE 7: ) search depth first by f, t set foo - ^ --- test ruleutils and view expansion -create temp view v_cycle1 as -with recursive search_graph(f, t, label) as ( - select * from graph g - union all - select g.* - from graph g, search_graph sg - where g.f = sg.t -) cycle f, t set is_cycle using path -select f, t, label from search_graph; -create temp view v_cycle2 as -with recursive search_graph(f, t, label) as ( - select * from graph g - union all - select g.* - from graph g, search_graph sg - where g.f = sg.t -) cycle f, t set is_cycle to 'Y' default 'N' using path -select f, t, label from search_graph; -select pg_get_viewdef('v_cycle1'); - pg_get_viewdef ------------------------------------------------- - WITH RECURSIVE search_graph(f, t, label) AS (+ - SELECT g.f, + - g.t, + - g.label + - FROM graph g + - UNION ALL + - SELECT g.f, + - g.t, + - g.label + - FROM graph g, + - search_graph sg + - WHERE (g.f = sg.t) + - ) CYCLE f, t SET is_cycle USING path + - SELECT f, + - t, + - label + - FROM search_graph; -(1 row) - -select pg_get_viewdef('v_cycle2'); - pg_get_viewdef ------------------------------------------------------------------------------ - WITH RECURSIVE search_graph(f, t, label) AS ( + - SELECT g.f, + - g.t, + - g.label + - FROM graph g + - UNION ALL + - SELECT g.f, + - g.t, + - g.label + - FROM graph g, + - search_graph sg + - WHERE (g.f = sg.t) + - ) CYCLE f, t SET is_cycle TO 'Y'::text DEFAULT 'N'::text USING path+ - SELECT f, + - t, + - label + - FROM search_graph; -(1 row) - -select * from v_cycle1; - f | t | label ----+---+------------ - 1 | 2 | arc 1 -> 2 - 1 | 3 | arc 1 -> 3 - 2 | 3 | arc 2 -> 3 - 1 | 4 | arc 1 -> 4 - 4 | 5 | arc 4 -> 5 - 5 | 1 | arc 5 -> 1 - 1 | 2 | arc 1 -> 2 - 1 | 3 | arc 1 -> 3 - 1 | 4 | arc 1 -> 4 - 2 | 3 | arc 2 -> 3 - 4 | 5 | arc 4 -> 5 - 5 | 1 | arc 5 -> 1 - 1 | 2 | arc 1 -> 2 - 1 | 3 | arc 1 -> 3 - 1 | 4 | arc 1 -> 4 - 2 | 3 | arc 2 -> 3 - 4 | 5 | arc 4 -> 5 - 5 | 1 | arc 5 -> 1 - 1 | 2 | arc 1 -> 2 - 1 | 3 | arc 1 -> 3 - 1 | 4 | arc 1 -> 4 - 2 | 3 | arc 2 -> 3 - 4 | 5 | arc 4 -> 5 - 5 | 1 | arc 5 -> 1 - 2 | 3 | arc 2 -> 3 -(25 rows) - -select * from v_cycle2; - f | t | label ----+---+------------ - 1 | 2 | arc 1 -> 2 - 1 | 3 | arc 1 -> 3 - 2 | 3 | arc 2 -> 3 - 1 | 4 | arc 1 -> 4 - 4 | 5 | arc 4 -> 5 - 5 | 1 | arc 5 -> 1 - 1 | 2 | arc 1 -> 2 - 1 | 3 | arc 1 -> 3 - 1 | 4 | arc 1 -> 4 - 2 | 3 | arc 2 -> 3 - 4 | 5 | arc 4 -> 5 - 5 | 1 | arc 5 -> 1 - 1 | 2 | arc 1 -> 2 - 1 | 3 | arc 1 -> 3 - 1 | 4 | arc 1 -> 4 - 2 | 3 | arc 2 -> 3 - 4 | 5 | arc 4 -> 5 - 5 | 1 | arc 5 -> 1 - 1 | 2 | arc 1 -> 2 - 1 | 3 | arc 1 -> 3 - 1 | 4 | arc 1 -> 4 - 2 | 3 | arc 2 -> 3 - 4 | 5 | arc 4 -> 5 - 5 | 1 | arc 5 -> 1 - 2 | 3 | arc 2 -> 3 -(25 rows) - --- --- test multiple WITH queries --- -WITH RECURSIVE - y (id) AS (VALUES (1)), - x 
(id) AS (SELECT * FROM y UNION ALL SELECT id+1 FROM x WHERE id < 5) -SELECT * FROM x; - id ----- - 1 - 2 - 3 - 4 - 5 -(5 rows) - --- forward reference OK -WITH RECURSIVE - x(id) AS (SELECT * FROM y UNION ALL SELECT id+1 FROM x WHERE id < 5), - y(id) AS (values (1)) - SELECT * FROM x; - id ----- - 1 - 2 - 3 - 4 - 5 -(5 rows) - -WITH RECURSIVE - x(id) AS - (VALUES (1) UNION ALL SELECT id+1 FROM x WHERE id < 5), - y(id) AS - (VALUES (1) UNION ALL SELECT id+1 FROM y WHERE id < 10) - SELECT y.*, x.* FROM y LEFT JOIN x USING (id); - id | id -----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 - 6 | - 7 | - 8 | - 9 | - 10 | -(10 rows) - -WITH RECURSIVE - x(id) AS - (VALUES (1) UNION ALL SELECT id+1 FROM x WHERE id < 5), - y(id) AS - (VALUES (1) UNION ALL SELECT id+1 FROM x WHERE id < 10) - SELECT y.*, x.* FROM y LEFT JOIN x USING (id); - id | id -----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 - 5 | 5 - 6 | -(6 rows) - -WITH RECURSIVE - x(id) AS - (SELECT 1 UNION ALL SELECT id+1 FROM x WHERE id < 3 ), - y(id) AS - (SELECT * FROM x UNION ALL SELECT * FROM x), - z(id) AS - (SELECT * FROM x UNION ALL SELECT id+1 FROM z WHERE id < 10) - SELECT * FROM z; - id ----- - 1 - 2 - 3 - 2 - 3 - 4 - 3 - 4 - 5 - 4 - 5 - 6 - 5 - 6 - 7 - 6 - 7 - 8 - 7 - 8 - 9 - 8 - 9 - 10 - 9 - 10 - 10 -(27 rows) - -WITH RECURSIVE - x(id) AS - (SELECT 1 UNION ALL SELECT id+1 FROM x WHERE id < 3 ), - y(id) AS - (SELECT * FROM x UNION ALL SELECT * FROM x), - z(id) AS - (SELECT * FROM y UNION ALL SELECT id+1 FROM z WHERE id < 10) - SELECT * FROM z; - id ----- - 1 - 2 - 3 - 1 - 2 - 3 - 2 - 3 - 4 - 2 - 3 - 4 - 3 - 4 - 5 - 3 - 4 - 5 - 4 - 5 - 6 - 4 - 5 - 6 - 5 - 6 - 7 - 5 - 6 - 7 - 6 - 7 - 8 - 6 - 7 - 8 - 7 - 8 - 9 - 7 - 8 - 9 - 8 - 9 - 10 - 8 - 9 - 10 - 9 - 10 - 9 - 10 - 10 - 10 -(54 rows) - --- --- Test WITH attached to a data-modifying statement --- -CREATE TEMPORARY TABLE y (a INTEGER); -INSERT INTO y SELECT generate_series(1, 10); -WITH t AS ( - SELECT a FROM y -) -INSERT INTO y -SELECT a+20 FROM t RETURNING *; - a ----- - 21 - 22 - 23 - 24 - 25 - 26 - 27 - 28 - 29 - 30 -(10 rows) - -SELECT * FROM y; - a ----- - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 - 21 - 22 - 23 - 24 - 25 - 26 - 27 - 28 - 29 - 30 -(20 rows) - -WITH t AS ( - SELECT a FROM y -) -UPDATE y SET a = y.a-10 FROM t WHERE y.a > 20 AND t.a = y.a RETURNING y.a; - a ----- - 11 - 12 - 13 - 14 - 15 - 16 - 17 - 18 - 19 - 20 -(10 rows) - -SELECT * FROM y; - a ----- - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 - 11 - 12 - 13 - 14 - 15 - 16 - 17 - 18 - 19 - 20 -(20 rows) - -WITH RECURSIVE t(a) AS ( - SELECT 11 - UNION ALL - SELECT a+1 FROM t WHERE a < 50 -) -DELETE FROM y USING t WHERE t.a = y.a RETURNING y.a; - a ----- - 11 - 12 - 13 - 14 - 15 - 16 - 17 - 18 - 19 - 20 -(10 rows) - -SELECT * FROM y; - a ----- - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 -(10 rows) - -DROP TABLE y; --- --- error cases --- -WITH x(n, b) AS (SELECT 1) -SELECT * FROM x; -ERROR: WITH query "x" has 1 columns available but 2 columns specified -LINE 1: WITH x(n, b) AS (SELECT 1) - ^ --- INTERSECT -WITH RECURSIVE x(n) AS (SELECT 1 INTERSECT SELECT n+1 FROM x) - SELECT * FROM x; -ERROR: recursive query "x" does not have the form non-recursive-term UNION [ALL] recursive-term -LINE 1: WITH RECURSIVE x(n) AS (SELECT 1 INTERSECT SELECT n+1 FROM x... - ^ -WITH RECURSIVE x(n) AS (SELECT 1 INTERSECT ALL SELECT n+1 FROM x) - SELECT * FROM x; -ERROR: recursive query "x" does not have the form non-recursive-term UNION [ALL] recursive-term -LINE 1: WITH RECURSIVE x(n) AS (SELECT 1 INTERSECT ALL SELECT n+1 FR... 
- ^ --- EXCEPT -WITH RECURSIVE x(n) AS (SELECT 1 EXCEPT SELECT n+1 FROM x) - SELECT * FROM x; -ERROR: recursive query "x" does not have the form non-recursive-term UNION [ALL] recursive-term -LINE 1: WITH RECURSIVE x(n) AS (SELECT 1 EXCEPT SELECT n+1 FROM x) - ^ -WITH RECURSIVE x(n) AS (SELECT 1 EXCEPT ALL SELECT n+1 FROM x) - SELECT * FROM x; -ERROR: recursive query "x" does not have the form non-recursive-term UNION [ALL] recursive-term -LINE 1: WITH RECURSIVE x(n) AS (SELECT 1 EXCEPT ALL SELECT n+1 FROM ... - ^ --- no non-recursive term -WITH RECURSIVE x(n) AS (SELECT n FROM x) - SELECT * FROM x; -ERROR: recursive query "x" does not have the form non-recursive-term UNION [ALL] recursive-term -LINE 1: WITH RECURSIVE x(n) AS (SELECT n FROM x) - ^ --- recursive term in the left hand side (strictly speaking, should allow this) -WITH RECURSIVE x(n) AS (SELECT n FROM x UNION ALL SELECT 1) - SELECT * FROM x; -ERROR: recursive reference to query "x" must not appear within its non-recursive term -LINE 1: WITH RECURSIVE x(n) AS (SELECT n FROM x UNION ALL SELECT 1) - ^ --- allow this, because we historically have -WITH RECURSIVE x(n) AS ( - WITH x1 AS (SELECT 1 AS n) - SELECT 0 - UNION - SELECT * FROM x1) - SELECT * FROM x; - n ---- - 0 - 1 -(2 rows) - --- but this should be rejected -WITH RECURSIVE x(n) AS ( - WITH x1 AS (SELECT 1 FROM x) - SELECT 0 - UNION - SELECT * FROM x1) - SELECT * FROM x; -ERROR: recursive reference to query "x" must not appear within a subquery -LINE 2: WITH x1 AS (SELECT 1 FROM x) - ^ --- and this too -WITH RECURSIVE x(n) AS ( - (WITH x1 AS (SELECT 1 FROM x) SELECT * FROM x1) - UNION - SELECT 0) - SELECT * FROM x; -ERROR: recursive reference to query "x" must not appear within its non-recursive term -LINE 2: (WITH x1 AS (SELECT 1 FROM x) SELECT * FROM x1) - ^ --- and this -WITH RECURSIVE x(n) AS ( - SELECT 0 UNION SELECT 1 - ORDER BY (SELECT n FROM x)) - SELECT * FROM x; -ERROR: ORDER BY in a recursive query is not implemented -LINE 3: ORDER BY (SELECT n FROM x)) - ^ -CREATE TEMPORARY TABLE y (a INTEGER); -INSERT INTO y SELECT generate_series(1, 10); --- LEFT JOIN -WITH RECURSIVE x(n) AS (SELECT a FROM y WHERE a = 1 - UNION ALL - SELECT x.n+1 FROM y LEFT JOIN x ON x.n = y.a WHERE n < 10) -SELECT * FROM x; -ERROR: recursive reference to query "x" must not appear within an outer join -LINE 3: SELECT x.n+1 FROM y LEFT JOIN x ON x.n = y.a WHERE n < 10) - ^ --- RIGHT JOIN -WITH RECURSIVE x(n) AS (SELECT a FROM y WHERE a = 1 - UNION ALL - SELECT x.n+1 FROM x RIGHT JOIN y ON x.n = y.a WHERE n < 10) -SELECT * FROM x; -ERROR: recursive reference to query "x" must not appear within an outer join -LINE 3: SELECT x.n+1 FROM x RIGHT JOIN y ON x.n = y.a WHERE n < 10) - ^ --- FULL JOIN -WITH RECURSIVE x(n) AS (SELECT a FROM y WHERE a = 1 - UNION ALL - SELECT x.n+1 FROM x FULL JOIN y ON x.n = y.a WHERE n < 10) -SELECT * FROM x; -ERROR: recursive reference to query "x" must not appear within an outer join -LINE 3: SELECT x.n+1 FROM x FULL JOIN y ON x.n = y.a WHERE n < 10) - ^ --- subquery -WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT n+1 FROM x - WHERE n IN (SELECT * FROM x)) - SELECT * FROM x; -ERROR: recursive reference to query "x" must not appear within a subquery -LINE 2: WHERE n IN (SELECT * FROM x)) - ^ --- aggregate functions -WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT count(*) FROM x) - SELECT * FROM x; -ERROR: aggregate functions are not allowed in a recursive query's recursive term -LINE 1: WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT count(*) F... 
- ^ -WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT sum(n) FROM x) - SELECT * FROM x; -ERROR: aggregate functions are not allowed in a recursive query's recursive term -LINE 1: WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT sum(n) FRO... - ^ --- ORDER BY -WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT n+1 FROM x ORDER BY 1) - SELECT * FROM x; -ERROR: ORDER BY in a recursive query is not implemented -LINE 1: ...VE x(n) AS (SELECT 1 UNION ALL SELECT n+1 FROM x ORDER BY 1) - ^ --- LIMIT/OFFSET -WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT n+1 FROM x LIMIT 10 OFFSET 1) - SELECT * FROM x; -ERROR: OFFSET in a recursive query is not implemented -LINE 1: ... AS (SELECT 1 UNION ALL SELECT n+1 FROM x LIMIT 10 OFFSET 1) - ^ --- FOR UPDATE -WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT n+1 FROM x FOR UPDATE) - SELECT * FROM x; -ERROR: FOR UPDATE/SHARE in a recursive query is not implemented --- target list has a recursive query name -WITH RECURSIVE x(id) AS (values (1) - UNION ALL - SELECT (SELECT * FROM x) FROM x WHERE id < 5 -) SELECT * FROM x; -ERROR: recursive reference to query "x" must not appear within a subquery -LINE 3: SELECT (SELECT * FROM x) FROM x WHERE id < 5 - ^ --- mutual recursive query (not implemented) -WITH RECURSIVE - x (id) AS (SELECT 1 UNION ALL SELECT id+1 FROM y WHERE id < 5), - y (id) AS (SELECT 1 UNION ALL SELECT id+1 FROM x WHERE id < 5) -SELECT * FROM x; -ERROR: mutual recursion between WITH items is not implemented -LINE 2: x (id) AS (SELECT 1 UNION ALL SELECT id+1 FROM y WHERE id ... - ^ --- non-linear recursion is not allowed -WITH RECURSIVE foo(i) AS - (values (1) - UNION ALL - (SELECT i+1 FROM foo WHERE i < 10 - UNION ALL - SELECT i+1 FROM foo WHERE i < 5) -) SELECT * FROM foo; -ERROR: recursive reference to query "foo" must not appear more than once -LINE 6: SELECT i+1 FROM foo WHERE i < 5) - ^ -WITH RECURSIVE foo(i) AS - (values (1) - UNION ALL - SELECT * FROM - (SELECT i+1 FROM foo WHERE i < 10 - UNION ALL - SELECT i+1 FROM foo WHERE i < 5) AS t -) SELECT * FROM foo; -ERROR: recursive reference to query "foo" must not appear more than once -LINE 7: SELECT i+1 FROM foo WHERE i < 5) AS t - ^ -WITH RECURSIVE foo(i) AS - (values (1) - UNION ALL - (SELECT i+1 FROM foo WHERE i < 10 - EXCEPT - SELECT i+1 FROM foo WHERE i < 5) -) SELECT * FROM foo; -ERROR: recursive reference to query "foo" must not appear within EXCEPT -LINE 6: SELECT i+1 FROM foo WHERE i < 5) - ^ -WITH RECURSIVE foo(i) AS - (values (1) - UNION ALL - (SELECT i+1 FROM foo WHERE i < 10 - INTERSECT - SELECT i+1 FROM foo WHERE i < 5) -) SELECT * FROM foo; -ERROR: recursive reference to query "foo" must not appear more than once -LINE 6: SELECT i+1 FROM foo WHERE i < 5) - ^ --- Wrong type induced from non-recursive term -WITH RECURSIVE foo(i) AS - (SELECT i FROM (VALUES(1),(2)) t(i) - UNION ALL - SELECT (i+1)::numeric(10,0) FROM foo WHERE i < 10) -SELECT * FROM foo; -ERROR: recursive query "foo" column 1 has type integer in non-recursive term but type numeric overall -LINE 2: (SELECT i FROM (VALUES(1),(2)) t(i) - ^ -HINT: Cast the output of the non-recursive term to the correct type. --- rejects different typmod, too (should we allow this?) 
-WITH RECURSIVE foo(i) AS - (SELECT i::numeric(3,0) FROM (VALUES(1),(2)) t(i) - UNION ALL - SELECT (i+1)::numeric(10,0) FROM foo WHERE i < 10) -SELECT * FROM foo; -ERROR: recursive query "foo" column 1 has type numeric(3,0) in non-recursive term but type numeric overall -LINE 2: (SELECT i::numeric(3,0) FROM (VALUES(1),(2)) t(i) - ^ -HINT: Cast the output of the non-recursive term to the correct type. --- disallow OLD/NEW reference in CTE -CREATE TEMPORARY TABLE x (n integer); -CREATE RULE r2 AS ON UPDATE TO x DO INSTEAD - WITH t AS (SELECT OLD.*) UPDATE y SET a = t.n FROM t; -ERROR: cannot refer to OLD within WITH query --- --- test for bug #4902 --- -with cte(foo) as ( values(42) ) values((select foo from cte)); - column1 ---------- - 42 -(1 row) - -with cte(foo) as ( select 42 ) select * from ((select foo from cte)) q; - foo ------ - 42 -(1 row) - --- test CTE referencing an outer-level variable (to see that changed-parameter --- signaling still works properly after fixing this bug) -select ( with cte(foo) as ( values(f1) ) - select (select foo from cte) ) -from int4_tbl; - foo -------------- - 0 - 123456 - -123456 - 2147483647 - -2147483647 -(5 rows) - -select ( with cte(foo) as ( values(f1) ) - values((select foo from cte)) ) -from int4_tbl; - column1 -------------- - 0 - 123456 - -123456 - 2147483647 - -2147483647 -(5 rows) - --- --- test for nested-recursive-WITH bug --- -WITH RECURSIVE t(j) AS ( - WITH RECURSIVE s(i) AS ( - VALUES (1) - UNION ALL - SELECT i+1 FROM s WHERE i < 10 - ) - SELECT i FROM s - UNION ALL - SELECT j+1 FROM t WHERE j < 10 -) -SELECT * FROM t; - j ----- - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 - 4 - 5 - 6 - 7 - 8 - 9 - 10 - 5 - 6 - 7 - 8 - 9 - 10 - 6 - 7 - 8 - 9 - 10 - 7 - 8 - 9 - 10 - 8 - 9 - 10 - 9 - 10 - 10 -(55 rows) - --- --- test WITH attached to intermediate-level set operation --- -WITH outermost(x) AS ( - SELECT 1 - UNION (WITH innermost as (SELECT 2) - SELECT * FROM innermost - UNION SELECT 3) -) -SELECT * FROM outermost ORDER BY 1; - x ---- - 1 - 2 - 3 -(3 rows) - -WITH outermost(x) AS ( - SELECT 1 - UNION (WITH innermost as (SELECT 2) - SELECT * FROM outermost -- fail - UNION SELECT * FROM innermost) -) -SELECT * FROM outermost ORDER BY 1; -ERROR: relation "outermost" does not exist -LINE 4: SELECT * FROM outermost -- fail - ^ -DETAIL: There is a WITH item named "outermost", but it cannot be referenced from this part of the query. -HINT: Use WITH RECURSIVE, or re-order the WITH items to remove forward references. -WITH RECURSIVE outermost(x) AS ( - SELECT 1 - UNION (WITH innermost as (SELECT 2) - SELECT * FROM outermost - UNION SELECT * FROM innermost) -) -SELECT * FROM outermost ORDER BY 1; - x ---- - 1 - 2 -(2 rows) - -WITH RECURSIVE outermost(x) AS ( - WITH innermost as (SELECT 2 FROM outermost) -- fail - SELECT * FROM innermost - UNION SELECT * from outermost -) -SELECT * FROM outermost ORDER BY 1; -ERROR: recursive reference to query "outermost" must not appear within a subquery -LINE 2: WITH innermost as (SELECT 2 FROM outermost) -- fail - ^ --- --- This test will fail with the old implementation of PARAM_EXEC parameter --- assignment, because the "q1" Var passed down to A's targetlist subselect --- looks exactly like the "A.id" Var passed down to C's subselect, causing --- the old code to give them the same runtime PARAM_EXEC slot. But the --- lifespans of the two parameters overlap, thanks to B also reading A. 
--- -with -A as ( select q2 as id, (select q1) as x from int8_tbl ), -B as ( select id, row_number() over (partition by id) as r from A ), -C as ( select A.id, array(select B.id from B where B.id = A.id) from A ) -select * from C; - id | array --------------------+------------------------------------- - 456 | {456} - 4567890123456789 | {4567890123456789,4567890123456789} - 123 | {123} - 4567890123456789 | {4567890123456789,4567890123456789} - -4567890123456789 | {-4567890123456789} -(5 rows) - --- --- Test CTEs read in non-initialization orders --- -WITH RECURSIVE - tab(id_key,link) AS (VALUES (1,17), (2,17), (3,17), (4,17), (6,17), (5,17)), - iter (id_key, row_type, link) AS ( - SELECT 0, 'base', 17 - UNION ALL ( - WITH remaining(id_key, row_type, link, min) AS ( - SELECT tab.id_key, 'true'::text, iter.link, MIN(tab.id_key) OVER () - FROM tab INNER JOIN iter USING (link) - WHERE tab.id_key > iter.id_key - ), - first_remaining AS ( - SELECT id_key, row_type, link - FROM remaining - WHERE id_key=min - ), - effect AS ( - SELECT tab.id_key, 'new'::text, tab.link - FROM first_remaining e INNER JOIN tab ON e.id_key=tab.id_key - WHERE e.row_type = 'false' - ) - SELECT * FROM first_remaining - UNION ALL SELECT * FROM effect - ) - ) -SELECT * FROM iter; - id_key | row_type | link ---------+----------+------ - 0 | base | 17 - 1 | true | 17 - 2 | true | 17 - 3 | true | 17 - 4 | true | 17 - 5 | true | 17 - 6 | true | 17 -(7 rows) - -WITH RECURSIVE - tab(id_key,link) AS (VALUES (1,17), (2,17), (3,17), (4,17), (6,17), (5,17)), - iter (id_key, row_type, link) AS ( - SELECT 0, 'base', 17 - UNION ( - WITH remaining(id_key, row_type, link, min) AS ( - SELECT tab.id_key, 'true'::text, iter.link, MIN(tab.id_key) OVER () - FROM tab INNER JOIN iter USING (link) - WHERE tab.id_key > iter.id_key - ), - first_remaining AS ( - SELECT id_key, row_type, link - FROM remaining - WHERE id_key=min - ), - effect AS ( - SELECT tab.id_key, 'new'::text, tab.link - FROM first_remaining e INNER JOIN tab ON e.id_key=tab.id_key - WHERE e.row_type = 'false' - ) - SELECT * FROM first_remaining - UNION ALL SELECT * FROM effect - ) - ) -SELECT * FROM iter; - id_key | row_type | link ---------+----------+------ - 0 | base | 17 - 1 | true | 17 - 2 | true | 17 - 3 | true | 17 - 4 | true | 17 - 5 | true | 17 - 6 | true | 17 -(7 rows) - --- --- Data-modifying statements in WITH --- --- INSERT ... RETURNING -WITH t AS ( - INSERT INTO y - VALUES - (11), - (12), - (13), - (14), - (15), - (16), - (17), - (18), - (19), - (20) - RETURNING * -) -SELECT * FROM t; - a ----- - 11 - 12 - 13 - 14 - 15 - 16 - 17 - 18 - 19 - 20 -(10 rows) - -SELECT * FROM y; - a ----- - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 - 11 - 12 - 13 - 14 - 15 - 16 - 17 - 18 - 19 - 20 -(20 rows) - --- UPDATE ... RETURNING -WITH t AS ( - UPDATE y - SET a=a+1 - RETURNING * -) -SELECT * FROM t; - a ----- - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 - 11 - 12 - 13 - 14 - 15 - 16 - 17 - 18 - 19 - 20 - 21 -(20 rows) - -SELECT * FROM y; - a ----- - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 - 11 - 12 - 13 - 14 - 15 - 16 - 17 - 18 - 19 - 20 - 21 -(20 rows) - --- DELETE ... 
RETURNING -WITH t AS ( - DELETE FROM y - WHERE a <= 10 - RETURNING * -) -SELECT * FROM t; - a ----- - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 -(9 rows) - -SELECT * FROM y; - a ----- - 11 - 12 - 13 - 14 - 15 - 16 - 17 - 18 - 19 - 20 - 21 -(11 rows) - --- forward reference -WITH RECURSIVE t AS ( - INSERT INTO y - SELECT a+5 FROM t2 WHERE a > 5 - RETURNING * -), t2 AS ( - UPDATE y SET a=a-11 RETURNING * -) -SELECT * FROM t -UNION ALL -SELECT * FROM t2; - a ----- - 11 - 12 - 13 - 14 - 15 - 0 - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 -(16 rows) - -SELECT * FROM y; - a ----- - 0 - 1 - 2 - 3 - 4 - 5 - 6 - 11 - 7 - 12 - 8 - 13 - 9 - 14 - 10 - 15 -(16 rows) - --- unconditional DO INSTEAD rule -CREATE RULE y_rule AS ON DELETE TO y DO INSTEAD - INSERT INTO y VALUES(42) RETURNING *; -WITH t AS ( - DELETE FROM y RETURNING * -) -SELECT * FROM t; - a ----- - 42 -(1 row) - -SELECT * FROM y; - a ----- - 0 - 1 - 2 - 3 - 4 - 5 - 6 - 11 - 7 - 12 - 8 - 13 - 9 - 14 - 10 - 15 - 42 -(17 rows) - -DROP RULE y_rule ON y; --- check merging of outer CTE with CTE in a rule action -CREATE TEMP TABLE bug6051 AS - select i from generate_series(1,3) as t(i); -SELECT * FROM bug6051; - i ---- - 1 - 2 - 3 -(3 rows) - -WITH t1 AS ( DELETE FROM bug6051 RETURNING * ) -INSERT INTO bug6051 SELECT * FROM t1; -SELECT * FROM bug6051; - i ---- - 1 - 2 - 3 -(3 rows) - -CREATE TEMP TABLE bug6051_2 (i int); -CREATE RULE bug6051_ins AS ON INSERT TO bug6051 DO INSTEAD - INSERT INTO bug6051_2 - VALUES(NEW.i); -WITH t1 AS ( DELETE FROM bug6051 RETURNING * ) -INSERT INTO bug6051 SELECT * FROM t1; -SELECT * FROM bug6051; - i ---- -(0 rows) - -SELECT * FROM bug6051_2; - i ---- - 1 - 2 - 3 -(3 rows) - --- check INSERT ... SELECT rule actions are disallowed on commands --- that have modifyingCTEs -CREATE OR REPLACE RULE bug6051_ins AS ON INSERT TO bug6051 DO INSTEAD - INSERT INTO bug6051_2 - SELECT NEW.i; -WITH t1 AS ( DELETE FROM bug6051 RETURNING * ) -INSERT INTO bug6051 SELECT * FROM t1; -ERROR: INSERT ... 
SELECT rule actions are not supported for queries having data-modifying statements in WITH --- silly example to verify that hasModifyingCTE flag is propagated -CREATE TEMP TABLE bug6051_3 AS - SELECT a FROM generate_series(11,13) AS a; -CREATE RULE bug6051_3_ins AS ON INSERT TO bug6051_3 DO INSTEAD - SELECT i FROM bug6051_2; -BEGIN; SET LOCAL debug_parallel_query = on; -WITH t1 AS ( DELETE FROM bug6051_3 RETURNING * ) - INSERT INTO bug6051_3 SELECT * FROM t1; - i ---- - 1 - 2 - 3 - 1 - 2 - 3 - 1 - 2 - 3 -(9 rows) - -COMMIT; -SELECT * FROM bug6051_3; - a ---- -(0 rows) - --- check case where CTE reference is removed due to optimization -EXPLAIN (VERBOSE, COSTS OFF) -SELECT q1 FROM -( - WITH t_cte AS (SELECT * FROM int8_tbl t) - SELECT q1, (SELECT q2 FROM t_cte WHERE t_cte.q1 = i8.q1) AS t_sub - FROM int8_tbl i8 -) ss; - QUERY PLAN --------------------------------------- - Subquery Scan on ss - Output: ss.q1 - -> Seq Scan on public.int8_tbl i8 - Output: i8.q1, NULL::bigint -(4 rows) - -SELECT q1 FROM -( - WITH t_cte AS (SELECT * FROM int8_tbl t) - SELECT q1, (SELECT q2 FROM t_cte WHERE t_cte.q1 = i8.q1) AS t_sub - FROM int8_tbl i8 -) ss; - q1 ------------------- - 123 - 123 - 4567890123456789 - 4567890123456789 - 4567890123456789 -(5 rows) - -EXPLAIN (VERBOSE, COSTS OFF) -SELECT q1 FROM -( - WITH t_cte AS MATERIALIZED (SELECT * FROM int8_tbl t) - SELECT q1, (SELECT q2 FROM t_cte WHERE t_cte.q1 = i8.q1) AS t_sub - FROM int8_tbl i8 -) ss; - QUERY PLAN ---------------------------------------------- - Subquery Scan on ss - Output: ss.q1 - -> Seq Scan on public.int8_tbl i8 - Output: i8.q1, NULL::bigint - CTE t_cte - -> Seq Scan on public.int8_tbl t - Output: t.q1, t.q2 -(7 rows) - -SELECT q1 FROM -( - WITH t_cte AS MATERIALIZED (SELECT * FROM int8_tbl t) - SELECT q1, (SELECT q2 FROM t_cte WHERE t_cte.q1 = i8.q1) AS t_sub - FROM int8_tbl i8 -) ss; - q1 ------------------- - 123 - 123 - 4567890123456789 - 4567890123456789 - 4567890123456789 -(5 rows) - --- a truly recursive CTE in the same list -WITH RECURSIVE t(a) AS ( - SELECT 0 - UNION ALL - SELECT a+1 FROM t WHERE a+1 < 5 -), t2 as ( - INSERT INTO y - SELECT * FROM t RETURNING * -) -SELECT * FROM t2 JOIN y USING (a) ORDER BY a; - a ---- - 0 - 1 - 2 - 3 - 4 -(5 rows) - -SELECT * FROM y; - a ----- - 0 - 1 - 2 - 3 - 4 - 5 - 6 - 11 - 7 - 12 - 8 - 13 - 9 - 14 - 10 - 15 - 42 - 0 - 1 - 2 - 3 - 4 -(22 rows) - --- data-modifying WITH in a modifying statement -WITH t AS ( - DELETE FROM y - WHERE a <= 10 - RETURNING * -) -INSERT INTO y SELECT -a FROM t RETURNING *; - a ------ - 0 - -1 - -2 - -3 - -4 - -5 - -6 - -7 - -8 - -9 - -10 - 0 - -1 - -2 - -3 - -4 -(16 rows) - -SELECT * FROM y; - a ------ - 11 - 12 - 13 - 14 - 15 - 42 - 0 - -1 - -2 - -3 - -4 - -5 - -6 - -7 - -8 - -9 - -10 - 0 - -1 - -2 - -3 - -4 -(22 rows) - --- check that WITH query is run to completion even if outer query isn't -WITH t AS ( - UPDATE y SET a = a * 100 RETURNING * -) -SELECT * FROM t LIMIT 10; - a ------- - 1100 - 1200 - 1300 - 1400 - 1500 - 4200 - 0 - -100 - -200 - -300 -(10 rows) - -SELECT * FROM y; - a -------- - 1100 - 1200 - 1300 - 1400 - 1500 - 4200 - 0 - -100 - -200 - -300 - -400 - -500 - -600 - -700 - -800 - -900 - -1000 - 0 - -100 - -200 - -300 - -400 -(22 rows) - --- data-modifying WITH containing INSERT...ON CONFLICT DO UPDATE -CREATE TABLE withz AS SELECT i AS k, (i || ' v')::text v FROM generate_series(1, 16, 3) i; -ALTER TABLE withz ADD UNIQUE (k); -WITH t AS ( - INSERT INTO withz SELECT i, 'insert' - FROM generate_series(0, 16) i - ON CONFLICT (k) DO UPDATE SET v = 
withz.v || ', now update' - RETURNING * -) -SELECT * FROM t JOIN y ON t.k = y.a ORDER BY a, k; - k | v | a ----+--------+--- - 0 | insert | 0 - 0 | insert | 0 -(2 rows) - --- Test EXCLUDED.* reference within CTE -WITH aa AS ( - INSERT INTO withz VALUES(1, 5) ON CONFLICT (k) DO UPDATE SET v = EXCLUDED.v - WHERE withz.k != EXCLUDED.k - RETURNING * -) -SELECT * FROM aa; - k | v ----+--- -(0 rows) - --- New query/snapshot demonstrates side-effects of previous query. -SELECT * FROM withz ORDER BY k; - k | v -----+------------------ - 0 | insert - 1 | 1 v, now update - 2 | insert - 3 | insert - 4 | 4 v, now update - 5 | insert - 6 | insert - 7 | 7 v, now update - 8 | insert - 9 | insert - 10 | 10 v, now update - 11 | insert - 12 | insert - 13 | 13 v, now update - 14 | insert - 15 | insert - 16 | 16 v, now update -(17 rows) - --- --- Ensure subqueries within the update clause work, even if they --- reference outside values --- -WITH aa AS (SELECT 1 a, 2 b) -INSERT INTO withz VALUES(1, 'insert') -ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1); -WITH aa AS (SELECT 1 a, 2 b) -INSERT INTO withz VALUES(1, 'insert') -ON CONFLICT (k) DO UPDATE SET v = ' update' WHERE withz.k = (SELECT a FROM aa); -WITH aa AS (SELECT 1 a, 2 b) -INSERT INTO withz VALUES(1, 'insert') -ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1); -WITH aa AS (SELECT 'a' a, 'b' b UNION ALL SELECT 'a' a, 'b' b) -INSERT INTO withz VALUES(1, 'insert') -ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 'a' LIMIT 1); -WITH aa AS (SELECT 1 a, 2 b) -INSERT INTO withz VALUES(1, (SELECT b || ' insert' FROM aa WHERE a = 1 )) -ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1); --- Update a row more than once, in different parts of a wCTE. That is --- an allowed, presumably very rare, edge case, but since it was --- broken in the past, having a test seems worthwhile. 
-WITH simpletup AS ( - SELECT 2 k, 'Green' v), -upsert_cte AS ( - INSERT INTO withz VALUES(2, 'Blue') ON CONFLICT (k) DO - UPDATE SET (k, v) = (SELECT k, v FROM simpletup WHERE simpletup.k = withz.k) - RETURNING k, v) -INSERT INTO withz VALUES(2, 'Red') ON CONFLICT (k) DO -UPDATE SET (k, v) = (SELECT k, v FROM upsert_cte WHERE upsert_cte.k = withz.k) -RETURNING k, v; - k | v ----+--- -(0 rows) - -DROP TABLE withz; --- WITH referenced by MERGE statement -CREATE TABLE m AS SELECT i AS k, (i || ' v')::text v FROM generate_series(1, 16, 3) i; -ALTER TABLE m ADD UNIQUE (k); -WITH RECURSIVE cte_basic AS (SELECT 1 a, 'cte_basic val' b) -MERGE INTO m USING (select 0 k, 'merge source SubPlan' v) o ON m.k=o.k -WHEN MATCHED THEN UPDATE SET v = (SELECT b || ' merge update' FROM cte_basic WHERE cte_basic.a = m.k LIMIT 1) -WHEN NOT MATCHED THEN INSERT VALUES(o.k, o.v); -ERROR: WITH RECURSIVE is not supported for MERGE statement --- Basic: -WITH cte_basic AS MATERIALIZED (SELECT 1 a, 'cte_basic val' b) -MERGE INTO m USING (select 0 k, 'merge source SubPlan' v offset 0) o ON m.k=o.k -WHEN MATCHED THEN UPDATE SET v = (SELECT b || ' merge update' FROM cte_basic WHERE cte_basic.a = m.k LIMIT 1) -WHEN NOT MATCHED THEN INSERT VALUES(o.k, o.v); --- Examine -SELECT * FROM m where k = 0; - k | v ----+---------------------- - 0 | merge source SubPlan -(1 row) - --- See EXPLAIN output for same query: -EXPLAIN (VERBOSE, COSTS OFF) -WITH cte_basic AS MATERIALIZED (SELECT 1 a, 'cte_basic val' b) -MERGE INTO m USING (select 0 k, 'merge source SubPlan' v offset 0) o ON m.k=o.k -WHEN MATCHED THEN UPDATE SET v = (SELECT b || ' merge update' FROM cte_basic WHERE cte_basic.a = m.k LIMIT 1) -WHEN NOT MATCHED THEN INSERT VALUES(o.k, o.v); - QUERY PLAN -------------------------------------------------------------------- - Merge on public.m - CTE cte_basic - -> Result - Output: 1, 'cte_basic val'::text - -> Hash Right Join - Output: m.ctid, o.k, o.v, o.* - Hash Cond: (m.k = o.k) - -> Seq Scan on public.m - Output: m.ctid, m.k - -> Hash - Output: o.k, o.v, o.* - -> Subquery Scan on o - Output: o.k, o.v, o.* - -> Result - Output: 0, 'merge source SubPlan'::text - SubPlan 2 - -> Limit - Output: ((cte_basic.b || ' merge update'::text)) - -> CTE Scan on cte_basic - Output: (cte_basic.b || ' merge update'::text) - Filter: (cte_basic.a = m.k) -(21 rows) - --- InitPlan -WITH cte_init AS MATERIALIZED (SELECT 1 a, 'cte_init val' b) -MERGE INTO m USING (select 1 k, 'merge source InitPlan' v offset 0) o ON m.k=o.k -WHEN MATCHED THEN UPDATE SET v = (SELECT b || ' merge update' FROM cte_init WHERE a = 1 LIMIT 1) -WHEN NOT MATCHED THEN INSERT VALUES(o.k, o.v); --- Examine -SELECT * FROM m where k = 1; - k | v ----+--------------------------- - 1 | cte_init val merge update -(1 row) - --- See EXPLAIN output for same query: -EXPLAIN (VERBOSE, COSTS OFF) -WITH cte_init AS MATERIALIZED (SELECT 1 a, 'cte_init val' b) -MERGE INTO m USING (select 1 k, 'merge source InitPlan' v offset 0) o ON m.k=o.k -WHEN MATCHED THEN UPDATE SET v = (SELECT b || ' merge update' FROM cte_init WHERE a = 1 LIMIT 1) -WHEN NOT MATCHED THEN INSERT VALUES(o.k, o.v); - QUERY PLAN --------------------------------------------------------------------- - Merge on public.m - CTE cte_init - -> Result - Output: 1, 'cte_init val'::text - InitPlan 2 - -> Limit - Output: ((cte_init.b || ' merge update'::text)) - -> CTE Scan on cte_init - Output: (cte_init.b || ' merge update'::text) - Filter: (cte_init.a = 1) - -> Hash Right Join - Output: m.ctid, o.k, o.v, o.* - Hash Cond: (m.k = 
o.k) - -> Seq Scan on public.m - Output: m.ctid, m.k - -> Hash - Output: o.k, o.v, o.* - -> Subquery Scan on o - Output: o.k, o.v, o.* - -> Result - Output: 1, 'merge source InitPlan'::text -(21 rows) - --- MERGE source comes from CTE: -WITH merge_source_cte AS MATERIALIZED (SELECT 15 a, 'merge_source_cte val' b) -MERGE INTO m USING (select * from merge_source_cte) o ON m.k=o.a -WHEN MATCHED THEN UPDATE SET v = (SELECT b || merge_source_cte.*::text || ' merge update' FROM merge_source_cte WHERE a = 15) -WHEN NOT MATCHED THEN INSERT VALUES(o.a, o.b || (SELECT merge_source_cte.*::text || ' merge insert' FROM merge_source_cte)); --- Examine -SELECT * FROM m where k = 15; - k | v -----+-------------------------------------------------------------- - 15 | merge_source_cte val(15,"merge_source_cte val") merge insert -(1 row) - --- See EXPLAIN output for same query: -EXPLAIN (VERBOSE, COSTS OFF) -WITH merge_source_cte AS MATERIALIZED (SELECT 15 a, 'merge_source_cte val' b) -MERGE INTO m USING (select * from merge_source_cte) o ON m.k=o.a -WHEN MATCHED THEN UPDATE SET v = (SELECT b || merge_source_cte.*::text || ' merge update' FROM merge_source_cte WHERE a = 15) -WHEN NOT MATCHED THEN INSERT VALUES(o.a, o.b || (SELECT merge_source_cte.*::text || ' merge insert' FROM merge_source_cte)); - QUERY PLAN ------------------------------------------------------------------------------------------------------ - Merge on public.m - CTE merge_source_cte - -> Result - Output: 15, 'merge_source_cte val'::text - InitPlan 2 - -> CTE Scan on merge_source_cte merge_source_cte_1 - Output: ((merge_source_cte_1.b || (merge_source_cte_1.*)::text) || ' merge update'::text) - Filter: (merge_source_cte_1.a = 15) - InitPlan 3 - -> CTE Scan on merge_source_cte merge_source_cte_2 - Output: ((merge_source_cte_2.*)::text || ' merge insert'::text) - -> Hash Right Join - Output: m.ctid, merge_source_cte.a, merge_source_cte.b, merge_source_cte.* - Hash Cond: (m.k = merge_source_cte.a) - -> Seq Scan on public.m - Output: m.ctid, m.k - -> Hash - Output: merge_source_cte.a, merge_source_cte.b, merge_source_cte.* - -> CTE Scan on merge_source_cte - Output: merge_source_cte.a, merge_source_cte.b, merge_source_cte.* -(20 rows) - -DROP TABLE m; --- check that run to completion happens in proper ordering -TRUNCATE TABLE y; -INSERT INTO y SELECT generate_series(1, 3); -CREATE TEMPORARY TABLE yy (a INTEGER); -WITH RECURSIVE t1 AS ( - INSERT INTO y SELECT * FROM y RETURNING * -), t2 AS ( - INSERT INTO yy SELECT * FROM t1 RETURNING * -) -SELECT 1; - ?column? ----------- - 1 -(1 row) - -SELECT * FROM y; - a ---- - 1 - 2 - 3 - 1 - 2 - 3 -(6 rows) - -SELECT * FROM yy; - a ---- - 1 - 2 - 3 -(3 rows) - -WITH RECURSIVE t1 AS ( - INSERT INTO yy SELECT * FROM t2 RETURNING * -), t2 AS ( - INSERT INTO y SELECT * FROM y RETURNING * -) -SELECT 1; - ?column? 
----------- - 1 -(1 row) - -SELECT * FROM y; - a ---- - 1 - 2 - 3 - 1 - 2 - 3 - 1 - 2 - 3 - 1 - 2 - 3 -(12 rows) - -SELECT * FROM yy; - a ---- - 1 - 2 - 3 - 1 - 2 - 3 - 1 - 2 - 3 -(9 rows) - --- triggers -TRUNCATE TABLE y; -INSERT INTO y SELECT generate_series(1, 10); -CREATE FUNCTION y_trigger() RETURNS trigger AS $$ -begin - raise notice 'y_trigger: a = %', new.a; - return new; -end; -$$ LANGUAGE plpgsql; -CREATE TRIGGER y_trig BEFORE INSERT ON y FOR EACH ROW - EXECUTE PROCEDURE y_trigger(); -WITH t AS ( - INSERT INTO y - VALUES - (21), - (22), - (23) - RETURNING * -) -SELECT * FROM t; -NOTICE: y_trigger: a = 21 -NOTICE: y_trigger: a = 22 -NOTICE: y_trigger: a = 23 - a ----- - 21 - 22 - 23 -(3 rows) - -SELECT * FROM y; - a ----- - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 - 21 - 22 - 23 -(13 rows) - -DROP TRIGGER y_trig ON y; -CREATE TRIGGER y_trig AFTER INSERT ON y FOR EACH ROW - EXECUTE PROCEDURE y_trigger(); -WITH t AS ( - INSERT INTO y - VALUES - (31), - (32), - (33) - RETURNING * -) -SELECT * FROM t LIMIT 1; -NOTICE: y_trigger: a = 31 -NOTICE: y_trigger: a = 32 -NOTICE: y_trigger: a = 33 - a ----- - 31 -(1 row) - -SELECT * FROM y; - a ----- - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 - 21 - 22 - 23 - 31 - 32 - 33 -(16 rows) - -DROP TRIGGER y_trig ON y; -CREATE OR REPLACE FUNCTION y_trigger() RETURNS trigger AS $$ -begin - raise notice 'y_trigger'; - return null; -end; -$$ LANGUAGE plpgsql; -CREATE TRIGGER y_trig AFTER INSERT ON y FOR EACH STATEMENT - EXECUTE PROCEDURE y_trigger(); -WITH t AS ( - INSERT INTO y - VALUES - (41), - (42), - (43) - RETURNING * -) -SELECT * FROM t; -NOTICE: y_trigger - a ----- - 41 - 42 - 43 -(3 rows) - -SELECT * FROM y; - a ----- - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 - 21 - 22 - 23 - 31 - 32 - 33 - 41 - 42 - 43 -(19 rows) - -DROP TRIGGER y_trig ON y; -DROP FUNCTION y_trigger(); --- WITH attached to inherited UPDATE or DELETE -CREATE TEMP TABLE parent ( id int, val text ); -CREATE TEMP TABLE child1 ( ) INHERITS ( parent ); -CREATE TEMP TABLE child2 ( ) INHERITS ( parent ); -INSERT INTO parent VALUES ( 1, 'p1' ); -INSERT INTO child1 VALUES ( 11, 'c11' ),( 12, 'c12' ); -INSERT INTO child2 VALUES ( 23, 'c21' ),( 24, 'c22' ); -WITH rcte AS ( SELECT sum(id) AS totalid FROM parent ) -UPDATE parent SET id = id + totalid FROM rcte; -SELECT * FROM parent; - id | val -----+----- - 72 | p1 - 82 | c11 - 83 | c12 - 94 | c21 - 95 | c22 -(5 rows) - -WITH wcte AS ( INSERT INTO child1 VALUES ( 42, 'new' ) RETURNING id AS newid ) -UPDATE parent SET id = id + newid FROM wcte; -SELECT * FROM parent; - id | val ------+----- - 114 | p1 - 42 | new - 124 | c11 - 125 | c12 - 136 | c21 - 137 | c22 -(6 rows) - -WITH rcte AS ( SELECT max(id) AS maxid FROM parent ) -DELETE FROM parent USING rcte WHERE id = maxid; -SELECT * FROM parent; - id | val ------+----- - 114 | p1 - 42 | new - 124 | c11 - 125 | c12 - 136 | c21 -(5 rows) - -WITH wcte AS ( INSERT INTO child2 VALUES ( 42, 'new2' ) RETURNING id AS newid ) -DELETE FROM parent USING wcte WHERE id = newid; -SELECT * FROM parent; - id | val ------+------ - 114 | p1 - 124 | c11 - 125 | c12 - 136 | c21 - 42 | new2 -(5 rows) - --- check EXPLAIN VERBOSE for a wCTE with RETURNING -EXPLAIN (VERBOSE, COSTS OFF) -WITH wcte AS ( INSERT INTO int8_tbl VALUES ( 42, 47 ) RETURNING q2 ) -DELETE FROM a_star USING wcte WHERE aa = q2; - QUERY PLAN ---------------------------------------------------------------------------- - Delete on public.a_star - Delete on public.a_star a_star_1 - Delete on public.b_star a_star_2 - Delete on public.c_star a_star_3 - 
Delete on public.d_star a_star_4 - Delete on public.e_star a_star_5 - Delete on public.f_star a_star_6 - CTE wcte - -> Insert on public.int8_tbl - Output: int8_tbl.q2 - -> Result - Output: '42'::bigint, '47'::bigint - -> Hash Join - Output: wcte.*, a_star.tableoid, a_star.ctid - Hash Cond: (a_star.aa = wcte.q2) - -> Append - -> Seq Scan on public.a_star a_star_1 - Output: a_star_1.aa, a_star_1.tableoid, a_star_1.ctid - -> Seq Scan on public.b_star a_star_2 - Output: a_star_2.aa, a_star_2.tableoid, a_star_2.ctid - -> Seq Scan on public.c_star a_star_3 - Output: a_star_3.aa, a_star_3.tableoid, a_star_3.ctid - -> Seq Scan on public.d_star a_star_4 - Output: a_star_4.aa, a_star_4.tableoid, a_star_4.ctid - -> Seq Scan on public.e_star a_star_5 - Output: a_star_5.aa, a_star_5.tableoid, a_star_5.ctid - -> Seq Scan on public.f_star a_star_6 - Output: a_star_6.aa, a_star_6.tableoid, a_star_6.ctid - -> Hash - Output: wcte.*, wcte.q2 - -> CTE Scan on wcte - Output: wcte.*, wcte.q2 -(32 rows) - --- error cases --- data-modifying WITH tries to use its own output -WITH RECURSIVE t AS ( - INSERT INTO y - SELECT * FROM t -) -VALUES(FALSE); -ERROR: recursive query "t" must not contain data-modifying statements -LINE 1: WITH RECURSIVE t AS ( - ^ --- no RETURNING in a referenced data-modifying WITH -WITH t AS ( - INSERT INTO y VALUES(0) -) -SELECT * FROM t; -ERROR: WITH query "t" does not have a RETURNING clause -LINE 4: SELECT * FROM t; - ^ --- RETURNING tries to return its own output -WITH RECURSIVE t(action, a) AS ( - MERGE INTO y USING (VALUES (11)) v(a) ON y.a = v.a - WHEN NOT MATCHED THEN INSERT VALUES (v.a) - RETURNING merge_action(), (SELECT a FROM t) -) -SELECT * FROM t; -ERROR: recursive query "t" must not contain data-modifying statements -LINE 1: WITH RECURSIVE t(action, a) AS ( - ^ --- data-modifying WITH allowed only at the top level -SELECT * FROM ( - WITH t AS (UPDATE y SET a=a+1 RETURNING *) - SELECT * FROM t -) ss; -ERROR: WITH clause containing a data-modifying statement must be at the top level -LINE 2: WITH t AS (UPDATE y SET a=a+1 RETURNING *) - ^ --- most variants of rules aren't allowed -CREATE RULE y_rule AS ON INSERT TO y WHERE a=0 DO INSTEAD DELETE FROM y; -WITH t AS ( - INSERT INTO y VALUES(0) -) -VALUES(FALSE); -ERROR: conditional DO INSTEAD rules are not supported for data-modifying statements in WITH -CREATE OR REPLACE RULE y_rule AS ON INSERT TO y DO INSTEAD NOTHING; -WITH t AS ( - INSERT INTO y VALUES(0) -) -VALUES(FALSE); -ERROR: DO INSTEAD NOTHING rules are not supported for data-modifying statements in WITH -CREATE OR REPLACE RULE y_rule AS ON INSERT TO y DO INSTEAD NOTIFY foo; -WITH t AS ( - INSERT INTO y VALUES(0) -) -VALUES(FALSE); -ERROR: DO INSTEAD NOTIFY rules are not supported for data-modifying statements in WITH -CREATE OR REPLACE RULE y_rule AS ON INSERT TO y DO ALSO NOTIFY foo; -WITH t AS ( - INSERT INTO y VALUES(0) -) -VALUES(FALSE); -ERROR: DO ALSO rules are not supported for data-modifying statements in WITH -CREATE OR REPLACE RULE y_rule AS ON INSERT TO y - DO INSTEAD (NOTIFY foo; NOTIFY bar); -WITH t AS ( - INSERT INTO y VALUES(0) -) -VALUES(FALSE); -ERROR: multi-statement DO INSTEAD rules are not supported for data-modifying statements in WITH -DROP RULE y_rule ON y; --- check that parser lookahead for WITH doesn't cause any odd behavior -create table foo (with baz); -- fail, WITH is a reserved word -ERROR: syntax error at or near "with" -LINE 1: create table foo (with baz); - ^ -create table foo (with ordinality); -- fail, WITH is a reserved word -ERROR: 
syntax error at or near "with" -LINE 1: create table foo (with ordinality); - ^ -with ordinality as (select 1 as x) select * from ordinality; - x ---- - 1 -(1 row) - --- check sane response to attempt to modify CTE relation -WITH with_test AS (SELECT 42) INSERT INTO with_test VALUES (1); -ERROR: relation "with_test" does not exist -LINE 1: WITH with_test AS (SELECT 42) INSERT INTO with_test VALUES (... - ^ --- check response to attempt to modify table with same name as a CTE (perhaps --- surprisingly it works, because CTEs don't hide tables from data-modifying --- statements) -create temp table with_test (i int); -with with_test as (select 42) insert into with_test select * from with_test; -select * from with_test; - i ----- - 42 -(1 row) - -drop table with_test; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/xml_1.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/xml.out --- /Users/admin/pgsql/src/test/regress/expected/xml_1.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/xml.out 2024-12-13 13:20:12 @@ -1,1482 +1,2 @@ -CREATE TABLE xmltest ( - id int, - data xml -); -INSERT INTO xmltest VALUES (1, 'one'); -ERROR: unsupported XML feature -LINE 1: INSERT INTO xmltest VALUES (1, 'one'); - ^ -DETAIL: This functionality requires the server to be built with libxml support. -INSERT INTO xmltest VALUES (2, 'two'); -ERROR: unsupported XML feature -LINE 1: INSERT INTO xmltest VALUES (2, 'two'); - ^ -DETAIL: This functionality requires the server to be built with libxml support. -INSERT INTO xmltest VALUES (3, 'one', 'xml'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT pg_input_is_valid('oneone', 'xml'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT message FROM pg_input_error_info('', 'xml'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlcomment('test'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlcomment('-test'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlcomment('test-'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlcomment('--test'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlcomment('te st'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlconcat(xmlcomment('hello'), - xmlelement(NAME qux, 'foo'), - xmlcomment('world')); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlconcat('hello', 'you'); -ERROR: unsupported XML feature -LINE 1: SELECT xmlconcat('hello', 'you'); - ^ -DETAIL: This functionality requires the server to be built with libxml support. 
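xml_1.out is the alternate expected file for servers configured without libxml, so every statement that exercises the xml type in this hunk is expected to raise the same "unsupported XML feature" error instead of running. That error is reported with SQLSTATE 0A000 (feature_not_supported), so a build's XML support can be probed at runtime. A minimal sketch, not part of the regression suite:

DO $$
BEGIN
  -- any xml-producing expression will do as the probe
  PERFORM xmlcomment('probe');
  RAISE NOTICE 'server built with libxml: yes';
EXCEPTION
  WHEN feature_not_supported THEN  -- SQLSTATE 0A000, "unsupported XML feature"
    RAISE NOTICE 'server built with libxml: no';
END
$$;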
-SELECT xmlconcat(1, 2); -ERROR: argument of XMLCONCAT must be type xml, not type integer -LINE 1: SELECT xmlconcat(1, 2); - ^ -SELECT xmlconcat('bad', '', NULL, ''); -ERROR: unsupported XML feature -LINE 1: SELECT xmlconcat('', NULL, '', NULL, ''); -ERROR: unsupported XML feature -LINE 1: SELECT xmlconcat('', NULL, 'r'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlelement(name foo, xml 'br'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlelement(name foo, array[1, 2, 3]); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SET xmlbinary TO base64; -SELECT xmlelement(name foo, bytea 'bar'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SET xmlbinary TO hex; -SELECT xmlelement(name foo, bytea 'bar'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlelement(name foo, xmlattributes(true as bar)); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlelement(name foo, xmlattributes('2009-04-09 00:24:37'::timestamp as bar)); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlelement(name foo, xmlattributes('infinity'::timestamp as bar)); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlelement(name foo, xmlattributes('<>&"''' as funny, xml 'br' as funnier)); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlparse(content ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlparse(content ' '); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlparse(content 'abc'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlparse(content 'x'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlparse(content '&'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlparse(content '&idontexist;'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlparse(content ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlparse(content ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlparse(content '&idontexist;'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlparse(content ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlparse(document ' '); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. 
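On this no-libxml build the xmlparse calls all collapse into one error, which hides the distinction the test is exercising: SQL/XML's CONTENT form accepts any well-formed character data, while DOCUMENT requires a single root element. A short sketch of the split on a build with libxml, mirroring the regular xml.out expectations and shown here only for contrast:

SELECT xmlparse(content 'abc');            -- ok: bare text is valid XML content
SELECT xmlparse(document 'abc');           -- fails: a document needs one root element
SELECT xmlparse(document '<abc>x</abc>');  -- ok: singly rooted and well-formed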
-SELECT xmlparse(document 'abc'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlparse(document 'x'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlparse(document '&'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlparse(document '&idontexist;'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlparse(document ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlparse(document ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlparse(document '&idontexist;'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlparse(document ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlpi(name foo); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlpi(name xml); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlpi(name xmlstuff); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlpi(name foo, 'bar'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlpi(name foo, 'in?>valid'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlpi(name foo, null); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlpi(name xml, null); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlpi(name xmlstuff, null); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlpi(name "xml-stylesheet", 'href="mystyle.css" type="text/css"'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlpi(name foo, ' bar'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlroot(xml '', version no value, standalone no value); -ERROR: unsupported XML feature -LINE 1: SELECT xmlroot(xml '', version no value, standalone no... - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlroot(xml '', version '2.0'); -ERROR: unsupported XML feature -LINE 1: SELECT xmlroot(xml '', version '2.0'); - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlroot(xml '', version no value, standalone yes); -ERROR: unsupported XML feature -LINE 1: SELECT xmlroot(xml '', version no value, standalone ye... - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlroot(xml '', version no value, standalone yes); -ERROR: unsupported XML feature -LINE 1: SELECT xmlroot(xml '', version no... 
- ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlroot(xmlroot(xml '', version '1.0'), version '1.1', standalone no); -ERROR: unsupported XML feature -LINE 1: SELECT xmlroot(xmlroot(xml '', version '1.0'), version... - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlroot('', version no value, standalone no); -ERROR: unsupported XML feature -LINE 1: SELECT xmlroot('... - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlroot('', version no value, standalone no value); -ERROR: unsupported XML feature -LINE 1: SELECT xmlroot('... - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlroot('', version no value); -ERROR: unsupported XML feature -LINE 1: SELECT xmlroot('... - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlroot ( - xmlelement ( - name gazonk, - xmlattributes ( - 'val' AS name, - 1 + 1 AS num - ), - xmlelement ( - NAME qux, - 'foo' - ) - ), - version '1.0', - standalone yes -); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlserialize(content data as character varying(20)) FROM xmltest; - xmlserialize --------------- -(0 rows) - -SELECT xmlserialize(content 'good' as char(10)); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(content 'good' as char(10)); - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlserialize(document 'bad' as text); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(document 'bad' as text); - ^ -DETAIL: This functionality requires the server to be built with libxml support. --- indent -SELECT xmlserialize(DOCUMENT '42' AS text INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(DOCUMENT '42<... - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlserialize(CONTENT '42' AS text INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(CONTENT '42<... - ^ -DETAIL: This functionality requires the server to be built with libxml support. --- no indent -SELECT xmlserialize(DOCUMENT '42' AS text NO INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(DOCUMENT '42<... - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlserialize(CONTENT '42' AS text NO INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(CONTENT '42<... - ^ -DETAIL: This functionality requires the server to be built with libxml support. --- indent non singly-rooted xml -SELECT xmlserialize(DOCUMENT '7342' AS text INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(DOCUMENT '734... - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlserialize(CONTENT '7342' AS text INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(CONTENT '734... - ^ -DETAIL: This functionality requires the server to be built with libxml support. --- indent non singly-rooted xml with mixed contents -SELECT xmlserialize(DOCUMENT 'text node73text node42' AS text INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(DOCUMENT 'text node73text nod... - ^ -DETAIL: This functionality requires the server to be built with libxml support. 
-SELECT xmlserialize(CONTENT 'text node73text node42' AS text INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(CONTENT 'text node73text nod... - ^ -DETAIL: This functionality requires the server to be built with libxml support. --- indent singly-rooted xml with mixed contents -SELECT xmlserialize(DOCUMENT '42text node73' AS text INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(DOCUMENT '42<... - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlserialize(CONTENT '42text node73' AS text INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(CONTENT '42<... - ^ -DETAIL: This functionality requires the server to be built with libxml support. --- indent empty string -SELECT xmlserialize(DOCUMENT '' AS text INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(DOCUMENT '' AS text INDENT); - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlserialize(CONTENT '' AS text INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(CONTENT '' AS text INDENT); - ^ -DETAIL: This functionality requires the server to be built with libxml support. --- whitespaces -SELECT xmlserialize(DOCUMENT ' ' AS text INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(DOCUMENT ' ' AS text INDENT); - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlserialize(CONTENT ' ' AS text INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(CONTENT ' ' AS text INDENT); - ^ -DETAIL: This functionality requires the server to be built with libxml support. --- indent null -SELECT xmlserialize(DOCUMENT NULL AS text INDENT); - xmlserialize --------------- - -(1 row) - -SELECT xmlserialize(CONTENT NULL AS text INDENT); - xmlserialize --------------- - -(1 row) - --- indent with XML declaration -SELECT xmlserialize(DOCUMENT '73' AS text INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(DOCUMENT '73' AS text INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(CONTENT '' AS text INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(DOCUMENT '' AS text INDE... - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlserialize(CONTENT '' AS text INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(CONTENT '' AS text INDE... - ^ -DETAIL: This functionality requires the server to be built with libxml support. --- indent xml with empty element -SELECT xmlserialize(DOCUMENT '' AS text INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(DOCUMENT '' AS tex... - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlserialize(CONTENT '' AS text INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(CONTENT '' AS tex... - ^ -DETAIL: This functionality requires the server to be built with libxml support. --- 'no indent' = not using 'no indent' -SELECT xmlserialize(DOCUMENT '42' AS text) = xmlserialize(DOCUMENT '42' AS text NO INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(DOCUMENT '42<... - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlserialize(CONTENT '42' AS text) = xmlserialize(CONTENT '42' AS text NO INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(CONTENT '42<... 
- ^ -DETAIL: This functionality requires the server to be built with libxml support. --- indent xml strings containing blank nodes -SELECT xmlserialize(DOCUMENT ' ' AS text INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(DOCUMENT ' '... - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlserialize(CONTENT 'text node ' AS text INDENT); -ERROR: unsupported XML feature -LINE 1: SELECT xmlserialize(CONTENT 'text node ... - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xml 'bar' IS DOCUMENT; -ERROR: unsupported XML feature -LINE 1: SELECT xml 'bar' IS DOCUMENT; - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xml 'barfoo' IS DOCUMENT; -ERROR: unsupported XML feature -LINE 1: SELECT xml 'barfoo' IS DOCUMENT; - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xml '' IS NOT DOCUMENT; -ERROR: unsupported XML feature -LINE 1: SELECT xml '' IS NOT DOCUMENT; - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xml 'abc' IS NOT DOCUMENT; -ERROR: unsupported XML feature -LINE 1: SELECT xml 'abc' IS NOT DOCUMENT; - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT '<>' IS NOT DOCUMENT; -ERROR: unsupported XML feature -LINE 1: SELECT '<>' IS NOT DOCUMENT; - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlagg(data) FROM xmltest; - xmlagg --------- - -(1 row) - -SELECT xmlagg(data) FROM xmltest WHERE id > 10; - xmlagg --------- - -(1 row) - -SELECT xmlelement(name employees, xmlagg(xmlelement(name name, name))) FROM emp; -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. --- Check mapping SQL identifier to XML name -SELECT xmlpi(name ":::_xml_abc135.%-&_"); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmlpi(name "123"); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -PREPARE foo (xml) AS SELECT xmlconcat('', $1); -ERROR: unsupported XML feature -LINE 1: PREPARE foo (xml) AS SELECT xmlconcat('', $1); - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SET XML OPTION DOCUMENT; -EXECUTE foo (''); -ERROR: prepared statement "foo" does not exist -EXECUTE foo ('bad'); -ERROR: prepared statement "foo" does not exist -SELECT xml ''; -ERROR: unsupported XML feature -LINE 1: SELECT xml ''; - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SET XML OPTION CONTENT; -EXECUTE foo (''); -ERROR: prepared statement "foo" does not exist -EXECUTE foo ('good'); -ERROR: prepared statement "foo" does not exist -SELECT xml ' '; -ERROR: unsupported XML feature -LINE 1: SELECT xml ' '; -ERROR: unsupported XML feature -LINE 1: SELECT xml ' '; -ERROR: unsupported XML feature -LINE 1: SELECT xml ''; - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xml ' oops '; -ERROR: unsupported XML feature -LINE 1: SELECT xml ' oops '; - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xml ' '; -ERROR: unsupported XML feature -LINE 1: SELECT xml ' '; - ^ -DETAIL: This functionality requires the server to be built with libxml support. 
-SELECT xml '';
-ERROR:  unsupported XML feature
-LINE 1: SELECT xml '';
-                   ^
-DETAIL:  This functionality requires the server to be built with libxml support.
--- Test backwards parsing
-CREATE VIEW xmlview1 AS SELECT xmlcomment('test');
-CREATE VIEW xmlview2 AS SELECT xmlconcat('hello', 'you');
-ERROR:  unsupported XML feature
-LINE 1: CREATE VIEW xmlview2 AS SELECT xmlconcat('hello', 'you');
-                                                 ^
-DETAIL:  This functionality requires the server to be built with libxml support.
-CREATE VIEW xmlview3 AS SELECT xmlelement(name element, xmlattributes (1 as ":one:", 'deuce' as two), 'content&');
-ERROR:  unsupported XML feature
-DETAIL:  This functionality requires the server to be built with libxml support.
-CREATE VIEW xmlview4 AS SELECT xmlelement(name employee, xmlforest(name, age, salary as pay)) FROM emp;
-ERROR:  unsupported XML feature
-DETAIL:  This functionality requires the server to be built with libxml support.
-CREATE VIEW xmlview5 AS SELECT xmlparse(content 'x');
-CREATE VIEW xmlview6 AS SELECT xmlpi(name foo, 'bar');
-ERROR:  unsupported XML feature
-DETAIL:  This functionality requires the server to be built with libxml support.
-CREATE VIEW xmlview7 AS SELECT xmlroot(xml '', version no value, standalone yes);
-ERROR:  unsupported XML feature
-LINE 1: CREATE VIEW xmlview7 AS SELECT xmlroot(xml '', version...
-                                                   ^
-DETAIL:  This functionality requires the server to be built with libxml support.
-CREATE VIEW xmlview8 AS SELECT xmlserialize(content 'good' as char(10));
-ERROR:  unsupported XML feature
-LINE 1: ...EATE VIEW xmlview8 AS SELECT xmlserialize(content 'good' as ...
-                                                     ^
-DETAIL:  This functionality requires the server to be built with libxml support.
-CREATE VIEW xmlview9 AS SELECT xmlserialize(content 'good' as text);
-ERROR:  unsupported XML feature
-LINE 1: ...EATE VIEW xmlview9 AS SELECT xmlserialize(content 'good' as ...
-                                                     ^
-DETAIL:  This functionality requires the server to be built with libxml support.
-SELECT table_name, view_definition FROM information_schema.views
-  WHERE table_name LIKE 'xmlview%' ORDER BY 1;
- table_name |                         view_definition
-------------+--------------------------------------------------------------------------------
- xmlview1   |  SELECT xmlcomment('test'::text) AS xmlcomment;
- xmlview5   |  SELECT XMLPARSE(CONTENT 'x'::text STRIP WHITESPACE) AS "xmlparse";
-(2 rows)
-
--- Test XPath expressions evaluation
-SELECT xpath('/value', data) FROM xmltest;
- xpath
-------
-(0 rows)
-
-SELECT xpath(NULL, NULL) IS NULL FROM xmltest;
- ?column?
----------
-(0 rows)
-
-SELECT xpath('', '');
-ERROR:  unsupported XML feature
-LINE 1: SELECT xpath('', '');
-                         ^
-DETAIL:  This functionality requires the server to be built with libxml support.
-SELECT xpath('//text()', 'number one');
-ERROR:  unsupported XML feature
-LINE 1: SELECT xpath('//text()', 'number one', ARRAY[ARRAY['loc', 'http://127.0.0.1']]);
-ERROR:  unsupported XML feature
-LINE 1: SELECT xpath('//loc:piece/@id', 'number one', ARRAY[ARRAY['loc', 'http://127.0.0.1']]);
-ERROR:  unsupported XML feature
-LINE 1: SELECT xpath('//loc:piece', 'number one', ARRAY[ARRAY['loc', 'http://127.0.0.1']]);
-ERROR:  unsupported XML feature
-LINE 1: SELECT xpath('//loc:piece', '');
-ERROR:  unsupported XML feature
-LINE 1: SELECT xpath('//@value', '');
-                                 ^
-DETAIL:  This functionality requires the server to be built with libxml support.
-SELECT xpath('''<>''', '');
-ERROR:  unsupported XML feature
-LINE 1: SELECT xpath('''<>''', '');
-                               ^
-DETAIL:  This functionality requires the server to be built with libxml support.
-SELECT xpath('count(//*)', ''); -ERROR: unsupported XML feature -LINE 1: SELECT xpath('count(//*)', ''); - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xpath('count(//*)=0', ''); -ERROR: unsupported XML feature -LINE 1: SELECT xpath('count(//*)=0', ''); - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xpath('count(//*)=3', ''); -ERROR: unsupported XML feature -LINE 1: SELECT xpath('count(//*)=3', ''); - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xpath('name(/*)', ''); -ERROR: unsupported XML feature -LINE 1: SELECT xpath('name(/*)', ''); - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xpath('/nosuchtag', ''); -ERROR: unsupported XML feature -LINE 1: SELECT xpath('/nosuchtag', ''); - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xpath('root', ''); -ERROR: unsupported XML feature -LINE 1: SELECT xpath('root', ''); - ^ -DETAIL: This functionality requires the server to be built with libxml support. --- Round-trip non-ASCII data through xpath(). -DO $$ -DECLARE - xml_declaration text := ''; - degree_symbol text; - res xml[]; -BEGIN - -- Per the documentation, except when the server encoding is UTF8, xpath() - -- may not work on non-ASCII data. The untranslatable_character and - -- undefined_function traps below, currently dead code, will become relevant - -- if we remove this limitation. - IF current_setting('server_encoding') <> 'UTF8' THEN - RAISE LOG 'skip: encoding % unsupported for xpath', - current_setting('server_encoding'); - RETURN; - END IF; - - degree_symbol := convert_from('\xc2b0', 'UTF8'); - res := xpath('text()', (xml_declaration || - '' || degree_symbol || '')::xml); - IF degree_symbol <> res[1]::text THEN - RAISE 'expected % (%), got % (%)', - degree_symbol, convert_to(degree_symbol, 'UTF8'), - res[1], convert_to(res[1]::text, 'UTF8'); - END IF; -EXCEPTION - -- character with byte sequence 0xc2 0xb0 in encoding "UTF8" has no equivalent in encoding "LATIN8" - WHEN untranslatable_character - -- default conversion function for encoding "UTF8" to "MULE_INTERNAL" does not exist - OR undefined_function - -- unsupported XML feature - OR feature_not_supported THEN - RAISE LOG 'skip: %', SQLERRM; -END -$$; --- Test xmlexists and xpath_exists -SELECT xmlexists('//town[text() = ''Toronto'']' PASSING BY REF 'Bidford-on-AvonCwmbranBristol'); -ERROR: unsupported XML feature -LINE 1: ...sts('//town[text() = ''Toronto'']' PASSING BY REF 'Bidford-on-AvonCwmbranBristol'); -ERROR: unsupported XML feature -LINE 1: ...sts('//town[text() = ''Cwmbran'']' PASSING BY REF ''); -ERROR: unsupported XML feature -LINE 1: ...LECT xmlexists('count(/nosuchtag)' PASSING BY REF '')... - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xpath_exists('//town[text() = ''Toronto'']','Bidford-on-AvonCwmbranBristol'::xml); -ERROR: unsupported XML feature -LINE 1: ...ELECT xpath_exists('//town[text() = ''Toronto'']','Bidford-on-AvonCwmbranBristol'::xml); -ERROR: unsupported XML feature -LINE 1: ...ELECT xpath_exists('//town[text() = ''Cwmbran'']',''::xml); -ERROR: unsupported XML feature -LINE 1: SELECT xpath_exists('count(/nosuchtag)', ''::xml); - ^ -DETAIL: This functionality requires the server to be built with libxml support. 
-INSERT INTO xmltest VALUES (4, 'BudvarfreeCarlinglots'::xml); -ERROR: unsupported XML feature -LINE 1: INSERT INTO xmltest VALUES (4, 'BudvarMolsonfreeCarlinglots'::xml); -ERROR: unsupported XML feature -LINE 1: INSERT INTO xmltest VALUES (5, 'MolsonBudvarfreeCarlinglots'::xml); -ERROR: unsupported XML feature -LINE 1: INSERT INTO xmltest VALUES (6, 'MolsonfreeCarlinglots'::xml); -ERROR: unsupported XML feature -LINE 1: INSERT INTO xmltest VALUES (7, 'number one'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xml_is_well_formed('bar'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xml_is_well_formed('bar'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xml_is_well_formed('&'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xml_is_well_formed('&idontexist;'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xml_is_well_formed(''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xml_is_well_formed(''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xml_is_well_formed('&idontexist;'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SET xmloption TO CONTENT; -SELECT xml_is_well_formed('abc'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. --- Since xpath() deals with namespaces, it's a bit stricter about --- what's well-formed and what's not. If we don't obey these rules --- (i.e. ignore namespace-related errors from libxml), xpath() --- fails in subtle ways. The following would for example produce --- the xml value --- --- which is invalid because '<' may not appear un-escaped in --- attribute values. --- Since different libxml versions emit slightly different --- error messages, we suppress the DETAIL in this test. -\set VERBOSITY terse -SELECT xpath('/*', ''); -ERROR: unsupported XML feature at character 20 -\set VERBOSITY default --- Again, the XML isn't well-formed for namespace purposes -SELECT xpath('/*', ''); -ERROR: unsupported XML feature -LINE 1: SELECT xpath('/*', ''); - ^ -DETAIL: This functionality requires the server to be built with libxml support. --- XPath deprecates relative namespaces, but they're not supposed to --- throw an error, only a warning. -SELECT xpath('/*', ''); -ERROR: unsupported XML feature -LINE 1: SELECT xpath('/*', ''); - ^ -DETAIL: This functionality requires the server to be built with libxml support. --- External entity references should not leak filesystem information. -SELECT XMLPARSE(DOCUMENT ']>&c;'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT XMLPARSE(DOCUMENT ']>&c;'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. --- This might or might not load the requested DTD, but it mustn't throw error. -SELECT XMLPARSE(DOCUMENT ' '); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. 
--- XMLPATH tests -CREATE TABLE xmldata(data xml); -INSERT INTO xmldata VALUES(' - - AU - Australia - 3 - - - CN - China - 3 - - - HK - HongKong - 3 - - - IN - India - 3 - - - JP - Japan - 3Sinzo Abe - - - SG - Singapore - 3791 - -'); -ERROR: unsupported XML feature -LINE 1: INSERT INTO xmldata VALUES(' - ^ -DETAIL: This functionality requires the server to be built with libxml support. --- XMLTABLE with columns -SELECT xmltable.* - FROM (SELECT data FROM xmldata) x, - LATERAL XMLTABLE('/ROWS/ROW' - PASSING data - COLUMNS id int PATH '@id', - _id FOR ORDINALITY, - country_name text PATH 'COUNTRY_NAME/text()' NOT NULL, - country_id text PATH 'COUNTRY_ID', - region_id int PATH 'REGION_ID', - size float PATH 'SIZE', - unit text PATH 'SIZE/@unit', - premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); - id | _id | country_name | country_id | region_id | size | unit | premier_name -----+-----+--------------+------------+-----------+------+------+-------------- -(0 rows) - -CREATE VIEW xmltableview1 AS SELECT xmltable.* - FROM (SELECT data FROM xmldata) x, - LATERAL XMLTABLE('/ROWS/ROW' - PASSING data - COLUMNS id int PATH '@id', - _id FOR ORDINALITY, - country_name text PATH 'COUNTRY_NAME/text()' NOT NULL, - country_id text PATH 'COUNTRY_ID', - region_id int PATH 'REGION_ID', - size float PATH 'SIZE', - unit text PATH 'SIZE/@unit', - premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); -SELECT * FROM xmltableview1; - id | _id | country_name | country_id | region_id | size | unit | premier_name -----+-----+--------------+------------+-----------+------+------+-------------- -(0 rows) - -\sv xmltableview1 -CREATE OR REPLACE VIEW public.xmltableview1 AS - SELECT "xmltable".id, - "xmltable"._id, - "xmltable".country_name, - "xmltable".country_id, - "xmltable".region_id, - "xmltable".size, - "xmltable".unit, - "xmltable".premier_name - FROM ( SELECT xmldata.data - FROM xmldata) x, - LATERAL XMLTABLE(('/ROWS/ROW'::text) PASSING (x.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) -EXPLAIN (COSTS OFF) SELECT * FROM xmltableview1; - QUERY PLAN ------------------------------------------ - Nested Loop - -> Seq Scan on xmldata - -> Table Function Scan on "xmltable" -(3 rows) - -EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM xmltableview1; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Nested Loop - Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name - -> Seq Scan on public.xmldata - Output: xmldata.data - -> Table Function Scan on "xmltable" - Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name - Table Function Call: 
XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) -(7 rows) - --- errors -SELECT * FROM XMLTABLE (ROW () PASSING null COLUMNS v1 timestamp) AS f (v1, v2); -ERROR: XMLTABLE function has 1 columns available but 2 columns specified --- XMLNAMESPACES tests -SELECT * FROM XMLTABLE(XMLNAMESPACES('http://x.y' AS zz), - '/zz:rows/zz:row' - PASSING '10' - COLUMNS a int PATH 'zz:a'); -ERROR: unsupported XML feature -LINE 3: PASSING '10' - COLUMNS a int PATH 'zz:a'); -ERROR: unsupported XML feature -LINE 3: PASSING '10' - COLUMNS a int PATH 'a'); -ERROR: unsupported XML feature -LINE 3: PASSING '' - COLUMNS a text PATH 'foo/namespace::node()'); -ERROR: unsupported XML feature -LINE 2: PASSING '' - ^ -DETAIL: This functionality requires the server to be built with libxml support. --- used in prepare statements -PREPARE pp AS -SELECT xmltable.* - FROM (SELECT data FROM xmldata) x, - LATERAL XMLTABLE('/ROWS/ROW' - PASSING data - COLUMNS id int PATH '@id', - _id FOR ORDINALITY, - country_name text PATH 'COUNTRY_NAME' NOT NULL, - country_id text PATH 'COUNTRY_ID', - region_id int PATH 'REGION_ID', - size float PATH 'SIZE', - unit text PATH 'SIZE/@unit', - premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); -EXECUTE pp; - id | _id | country_name | country_id | region_id | size | unit | premier_name -----+-----+--------------+------------+-----------+------+------+-------------- -(0 rows) - -SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int); - COUNTRY_NAME | REGION_ID ---------------+----------- -(0 rows) - -SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id FOR ORDINALITY, "COUNTRY_NAME" text, "REGION_ID" int); - id | COUNTRY_NAME | REGION_ID -----+--------------+----------- -(0 rows) - -SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int); - id | COUNTRY_NAME | REGION_ID -----+--------------+----------- -(0 rows) - -SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id'); - id ----- -(0 rows) - -SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id FOR ORDINALITY); - id ----- -(0 rows) - -SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int, rawdata xml PATH '.'); - id | COUNTRY_NAME | REGION_ID | rawdata -----+--------------+-----------+--------- -(0 rows) - -SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int, rawdata xml PATH './*'); - id | COUNTRY_NAME | REGION_ID | rawdata -----+--------------+-----------+--------- -(0 rows) - -SELECT * FROM xmltable('/root' passing 'a1aa2a 
bbbbxxxcccc' COLUMNS element text); -ERROR: unsupported XML feature -LINE 1: SELECT * FROM xmltable('/root' passing 'a1aa1aa2a bbbbxxxcccc' COLUMNS element text PATH 'element/text()'); -- should fail -ERROR: unsupported XML feature -LINE 1: SELECT * FROM xmltable('/root' passing 'a1a &"<>!foo]]>2' columns c text); -ERROR: unsupported XML feature -LINE 1: select * from xmltable('d/r' passing ''"&<>' COLUMNS ent text); -ERROR: unsupported XML feature -LINE 1: SELECT * FROM xmltable('/x/a' PASSING '''"&<>' COLUMNS ent xml); -ERROR: unsupported XML feature -LINE 1: SELECT * FROM xmltable('/x/a' PASSING '' Seq Scan on public.xmldata - Output: xmldata.data - -> Table Function Scan on "xmltable" - Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name - Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) -(7 rows) - --- test qual -SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int) WHERE "COUNTRY_NAME" = 'Japan'; - COUNTRY_NAME | REGION_ID ---------------+----------- -(0 rows) - -EXPLAIN (VERBOSE, COSTS OFF) -SELECT f.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int) AS f WHERE "COUNTRY_NAME" = 'Japan'; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Nested Loop - Output: f."COUNTRY_NAME", f."REGION_ID" - -> Seq Scan on public.xmldata - Output: xmldata.data - -> Table Function Scan on "xmltable" f - Output: f."COUNTRY_NAME", f."REGION_ID" - Table Function Call: XMLTABLE(('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]'::text) PASSING (xmldata.data) COLUMNS "COUNTRY_NAME" text, "REGION_ID" integer) - Filter: (f."COUNTRY_NAME" = 'Japan'::text) -(8 rows) - -EXPLAIN (VERBOSE, FORMAT JSON, COSTS OFF) -SELECT f.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int) AS f WHERE "COUNTRY_NAME" = 'Japan'; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - [ + - { + - "Plan": { + - "Node Type": "Nested Loop", + - "Parallel Aware": false, + - "Async Capable": false, + - "Join Type": "Inner", + - "Disabled": false, + - "Output": ["f.\"COUNTRY_NAME\"", "f.\"REGION_ID\""], + - "Inner Unique": false, + - "Plans": [ + - { + - "Node Type": "Seq Scan", + - "Parent Relationship": "Outer", + - "Parallel Aware": false, + - "Async Capable": false, + - "Relation Name": "xmldata", + - "Schema": "public", + - "Alias": "xmldata", + - "Disabled": false, + - "Output": ["xmldata.data"] + - }, + - { + - "Node Type": "Table Function Scan", + - "Parent Relationship": "Inner", + - "Parallel Aware": false, + - "Async Capable": false, 
+ - "Table Function Name": "xmltable", + - "Alias": "f", + - "Disabled": false, + - "Output": ["f.\"COUNTRY_NAME\"", "f.\"REGION_ID\""], + - "Table Function Call": "XMLTABLE(('/ROWS/ROW[COUNTRY_NAME=\"Japan\" or COUNTRY_NAME=\"India\"]'::text) PASSING (xmldata.data) COLUMNS \"COUNTRY_NAME\" text, \"REGION_ID\" integer)",+ - "Filter": "(f.\"COUNTRY_NAME\" = 'Japan'::text)" + - } + - ] + - } + - } + - ] -(1 row) - --- should to work with more data -INSERT INTO xmldata VALUES(' - - CZ - Czech Republic - 2Milos Zeman - - - DE - Germany - 2 - - - FR - France - 2 - -'); -ERROR: unsupported XML feature -LINE 1: INSERT INTO xmldata VALUES(' - ^ -DETAIL: This functionality requires the server to be built with libxml support. -INSERT INTO xmldata VALUES(' - - EG - Egypt - 1 - - - SD - Sudan - 1 - -'); -ERROR: unsupported XML feature -LINE 1: INSERT INTO xmldata VALUES(' - ^ -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmltable.* - FROM (SELECT data FROM xmldata) x, - LATERAL XMLTABLE('/ROWS/ROW' - PASSING data - COLUMNS id int PATH '@id', - _id FOR ORDINALITY, - country_name text PATH 'COUNTRY_NAME' NOT NULL, - country_id text PATH 'COUNTRY_ID', - region_id int PATH 'REGION_ID', - size float PATH 'SIZE', - unit text PATH 'SIZE/@unit', - premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); - id | _id | country_name | country_id | region_id | size | unit | premier_name -----+-----+--------------+------------+-----------+------+------+-------------- -(0 rows) - -SELECT xmltable.* - FROM (SELECT data FROM xmldata) x, - LATERAL XMLTABLE('/ROWS/ROW' - PASSING data - COLUMNS id int PATH '@id', - _id FOR ORDINALITY, - country_name text PATH 'COUNTRY_NAME' NOT NULL, - country_id text PATH 'COUNTRY_ID', - region_id int PATH 'REGION_ID', - size float PATH 'SIZE', - unit text PATH 'SIZE/@unit', - premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified') - WHERE region_id = 2; - id | _id | country_name | country_id | region_id | size | unit | premier_name -----+-----+--------------+------------+-----------+------+------+-------------- -(0 rows) - -EXPLAIN (VERBOSE, COSTS OFF) -SELECT xmltable.* - FROM (SELECT data FROM xmldata) x, - LATERAL XMLTABLE('/ROWS/ROW' - PASSING data - COLUMNS id int PATH '@id', - _id FOR ORDINALITY, - country_name text PATH 'COUNTRY_NAME' NOT NULL, - country_id text PATH 'COUNTRY_ID', - region_id int PATH 'REGION_ID', - size float PATH 'SIZE', - unit text PATH 'SIZE/@unit', - premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified') - WHERE region_id = 2; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - Nested Loop - Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name - -> Seq Scan on public.xmldata - Output: xmldata.data - -> Table Function Scan on "xmltable" - Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name - Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) 
COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) - Filter: ("xmltable".region_id = 2) -(8 rows) - --- should fail, NULL value -SELECT xmltable.* - FROM (SELECT data FROM xmldata) x, - LATERAL XMLTABLE('/ROWS/ROW' - PASSING data - COLUMNS id int PATH '@id', - _id FOR ORDINALITY, - country_name text PATH 'COUNTRY_NAME' NOT NULL, - country_id text PATH 'COUNTRY_ID', - region_id int PATH 'REGION_ID', - size float PATH 'SIZE' NOT NULL, - unit text PATH 'SIZE/@unit', - premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); - id | _id | country_name | country_id | region_id | size | unit | premier_name -----+-----+--------------+------------+-----------+------+------+-------------- -(0 rows) - --- if all is ok, then result is empty --- one line xml test -WITH - x AS (SELECT proname, proowner, procost::numeric, pronargs, - array_to_string(proargnames,',') as proargnames, - case when proargtypes <> '' then array_to_string(proargtypes::oid[],',') end as proargtypes - FROM pg_proc WHERE proname = 'f_leak'), - y AS (SELECT xmlelement(name proc, - xmlforest(proname, proowner, - procost, pronargs, - proargnames, proargtypes)) as proc - FROM x), - z AS (SELECT xmltable.* - FROM y, - LATERAL xmltable('/proc' PASSING proc - COLUMNS proname name, - proowner oid, - procost float, - pronargs int, - proargnames text, - proargtypes text)) - SELECT * FROM z - EXCEPT SELECT * FROM x; -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. --- multi line xml test, result should be empty too -WITH - x AS (SELECT proname, proowner, procost::numeric, pronargs, - array_to_string(proargnames,',') as proargnames, - case when proargtypes <> '' then array_to_string(proargtypes::oid[],',') end as proargtypes - FROM pg_proc), - y AS (SELECT xmlelement(name data, - xmlagg(xmlelement(name proc, - xmlforest(proname, proowner, procost, - pronargs, proargnames, proargtypes)))) as doc - FROM x), - z AS (SELECT xmltable.* - FROM y, - LATERAL xmltable('/data/proc' PASSING doc - COLUMNS proname name, - proowner oid, - procost float, - pronargs int, - proargnames text, - proargtypes text)) - SELECT * FROM z - EXCEPT SELECT * FROM x; -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -CREATE TABLE xmltest2(x xml, _path text); -INSERT INTO xmltest2 VALUES('1', 'A'); -ERROR: unsupported XML feature -LINE 1: INSERT INTO xmltest2 VALUES('1', 'A')... - ^ -DETAIL: This functionality requires the server to be built with libxml support. -INSERT INTO xmltest2 VALUES('2', 'B'); -ERROR: unsupported XML feature -LINE 1: INSERT INTO xmltest2 VALUES('2', 'B')... - ^ -DETAIL: This functionality requires the server to be built with libxml support. -INSERT INTO xmltest2 VALUES('3', 'C'); -ERROR: unsupported XML feature -LINE 1: INSERT INTO xmltest2 VALUES('3', 'C')... - ^ -DETAIL: This functionality requires the server to be built with libxml support. -INSERT INTO xmltest2 VALUES('2', 'D'); -ERROR: unsupported XML feature -LINE 1: INSERT INTO xmltest2 VALUES('2', 'D')... - ^ -DETAIL: This functionality requires the server to be built with libxml support. 
-SELECT xmltable.* FROM xmltest2, LATERAL xmltable('/d/r' PASSING x COLUMNS a int PATH '' || lower(_path) || 'c'); - a ---- -(0 rows) - -SELECT xmltable.* FROM xmltest2, LATERAL xmltable(('/d/r/' || lower(_path) || 'c') PASSING x COLUMNS a int PATH '.'); - a ---- -(0 rows) - -SELECT xmltable.* FROM xmltest2, LATERAL xmltable(('/d/r/' || lower(_path) || 'c') PASSING x COLUMNS a int PATH 'x' DEFAULT ascii(_path) - 54); - a ---- -(0 rows) - --- XPath result can be boolean or number too -SELECT * FROM XMLTABLE('*' PASSING 'a' COLUMNS a xml PATH '.', b text PATH '.', c text PATH '"hi"', d boolean PATH '. = "a"', e integer PATH 'string-length(.)'); -ERROR: unsupported XML feature -LINE 1: SELECT * FROM XMLTABLE('*' PASSING 'a' COLUMNS a xml ... - ^ -DETAIL: This functionality requires the server to be built with libxml support. -\x -SELECT * FROM XMLTABLE('*' PASSING 'pre&deeppost' COLUMNS x xml PATH '/e/n2', y xml PATH '/'); -ERROR: unsupported XML feature -LINE 1: SELECT * FROM XMLTABLE('*' PASSING 'pre"', b xml PATH '""'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmltext(NULL); - xmltext ---------- - -(1 row) - -SELECT xmltext(''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmltext(' '); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmltext('foo `$_-+?=*^%!|/\()[]{}'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmltext('foo & <"bar">'); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. -SELECT xmltext('x'|| '

73

'::xml || .42 || true || 'j'::char); -ERROR: unsupported XML feature -LINE 1: SELECT xmltext('x'|| '

73

'::xml || .42 || true || 'j':... - ^ -DETAIL: This functionality requires the server to be built with libxml support. +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/partition_join.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/partition_join.out --- /Users/admin/pgsql/src/test/regress/expected/partition_join.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/partition_join.out 2024-12-13 13:20:13 @@ -1,5231 +1,2 @@ --- --- PARTITION_JOIN --- Test partitionwise join between partitioned tables --- --- Enable partitionwise join, which by default is disabled. -SET enable_partitionwise_join to true; --- --- partitioned by a single column --- -CREATE TABLE prt1 (a int, b int, c varchar) PARTITION BY RANGE(a); -CREATE TABLE prt1_p1 PARTITION OF prt1 FOR VALUES FROM (0) TO (250); -CREATE TABLE prt1_p3 PARTITION OF prt1 FOR VALUES FROM (500) TO (600); -CREATE TABLE prt1_p2 PARTITION OF prt1 FOR VALUES FROM (250) TO (500); -INSERT INTO prt1 SELECT i, i % 25, to_char(i, 'FM0000') FROM generate_series(0, 599) i WHERE i % 2 = 0; -CREATE INDEX iprt1_p1_a on prt1_p1(a); -CREATE INDEX iprt1_p2_a on prt1_p2(a); -CREATE INDEX iprt1_p3_a on prt1_p3(a); -ANALYZE prt1; -CREATE TABLE prt2 (a int, b int, c varchar) PARTITION BY RANGE(b); -CREATE TABLE prt2_p1 PARTITION OF prt2 FOR VALUES FROM (0) TO (250); -CREATE TABLE prt2_p2 PARTITION OF prt2 FOR VALUES FROM (250) TO (500); -CREATE TABLE prt2_p3 PARTITION OF prt2 FOR VALUES FROM (500) TO (600); -INSERT INTO prt2 SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(0, 599) i WHERE i % 3 = 0; -CREATE INDEX iprt2_p1_b on prt2_p1(b); -CREATE INDEX iprt2_p2_b on prt2_p2(b); -CREATE INDEX iprt2_p3_b on prt2_p3(b); -ANALYZE prt2; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN --------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_p1 t2_1 - -> Hash - -> Seq Scan on prt1_p1 t1_1 - Filter: (b = 0) - -> Hash Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_p2 t2_2 - -> Hash - -> Seq Scan on prt1_p2 t1_2 - Filter: (b = 0) - -> Hash Join - Hash Cond: (t2_3.b = t1_3.a) - -> Seq Scan on prt2_p3 t2_3 - -> Hash - -> Seq Scan on prt1_p3 t1_3 - Filter: (b = 0) -(21 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 0 | 0000 | 0 | 0000 - 150 | 0150 | 150 | 0150 - 300 | 0300 | 300 | 0300 - 450 | 0450 | 450 | 0450 -(4 rows) - --- inner join with partially-redundant join clauses -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.a AND t1.a = t2.b ORDER BY t1.a, t2.b; - QUERY PLAN ---------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Merge Join - Merge Cond: (t1_1.a = t2_1.a) - -> Index Scan using iprt1_p1_a on prt1_p1 t1_1 - -> Sort - Sort Key: t2_1.b - -> Seq Scan on prt2_p1 t2_1 - Filter: (a = b) - -> Hash Join - Hash Cond: (t1_2.a = t2_2.a) - -> Seq Scan on prt1_p2 t1_2 - -> Hash - -> Seq Scan on prt2_p2 t2_2 - Filter: (a = b) - -> Hash Join - Hash Cond: 
(t1_3.a = t2_3.a) - -> Seq Scan on prt1_p3 t1_3 - -> Hash - -> Seq Scan on prt2_p3 t2_3 - Filter: (a = b) -(22 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.a AND t1.a = t2.b ORDER BY t1.a, t2.b; - a | c | b | c -----+------+----+------ - 0 | 0000 | 0 | 0000 - 6 | 0006 | 6 | 0006 - 12 | 0012 | 12 | 0012 - 18 | 0018 | 18 | 0018 - 24 | 0024 | 24 | 0024 -(5 rows) - --- left outer join, 3-way -EXPLAIN (COSTS OFF) -SELECT COUNT(*) FROM prt1 t1 - LEFT JOIN prt1 t2 ON t1.a = t2.a - LEFT JOIN prt1 t3 ON t2.a = t3.a; - QUERY PLAN --------------------------------------------------------- - Aggregate - -> Append - -> Hash Left Join - Hash Cond: (t2_1.a = t3_1.a) - -> Hash Left Join - Hash Cond: (t1_1.a = t2_1.a) - -> Seq Scan on prt1_p1 t1_1 - -> Hash - -> Seq Scan on prt1_p1 t2_1 - -> Hash - -> Seq Scan on prt1_p1 t3_1 - -> Hash Left Join - Hash Cond: (t2_2.a = t3_2.a) - -> Hash Left Join - Hash Cond: (t1_2.a = t2_2.a) - -> Seq Scan on prt1_p2 t1_2 - -> Hash - -> Seq Scan on prt1_p2 t2_2 - -> Hash - -> Seq Scan on prt1_p2 t3_2 - -> Hash Left Join - Hash Cond: (t2_3.a = t3_3.a) - -> Hash Left Join - Hash Cond: (t1_3.a = t2_3.a) - -> Seq Scan on prt1_p3 t1_3 - -> Hash - -> Seq Scan on prt1_p3 t2_3 - -> Hash - -> Seq Scan on prt1_p3 t3_3 -(29 rows) - -SELECT COUNT(*) FROM prt1 t1 - LEFT JOIN prt1 t2 ON t1.a = t2.a - LEFT JOIN prt1 t3 ON t2.a = t3.a; - count -------- - 300 -(1 row) - --- left outer join, with whole-row reference; partitionwise join does not apply -EXPLAIN (COSTS OFF) -SELECT t1, t2 FROM prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN --------------------------------------------------- - Sort - Sort Key: t1.a, t2.b - -> Hash Right Join - Hash Cond: (t2.b = t1.a) - -> Append - -> Seq Scan on prt2_p1 t2_1 - -> Seq Scan on prt2_p2 t2_2 - -> Seq Scan on prt2_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on prt1_p1 t1_1 - Filter: (b = 0) - -> Seq Scan on prt1_p2 t1_2 - Filter: (b = 0) - -> Seq Scan on prt1_p3 t1_3 - Filter: (b = 0) -(16 rows) - -SELECT t1, t2 FROM prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; - t1 | t2 ---------------+-------------- - (0,0,0000) | (0,0,0000) - (50,0,0050) | - (100,0,0100) | - (150,0,0150) | (0,150,0150) - (200,0,0200) | - (250,0,0250) | - (300,0,0300) | (0,300,0300) - (350,0,0350) | - (400,0,0400) | - (450,0,0450) | (0,450,0450) - (500,0,0500) | - (550,0,0550) | -(12 rows) - --- right outer join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1 RIGHT JOIN prt2 t2 ON t1.a = t2.b WHERE t2.a = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ---------------------------------------------------------------- - Sort - Sort Key: t1.a, t2.b - -> Append - -> Hash Right Join - Hash Cond: (t1_1.a = t2_1.b) - -> Seq Scan on prt1_p1 t1_1 - -> Hash - -> Seq Scan on prt2_p1 t2_1 - Filter: (a = 0) - -> Hash Right Join - Hash Cond: (t1_2.a = t2_2.b) - -> Seq Scan on prt1_p2 t1_2 - -> Hash - -> Seq Scan on prt2_p2 t2_2 - Filter: (a = 0) - -> Nested Loop Left Join - -> Seq Scan on prt2_p3 t2_3 - Filter: (a = 0) - -> Index Scan using iprt1_p3_a on prt1_p3 t1_3 - Index Cond: (a = t2_3.b) -(20 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1 RIGHT JOIN prt2 t2 ON t1.a = t2.b WHERE t2.a = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 0 | 0000 | 0 | 0000 - 150 | 0150 | 150 | 0150 - 300 | 0300 | 300 | 0300 - 450 | 0450 | 450 | 0450 - | | 75 | 0075 - | | 225 | 0225 - | | 375 | 0375 - | | 525 | 0525 -(8 rows) - --- full outer join, with placeholder vars 
-EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 50 phv, * FROM prt1 WHERE prt1.b = 0) t1 FULL JOIN (SELECT 75 phv, * FROM prt2 WHERE prt2.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b ORDER BY t1.a, t2.b; - QUERY PLAN ----------------------------------------------------------------- - Sort - Sort Key: prt1.a, prt2.b - -> Append - -> Hash Full Join - Hash Cond: (prt1_1.a = prt2_1.b) - Filter: (((50) = prt1_1.a) OR ((75) = prt2_1.b)) - -> Seq Scan on prt1_p1 prt1_1 - Filter: (b = 0) - -> Hash - -> Seq Scan on prt2_p1 prt2_1 - Filter: (a = 0) - -> Hash Full Join - Hash Cond: (prt1_2.a = prt2_2.b) - Filter: (((50) = prt1_2.a) OR ((75) = prt2_2.b)) - -> Seq Scan on prt1_p2 prt1_2 - Filter: (b = 0) - -> Hash - -> Seq Scan on prt2_p2 prt2_2 - Filter: (a = 0) - -> Hash Full Join - Hash Cond: (prt1_3.a = prt2_3.b) - Filter: (((50) = prt1_3.a) OR ((75) = prt2_3.b)) - -> Seq Scan on prt1_p3 prt1_3 - Filter: (b = 0) - -> Hash - -> Seq Scan on prt2_p3 prt2_3 - Filter: (a = 0) -(27 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 50 phv, * FROM prt1 WHERE prt1.b = 0) t1 FULL JOIN (SELECT 75 phv, * FROM prt2 WHERE prt2.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b ORDER BY t1.a, t2.b; - a | c | b | c -----+------+----+------ - 50 | 0050 | | - | | 75 | 0075 -(2 rows) - --- Join with pruned partitions from joining relations -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.a < 450 AND t2.b > 250 AND t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------ - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: (t2.b = t1.a) - -> Seq Scan on prt2_p2 t2 - Filter: (b > 250) - -> Hash - -> Seq Scan on prt1_p2 t1 - Filter: ((a < 450) AND (b = 0)) -(9 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.a < 450 AND t2.b > 250 AND t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 300 | 0300 | 300 | 0300 -(1 row) - --- Currently we can't do partitioned join if nullable-side partitions are pruned -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a < 450) t1 LEFT JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------------ - Sort - Sort Key: prt1.a, prt2.b - -> Hash Right Join - Hash Cond: (prt2.b = prt1.a) - -> Append - -> Seq Scan on prt2_p2 prt2_1 - Filter: (b > 250) - -> Seq Scan on prt2_p3 prt2_2 - Filter: (b > 250) - -> Hash - -> Append - -> Seq Scan on prt1_p1 prt1_1 - Filter: ((a < 450) AND (b = 0)) - -> Seq Scan on prt1_p2 prt1_2 - Filter: ((a < 450) AND (b = 0)) -(15 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a < 450) t1 LEFT JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 0 | 0000 | | - 50 | 0050 | | - 100 | 0100 | | - 150 | 0150 | | - 200 | 0200 | | - 250 | 0250 | | - 300 | 0300 | 300 | 0300 - 350 | 0350 | | - 400 | 0400 | | -(9 rows) - --- Currently we can't do partitioned join if nullable-side partitions are pruned -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a < 450) t1 FULL JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 OR t2.a = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ----------------------------------------------------- - Sort - Sort Key: prt1.a, prt2.b - -> Hash Full Join - Hash Cond: 
(prt1.a = prt2.b) - Filter: ((prt1.b = 0) OR (prt2.a = 0)) - -> Append - -> Seq Scan on prt1_p1 prt1_1 - Filter: (a < 450) - -> Seq Scan on prt1_p2 prt1_2 - Filter: (a < 450) - -> Hash - -> Append - -> Seq Scan on prt2_p2 prt2_1 - Filter: (b > 250) - -> Seq Scan on prt2_p3 prt2_2 - Filter: (b > 250) -(16 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a < 450) t1 FULL JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 OR t2.a = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 0 | 0000 | | - 50 | 0050 | | - 100 | 0100 | | - 150 | 0150 | | - 200 | 0200 | | - 250 | 0250 | | - 300 | 0300 | 300 | 0300 - 350 | 0350 | | - 400 | 0400 | | - | | 375 | 0375 - | | 450 | 0450 - | | 525 | 0525 -(12 rows) - --- Semi-join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t2.b FROM prt2 t2 WHERE t2.a = 0) AND t1.b = 0 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Semi Join - Hash Cond: (t1_1.a = t2_1.b) - -> Seq Scan on prt1_p1 t1_1 - Filter: (b = 0) - -> Hash - -> Seq Scan on prt2_p1 t2_1 - Filter: (a = 0) - -> Hash Semi Join - Hash Cond: (t1_2.a = t2_2.b) - -> Seq Scan on prt1_p2 t1_2 - Filter: (b = 0) - -> Hash - -> Seq Scan on prt2_p2 t2_2 - Filter: (a = 0) - -> Nested Loop Semi Join - Join Filter: (t1_3.a = t2_3.b) - -> Seq Scan on prt1_p3 t1_3 - Filter: (b = 0) - -> Materialize - -> Seq Scan on prt2_p3 t2_3 - Filter: (a = 0) -(24 rows) - -SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t2.b FROM prt2 t2 WHERE t2.a = 0) AND t1.b = 0 ORDER BY t1.a; - a | b | c ------+---+------ - 0 | 0 | 0000 - 150 | 0 | 0150 - 300 | 0 | 0300 - 450 | 0 | 0450 -(4 rows) - --- Anti-join with aggregates -EXPLAIN (COSTS OFF) -SELECT sum(t1.a), avg(t1.a), sum(t1.b), avg(t1.b) FROM prt1 t1 WHERE NOT EXISTS (SELECT 1 FROM prt2 t2 WHERE t1.a = t2.b); - QUERY PLAN --------------------------------------------------- - Aggregate - -> Append - -> Hash Anti Join - Hash Cond: (t1_1.a = t2_1.b) - -> Seq Scan on prt1_p1 t1_1 - -> Hash - -> Seq Scan on prt2_p1 t2_1 - -> Hash Anti Join - Hash Cond: (t1_2.a = t2_2.b) - -> Seq Scan on prt1_p2 t1_2 - -> Hash - -> Seq Scan on prt2_p2 t2_2 - -> Hash Anti Join - Hash Cond: (t1_3.a = t2_3.b) - -> Seq Scan on prt1_p3 t1_3 - -> Hash - -> Seq Scan on prt2_p3 t2_3 -(17 rows) - -SELECT sum(t1.a), avg(t1.a), sum(t1.b), avg(t1.b) FROM prt1 t1 WHERE NOT EXISTS (SELECT 1 FROM prt2 t2 WHERE t1.a = t2.b); - sum | avg | sum | avg --------+----------------------+------+--------------------- - 60000 | 300.0000000000000000 | 2400 | 12.0000000000000000 -(1 row) - --- lateral reference -EXPLAIN (COSTS OFF) -SELECT * FROM prt1 t1 LEFT JOIN LATERAL - (SELECT t2.a AS t2a, t3.a AS t3a, least(t1.a,t2.a,t3.b) FROM prt1 t2 JOIN prt2 t3 ON (t2.a = t3.b)) ss - ON t1.a = ss.t2a WHERE t1.b = 0 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Nested Loop Left Join - -> Seq Scan on prt1_p1 t1_1 - Filter: (b = 0) - -> Nested Loop - -> Index Only Scan using iprt1_p1_a on prt1_p1 t2_1 - Index Cond: (a = t1_1.a) - -> Index Scan using iprt2_p1_b on prt2_p1 t3_1 - Index Cond: (b = t2_1.a) - -> Nested Loop Left Join - -> Seq Scan on prt1_p2 t1_2 - Filter: (b = 0) - -> Nested Loop - -> Index Only Scan using iprt1_p2_a on prt1_p2 t2_2 - Index Cond: (a = t1_2.a) - -> Index Scan using iprt2_p2_b on prt2_p2 t3_2 - Index Cond: (b = t2_2.a) - -> Nested Loop Left Join - -> Seq Scan on 
prt1_p3 t1_3 - Filter: (b = 0) - -> Nested Loop - -> Index Only Scan using iprt1_p3_a on prt1_p3 t2_3 - Index Cond: (a = t1_3.a) - -> Index Scan using iprt2_p3_b on prt2_p3 t3_3 - Index Cond: (b = t2_3.a) -(27 rows) - -SELECT * FROM prt1 t1 LEFT JOIN LATERAL - (SELECT t2.a AS t2a, t3.a AS t3a, least(t1.a,t2.a,t3.b) FROM prt1 t2 JOIN prt2 t3 ON (t2.a = t3.b)) ss - ON t1.a = ss.t2a WHERE t1.b = 0 ORDER BY t1.a; - a | b | c | t2a | t3a | least ------+---+------+-----+-----+------- - 0 | 0 | 0000 | 0 | 0 | 0 - 50 | 0 | 0050 | | | - 100 | 0 | 0100 | | | - 150 | 0 | 0150 | 150 | 0 | 150 - 200 | 0 | 0200 | | | - 250 | 0 | 0250 | | | - 300 | 0 | 0300 | 300 | 0 | 300 - 350 | 0 | 0350 | | | - 400 | 0 | 0400 | | | - 450 | 0 | 0450 | 450 | 0 | 450 - 500 | 0 | 0500 | | | - 550 | 0 | 0550 | | | -(12 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.a, ss.t2a, ss.t2c FROM prt1 t1 LEFT JOIN LATERAL - (SELECT t2.a AS t2a, t3.a AS t3a, t2.b t2b, t2.c t2c, least(t1.a,t2.a,t3.b) FROM prt1 t2 JOIN prt2 t3 ON (t2.a = t3.b)) ss - ON t1.c = ss.t2c WHERE (t1.b + coalesce(ss.t2b, 0)) = 0 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Left Join - Hash Cond: ((t1.c)::text = (t2.c)::text) - Filter: ((t1.b + COALESCE(t2.b, 0)) = 0) - -> Append - -> Seq Scan on prt1_p1 t1_1 - -> Seq Scan on prt1_p2 t1_2 - -> Seq Scan on prt1_p3 t1_3 - -> Hash - -> Append - -> Hash Join - Hash Cond: (t2_1.a = t3_1.b) - -> Seq Scan on prt1_p1 t2_1 - -> Hash - -> Seq Scan on prt2_p1 t3_1 - -> Hash Join - Hash Cond: (t2_2.a = t3_2.b) - -> Seq Scan on prt1_p2 t2_2 - -> Hash - -> Seq Scan on prt2_p2 t3_2 - -> Hash Join - Hash Cond: (t2_3.a = t3_3.b) - -> Seq Scan on prt1_p3 t2_3 - -> Hash - -> Seq Scan on prt2_p3 t3_3 -(26 rows) - -SELECT t1.a, ss.t2a, ss.t2c FROM prt1 t1 LEFT JOIN LATERAL - (SELECT t2.a AS t2a, t3.a AS t3a, t2.b t2b, t2.c t2c, least(t1.a,t2.a,t3.b) FROM prt1 t2 JOIN prt2 t3 ON (t2.a = t3.b)) ss - ON t1.c = ss.t2c WHERE (t1.b + coalesce(ss.t2b, 0)) = 0 ORDER BY t1.a; - a | t2a | t2c -----+-----+------ - 0 | 0 | 0000 - 50 | | - 100 | | - 150 | 150 | 0150 - 200 | | - 250 | | - 300 | 300 | 0300 - 350 | | - 400 | | - 450 | 450 | 0450 - 500 | | - 550 | | -(12 rows) - --- lateral reference in sample scan -EXPLAIN (COSTS OFF) -SELECT * FROM prt1 t1 JOIN LATERAL - (SELECT * FROM prt1 t2 TABLESAMPLE SYSTEM (t1.a) REPEATABLE(t1.b)) s - ON t1.a = s.a; - QUERY PLAN -------------------------------------------------------------- - Append - -> Nested Loop - -> Seq Scan on prt1_p1 t1_1 - -> Sample Scan on prt1_p1 t2_1 - Sampling: system (t1_1.a) REPEATABLE (t1_1.b) - Filter: (t1_1.a = a) - -> Nested Loop - -> Seq Scan on prt1_p2 t1_2 - -> Sample Scan on prt1_p2 t2_2 - Sampling: system (t1_2.a) REPEATABLE (t1_2.b) - Filter: (t1_2.a = a) - -> Nested Loop - -> Seq Scan on prt1_p3 t1_3 - -> Sample Scan on prt1_p3 t2_3 - Sampling: system (t1_3.a) REPEATABLE (t1_3.b) - Filter: (t1_3.a = a) -(16 rows) - --- lateral reference in scan's restriction clauses -EXPLAIN (COSTS OFF) -SELECT count(*) FROM prt1 t1 LEFT JOIN LATERAL - (SELECT t1.b AS t1b, t2.* FROM prt2 t2) s - ON t1.a = s.b WHERE s.t1b = s.a; - QUERY PLAN ---------------------------------------------------------------- - Aggregate - -> Append - -> Nested Loop - -> Seq Scan on prt1_p1 t1_1 - -> Index Scan using iprt2_p1_b on prt2_p1 t2_1 - Index Cond: (b = t1_1.a) - Filter: (t1_1.b = a) - -> Nested Loop - -> Seq Scan on prt1_p2 t1_2 - -> Index Scan using iprt2_p2_b on prt2_p2 t2_2 - Index Cond: (b = t1_2.a) - Filter: 
(t1_2.b = a) - -> Nested Loop - -> Seq Scan on prt1_p3 t1_3 - -> Index Scan using iprt2_p3_b on prt2_p3 t2_3 - Index Cond: (b = t1_3.a) - Filter: (t1_3.b = a) -(17 rows) - -SELECT count(*) FROM prt1 t1 LEFT JOIN LATERAL - (SELECT t1.b AS t1b, t2.* FROM prt2 t2) s - ON t1.a = s.b WHERE s.t1b = s.a; - count -------- - 100 -(1 row) - -EXPLAIN (COSTS OFF) -SELECT count(*) FROM prt1 t1 LEFT JOIN LATERAL - (SELECT t1.b AS t1b, t2.* FROM prt2 t2) s - ON t1.a = s.b WHERE s.t1b = s.b; - QUERY PLAN --------------------------------------------------------------------- - Aggregate - -> Append - -> Nested Loop - -> Seq Scan on prt1_p1 t1_1 - -> Index Only Scan using iprt2_p1_b on prt2_p1 t2_1 - Index Cond: (b = t1_1.a) - Filter: (b = t1_1.b) - -> Nested Loop - -> Seq Scan on prt1_p2 t1_2 - -> Index Only Scan using iprt2_p2_b on prt2_p2 t2_2 - Index Cond: (b = t1_2.a) - Filter: (b = t1_2.b) - -> Nested Loop - -> Seq Scan on prt1_p3 t1_3 - -> Index Only Scan using iprt2_p3_b on prt2_p3 t2_3 - Index Cond: (b = t1_3.a) - Filter: (b = t1_3.b) -(17 rows) - -SELECT count(*) FROM prt1 t1 LEFT JOIN LATERAL - (SELECT t1.b AS t1b, t2.* FROM prt2 t2) s - ON t1.a = s.b WHERE s.t1b = s.b; - count -------- - 5 -(1 row) - --- bug with inadequate sort key representation -SET enable_partitionwise_aggregate TO true; -SET enable_hashjoin TO false; -EXPLAIN (COSTS OFF) -SELECT a, b FROM prt1 FULL JOIN prt2 p2(b,a,c) USING(a,b) - WHERE a BETWEEN 490 AND 510 - GROUP BY 1, 2 ORDER BY 1, 2; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------ - Group - Group Key: (COALESCE(prt1.a, p2.a)), (COALESCE(prt1.b, p2.b)) - -> Merge Append - Sort Key: (COALESCE(prt1.a, p2.a)), (COALESCE(prt1.b, p2.b)) - -> Group - Group Key: (COALESCE(prt1.a, p2.a)), (COALESCE(prt1.b, p2.b)) - -> Sort - Sort Key: (COALESCE(prt1.a, p2.a)), (COALESCE(prt1.b, p2.b)) - -> Merge Full Join - Merge Cond: ((prt1.a = p2.a) AND (prt1.b = p2.b)) - Filter: ((COALESCE(prt1.a, p2.a) >= 490) AND (COALESCE(prt1.a, p2.a) <= 510)) - -> Sort - Sort Key: prt1.a, prt1.b - -> Seq Scan on prt1_p1 prt1 - -> Sort - Sort Key: p2.a, p2.b - -> Seq Scan on prt2_p1 p2 - -> Group - Group Key: (COALESCE(prt1_1.a, p2_1.a)), (COALESCE(prt1_1.b, p2_1.b)) - -> Sort - Sort Key: (COALESCE(prt1_1.a, p2_1.a)), (COALESCE(prt1_1.b, p2_1.b)) - -> Merge Full Join - Merge Cond: ((prt1_1.a = p2_1.a) AND (prt1_1.b = p2_1.b)) - Filter: ((COALESCE(prt1_1.a, p2_1.a) >= 490) AND (COALESCE(prt1_1.a, p2_1.a) <= 510)) - -> Sort - Sort Key: prt1_1.a, prt1_1.b - -> Seq Scan on prt1_p2 prt1_1 - -> Sort - Sort Key: p2_1.a, p2_1.b - -> Seq Scan on prt2_p2 p2_1 - -> Group - Group Key: (COALESCE(prt1_2.a, p2_2.a)), (COALESCE(prt1_2.b, p2_2.b)) - -> Sort - Sort Key: (COALESCE(prt1_2.a, p2_2.a)), (COALESCE(prt1_2.b, p2_2.b)) - -> Merge Full Join - Merge Cond: ((prt1_2.a = p2_2.a) AND (prt1_2.b = p2_2.b)) - Filter: ((COALESCE(prt1_2.a, p2_2.a) >= 490) AND (COALESCE(prt1_2.a, p2_2.a) <= 510)) - -> Sort - Sort Key: prt1_2.a, prt1_2.b - -> Seq Scan on prt1_p3 prt1_2 - -> Sort - Sort Key: p2_2.a, p2_2.b - -> Seq Scan on prt2_p3 p2_2 -(43 rows) - -SELECT a, b FROM prt1 FULL JOIN prt2 p2(b,a,c) USING(a,b) - WHERE a BETWEEN 490 AND 510 - GROUP BY 1, 2 ORDER BY 1, 2; - a | b ------+---- - 490 | 15 - 492 | 17 - 494 | 19 - 495 | 20 - 496 | 21 - 498 | 23 - 500 | 0 - 501 | 1 - 502 | 2 - 504 | 4 - 506 | 6 - 507 | 7 - 508 | 8 - 510 | 10 -(14 rows) - -RESET enable_partitionwise_aggregate; -RESET enable_hashjoin; --- --- partitioned by expression --- 
-CREATE TABLE prt1_e (a int, b int, c int) PARTITION BY RANGE(((a + b)/2)); -CREATE TABLE prt1_e_p1 PARTITION OF prt1_e FOR VALUES FROM (0) TO (250); -CREATE TABLE prt1_e_p2 PARTITION OF prt1_e FOR VALUES FROM (250) TO (500); -CREATE TABLE prt1_e_p3 PARTITION OF prt1_e FOR VALUES FROM (500) TO (600); -INSERT INTO prt1_e SELECT i, i, i % 25 FROM generate_series(0, 599, 2) i; -CREATE INDEX iprt1_e_p1_ab2 on prt1_e_p1(((a+b)/2)); -CREATE INDEX iprt1_e_p2_ab2 on prt1_e_p2(((a+b)/2)); -CREATE INDEX iprt1_e_p3_ab2 on prt1_e_p3(((a+b)/2)); -ANALYZE prt1_e; -CREATE TABLE prt2_e (a int, b int, c int) PARTITION BY RANGE(((b + a)/2)); -CREATE TABLE prt2_e_p1 PARTITION OF prt2_e FOR VALUES FROM (0) TO (250); -CREATE TABLE prt2_e_p2 PARTITION OF prt2_e FOR VALUES FROM (250) TO (500); -CREATE TABLE prt2_e_p3 PARTITION OF prt2_e FOR VALUES FROM (500) TO (600); -INSERT INTO prt2_e SELECT i, i, i % 25 FROM generate_series(0, 599, 3) i; -ANALYZE prt2_e; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_e t1, prt2_e t2 WHERE (t1.a + t1.b)/2 = (t2.b + t2.a)/2 AND t1.c = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------------------------------- - Sort - Sort Key: t1.a, t2.b - -> Append - -> Hash Join - Hash Cond: (((t2_1.b + t2_1.a) / 2) = ((t1_1.a + t1_1.b) / 2)) - -> Seq Scan on prt2_e_p1 t2_1 - -> Hash - -> Seq Scan on prt1_e_p1 t1_1 - Filter: (c = 0) - -> Hash Join - Hash Cond: (((t2_2.b + t2_2.a) / 2) = ((t1_2.a + t1_2.b) / 2)) - -> Seq Scan on prt2_e_p2 t2_2 - -> Hash - -> Seq Scan on prt1_e_p2 t1_2 - Filter: (c = 0) - -> Hash Join - Hash Cond: (((t2_3.b + t2_3.a) / 2) = ((t1_3.a + t1_3.b) / 2)) - -> Seq Scan on prt2_e_p3 t2_3 - -> Hash - -> Seq Scan on prt1_e_p3 t1_3 - Filter: (c = 0) -(21 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_e t1, prt2_e t2 WHERE (t1.a + t1.b)/2 = (t2.b + t2.a)/2 AND t1.c = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+---+-----+--- - 0 | 0 | 0 | 0 - 150 | 0 | 150 | 0 - 300 | 0 | 300 | 0 - 450 | 0 | 450 | 0 -(4 rows) - --- --- N-way join --- -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM prt1 t1, prt2 t2, prt1_e t3 WHERE t1.a = t2.b AND t1.a = (t3.a + t3.b)/2 AND t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ---------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Nested Loop - Join Filter: (t1_1.a = ((t3_1.a + t3_1.b) / 2)) - -> Hash Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_p1 t2_1 - -> Hash - -> Seq Scan on prt1_p1 t1_1 - Filter: (b = 0) - -> Index Scan using iprt1_e_p1_ab2 on prt1_e_p1 t3_1 - Index Cond: (((a + b) / 2) = t2_1.b) - -> Nested Loop - Join Filter: (t1_2.a = ((t3_2.a + t3_2.b) / 2)) - -> Hash Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_p2 t2_2 - -> Hash - -> Seq Scan on prt1_p2 t1_2 - Filter: (b = 0) - -> Index Scan using iprt1_e_p2_ab2 on prt1_e_p2 t3_2 - Index Cond: (((a + b) / 2) = t2_2.b) - -> Nested Loop - Join Filter: (t1_3.a = ((t3_3.a + t3_3.b) / 2)) - -> Hash Join - Hash Cond: (t2_3.b = t1_3.a) - -> Seq Scan on prt2_p3 t2_3 - -> Hash - -> Seq Scan on prt1_p3 t1_3 - Filter: (b = 0) - -> Index Scan using iprt1_e_p3_ab2 on prt1_e_p3 t3_3 - Index Cond: (((a + b) / 2) = t2_3.b) -(33 rows) - -SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM prt1 t1, prt2 t2, prt1_e t3 WHERE t1.a = t2.b AND t1.a = (t3.a + t3.b)/2 AND t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c | ?column? 
| c ------+------+-----+------+----------+--- - 0 | 0000 | 0 | 0000 | 0 | 0 - 150 | 0150 | 150 | 0150 | 300 | 0 - 300 | 0300 | 300 | 0300 | 600 | 0 - 450 | 0450 | 450 | 0450 | 900 | 0 -(4 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) LEFT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t1.b = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; - QUERY PLAN --------------------------------------------------------------- - Sort - Sort Key: t1.a, t2.b, ((t3.a + t3.b)) - -> Append - -> Hash Right Join - Hash Cond: (((t3_1.a + t3_1.b) / 2) = t1_1.a) - -> Seq Scan on prt1_e_p1 t3_1 - -> Hash - -> Hash Right Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_p1 t2_1 - -> Hash - -> Seq Scan on prt1_p1 t1_1 - Filter: (b = 0) - -> Hash Right Join - Hash Cond: (((t3_2.a + t3_2.b) / 2) = t1_2.a) - -> Seq Scan on prt1_e_p2 t3_2 - -> Hash - -> Hash Right Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_p2 t2_2 - -> Hash - -> Seq Scan on prt1_p2 t1_2 - Filter: (b = 0) - -> Hash Right Join - Hash Cond: (((t3_3.a + t3_3.b) / 2) = t1_3.a) - -> Seq Scan on prt1_e_p3 t3_3 - -> Hash - -> Hash Right Join - Hash Cond: (t2_3.b = t1_3.a) - -> Seq Scan on prt2_p3 t2_3 - -> Hash - -> Seq Scan on prt1_p3 t1_3 - Filter: (b = 0) -(33 rows) - -SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) LEFT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t1.b = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; - a | c | b | c | ?column? | c ------+------+-----+------+----------+--- - 0 | 0000 | 0 | 0000 | 0 | 0 - 50 | 0050 | | | 100 | 0 - 100 | 0100 | | | 200 | 0 - 150 | 0150 | 150 | 0150 | 300 | 0 - 200 | 0200 | | | 400 | 0 - 250 | 0250 | | | 500 | 0 - 300 | 0300 | 300 | 0300 | 600 | 0 - 350 | 0350 | | | 700 | 0 - 400 | 0400 | | | 800 | 0 - 450 | 0450 | 450 | 0450 | 900 | 0 - 500 | 0500 | | | 1000 | 0 - 550 | 0550 | | | 1100 | 0 -(12 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) RIGHT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t3.c = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; - QUERY PLAN -------------------------------------------------------------------- - Sort - Sort Key: t1.a, t2.b, ((t3.a + t3.b)) - -> Append - -> Nested Loop Left Join - -> Hash Right Join - Hash Cond: (t1_1.a = ((t3_1.a + t3_1.b) / 2)) - -> Seq Scan on prt1_p1 t1_1 - -> Hash - -> Seq Scan on prt1_e_p1 t3_1 - Filter: (c = 0) - -> Index Scan using iprt2_p1_b on prt2_p1 t2_1 - Index Cond: (b = t1_1.a) - -> Nested Loop Left Join - -> Hash Right Join - Hash Cond: (t1_2.a = ((t3_2.a + t3_2.b) / 2)) - -> Seq Scan on prt1_p2 t1_2 - -> Hash - -> Seq Scan on prt1_e_p2 t3_2 - Filter: (c = 0) - -> Index Scan using iprt2_p2_b on prt2_p2 t2_2 - Index Cond: (b = t1_2.a) - -> Nested Loop Left Join - -> Hash Right Join - Hash Cond: (t1_3.a = ((t3_3.a + t3_3.b) / 2)) - -> Seq Scan on prt1_p3 t1_3 - -> Hash - -> Seq Scan on prt1_e_p3 t3_3 - Filter: (c = 0) - -> Index Scan using iprt2_p3_b on prt2_p3 t2_3 - Index Cond: (b = t1_3.a) -(30 rows) - -SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) RIGHT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t3.c = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; - a | c | b | c | ?column? 
| c ------+------+-----+------+----------+--- - 0 | 0000 | 0 | 0000 | 0 | 0 - 50 | 0050 | | | 100 | 0 - 100 | 0100 | | | 200 | 0 - 150 | 0150 | 150 | 0150 | 300 | 0 - 200 | 0200 | | | 400 | 0 - 250 | 0250 | | | 500 | 0 - 300 | 0300 | 300 | 0300 | 600 | 0 - 350 | 0350 | | | 700 | 0 - 400 | 0400 | | | 800 | 0 - 450 | 0450 | 450 | 0450 | 900 | 0 - 500 | 0500 | | | 1000 | 0 - 550 | 0550 | | | 1100 | 0 -(12 rows) - --- --- 3-way full join --- -EXPLAIN (COSTS OFF) -SELECT COUNT(*) FROM prt1 FULL JOIN prt2 p2(b,a,c) USING(a,b) FULL JOIN prt2 p3(b,a,c) USING (a, b) - WHERE a BETWEEN 490 AND 510; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------ - Aggregate - -> Append - -> Hash Full Join - Hash Cond: ((COALESCE(prt1_1.a, p2_1.a) = p3_1.a) AND (COALESCE(prt1_1.b, p2_1.b) = p3_1.b)) - Filter: ((COALESCE(COALESCE(prt1_1.a, p2_1.a), p3_1.a) >= 490) AND (COALESCE(COALESCE(prt1_1.a, p2_1.a), p3_1.a) <= 510)) - -> Hash Full Join - Hash Cond: ((prt1_1.a = p2_1.a) AND (prt1_1.b = p2_1.b)) - -> Seq Scan on prt1_p1 prt1_1 - -> Hash - -> Seq Scan on prt2_p1 p2_1 - -> Hash - -> Seq Scan on prt2_p1 p3_1 - -> Hash Full Join - Hash Cond: ((COALESCE(prt1_2.a, p2_2.a) = p3_2.a) AND (COALESCE(prt1_2.b, p2_2.b) = p3_2.b)) - Filter: ((COALESCE(COALESCE(prt1_2.a, p2_2.a), p3_2.a) >= 490) AND (COALESCE(COALESCE(prt1_2.a, p2_2.a), p3_2.a) <= 510)) - -> Hash Full Join - Hash Cond: ((prt1_2.a = p2_2.a) AND (prt1_2.b = p2_2.b)) - -> Seq Scan on prt1_p2 prt1_2 - -> Hash - -> Seq Scan on prt2_p2 p2_2 - -> Hash - -> Seq Scan on prt2_p2 p3_2 - -> Hash Full Join - Hash Cond: ((COALESCE(prt1_3.a, p2_3.a) = p3_3.a) AND (COALESCE(prt1_3.b, p2_3.b) = p3_3.b)) - Filter: ((COALESCE(COALESCE(prt1_3.a, p2_3.a), p3_3.a) >= 490) AND (COALESCE(COALESCE(prt1_3.a, p2_3.a), p3_3.a) <= 510)) - -> Hash Full Join - Hash Cond: ((prt1_3.a = p2_3.a) AND (prt1_3.b = p2_3.b)) - -> Seq Scan on prt1_p3 prt1_3 - -> Hash - -> Seq Scan on prt2_p3 p2_3 - -> Hash - -> Seq Scan on prt2_p3 p3_3 -(32 rows) - -SELECT COUNT(*) FROM prt1 FULL JOIN prt2 p2(b,a,c) USING(a,b) FULL JOIN prt2 p3(b,a,c) USING (a, b) - WHERE a BETWEEN 490 AND 510; - count -------- - 14 -(1 row) - --- --- 4-way full join --- -EXPLAIN (COSTS OFF) -SELECT COUNT(*) FROM prt1 FULL JOIN prt2 p2(b,a,c) USING(a,b) FULL JOIN prt2 p3(b,a,c) USING (a, b) FULL JOIN prt1 p4 (a,b,c) USING (a, b) - WHERE a BETWEEN 490 AND 510; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - Aggregate - -> Append - -> Hash Full Join - Hash Cond: ((COALESCE(COALESCE(prt1_1.a, p2_1.a), p3_1.a) = p4_1.a) AND (COALESCE(COALESCE(prt1_1.b, p2_1.b), p3_1.b) = p4_1.b)) - Filter: ((COALESCE(COALESCE(COALESCE(prt1_1.a, p2_1.a), p3_1.a), p4_1.a) >= 490) AND (COALESCE(COALESCE(COALESCE(prt1_1.a, p2_1.a), p3_1.a), p4_1.a) <= 510)) - -> Hash Full Join - Hash Cond: ((COALESCE(prt1_1.a, p2_1.a) = p3_1.a) AND (COALESCE(prt1_1.b, p2_1.b) = p3_1.b)) - -> Hash Full Join - Hash Cond: ((prt1_1.a = p2_1.a) AND (prt1_1.b = p2_1.b)) - -> Seq Scan on prt1_p1 prt1_1 - -> Hash - -> Seq Scan on prt2_p1 p2_1 - -> Hash - -> Seq Scan on prt2_p1 p3_1 - -> Hash - -> Seq Scan on prt1_p1 p4_1 - -> Hash Full Join - Hash Cond: ((COALESCE(COALESCE(prt1_2.a, p2_2.a), p3_2.a) = p4_2.a) AND (COALESCE(COALESCE(prt1_2.b, p2_2.b), p3_2.b) = p4_2.b)) - Filter: ((COALESCE(COALESCE(COALESCE(prt1_2.a, p2_2.a), p3_2.a), p4_2.a) 
>= 490) AND (COALESCE(COALESCE(COALESCE(prt1_2.a, p2_2.a), p3_2.a), p4_2.a) <= 510)) - -> Hash Full Join - Hash Cond: ((COALESCE(prt1_2.a, p2_2.a) = p3_2.a) AND (COALESCE(prt1_2.b, p2_2.b) = p3_2.b)) - -> Hash Full Join - Hash Cond: ((prt1_2.a = p2_2.a) AND (prt1_2.b = p2_2.b)) - -> Seq Scan on prt1_p2 prt1_2 - -> Hash - -> Seq Scan on prt2_p2 p2_2 - -> Hash - -> Seq Scan on prt2_p2 p3_2 - -> Hash - -> Seq Scan on prt1_p2 p4_2 - -> Hash Full Join - Hash Cond: ((COALESCE(COALESCE(prt1_3.a, p2_3.a), p3_3.a) = p4_3.a) AND (COALESCE(COALESCE(prt1_3.b, p2_3.b), p3_3.b) = p4_3.b)) - Filter: ((COALESCE(COALESCE(COALESCE(prt1_3.a, p2_3.a), p3_3.a), p4_3.a) >= 490) AND (COALESCE(COALESCE(COALESCE(prt1_3.a, p2_3.a), p3_3.a), p4_3.a) <= 510)) - -> Hash Full Join - Hash Cond: ((COALESCE(prt1_3.a, p2_3.a) = p3_3.a) AND (COALESCE(prt1_3.b, p2_3.b) = p3_3.b)) - -> Hash Full Join - Hash Cond: ((prt1_3.a = p2_3.a) AND (prt1_3.b = p2_3.b)) - -> Seq Scan on prt1_p3 prt1_3 - -> Hash - -> Seq Scan on prt2_p3 p2_3 - -> Hash - -> Seq Scan on prt2_p3 p3_3 - -> Hash - -> Seq Scan on prt1_p3 p4_3 -(44 rows) - -SELECT COUNT(*) FROM prt1 FULL JOIN prt2 p2(b,a,c) USING(a,b) FULL JOIN prt2 p3(b,a,c) USING (a, b) FULL JOIN prt1 p4 (a,b,c) USING (a, b) - WHERE a BETWEEN 490 AND 510; - count -------- - 14 -(1 row) - --- Cases with non-nullable expressions in subquery results; --- make sure these go to null as expected -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.phv, t2.b, t2.phv, t3.a + t3.b, t3.phv FROM ((SELECT 50 phv, * FROM prt1 WHERE prt1.b = 0) t1 FULL JOIN (SELECT 75 phv, * FROM prt2 WHERE prt2.a = 0) t2 ON (t1.a = t2.b)) FULL JOIN (SELECT 50 phv, * FROM prt1_e WHERE prt1_e.c = 0) t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t1.a = t1.phv OR t2.b = t2.phv OR (t3.a + t3.b)/2 = t3.phv ORDER BY t1.a, t2.b, t3.a + t3.b; - QUERY PLAN ------------------------------------------------------------------------------------------------------------- - Sort - Sort Key: prt1.a, prt2.b, ((prt1_e.a + prt1_e.b)) - -> Append - -> Hash Full Join - Hash Cond: (prt1_1.a = ((prt1_e_1.a + prt1_e_1.b) / 2)) - Filter: ((prt1_1.a = (50)) OR (prt2_1.b = (75)) OR (((prt1_e_1.a + prt1_e_1.b) / 2) = (50))) - -> Hash Full Join - Hash Cond: (prt1_1.a = prt2_1.b) - -> Seq Scan on prt1_p1 prt1_1 - Filter: (b = 0) - -> Hash - -> Seq Scan on prt2_p1 prt2_1 - Filter: (a = 0) - -> Hash - -> Seq Scan on prt1_e_p1 prt1_e_1 - Filter: (c = 0) - -> Hash Full Join - Hash Cond: (prt1_2.a = ((prt1_e_2.a + prt1_e_2.b) / 2)) - Filter: ((prt1_2.a = (50)) OR (prt2_2.b = (75)) OR (((prt1_e_2.a + prt1_e_2.b) / 2) = (50))) - -> Hash Full Join - Hash Cond: (prt1_2.a = prt2_2.b) - -> Seq Scan on prt1_p2 prt1_2 - Filter: (b = 0) - -> Hash - -> Seq Scan on prt2_p2 prt2_2 - Filter: (a = 0) - -> Hash - -> Seq Scan on prt1_e_p2 prt1_e_2 - Filter: (c = 0) - -> Hash Full Join - Hash Cond: (prt1_3.a = ((prt1_e_3.a + prt1_e_3.b) / 2)) - Filter: ((prt1_3.a = (50)) OR (prt2_3.b = (75)) OR (((prt1_e_3.a + prt1_e_3.b) / 2) = (50))) - -> Hash Full Join - Hash Cond: (prt1_3.a = prt2_3.b) - -> Seq Scan on prt1_p3 prt1_3 - Filter: (b = 0) - -> Hash - -> Seq Scan on prt2_p3 prt2_3 - Filter: (a = 0) - -> Hash - -> Seq Scan on prt1_e_p3 prt1_e_3 - Filter: (c = 0) -(42 rows) - -SELECT t1.a, t1.phv, t2.b, t2.phv, t3.a + t3.b, t3.phv FROM ((SELECT 50 phv, * FROM prt1 WHERE prt1.b = 0) t1 FULL JOIN (SELECT 75 phv, * FROM prt2 WHERE prt2.a = 0) t2 ON (t1.a = t2.b)) FULL JOIN (SELECT 50 phv, * FROM prt1_e WHERE prt1_e.c = 0) t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t1.a = t1.phv OR t2.b = t2.phv OR (t3.a + 
t3.b)/2 = t3.phv ORDER BY t1.a, t2.b, t3.a + t3.b; - a | phv | b | phv | ?column? | phv -----+-----+----+-----+----------+----- - 50 | 50 | | | 100 | 50 - | | 75 | 75 | | -(2 rows) - --- Semi-join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1, prt1_e t2 WHERE t1.a = 0 AND t1.b = (t2.a + t2.b)/2) AND t1.b = 0 ORDER BY t1.a; - QUERY PLAN ---------------------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Nested Loop - Join Filter: (t1_2.a = t1_5.b) - -> HashAggregate - Group Key: t1_5.b - -> Hash Join - Hash Cond: (((t2_1.a + t2_1.b) / 2) = t1_5.b) - -> Seq Scan on prt1_e_p1 t2_1 - -> Hash - -> Seq Scan on prt2_p1 t1_5 - Filter: (a = 0) - -> Index Scan using iprt1_p1_a on prt1_p1 t1_2 - Index Cond: (a = ((t2_1.a + t2_1.b) / 2)) - Filter: (b = 0) - -> Nested Loop - Join Filter: (t1_3.a = t1_6.b) - -> HashAggregate - Group Key: t1_6.b - -> Hash Join - Hash Cond: (((t2_2.a + t2_2.b) / 2) = t1_6.b) - -> Seq Scan on prt1_e_p2 t2_2 - -> Hash - -> Seq Scan on prt2_p2 t1_6 - Filter: (a = 0) - -> Index Scan using iprt1_p2_a on prt1_p2 t1_3 - Index Cond: (a = ((t2_2.a + t2_2.b) / 2)) - Filter: (b = 0) - -> Nested Loop - Join Filter: (t1_4.a = t1_7.b) - -> HashAggregate - Group Key: t1_7.b - -> Nested Loop - -> Seq Scan on prt2_p3 t1_7 - Filter: (a = 0) - -> Index Scan using iprt1_e_p3_ab2 on prt1_e_p3 t2_3 - Index Cond: (((a + b) / 2) = t1_7.b) - -> Index Scan using iprt1_p3_a on prt1_p3 t1_4 - Index Cond: (a = ((t2_3.a + t2_3.b) / 2)) - Filter: (b = 0) -(41 rows) - -SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1, prt1_e t2 WHERE t1.a = 0 AND t1.b = (t2.a + t2.b)/2) AND t1.b = 0 ORDER BY t1.a; - a | b | c ------+---+------ - 0 | 0 | 0000 - 150 | 0 | 0150 - 300 | 0 | 0300 - 450 | 0 | 0450 -(4 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a; - QUERY PLAN ---------------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Nested Loop - -> HashAggregate - Group Key: t1_6.b - -> Hash Semi Join - Hash Cond: (t1_6.b = ((t1_9.a + t1_9.b) / 2)) - -> Seq Scan on prt2_p1 t1_6 - -> Hash - -> Seq Scan on prt1_e_p1 t1_9 - Filter: (c = 0) - -> Index Scan using iprt1_p1_a on prt1_p1 t1_3 - Index Cond: (a = t1_6.b) - Filter: (b = 0) - -> Nested Loop - -> HashAggregate - Group Key: t1_7.b - -> Hash Semi Join - Hash Cond: (t1_7.b = ((t1_10.a + t1_10.b) / 2)) - -> Seq Scan on prt2_p2 t1_7 - -> Hash - -> Seq Scan on prt1_e_p2 t1_10 - Filter: (c = 0) - -> Index Scan using iprt1_p2_a on prt1_p2 t1_4 - Index Cond: (a = t1_7.b) - Filter: (b = 0) - -> Nested Loop - -> HashAggregate - Group Key: t1_8.b - -> Hash Semi Join - Hash Cond: (t1_8.b = ((t1_11.a + t1_11.b) / 2)) - -> Seq Scan on prt2_p3 t1_8 - -> Hash - -> Seq Scan on prt1_e_p3 t1_11 - Filter: (c = 0) - -> Index Scan using iprt1_p3_a on prt1_p3 t1_5 - Index Cond: (a = t1_8.b) - Filter: (b = 0) -(39 rows) - -SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a; - a | b | c ------+---+------ - 0 | 0 | 0000 - 150 | 0 | 0150 - 300 | 0 | 0300 - 450 | 0 | 0450 -(4 rows) - --- test merge joins -SET enable_hashjoin TO off; -SET enable_nestloop TO off; -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + 
t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------------------- - Merge Append - Sort Key: t1.a - -> Merge Semi Join - Merge Cond: (t1_3.a = t1_6.b) - -> Sort - Sort Key: t1_3.a - -> Seq Scan on prt1_p1 t1_3 - Filter: (b = 0) - -> Merge Semi Join - Merge Cond: (t1_6.b = (((t1_9.a + t1_9.b) / 2))) - -> Sort - Sort Key: t1_6.b - -> Seq Scan on prt2_p1 t1_6 - -> Sort - Sort Key: (((t1_9.a + t1_9.b) / 2)) - -> Seq Scan on prt1_e_p1 t1_9 - Filter: (c = 0) - -> Merge Semi Join - Merge Cond: (t1_4.a = t1_7.b) - -> Sort - Sort Key: t1_4.a - -> Seq Scan on prt1_p2 t1_4 - Filter: (b = 0) - -> Merge Semi Join - Merge Cond: (t1_7.b = (((t1_10.a + t1_10.b) / 2))) - -> Sort - Sort Key: t1_7.b - -> Seq Scan on prt2_p2 t1_7 - -> Sort - Sort Key: (((t1_10.a + t1_10.b) / 2)) - -> Seq Scan on prt1_e_p2 t1_10 - Filter: (c = 0) - -> Merge Semi Join - Merge Cond: (t1_5.a = t1_8.b) - -> Sort - Sort Key: t1_5.a - -> Seq Scan on prt1_p3 t1_5 - Filter: (b = 0) - -> Merge Semi Join - Merge Cond: (t1_8.b = (((t1_11.a + t1_11.b) / 2))) - -> Sort - Sort Key: t1_8.b - -> Seq Scan on prt2_p3 t1_8 - -> Sort - Sort Key: (((t1_11.a + t1_11.b) / 2)) - -> Seq Scan on prt1_e_p3 t1_11 - Filter: (c = 0) -(47 rows) - -SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a; - a | b | c ------+---+------ - 0 | 0 | 0000 - 150 | 0 | 0150 - 300 | 0 | 0300 - 450 | 0 | 0450 -(4 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) RIGHT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t3.c = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; - QUERY PLAN ----------------------------------------------------------------------------- - Sort - Sort Key: t1.a, t2.b, ((t3.a + t3.b)) - -> Append - -> Merge Left Join - Merge Cond: (t1_1.a = t2_1.b) - -> Sort - Sort Key: t1_1.a - -> Merge Left Join - Merge Cond: ((((t3_1.a + t3_1.b) / 2)) = t1_1.a) - -> Sort - Sort Key: (((t3_1.a + t3_1.b) / 2)) - -> Seq Scan on prt1_e_p1 t3_1 - Filter: (c = 0) - -> Sort - Sort Key: t1_1.a - -> Seq Scan on prt1_p1 t1_1 - -> Sort - Sort Key: t2_1.b - -> Seq Scan on prt2_p1 t2_1 - -> Merge Left Join - Merge Cond: (t1_2.a = t2_2.b) - -> Sort - Sort Key: t1_2.a - -> Merge Left Join - Merge Cond: ((((t3_2.a + t3_2.b) / 2)) = t1_2.a) - -> Sort - Sort Key: (((t3_2.a + t3_2.b) / 2)) - -> Seq Scan on prt1_e_p2 t3_2 - Filter: (c = 0) - -> Sort - Sort Key: t1_2.a - -> Seq Scan on prt1_p2 t1_2 - -> Sort - Sort Key: t2_2.b - -> Seq Scan on prt2_p2 t2_2 - -> Merge Left Join - Merge Cond: (t1_3.a = t2_3.b) - -> Sort - Sort Key: t1_3.a - -> Merge Left Join - Merge Cond: ((((t3_3.a + t3_3.b) / 2)) = t1_3.a) - -> Sort - Sort Key: (((t3_3.a + t3_3.b) / 2)) - -> Seq Scan on prt1_e_p3 t3_3 - Filter: (c = 0) - -> Sort - Sort Key: t1_3.a - -> Seq Scan on prt1_p3 t1_3 - -> Sort - Sort Key: t2_3.b - -> Seq Scan on prt2_p3 t2_3 -(51 rows) - -SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) RIGHT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t3.c = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; - a | c | b | c | ?column? 
| c ------+------+-----+------+----------+--- - 0 | 0000 | 0 | 0000 | 0 | 0 - 50 | 0050 | | | 100 | 0 - 100 | 0100 | | | 200 | 0 - 150 | 0150 | 150 | 0150 | 300 | 0 - 200 | 0200 | | | 400 | 0 - 250 | 0250 | | | 500 | 0 - 300 | 0300 | 300 | 0300 | 600 | 0 - 350 | 0350 | | | 700 | 0 - 400 | 0400 | | | 800 | 0 - 450 | 0450 | 450 | 0450 | 900 | 0 - 500 | 0500 | | | 1000 | 0 - 550 | 0550 | | | 1100 | 0 -(12 rows) - --- MergeAppend on nullable column --- This should generate a partitionwise join, but currently fails to -EXPLAIN (COSTS OFF) -SELECT t1.a, t2.b FROM (SELECT * FROM prt1 WHERE a < 450) t1 LEFT JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------------ - Incremental Sort - Sort Key: prt1.a, prt2.b - Presorted Key: prt1.a - -> Merge Left Join - Merge Cond: (prt1.a = prt2.b) - -> Sort - Sort Key: prt1.a - -> Append - -> Seq Scan on prt1_p1 prt1_1 - Filter: ((a < 450) AND (b = 0)) - -> Seq Scan on prt1_p2 prt1_2 - Filter: ((a < 450) AND (b = 0)) - -> Sort - Sort Key: prt2.b - -> Append - -> Seq Scan on prt2_p2 prt2_1 - Filter: (b > 250) - -> Seq Scan on prt2_p3 prt2_2 - Filter: (b > 250) -(19 rows) - -SELECT t1.a, t2.b FROM (SELECT * FROM prt1 WHERE a < 450) t1 LEFT JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; - a | b ------+----- - 0 | - 50 | - 100 | - 150 | - 200 | - 250 | - 300 | 300 - 350 | - 400 | -(9 rows) - --- merge join when expression with whole-row reference needs to be sorted; --- partitionwise join does not apply -EXPLAIN (COSTS OFF) -SELECT t1.a, t2.b FROM prt1 t1, prt2 t2 WHERE t1::text = t2::text AND t1.a = t2.b ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------------------------------------------ - Merge Join - Merge Cond: ((t1.a = t2.b) AND (((((t1.*)::prt1))::text) = ((((t2.*)::prt2))::text))) - -> Sort - Sort Key: t1.a, ((((t1.*)::prt1))::text) - -> Result - -> Append - -> Seq Scan on prt1_p1 t1_1 - -> Seq Scan on prt1_p2 t1_2 - -> Seq Scan on prt1_p3 t1_3 - -> Sort - Sort Key: t2.b, ((((t2.*)::prt2))::text) - -> Result - -> Append - -> Seq Scan on prt2_p1 t2_1 - -> Seq Scan on prt2_p2 t2_2 - -> Seq Scan on prt2_p3 t2_3 -(16 rows) - -SELECT t1.a, t2.b FROM prt1 t1, prt2 t2 WHERE t1::text = t2::text AND t1.a = t2.b ORDER BY t1.a; - a | b -----+---- - 0 | 0 - 6 | 6 - 12 | 12 - 18 | 18 - 24 | 24 -(5 rows) - -RESET enable_hashjoin; -RESET enable_nestloop; --- --- partitioned by multiple columns --- -CREATE TABLE prt1_m (a int, b int, c int) PARTITION BY RANGE(a, ((a + b)/2)); -CREATE TABLE prt1_m_p1 PARTITION OF prt1_m FOR VALUES FROM (0, 0) TO (250, 250); -CREATE TABLE prt1_m_p2 PARTITION OF prt1_m FOR VALUES FROM (250, 250) TO (500, 500); -CREATE TABLE prt1_m_p3 PARTITION OF prt1_m FOR VALUES FROM (500, 500) TO (600, 600); -INSERT INTO prt1_m SELECT i, i, i % 25 FROM generate_series(0, 599, 2) i; -ANALYZE prt1_m; -CREATE TABLE prt2_m (a int, b int, c int) PARTITION BY RANGE(((b + a)/2), b); -CREATE TABLE prt2_m_p1 PARTITION OF prt2_m FOR VALUES FROM (0, 0) TO (250, 250); -CREATE TABLE prt2_m_p2 PARTITION OF prt2_m FOR VALUES FROM (250, 250) TO (500, 500); -CREATE TABLE prt2_m_p3 PARTITION OF prt2_m FOR VALUES FROM (500, 500) TO (600, 600); -INSERT INTO prt2_m SELECT i, i, i % 25 FROM generate_series(0, 599, 3) i; -ANALYZE prt2_m; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_m WHERE prt1_m.c = 0) t1 FULL JOIN (SELECT * FROM prt2_m WHERE prt2_m.c = 0) t2 
ON (t1.a = (t2.b + t2.a)/2 AND t2.b = (t1.a + t1.b)/2) ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------- - Sort - Sort Key: prt1_m.a, prt2_m.b - -> Append - -> Hash Full Join - Hash Cond: ((prt1_m_1.a = ((prt2_m_1.b + prt2_m_1.a) / 2)) AND (((prt1_m_1.a + prt1_m_1.b) / 2) = prt2_m_1.b)) - -> Seq Scan on prt1_m_p1 prt1_m_1 - Filter: (c = 0) - -> Hash - -> Seq Scan on prt2_m_p1 prt2_m_1 - Filter: (c = 0) - -> Hash Full Join - Hash Cond: ((prt1_m_2.a = ((prt2_m_2.b + prt2_m_2.a) / 2)) AND (((prt1_m_2.a + prt1_m_2.b) / 2) = prt2_m_2.b)) - -> Seq Scan on prt1_m_p2 prt1_m_2 - Filter: (c = 0) - -> Hash - -> Seq Scan on prt2_m_p2 prt2_m_2 - Filter: (c = 0) - -> Hash Full Join - Hash Cond: ((prt1_m_3.a = ((prt2_m_3.b + prt2_m_3.a) / 2)) AND (((prt1_m_3.a + prt1_m_3.b) / 2) = prt2_m_3.b)) - -> Seq Scan on prt1_m_p3 prt1_m_3 - Filter: (c = 0) - -> Hash - -> Seq Scan on prt2_m_p3 prt2_m_3 - Filter: (c = 0) -(24 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_m WHERE prt1_m.c = 0) t1 FULL JOIN (SELECT * FROM prt2_m WHERE prt2_m.c = 0) t2 ON (t1.a = (t2.b + t2.a)/2 AND t2.b = (t1.a + t1.b)/2) ORDER BY t1.a, t2.b; - a | c | b | c ------+---+-----+--- - 0 | 0 | 0 | 0 - 50 | 0 | | - 100 | 0 | | - 150 | 0 | 150 | 0 - 200 | 0 | | - 250 | 0 | | - 300 | 0 | 300 | 0 - 350 | 0 | | - 400 | 0 | | - 450 | 0 | 450 | 0 - 500 | 0 | | - 550 | 0 | | - | | 75 | 0 - | | 225 | 0 - | | 375 | 0 - | | 525 | 0 -(16 rows) - --- --- tests for list partitioned tables. --- -CREATE TABLE plt1 (a int, b int, c text) PARTITION BY LIST(c); -CREATE TABLE plt1_p1 PARTITION OF plt1 FOR VALUES IN ('0000', '0003', '0004', '0010'); -CREATE TABLE plt1_p2 PARTITION OF plt1 FOR VALUES IN ('0001', '0005', '0002', '0009'); -CREATE TABLE plt1_p3 PARTITION OF plt1 FOR VALUES IN ('0006', '0007', '0008', '0011'); -INSERT INTO plt1 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; -ANALYZE plt1; -CREATE TABLE plt2 (a int, b int, c text) PARTITION BY LIST(c); -CREATE TABLE plt2_p1 PARTITION OF plt2 FOR VALUES IN ('0000', '0003', '0004', '0010'); -CREATE TABLE plt2_p2 PARTITION OF plt2 FOR VALUES IN ('0001', '0005', '0002', '0009'); -CREATE TABLE plt2_p3 PARTITION OF plt2 FOR VALUES IN ('0006', '0007', '0008', '0011'); -INSERT INTO plt2 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 3) i; -ANALYZE plt2; --- --- list partitioned by expression --- -CREATE TABLE plt1_e (a int, b int, c text) PARTITION BY LIST(ltrim(c, 'A')); -CREATE TABLE plt1_e_p1 PARTITION OF plt1_e FOR VALUES IN ('0000', '0003', '0004', '0010'); -CREATE TABLE plt1_e_p2 PARTITION OF plt1_e FOR VALUES IN ('0001', '0005', '0002', '0009'); -CREATE TABLE plt1_e_p3 PARTITION OF plt1_e FOR VALUES IN ('0006', '0007', '0008', '0011'); -INSERT INTO plt1_e SELECT i, i, 'A' || to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; -ANALYZE plt1_e; --- test partition matching with N-way join -EXPLAIN (COSTS OFF) -SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM plt1 t1, plt2 t2, plt1_e t3 WHERE t1.b = t2.b AND t1.c = t2.c AND ltrim(t3.c, 'A') = t1.c GROUP BY t1.c, t2.c, t3.c ORDER BY t1.c, t2.c, t3.c; - QUERY PLAN --------------------------------------------------------------------------------- - GroupAggregate - Group Key: t1.c, t3.c - -> Sort - Sort Key: t1.c, t3.c - -> Append - -> Hash Join - Hash Cond: (t1_1.c = ltrim(t3_1.c, 'A'::text)) - -> Hash Join - Hash Cond: ((t1_1.b = t2_1.b) AND (t1_1.c = 
t2_1.c)) - -> Seq Scan on plt1_p1 t1_1 - -> Hash - -> Seq Scan on plt2_p1 t2_1 - -> Hash - -> Seq Scan on plt1_e_p1 t3_1 - -> Hash Join - Hash Cond: (t1_2.c = ltrim(t3_2.c, 'A'::text)) - -> Hash Join - Hash Cond: ((t1_2.b = t2_2.b) AND (t1_2.c = t2_2.c)) - -> Seq Scan on plt1_p2 t1_2 - -> Hash - -> Seq Scan on plt2_p2 t2_2 - -> Hash - -> Seq Scan on plt1_e_p2 t3_2 - -> Hash Join - Hash Cond: (t1_3.c = ltrim(t3_3.c, 'A'::text)) - -> Hash Join - Hash Cond: ((t1_3.b = t2_3.b) AND (t1_3.c = t2_3.c)) - -> Seq Scan on plt1_p3 t1_3 - -> Hash - -> Seq Scan on plt2_p3 t2_3 - -> Hash - -> Seq Scan on plt1_e_p3 t3_3 -(32 rows) - -SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM plt1 t1, plt2 t2, plt1_e t3 WHERE t1.b = t2.b AND t1.c = t2.c AND ltrim(t3.c, 'A') = t1.c GROUP BY t1.c, t2.c, t3.c ORDER BY t1.c, t2.c, t3.c; - avg | avg | avg | c | c | c -----------------------+----------------------+-----------------------+------+------+------- - 24.0000000000000000 | 24.0000000000000000 | 48.0000000000000000 | 0000 | 0000 | A0000 - 75.0000000000000000 | 75.0000000000000000 | 148.0000000000000000 | 0001 | 0001 | A0001 - 123.0000000000000000 | 123.0000000000000000 | 248.0000000000000000 | 0002 | 0002 | A0002 - 174.0000000000000000 | 174.0000000000000000 | 348.0000000000000000 | 0003 | 0003 | A0003 - 225.0000000000000000 | 225.0000000000000000 | 448.0000000000000000 | 0004 | 0004 | A0004 - 273.0000000000000000 | 273.0000000000000000 | 548.0000000000000000 | 0005 | 0005 | A0005 - 324.0000000000000000 | 324.0000000000000000 | 648.0000000000000000 | 0006 | 0006 | A0006 - 375.0000000000000000 | 375.0000000000000000 | 748.0000000000000000 | 0007 | 0007 | A0007 - 423.0000000000000000 | 423.0000000000000000 | 848.0000000000000000 | 0008 | 0008 | A0008 - 474.0000000000000000 | 474.0000000000000000 | 948.0000000000000000 | 0009 | 0009 | A0009 - 525.0000000000000000 | 525.0000000000000000 | 1048.0000000000000000 | 0010 | 0010 | A0010 - 573.0000000000000000 | 573.0000000000000000 | 1148.0000000000000000 | 0011 | 0011 | A0011 -(12 rows) - --- joins where one of the relations is proven empty -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.a = 1 AND t1.a = 2; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a = 1 AND a = 2) t1 LEFT JOIN prt2 t2 ON t1.a = t2.b; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a = 1 AND a = 2) t1 RIGHT JOIN prt2 t2 ON t1.a = t2.b, prt1 t3 WHERE t2.b = t3.a; - QUERY PLAN --------------------------------------------------- - Hash Left Join - Hash Cond: (t2.b = a) - -> Append - -> Hash Join - Hash Cond: (t3_1.a = t2_1.b) - -> Seq Scan on prt1_p1 t3_1 - -> Hash - -> Seq Scan on prt2_p1 t2_1 - -> Hash Join - Hash Cond: (t3_2.a = t2_2.b) - -> Seq Scan on prt1_p2 t3_2 - -> Hash - -> Seq Scan on prt2_p2 t2_2 - -> Hash Join - Hash Cond: (t3_3.a = t2_3.b) - -> Seq Scan on prt1_p3 t3_3 - -> Hash - -> Seq Scan on prt2_p3 t2_3 - -> Hash - -> Result - One-Time Filter: false -(21 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a = 1 AND a = 2) t1 FULL JOIN prt2 t2 ON t1.a = t2.b WHERE t2.a = 0 ORDER BY t1.a, t2.b; - QUERY PLAN --------------------------------------------- - Sort - Sort Key: a, t2.b - -> Hash Left Join - Hash Cond: (t2.b = a) - -> Append - 
-> Seq Scan on prt2_p1 t2_1 - Filter: (a = 0) - -> Seq Scan on prt2_p2 t2_2 - Filter: (a = 0) - -> Seq Scan on prt2_p3 t2_3 - Filter: (a = 0) - -> Hash - -> Result - One-Time Filter: false -(14 rows) - --- --- tests for hash partitioned tables. --- -CREATE TABLE pht1 (a int, b int, c text) PARTITION BY HASH(c); -CREATE TABLE pht1_p1 PARTITION OF pht1 FOR VALUES WITH (MODULUS 3, REMAINDER 0); -CREATE TABLE pht1_p2 PARTITION OF pht1 FOR VALUES WITH (MODULUS 3, REMAINDER 1); -CREATE TABLE pht1_p3 PARTITION OF pht1 FOR VALUES WITH (MODULUS 3, REMAINDER 2); -INSERT INTO pht1 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; -ANALYZE pht1; -CREATE TABLE pht2 (a int, b int, c text) PARTITION BY HASH(c); -CREATE TABLE pht2_p1 PARTITION OF pht2 FOR VALUES WITH (MODULUS 3, REMAINDER 0); -CREATE TABLE pht2_p2 PARTITION OF pht2 FOR VALUES WITH (MODULUS 3, REMAINDER 1); -CREATE TABLE pht2_p3 PARTITION OF pht2 FOR VALUES WITH (MODULUS 3, REMAINDER 2); -INSERT INTO pht2 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 3) i; -ANALYZE pht2; --- --- hash partitioned by expression --- -CREATE TABLE pht1_e (a int, b int, c text) PARTITION BY HASH(ltrim(c, 'A')); -CREATE TABLE pht1_e_p1 PARTITION OF pht1_e FOR VALUES WITH (MODULUS 3, REMAINDER 0); -CREATE TABLE pht1_e_p2 PARTITION OF pht1_e FOR VALUES WITH (MODULUS 3, REMAINDER 1); -CREATE TABLE pht1_e_p3 PARTITION OF pht1_e FOR VALUES WITH (MODULUS 3, REMAINDER 2); -INSERT INTO pht1_e SELECT i, i, 'A' || to_char(i/50, 'FM0000') FROM generate_series(0, 299, 2) i; -ANALYZE pht1_e; --- test partition matching with N-way join -EXPLAIN (COSTS OFF) -SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM pht1 t1, pht2 t2, pht1_e t3 WHERE t1.b = t2.b AND t1.c = t2.c AND ltrim(t3.c, 'A') = t1.c GROUP BY t1.c, t2.c, t3.c ORDER BY t1.c, t2.c, t3.c; - QUERY PLAN --------------------------------------------------------------------------------- - GroupAggregate - Group Key: t1.c, t3.c - -> Sort - Sort Key: t1.c, t3.c - -> Append - -> Hash Join - Hash Cond: (t1_1.c = ltrim(t3_1.c, 'A'::text)) - -> Hash Join - Hash Cond: ((t1_1.b = t2_1.b) AND (t1_1.c = t2_1.c)) - -> Seq Scan on pht1_p1 t1_1 - -> Hash - -> Seq Scan on pht2_p1 t2_1 - -> Hash - -> Seq Scan on pht1_e_p1 t3_1 - -> Hash Join - Hash Cond: (t1_2.c = ltrim(t3_2.c, 'A'::text)) - -> Hash Join - Hash Cond: ((t1_2.b = t2_2.b) AND (t1_2.c = t2_2.c)) - -> Seq Scan on pht1_p2 t1_2 - -> Hash - -> Seq Scan on pht2_p2 t2_2 - -> Hash - -> Seq Scan on pht1_e_p2 t3_2 - -> Hash Join - Hash Cond: (t1_3.c = ltrim(t3_3.c, 'A'::text)) - -> Hash Join - Hash Cond: ((t1_3.b = t2_3.b) AND (t1_3.c = t2_3.c)) - -> Seq Scan on pht1_p3 t1_3 - -> Hash - -> Seq Scan on pht2_p3 t2_3 - -> Hash - -> Seq Scan on pht1_e_p3 t3_3 -(32 rows) - -SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM pht1 t1, pht2 t2, pht1_e t3 WHERE t1.b = t2.b AND t1.c = t2.c AND ltrim(t3.c, 'A') = t1.c GROUP BY t1.c, t2.c, t3.c ORDER BY t1.c, t2.c, t3.c; - avg | avg | avg | c | c | c -----------------------+----------------------+----------------------+------+------+------- - 24.0000000000000000 | 24.0000000000000000 | 48.0000000000000000 | 0000 | 0000 | A0000 - 75.0000000000000000 | 75.0000000000000000 | 148.0000000000000000 | 0001 | 0001 | A0001 - 123.0000000000000000 | 123.0000000000000000 | 248.0000000000000000 | 0002 | 0002 | A0002 - 174.0000000000000000 | 174.0000000000000000 | 348.0000000000000000 | 0003 | 0003 | A0003 - 225.0000000000000000 | 225.0000000000000000 | 448.0000000000000000 | 0004 
| 0004 | A0004 - 273.0000000000000000 | 273.0000000000000000 | 548.0000000000000000 | 0005 | 0005 | A0005 -(6 rows) - --- test default partition behavior for range -ALTER TABLE prt1 DETACH PARTITION prt1_p3; -ALTER TABLE prt1 ATTACH PARTITION prt1_p3 DEFAULT; -ANALYZE prt1; -ALTER TABLE prt2 DETACH PARTITION prt2_p3; -ALTER TABLE prt2 ATTACH PARTITION prt2_p3 DEFAULT; -ANALYZE prt2; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN --------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_p1 t2_1 - -> Hash - -> Seq Scan on prt1_p1 t1_1 - Filter: (b = 0) - -> Hash Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_p2 t2_2 - -> Hash - -> Seq Scan on prt1_p2 t1_2 - Filter: (b = 0) - -> Hash Join - Hash Cond: (t2_3.b = t1_3.a) - -> Seq Scan on prt2_p3 t2_3 - -> Hash - -> Seq Scan on prt1_p3 t1_3 - Filter: (b = 0) -(21 rows) - --- test default partition behavior for list -ALTER TABLE plt1 DETACH PARTITION plt1_p3; -ALTER TABLE plt1 ATTACH PARTITION plt1_p3 DEFAULT; -ANALYZE plt1; -ALTER TABLE plt2 DETACH PARTITION plt2_p3; -ALTER TABLE plt2 ATTACH PARTITION plt2_p3 DEFAULT; -ANALYZE plt2; -EXPLAIN (COSTS OFF) -SELECT avg(t1.a), avg(t2.b), t1.c, t2.c FROM plt1 t1 RIGHT JOIN plt2 t2 ON t1.c = t2.c WHERE t1.a % 25 = 0 GROUP BY t1.c, t2.c ORDER BY t1.c, t2.c; - QUERY PLAN --------------------------------------------------------- - Sort - Sort Key: t1.c - -> HashAggregate - Group Key: t1.c - -> Append - -> Hash Join - Hash Cond: (t2_1.c = t1_1.c) - -> Seq Scan on plt2_p1 t2_1 - -> Hash - -> Seq Scan on plt1_p1 t1_1 - Filter: ((a % 25) = 0) - -> Hash Join - Hash Cond: (t2_2.c = t1_2.c) - -> Seq Scan on plt2_p2 t2_2 - -> Hash - -> Seq Scan on plt1_p2 t1_2 - Filter: ((a % 25) = 0) - -> Hash Join - Hash Cond: (t2_3.c = t1_3.c) - -> Seq Scan on plt2_p3 t2_3 - -> Hash - -> Seq Scan on plt1_p3 t1_3 - Filter: ((a % 25) = 0) -(23 rows) - --- --- multiple levels of partitioning --- -CREATE TABLE prt1_l (a int, b int, c varchar) PARTITION BY RANGE(a); -CREATE TABLE prt1_l_p1 PARTITION OF prt1_l FOR VALUES FROM (0) TO (250); -CREATE TABLE prt1_l_p2 PARTITION OF prt1_l FOR VALUES FROM (250) TO (500) PARTITION BY LIST (c); -CREATE TABLE prt1_l_p2_p1 PARTITION OF prt1_l_p2 FOR VALUES IN ('0000', '0001'); -CREATE TABLE prt1_l_p2_p2 PARTITION OF prt1_l_p2 FOR VALUES IN ('0002', '0003'); -CREATE TABLE prt1_l_p3 PARTITION OF prt1_l FOR VALUES FROM (500) TO (600) PARTITION BY RANGE (b); -CREATE TABLE prt1_l_p3_p1 PARTITION OF prt1_l_p3 FOR VALUES FROM (0) TO (13); -CREATE TABLE prt1_l_p3_p2 PARTITION OF prt1_l_p3 FOR VALUES FROM (13) TO (25); -INSERT INTO prt1_l SELECT i, i % 25, to_char(i % 4, 'FM0000') FROM generate_series(0, 599, 2) i; -ANALYZE prt1_l; -CREATE TABLE prt2_l (a int, b int, c varchar) PARTITION BY RANGE(b); -CREATE TABLE prt2_l_p1 PARTITION OF prt2_l FOR VALUES FROM (0) TO (250); -CREATE TABLE prt2_l_p2 PARTITION OF prt2_l FOR VALUES FROM (250) TO (500) PARTITION BY LIST (c); -CREATE TABLE prt2_l_p2_p1 PARTITION OF prt2_l_p2 FOR VALUES IN ('0000', '0001'); -CREATE TABLE prt2_l_p2_p2 PARTITION OF prt2_l_p2 FOR VALUES IN ('0002', '0003'); -CREATE TABLE prt2_l_p3 PARTITION OF prt2_l FOR VALUES FROM (500) TO (600) PARTITION BY RANGE (a); -CREATE TABLE prt2_l_p3_p1 PARTITION OF prt2_l_p3 FOR VALUES FROM (0) TO (13); -CREATE TABLE prt2_l_p3_p2 PARTITION OF prt2_l_p3 FOR VALUES FROM (13) TO (25); -INSERT INTO prt2_l 
SELECT i % 25, i, to_char(i % 4, 'FM0000') FROM generate_series(0, 599, 3) i; -ANALYZE prt2_l; --- inner join, qual covering only top-level partitions -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1, prt2_l t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN -------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_l_p1 t2_1 - -> Hash - -> Seq Scan on prt1_l_p1 t1_1 - Filter: (b = 0) - -> Hash Join - Hash Cond: (t2_3.b = t1_3.a) - -> Append - -> Seq Scan on prt2_l_p2_p1 t2_3 - -> Seq Scan on prt2_l_p2_p2 t2_4 - -> Hash - -> Append - -> Seq Scan on prt1_l_p2_p1 t1_3 - Filter: (b = 0) - -> Seq Scan on prt1_l_p2_p2 t1_4 - Filter: (b = 0) - -> Hash Join - Hash Cond: (t2_6.b = t1_5.a) - -> Append - -> Seq Scan on prt2_l_p3_p1 t2_6 - -> Seq Scan on prt2_l_p3_p2 t2_7 - -> Hash - -> Seq Scan on prt1_l_p3_p1 t1_5 - Filter: (b = 0) -(28 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1, prt2_l t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 0 | 0000 | 0 | 0000 - 150 | 0002 | 150 | 0002 - 300 | 0000 | 300 | 0000 - 450 | 0002 | 450 | 0002 -(4 rows) - --- inner join with partially-redundant join clauses -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1, prt2_l t2 WHERE t1.a = t2.a AND t1.a = t2.b AND t1.c = t2.c ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t1_1.a = t2_1.a) AND ((t1_1.c)::text = (t2_1.c)::text)) - -> Seq Scan on prt1_l_p1 t1_1 - -> Hash - -> Seq Scan on prt2_l_p1 t2_1 - Filter: (a = b) - -> Hash Join - Hash Cond: ((t1_2.a = t2_2.a) AND ((t1_2.c)::text = (t2_2.c)::text)) - -> Seq Scan on prt1_l_p2_p1 t1_2 - -> Hash - -> Seq Scan on prt2_l_p2_p1 t2_2 - Filter: (a = b) - -> Hash Join - Hash Cond: ((t1_3.a = t2_3.a) AND ((t1_3.c)::text = (t2_3.c)::text)) - -> Seq Scan on prt1_l_p2_p2 t1_3 - -> Hash - -> Seq Scan on prt2_l_p2_p2 t2_3 - Filter: (a = b) - -> Hash Join - Hash Cond: ((t1_5.a = t2_5.a) AND ((t1_5.c)::text = (t2_5.c)::text)) - -> Append - -> Seq Scan on prt1_l_p3_p1 t1_5 - -> Seq Scan on prt1_l_p3_p2 t1_6 - -> Hash - -> Append - -> Seq Scan on prt2_l_p3_p1 t2_5 - Filter: (a = b) - -> Seq Scan on prt2_l_p3_p2 t2_6 - Filter: (a = b) -(32 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1, prt2_l t2 WHERE t1.a = t2.a AND t1.a = t2.b AND t1.c = t2.c ORDER BY t1.a, t2.b; - a | c | b | c -----+------+----+------ - 0 | 0000 | 0 | 0000 - 6 | 0002 | 6 | 0002 - 12 | 0000 | 12 | 0000 - 18 | 0002 | 18 | 0002 - 24 | 0000 | 24 | 0000 -(5 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1 LEFT JOIN prt2_l t2 ON t1.a = t2.b AND t1.c = t2.c WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------------------------------------- - Sort - Sort Key: t1.a, t2.b - -> Append - -> Hash Right Join - Hash Cond: ((t2_1.b = t1_1.a) AND ((t2_1.c)::text = (t1_1.c)::text)) - -> Seq Scan on prt2_l_p1 t2_1 - -> Hash - -> Seq Scan on prt1_l_p1 t1_1 - Filter: (b = 0) - -> Hash Right Join - Hash Cond: ((t2_2.b = t1_2.a) AND ((t2_2.c)::text = (t1_2.c)::text)) - -> Seq Scan on prt2_l_p2_p1 t2_2 - -> Hash - -> Seq Scan on prt1_l_p2_p1 t1_2 - Filter: (b = 0) - -> Hash Right Join - Hash Cond: ((t2_3.b = t1_3.a) AND ((t2_3.c)::text = (t1_3.c)::text)) - -> Seq Scan on prt2_l_p2_p2 t2_3 
- -> Hash - -> Seq Scan on prt1_l_p2_p2 t1_3 - Filter: (b = 0) - -> Hash Right Join - Hash Cond: ((t2_5.b = t1_4.a) AND ((t2_5.c)::text = (t1_4.c)::text)) - -> Append - -> Seq Scan on prt2_l_p3_p1 t2_5 - -> Seq Scan on prt2_l_p3_p2 t2_6 - -> Hash - -> Seq Scan on prt1_l_p3_p1 t1_4 - Filter: (b = 0) -(29 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1 LEFT JOIN prt2_l t2 ON t1.a = t2.b AND t1.c = t2.c WHERE t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 0 | 0000 | 0 | 0000 - 50 | 0002 | | - 100 | 0000 | | - 150 | 0002 | 150 | 0002 - 200 | 0000 | | - 250 | 0002 | | - 300 | 0000 | 300 | 0000 - 350 | 0002 | | - 400 | 0000 | | - 450 | 0002 | 450 | 0002 - 500 | 0000 | | - 550 | 0002 | | -(12 rows) - --- right join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1 RIGHT JOIN prt2_l t2 ON t1.a = t2.b AND t1.c = t2.c WHERE t2.a = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------------------------------------- - Sort - Sort Key: t1.a, t2.b - -> Append - -> Hash Right Join - Hash Cond: ((t1_1.a = t2_1.b) AND ((t1_1.c)::text = (t2_1.c)::text)) - -> Seq Scan on prt1_l_p1 t1_1 - -> Hash - -> Seq Scan on prt2_l_p1 t2_1 - Filter: (a = 0) - -> Hash Right Join - Hash Cond: ((t1_2.a = t2_2.b) AND ((t1_2.c)::text = (t2_2.c)::text)) - -> Seq Scan on prt1_l_p2_p1 t1_2 - -> Hash - -> Seq Scan on prt2_l_p2_p1 t2_2 - Filter: (a = 0) - -> Hash Right Join - Hash Cond: ((t1_3.a = t2_3.b) AND ((t1_3.c)::text = (t2_3.c)::text)) - -> Seq Scan on prt1_l_p2_p2 t1_3 - -> Hash - -> Seq Scan on prt2_l_p2_p2 t2_3 - Filter: (a = 0) - -> Hash Right Join - Hash Cond: ((t1_5.a = t2_4.b) AND ((t1_5.c)::text = (t2_4.c)::text)) - -> Append - -> Seq Scan on prt1_l_p3_p1 t1_5 - -> Seq Scan on prt1_l_p3_p2 t1_6 - -> Hash - -> Seq Scan on prt2_l_p3_p1 t2_4 - Filter: (a = 0) -(29 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1 RIGHT JOIN prt2_l t2 ON t1.a = t2.b AND t1.c = t2.c WHERE t2.a = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 0 | 0000 | 0 | 0000 - 150 | 0002 | 150 | 0002 - 300 | 0000 | 300 | 0000 - 450 | 0002 | 450 | 0002 - | | 75 | 0003 - | | 225 | 0001 - | | 375 | 0003 - | | 525 | 0001 -(8 rows) - --- full join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_l WHERE prt1_l.b = 0) t1 FULL JOIN (SELECT * FROM prt2_l WHERE prt2_l.a = 0) t2 ON (t1.a = t2.b AND t1.c = t2.c) ORDER BY t1.a, t2.b; - QUERY PLAN ----------------------------------------------------------------------------------------------------- - Sort - Sort Key: prt1_l.a, prt2_l.b - -> Append - -> Hash Full Join - Hash Cond: ((prt1_l_1.a = prt2_l_1.b) AND ((prt1_l_1.c)::text = (prt2_l_1.c)::text)) - -> Seq Scan on prt1_l_p1 prt1_l_1 - Filter: (b = 0) - -> Hash - -> Seq Scan on prt2_l_p1 prt2_l_1 - Filter: (a = 0) - -> Hash Full Join - Hash Cond: ((prt1_l_2.a = prt2_l_2.b) AND ((prt1_l_2.c)::text = (prt2_l_2.c)::text)) - -> Seq Scan on prt1_l_p2_p1 prt1_l_2 - Filter: (b = 0) - -> Hash - -> Seq Scan on prt2_l_p2_p1 prt2_l_2 - Filter: (a = 0) - -> Hash Full Join - Hash Cond: ((prt1_l_3.a = prt2_l_3.b) AND ((prt1_l_3.c)::text = (prt2_l_3.c)::text)) - -> Seq Scan on prt1_l_p2_p2 prt1_l_3 - Filter: (b = 0) - -> Hash - -> Seq Scan on prt2_l_p2_p2 prt2_l_3 - Filter: (a = 0) - -> Hash Full Join - Hash Cond: ((prt1_l_4.a = prt2_l_4.b) AND ((prt1_l_4.c)::text = (prt2_l_4.c)::text)) - -> Seq Scan on prt1_l_p3_p1 prt1_l_4 - Filter: (b = 0) - -> Hash - -> Seq Scan on prt2_l_p3_p1 prt2_l_4 - Filter: (a = 0) -(31 rows) - -SELECT t1.a, 
t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_l WHERE prt1_l.b = 0) t1 FULL JOIN (SELECT * FROM prt2_l WHERE prt2_l.a = 0) t2 ON (t1.a = t2.b AND t1.c = t2.c) ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 0 | 0000 | 0 | 0000 - 50 | 0002 | | - 100 | 0000 | | - 150 | 0002 | 150 | 0002 - 200 | 0000 | | - 250 | 0002 | | - 300 | 0000 | 300 | 0000 - 350 | 0002 | | - 400 | 0000 | | - 450 | 0002 | 450 | 0002 - 500 | 0000 | | - 550 | 0002 | | - | | 75 | 0003 - | | 225 | 0001 - | | 375 | 0003 - | | 525 | 0001 -(16 rows) - --- lateral partitionwise join -EXPLAIN (COSTS OFF) -SELECT * FROM prt1_l t1 LEFT JOIN LATERAL - (SELECT t2.a AS t2a, t2.c AS t2c, t2.b AS t2b, t3.b AS t3b, least(t1.a,t2.a,t3.b) FROM prt1_l t2 JOIN prt2_l t3 ON (t2.a = t3.b AND t2.c = t3.c)) ss - ON t1.a = ss.t2a AND t1.c = ss.t2c WHERE t1.b = 0 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------------------------------------------------ - Sort - Sort Key: t1.a - -> Append - -> Nested Loop Left Join - -> Seq Scan on prt1_l_p1 t1_1 - Filter: (b = 0) - -> Hash Join - Hash Cond: ((t3_1.b = t2_1.a) AND ((t3_1.c)::text = (t2_1.c)::text)) - -> Seq Scan on prt2_l_p1 t3_1 - -> Hash - -> Seq Scan on prt1_l_p1 t2_1 - Filter: ((t1_1.a = a) AND ((t1_1.c)::text = (c)::text)) - -> Nested Loop Left Join - -> Seq Scan on prt1_l_p2_p1 t1_2 - Filter: (b = 0) - -> Hash Join - Hash Cond: ((t3_2.b = t2_2.a) AND ((t3_2.c)::text = (t2_2.c)::text)) - -> Seq Scan on prt2_l_p2_p1 t3_2 - -> Hash - -> Seq Scan on prt1_l_p2_p1 t2_2 - Filter: ((t1_2.a = a) AND ((t1_2.c)::text = (c)::text)) - -> Nested Loop Left Join - -> Seq Scan on prt1_l_p2_p2 t1_3 - Filter: (b = 0) - -> Hash Join - Hash Cond: ((t3_3.b = t2_3.a) AND ((t3_3.c)::text = (t2_3.c)::text)) - -> Seq Scan on prt2_l_p2_p2 t3_3 - -> Hash - -> Seq Scan on prt1_l_p2_p2 t2_3 - Filter: ((t1_3.a = a) AND ((t1_3.c)::text = (c)::text)) - -> Nested Loop Left Join - -> Seq Scan on prt1_l_p3_p1 t1_4 - Filter: (b = 0) - -> Hash Join - Hash Cond: ((t3_5.b = t2_5.a) AND ((t3_5.c)::text = (t2_5.c)::text)) - -> Append - -> Seq Scan on prt2_l_p3_p1 t3_5 - -> Seq Scan on prt2_l_p3_p2 t3_6 - -> Hash - -> Append - -> Seq Scan on prt1_l_p3_p1 t2_5 - Filter: ((t1_4.a = a) AND ((t1_4.c)::text = (c)::text)) - -> Seq Scan on prt1_l_p3_p2 t2_6 - Filter: ((t1_4.a = a) AND ((t1_4.c)::text = (c)::text)) -(44 rows) - -SELECT * FROM prt1_l t1 LEFT JOIN LATERAL - (SELECT t2.a AS t2a, t2.c AS t2c, t2.b AS t2b, t3.b AS t3b, least(t1.a,t2.a,t3.b) FROM prt1_l t2 JOIN prt2_l t3 ON (t2.a = t3.b AND t2.c = t3.c)) ss - ON t1.a = ss.t2a AND t1.c = ss.t2c WHERE t1.b = 0 ORDER BY t1.a; - a | b | c | t2a | t2c | t2b | t3b | least ------+---+------+-----+------+-----+-----+------- - 0 | 0 | 0000 | 0 | 0000 | 0 | 0 | 0 - 50 | 0 | 0002 | | | | | - 100 | 0 | 0000 | | | | | - 150 | 0 | 0002 | 150 | 0002 | 0 | 150 | 150 - 200 | 0 | 0000 | | | | | - 250 | 0 | 0002 | | | | | - 300 | 0 | 0000 | 300 | 0000 | 0 | 300 | 300 - 350 | 0 | 0002 | | | | | - 400 | 0 | 0000 | | | | | - 450 | 0 | 0002 | 450 | 0002 | 0 | 450 | 450 - 500 | 0 | 0000 | | | | | - 550 | 0 | 0002 | | | | | -(12 rows) - --- partitionwise join with lateral reference in sample scan -EXPLAIN (COSTS OFF) -SELECT * FROM prt1_l t1 JOIN LATERAL - (SELECT * FROM prt1_l t2 TABLESAMPLE SYSTEM (t1.a) REPEATABLE(t1.b)) s - ON t1.a = s.a AND t1.b = s.b AND t1.c = s.c; - QUERY PLAN ----------------------------------------------------------------------------------------- - Append - -> Nested Loop - -> Seq Scan on prt1_l_p1 t1_1 - -> Sample Scan on 
prt1_l_p1 t2_1 - Sampling: system (t1_1.a) REPEATABLE (t1_1.b) - Filter: ((t1_1.a = a) AND (t1_1.b = b) AND ((t1_1.c)::text = (c)::text)) - -> Nested Loop - -> Seq Scan on prt1_l_p2_p1 t1_2 - -> Sample Scan on prt1_l_p2_p1 t2_2 - Sampling: system (t1_2.a) REPEATABLE (t1_2.b) - Filter: ((t1_2.a = a) AND (t1_2.b = b) AND ((t1_2.c)::text = (c)::text)) - -> Nested Loop - -> Seq Scan on prt1_l_p2_p2 t1_3 - -> Sample Scan on prt1_l_p2_p2 t2_3 - Sampling: system (t1_3.a) REPEATABLE (t1_3.b) - Filter: ((t1_3.a = a) AND (t1_3.b = b) AND ((t1_3.c)::text = (c)::text)) - -> Nested Loop - -> Seq Scan on prt1_l_p3_p1 t1_4 - -> Sample Scan on prt1_l_p3_p1 t2_4 - Sampling: system (t1_4.a) REPEATABLE (t1_4.b) - Filter: ((t1_4.a = a) AND (t1_4.b = b) AND ((t1_4.c)::text = (c)::text)) - -> Nested Loop - -> Seq Scan on prt1_l_p3_p2 t1_5 - -> Sample Scan on prt1_l_p3_p2 t2_5 - Sampling: system (t1_5.a) REPEATABLE (t1_5.b) - Filter: ((t1_5.a = a) AND (t1_5.b = b) AND ((t1_5.c)::text = (c)::text)) -(26 rows) - --- partitionwise join with lateral reference in scan's restriction clauses -EXPLAIN (COSTS OFF) -SELECT COUNT(*) FROM prt1_l t1 LEFT JOIN LATERAL - (SELECT t1.b AS t1b, t2.* FROM prt2_l t2) s - ON t1.a = s.b AND t1.b = s.a AND t1.c = s.c - WHERE s.t1b = s.a; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------- - Aggregate - -> Append - -> Nested Loop - -> Seq Scan on prt1_l_p1 t1_1 - -> Seq Scan on prt2_l_p1 t2_1 - Filter: ((a = t1_1.b) AND (t1_1.a = b) AND (t1_1.b = a) AND ((t1_1.c)::text = (c)::text)) - -> Nested Loop - -> Seq Scan on prt1_l_p2_p1 t1_2 - -> Seq Scan on prt2_l_p2_p1 t2_2 - Filter: ((a = t1_2.b) AND (t1_2.a = b) AND (t1_2.b = a) AND ((t1_2.c)::text = (c)::text)) - -> Nested Loop - -> Seq Scan on prt1_l_p2_p2 t1_3 - -> Seq Scan on prt2_l_p2_p2 t2_3 - Filter: ((a = t1_3.b) AND (t1_3.a = b) AND (t1_3.b = a) AND ((t1_3.c)::text = (c)::text)) - -> Nested Loop - -> Seq Scan on prt1_l_p3_p1 t1_4 - -> Seq Scan on prt2_l_p3_p1 t2_4 - Filter: ((a = t1_4.b) AND (t1_4.a = b) AND (t1_4.b = a) AND ((t1_4.c)::text = (c)::text)) - -> Nested Loop - -> Seq Scan on prt1_l_p3_p2 t1_5 - -> Seq Scan on prt2_l_p3_p2 t2_5 - Filter: ((a = t1_5.b) AND (t1_5.a = b) AND (t1_5.b = a) AND ((t1_5.c)::text = (c)::text)) -(22 rows) - -SELECT COUNT(*) FROM prt1_l t1 LEFT JOIN LATERAL - (SELECT t1.b AS t1b, t2.* FROM prt2_l t2) s - ON t1.a = s.b AND t1.b = s.a AND t1.c = s.c - WHERE s.t1b = s.a; - count -------- - 100 -(1 row) - --- join with one side empty -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_l WHERE a = 1 AND a = 2) t1 RIGHT JOIN prt2_l t2 ON t1.a = t2.b AND t1.b = t2.a AND t1.c = t2.c; - QUERY PLAN -------------------------------------------------------------------------- - Hash Left Join - Hash Cond: ((t2.b = a) AND (t2.a = b) AND ((t2.c)::text = (c)::text)) - -> Append - -> Seq Scan on prt2_l_p1 t2_1 - -> Seq Scan on prt2_l_p2_p1 t2_2 - -> Seq Scan on prt2_l_p2_p2 t2_3 - -> Seq Scan on prt2_l_p3_p1 t2_4 - -> Seq Scan on prt2_l_p3_p2 t2_5 - -> Hash - -> Result - One-Time Filter: false -(11 rows) - --- Test case to verify proper handling of subqueries in a partitioned delete. --- The weird-looking lateral join is just there to force creation of a --- nestloop parameter within the subquery, which exposes the problem if the --- planner fails to make multiple copies of the subquery as appropriate. 
-EXPLAIN (COSTS OFF) -DELETE FROM prt1_l -WHERE EXISTS ( - SELECT 1 - FROM int4_tbl, - LATERAL (SELECT int4_tbl.f1 FROM int8_tbl LIMIT 2) ss - WHERE prt1_l.c IS NULL); - QUERY PLAN ----------------------------------------------------------- - Delete on prt1_l - Delete on prt1_l_p1 prt1_l_1 - Delete on prt1_l_p3_p1 prt1_l_2 - Delete on prt1_l_p3_p2 prt1_l_3 - -> Nested Loop Semi Join - -> Append - -> Seq Scan on prt1_l_p1 prt1_l_1 - Filter: (c IS NULL) - -> Seq Scan on prt1_l_p3_p1 prt1_l_2 - Filter: (c IS NULL) - -> Seq Scan on prt1_l_p3_p2 prt1_l_3 - Filter: (c IS NULL) - -> Materialize - -> Nested Loop - -> Seq Scan on int4_tbl - -> Subquery Scan on ss - -> Limit - -> Seq Scan on int8_tbl -(18 rows) - --- --- negative testcases --- -CREATE TABLE prt1_n (a int, b int, c varchar) PARTITION BY RANGE(c); -CREATE TABLE prt1_n_p1 PARTITION OF prt1_n FOR VALUES FROM ('0000') TO ('0250'); -CREATE TABLE prt1_n_p2 PARTITION OF prt1_n FOR VALUES FROM ('0250') TO ('0500'); -INSERT INTO prt1_n SELECT i, i, to_char(i, 'FM0000') FROM generate_series(0, 499, 2) i; -ANALYZE prt1_n; -CREATE TABLE prt2_n (a int, b int, c text) PARTITION BY LIST(c); -CREATE TABLE prt2_n_p1 PARTITION OF prt2_n FOR VALUES IN ('0000', '0003', '0004', '0010', '0006', '0007'); -CREATE TABLE prt2_n_p2 PARTITION OF prt2_n FOR VALUES IN ('0001', '0005', '0002', '0009', '0008', '0011'); -INSERT INTO prt2_n SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; -ANALYZE prt2_n; -CREATE TABLE prt3_n (a int, b int, c text) PARTITION BY LIST(c); -CREATE TABLE prt3_n_p1 PARTITION OF prt3_n FOR VALUES IN ('0000', '0004', '0006', '0007'); -CREATE TABLE prt3_n_p2 PARTITION OF prt3_n FOR VALUES IN ('0001', '0002', '0008', '0010'); -CREATE TABLE prt3_n_p3 PARTITION OF prt3_n FOR VALUES IN ('0003', '0005', '0009', '0011'); -INSERT INTO prt2_n SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; -ANALYZE prt3_n; -CREATE TABLE prt4_n (a int, b int, c text) PARTITION BY RANGE(a); -CREATE TABLE prt4_n_p1 PARTITION OF prt4_n FOR VALUES FROM (0) TO (300); -CREATE TABLE prt4_n_p2 PARTITION OF prt4_n FOR VALUES FROM (300) TO (500); -CREATE TABLE prt4_n_p3 PARTITION OF prt4_n FOR VALUES FROM (500) TO (600); -INSERT INTO prt4_n SELECT i, i, to_char(i, 'FM0000') FROM generate_series(0, 599, 2) i; -ANALYZE prt4_n; --- partitionwise join can not be applied if the partition ranges differ -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt4_n t2 WHERE t1.a = t2.a; - QUERY PLAN ----------------------------------------------- - Hash Join - Hash Cond: (t1.a = t2.a) - -> Append - -> Seq Scan on prt1_p1 t1_1 - -> Seq Scan on prt1_p2 t1_2 - -> Seq Scan on prt1_p3 t1_3 - -> Hash - -> Append - -> Seq Scan on prt4_n_p1 t2_1 - -> Seq Scan on prt4_n_p2 t2_2 - -> Seq Scan on prt4_n_p3 t2_3 -(11 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt4_n t2, prt2 t3 WHERE t1.a = t2.a and t1.a = t3.b; - QUERY PLAN --------------------------------------------------------- - Hash Join - Hash Cond: (t2.a = t1.a) - -> Append - -> Seq Scan on prt4_n_p1 t2_1 - -> Seq Scan on prt4_n_p2 t2_2 - -> Seq Scan on prt4_n_p3 t2_3 - -> Hash - -> Append - -> Hash Join - Hash Cond: (t1_1.a = t3_1.b) - -> Seq Scan on prt1_p1 t1_1 - -> Hash - -> Seq Scan on prt2_p1 t3_1 - -> Hash Join - Hash Cond: (t1_2.a = t3_2.b) - -> Seq Scan on prt1_p2 t1_2 - -> Hash - -> Seq Scan on prt2_p2 t3_2 - -> Hash Join - Hash Cond: (t1_3.a = t3_3.b) - -> Seq Scan on prt1_p3 t1_3 - -> Hash - -> Seq Scan on prt2_p3 t3_3 -(23 rows) - --- 
partitionwise join can not be applied if there are no equi-join conditions --- between partition keys -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1 LEFT JOIN prt2 t2 ON (t1.a < t2.b); - QUERY PLAN ---------------------------------------------------------- - Nested Loop Left Join - -> Append - -> Seq Scan on prt1_p1 t1_1 - -> Seq Scan on prt1_p2 t1_2 - -> Seq Scan on prt1_p3 t1_3 - -> Append - -> Index Scan using iprt2_p1_b on prt2_p1 t2_1 - Index Cond: (b > t1.a) - -> Index Scan using iprt2_p2_b on prt2_p2 t2_2 - Index Cond: (b > t1.a) - -> Index Scan using iprt2_p3_b on prt2_p3 t2_3 - Index Cond: (b > t1.a) -(12 rows) - --- equi-join with join condition on partial keys does not qualify for --- partitionwise join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_m t1, prt2_m t2 WHERE t1.a = (t2.b + t2.a)/2; - QUERY PLAN ----------------------------------------------- - Hash Join - Hash Cond: (((t2.b + t2.a) / 2) = t1.a) - -> Append - -> Seq Scan on prt2_m_p1 t2_1 - -> Seq Scan on prt2_m_p2 t2_2 - -> Seq Scan on prt2_m_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on prt1_m_p1 t1_1 - -> Seq Scan on prt1_m_p2 t1_2 - -> Seq Scan on prt1_m_p3 t1_3 -(11 rows) - --- equi-join between out-of-order partition key columns does not qualify for --- partitionwise join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_m t1 LEFT JOIN prt2_m t2 ON t1.a = t2.b; - QUERY PLAN ----------------------------------------------- - Hash Left Join - Hash Cond: (t1.a = t2.b) - -> Append - -> Seq Scan on prt1_m_p1 t1_1 - -> Seq Scan on prt1_m_p2 t1_2 - -> Seq Scan on prt1_m_p3 t1_3 - -> Hash - -> Append - -> Seq Scan on prt2_m_p1 t2_1 - -> Seq Scan on prt2_m_p2 t2_2 - -> Seq Scan on prt2_m_p3 t2_3 -(11 rows) - --- equi-join between non-key columns does not qualify for partitionwise join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_m t1 LEFT JOIN prt2_m t2 ON t1.c = t2.c; - QUERY PLAN ----------------------------------------------- - Hash Left Join - Hash Cond: (t1.c = t2.c) - -> Append - -> Seq Scan on prt1_m_p1 t1_1 - -> Seq Scan on prt1_m_p2 t1_2 - -> Seq Scan on prt1_m_p3 t1_3 - -> Hash - -> Append - -> Seq Scan on prt2_m_p1 t2_1 - -> Seq Scan on prt2_m_p2 t2_2 - -> Seq Scan on prt2_m_p3 t2_3 -(11 rows) - --- partitionwise join can not be applied for a join between list and range --- partitioned tables -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_n t1 LEFT JOIN prt2_n t2 ON (t1.c = t2.c); - QUERY PLAN ----------------------------------------------- - Hash Right Join - Hash Cond: (t2.c = (t1.c)::text) - -> Append - -> Seq Scan on prt2_n_p1 t2_1 - -> Seq Scan on prt2_n_p2 t2_2 - -> Hash - -> Append - -> Seq Scan on prt1_n_p1 t1_1 - -> Seq Scan on prt1_n_p2 t1_2 -(9 rows) - --- partitionwise join can not be applied between tables with different --- partition lists -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_n t1 JOIN prt2_n t2 ON (t1.c = t2.c) JOIN plt1 t3 ON (t1.c = t3.c); - QUERY PLAN ----------------------------------------------------------- - Hash Join - Hash Cond: (t2.c = (t1.c)::text) - -> Append - -> Seq Scan on prt2_n_p1 t2_1 - -> Seq Scan on prt2_n_p2 t2_2 - -> Hash - -> Hash Join - Hash Cond: (t3.c = (t1.c)::text) - -> Append - -> Seq Scan on plt1_p1 t3_1 - -> Seq Scan on plt1_p2 t3_2 - -> Seq Scan on plt1_p3 t3_3 - -> Hash - -> Append - -> Seq Scan on prt1_n_p1 t1_1 - -> Seq Scan on prt1_n_p2 t1_2 -(16 rows) - --- partitionwise join can not be applied for a join between key column and --- non-key column -EXPLAIN 
(COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_n t1 FULL JOIN prt1 t2 ON (t1.c = t2.c); - QUERY PLAN ----------------------------------------------- - Hash Full Join - Hash Cond: ((t2.c)::text = (t1.c)::text) - -> Append - -> Seq Scan on prt1_p1 t2_1 - -> Seq Scan on prt1_p2 t2_2 - -> Seq Scan on prt1_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on prt1_n_p1 t1_1 - -> Seq Scan on prt1_n_p2 t1_2 -(10 rows) - --- --- Test some other plan types in a partitionwise join (unfortunately, --- we need larger tables to get the planner to choose these plan types) --- -create temp table prtx1 (a integer, b integer, c integer) - partition by range (a); -create temp table prtx1_1 partition of prtx1 for values from (1) to (11); -create temp table prtx1_2 partition of prtx1 for values from (11) to (21); -create temp table prtx1_3 partition of prtx1 for values from (21) to (31); -create temp table prtx2 (a integer, b integer, c integer) - partition by range (a); -create temp table prtx2_1 partition of prtx2 for values from (1) to (11); -create temp table prtx2_2 partition of prtx2 for values from (11) to (21); -create temp table prtx2_3 partition of prtx2 for values from (21) to (31); -insert into prtx1 select 1 + i%30, i, i - from generate_series(1,1000) i; -insert into prtx2 select 1 + i%30, i, i - from generate_series(1,500) i, generate_series(1,10) j; -create index on prtx2 (b); -create index on prtx2 (c); -analyze prtx1; -analyze prtx2; -explain (costs off) -select * from prtx1 -where not exists (select 1 from prtx2 - where prtx2.a=prtx1.a and prtx2.b=prtx1.b and prtx2.c=123) - and a<20 and c=120; - QUERY PLAN -------------------------------------------------------------- - Append - -> Nested Loop Anti Join - -> Seq Scan on prtx1_1 - Filter: ((a < 20) AND (c = 120)) - -> Bitmap Heap Scan on prtx2_1 - Recheck Cond: ((b = prtx1_1.b) AND (c = 123)) - Filter: (a = prtx1_1.a) - -> BitmapAnd - -> Bitmap Index Scan on prtx2_1_b_idx - Index Cond: (b = prtx1_1.b) - -> Bitmap Index Scan on prtx2_1_c_idx - Index Cond: (c = 123) - -> Nested Loop Anti Join - -> Seq Scan on prtx1_2 - Filter: ((a < 20) AND (c = 120)) - -> Bitmap Heap Scan on prtx2_2 - Recheck Cond: ((b = prtx1_2.b) AND (c = 123)) - Filter: (a = prtx1_2.a) - -> BitmapAnd - -> Bitmap Index Scan on prtx2_2_b_idx - Index Cond: (b = prtx1_2.b) - -> Bitmap Index Scan on prtx2_2_c_idx - Index Cond: (c = 123) -(23 rows) - -select * from prtx1 -where not exists (select 1 from prtx2 - where prtx2.a=prtx1.a and prtx2.b=prtx1.b and prtx2.c=123) - and a<20 and c=120; - a | b | c ----+-----+----- - 1 | 120 | 120 -(1 row) - -explain (costs off) -select * from prtx1 -where not exists (select 1 from prtx2 - where prtx2.a=prtx1.a and (prtx2.b=prtx1.b+1 or prtx2.c=99)) - and a<20 and c=91; - QUERY PLAN ------------------------------------------------------------------ - Append - -> Nested Loop Anti Join - -> Seq Scan on prtx1_1 - Filter: ((a < 20) AND (c = 91)) - -> Bitmap Heap Scan on prtx2_1 - Recheck Cond: ((b = (prtx1_1.b + 1)) OR (c = 99)) - Filter: (a = prtx1_1.a) - -> BitmapOr - -> Bitmap Index Scan on prtx2_1_b_idx - Index Cond: (b = (prtx1_1.b + 1)) - -> Bitmap Index Scan on prtx2_1_c_idx - Index Cond: (c = 99) - -> Nested Loop Anti Join - -> Seq Scan on prtx1_2 - Filter: ((a < 20) AND (c = 91)) - -> Bitmap Heap Scan on prtx2_2 - Recheck Cond: ((b = (prtx1_2.b + 1)) OR (c = 99)) - Filter: (a = prtx1_2.a) - -> BitmapOr - -> Bitmap Index Scan on prtx2_2_b_idx - Index Cond: (b = (prtx1_2.b + 1)) - -> Bitmap Index Scan on prtx2_2_c_idx - Index Cond: (c = 99) 
-(23 rows) - -select * from prtx1 -where not exists (select 1 from prtx2 - where prtx2.a=prtx1.a and (prtx2.b=prtx1.b+1 or prtx2.c=99)) - and a<20 and c=91; - a | b | c ----+----+---- - 2 | 91 | 91 -(1 row) - --- --- Test advanced partition-matching algorithm for partitioned join --- --- Tests for range-partitioned tables -CREATE TABLE prt1_adv (a int, b int, c varchar) PARTITION BY RANGE (a); -CREATE TABLE prt1_adv_p1 PARTITION OF prt1_adv FOR VALUES FROM (100) TO (200); -CREATE TABLE prt1_adv_p2 PARTITION OF prt1_adv FOR VALUES FROM (200) TO (300); -CREATE TABLE prt1_adv_p3 PARTITION OF prt1_adv FOR VALUES FROM (300) TO (400); -CREATE INDEX prt1_adv_a_idx ON prt1_adv (a); -INSERT INTO prt1_adv SELECT i, i % 25, to_char(i, 'FM0000') FROM generate_series(100, 399) i; -ANALYZE prt1_adv; -CREATE TABLE prt2_adv (a int, b int, c varchar) PARTITION BY RANGE (b); -CREATE TABLE prt2_adv_p1 PARTITION OF prt2_adv FOR VALUES FROM (100) TO (150); -CREATE TABLE prt2_adv_p2 PARTITION OF prt2_adv FOR VALUES FROM (200) TO (300); -CREATE TABLE prt2_adv_p3 PARTITION OF prt2_adv FOR VALUES FROM (350) TO (500); -CREATE INDEX prt2_adv_b_idx ON prt2_adv (b); -INSERT INTO prt2_adv_p1 SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(100, 149) i; -INSERT INTO prt2_adv_p2 SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(200, 299) i; -INSERT INTO prt2_adv_p3 SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(350, 499) i; -ANALYZE prt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Hash Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Hash Join - Hash Cond: (t2_3.b = t1_3.a) - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(21 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 100 | 0100 | 100 | 0100 - 125 | 0125 | 125 | 0125 - 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 - 350 | 0350 | 350 | 0350 - 375 | 0375 | 375 | 0375 -(8 rows) - --- semi join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt1_adv t1 WHERE EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Semi Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Hash Right Semi Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Hash Right Semi Join - Hash Cond: (t2_3.b = t1_3.a) - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(21 rows) - -SELECT t1.* FROM prt1_adv t1 WHERE EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - a | b | c ------+---+------ - 100 | 0 | 0100 - 125 | 0 | 0125 - 200 | 0 | 0200 - 225 | 0 | 0225 - 250 | 0 | 0250 - 275 | 0 | 0275 - 350 | 0 | 0350 
- 375 | 0 | 0375 -(8 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a, t2.b - -> Append - -> Hash Right Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Hash Right Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Hash Right Join - Hash Cond: (t2_3.b = t1_3.a) - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(21 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 100 | 0100 | 100 | 0100 - 125 | 0125 | 125 | 0125 - 150 | 0150 | | - 175 | 0175 | | - 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 - 300 | 0300 | | - 325 | 0325 | | - 350 | 0350 | 350 | 0350 - 375 | 0375 | 375 | 0375 -(12 rows) - --- anti join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Anti Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Hash Right Anti Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Hash Right Anti Join - Hash Cond: (t2_3.b = t1_3.a) - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(21 rows) - -SELECT t1.* FROM prt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - a | b | c ------+---+------ - 150 | 0 | 0150 - 175 | 0 | 0175 - 300 | 0 | 0300 - 325 | 0 | 0325 -(4 rows) - --- full join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 175 phv, * FROM prt1_adv WHERE prt1_adv.b = 0) t1 FULL JOIN (SELECT 425 phv, * FROM prt2_adv WHERE prt2_adv.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b ORDER BY t1.a, t2.b; - QUERY PLAN --------------------------------------------------------------------------- - Sort - Sort Key: prt1_adv.a, prt2_adv.b - -> Append - -> Hash Full Join - Hash Cond: (prt1_adv_1.a = prt2_adv_1.b) - Filter: (((175) = prt1_adv_1.a) OR ((425) = prt2_adv_1.b)) - -> Seq Scan on prt1_adv_p1 prt1_adv_1 - Filter: (b = 0) - -> Hash - -> Seq Scan on prt2_adv_p1 prt2_adv_1 - Filter: (a = 0) - -> Hash Full Join - Hash Cond: (prt1_adv_2.a = prt2_adv_2.b) - Filter: (((175) = prt1_adv_2.a) OR ((425) = prt2_adv_2.b)) - -> Seq Scan on prt1_adv_p2 prt1_adv_2 - Filter: (b = 0) - -> Hash - -> Seq Scan on prt2_adv_p2 prt2_adv_2 - Filter: (a = 0) - -> Hash Full Join - Hash Cond: (prt2_adv_3.b = prt1_adv_3.a) - Filter: (((175) = prt1_adv_3.a) OR ((425) = prt2_adv_3.b)) - -> Seq Scan on prt2_adv_p3 prt2_adv_3 - Filter: (a = 0) - -> Hash - -> Seq Scan on prt1_adv_p3 prt1_adv_3 - Filter: (b = 0) -(27 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 175 phv, * FROM prt1_adv WHERE prt1_adv.b = 0) t1 FULL JOIN (SELECT 425 phv, * FROM prt2_adv WHERE prt2_adv.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR 
t2.phv = t2.b ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 175 | 0175 | | - | | 425 | 0425 -(2 rows) - --- Test cases where one side has an extra partition -CREATE TABLE prt2_adv_extra PARTITION OF prt2_adv FOR VALUES FROM (500) TO (MAXVALUE); -INSERT INTO prt2_adv SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(500, 599) i; -ANALYZE prt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Hash Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Hash Join - Hash Cond: (t2_3.b = t1_3.a) - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(21 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 100 | 0100 | 100 | 0100 - 125 | 0125 | 125 | 0125 - 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 - 350 | 0350 | 350 | 0350 - 375 | 0375 | 375 | 0375 -(8 rows) - --- semi join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt1_adv t1 WHERE EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Semi Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Hash Right Semi Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Hash Right Semi Join - Hash Cond: (t2_3.b = t1_3.a) - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(21 rows) - -SELECT t1.* FROM prt1_adv t1 WHERE EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - a | b | c ------+---+------ - 100 | 0 | 0100 - 125 | 0 | 0125 - 200 | 0 | 0200 - 225 | 0 | 0225 - 250 | 0 | 0250 - 275 | 0 | 0275 - 350 | 0 | 0350 - 375 | 0 | 0375 -(8 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a, t2.b - -> Append - -> Hash Right Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Hash Right Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Hash Right Join - Hash Cond: (t2_3.b = t1_3.a) - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(21 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 100 | 0100 | 100 | 0100 - 125 | 0125 | 125 | 0125 - 150 | 0150 | | - 175 | 0175 | | - 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 - 
275 | 0275 | 275 | 0275 - 300 | 0300 | | - 325 | 0325 | | - 350 | 0350 | 350 | 0350 - 375 | 0375 | 375 | 0375 -(12 rows) - --- left join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.b, t1.c, t2.a, t2.c FROM prt2_adv t1 LEFT JOIN prt1_adv t2 ON (t1.b = t2.a) WHERE t1.a = 0 ORDER BY t1.b, t2.a; - QUERY PLAN ---------------------------------------------------------- - Sort - Sort Key: t1.b, t2.a - -> Hash Right Join - Hash Cond: (t2.a = t1.b) - -> Append - -> Seq Scan on prt1_adv_p1 t2_1 - -> Seq Scan on prt1_adv_p2 t2_2 - -> Seq Scan on prt1_adv_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on prt2_adv_p1 t1_1 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p2 t1_2 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p3 t1_3 - Filter: (a = 0) - -> Seq Scan on prt2_adv_extra t1_4 - Filter: (a = 0) -(18 rows) - --- anti join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Anti Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Hash Right Anti Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Hash Right Anti Join - Hash Cond: (t2_3.b = t1_3.a) - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(21 rows) - -SELECT t1.* FROM prt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - a | b | c ------+---+------ - 150 | 0 | 0150 - 175 | 0 | 0175 - 300 | 0 | 0300 - 325 | 0 | 0325 -(4 rows) - --- anti join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt2_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt1_adv t2 WHERE t1.b = t2.a) AND t1.a = 0 ORDER BY t1.b; - QUERY PLAN ---------------------------------------------------------- - Sort - Sort Key: t1.b - -> Hash Right Anti Join - Hash Cond: (t2.a = t1.b) - -> Append - -> Seq Scan on prt1_adv_p1 t2_1 - -> Seq Scan on prt1_adv_p2 t2_2 - -> Seq Scan on prt1_adv_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on prt2_adv_p1 t1_1 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p2 t1_2 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p3 t1_3 - Filter: (a = 0) - -> Seq Scan on prt2_adv_extra t1_4 - Filter: (a = 0) -(18 rows) - --- full join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 175 phv, * FROM prt1_adv WHERE prt1_adv.b = 0) t1 FULL JOIN (SELECT 425 phv, * FROM prt2_adv WHERE prt2_adv.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b ORDER BY t1.a, t2.b; - QUERY PLAN ----------------------------------------------------------------- - Sort - Sort Key: prt1_adv.a, prt2_adv.b - -> Hash Full Join - Hash Cond: (prt2_adv.b = prt1_adv.a) - Filter: (((175) = prt1_adv.a) OR ((425) = prt2_adv.b)) - -> Append - -> Seq Scan on prt2_adv_p1 prt2_adv_1 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p2 prt2_adv_2 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p3 prt2_adv_3 - Filter: (a = 0) - -> Seq Scan on prt2_adv_extra prt2_adv_4 - Filter: (a = 0) - -> Hash - -> Append - -> Seq Scan on prt1_adv_p1 prt1_adv_1 - Filter: (b = 
0) - -> Seq Scan on prt1_adv_p2 prt1_adv_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 prt1_adv_3 - Filter: (b = 0) -(22 rows) - --- 3-way join where not every pair of relations can do partitioned join -EXPLAIN (COSTS OFF) -SELECT t1.b, t1.c, t2.a, t2.c, t3.a, t3.c FROM prt2_adv t1 LEFT JOIN prt1_adv t2 ON (t1.b = t2.a) INNER JOIN prt1_adv t3 ON (t1.b = t3.a) WHERE t1.a = 0 ORDER BY t1.b, t2.a, t3.a; - QUERY PLAN --------------------------------------------------------------------------------- - Sort - Sort Key: t1.b, t2.a - -> Append - -> Nested Loop Left Join - -> Nested Loop - -> Seq Scan on prt2_adv_p1 t1_1 - Filter: (a = 0) - -> Index Scan using prt1_adv_p1_a_idx on prt1_adv_p1 t3_1 - Index Cond: (a = t1_1.b) - -> Index Scan using prt1_adv_p1_a_idx on prt1_adv_p1 t2_1 - Index Cond: (a = t1_1.b) - -> Hash Right Join - Hash Cond: (t2_2.a = t1_2.b) - -> Seq Scan on prt1_adv_p2 t2_2 - -> Hash - -> Hash Join - Hash Cond: (t3_2.a = t1_2.b) - -> Seq Scan on prt1_adv_p2 t3_2 - -> Hash - -> Seq Scan on prt2_adv_p2 t1_2 - Filter: (a = 0) - -> Hash Right Join - Hash Cond: (t2_3.a = t1_3.b) - -> Seq Scan on prt1_adv_p3 t2_3 - -> Hash - -> Hash Join - Hash Cond: (t3_3.a = t1_3.b) - -> Seq Scan on prt1_adv_p3 t3_3 - -> Hash - -> Seq Scan on prt2_adv_p3 t1_3 - Filter: (a = 0) -(31 rows) - -SELECT t1.b, t1.c, t2.a, t2.c, t3.a, t3.c FROM prt2_adv t1 LEFT JOIN prt1_adv t2 ON (t1.b = t2.a) INNER JOIN prt1_adv t3 ON (t1.b = t3.a) WHERE t1.a = 0 ORDER BY t1.b, t2.a, t3.a; - b | c | a | c | a | c ------+------+-----+------+-----+------ - 100 | 0100 | 100 | 0100 | 100 | 0100 - 125 | 0125 | 125 | 0125 | 125 | 0125 - 200 | 0200 | 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 | 275 | 0275 - 350 | 0350 | 350 | 0350 | 350 | 0350 - 375 | 0375 | 375 | 0375 | 375 | 0375 -(8 rows) - -DROP TABLE prt2_adv_extra; --- Test cases where a partition on one side matches multiple partitions on --- the other side; we currently can't do partitioned join in such cases -ALTER TABLE prt2_adv DETACH PARTITION prt2_adv_p3; --- Split prt2_adv_p3 into two partitions so that prt1_adv_p3 matches both -CREATE TABLE prt2_adv_p3_1 PARTITION OF prt2_adv FOR VALUES FROM (350) TO (375); -CREATE TABLE prt2_adv_p3_2 PARTITION OF prt2_adv FOR VALUES FROM (375) TO (500); -INSERT INTO prt2_adv SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(350, 499) i; -ANALYZE prt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: (t2.b = t1.a) - -> Append - -> Seq Scan on prt2_adv_p1 t2_1 - -> Seq Scan on prt2_adv_p2 t2_2 - -> Seq Scan on prt2_adv_p3_1 t2_3 - -> Seq Scan on prt2_adv_p3_2 t2_4 - -> Hash - -> Append - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(17 rows) - --- semi join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt1_adv t1 WHERE EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Semi Join - Hash Cond: (t2.b = t1.a) - -> Append - -> Seq Scan on prt2_adv_p1 t2_1 - -> Seq Scan on prt2_adv_p2 t2_2 - -> Seq Scan on prt2_adv_p3_1 t2_3 - -> Seq Scan on prt2_adv_p3_2 t2_4 - -> Hash - 
-> Append - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(17 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a, t2.b - -> Hash Right Join - Hash Cond: (t2.b = t1.a) - -> Append - -> Seq Scan on prt2_adv_p1 t2_1 - -> Seq Scan on prt2_adv_p2 t2_2 - -> Seq Scan on prt2_adv_p3_1 t2_3 - -> Seq Scan on prt2_adv_p3_2 t2_4 - -> Hash - -> Append - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(17 rows) - --- anti join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Anti Join - Hash Cond: (t2.b = t1.a) - -> Append - -> Seq Scan on prt2_adv_p1 t2_1 - -> Seq Scan on prt2_adv_p2 t2_2 - -> Seq Scan on prt2_adv_p3_1 t2_3 - -> Seq Scan on prt2_adv_p3_2 t2_4 - -> Hash - -> Append - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(17 rows) - --- full join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 175 phv, * FROM prt1_adv WHERE prt1_adv.b = 0) t1 FULL JOIN (SELECT 425 phv, * FROM prt2_adv WHERE prt2_adv.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b ORDER BY t1.a, t2.b; - QUERY PLAN ----------------------------------------------------------------- - Sort - Sort Key: prt1_adv.a, prt2_adv.b - -> Hash Full Join - Hash Cond: (prt2_adv.b = prt1_adv.a) - Filter: (((175) = prt1_adv.a) OR ((425) = prt2_adv.b)) - -> Append - -> Seq Scan on prt2_adv_p1 prt2_adv_1 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p2 prt2_adv_2 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p3_1 prt2_adv_3 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p3_2 prt2_adv_4 - Filter: (a = 0) - -> Hash - -> Append - -> Seq Scan on prt1_adv_p1 prt1_adv_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p2 prt1_adv_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 prt1_adv_3 - Filter: (b = 0) -(22 rows) - -DROP TABLE prt2_adv_p3_1; -DROP TABLE prt2_adv_p3_2; -ANALYZE prt2_adv; --- Test default partitions -ALTER TABLE prt1_adv DETACH PARTITION prt1_adv_p1; --- Change prt1_adv_p1 to the default partition -ALTER TABLE prt1_adv ATTACH PARTITION prt1_adv_p1 DEFAULT; -ALTER TABLE prt1_adv DETACH PARTITION prt1_adv_p3; -ANALYZE prt1_adv; --- We can do partitioned join even if only one of relations has the default --- partition -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: (t2_1.b = t1_2.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_2 - Filter: (b = 0) - -> Hash Join - Hash Cond: (t2_2.b = t1_1.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_1 - Filter: (b = 0) -(15 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c 
------+------+-----+------ - 100 | 0100 | 100 | 0100 - 125 | 0125 | 125 | 0125 - 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 -(6 rows) - --- Restore prt1_adv_p3 -ALTER TABLE prt1_adv ATTACH PARTITION prt1_adv_p3 FOR VALUES FROM (300) TO (400); -ANALYZE prt1_adv; --- Restore prt2_adv_p3 -ALTER TABLE prt2_adv ATTACH PARTITION prt2_adv_p3 FOR VALUES FROM (350) TO (500); -ANALYZE prt2_adv; --- Partitioned join can't be applied because the default partition of prt1_adv --- matches prt2_adv_p1 and prt2_adv_p3 -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: (t2.b = t1.a) - -> Append - -> Seq Scan on prt2_adv_p1 t2_1 - -> Seq Scan on prt2_adv_p2 t2_2 - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on prt1_adv_p2 t1_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 t1_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p1 t1_3 - Filter: (b = 0) -(16 rows) - -ALTER TABLE prt2_adv DETACH PARTITION prt2_adv_p3; --- Change prt2_adv_p3 to the default partition -ALTER TABLE prt2_adv ATTACH PARTITION prt2_adv_p3 DEFAULT; -ANALYZE prt2_adv; --- Partitioned join can't be applied because the default partition of prt1_adv --- matches prt2_adv_p1 and prt2_adv_p3 -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: (t2.b = t1.a) - -> Append - -> Seq Scan on prt2_adv_p1 t2_1 - -> Seq Scan on prt2_adv_p2 t2_2 - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on prt1_adv_p2 t1_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 t1_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p1 t1_3 - Filter: (b = 0) -(16 rows) - -DROP TABLE prt1_adv_p3; -ANALYZE prt1_adv; -DROP TABLE prt2_adv_p3; -ANALYZE prt2_adv; -CREATE TABLE prt3_adv (a int, b int, c varchar) PARTITION BY RANGE (a); -CREATE TABLE prt3_adv_p1 PARTITION OF prt3_adv FOR VALUES FROM (200) TO (300); -CREATE TABLE prt3_adv_p2 PARTITION OF prt3_adv FOR VALUES FROM (300) TO (400); -CREATE INDEX prt3_adv_a_idx ON prt3_adv (a); -INSERT INTO prt3_adv SELECT i, i % 25, to_char(i, 'FM0000') FROM generate_series(200, 399) i; -ANALYZE prt3_adv; --- 3-way join to test the default partition of a join relation -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c, t3.a, t3.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) LEFT JOIN prt3_adv t3 ON (t1.a = t3.a) WHERE t1.b = 0 ORDER BY t1.a, t2.b, t3.a; - QUERY PLAN ------------------------------------------------------------------- - Sort - Sort Key: t1.a, t2.b, t3.a - -> Append - -> Hash Right Join - Hash Cond: (t3_1.a = t1_1.a) - -> Seq Scan on prt3_adv_p1 t3_1 - -> Hash - -> Hash Right Join - Hash Cond: (t2_2.b = t1_1.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_1 - Filter: (b = 0) - -> Hash Right Join - Hash Cond: (t3_2.a = t1_2.a) - -> Seq Scan on prt3_adv_p2 t3_2 - -> Hash - -> Hash Right Join - Hash Cond: (t2_1.b = t1_2.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_2 - Filter: (b = 0) -(23 rows) - -SELECT t1.a, t1.c, t2.b, t2.c, t3.a, t3.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) LEFT JOIN prt3_adv t3 ON (t1.a = t3.a) 
WHERE t1.b = 0 ORDER BY t1.a, t2.b, t3.a; - a | c | b | c | a | c ------+------+-----+------+-----+------ - 100 | 0100 | 100 | 0100 | | - 125 | 0125 | 125 | 0125 | | - 150 | 0150 | | | | - 175 | 0175 | | | | - 200 | 0200 | 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 | 275 | 0275 -(8 rows) - -DROP TABLE prt1_adv; -DROP TABLE prt2_adv; -DROP TABLE prt3_adv; --- Test interaction of partitioned join with partition pruning -CREATE TABLE prt1_adv (a int, b int, c varchar) PARTITION BY RANGE (a); -CREATE TABLE prt1_adv_p1 PARTITION OF prt1_adv FOR VALUES FROM (100) TO (200); -CREATE TABLE prt1_adv_p2 PARTITION OF prt1_adv FOR VALUES FROM (200) TO (300); -CREATE TABLE prt1_adv_p3 PARTITION OF prt1_adv FOR VALUES FROM (300) TO (400); -CREATE INDEX prt1_adv_a_idx ON prt1_adv (a); -INSERT INTO prt1_adv SELECT i, i % 25, to_char(i, 'FM0000') FROM generate_series(100, 399) i; -ANALYZE prt1_adv; -CREATE TABLE prt2_adv (a int, b int, c varchar) PARTITION BY RANGE (b); -CREATE TABLE prt2_adv_p1 PARTITION OF prt2_adv FOR VALUES FROM (100) TO (200); -CREATE TABLE prt2_adv_p2 PARTITION OF prt2_adv FOR VALUES FROM (200) TO (400); -CREATE INDEX prt2_adv_b_idx ON prt2_adv (b); -INSERT INTO prt2_adv SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(100, 399) i; -ANALYZE prt2_adv; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.a < 300 AND t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------------ - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: ((a < 300) AND (b = 0)) - -> Hash Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: ((a < 300) AND (b = 0)) -(15 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.a < 300 AND t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 100 | 0100 | 100 | 0100 - 125 | 0125 | 125 | 0125 - 150 | 0150 | 150 | 0150 - 175 | 0175 | 175 | 0175 - 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 -(8 rows) - -DROP TABLE prt1_adv_p3; -CREATE TABLE prt1_adv_default PARTITION OF prt1_adv DEFAULT; -ANALYZE prt1_adv; -CREATE TABLE prt2_adv_default PARTITION OF prt2_adv DEFAULT; -ANALYZE prt2_adv; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.a >= 100 AND t1.a < 300 AND t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN --------------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: ((a >= 100) AND (a < 300) AND (b = 0)) - -> Hash Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: ((a >= 100) AND (a < 300) AND (b = 0)) -(15 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.a >= 100 AND t1.a < 300 AND t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 100 | 0100 | 100 | 0100 - 125 | 0125 | 125 | 0125 - 150 | 0150 | 150 | 0150 - 175 | 0175 | 175 | 0175 - 200 | 0200 | 200 | 0200 - 225 
| 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 -(8 rows) - -DROP TABLE prt1_adv; -DROP TABLE prt2_adv; --- Tests for list-partitioned tables -CREATE TABLE plt1_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt1_adv_p1 PARTITION OF plt1_adv FOR VALUES IN ('0001', '0003'); -CREATE TABLE plt1_adv_p2 PARTITION OF plt1_adv FOR VALUES IN ('0004', '0006'); -CREATE TABLE plt1_adv_p3 PARTITION OF plt1_adv FOR VALUES IN ('0008', '0009'); -INSERT INTO plt1_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (1, 3, 4, 6, 8, 9); -ANALYZE plt1_adv; -CREATE TABLE plt2_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt2_adv_p1 PARTITION OF plt2_adv FOR VALUES IN ('0002', '0003'); -CREATE TABLE plt2_adv_p2 PARTITION OF plt2_adv FOR VALUES IN ('0004', '0006'); -CREATE TABLE plt2_adv_p3 PARTITION OF plt2_adv FOR VALUES IN ('0007', '0009'); -INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -ANALYZE plt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 9 | 0009 | 9 | 0009 -(4 rows) - --- semi join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Semi Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Semi Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Semi Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - a | b | c ----+---+------ - 3 | 3 | 0003 - 4 | 4 | 0004 - 6 | 6 | 0006 - 9 | 9 | 0009 -(4 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Join - Hash Cond: 
((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 -(6 rows) - --- anti join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Anti Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Anti Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Anti Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - a | b | c ----+---+------ - 1 | 1 | 0001 - 8 | 8 | 0008 -(2 rows) - --- full join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - QUERY PLAN ------------------------------------------------------------------------------------ - Sort - Sort Key: t1.a, t2.a - -> Append - -> Hash Full Join - Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.c = t2_1.c)) - Filter: ((COALESCE(t1_1.b, 0) < 10) AND (COALESCE(t2_1.b, 0) < 10)) - -> Seq Scan on plt1_adv_p1 t1_1 - -> Hash - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash Full Join - Hash Cond: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) - Filter: ((COALESCE(t1_2.b, 0) < 10) AND (COALESCE(t2_2.b, 0) < 10)) - -> Seq Scan on plt1_adv_p2 t1_2 - -> Hash - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash Full Join - Hash Cond: ((t1_3.a = t2_3.a) AND (t1_3.c = t2_3.c)) - Filter: ((COALESCE(t1_3.b, 0) < 10) AND (COALESCE(t2_3.b, 0) < 10)) - -> Seq Scan on plt1_adv_p3 t1_3 - -> Hash - -> Seq Scan on plt2_adv_p3 t2_3 -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - a | c | a | c ----+------+---+------ - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 - | | 2 | 0002 - | | 7 | 0007 -(8 rows) - --- Test cases where one side has an extra partition -CREATE TABLE plt2_adv_extra PARTITION OF plt2_adv FOR VALUES IN ('0000'); -INSERT INTO plt2_adv_extra VALUES (0, 0, '0000'); -ANALYZE plt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER 
JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 9 | 0009 | 9 | 0009 -(4 rows) - --- semi join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Semi Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Semi Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Semi Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - a | b | c ----+---+------ - 3 | 3 | 0003 - 4 | 4 | 0004 - 6 | 6 | 0006 - 9 | 9 | 0009 -(4 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 -(6 rows) - --- left join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt2_adv t1 LEFT JOIN plt1_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ---------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash 
Right Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt1_adv_p1 t2_1 - -> Seq Scan on plt1_adv_p2 t2_2 - -> Seq Scan on plt1_adv_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on plt2_adv_extra t1_1 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p1 t1_2 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p2 t1_3 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p3 t1_4 - Filter: (b < 10) -(18 rows) - --- anti join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Anti Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Anti Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Anti Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - a | b | c ----+---+------ - 1 | 1 | 0001 - 8 | 8 | 0008 -(2 rows) - --- anti join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt2_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt1_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN ---------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Anti Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt1_adv_p1 t2_1 - -> Seq Scan on plt1_adv_p2 t2_2 - -> Seq Scan on plt1_adv_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on plt2_adv_extra t1_1 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p1 t1_2 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p2 t1_3 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p3 t1_4 - Filter: (b < 10) -(18 rows) - --- full join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - QUERY PLAN -------------------------------------------------------------------------- - Sort - Sort Key: t1.a, t2.a - -> Hash Full Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - Filter: ((COALESCE(t1.b, 0) < 10) AND (COALESCE(t2.b, 0) < 10)) - -> Append - -> Seq Scan on plt2_adv_extra t2_1 - -> Seq Scan on plt2_adv_p1 t2_2 - -> Seq Scan on plt2_adv_p2 t2_3 - -> Seq Scan on plt2_adv_p3 t2_4 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - -> Seq Scan on plt1_adv_p2 t1_2 - -> Seq Scan on plt1_adv_p3 t1_3 -(15 rows) - -DROP TABLE plt2_adv_extra; --- Test cases where a partition on one side matches multiple partitions on --- the other side; we currently can't do partitioned join in such cases -ALTER TABLE plt2_adv DETACH PARTITION plt2_adv_p2; --- Split plt2_adv_p2 into two partitions so that plt1_adv_p2 matches both -CREATE TABLE plt2_adv_p2_1 PARTITION OF plt2_adv FOR VALUES IN ('0004'); -CREATE TABLE plt2_adv_p2_2 PARTITION OF 
plt2_adv FOR VALUES IN ('0006'); -INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (4, 6); -ANALYZE plt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_1 t2_2 - -> Seq Scan on plt2_adv_p2_2 t2_3 - -> Seq Scan on plt2_adv_p3 t2_4 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(17 rows) - --- semi join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Semi Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_1 t2_2 - -> Seq Scan on plt2_adv_p2_2 t2_3 - -> Seq Scan on plt2_adv_p3 t2_4 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(17 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_1 t2_2 - -> Seq Scan on plt2_adv_p2_2 t2_3 - -> Seq Scan on plt2_adv_p3 t2_4 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(17 rows) - --- anti join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Anti Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_1 t2_2 - -> Seq Scan on plt2_adv_p2_2 t2_3 - -> Seq Scan on plt2_adv_p3 t2_4 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(17 rows) - --- full join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - QUERY PLAN -------------------------------------------------------------------------- - Sort - Sort Key: t1.a, t2.a - -> Hash Full Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - Filter: ((COALESCE(t1.b, 0) < 10) AND (COALESCE(t2.b, 0) < 10)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_1 t2_2 - -> Seq Scan on plt2_adv_p2_2 t2_3 - -> Seq Scan on plt2_adv_p3 t2_4 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p1 
t1_1 - -> Seq Scan on plt1_adv_p2 t1_2 - -> Seq Scan on plt1_adv_p3 t1_3 -(15 rows) - -DROP TABLE plt2_adv_p2_1; -DROP TABLE plt2_adv_p2_2; --- Restore plt2_adv_p2 -ALTER TABLE plt2_adv ATTACH PARTITION plt2_adv_p2 FOR VALUES IN ('0004', '0006'); --- Test NULL partitions -ALTER TABLE plt1_adv DETACH PARTITION plt1_adv_p1; --- Change plt1_adv_p1 to the NULL partition -CREATE TABLE plt1_adv_p1_null PARTITION OF plt1_adv FOR VALUES IN (NULL, '0001', '0003'); -INSERT INTO plt1_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (1, 3); -INSERT INTO plt1_adv VALUES (-1, -1, NULL); -ANALYZE plt1_adv; -ALTER TABLE plt2_adv DETACH PARTITION plt2_adv_p3; --- Change plt2_adv_p3 to the NULL partition -CREATE TABLE plt2_adv_p3_null PARTITION OF plt2_adv FOR VALUES IN (NULL, '0007', '0009'); -INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (7, 9); -INSERT INTO plt2_adv VALUES (-1, -1, NULL); -ANALYZE plt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1_null t1_1 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3_null t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 9 | 0009 | 9 | 0009 -(4 rows) - --- semi join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Semi Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1_null t1_1 - Filter: (b < 10) - -> Hash Right Semi Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Semi Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3_null t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - a | b | c ----+---+------ - 3 | 3 | 0003 - 4 | 4 | 0004 - 6 | 6 | 0006 - 9 | 9 | 0009 -(4 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan 
on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1_null t1_1 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3_null t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c -----+------+---+------ - -1 | | | - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 -(7 rows) - --- anti join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Anti Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1_null t1_1 - Filter: (b < 10) - -> Hash Right Anti Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Anti Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3_null t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - a | b | c -----+----+------ - -1 | -1 | - 1 | 1 | 0001 - 8 | 8 | 0008 -(3 rows) - --- full join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - QUERY PLAN ------------------------------------------------------------------------------------ - Sort - Sort Key: t1.a, t2.a - -> Append - -> Hash Full Join - Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.c = t2_1.c)) - Filter: ((COALESCE(t1_1.b, 0) < 10) AND (COALESCE(t2_1.b, 0) < 10)) - -> Seq Scan on plt1_adv_p1_null t1_1 - -> Hash - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash Full Join - Hash Cond: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) - Filter: ((COALESCE(t1_2.b, 0) < 10) AND (COALESCE(t2_2.b, 0) < 10)) - -> Seq Scan on plt1_adv_p2 t1_2 - -> Hash - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash Full Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - Filter: ((COALESCE(t1_3.b, 0) < 10) AND (COALESCE(t2_3.b, 0) < 10)) - -> Seq Scan on plt2_adv_p3_null t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - a | c | a | c -----+------+----+------ - -1 | | | - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 - | | -1 | - | | 2 | 0002 - | | 7 | 0007 -(10 rows) - -DROP TABLE plt1_adv_p1_null; --- Restore plt1_adv_p1 -ALTER TABLE plt1_adv ATTACH PARTITION plt1_adv_p1 FOR VALUES IN ('0001', '0003'); --- Add to plt1_adv the extra NULL partition containing only NULL values as the --- key values -CREATE TABLE plt1_adv_extra 
PARTITION OF plt1_adv FOR VALUES IN (NULL); -INSERT INTO plt1_adv VALUES (-1, -1, NULL); -ANALYZE plt1_adv; -DROP TABLE plt2_adv_p3_null; --- Restore plt2_adv_p3 -ALTER TABLE plt2_adv ATTACH PARTITION plt2_adv_p3 FOR VALUES IN ('0007', '0009'); -ANALYZE plt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 9 | 0009 | 9 | 0009 -(4 rows) - --- left join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ---------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2 t2_2 - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) - -> Seq Scan on plt1_adv_extra t1_4 - Filter: (b < 10) -(18 rows) - --- full join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - QUERY PLAN -------------------------------------------------------------------------- - Sort - Sort Key: t1.a, t2.a - -> Hash Full Join - Hash Cond: ((t1.a = t2.a) AND (t1.c = t2.c)) - Filter: ((COALESCE(t1.b, 0) < 10) AND (COALESCE(t2.b, 0) < 10)) - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - -> Seq Scan on plt1_adv_p2 t1_2 - -> Seq Scan on plt1_adv_p3 t1_3 - -> Seq Scan on plt1_adv_extra t1_4 - -> Hash - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2 t2_2 - -> Seq Scan on plt2_adv_p3 t2_3 -(15 rows) - --- Add to plt2_adv the extra NULL partition containing only NULL values as the --- key values -CREATE TABLE plt2_adv_extra PARTITION OF plt2_adv FOR VALUES IN (NULL); -INSERT INTO plt2_adv VALUES (-1, -1, NULL); -ANALYZE plt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = 
t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 9 | 0009 | 9 | 0009 -(4 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ----------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) - -> Nested Loop Left Join - Join Filter: ((t1_4.a = t2_4.a) AND (t1_4.c = t2_4.c)) - -> Seq Scan on plt1_adv_extra t1_4 - Filter: (b < 10) - -> Seq Scan on plt2_adv_extra t2_4 -(26 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c -----+------+---+------ - -1 | | | - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 -(7 rows) - --- full join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - QUERY PLAN ------------------------------------------------------------------------------------ - Sort - Sort Key: t1.a, t2.a - -> Append - -> Hash Full Join - Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.c = t2_1.c)) - Filter: ((COALESCE(t1_1.b, 0) < 10) AND (COALESCE(t2_1.b, 0) < 10)) - -> Seq Scan on plt1_adv_p1 t1_1 - -> Hash - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash Full Join - Hash Cond: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) - Filter: ((COALESCE(t1_2.b, 0) < 10) AND (COALESCE(t2_2.b, 0) < 10)) - -> Seq Scan on plt1_adv_p2 t1_2 - -> Hash - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash Full Join - Hash Cond: ((t1_3.a = t2_3.a) AND (t1_3.c = t2_3.c)) - Filter: ((COALESCE(t1_3.b, 0) < 10) AND (COALESCE(t2_3.b, 0) < 10)) - -> Seq Scan on plt1_adv_p3 t1_3 - -> Hash - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash Full Join - Hash Cond: ((t1_4.a = t2_4.a) AND (t1_4.c = t2_4.c)) - Filter: ((COALESCE(t1_4.b, 0) < 10) AND (COALESCE(t2_4.b, 0) < 10)) - -> Seq Scan on plt1_adv_extra t1_4 - -> Hash - -> Seq Scan on plt2_adv_extra t2_4 -(27 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - a | c | a | c -----+------+----+------ - -1 
| | | - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 - | | -1 | - | | 2 | 0002 - | | 7 | 0007 -(10 rows) - --- 3-way join to test the NULL partition of a join relation -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) LEFT JOIN plt1_adv t3 ON (t1.a = t3.a AND t1.c = t3.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Join - Hash Cond: ((t3_1.a = t1_1.a) AND (t3_1.c = t1_1.c)) - -> Seq Scan on plt1_adv_p1 t3_1 - -> Hash - -> Hash Right Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t3_2.a = t1_2.a) AND (t3_2.c = t1_2.c)) - -> Seq Scan on plt1_adv_p2 t3_2 - -> Hash - -> Hash Right Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t3_3.a = t1_3.a) AND (t3_3.c = t1_3.c)) - -> Seq Scan on plt1_adv_p3 t3_3 - -> Hash - -> Hash Right Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) - -> Nested Loop Left Join - Join Filter: ((t1_4.a = t3_4.a) AND (t1_4.c = t3_4.c)) - -> Nested Loop Left Join - Join Filter: ((t1_4.a = t2_4.a) AND (t1_4.c = t2_4.c)) - -> Seq Scan on plt1_adv_extra t1_4 - Filter: (b < 10) - -> Seq Scan on plt2_adv_extra t2_4 - -> Seq Scan on plt1_adv_extra t3_4 -(41 rows) - -SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) LEFT JOIN plt1_adv t3 ON (t1.a = t3.a AND t1.c = t3.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c | a | c -----+------+---+------+---+------ - -1 | | | | | - 1 | 0001 | | | 1 | 0001 - 3 | 0003 | 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 | 6 | 0006 - 8 | 0008 | | | 8 | 0008 - 9 | 0009 | 9 | 0009 | 9 | 0009 -(7 rows) - -DROP TABLE plt1_adv_extra; -DROP TABLE plt2_adv_extra; --- Test default partitions -ALTER TABLE plt1_adv DETACH PARTITION plt1_adv_p1; --- Change plt1_adv_p1 to the default partition -ALTER TABLE plt1_adv ATTACH PARTITION plt1_adv_p1 DEFAULT; -DROP TABLE plt1_adv_p3; -ANALYZE plt1_adv; -DROP TABLE plt2_adv_p3; -ANALYZE plt2_adv; --- We can do partitioned join even if only one of relations has the default --- partition -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_2.a) AND (t2_1.c = t1_2.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_1.a) AND (t2_2.c = t1_1.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_1 - Filter: (b < 10) -(15 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 -(3 rows) - 
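[Annotation] Every per-partition join plan in this expected output depends on the enable_partitionwise_join GUC, which the fract_t test further down in this same diff switches on explicitly. As a condensed sketch of what that switch does — demo1 and demo2 are invented names for illustration, not part of the regression schema:

-- Minimal sketch; assumes a stock PostgreSQL server and invented tables.
CREATE TABLE demo1 (a int, c text) PARTITION BY LIST (c);
CREATE TABLE demo1_p1 PARTITION OF demo1 FOR VALUES IN ('0001');
CREATE TABLE demo1_p2 PARTITION OF demo1 FOR VALUES IN ('0002');
CREATE TABLE demo2 (a int, c text) PARTITION BY LIST (c);
CREATE TABLE demo2_p1 PARTITION OF demo2 FOR VALUES IN ('0001');
CREATE TABLE demo2_p2 PARTITION OF demo2 FOR VALUES IN ('0002');
INSERT INTO demo1 SELECT i, to_char(i % 2 + 1, 'FM0000') FROM generate_series(1, 100) i;
INSERT INTO demo2 SELECT i, to_char(i % 2 + 1, 'FM0000') FROM generate_series(1, 100) i;
ANALYZE demo1, demo2;

-- Off (the default): the planner joins the two Appends as a whole.
SET enable_partitionwise_join = off;
EXPLAIN (COSTS OFF)
SELECT * FROM demo1 t1 JOIN demo2 t2 ON t1.a = t2.a AND t1.c = t2.c;

-- On: because the join condition covers the partition key (c) and the
-- partition bounds match exactly, the join is decomposed into one child
-- join per partition pair.
SET enable_partitionwise_join = on;
EXPLAIN (COSTS OFF)
SELECT * FROM demo1 t1 JOIN demo2 t2 ON t1.a = t2.a AND t1.c = t2.c;

With the GUC on, the second EXPLAIN produces the Append-of-per-partition-Hash-Joins shape that recurs throughout the expected output quoted above.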
-ALTER TABLE plt2_adv DETACH PARTITION plt2_adv_p2; --- Change plt2_adv_p2 to contain '0005' in addition to '0004' and '0006' as --- the key values -CREATE TABLE plt2_adv_p2_ext PARTITION OF plt2_adv FOR VALUES IN ('0004', '0005', '0006'); -INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (4, 5, 6); -ANALYZE plt2_adv; --- Partitioned join can't be applied because the default partition of plt1_adv --- matches plt2_adv_p1 and plt2_adv_p2_ext -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_ext t2_2 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p2 t1_1 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p1 t1_2 - Filter: (b < 10) -(13 rows) - -ALTER TABLE plt2_adv DETACH PARTITION plt2_adv_p2_ext; --- Change plt2_adv_p2_ext to the default partition -ALTER TABLE plt2_adv ATTACH PARTITION plt2_adv_p2_ext DEFAULT; -ANALYZE plt2_adv; --- Partitioned join can't be applied because the default partition of plt1_adv --- matches plt2_adv_p1 and plt2_adv_p2_ext -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_ext t2_2 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p2 t1_1 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p1 t1_2 - Filter: (b < 10) -(13 rows) - -DROP TABLE plt2_adv_p2_ext; --- Restore plt2_adv_p2 -ALTER TABLE plt2_adv ATTACH PARTITION plt2_adv_p2 FOR VALUES IN ('0004', '0006'); -ANALYZE plt2_adv; -CREATE TABLE plt3_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt3_adv_p1 PARTITION OF plt3_adv FOR VALUES IN ('0004', '0006'); -CREATE TABLE plt3_adv_p2 PARTITION OF plt3_adv FOR VALUES IN ('0007', '0009'); -INSERT INTO plt3_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (4, 6, 7, 9); -ANALYZE plt3_adv; --- 3-way join to test the default partition of a join relation -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) LEFT JOIN plt3_adv t3 ON (t1.a = t3.a AND t1.c = t3.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Join - Hash Cond: ((t3_1.a = t1_1.a) AND (t3_1.c = t1_1.c)) - -> Seq Scan on plt3_adv_p1 t3_1 - -> Hash - -> Hash Right Join - Hash Cond: ((t2_2.a = t1_1.a) AND (t2_2.c = t1_1.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_1 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t3_2.a = t1_2.a) AND (t3_2.c = t1_2.c)) - -> Seq Scan on plt3_adv_p2 t3_2 - -> Hash - -> Hash Right Join - Hash Cond: ((t2_1.a = t1_2.a) AND (t2_1.c = t1_2.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_2 - Filter: (b < 10) -(23 rows) - -SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) LEFT JOIN 
plt3_adv t3 ON (t1.a = t3.a AND t1.c = t3.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c | a | c ----+------+---+------+---+------ - 1 | 0001 | | | | - 3 | 0003 | 3 | 0003 | | - 4 | 0004 | 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 | 6 | 0006 -(4 rows) - --- Test cases where one side has the default partition while the other side --- has the NULL partition -DROP TABLE plt2_adv_p1; --- Add the NULL partition to plt2_adv -CREATE TABLE plt2_adv_p1_null PARTITION OF plt2_adv FOR VALUES IN (NULL, '0001', '0003'); -INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (1, 3); -INSERT INTO plt2_adv VALUES (-1, -1, NULL); -ANALYZE plt2_adv; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_2.a) AND (t2_1.c = t1_2.c)) - -> Seq Scan on plt2_adv_p1_null t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_1.a) AND (t2_2.c = t1_1.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_1 - Filter: (b < 10) -(15 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 1 | 0001 | 1 | 0001 - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 -(4 rows) - -DROP TABLE plt2_adv_p1_null; --- Add the NULL partition that contains only NULL values as the key values -CREATE TABLE plt2_adv_p1_null PARTITION OF plt2_adv FOR VALUES IN (NULL); -INSERT INTO plt2_adv VALUES (-1, -1, NULL); -ANALYZE plt2_adv; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Seq Scan on plt2_adv_p2 t2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1 - Filter: (b < 10) -(8 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 -(2 rows) - -DROP TABLE plt1_adv; -DROP TABLE plt2_adv; -DROP TABLE plt3_adv; --- Test interaction of partitioned join with partition pruning -CREATE TABLE plt1_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt1_adv_p1 PARTITION OF plt1_adv FOR VALUES IN ('0001'); -CREATE TABLE plt1_adv_p2 PARTITION OF plt1_adv FOR VALUES IN ('0002'); -CREATE TABLE plt1_adv_p3 PARTITION OF plt1_adv FOR VALUES IN ('0003'); -CREATE TABLE plt1_adv_p4 PARTITION OF plt1_adv FOR VALUES IN (NULL, '0004', '0005'); -INSERT INTO plt1_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (1, 2, 3, 4, 5); -INSERT INTO plt1_adv VALUES (-1, -1, NULL); -ANALYZE plt1_adv; -CREATE TABLE plt2_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt2_adv_p1 PARTITION OF plt2_adv FOR VALUES IN ('0001', '0002'); -CREATE TABLE plt2_adv_p2 PARTITION OF plt2_adv FOR VALUES IN (NULL); -CREATE TABLE plt2_adv_p3 PARTITION OF plt2_adv FOR VALUES IN ('0003'); -CREATE TABLE plt2_adv_p4 PARTITION OF plt2_adv FOR VALUES IN ('0004', '0005'); 
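[Annotation] The tests being set up here exercise partitionwise join together with partition pruning. As a rough sketch of the interaction, continuing the hypothetical demo1/demo2 tables from the sketch above: a predicate on the partition key prunes partitions from both sides before the child joins are formed, so pruned pairs never appear in the plan at all.

-- Continues the hypothetical demo1/demo2 sketch above.
SET enable_partitionwise_join = on;

-- t1.c = '0001' prunes demo1_p2 directly, and (via the join clause
-- t1.c = t2.c) demo2_p2 as well, leaving a single child join.
EXPLAIN (COSTS OFF)
SELECT * FROM demo1 t1 JOIN demo2 t2 ON t1.a = t2.a AND t1.c = t2.c
WHERE t1.c = '0001';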
-INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (1, 2, 3, 4, 5); -INSERT INTO plt2_adv VALUES (-1, -1, NULL); -ANALYZE plt2_adv; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IN ('0003', '0004', '0005') AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------------------------------------------ - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p3 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_1 - Filter: ((b < 10) AND (c = ANY ('{0003,0004,0005}'::text[]))) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p4 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p4 t1_2 - Filter: ((b < 10) AND (c = ANY ('{0003,0004,0005}'::text[]))) -(15 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IN ('0003', '0004', '0005') AND t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 5 | 0005 | 5 | 0005 -(3 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IS NULL AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Seq Scan on plt2_adv_p4 t2 - -> Hash - -> Seq Scan on plt1_adv_p4 t1 - Filter: ((c IS NULL) AND (b < 10)) -(8 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IS NULL AND t1.b < 10 ORDER BY t1.a; - a | c | a | c -----+---+---+--- - -1 | | | -(1 row) - -CREATE TABLE plt1_adv_default PARTITION OF plt1_adv DEFAULT; -ANALYZE plt1_adv; -CREATE TABLE plt2_adv_default PARTITION OF plt2_adv DEFAULT; -ANALYZE plt2_adv; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IN ('0003', '0004', '0005') AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------------------------------------------ - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p3 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_1 - Filter: ((b < 10) AND (c = ANY ('{0003,0004,0005}'::text[]))) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p4 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p4 t1_2 - Filter: ((b < 10) AND (c = ANY ('{0003,0004,0005}'::text[]))) -(15 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IN ('0003', '0004', '0005') AND t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 5 | 0005 | 5 | 0005 -(3 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IS NULL AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Seq Scan on plt2_adv_p4 t2 - -> Hash - -> Seq Scan on plt1_adv_p4 t1 - Filter: 
((c IS NULL) AND (b < 10)) -(8 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IS NULL AND t1.b < 10 ORDER BY t1.a; - a | c | a | c -----+---+---+--- - -1 | | | -(1 row) - -DROP TABLE plt1_adv; -DROP TABLE plt2_adv; --- Test the process_outer_partition() code path -CREATE TABLE plt1_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt1_adv_p1 PARTITION OF plt1_adv FOR VALUES IN ('0000', '0001', '0002'); -CREATE TABLE plt1_adv_p2 PARTITION OF plt1_adv FOR VALUES IN ('0003', '0004'); -INSERT INTO plt1_adv SELECT i, i, to_char(i % 5, 'FM0000') FROM generate_series(0, 24) i; -ANALYZE plt1_adv; -CREATE TABLE plt2_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt2_adv_p1 PARTITION OF plt2_adv FOR VALUES IN ('0002'); -CREATE TABLE plt2_adv_p2 PARTITION OF plt2_adv FOR VALUES IN ('0003', '0004'); -INSERT INTO plt2_adv SELECT i, i, to_char(i % 5, 'FM0000') FROM generate_series(0, 24) i WHERE i % 5 IN (2, 3, 4); -ANALYZE plt2_adv; -CREATE TABLE plt3_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt3_adv_p1 PARTITION OF plt3_adv FOR VALUES IN ('0001'); -CREATE TABLE plt3_adv_p2 PARTITION OF plt3_adv FOR VALUES IN ('0003', '0004'); -INSERT INTO plt3_adv SELECT i, i, to_char(i % 5, 'FM0000') FROM generate_series(0, 24) i WHERE i % 5 IN (1, 3, 4); -ANALYZE plt3_adv; --- This tests that when merging partitions from plt1_adv and plt2_adv in --- merge_list_bounds(), process_outer_partition() returns an already-assigned --- merged partition when re-called with plt1_adv_p1 for the second list value --- '0001' of that partition -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM (plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.c = t2.c)) FULL JOIN plt3_adv t3 ON (t1.c = t3.c) WHERE coalesce(t1.a, 0) % 5 != 3 AND coalesce(t1.a, 0) % 5 != 4 ORDER BY t1.c, t1.a, t2.a, t3.a; - QUERY PLAN ------------------------------------------------------------------------------------------------ - Sort - Sort Key: t1.c, t1.a, t2.a, t3.a - -> Append - -> Hash Full Join - Hash Cond: (t1_1.c = t3_1.c) - Filter: (((COALESCE(t1_1.a, 0) % 5) <> 3) AND ((COALESCE(t1_1.a, 0) % 5) <> 4)) - -> Hash Left Join - Hash Cond: (t1_1.c = t2_1.c) - -> Seq Scan on plt1_adv_p1 t1_1 - -> Hash - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt3_adv_p1 t3_1 - -> Hash Full Join - Hash Cond: (t1_2.c = t3_2.c) - Filter: (((COALESCE(t1_2.a, 0) % 5) <> 3) AND ((COALESCE(t1_2.a, 0) % 5) <> 4)) - -> Hash Left Join - Hash Cond: (t1_2.c = t2_2.c) - -> Seq Scan on plt1_adv_p2 t1_2 - -> Hash - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt3_adv_p2 t3_2 -(23 rows) - -SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM (plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.c = t2.c)) FULL JOIN plt3_adv t3 ON (t1.c = t3.c) WHERE coalesce(t1.a, 0) % 5 != 3 AND coalesce(t1.a, 0) % 5 != 4 ORDER BY t1.c, t1.a, t2.a, t3.a; - a | c | a | c | a | c -----+------+----+------+----+------ - 0 | 0000 | | | | - 5 | 0000 | | | | - 10 | 0000 | | | | - 15 | 0000 | | | | - 20 | 0000 | | | | - 1 | 0001 | | | 1 | 0001 - 1 | 0001 | | | 6 | 0001 - 1 | 0001 | | | 11 | 0001 - 1 | 0001 | | | 16 | 0001 - 1 | 0001 | | | 21 | 0001 - 6 | 0001 | | | 1 | 0001 - 6 | 0001 | | | 6 | 0001 - 6 | 0001 | | | 11 | 0001 - 6 | 0001 | | | 16 | 0001 - 6 | 0001 | | | 21 | 0001 - 11 | 0001 | | | 1 | 0001 - 11 | 0001 | | | 6 | 0001 - 11 | 0001 | | | 11 | 0001 - 11 | 0001 | | | 16 | 0001 - 11 | 0001 | | | 21 | 0001 - 16 | 0001 | | | 1 | 0001 - 16 | 0001 | | | 6 | 
0001 - 16 | 0001 | | | 11 | 0001 - 16 | 0001 | | | 16 | 0001 - 16 | 0001 | | | 21 | 0001 - 21 | 0001 | | | 1 | 0001 - 21 | 0001 | | | 6 | 0001 - 21 | 0001 | | | 11 | 0001 - 21 | 0001 | | | 16 | 0001 - 21 | 0001 | | | 21 | 0001 - 2 | 0002 | 2 | 0002 | | - 2 | 0002 | 7 | 0002 | | - 2 | 0002 | 12 | 0002 | | - 2 | 0002 | 17 | 0002 | | - 2 | 0002 | 22 | 0002 | | - 7 | 0002 | 2 | 0002 | | - 7 | 0002 | 7 | 0002 | | - 7 | 0002 | 12 | 0002 | | - 7 | 0002 | 17 | 0002 | | - 7 | 0002 | 22 | 0002 | | - 12 | 0002 | 2 | 0002 | | - 12 | 0002 | 7 | 0002 | | - 12 | 0002 | 12 | 0002 | | - 12 | 0002 | 17 | 0002 | | - 12 | 0002 | 22 | 0002 | | - 17 | 0002 | 2 | 0002 | | - 17 | 0002 | 7 | 0002 | | - 17 | 0002 | 12 | 0002 | | - 17 | 0002 | 17 | 0002 | | - 17 | 0002 | 22 | 0002 | | - 22 | 0002 | 2 | 0002 | | - 22 | 0002 | 7 | 0002 | | - 22 | 0002 | 12 | 0002 | | - 22 | 0002 | 17 | 0002 | | - 22 | 0002 | 22 | 0002 | | -(55 rows) - -DROP TABLE plt1_adv; -DROP TABLE plt2_adv; -DROP TABLE plt3_adv; --- Tests for multi-level partitioned tables -CREATE TABLE alpha (a double precision, b int, c text) PARTITION BY RANGE (a); -CREATE TABLE alpha_neg PARTITION OF alpha FOR VALUES FROM ('-Infinity') TO (0) PARTITION BY RANGE (b); -CREATE TABLE alpha_pos PARTITION OF alpha FOR VALUES FROM (0) TO (10.0) PARTITION BY LIST (c); -CREATE TABLE alpha_neg_p1 PARTITION OF alpha_neg FOR VALUES FROM (100) TO (200); -CREATE TABLE alpha_neg_p2 PARTITION OF alpha_neg FOR VALUES FROM (200) TO (300); -CREATE TABLE alpha_neg_p3 PARTITION OF alpha_neg FOR VALUES FROM (300) TO (400); -CREATE TABLE alpha_pos_p1 PARTITION OF alpha_pos FOR VALUES IN ('0001', '0003'); -CREATE TABLE alpha_pos_p2 PARTITION OF alpha_pos FOR VALUES IN ('0004', '0006'); -CREATE TABLE alpha_pos_p3 PARTITION OF alpha_pos FOR VALUES IN ('0008', '0009'); -INSERT INTO alpha_neg SELECT -1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(100, 399) i WHERE i % 10 IN (1, 3, 4, 6, 8, 9); -INSERT INTO alpha_pos SELECT 1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(100, 399) i WHERE i % 10 IN (1, 3, 4, 6, 8, 9); -ANALYZE alpha; -CREATE TABLE beta (a double precision, b int, c text) PARTITION BY RANGE (a); -CREATE TABLE beta_neg PARTITION OF beta FOR VALUES FROM (-10.0) TO (0) PARTITION BY RANGE (b); -CREATE TABLE beta_pos PARTITION OF beta FOR VALUES FROM (0) TO ('Infinity') PARTITION BY LIST (c); -CREATE TABLE beta_neg_p1 PARTITION OF beta_neg FOR VALUES FROM (100) TO (150); -CREATE TABLE beta_neg_p2 PARTITION OF beta_neg FOR VALUES FROM (200) TO (300); -CREATE TABLE beta_neg_p3 PARTITION OF beta_neg FOR VALUES FROM (350) TO (500); -CREATE TABLE beta_pos_p1 PARTITION OF beta_pos FOR VALUES IN ('0002', '0003'); -CREATE TABLE beta_pos_p2 PARTITION OF beta_pos FOR VALUES IN ('0004', '0006'); -CREATE TABLE beta_pos_p3 PARTITION OF beta_pos FOR VALUES IN ('0007', '0009'); -INSERT INTO beta_neg SELECT -1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(100, 149) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -INSERT INTO beta_neg SELECT -1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(200, 299) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -INSERT INTO beta_neg SELECT -1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(350, 499) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -INSERT INTO beta_pos SELECT 1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(100, 149) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -INSERT INTO beta_pos SELECT 1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(200, 299) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -INSERT INTO beta_pos SELECT 1.0, i, to_char(i % 
10, 'FM0000') FROM generate_series(350, 499) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -ANALYZE beta; -EXPLAIN (COSTS OFF) -SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.b = t2.b) WHERE t1.b >= 125 AND t1.b < 225 ORDER BY t1.a, t1.b; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a, t1.b - -> Append - -> Hash Join - Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.b = t2_1.b)) - -> Seq Scan on alpha_neg_p1 t1_1 - Filter: ((b >= 125) AND (b < 225)) - -> Hash - -> Seq Scan on beta_neg_p1 t2_1 - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.b = t1_2.b)) - -> Seq Scan on beta_neg_p2 t2_2 - -> Hash - -> Seq Scan on alpha_neg_p2 t1_2 - Filter: ((b >= 125) AND (b < 225)) - -> Hash Join - Hash Cond: ((t2_4.a = t1_4.a) AND (t2_4.b = t1_4.b)) - -> Append - -> Seq Scan on beta_pos_p1 t2_4 - -> Seq Scan on beta_pos_p2 t2_5 - -> Seq Scan on beta_pos_p3 t2_6 - -> Hash - -> Append - -> Seq Scan on alpha_pos_p1 t1_4 - Filter: ((b >= 125) AND (b < 225)) - -> Seq Scan on alpha_pos_p2 t1_5 - Filter: ((b >= 125) AND (b < 225)) - -> Seq Scan on alpha_pos_p3 t1_6 - Filter: ((b >= 125) AND (b < 225)) -(29 rows) - -SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.b = t2.b) WHERE t1.b >= 125 AND t1.b < 225 ORDER BY t1.a, t1.b; - a | b | c | a | b | c -----+-----+------+----+-----+------ - -1 | 126 | 0006 | -1 | 126 | 0006 - -1 | 129 | 0009 | -1 | 129 | 0009 - -1 | 133 | 0003 | -1 | 133 | 0003 - -1 | 134 | 0004 | -1 | 134 | 0004 - -1 | 136 | 0006 | -1 | 136 | 0006 - -1 | 139 | 0009 | -1 | 139 | 0009 - -1 | 143 | 0003 | -1 | 143 | 0003 - -1 | 144 | 0004 | -1 | 144 | 0004 - -1 | 146 | 0006 | -1 | 146 | 0006 - -1 | 149 | 0009 | -1 | 149 | 0009 - -1 | 203 | 0003 | -1 | 203 | 0003 - -1 | 204 | 0004 | -1 | 204 | 0004 - -1 | 206 | 0006 | -1 | 206 | 0006 - -1 | 209 | 0009 | -1 | 209 | 0009 - -1 | 213 | 0003 | -1 | 213 | 0003 - -1 | 214 | 0004 | -1 | 214 | 0004 - -1 | 216 | 0006 | -1 | 216 | 0006 - -1 | 219 | 0009 | -1 | 219 | 0009 - -1 | 223 | 0003 | -1 | 223 | 0003 - -1 | 224 | 0004 | -1 | 224 | 0004 - 1 | 126 | 0006 | 1 | 126 | 0006 - 1 | 129 | 0009 | 1 | 129 | 0009 - 1 | 133 | 0003 | 1 | 133 | 0003 - 1 | 134 | 0004 | 1 | 134 | 0004 - 1 | 136 | 0006 | 1 | 136 | 0006 - 1 | 139 | 0009 | 1 | 139 | 0009 - 1 | 143 | 0003 | 1 | 143 | 0003 - 1 | 144 | 0004 | 1 | 144 | 0004 - 1 | 146 | 0006 | 1 | 146 | 0006 - 1 | 149 | 0009 | 1 | 149 | 0009 - 1 | 203 | 0003 | 1 | 203 | 0003 - 1 | 204 | 0004 | 1 | 204 | 0004 - 1 | 206 | 0006 | 1 | 206 | 0006 - 1 | 209 | 0009 | 1 | 209 | 0009 - 1 | 213 | 0003 | 1 | 213 | 0003 - 1 | 214 | 0004 | 1 | 214 | 0004 - 1 | 216 | 0006 | 1 | 216 | 0006 - 1 | 219 | 0009 | 1 | 219 | 0009 - 1 | 223 | 0003 | 1 | 223 | 0003 - 1 | 224 | 0004 | 1 | 224 | 0004 -(40 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE ((t1.b >= 100 AND t1.b < 110) OR (t1.b >= 200 AND t1.b < 210)) AND ((t2.b >= 100 AND t2.b < 110) OR (t2.b >= 200 AND t2.b < 210)) AND t1.c IN ('0004', '0009') ORDER BY t1.a, t1.b, t2.b; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------- - Sort - Sort Key: t1.a, t1.b, t2.b - -> Append - -> Hash Join - Hash Cond: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) - -> Append - -> Seq Scan on alpha_neg_p1 t1_2 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Seq Scan on alpha_neg_p2 
t1_3 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Hash - -> Append - -> Seq Scan on beta_neg_p1 t2_2 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) - -> Seq Scan on beta_neg_p2 t2_3 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) - -> Nested Loop - Join Filter: ((t1_4.a = t2_4.a) AND (t1_4.c = t2_4.c)) - -> Seq Scan on alpha_pos_p2 t1_4 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Seq Scan on beta_pos_p2 t2_4 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) - -> Nested Loop - Join Filter: ((t1_5.a = t2_5.a) AND (t1_5.c = t2_5.c)) - -> Seq Scan on alpha_pos_p3 t1_5 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Seq Scan on beta_pos_p3 t2_5 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) -(28 rows) - -SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE ((t1.b >= 100 AND t1.b < 110) OR (t1.b >= 200 AND t1.b < 210)) AND ((t2.b >= 100 AND t2.b < 110) OR (t2.b >= 200 AND t2.b < 210)) AND t1.c IN ('0004', '0009') ORDER BY t1.a, t1.b, t2.b; - a | b | c | a | b | c -----+-----+------+----+-----+------ - -1 | 104 | 0004 | -1 | 104 | 0004 - -1 | 104 | 0004 | -1 | 204 | 0004 - -1 | 109 | 0009 | -1 | 109 | 0009 - -1 | 109 | 0009 | -1 | 209 | 0009 - -1 | 204 | 0004 | -1 | 104 | 0004 - -1 | 204 | 0004 | -1 | 204 | 0004 - -1 | 209 | 0009 | -1 | 109 | 0009 - -1 | 209 | 0009 | -1 | 209 | 0009 - 1 | 104 | 0004 | 1 | 104 | 0004 - 1 | 104 | 0004 | 1 | 204 | 0004 - 1 | 109 | 0009 | 1 | 109 | 0009 - 1 | 109 | 0009 | 1 | 209 | 0009 - 1 | 204 | 0004 | 1 | 104 | 0004 - 1 | 204 | 0004 | 1 | 204 | 0004 - 1 | 209 | 0009 | 1 | 109 | 0009 - 1 | 209 | 0009 | 1 | 209 | 0009 -(16 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.b = t2.b AND t1.c = t2.c) WHERE ((t1.b >= 100 AND t1.b < 110) OR (t1.b >= 200 AND t1.b < 210)) AND ((t2.b >= 100 AND t2.b < 110) OR (t2.b >= 200 AND t2.b < 210)) AND t1.c IN ('0004', '0009') ORDER BY t1.a, t1.b; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------- - Sort - Sort Key: t1.a, t1.b - -> Append - -> Hash Join - Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.b = t2_1.b) AND (t1_1.c = t2_1.c)) - -> Seq Scan on alpha_neg_p1 t1_1 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Hash - -> Seq Scan on beta_neg_p1 t2_1 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) - -> Hash Join - Hash Cond: ((t1_2.a = t2_2.a) AND (t1_2.b = t2_2.b) AND (t1_2.c = t2_2.c)) - -> Seq Scan on alpha_neg_p2 t1_2 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Hash - -> Seq Scan on beta_neg_p2 t2_2 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) - -> Nested Loop - Join Filter: ((t1_3.a = t2_3.a) AND (t1_3.b = t2_3.b) AND (t1_3.c = t2_3.c)) - -> Seq Scan on alpha_pos_p2 t1_3 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Seq Scan on beta_pos_p2 t2_3 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) - -> Nested Loop - Join Filter: ((t1_4.a = t2_4.a) AND (t1_4.b = t2_4.b) AND (t1_4.c = t2_4.c)) - -> Seq Scan on 
alpha_pos_p3 t1_4 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Seq Scan on beta_pos_p3 t2_4 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) -(29 rows) - -SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.b = t2.b AND t1.c = t2.c) WHERE ((t1.b >= 100 AND t1.b < 110) OR (t1.b >= 200 AND t1.b < 210)) AND ((t2.b >= 100 AND t2.b < 110) OR (t2.b >= 200 AND t2.b < 210)) AND t1.c IN ('0004', '0009') ORDER BY t1.a, t1.b; - a | b | c | a | b | c -----+-----+------+----+-----+------ - -1 | 104 | 0004 | -1 | 104 | 0004 - -1 | 109 | 0009 | -1 | 109 | 0009 - -1 | 204 | 0004 | -1 | 204 | 0004 - -1 | 209 | 0009 | -1 | 209 | 0009 - 1 | 104 | 0004 | 1 | 104 | 0004 - 1 | 109 | 0009 | 1 | 109 | 0009 - 1 | 204 | 0004 | 1 | 204 | 0004 - 1 | 209 | 0009 | 1 | 209 | 0009 -(8 rows) - --- partitionwise join with fractional paths -CREATE TABLE fract_t (id BIGINT, PRIMARY KEY (id)) PARTITION BY RANGE (id); -CREATE TABLE fract_t0 PARTITION OF fract_t FOR VALUES FROM ('0') TO ('1000'); -CREATE TABLE fract_t1 PARTITION OF fract_t FOR VALUES FROM ('1000') TO ('2000'); --- insert data -INSERT INTO fract_t (id) (SELECT generate_series(0, 1999)); -ANALYZE fract_t; --- verify plan; nested index only scans -SET max_parallel_workers_per_gather = 0; -SET enable_partitionwise_join = on; -EXPLAIN (COSTS OFF) -SELECT x.id, y.id FROM fract_t x LEFT JOIN fract_t y USING (id) ORDER BY x.id ASC LIMIT 10; - QUERY PLAN ------------------------------------------------------------------------ - Limit - -> Merge Append - Sort Key: x.id - -> Merge Left Join - Merge Cond: (x_1.id = y_1.id) - -> Index Only Scan using fract_t0_pkey on fract_t0 x_1 - -> Index Only Scan using fract_t0_pkey on fract_t0 y_1 - -> Merge Left Join - Merge Cond: (x_2.id = y_2.id) - -> Index Only Scan using fract_t1_pkey on fract_t1 x_2 - -> Index Only Scan using fract_t1_pkey on fract_t1 y_2 -(11 rows) - -EXPLAIN (COSTS OFF) -SELECT x.id, y.id FROM fract_t x LEFT JOIN fract_t y USING (id) ORDER BY x.id DESC LIMIT 10; - QUERY PLAN --------------------------------------------------------------------------------- - Limit - -> Merge Append - Sort Key: x.id DESC - -> Nested Loop Left Join - -> Index Only Scan Backward using fract_t0_pkey on fract_t0 x_1 - -> Index Only Scan using fract_t0_pkey on fract_t0 y_1 - Index Cond: (id = x_1.id) - -> Nested Loop Left Join - -> Index Only Scan Backward using fract_t1_pkey on fract_t1 x_2 - -> Index Only Scan using fract_t1_pkey on fract_t1 y_2 - Index Cond: (id = x_2.id) -(11 rows) - --- cleanup -DROP TABLE fract_t; -RESET max_parallel_workers_per_gather; -RESET enable_partitionwise_join; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/partition_prune.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/partition_prune.out --- /Users/admin/pgsql/src/test/regress/expected/partition_prune.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/partition_prune.out 2024-12-13 13:20:13 @@ -1,4442 +1,2 @@ --- --- Test partitioning planner code --- --- Helper function which can be used for masking out portions of EXPLAIN --- ANALYZE which could contain information that's not consistent on all --- platforms. 
-create function explain_analyze(query text) returns setof text -language plpgsql as -$$ -declare - ln text; -begin - for ln in - execute format('explain (analyze, costs off, summary off, timing off, buffers off) %s', - query) - loop - ln := regexp_replace(ln, 'Maximum Storage: \d+', 'Maximum Storage: N'); - return next ln; - end loop; -end; -$$; --- Force generic plans to be used for all prepared statements in this file. -set plan_cache_mode = force_generic_plan; -create table lp (a char) partition by list (a); -create table lp_default partition of lp default; -create table lp_ef partition of lp for values in ('e', 'f'); -create table lp_ad partition of lp for values in ('a', 'd'); -create table lp_bc partition of lp for values in ('b', 'c'); -create table lp_g partition of lp for values in ('g'); -create table lp_null partition of lp for values in (null); -explain (costs off) select * from lp; - QUERY PLAN ------------------------------------ - Append - -> Seq Scan on lp_ad lp_1 - -> Seq Scan on lp_bc lp_2 - -> Seq Scan on lp_ef lp_3 - -> Seq Scan on lp_g lp_4 - -> Seq Scan on lp_null lp_5 - -> Seq Scan on lp_default lp_6 -(7 rows) - -explain (costs off) select * from lp where a > 'a' and a < 'd'; - QUERY PLAN ------------------------------------------------------------ - Append - -> Seq Scan on lp_bc lp_1 - Filter: ((a > 'a'::bpchar) AND (a < 'd'::bpchar)) - -> Seq Scan on lp_default lp_2 - Filter: ((a > 'a'::bpchar) AND (a < 'd'::bpchar)) -(5 rows) - -explain (costs off) select * from lp where a > 'a' and a <= 'd'; - QUERY PLAN ------------------------------------------------------------- - Append - -> Seq Scan on lp_ad lp_1 - Filter: ((a > 'a'::bpchar) AND (a <= 'd'::bpchar)) - -> Seq Scan on lp_bc lp_2 - Filter: ((a > 'a'::bpchar) AND (a <= 'd'::bpchar)) - -> Seq Scan on lp_default lp_3 - Filter: ((a > 'a'::bpchar) AND (a <= 'd'::bpchar)) -(7 rows) - -explain (costs off) select * from lp where a = 'a'; - QUERY PLAN ------------------------------ - Seq Scan on lp_ad lp - Filter: (a = 'a'::bpchar) -(2 rows) - -explain (costs off) select * from lp where 'a' = a; /* commuted */ - QUERY PLAN ------------------------------ - Seq Scan on lp_ad lp - Filter: ('a'::bpchar = a) -(2 rows) - -explain (costs off) select * from lp where a is not null; - QUERY PLAN ------------------------------------ - Append - -> Seq Scan on lp_ad lp_1 - Filter: (a IS NOT NULL) - -> Seq Scan on lp_bc lp_2 - Filter: (a IS NOT NULL) - -> Seq Scan on lp_ef lp_3 - Filter: (a IS NOT NULL) - -> Seq Scan on lp_g lp_4 - Filter: (a IS NOT NULL) - -> Seq Scan on lp_default lp_5 - Filter: (a IS NOT NULL) -(11 rows) - -explain (costs off) select * from lp where a is null; - QUERY PLAN ------------------------- - Seq Scan on lp_null lp - Filter: (a IS NULL) -(2 rows) - -explain (costs off) select * from lp where a = 'a' or a = 'c'; - QUERY PLAN ----------------------------------------------------------- - Append - -> Seq Scan on lp_ad lp_1 - Filter: ((a = 'a'::bpchar) OR (a = 'c'::bpchar)) - -> Seq Scan on lp_bc lp_2 - Filter: ((a = 'a'::bpchar) OR (a = 'c'::bpchar)) -(5 rows) - -explain (costs off) select * from lp where a is not null and (a = 'a' or a = 'c'); - QUERY PLAN --------------------------------------------------------------------------------- - Append - -> Seq Scan on lp_ad lp_1 - Filter: ((a IS NOT NULL) AND ((a = 'a'::bpchar) OR (a = 'c'::bpchar))) - -> Seq Scan on lp_bc lp_2 - Filter: ((a IS NOT NULL) AND ((a = 'a'::bpchar) OR (a = 'c'::bpchar))) -(5 rows) - -explain (costs off) select * from lp where a <> 'g'; 
- QUERY PLAN ------------------------------------- - Append - -> Seq Scan on lp_ad lp_1 - Filter: (a <> 'g'::bpchar) - -> Seq Scan on lp_bc lp_2 - Filter: (a <> 'g'::bpchar) - -> Seq Scan on lp_ef lp_3 - Filter: (a <> 'g'::bpchar) - -> Seq Scan on lp_default lp_4 - Filter: (a <> 'g'::bpchar) -(9 rows) - -explain (costs off) select * from lp where a <> 'a' and a <> 'd'; - QUERY PLAN -------------------------------------------------------------- - Append - -> Seq Scan on lp_bc lp_1 - Filter: ((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) - -> Seq Scan on lp_ef lp_2 - Filter: ((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) - -> Seq Scan on lp_g lp_3 - Filter: ((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) - -> Seq Scan on lp_default lp_4 - Filter: ((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) -(9 rows) - -explain (costs off) select * from lp where a not in ('a', 'd'); - QUERY PLAN ------------------------------------------------- - Append - -> Seq Scan on lp_bc lp_1 - Filter: (a <> ALL ('{a,d}'::bpchar[])) - -> Seq Scan on lp_ef lp_2 - Filter: (a <> ALL ('{a,d}'::bpchar[])) - -> Seq Scan on lp_g lp_3 - Filter: (a <> ALL ('{a,d}'::bpchar[])) - -> Seq Scan on lp_default lp_4 - Filter: (a <> ALL ('{a,d}'::bpchar[])) -(9 rows) - --- collation matches the partitioning collation, pruning works -create table coll_pruning (a text collate "C") partition by list (a); -create table coll_pruning_a partition of coll_pruning for values in ('a'); -create table coll_pruning_b partition of coll_pruning for values in ('b'); -create table coll_pruning_def partition of coll_pruning default; -explain (costs off) select * from coll_pruning where a collate "C" = 'a' collate "C"; - QUERY PLAN ------------------------------------------ - Seq Scan on coll_pruning_a coll_pruning - Filter: (a = 'a'::text COLLATE "C") -(2 rows) - --- collation doesn't match the partitioning collation, no pruning occurs -explain (costs off) select * from coll_pruning where a collate "POSIX" = 'a' collate "POSIX"; - QUERY PLAN ---------------------------------------------------------- - Append - -> Seq Scan on coll_pruning_a coll_pruning_1 - Filter: ((a)::text = 'a'::text COLLATE "POSIX") - -> Seq Scan on coll_pruning_b coll_pruning_2 - Filter: ((a)::text = 'a'::text COLLATE "POSIX") - -> Seq Scan on coll_pruning_def coll_pruning_3 - Filter: ((a)::text = 'a'::text COLLATE "POSIX") -(7 rows) - -create table rlp (a int, b varchar) partition by range (a); -create table rlp_default partition of rlp default partition by list (a); -create table rlp_default_default partition of rlp_default default; -create table rlp_default_10 partition of rlp_default for values in (10); -create table rlp_default_30 partition of rlp_default for values in (30); -create table rlp_default_null partition of rlp_default for values in (null); -create table rlp1 partition of rlp for values from (minvalue) to (1); -create table rlp2 partition of rlp for values from (1) to (10); -create table rlp3 (b varchar, a int) partition by list (b varchar_ops); -create table rlp3_default partition of rlp3 default; -create table rlp3abcd partition of rlp3 for values in ('ab', 'cd'); -create table rlp3efgh partition of rlp3 for values in ('ef', 'gh'); -create table rlp3nullxy partition of rlp3 for values in (null, 'xy'); -alter table rlp attach partition rlp3 for values from (15) to (20); -create table rlp4 partition of rlp for values from (20) to (30) partition by range (a); -create table rlp4_default partition of rlp4 default; -create table rlp4_1 partition of rlp4 for values from (20) to (25); 
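[Annotation] The partition_prune file forces generic plans (plan_cache_mode = force_generic_plan, set near the top of the quoted hunk) so that pruning is exercised at executor startup rather than at plan time. A minimal sketch of that behavior, using an invented table demo3 and prepared statement q:

-- Hypothetical illustration; demo3 and q are not part of the test schema.
SET plan_cache_mode = force_generic_plan;
CREATE TABLE demo3 (a int) PARTITION BY LIST (a);
CREATE TABLE demo3_p1 PARTITION OF demo3 FOR VALUES IN (1);
CREATE TABLE demo3_p2 PARTITION OF demo3 FOR VALUES IN (2);
PREPARE q (int) AS SELECT * FROM demo3 WHERE a = $1;

-- The generic plan cannot prune at plan time (the parameter value is
-- unknown), so the Append keeps both partitions and prunes at executor
-- startup; EXPLAIN reports this as "Subplans Removed: 1" with the
-- parameterized filter (a = $1).
EXPLAIN (COSTS OFF) EXECUTE q (1);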
-create table rlp4_2 partition of rlp4 for values from (25) to (29); -create table rlp5 partition of rlp for values from (31) to (maxvalue) partition by range (a); -create table rlp5_default partition of rlp5 default; -create table rlp5_1 partition of rlp5 for values from (31) to (40); -explain (costs off) select * from rlp where a < 1; - QUERY PLAN ----------------------- - Seq Scan on rlp1 rlp - Filter: (a < 1) -(2 rows) - -explain (costs off) select * from rlp where 1 > a; /* commuted */ - QUERY PLAN ----------------------- - Seq Scan on rlp1 rlp - Filter: (1 > a) -(2 rows) - -explain (costs off) select * from rlp where a <= 1; - QUERY PLAN ------------------------------- - Append - -> Seq Scan on rlp1 rlp_1 - Filter: (a <= 1) - -> Seq Scan on rlp2 rlp_2 - Filter: (a <= 1) -(5 rows) - -explain (costs off) select * from rlp where a = 1; - QUERY PLAN ----------------------- - Seq Scan on rlp2 rlp - Filter: (a = 1) -(2 rows) - -explain (costs off) select * from rlp where a = 1::bigint; /* same as above */ - QUERY PLAN ------------------------------ - Seq Scan on rlp2 rlp - Filter: (a = '1'::bigint) -(2 rows) - -explain (costs off) select * from rlp where a = 1::numeric; /* no pruning */ - QUERY PLAN ------------------------------------------------ - Append - -> Seq Scan on rlp1 rlp_1 - Filter: ((a)::numeric = '1'::numeric) - -> Seq Scan on rlp2 rlp_2 - Filter: ((a)::numeric = '1'::numeric) - -> Seq Scan on rlp3abcd rlp_3 - Filter: ((a)::numeric = '1'::numeric) - -> Seq Scan on rlp3efgh rlp_4 - Filter: ((a)::numeric = '1'::numeric) - -> Seq Scan on rlp3nullxy rlp_5 - Filter: ((a)::numeric = '1'::numeric) - -> Seq Scan on rlp3_default rlp_6 - Filter: ((a)::numeric = '1'::numeric) - -> Seq Scan on rlp4_1 rlp_7 - Filter: ((a)::numeric = '1'::numeric) - -> Seq Scan on rlp4_2 rlp_8 - Filter: ((a)::numeric = '1'::numeric) - -> Seq Scan on rlp4_default rlp_9 - Filter: ((a)::numeric = '1'::numeric) - -> Seq Scan on rlp5_1 rlp_10 - Filter: ((a)::numeric = '1'::numeric) - -> Seq Scan on rlp5_default rlp_11 - Filter: ((a)::numeric = '1'::numeric) - -> Seq Scan on rlp_default_10 rlp_12 - Filter: ((a)::numeric = '1'::numeric) - -> Seq Scan on rlp_default_30 rlp_13 - Filter: ((a)::numeric = '1'::numeric) - -> Seq Scan on rlp_default_null rlp_14 - Filter: ((a)::numeric = '1'::numeric) - -> Seq Scan on rlp_default_default rlp_15 - Filter: ((a)::numeric = '1'::numeric) -(31 rows) - -explain (costs off) select * from rlp where a <= 10; - QUERY PLAN ---------------------------------------------- - Append - -> Seq Scan on rlp1 rlp_1 - Filter: (a <= 10) - -> Seq Scan on rlp2 rlp_2 - Filter: (a <= 10) - -> Seq Scan on rlp_default_10 rlp_3 - Filter: (a <= 10) - -> Seq Scan on rlp_default_default rlp_4 - Filter: (a <= 10) -(9 rows) - -explain (costs off) select * from rlp where a > 10; - QUERY PLAN ----------------------------------------------- - Append - -> Seq Scan on rlp3abcd rlp_1 - Filter: (a > 10) - -> Seq Scan on rlp3efgh rlp_2 - Filter: (a > 10) - -> Seq Scan on rlp3nullxy rlp_3 - Filter: (a > 10) - -> Seq Scan on rlp3_default rlp_4 - Filter: (a > 10) - -> Seq Scan on rlp4_1 rlp_5 - Filter: (a > 10) - -> Seq Scan on rlp4_2 rlp_6 - Filter: (a > 10) - -> Seq Scan on rlp4_default rlp_7 - Filter: (a > 10) - -> Seq Scan on rlp5_1 rlp_8 - Filter: (a > 10) - -> Seq Scan on rlp5_default rlp_9 - Filter: (a > 10) - -> Seq Scan on rlp_default_30 rlp_10 - Filter: (a > 10) - -> Seq Scan on rlp_default_default rlp_11 - Filter: (a > 10) -(23 rows) - -explain (costs off) select * from rlp where a < 15; - QUERY PLAN 
---------------------------------------------- - Append - -> Seq Scan on rlp1 rlp_1 - Filter: (a < 15) - -> Seq Scan on rlp2 rlp_2 - Filter: (a < 15) - -> Seq Scan on rlp_default_10 rlp_3 - Filter: (a < 15) - -> Seq Scan on rlp_default_default rlp_4 - Filter: (a < 15) -(9 rows) - -explain (costs off) select * from rlp where a <= 15; - QUERY PLAN ---------------------------------------------- - Append - -> Seq Scan on rlp1 rlp_1 - Filter: (a <= 15) - -> Seq Scan on rlp2 rlp_2 - Filter: (a <= 15) - -> Seq Scan on rlp3abcd rlp_3 - Filter: (a <= 15) - -> Seq Scan on rlp3efgh rlp_4 - Filter: (a <= 15) - -> Seq Scan on rlp3nullxy rlp_5 - Filter: (a <= 15) - -> Seq Scan on rlp3_default rlp_6 - Filter: (a <= 15) - -> Seq Scan on rlp_default_10 rlp_7 - Filter: (a <= 15) - -> Seq Scan on rlp_default_default rlp_8 - Filter: (a <= 15) -(17 rows) - -explain (costs off) select * from rlp where a > 15 and b = 'ab'; - QUERY PLAN ---------------------------------------------------------- - Append - -> Seq Scan on rlp3abcd rlp_1 - Filter: ((a > 15) AND ((b)::text = 'ab'::text)) - -> Seq Scan on rlp4_1 rlp_2 - Filter: ((a > 15) AND ((b)::text = 'ab'::text)) - -> Seq Scan on rlp4_2 rlp_3 - Filter: ((a > 15) AND ((b)::text = 'ab'::text)) - -> Seq Scan on rlp4_default rlp_4 - Filter: ((a > 15) AND ((b)::text = 'ab'::text)) - -> Seq Scan on rlp5_1 rlp_5 - Filter: ((a > 15) AND ((b)::text = 'ab'::text)) - -> Seq Scan on rlp5_default rlp_6 - Filter: ((a > 15) AND ((b)::text = 'ab'::text)) - -> Seq Scan on rlp_default_30 rlp_7 - Filter: ((a > 15) AND ((b)::text = 'ab'::text)) - -> Seq Scan on rlp_default_default rlp_8 - Filter: ((a > 15) AND ((b)::text = 'ab'::text)) -(17 rows) - -explain (costs off) select * from rlp where a = 16; - QUERY PLAN --------------------------------------- - Append - -> Seq Scan on rlp3abcd rlp_1 - Filter: (a = 16) - -> Seq Scan on rlp3efgh rlp_2 - Filter: (a = 16) - -> Seq Scan on rlp3nullxy rlp_3 - Filter: (a = 16) - -> Seq Scan on rlp3_default rlp_4 - Filter: (a = 16) -(9 rows) - -explain (costs off) select * from rlp where a = 16 and b in ('not', 'in', 'here'); - QUERY PLAN ----------------------------------------------------------------------- - Seq Scan on rlp3_default rlp - Filter: ((a = 16) AND ((b)::text = ANY ('{not,in,here}'::text[]))) -(2 rows) - -explain (costs off) select * from rlp where a = 16 and b < 'ab'; - QUERY PLAN ---------------------------------------------------- - Seq Scan on rlp3_default rlp - Filter: (((b)::text < 'ab'::text) AND (a = 16)) -(2 rows) - -explain (costs off) select * from rlp where a = 16 and b <= 'ab'; - QUERY PLAN ----------------------------------------------------------- - Append - -> Seq Scan on rlp3abcd rlp_1 - Filter: (((b)::text <= 'ab'::text) AND (a = 16)) - -> Seq Scan on rlp3_default rlp_2 - Filter: (((b)::text <= 'ab'::text) AND (a = 16)) -(5 rows) - -explain (costs off) select * from rlp where a = 16 and b is null; - QUERY PLAN --------------------------------------- - Seq Scan on rlp3nullxy rlp - Filter: ((b IS NULL) AND (a = 16)) -(2 rows) - -explain (costs off) select * from rlp where a = 16 and b is not null; - QUERY PLAN ------------------------------------------------- - Append - -> Seq Scan on rlp3abcd rlp_1 - Filter: ((b IS NOT NULL) AND (a = 16)) - -> Seq Scan on rlp3efgh rlp_2 - Filter: ((b IS NOT NULL) AND (a = 16)) - -> Seq Scan on rlp3nullxy rlp_3 - Filter: ((b IS NOT NULL) AND (a = 16)) - -> Seq Scan on rlp3_default rlp_4 - Filter: ((b IS NOT NULL) AND (a = 16)) -(9 rows) - -explain (costs off) select * from rlp where a 
is null; - QUERY PLAN ----------------------------------- - Seq Scan on rlp_default_null rlp - Filter: (a IS NULL) -(2 rows) - -explain (costs off) select * from rlp where a is not null; - QUERY PLAN ----------------------------------------------- - Append - -> Seq Scan on rlp1 rlp_1 - Filter: (a IS NOT NULL) - -> Seq Scan on rlp2 rlp_2 - Filter: (a IS NOT NULL) - -> Seq Scan on rlp3abcd rlp_3 - Filter: (a IS NOT NULL) - -> Seq Scan on rlp3efgh rlp_4 - Filter: (a IS NOT NULL) - -> Seq Scan on rlp3nullxy rlp_5 - Filter: (a IS NOT NULL) - -> Seq Scan on rlp3_default rlp_6 - Filter: (a IS NOT NULL) - -> Seq Scan on rlp4_1 rlp_7 - Filter: (a IS NOT NULL) - -> Seq Scan on rlp4_2 rlp_8 - Filter: (a IS NOT NULL) - -> Seq Scan on rlp4_default rlp_9 - Filter: (a IS NOT NULL) - -> Seq Scan on rlp5_1 rlp_10 - Filter: (a IS NOT NULL) - -> Seq Scan on rlp5_default rlp_11 - Filter: (a IS NOT NULL) - -> Seq Scan on rlp_default_10 rlp_12 - Filter: (a IS NOT NULL) - -> Seq Scan on rlp_default_30 rlp_13 - Filter: (a IS NOT NULL) - -> Seq Scan on rlp_default_default rlp_14 - Filter: (a IS NOT NULL) -(29 rows) - -explain (costs off) select * from rlp where a > 30; - QUERY PLAN ---------------------------------------------- - Append - -> Seq Scan on rlp5_1 rlp_1 - Filter: (a > 30) - -> Seq Scan on rlp5_default rlp_2 - Filter: (a > 30) - -> Seq Scan on rlp_default_default rlp_3 - Filter: (a > 30) -(7 rows) - -explain (costs off) select * from rlp where a = 30; /* only default is scanned */ - QUERY PLAN --------------------------------- - Seq Scan on rlp_default_30 rlp - Filter: (a = 30) -(2 rows) - -explain (costs off) select * from rlp where a <= 31; - QUERY PLAN ----------------------------------------------- - Append - -> Seq Scan on rlp1 rlp_1 - Filter: (a <= 31) - -> Seq Scan on rlp2 rlp_2 - Filter: (a <= 31) - -> Seq Scan on rlp3abcd rlp_3 - Filter: (a <= 31) - -> Seq Scan on rlp3efgh rlp_4 - Filter: (a <= 31) - -> Seq Scan on rlp3nullxy rlp_5 - Filter: (a <= 31) - -> Seq Scan on rlp3_default rlp_6 - Filter: (a <= 31) - -> Seq Scan on rlp4_1 rlp_7 - Filter: (a <= 31) - -> Seq Scan on rlp4_2 rlp_8 - Filter: (a <= 31) - -> Seq Scan on rlp4_default rlp_9 - Filter: (a <= 31) - -> Seq Scan on rlp5_1 rlp_10 - Filter: (a <= 31) - -> Seq Scan on rlp_default_10 rlp_11 - Filter: (a <= 31) - -> Seq Scan on rlp_default_30 rlp_12 - Filter: (a <= 31) - -> Seq Scan on rlp_default_default rlp_13 - Filter: (a <= 31) -(27 rows) - -explain (costs off) select * from rlp where a = 1 or a = 7; - QUERY PLAN --------------------------------- - Seq Scan on rlp2 rlp - Filter: ((a = 1) OR (a = 7)) -(2 rows) - -explain (costs off) select * from rlp where a = 1 or b = 'ab'; - QUERY PLAN -------------------------------------------------------- - Append - -> Seq Scan on rlp1 rlp_1 - Filter: ((a = 1) OR ((b)::text = 'ab'::text)) - -> Seq Scan on rlp2 rlp_2 - Filter: ((a = 1) OR ((b)::text = 'ab'::text)) - -> Seq Scan on rlp3abcd rlp_3 - Filter: ((a = 1) OR ((b)::text = 'ab'::text)) - -> Seq Scan on rlp4_1 rlp_4 - Filter: ((a = 1) OR ((b)::text = 'ab'::text)) - -> Seq Scan on rlp4_2 rlp_5 - Filter: ((a = 1) OR ((b)::text = 'ab'::text)) - -> Seq Scan on rlp4_default rlp_6 - Filter: ((a = 1) OR ((b)::text = 'ab'::text)) - -> Seq Scan on rlp5_1 rlp_7 - Filter: ((a = 1) OR ((b)::text = 'ab'::text)) - -> Seq Scan on rlp5_default rlp_8 - Filter: ((a = 1) OR ((b)::text = 'ab'::text)) - -> Seq Scan on rlp_default_10 rlp_9 - Filter: ((a = 1) OR ((b)::text = 'ab'::text)) - -> Seq Scan on rlp_default_30 rlp_10 - Filter: ((a = 1) OR ((b)::text = 
'ab'::text)) - -> Seq Scan on rlp_default_null rlp_11 - Filter: ((a = 1) OR ((b)::text = 'ab'::text)) - -> Seq Scan on rlp_default_default rlp_12 - Filter: ((a = 1) OR ((b)::text = 'ab'::text)) -(25 rows) - -explain (costs off) select * from rlp where a > 20 and a < 27; - QUERY PLAN ------------------------------------------ - Append - -> Seq Scan on rlp4_1 rlp_1 - Filter: ((a > 20) AND (a < 27)) - -> Seq Scan on rlp4_2 rlp_2 - Filter: ((a > 20) AND (a < 27)) -(5 rows) - -explain (costs off) select * from rlp where a = 29; - QUERY PLAN ------------------------------- - Seq Scan on rlp4_default rlp - Filter: (a = 29) -(2 rows) - -explain (costs off) select * from rlp where a >= 29; - QUERY PLAN ---------------------------------------------- - Append - -> Seq Scan on rlp4_default rlp_1 - Filter: (a >= 29) - -> Seq Scan on rlp5_1 rlp_2 - Filter: (a >= 29) - -> Seq Scan on rlp5_default rlp_3 - Filter: (a >= 29) - -> Seq Scan on rlp_default_30 rlp_4 - Filter: (a >= 29) - -> Seq Scan on rlp_default_default rlp_5 - Filter: (a >= 29) -(11 rows) - -explain (costs off) select * from rlp where a < 1 or (a > 20 and a < 25); - QUERY PLAN ------------------------------------------------------- - Append - -> Seq Scan on rlp1 rlp_1 - Filter: ((a < 1) OR ((a > 20) AND (a < 25))) - -> Seq Scan on rlp4_1 rlp_2 - Filter: ((a < 1) OR ((a > 20) AND (a < 25))) -(5 rows) - --- where clause contradicts sub-partition's constraint -explain (costs off) select * from rlp where a = 20 or a = 40; - QUERY PLAN ----------------------------------------- - Append - -> Seq Scan on rlp4_1 rlp_1 - Filter: ((a = 20) OR (a = 40)) - -> Seq Scan on rlp5_default rlp_2 - Filter: ((a = 20) OR (a = 40)) -(5 rows) - -explain (costs off) select * from rlp3 where a = 20; /* empty */ - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - --- redundant clauses are eliminated -explain (costs off) select * from rlp where a > 1 and a = 10; /* only default */ - QUERY PLAN ----------------------------------- - Seq Scan on rlp_default_10 rlp - Filter: ((a > 1) AND (a = 10)) -(2 rows) - -explain (costs off) select * from rlp where a > 1 and a >=15; /* rlp3 onwards, including default */ - QUERY PLAN ----------------------------------------------- - Append - -> Seq Scan on rlp3abcd rlp_1 - Filter: ((a > 1) AND (a >= 15)) - -> Seq Scan on rlp3efgh rlp_2 - Filter: ((a > 1) AND (a >= 15)) - -> Seq Scan on rlp3nullxy rlp_3 - Filter: ((a > 1) AND (a >= 15)) - -> Seq Scan on rlp3_default rlp_4 - Filter: ((a > 1) AND (a >= 15)) - -> Seq Scan on rlp4_1 rlp_5 - Filter: ((a > 1) AND (a >= 15)) - -> Seq Scan on rlp4_2 rlp_6 - Filter: ((a > 1) AND (a >= 15)) - -> Seq Scan on rlp4_default rlp_7 - Filter: ((a > 1) AND (a >= 15)) - -> Seq Scan on rlp5_1 rlp_8 - Filter: ((a > 1) AND (a >= 15)) - -> Seq Scan on rlp5_default rlp_9 - Filter: ((a > 1) AND (a >= 15)) - -> Seq Scan on rlp_default_30 rlp_10 - Filter: ((a > 1) AND (a >= 15)) - -> Seq Scan on rlp_default_default rlp_11 - Filter: ((a > 1) AND (a >= 15)) -(23 rows) - -explain (costs off) select * from rlp where a = 1 and a = 3; /* empty */ - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -explain (costs off) select * from rlp where (a = 1 and a = 3) or (a > 1 and a = 15); - QUERY PLAN -------------------------------------------------------------------- - Append - -> Seq Scan on rlp2 rlp_1 - Filter: (((a = 1) AND (a = 3)) OR ((a > 1) AND (a = 15))) - -> Seq Scan on rlp3abcd rlp_2 - Filter: (((a = 1) AND (a = 3)) OR ((a > 1) AND (a = 15))) - 
-> Seq Scan on rlp3efgh rlp_3 - Filter: (((a = 1) AND (a = 3)) OR ((a > 1) AND (a = 15))) - -> Seq Scan on rlp3nullxy rlp_4 - Filter: (((a = 1) AND (a = 3)) OR ((a > 1) AND (a = 15))) - -> Seq Scan on rlp3_default rlp_5 - Filter: (((a = 1) AND (a = 3)) OR ((a > 1) AND (a = 15))) -(11 rows) - --- multi-column keys -create table mc3p (a int, b int, c int) partition by range (a, abs(b), c); -create table mc3p_default partition of mc3p default; -create table mc3p0 partition of mc3p for values from (minvalue, minvalue, minvalue) to (1, 1, 1); -create table mc3p1 partition of mc3p for values from (1, 1, 1) to (10, 5, 10); -create table mc3p2 partition of mc3p for values from (10, 5, 10) to (10, 10, 10); -create table mc3p3 partition of mc3p for values from (10, 10, 10) to (10, 10, 20); -create table mc3p4 partition of mc3p for values from (10, 10, 20) to (10, maxvalue, maxvalue); -create table mc3p5 partition of mc3p for values from (11, 1, 1) to (20, 10, 10); -create table mc3p6 partition of mc3p for values from (20, 10, 10) to (20, 20, 20); -create table mc3p7 partition of mc3p for values from (20, 20, 20) to (maxvalue, maxvalue, maxvalue); -explain (costs off) select * from mc3p where a = 1; - QUERY PLAN ---------------------------------------- - Append - -> Seq Scan on mc3p0 mc3p_1 - Filter: (a = 1) - -> Seq Scan on mc3p1 mc3p_2 - Filter: (a = 1) - -> Seq Scan on mc3p_default mc3p_3 - Filter: (a = 1) -(7 rows) - -explain (costs off) select * from mc3p where a = 1 and abs(b) < 1; - QUERY PLAN --------------------------------------------- - Append - -> Seq Scan on mc3p0 mc3p_1 - Filter: ((a = 1) AND (abs(b) < 1)) - -> Seq Scan on mc3p_default mc3p_2 - Filter: ((a = 1) AND (abs(b) < 1)) -(5 rows) - -explain (costs off) select * from mc3p where a = 1 and abs(b) = 1; - QUERY PLAN --------------------------------------------- - Append - -> Seq Scan on mc3p0 mc3p_1 - Filter: ((a = 1) AND (abs(b) = 1)) - -> Seq Scan on mc3p1 mc3p_2 - Filter: ((a = 1) AND (abs(b) = 1)) - -> Seq Scan on mc3p_default mc3p_3 - Filter: ((a = 1) AND (abs(b) = 1)) -(7 rows) - -explain (costs off) select * from mc3p where a = 1 and abs(b) = 1 and c < 8; - QUERY PLAN --------------------------------------------------------- - Append - -> Seq Scan on mc3p0 mc3p_1 - Filter: ((c < 8) AND (a = 1) AND (abs(b) = 1)) - -> Seq Scan on mc3p1 mc3p_2 - Filter: ((c < 8) AND (a = 1) AND (abs(b) = 1)) -(5 rows) - -explain (costs off) select * from mc3p where a = 10 and abs(b) between 5 and 35; - QUERY PLAN ------------------------------------------------------------------ - Append - -> Seq Scan on mc3p1 mc3p_1 - Filter: ((a = 10) AND (abs(b) >= 5) AND (abs(b) <= 35)) - -> Seq Scan on mc3p2 mc3p_2 - Filter: ((a = 10) AND (abs(b) >= 5) AND (abs(b) <= 35)) - -> Seq Scan on mc3p3 mc3p_3 - Filter: ((a = 10) AND (abs(b) >= 5) AND (abs(b) <= 35)) - -> Seq Scan on mc3p4 mc3p_4 - Filter: ((a = 10) AND (abs(b) >= 5) AND (abs(b) <= 35)) - -> Seq Scan on mc3p_default mc3p_5 - Filter: ((a = 10) AND (abs(b) >= 5) AND (abs(b) <= 35)) -(11 rows) - -explain (costs off) select * from mc3p where a > 10; - QUERY PLAN ---------------------------------------- - Append - -> Seq Scan on mc3p5 mc3p_1 - Filter: (a > 10) - -> Seq Scan on mc3p6 mc3p_2 - Filter: (a > 10) - -> Seq Scan on mc3p7 mc3p_3 - Filter: (a > 10) - -> Seq Scan on mc3p_default mc3p_4 - Filter: (a > 10) -(9 rows) - -explain (costs off) select * from mc3p where a >= 10; - QUERY PLAN ---------------------------------------- - Append - -> Seq Scan on mc3p1 mc3p_1 - Filter: (a >= 10) - -> Seq Scan on 
mc3p2 mc3p_2 - Filter: (a >= 10) - -> Seq Scan on mc3p3 mc3p_3 - Filter: (a >= 10) - -> Seq Scan on mc3p4 mc3p_4 - Filter: (a >= 10) - -> Seq Scan on mc3p5 mc3p_5 - Filter: (a >= 10) - -> Seq Scan on mc3p6 mc3p_6 - Filter: (a >= 10) - -> Seq Scan on mc3p7 mc3p_7 - Filter: (a >= 10) - -> Seq Scan on mc3p_default mc3p_8 - Filter: (a >= 10) -(17 rows) - -explain (costs off) select * from mc3p where a < 10; - QUERY PLAN ---------------------------------------- - Append - -> Seq Scan on mc3p0 mc3p_1 - Filter: (a < 10) - -> Seq Scan on mc3p1 mc3p_2 - Filter: (a < 10) - -> Seq Scan on mc3p_default mc3p_3 - Filter: (a < 10) -(7 rows) - -explain (costs off) select * from mc3p where a <= 10 and abs(b) < 10; - QUERY PLAN ------------------------------------------------ - Append - -> Seq Scan on mc3p0 mc3p_1 - Filter: ((a <= 10) AND (abs(b) < 10)) - -> Seq Scan on mc3p1 mc3p_2 - Filter: ((a <= 10) AND (abs(b) < 10)) - -> Seq Scan on mc3p2 mc3p_3 - Filter: ((a <= 10) AND (abs(b) < 10)) - -> Seq Scan on mc3p_default mc3p_4 - Filter: ((a <= 10) AND (abs(b) < 10)) -(9 rows) - -explain (costs off) select * from mc3p where a = 11 and abs(b) = 0; - QUERY PLAN ---------------------------------------- - Seq Scan on mc3p_default mc3p - Filter: ((a = 11) AND (abs(b) = 0)) -(2 rows) - -explain (costs off) select * from mc3p where a = 20 and abs(b) = 10 and c = 100; - QUERY PLAN ------------------------------------------------------- - Seq Scan on mc3p6 mc3p - Filter: ((a = 20) AND (c = 100) AND (abs(b) = 10)) -(2 rows) - -explain (costs off) select * from mc3p where a > 20; - QUERY PLAN ---------------------------------------- - Append - -> Seq Scan on mc3p7 mc3p_1 - Filter: (a > 20) - -> Seq Scan on mc3p_default mc3p_2 - Filter: (a > 20) -(5 rows) - -explain (costs off) select * from mc3p where a >= 20; - QUERY PLAN ---------------------------------------- - Append - -> Seq Scan on mc3p5 mc3p_1 - Filter: (a >= 20) - -> Seq Scan on mc3p6 mc3p_2 - Filter: (a >= 20) - -> Seq Scan on mc3p7 mc3p_3 - Filter: (a >= 20) - -> Seq Scan on mc3p_default mc3p_4 - Filter: (a >= 20) -(9 rows) - -explain (costs off) select * from mc3p where (a = 1 and abs(b) = 1 and c = 1) or (a = 10 and abs(b) = 5 and c = 10) or (a > 11 and a < 20); - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------- - Append - -> Seq Scan on mc3p1 mc3p_1 - Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20))) - -> Seq Scan on mc3p2 mc3p_2 - Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20))) - -> Seq Scan on mc3p5 mc3p_3 - Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20))) - -> Seq Scan on mc3p_default mc3p_4 - Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20))) -(9 rows) - -explain (costs off) select * from mc3p where (a = 1 and abs(b) = 1 and c = 1) or (a = 10 and abs(b) = 5 and c = 10) or (a > 11 and a < 20) or a < 1; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------- - Append - -> Seq Scan on mc3p0 mc3p_1 - Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1)) - -> Seq Scan on mc3p1 mc3p_2 - Filter: (((a = 1) AND (abs(b) 
= 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1)) - -> Seq Scan on mc3p2 mc3p_3 - Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1)) - -> Seq Scan on mc3p5 mc3p_4 - Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1)) - -> Seq Scan on mc3p_default mc3p_5 - Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1)) -(11 rows) - -explain (costs off) select * from mc3p where (a = 1 and abs(b) = 1 and c = 1) or (a = 10 and abs(b) = 5 and c = 10) or (a > 11 and a < 20) or a < 1 or a = 1; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------------------------- - Append - -> Seq Scan on mc3p0 mc3p_1 - Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1) OR (a = 1)) - -> Seq Scan on mc3p1 mc3p_2 - Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1) OR (a = 1)) - -> Seq Scan on mc3p2 mc3p_3 - Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1) OR (a = 1)) - -> Seq Scan on mc3p5 mc3p_4 - Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1) OR (a = 1)) - -> Seq Scan on mc3p_default mc3p_5 - Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1) OR (a = 1)) -(11 rows) - -explain (costs off) select * from mc3p where a = 1 or abs(b) = 1 or c = 1; - QUERY PLAN ------------------------------------------------------- - Append - -> Seq Scan on mc3p0 mc3p_1 - Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) - -> Seq Scan on mc3p1 mc3p_2 - Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) - -> Seq Scan on mc3p2 mc3p_3 - Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) - -> Seq Scan on mc3p3 mc3p_4 - Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) - -> Seq Scan on mc3p4 mc3p_5 - Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) - -> Seq Scan on mc3p5 mc3p_6 - Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) - -> Seq Scan on mc3p6 mc3p_7 - Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) - -> Seq Scan on mc3p7 mc3p_8 - Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) - -> Seq Scan on mc3p_default mc3p_9 - Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) -(19 rows) - -explain (costs off) select * from mc3p where (a = 1 and abs(b) = 1) or (a = 10 and abs(b) = 10); - QUERY PLAN ------------------------------------------------------------------------------- - Append - -> Seq Scan on mc3p0 mc3p_1 - Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 10))) - -> Seq Scan on mc3p1 mc3p_2 - Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 10))) - -> Seq Scan on mc3p2 mc3p_3 - Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 10))) - -> Seq Scan on mc3p3 mc3p_4 - Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 10))) - -> Seq Scan on mc3p4 mc3p_5 - Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 10))) - -> Seq Scan on mc3p_default mc3p_6 - Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 10))) -(13 rows) - -explain (costs off) select 
* from mc3p where (a = 1 and abs(b) = 1) or (a = 10 and abs(b) = 9); - QUERY PLAN ------------------------------------------------------------------------------ - Append - -> Seq Scan on mc3p0 mc3p_1 - Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 9))) - -> Seq Scan on mc3p1 mc3p_2 - Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 9))) - -> Seq Scan on mc3p2 mc3p_3 - Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 9))) - -> Seq Scan on mc3p_default mc3p_4 - Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 9))) -(9 rows) - --- a simpler multi-column keys case -create table mc2p (a int, b int) partition by range (a, b); -create table mc2p_default partition of mc2p default; -create table mc2p0 partition of mc2p for values from (minvalue, minvalue) to (1, minvalue); -create table mc2p1 partition of mc2p for values from (1, minvalue) to (1, 1); -create table mc2p2 partition of mc2p for values from (1, 1) to (2, minvalue); -create table mc2p3 partition of mc2p for values from (2, minvalue) to (2, 1); -create table mc2p4 partition of mc2p for values from (2, 1) to (2, maxvalue); -create table mc2p5 partition of mc2p for values from (2, maxvalue) to (maxvalue, maxvalue); -explain (costs off) select * from mc2p where a < 2; - QUERY PLAN ---------------------------------------- - Append - -> Seq Scan on mc2p0 mc2p_1 - Filter: (a < 2) - -> Seq Scan on mc2p1 mc2p_2 - Filter: (a < 2) - -> Seq Scan on mc2p2 mc2p_3 - Filter: (a < 2) - -> Seq Scan on mc2p_default mc2p_4 - Filter: (a < 2) -(9 rows) - -explain (costs off) select * from mc2p where a = 2 and b < 1; - QUERY PLAN ---------------------------------- - Seq Scan on mc2p3 mc2p - Filter: ((b < 1) AND (a = 2)) -(2 rows) - -explain (costs off) select * from mc2p where a > 1; - QUERY PLAN ---------------------------------------- - Append - -> Seq Scan on mc2p2 mc2p_1 - Filter: (a > 1) - -> Seq Scan on mc2p3 mc2p_2 - Filter: (a > 1) - -> Seq Scan on mc2p4 mc2p_3 - Filter: (a > 1) - -> Seq Scan on mc2p5 mc2p_4 - Filter: (a > 1) - -> Seq Scan on mc2p_default mc2p_5 - Filter: (a > 1) -(11 rows) - -explain (costs off) select * from mc2p where a = 1 and b > 1; - QUERY PLAN ---------------------------------- - Seq Scan on mc2p2 mc2p - Filter: ((b > 1) AND (a = 1)) -(2 rows) - --- all partitions but the default one should be pruned -explain (costs off) select * from mc2p where a = 1 and b is null; - QUERY PLAN -------------------------------------- - Seq Scan on mc2p_default mc2p - Filter: ((b IS NULL) AND (a = 1)) -(2 rows) - -explain (costs off) select * from mc2p where a is null and b is null; - QUERY PLAN ------------------------------------------ - Seq Scan on mc2p_default mc2p - Filter: ((a IS NULL) AND (b IS NULL)) -(2 rows) - -explain (costs off) select * from mc2p where a is null and b = 1; - QUERY PLAN -------------------------------------- - Seq Scan on mc2p_default mc2p - Filter: ((a IS NULL) AND (b = 1)) -(2 rows) - -explain (costs off) select * from mc2p where a is null; - QUERY PLAN -------------------------------- - Seq Scan on mc2p_default mc2p - Filter: (a IS NULL) -(2 rows) - -explain (costs off) select * from mc2p where b is null; - QUERY PLAN -------------------------------- - Seq Scan on mc2p_default mc2p - Filter: (b IS NULL) -(2 rows) - --- boolean partitioning -create table boolpart (a bool) partition by list (a); -create table boolpart_default partition of boolpart default; -create table boolpart_t partition of boolpart for values in ('true'); -create table boolpart_f 
partition of boolpart for values in ('false'); -insert into boolpart values (true), (false), (null); -explain (costs off) select * from boolpart where a in (true, false); - QUERY PLAN ------------------------------------------------- - Append - -> Seq Scan on boolpart_f boolpart_1 - Filter: (a = ANY ('{t,f}'::boolean[])) - -> Seq Scan on boolpart_t boolpart_2 - Filter: (a = ANY ('{t,f}'::boolean[])) -(5 rows) - -explain (costs off) select * from boolpart where a = false; - QUERY PLAN ---------------------------------- - Seq Scan on boolpart_f boolpart - Filter: (NOT a) -(2 rows) - -explain (costs off) select * from boolpart where not a = false; - QUERY PLAN ---------------------------------- - Seq Scan on boolpart_t boolpart - Filter: a -(2 rows) - -explain (costs off) select * from boolpart where a is true or a is not true; - QUERY PLAN --------------------------------------------------- - Append - -> Seq Scan on boolpart_f boolpart_1 - Filter: ((a IS TRUE) OR (a IS NOT TRUE)) - -> Seq Scan on boolpart_t boolpart_2 - Filter: ((a IS TRUE) OR (a IS NOT TRUE)) - -> Seq Scan on boolpart_default boolpart_3 - Filter: ((a IS TRUE) OR (a IS NOT TRUE)) -(7 rows) - -explain (costs off) select * from boolpart where a is not true; - QUERY PLAN ------------------------------------------------ - Append - -> Seq Scan on boolpart_f boolpart_1 - Filter: (a IS NOT TRUE) - -> Seq Scan on boolpart_default boolpart_2 - Filter: (a IS NOT TRUE) -(5 rows) - -explain (costs off) select * from boolpart where a is not true and a is not false; - QUERY PLAN --------------------------------------------------- - Seq Scan on boolpart_default boolpart - Filter: ((a IS NOT TRUE) AND (a IS NOT FALSE)) -(2 rows) - -explain (costs off) select * from boolpart where a is unknown; - QUERY PLAN ---------------------------------------- - Seq Scan on boolpart_default boolpart - Filter: (a IS UNKNOWN) -(2 rows) - -explain (costs off) select * from boolpart where a is not unknown; - QUERY PLAN ------------------------------------------------ - Append - -> Seq Scan on boolpart_f boolpart_1 - Filter: (a IS NOT UNKNOWN) - -> Seq Scan on boolpart_t boolpart_2 - Filter: (a IS NOT UNKNOWN) - -> Seq Scan on boolpart_default boolpart_3 - Filter: (a IS NOT UNKNOWN) -(7 rows) - -select * from boolpart where a in (true, false); - a ---- - f - t -(2 rows) - -select * from boolpart where a = false; - a ---- - f -(1 row) - -select * from boolpart where not a = false; - a ---- - t -(1 row) - -select * from boolpart where a is true or a is not true; - a ---- - f - t - -(3 rows) - -select * from boolpart where a is not true; - a ---- - f - -(2 rows) - -select * from boolpart where a is not true and a is not false; - a ---- - -(1 row) - -select * from boolpart where a is unknown; - a ---- - -(1 row) - -select * from boolpart where a is not unknown; - a ---- - f - t -(2 rows) - --- try some other permutations with a NULL partition instead of a DEFAULT -delete from boolpart where a is null; -create table boolpart_null partition of boolpart for values in (null); -insert into boolpart values(null); -explain (costs off) select * from boolpart where a is not true; - QUERY PLAN --------------------------------------------- - Append - -> Seq Scan on boolpart_f boolpart_1 - Filter: (a IS NOT TRUE) - -> Seq Scan on boolpart_null boolpart_2 - Filter: (a IS NOT TRUE) -(5 rows) - -explain (costs off) select * from boolpart where a is not true and a is not false; - QUERY PLAN --------------------------------------------------- - Seq Scan on boolpart_null boolpart 
- Filter: ((a IS NOT TRUE) AND (a IS NOT FALSE)) -(2 rows) - -explain (costs off) select * from boolpart where a is not false; - QUERY PLAN --------------------------------------------- - Append - -> Seq Scan on boolpart_t boolpart_1 - Filter: (a IS NOT FALSE) - -> Seq Scan on boolpart_null boolpart_2 - Filter: (a IS NOT FALSE) -(5 rows) - -explain (costs off) select * from boolpart where a is not unknown; - QUERY PLAN ------------------------------------------------ - Append - -> Seq Scan on boolpart_f boolpart_1 - Filter: (a IS NOT UNKNOWN) - -> Seq Scan on boolpart_t boolpart_2 - Filter: (a IS NOT UNKNOWN) - -> Seq Scan on boolpart_default boolpart_3 - Filter: (a IS NOT UNKNOWN) -(7 rows) - -select * from boolpart where a is not true; - a ---- - f - -(2 rows) - -select * from boolpart where a is not true and a is not false; - a ---- - -(1 row) - -select * from boolpart where a is not false; - a ---- - t - -(2 rows) - -select * from boolpart where a is not unknown; - a ---- - f - t -(2 rows) - --- check that all partitions are pruned when faced with conflicting clauses -explain (costs off) select * from boolpart where a is not unknown and a is unknown; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -explain (costs off) select * from boolpart where a is false and a is unknown; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -explain (costs off) select * from boolpart where a is true and a is unknown; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - --- inverse boolean partitioning - a seemingly unlikely design, but we've got --- code for it, so we'd better test it. -create table iboolpart (a bool) partition by list ((not a)); -create table iboolpart_default partition of iboolpart default; -create table iboolpart_f partition of iboolpart for values in ('true'); -create table iboolpart_t partition of iboolpart for values in ('false'); -insert into iboolpart values (true), (false), (null); -explain (costs off) select * from iboolpart where a in (true, false); - QUERY PLAN -------------------------------------------------- - Append - -> Seq Scan on iboolpart_t iboolpart_1 - Filter: (a = ANY ('{t,f}'::boolean[])) - -> Seq Scan on iboolpart_f iboolpart_2 - Filter: (a = ANY ('{t,f}'::boolean[])) - -> Seq Scan on iboolpart_default iboolpart_3 - Filter: (a = ANY ('{t,f}'::boolean[])) -(7 rows) - -explain (costs off) select * from iboolpart where a = false; - QUERY PLAN ------------------------------------ - Seq Scan on iboolpart_f iboolpart - Filter: (NOT a) -(2 rows) - -explain (costs off) select * from iboolpart where not a = false; - QUERY PLAN ------------------------------------ - Seq Scan on iboolpart_t iboolpart - Filter: a -(2 rows) - -explain (costs off) select * from iboolpart where a is true or a is not true; - QUERY PLAN --------------------------------------------------- - Append - -> Seq Scan on iboolpart_t iboolpart_1 - Filter: ((a IS TRUE) OR (a IS NOT TRUE)) - -> Seq Scan on iboolpart_f iboolpart_2 - Filter: ((a IS TRUE) OR (a IS NOT TRUE)) - -> Seq Scan on iboolpart_default iboolpart_3 - Filter: ((a IS TRUE) OR (a IS NOT TRUE)) -(7 rows) - -explain (costs off) select * from iboolpart where a is not true; - QUERY PLAN -------------------------------------------------- - Append - -> Seq Scan on iboolpart_t iboolpart_1 - Filter: (a IS NOT TRUE) - -> Seq Scan on iboolpart_f iboolpart_2 - Filter: (a IS NOT TRUE) - -> Seq Scan on iboolpart_default iboolpart_3 - Filter: 
(a IS NOT TRUE) -(7 rows) - -explain (costs off) select * from iboolpart where a is not true and a is not false; - QUERY PLAN --------------------------------------------------------- - Append - -> Seq Scan on iboolpart_t iboolpart_1 - Filter: ((a IS NOT TRUE) AND (a IS NOT FALSE)) - -> Seq Scan on iboolpart_f iboolpart_2 - Filter: ((a IS NOT TRUE) AND (a IS NOT FALSE)) - -> Seq Scan on iboolpart_default iboolpart_3 - Filter: ((a IS NOT TRUE) AND (a IS NOT FALSE)) -(7 rows) - -explain (costs off) select * from iboolpart where a is unknown; - QUERY PLAN -------------------------------------------------- - Append - -> Seq Scan on iboolpart_t iboolpart_1 - Filter: (a IS UNKNOWN) - -> Seq Scan on iboolpart_f iboolpart_2 - Filter: (a IS UNKNOWN) - -> Seq Scan on iboolpart_default iboolpart_3 - Filter: (a IS UNKNOWN) -(7 rows) - -explain (costs off) select * from iboolpart where a is not unknown; - QUERY PLAN -------------------------------------------------- - Append - -> Seq Scan on iboolpart_t iboolpart_1 - Filter: (a IS NOT UNKNOWN) - -> Seq Scan on iboolpart_f iboolpart_2 - Filter: (a IS NOT UNKNOWN) - -> Seq Scan on iboolpart_default iboolpart_3 - Filter: (a IS NOT UNKNOWN) -(7 rows) - -select * from iboolpart where a in (true, false); - a ---- - t - f -(2 rows) - -select * from iboolpart where a = false; - a ---- - f -(1 row) - -select * from iboolpart where not a = false; - a ---- - t -(1 row) - -select * from iboolpart where a is true or a is not true; - a ---- - t - f - -(3 rows) - -select * from iboolpart where a is not true; - a ---- - f - -(2 rows) - -select * from iboolpart where a is not true and a is not false; - a ---- - -(1 row) - -select * from iboolpart where a is unknown; - a ---- - -(1 row) - -select * from iboolpart where a is not unknown; - a ---- - t - f -(2 rows) - --- Try some other permutations with a NULL partition instead of a DEFAULT -delete from iboolpart where a is null; -create table iboolpart_null partition of iboolpart for values in (null); -insert into iboolpart values(null); --- Pruning shouldn't take place for these. 
Just check the result is correct -select * from iboolpart where a is not true; - a ---- - f - -(2 rows) - -select * from iboolpart where a is not true and a is not false; - a ---- - -(1 row) - -select * from iboolpart where a is not false; - a ---- - t - -(2 rows) - -create table boolrangep (a bool, b bool, c int) partition by range (a,b,c); -create table boolrangep_tf partition of boolrangep for values from ('true', 'false', 0) to ('true', 'false', 100); -create table boolrangep_ft partition of boolrangep for values from ('false', 'true', 0) to ('false', 'true', 100); -create table boolrangep_ff1 partition of boolrangep for values from ('false', 'false', 0) to ('false', 'false', 50); -create table boolrangep_ff2 partition of boolrangep for values from ('false', 'false', 50) to ('false', 'false', 100); -create table boolrangep_null partition of boolrangep default; --- try a more complex case that's been known to trip up pruning in the past -explain (costs off) select * from boolrangep where not a and not b and c = 25; - QUERY PLAN ----------------------------------------------- - Seq Scan on boolrangep_ff1 boolrangep - Filter: ((NOT a) AND (NOT b) AND (c = 25)) -(2 rows) - --- ensure we prune boolrangep_tf -explain (costs off) select * from boolrangep where a is not true and not b and c = 25; - QUERY PLAN ------------------------------------------------------------- - Append - -> Seq Scan on boolrangep_ff1 boolrangep_1 - Filter: ((a IS NOT TRUE) AND (NOT b) AND (c = 25)) - -> Seq Scan on boolrangep_ff2 boolrangep_2 - Filter: ((a IS NOT TRUE) AND (NOT b) AND (c = 25)) - -> Seq Scan on boolrangep_ft boolrangep_3 - Filter: ((a IS NOT TRUE) AND (NOT b) AND (c = 25)) - -> Seq Scan on boolrangep_null boolrangep_4 - Filter: ((a IS NOT TRUE) AND (NOT b) AND (c = 25)) -(9 rows) - --- ensure we prune everything apart from boolrangep_tf and boolrangep_null -explain (costs off) select * from boolrangep where a is not false and not b and c = 25; - QUERY PLAN -------------------------------------------------------------- - Append - -> Seq Scan on boolrangep_tf boolrangep_1 - Filter: ((a IS NOT FALSE) AND (NOT b) AND (c = 25)) - -> Seq Scan on boolrangep_null boolrangep_2 - Filter: ((a IS NOT FALSE) AND (NOT b) AND (c = 25)) -(5 rows) - --- test scalar-to-array operators -create table coercepart (a varchar) partition by list (a); -create table coercepart_ab partition of coercepart for values in ('ab'); -create table coercepart_bc partition of coercepart for values in ('bc'); -create table coercepart_cd partition of coercepart for values in ('cd'); -explain (costs off) select * from coercepart where a in ('ab', to_char(125, '999')); - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------- - Append - -> Seq Scan on coercepart_ab coercepart_1 - Filter: ((a)::text = ANY ((ARRAY['ab'::character varying, (to_char(125, '999'::text))::character varying])::text[])) - -> Seq Scan on coercepart_bc coercepart_2 - Filter: ((a)::text = ANY ((ARRAY['ab'::character varying, (to_char(125, '999'::text))::character varying])::text[])) - -> Seq Scan on coercepart_cd coercepart_3 - Filter: ((a)::text = ANY ((ARRAY['ab'::character varying, (to_char(125, '999'::text))::character varying])::text[])) -(7 rows) - -explain (costs off) select * from coercepart where a ~ any ('{ab}'); - QUERY PLAN ----------------------------------------------------- - Append - -> Seq Scan on coercepart_ab coercepart_1 - Filter: ((a)::text ~ ANY ('{ab}'::text[])) - 
->  Seq Scan on coercepart_bc coercepart_2
-         Filter: ((a)::text ~ ANY ('{ab}'::text[]))
-   ->  Seq Scan on coercepart_cd coercepart_3
-         Filter: ((a)::text ~ ANY ('{ab}'::text[]))
-(7 rows)
-
-explain (costs off) select * from coercepart where a !~ all ('{ab}');
-                     QUERY PLAN
------------------------------------------------------
- Append
-   ->  Seq Scan on coercepart_ab coercepart_1
-         Filter: ((a)::text !~ ALL ('{ab}'::text[]))
-   ->  Seq Scan on coercepart_bc coercepart_2
-         Filter: ((a)::text !~ ALL ('{ab}'::text[]))
-   ->  Seq Scan on coercepart_cd coercepart_3
-         Filter: ((a)::text !~ ALL ('{ab}'::text[]))
-(7 rows)
-
-explain (costs off) select * from coercepart where a ~ any ('{ab,bc}');
-                      QUERY PLAN
--------------------------------------------------------
- Append
-   ->  Seq Scan on coercepart_ab coercepart_1
-         Filter: ((a)::text ~ ANY ('{ab,bc}'::text[]))
-   ->  Seq Scan on coercepart_bc coercepart_2
-         Filter: ((a)::text ~ ANY ('{ab,bc}'::text[]))
-   ->  Seq Scan on coercepart_cd coercepart_3
-         Filter: ((a)::text ~ ANY ('{ab,bc}'::text[]))
-(7 rows)
-
-explain (costs off) select * from coercepart where a !~ all ('{ab,bc}');
-                       QUERY PLAN
---------------------------------------------------------
- Append
-   ->  Seq Scan on coercepart_ab coercepart_1
-         Filter: ((a)::text !~ ALL ('{ab,bc}'::text[]))
-   ->  Seq Scan on coercepart_bc coercepart_2
-         Filter: ((a)::text !~ ALL ('{ab,bc}'::text[]))
-   ->  Seq Scan on coercepart_cd coercepart_3
-         Filter: ((a)::text !~ ALL ('{ab,bc}'::text[]))
-(7 rows)
-
-explain (costs off) select * from coercepart where a = any ('{ab,bc}');
-                      QUERY PLAN
--------------------------------------------------------
- Append
-   ->  Seq Scan on coercepart_ab coercepart_1
-         Filter: ((a)::text = ANY ('{ab,bc}'::text[]))
-   ->  Seq Scan on coercepart_bc coercepart_2
-         Filter: ((a)::text = ANY ('{ab,bc}'::text[]))
-(5 rows)
-
-explain (costs off) select * from coercepart where a = any ('{ab,null}');
-                    QUERY PLAN
----------------------------------------------------
- Seq Scan on coercepart_ab coercepart
-   Filter: ((a)::text = ANY ('{ab,NULL}'::text[]))
-(2 rows)
-
-explain (costs off) select * from coercepart where a = any (null::text[]);
-        QUERY PLAN
---------------------------
- Result
-   One-Time Filter: false
-(2 rows)
-
-explain (costs off) select * from coercepart where a = all ('{ab}');
-                  QUERY PLAN
-----------------------------------------------
- Seq Scan on coercepart_ab coercepart
-   Filter: ((a)::text = ALL ('{ab}'::text[]))
-(2 rows)
-
-explain (costs off) select * from coercepart where a = all ('{ab,bc}');
-        QUERY PLAN
---------------------------
- Result
-   One-Time Filter: false
-(2 rows)
-
-explain (costs off) select * from coercepart where a = all ('{ab,null}');
-        QUERY PLAN
---------------------------
- Result
-   One-Time Filter: false
-(2 rows)
-
-explain (costs off) select * from coercepart where a = all (null::text[]);
-        QUERY PLAN
---------------------------
- Result
-   One-Time Filter: false
-(2 rows)
-
-drop table coercepart;
-CREATE TABLE part (a INT, b INT) PARTITION BY LIST (a);
-CREATE TABLE part_p1 PARTITION OF part FOR VALUES IN (-2,-1,0,1,2);
-CREATE TABLE part_p2 PARTITION OF part DEFAULT PARTITION BY RANGE(a);
-CREATE TABLE part_p2_p1 PARTITION OF part_p2 DEFAULT;
-CREATE TABLE part_rev (b INT, c INT, a INT);
-ALTER TABLE part ATTACH PARTITION part_rev FOR VALUES IN (3); -- fail
-ERROR:  table "part_rev" contains column "c" not found in parent "part"
-DETAIL:  The new partition may contain only the columns present in parent.
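
[Editor's note: the ATTACH failure above is the behaviour under test, not a defect. A table can be attached as a partition only if its columns are exactly the parent's; extra columns are rejected, though ordinal position may differ (part_rev lists b and a in reverse order and still qualifies once "c" is gone). A minimal sketch of the same pattern, with hypothetical names:

    CREATE TABLE parent_t (a int, b int) PARTITION BY LIST (a);
    CREATE TABLE cand_t (b int, c int, a int);  -- extra column "c"; a and b merely reordered
    ALTER TABLE parent_t ATTACH PARTITION cand_t FOR VALUES IN (3);  -- fails: "c" not in parent
    ALTER TABLE cand_t DROP COLUMN c;
    ALTER TABLE parent_t ATTACH PARTITION cand_t FOR VALUES IN (3);  -- now succeeds

The test continues the same way: it drops the offending column and re-attaches.]
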
-ALTER TABLE part_rev DROP COLUMN c; -ALTER TABLE part ATTACH PARTITION part_rev FOR VALUES IN (3); -- now it's ok -INSERT INTO part VALUES (-1,-1), (1,1), (2,NULL), (NULL,-2),(NULL,NULL); -EXPLAIN (COSTS OFF) SELECT tableoid::regclass as part, a, b FROM part WHERE a IS NULL ORDER BY 1, 2, 3; - QUERY PLAN ---------------------------------------------------------- - Sort - Sort Key: ((part.tableoid)::regclass), part.a, part.b - -> Seq Scan on part_p2_p1 part - Filter: (a IS NULL) -(4 rows) - -EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM part p(x) ORDER BY x; - QUERY PLAN ------------------------------------------------ - Sort - Output: p.x, p.b - Sort Key: p.x - -> Append - -> Seq Scan on public.part_p1 p_1 - Output: p_1.x, p_1.b - -> Seq Scan on public.part_rev p_2 - Output: p_2.x, p_2.b - -> Seq Scan on public.part_p2_p1 p_3 - Output: p_3.x, p_3.b -(10 rows) - --- --- some more cases --- --- --- pruning for partitioned table appearing inside a sub-query --- --- pruning won't work for mc3p, because some keys are Params -explain (costs off) select * from mc2p t1, lateral (select count(*) from mc3p t2 where t2.a = t1.b and abs(t2.b) = 1 and t2.c = 1) s where t1.a = 1; - QUERY PLAN ------------------------------------------------------------------------ - Nested Loop - -> Append - -> Seq Scan on mc2p1 t1_1 - Filter: (a = 1) - -> Seq Scan on mc2p2 t1_2 - Filter: (a = 1) - -> Seq Scan on mc2p_default t1_3 - Filter: (a = 1) - -> Aggregate - -> Append - -> Seq Scan on mc3p0 t2_1 - Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) - -> Seq Scan on mc3p1 t2_2 - Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) - -> Seq Scan on mc3p2 t2_3 - Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) - -> Seq Scan on mc3p3 t2_4 - Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) - -> Seq Scan on mc3p4 t2_5 - Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) - -> Seq Scan on mc3p5 t2_6 - Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) - -> Seq Scan on mc3p6 t2_7 - Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) - -> Seq Scan on mc3p7 t2_8 - Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) - -> Seq Scan on mc3p_default t2_9 - Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) -(28 rows) - --- pruning should work fine, because values for a prefix of keys (a, b) are --- available -explain (costs off) select * from mc2p t1, lateral (select count(*) from mc3p t2 where t2.c = t1.b and abs(t2.b) = 1 and t2.a = 1) s where t1.a = 1; - QUERY PLAN ------------------------------------------------------------------------ - Nested Loop - -> Append - -> Seq Scan on mc2p1 t1_1 - Filter: (a = 1) - -> Seq Scan on mc2p2 t1_2 - Filter: (a = 1) - -> Seq Scan on mc2p_default t1_3 - Filter: (a = 1) - -> Aggregate - -> Append - -> Seq Scan on mc3p0 t2_1 - Filter: ((c = t1.b) AND (a = 1) AND (abs(b) = 1)) - -> Seq Scan on mc3p1 t2_2 - Filter: ((c = t1.b) AND (a = 1) AND (abs(b) = 1)) - -> Seq Scan on mc3p_default t2_3 - Filter: ((c = t1.b) AND (a = 1) AND (abs(b) = 1)) -(16 rows) - --- also here, because values for all keys are provided -explain (costs off) select * from mc2p t1, lateral (select count(*) from mc3p t2 where t2.a = 1 and abs(t2.b) = 1 and t2.c = 1) s where t1.a = 1; - QUERY PLAN --------------------------------------------------------------- - Nested Loop - -> Aggregate - -> Seq Scan on mc3p1 t2 - Filter: ((a = 1) AND (c = 1) AND (abs(b) = 1)) - -> Append - -> Seq Scan on mc2p1 t1_1 - Filter: (a = 1) - -> Seq Scan on mc2p2 t1_2 - Filter: (a = 1) - -> Seq Scan on mc2p_default t1_3 - Filter: (a = 1) -(11 rows) - --- --- 
pruning with clauses containing <> operator --- --- doesn't prune range partitions -create table rp (a int) partition by range (a); -create table rp0 partition of rp for values from (minvalue) to (1); -create table rp1 partition of rp for values from (1) to (2); -create table rp2 partition of rp for values from (2) to (maxvalue); -explain (costs off) select * from rp where a <> 1; - QUERY PLAN ----------------------------- - Append - -> Seq Scan on rp0 rp_1 - Filter: (a <> 1) - -> Seq Scan on rp1 rp_2 - Filter: (a <> 1) - -> Seq Scan on rp2 rp_3 - Filter: (a <> 1) -(7 rows) - -explain (costs off) select * from rp where a <> 1 and a <> 2; - QUERY PLAN ------------------------------------------ - Append - -> Seq Scan on rp0 rp_1 - Filter: ((a <> 1) AND (a <> 2)) - -> Seq Scan on rp1 rp_2 - Filter: ((a <> 1) AND (a <> 2)) - -> Seq Scan on rp2 rp_3 - Filter: ((a <> 1) AND (a <> 2)) -(7 rows) - --- null partition should be eliminated due to strict <> clause. -explain (costs off) select * from lp where a <> 'a'; - QUERY PLAN ------------------------------------- - Append - -> Seq Scan on lp_ad lp_1 - Filter: (a <> 'a'::bpchar) - -> Seq Scan on lp_bc lp_2 - Filter: (a <> 'a'::bpchar) - -> Seq Scan on lp_ef lp_3 - Filter: (a <> 'a'::bpchar) - -> Seq Scan on lp_g lp_4 - Filter: (a <> 'a'::bpchar) - -> Seq Scan on lp_default lp_5 - Filter: (a <> 'a'::bpchar) -(11 rows) - --- ensure we detect contradictions in clauses; a can't be NULL and NOT NULL. -explain (costs off) select * from lp where a <> 'a' and a is null; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -explain (costs off) select * from lp where (a <> 'a' and a <> 'd') or a is null; - QUERY PLAN ------------------------------------------------------------------------------- - Append - -> Seq Scan on lp_bc lp_1 - Filter: (((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) OR (a IS NULL)) - -> Seq Scan on lp_ef lp_2 - Filter: (((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) OR (a IS NULL)) - -> Seq Scan on lp_g lp_3 - Filter: (((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) OR (a IS NULL)) - -> Seq Scan on lp_null lp_4 - Filter: (((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) OR (a IS NULL)) - -> Seq Scan on lp_default lp_5 - Filter: (((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) OR (a IS NULL)) -(11 rows) - --- check that it also works for a partitioned table that's not root, --- which in this case are partitions of rlp that are themselves --- list-partitioned on b -explain (costs off) select * from rlp where a = 15 and b <> 'ab' and b <> 'cd' and b <> 'xy' and b is not null; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------- - Append - -> Seq Scan on rlp3efgh rlp_1 - Filter: ((b IS NOT NULL) AND ((b)::text <> 'ab'::text) AND ((b)::text <> 'cd'::text) AND ((b)::text <> 'xy'::text) AND (a = 15)) - -> Seq Scan on rlp3_default rlp_2 - Filter: ((b IS NOT NULL) AND ((b)::text <> 'ab'::text) AND ((b)::text <> 'cd'::text) AND ((b)::text <> 'xy'::text) AND (a = 15)) -(5 rows) - --- --- different collations for different keys with same expression --- -create table coll_pruning_multi (a text) partition by range (substr(a, 1) collate "POSIX", substr(a, 1) collate "C"); -create table coll_pruning_multi1 partition of coll_pruning_multi for values from ('a', 'a') to ('a', 'e'); -create table coll_pruning_multi2 partition of coll_pruning_multi for values from ('a', 'e') to ('a', 'z'); -create table coll_pruning_multi3 partition of 
coll_pruning_multi for values from ('b', 'a') to ('b', 'e'); --- no pruning, because no value for the leading key -explain (costs off) select * from coll_pruning_multi where substr(a, 1) = 'e' collate "C"; - QUERY PLAN ------------------------------------------------------------- - Append - -> Seq Scan on coll_pruning_multi1 coll_pruning_multi_1 - Filter: (substr(a, 1) = 'e'::text COLLATE "C") - -> Seq Scan on coll_pruning_multi2 coll_pruning_multi_2 - Filter: (substr(a, 1) = 'e'::text COLLATE "C") - -> Seq Scan on coll_pruning_multi3 coll_pruning_multi_3 - Filter: (substr(a, 1) = 'e'::text COLLATE "C") -(7 rows) - --- pruning, with a value provided for the leading key -explain (costs off) select * from coll_pruning_multi where substr(a, 1) = 'a' collate "POSIX"; - QUERY PLAN ------------------------------------------------------------- - Append - -> Seq Scan on coll_pruning_multi1 coll_pruning_multi_1 - Filter: (substr(a, 1) = 'a'::text COLLATE "POSIX") - -> Seq Scan on coll_pruning_multi2 coll_pruning_multi_2 - Filter: (substr(a, 1) = 'a'::text COLLATE "POSIX") -(5 rows) - --- pruning, with values provided for both keys -explain (costs off) select * from coll_pruning_multi where substr(a, 1) = 'e' collate "C" and substr(a, 1) = 'a' collate "POSIX"; - QUERY PLAN ---------------------------------------------------------------------------------------------------- - Seq Scan on coll_pruning_multi2 coll_pruning_multi - Filter: ((substr(a, 1) = 'e'::text COLLATE "C") AND (substr(a, 1) = 'a'::text COLLATE "POSIX")) -(2 rows) - --- --- LIKE operators don't prune --- -create table like_op_noprune (a text) partition by list (a); -create table like_op_noprune1 partition of like_op_noprune for values in ('ABC'); -create table like_op_noprune2 partition of like_op_noprune for values in ('BCD'); -explain (costs off) select * from like_op_noprune where a like '%BC'; - QUERY PLAN ------------------------------------------------------- - Append - -> Seq Scan on like_op_noprune1 like_op_noprune_1 - Filter: (a ~~ '%BC'::text) - -> Seq Scan on like_op_noprune2 like_op_noprune_2 - Filter: (a ~~ '%BC'::text) -(5 rows) - --- --- tests wherein clause value requires a cross-type comparison function --- -create table lparted_by_int2 (a smallint) partition by list (a); -create table lparted_by_int2_1 partition of lparted_by_int2 for values in (1); -create table lparted_by_int2_16384 partition of lparted_by_int2 for values in (16384); -explain (costs off) select * from lparted_by_int2 where a = 100_000_000_000_000; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -create table rparted_by_int2 (a smallint) partition by range (a); -create table rparted_by_int2_1 partition of rparted_by_int2 for values from (1) to (10); -create table rparted_by_int2_16384 partition of rparted_by_int2 for values from (10) to (16384); --- all partitions pruned -explain (costs off) select * from rparted_by_int2 where a > 100_000_000_000_000; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -create table rparted_by_int2_maxvalue partition of rparted_by_int2 for values from (16384) to (maxvalue); --- all partitions but rparted_by_int2_maxvalue pruned -explain (costs off) select * from rparted_by_int2 where a > 100_000_000_000_000; - QUERY PLAN ------------------------------------------------------- - Seq Scan on rparted_by_int2_maxvalue rparted_by_int2 - Filter: (a > '100000000000000'::bigint) -(2 rows) - -drop table lp, coll_pruning, rlp, mc3p, mc2p, boolpart, 
iboolpart, boolrangep, rp, coll_pruning_multi, like_op_noprune, lparted_by_int2, rparted_by_int2; --- --- Test Partition pruning for HASH partitioning --- --- Use hand-rolled hash functions and operator classes to get predictable --- result on different machines. See the definitions of --- part_test_int4_ops and part_test_text_ops in test_setup.sql. --- -create table hp (a int, b text, c int) - partition by hash (a part_test_int4_ops, b part_test_text_ops); -create table hp0 partition of hp for values with (modulus 4, remainder 0); -create table hp3 partition of hp for values with (modulus 4, remainder 3); -create table hp1 partition of hp for values with (modulus 4, remainder 1); -create table hp2 partition of hp for values with (modulus 4, remainder 2); -insert into hp values (null, null, 0); -insert into hp values (1, null, 1); -insert into hp values (1, 'xxx', 2); -insert into hp values (null, 'xxx', 3); -insert into hp values (2, 'xxx', 4); -insert into hp values (1, 'abcde', 5); -select tableoid::regclass, * from hp order by c; - tableoid | a | b | c -----------+---+-------+--- - hp0 | | | 0 - hp1 | 1 | | 1 - hp0 | 1 | xxx | 2 - hp2 | | xxx | 3 - hp3 | 2 | xxx | 4 - hp2 | 1 | abcde | 5 -(6 rows) - --- partial keys won't prune, nor would non-equality conditions -explain (costs off) select * from hp where a = 1; - QUERY PLAN ----------------------------- - Append - -> Seq Scan on hp0 hp_1 - Filter: (a = 1) - -> Seq Scan on hp1 hp_2 - Filter: (a = 1) - -> Seq Scan on hp2 hp_3 - Filter: (a = 1) - -> Seq Scan on hp3 hp_4 - Filter: (a = 1) -(9 rows) - -explain (costs off) select * from hp where b = 'xxx'; - QUERY PLAN ------------------------------------ - Append - -> Seq Scan on hp0 hp_1 - Filter: (b = 'xxx'::text) - -> Seq Scan on hp1 hp_2 - Filter: (b = 'xxx'::text) - -> Seq Scan on hp2 hp_3 - Filter: (b = 'xxx'::text) - -> Seq Scan on hp3 hp_4 - Filter: (b = 'xxx'::text) -(9 rows) - -explain (costs off) select * from hp where a is null; - QUERY PLAN ------------------------------ - Append - -> Seq Scan on hp0 hp_1 - Filter: (a IS NULL) - -> Seq Scan on hp1 hp_2 - Filter: (a IS NULL) - -> Seq Scan on hp2 hp_3 - Filter: (a IS NULL) - -> Seq Scan on hp3 hp_4 - Filter: (a IS NULL) -(9 rows) - -explain (costs off) select * from hp where b is null; - QUERY PLAN ------------------------------ - Append - -> Seq Scan on hp0 hp_1 - Filter: (b IS NULL) - -> Seq Scan on hp1 hp_2 - Filter: (b IS NULL) - -> Seq Scan on hp2 hp_3 - Filter: (b IS NULL) - -> Seq Scan on hp3 hp_4 - Filter: (b IS NULL) -(9 rows) - -explain (costs off) select * from hp where a < 1 and b = 'xxx'; - QUERY PLAN -------------------------------------------------- - Append - -> Seq Scan on hp0 hp_1 - Filter: ((a < 1) AND (b = 'xxx'::text)) - -> Seq Scan on hp1 hp_2 - Filter: ((a < 1) AND (b = 'xxx'::text)) - -> Seq Scan on hp2 hp_3 - Filter: ((a < 1) AND (b = 'xxx'::text)) - -> Seq Scan on hp3 hp_4 - Filter: ((a < 1) AND (b = 'xxx'::text)) -(9 rows) - -explain (costs off) select * from hp where a <> 1 and b = 'yyy'; - QUERY PLAN --------------------------------------------------- - Append - -> Seq Scan on hp0 hp_1 - Filter: ((a <> 1) AND (b = 'yyy'::text)) - -> Seq Scan on hp1 hp_2 - Filter: ((a <> 1) AND (b = 'yyy'::text)) - -> Seq Scan on hp2 hp_3 - Filter: ((a <> 1) AND (b = 'yyy'::text)) - -> Seq Scan on hp3 hp_4 - Filter: ((a <> 1) AND (b = 'yyy'::text)) -(9 rows) - -explain (costs off) select * from hp where a <> 1 and b <> 'xxx'; - QUERY PLAN ---------------------------------------------------- - Append - -> Seq Scan on 
hp0 hp_1 - Filter: ((a <> 1) AND (b <> 'xxx'::text)) - -> Seq Scan on hp1 hp_2 - Filter: ((a <> 1) AND (b <> 'xxx'::text)) - -> Seq Scan on hp2 hp_3 - Filter: ((a <> 1) AND (b <> 'xxx'::text)) - -> Seq Scan on hp3 hp_4 - Filter: ((a <> 1) AND (b <> 'xxx'::text)) -(9 rows) - --- pruning should work if either a value or a IS NULL clause is provided for --- each of the keys -explain (costs off) select * from hp where a is null and b is null; - QUERY PLAN ------------------------------------------ - Seq Scan on hp0 hp - Filter: ((a IS NULL) AND (b IS NULL)) -(2 rows) - -explain (costs off) select * from hp where a = 1 and b is null; - QUERY PLAN -------------------------------------- - Seq Scan on hp1 hp - Filter: ((b IS NULL) AND (a = 1)) -(2 rows) - -explain (costs off) select * from hp where a = 1 and b = 'xxx'; - QUERY PLAN -------------------------------------------- - Seq Scan on hp0 hp - Filter: ((a = 1) AND (b = 'xxx'::text)) -(2 rows) - -explain (costs off) select * from hp where a is null and b = 'xxx'; - QUERY PLAN ------------------------------------------------ - Seq Scan on hp2 hp - Filter: ((a IS NULL) AND (b = 'xxx'::text)) -(2 rows) - -explain (costs off) select * from hp where a = 2 and b = 'xxx'; - QUERY PLAN -------------------------------------------- - Seq Scan on hp3 hp - Filter: ((a = 2) AND (b = 'xxx'::text)) -(2 rows) - -explain (costs off) select * from hp where a = 1 and b = 'abcde'; - QUERY PLAN ---------------------------------------------- - Seq Scan on hp2 hp - Filter: ((a = 1) AND (b = 'abcde'::text)) -(2 rows) - -explain (costs off) select * from hp where (a = 1 and b = 'abcde') or (a = 2 and b = 'xxx') or (a is null and b is null); - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------- - Append - -> Seq Scan on hp0 hp_1 - Filter: (((a = 1) AND (b = 'abcde'::text)) OR ((a = 2) AND (b = 'xxx'::text)) OR ((a IS NULL) AND (b IS NULL))) - -> Seq Scan on hp2 hp_2 - Filter: (((a = 1) AND (b = 'abcde'::text)) OR ((a = 2) AND (b = 'xxx'::text)) OR ((a IS NULL) AND (b IS NULL))) - -> Seq Scan on hp3 hp_3 - Filter: (((a = 1) AND (b = 'abcde'::text)) OR ((a = 2) AND (b = 'xxx'::text)) OR ((a IS NULL) AND (b IS NULL))) -(7 rows) - --- test pruning when not all the partitions exist -drop table hp1; -drop table hp3; -explain (costs off) select * from hp where a = 1 and b = 'abcde'; - QUERY PLAN ---------------------------------------------- - Seq Scan on hp2 hp - Filter: ((a = 1) AND (b = 'abcde'::text)) -(2 rows) - -explain (costs off) select * from hp where a = 1 and b = 'abcde' and - (c = 2 or c = 3); - QUERY PLAN ----------------------------------------------------------------------- - Seq Scan on hp2 hp - Filter: ((a = 1) AND (b = 'abcde'::text) AND ((c = 2) OR (c = 3))) -(2 rows) - -drop table hp2; -explain (costs off) select * from hp where a = 1 and b = 'abcde' and - (c = 2 or c = 3); - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - --- --- Test runtime partition pruning --- -create table ab (a int not null, b int not null) partition by list (a); -create table ab_a2 partition of ab for values in(2) partition by list (b); -create table ab_a2_b1 partition of ab_a2 for values in (1); -create table ab_a2_b2 partition of ab_a2 for values in (2); -create table ab_a2_b3 partition of ab_a2 for values in (3); -create table ab_a1 partition of ab for values in(1) partition by list (b); -create table ab_a1_b1 partition of ab_a1 for values in (1); -create table 
ab_a1_b2 partition of ab_a1 for values in (2); -create table ab_a1_b3 partition of ab_a1 for values in (3); -create table ab_a3 partition of ab for values in(3) partition by list (b); -create table ab_a3_b1 partition of ab_a3 for values in (1); -create table ab_a3_b2 partition of ab_a3 for values in (2); -create table ab_a3_b3 partition of ab_a3 for values in (3); --- Disallow index only scans as concurrent transactions may stop visibility --- bits being set causing "Heap Fetches" to be unstable in the EXPLAIN ANALYZE --- output. -set enable_indexonlyscan = off; -prepare ab_q1 (int, int, int) as -select * from ab where a between $1 and $2 and b <= $3; -explain (analyze, costs off, summary off, timing off, buffers off) execute ab_q1 (2, 2, 3); - QUERY PLAN ---------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 6 - -> Seq Scan on ab_a2_b1 ab_1 (actual rows=0 loops=1) - Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) - -> Seq Scan on ab_a2_b2 ab_2 (actual rows=0 loops=1) - Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) - -> Seq Scan on ab_a2_b3 ab_3 (actual rows=0 loops=1) - Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) -(8 rows) - -explain (analyze, costs off, summary off, timing off, buffers off) execute ab_q1 (1, 2, 3); - QUERY PLAN ---------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 3 - -> Seq Scan on ab_a1_b1 ab_1 (actual rows=0 loops=1) - Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) - -> Seq Scan on ab_a1_b2 ab_2 (actual rows=0 loops=1) - Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) - -> Seq Scan on ab_a1_b3 ab_3 (actual rows=0 loops=1) - Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) - -> Seq Scan on ab_a2_b1 ab_4 (actual rows=0 loops=1) - Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) - -> Seq Scan on ab_a2_b2 ab_5 (actual rows=0 loops=1) - Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) - -> Seq Scan on ab_a2_b3 ab_6 (actual rows=0 loops=1) - Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) -(14 rows) - -deallocate ab_q1; --- Runtime pruning after optimizer pruning -prepare ab_q1 (int, int) as -select a from ab where a between $1 and $2 and b < 3; -explain (analyze, costs off, summary off, timing off, buffers off) execute ab_q1 (2, 2); - QUERY PLAN ---------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 4 - -> Seq Scan on ab_a2_b1 ab_1 (actual rows=0 loops=1) - Filter: ((a >= $1) AND (a <= $2) AND (b < 3)) - -> Seq Scan on ab_a2_b2 ab_2 (actual rows=0 loops=1) - Filter: ((a >= $1) AND (a <= $2) AND (b < 3)) -(6 rows) - -explain (analyze, costs off, summary off, timing off, buffers off) execute ab_q1 (2, 4); - QUERY PLAN ---------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 2 - -> Seq Scan on ab_a2_b1 ab_1 (actual rows=0 loops=1) - Filter: ((a >= $1) AND (a <= $2) AND (b < 3)) - -> Seq Scan on ab_a2_b2 ab_2 (actual rows=0 loops=1) - Filter: ((a >= $1) AND (a <= $2) AND (b < 3)) - -> Seq Scan on ab_a3_b1 ab_3 (actual rows=0 loops=1) - Filter: ((a >= $1) AND (a <= $2) AND (b < 3)) - -> Seq Scan on ab_a3_b2 ab_4 (actual rows=0 loops=1) - Filter: ((a >= $1) AND (a <= $2) AND (b < 3)) -(10 rows) - --- Ensure a mix of PARAM_EXTERN and PARAM_EXEC Params work together at --- different levels of partitioning. 
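
[Editor's note: in the two prepared statements that follow, $1 and $2 are PARAM_EXTERN Params, bound at EXECUTE time, while the scalar subquery (select 3) becomes a PARAM_EXEC Param fed from an InitPlan. The two are pruned at different moments: extern Params are resolved before execution starts and show up as "Subplans Removed", whereas exec Params are only known after the InitPlan has run, so partitions they rule out appear as "(never executed)". The "Subplans Removed" lines presume a generic plan, i.e. one in which the Params survive planning; when reproducing interactively this can be forced with:

    set plan_cache_mode = force_generic_plan;

With a custom plan the EXECUTE arguments are folded to constants and pruned by the planner instead, and no "Subplans Removed" line appears.]
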
-prepare ab_q2 (int, int) as -select a from ab where a between $1 and $2 and b < (select 3); -explain (analyze, costs off, summary off, timing off, buffers off) execute ab_q2 (2, 2); - QUERY PLAN ------------------------------------------------------------------------ - Append (actual rows=0 loops=1) - Subplans Removed: 6 - InitPlan 1 - -> Result (actual rows=1 loops=1) - -> Seq Scan on ab_a2_b1 ab_1 (actual rows=0 loops=1) - Filter: ((a >= $1) AND (a <= $2) AND (b < (InitPlan 1).col1)) - -> Seq Scan on ab_a2_b2 ab_2 (actual rows=0 loops=1) - Filter: ((a >= $1) AND (a <= $2) AND (b < (InitPlan 1).col1)) - -> Seq Scan on ab_a2_b3 ab_3 (never executed) - Filter: ((a >= $1) AND (a <= $2) AND (b < (InitPlan 1).col1)) -(10 rows) - --- As above, but swap the PARAM_EXEC Param to the first partition level -prepare ab_q3 (int, int) as -select a from ab where b between $1 and $2 and a < (select 3); -explain (analyze, costs off, summary off, timing off, buffers off) execute ab_q3 (2, 2); - QUERY PLAN ------------------------------------------------------------------------ - Append (actual rows=0 loops=1) - Subplans Removed: 6 - InitPlan 1 - -> Result (actual rows=1 loops=1) - -> Seq Scan on ab_a1_b2 ab_1 (actual rows=0 loops=1) - Filter: ((b >= $1) AND (b <= $2) AND (a < (InitPlan 1).col1)) - -> Seq Scan on ab_a2_b2 ab_2 (actual rows=0 loops=1) - Filter: ((b >= $1) AND (b <= $2) AND (a < (InitPlan 1).col1)) - -> Seq Scan on ab_a3_b2 ab_3 (never executed) - Filter: ((b >= $1) AND (b <= $2) AND (a < (InitPlan 1).col1)) -(10 rows) - --- --- Test runtime pruning with hash partitioned tables --- --- recreate partitions dropped above -create table hp1 partition of hp for values with (modulus 4, remainder 1); -create table hp2 partition of hp for values with (modulus 4, remainder 2); -create table hp3 partition of hp for values with (modulus 4, remainder 3); --- Ensure we correctly prune unneeded partitions when there is an IS NULL qual -prepare hp_q1 (text) as -select * from hp where a is null and b = $1; -explain (costs off) execute hp_q1('xxx'); - QUERY PLAN --------------------------------------------- - Append - Subplans Removed: 3 - -> Seq Scan on hp2 hp_1 - Filter: ((a IS NULL) AND (b = $1)) -(4 rows) - -deallocate hp_q1; -drop table hp; --- Test a backwards Append scan -create table list_part (a int) partition by list (a); -create table list_part1 partition of list_part for values in (1); -create table list_part2 partition of list_part for values in (2); -create table list_part3 partition of list_part for values in (3); -create table list_part4 partition of list_part for values in (4); -insert into list_part select generate_series(1,4); -begin; --- Don't select an actual value out of the table as the order of the Append's --- subnodes may not be stable. -declare cur SCROLL CURSOR for select 1 from list_part where a > (select 1) and a < (select 4); --- move beyond the final row -move 3 from cur; --- Ensure we get two rows. -fetch backward all from cur; - ?column? 
----------- - 1 - 1 -(2 rows) - -commit; -begin; --- Test run-time pruning using stable functions -create function list_part_fn(int) returns int as $$ begin return $1; end;$$ language plpgsql stable; --- Ensure pruning works using a stable function containing no Vars -explain (analyze, costs off, summary off, timing off, buffers off) select * from list_part where a = list_part_fn(1); - QUERY PLAN ------------------------------------------------------------------- - Append (actual rows=1 loops=1) - Subplans Removed: 3 - -> Seq Scan on list_part1 list_part_1 (actual rows=1 loops=1) - Filter: (a = list_part_fn(1)) -(4 rows) - --- Ensure pruning does not take place when the function has a Var parameter -explain (analyze, costs off, summary off, timing off, buffers off) select * from list_part where a = list_part_fn(a); - QUERY PLAN ------------------------------------------------------------------- - Append (actual rows=4 loops=1) - -> Seq Scan on list_part1 list_part_1 (actual rows=1 loops=1) - Filter: (a = list_part_fn(a)) - -> Seq Scan on list_part2 list_part_2 (actual rows=1 loops=1) - Filter: (a = list_part_fn(a)) - -> Seq Scan on list_part3 list_part_3 (actual rows=1 loops=1) - Filter: (a = list_part_fn(a)) - -> Seq Scan on list_part4 list_part_4 (actual rows=1 loops=1) - Filter: (a = list_part_fn(a)) -(9 rows) - --- Ensure pruning does not take place when the expression contains a Var. -explain (analyze, costs off, summary off, timing off, buffers off) select * from list_part where a = list_part_fn(1) + a; - QUERY PLAN ------------------------------------------------------------------- - Append (actual rows=0 loops=1) - -> Seq Scan on list_part1 list_part_1 (actual rows=0 loops=1) - Filter: (a = (list_part_fn(1) + a)) - Rows Removed by Filter: 1 - -> Seq Scan on list_part2 list_part_2 (actual rows=0 loops=1) - Filter: (a = (list_part_fn(1) + a)) - Rows Removed by Filter: 1 - -> Seq Scan on list_part3 list_part_3 (actual rows=0 loops=1) - Filter: (a = (list_part_fn(1) + a)) - Rows Removed by Filter: 1 - -> Seq Scan on list_part4 list_part_4 (actual rows=0 loops=1) - Filter: (a = (list_part_fn(1) + a)) - Rows Removed by Filter: 1 -(13 rows) - -rollback; -drop table list_part; --- Parallel append --- Parallel queries won't necessarily get as many workers as the planner --- asked for. This affects not only the "Workers Launched:" field of EXPLAIN --- results, but also row counts and loop counts for parallel scans, Gathers, --- and everything in between. This function filters out the values we can't --- rely on to be stable. --- This removes enough info that you might wonder why bother with EXPLAIN --- ANALYZE at all. The answer is that we need to see '(never executed)' --- notations because that's the only way to verify runtime pruning. 
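The explain_parallel_append function defined next feeds each EXPLAIN line through regexp_replace. The substitutions are easy to sanity-check in isolation; this standalone call (not part of the test itself) shows the row/loop normalization at work:

    select regexp_replace(
      '-> Parallel Seq Scan on ab_a2_b1 (actual rows=17 loops=3)',
      'actual rows=\d+ loops=\d+',
      'actual rows=N loops=N');
    -- result: -> Parallel Seq Scan on ab_a2_b1 (actual rows=N loops=N)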
-create function explain_parallel_append(text) returns setof text -language plpgsql as -$$ -declare - ln text; -begin - for ln in - execute format('explain (analyze, costs off, summary off, timing off, buffers off) %s', - $1) - loop - ln := regexp_replace(ln, 'Workers Launched: \d+', 'Workers Launched: N'); - ln := regexp_replace(ln, 'actual rows=\d+ loops=\d+', 'actual rows=N loops=N'); - ln := regexp_replace(ln, 'Rows Removed by Filter: \d+', 'Rows Removed by Filter: N'); - return next ln; - end loop; -end; -$$; -prepare ab_q4 (int, int) as -select avg(a) from ab where a between $1 and $2 and b < 4; --- Encourage use of parallel plans -set parallel_setup_cost = 0; -set parallel_tuple_cost = 0; -set min_parallel_table_scan_size = 0; -set max_parallel_workers_per_gather = 2; -select explain_parallel_append('execute ab_q4 (2, 2)'); - explain_parallel_append ------------------------------------------------------------------------------------- - Finalize Aggregate (actual rows=N loops=N) - -> Gather (actual rows=N loops=N) - Workers Planned: 2 - Workers Launched: N - -> Partial Aggregate (actual rows=N loops=N) - -> Parallel Append (actual rows=N loops=N) - Subplans Removed: 6 - -> Parallel Seq Scan on ab_a2_b1 ab_1 (actual rows=N loops=N) - Filter: ((a >= $1) AND (a <= $2) AND (b < 4)) - -> Parallel Seq Scan on ab_a2_b2 ab_2 (actual rows=N loops=N) - Filter: ((a >= $1) AND (a <= $2) AND (b < 4)) - -> Parallel Seq Scan on ab_a2_b3 ab_3 (actual rows=N loops=N) - Filter: ((a >= $1) AND (a <= $2) AND (b < 4)) -(13 rows) - --- Test run-time pruning with IN lists. -prepare ab_q5 (int, int, int) as -select avg(a) from ab where a in($1,$2,$3) and b < 4; -select explain_parallel_append('execute ab_q5 (1, 1, 1)'); - explain_parallel_append ------------------------------------------------------------------------------------- - Finalize Aggregate (actual rows=N loops=N) - -> Gather (actual rows=N loops=N) - Workers Planned: 2 - Workers Launched: N - -> Partial Aggregate (actual rows=N loops=N) - -> Parallel Append (actual rows=N loops=N) - Subplans Removed: 6 - -> Parallel Seq Scan on ab_a1_b1 ab_1 (actual rows=N loops=N) - Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) - -> Parallel Seq Scan on ab_a1_b2 ab_2 (actual rows=N loops=N) - Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) - -> Parallel Seq Scan on ab_a1_b3 ab_3 (actual rows=N loops=N) - Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) -(13 rows) - -select explain_parallel_append('execute ab_q5 (2, 3, 3)'); - explain_parallel_append ------------------------------------------------------------------------------------- - Finalize Aggregate (actual rows=N loops=N) - -> Gather (actual rows=N loops=N) - Workers Planned: 2 - Workers Launched: N - -> Partial Aggregate (actual rows=N loops=N) - -> Parallel Append (actual rows=N loops=N) - Subplans Removed: 3 - -> Parallel Seq Scan on ab_a2_b1 ab_1 (actual rows=N loops=N) - Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) - -> Parallel Seq Scan on ab_a2_b2 ab_2 (actual rows=N loops=N) - Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) - -> Parallel Seq Scan on ab_a2_b3 ab_3 (actual rows=N loops=N) - Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) - -> Parallel Seq Scan on ab_a3_b1 ab_4 (actual rows=N loops=N) - Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) - -> Parallel Seq Scan on ab_a3_b2 ab_5 (actual rows=N loops=N) - Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) - -> Parallel Seq Scan on ab_a3_b3 ab_6 (actual rows=N loops=N) - Filter: ((b < 4) AND (a = ANY 
(ARRAY[$1, $2, $3]))) -(19 rows) - --- Try some params whose values do not belong to any partition. -select explain_parallel_append('execute ab_q5 (33, 44, 55)'); - explain_parallel_append ------------------------------------------------------------ - Finalize Aggregate (actual rows=N loops=N) - -> Gather (actual rows=N loops=N) - Workers Planned: 2 - Workers Launched: N - -> Partial Aggregate (actual rows=N loops=N) - -> Parallel Append (actual rows=N loops=N) - Subplans Removed: 9 -(7 rows) - --- Test Parallel Append with PARAM_EXEC Params -select explain_parallel_append('select count(*) from ab where (a = (select 1) or a = (select 3)) and b = 2'); - explain_parallel_append ------------------------------------------------------------------------------------------------- - Aggregate (actual rows=N loops=N) - InitPlan 1 - -> Result (actual rows=N loops=N) - InitPlan 2 - -> Result (actual rows=N loops=N) - -> Gather (actual rows=N loops=N) - Workers Planned: 2 - Workers Launched: N - -> Parallel Append (actual rows=N loops=N) - -> Parallel Seq Scan on ab_a1_b2 ab_1 (actual rows=N loops=N) - Filter: ((b = 2) AND ((a = (InitPlan 1).col1) OR (a = (InitPlan 2).col1))) - -> Parallel Seq Scan on ab_a2_b2 ab_2 (never executed) - Filter: ((b = 2) AND ((a = (InitPlan 1).col1) OR (a = (InitPlan 2).col1))) - -> Parallel Seq Scan on ab_a3_b2 ab_3 (actual rows=N loops=N) - Filter: ((b = 2) AND ((a = (InitPlan 1).col1) OR (a = (InitPlan 2).col1))) -(15 rows) - --- Test pruning during parallel nested loop query -create table lprt_a (a int not null); --- Insert some values we won't find in ab -insert into lprt_a select 0 from generate_series(1,100); --- and insert some values that we should find. -insert into lprt_a values(1),(1); -analyze lprt_a; -create index ab_a2_b1_a_idx on ab_a2_b1 (a); -create index ab_a2_b2_a_idx on ab_a2_b2 (a); -create index ab_a2_b3_a_idx on ab_a2_b3 (a); -create index ab_a1_b1_a_idx on ab_a1_b1 (a); -create index ab_a1_b2_a_idx on ab_a1_b2 (a); -create index ab_a1_b3_a_idx on ab_a1_b3 (a); -create index ab_a3_b1_a_idx on ab_a3_b1 (a); -create index ab_a3_b2_a_idx on ab_a3_b2 (a); -create index ab_a3_b3_a_idx on ab_a3_b3 (a); -set enable_hashjoin = 0; -set enable_mergejoin = 0; -set enable_memoize = 0; -select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(0, 0, 1)'); - explain_parallel_append --------------------------------------------------------------------------------------------------------- - Finalize Aggregate (actual rows=N loops=N) - -> Gather (actual rows=N loops=N) - Workers Planned: 1 - Workers Launched: N - -> Partial Aggregate (actual rows=N loops=N) - -> Nested Loop (actual rows=N loops=N) - -> Parallel Seq Scan on lprt_a a (actual rows=N loops=N) - Filter: (a = ANY ('{0,0,1}'::integer[])) - -> Append (actual rows=N loops=N) - -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 ab_1 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 ab_2 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 ab_3 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 ab_4 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 ab_5 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 ab_6 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 ab_7 (never executed) - Index Cond: (a = a.a) - -> Index Scan 
using ab_a3_b2_a_idx on ab_a3_b2 ab_8 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 ab_9 (never executed) - Index Cond: (a = a.a) -(27 rows) - --- Ensure the same partitions are pruned when we make the nested loop --- parameter an Expr rather than a plain Param. -select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a + 0 where a.a in(0, 0, 1)'); - explain_parallel_append --------------------------------------------------------------------------------------------------------- - Finalize Aggregate (actual rows=N loops=N) - -> Gather (actual rows=N loops=N) - Workers Planned: 1 - Workers Launched: N - -> Partial Aggregate (actual rows=N loops=N) - -> Nested Loop (actual rows=N loops=N) - -> Parallel Seq Scan on lprt_a a (actual rows=N loops=N) - Filter: (a = ANY ('{0,0,1}'::integer[])) - -> Append (actual rows=N loops=N) - -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 ab_1 (actual rows=N loops=N) - Index Cond: (a = (a.a + 0)) - -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 ab_2 (actual rows=N loops=N) - Index Cond: (a = (a.a + 0)) - -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 ab_3 (actual rows=N loops=N) - Index Cond: (a = (a.a + 0)) - -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 ab_4 (never executed) - Index Cond: (a = (a.a + 0)) - -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 ab_5 (never executed) - Index Cond: (a = (a.a + 0)) - -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 ab_6 (never executed) - Index Cond: (a = (a.a + 0)) - -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 ab_7 (never executed) - Index Cond: (a = (a.a + 0)) - -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 ab_8 (never executed) - Index Cond: (a = (a.a + 0)) - -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 ab_9 (never executed) - Index Cond: (a = (a.a + 0)) -(27 rows) - -insert into lprt_a values(3),(3); -select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(1, 0, 3)'); - explain_parallel_append --------------------------------------------------------------------------------------------------------- - Finalize Aggregate (actual rows=N loops=N) - -> Gather (actual rows=N loops=N) - Workers Planned: 1 - Workers Launched: N - -> Partial Aggregate (actual rows=N loops=N) - -> Nested Loop (actual rows=N loops=N) - -> Parallel Seq Scan on lprt_a a (actual rows=N loops=N) - Filter: (a = ANY ('{1,0,3}'::integer[])) - -> Append (actual rows=N loops=N) - -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 ab_1 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 ab_2 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 ab_3 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 ab_4 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 ab_5 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 ab_6 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 ab_7 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 ab_8 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 ab_9 (actual rows=N loops=N) - Index Cond: (a = a.a) -(27 rows) - -select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(1, 0, 0)'); - explain_parallel_append 
--------------------------------------------------------------------------------------------------------- - Finalize Aggregate (actual rows=N loops=N) - -> Gather (actual rows=N loops=N) - Workers Planned: 1 - Workers Launched: N - -> Partial Aggregate (actual rows=N loops=N) - -> Nested Loop (actual rows=N loops=N) - -> Parallel Seq Scan on lprt_a a (actual rows=N loops=N) - Filter: (a = ANY ('{1,0,0}'::integer[])) - Rows Removed by Filter: N - -> Append (actual rows=N loops=N) - -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 ab_1 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 ab_2 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 ab_3 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 ab_4 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 ab_5 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 ab_6 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 ab_7 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 ab_8 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 ab_9 (never executed) - Index Cond: (a = a.a) -(28 rows) - -delete from lprt_a where a = 1; -select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(1, 0, 0)'); - explain_parallel_append -------------------------------------------------------------------------------------------------- - Finalize Aggregate (actual rows=N loops=N) - -> Gather (actual rows=N loops=N) - Workers Planned: 1 - Workers Launched: N - -> Partial Aggregate (actual rows=N loops=N) - -> Nested Loop (actual rows=N loops=N) - -> Parallel Seq Scan on lprt_a a (actual rows=N loops=N) - Filter: (a = ANY ('{1,0,0}'::integer[])) - Rows Removed by Filter: N - -> Append (actual rows=N loops=N) - -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 ab_1 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 ab_2 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 ab_3 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 ab_4 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 ab_5 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 ab_6 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 ab_7 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 ab_8 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 ab_9 (never executed) - Index Cond: (a = a.a) -(28 rows) - -reset enable_hashjoin; -reset enable_mergejoin; -reset enable_memoize; -reset parallel_setup_cost; -reset parallel_tuple_cost; -reset min_parallel_table_scan_size; -reset max_parallel_workers_per_gather; --- Test run-time partition pruning with an initplan -explain (analyze, costs off, summary off, timing off, buffers off) -select * from ab where a = (select max(a) from lprt_a) and b = (select max(a)-1 from lprt_a); - QUERY PLAN -------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - InitPlan 1 - -> Aggregate (actual rows=1 loops=1) - -> Seq Scan on lprt_a (actual rows=102 loops=1) - InitPlan 2 - 
-> Aggregate (actual rows=1 loops=1) - -> Seq Scan on lprt_a lprt_a_1 (actual rows=102 loops=1) - -> Bitmap Heap Scan on ab_a1_b1 ab_1 (never executed) - Recheck Cond: (a = (InitPlan 1).col1) - Filter: (b = (InitPlan 2).col1) - -> Bitmap Index Scan on ab_a1_b1_a_idx (never executed) - Index Cond: (a = (InitPlan 1).col1) - -> Bitmap Heap Scan on ab_a1_b2 ab_2 (never executed) - Recheck Cond: (a = (InitPlan 1).col1) - Filter: (b = (InitPlan 2).col1) - -> Bitmap Index Scan on ab_a1_b2_a_idx (never executed) - Index Cond: (a = (InitPlan 1).col1) - -> Bitmap Heap Scan on ab_a1_b3 ab_3 (never executed) - Recheck Cond: (a = (InitPlan 1).col1) - Filter: (b = (InitPlan 2).col1) - -> Bitmap Index Scan on ab_a1_b3_a_idx (never executed) - Index Cond: (a = (InitPlan 1).col1) - -> Bitmap Heap Scan on ab_a2_b1 ab_4 (never executed) - Recheck Cond: (a = (InitPlan 1).col1) - Filter: (b = (InitPlan 2).col1) - -> Bitmap Index Scan on ab_a2_b1_a_idx (never executed) - Index Cond: (a = (InitPlan 1).col1) - -> Bitmap Heap Scan on ab_a2_b2 ab_5 (never executed) - Recheck Cond: (a = (InitPlan 1).col1) - Filter: (b = (InitPlan 2).col1) - -> Bitmap Index Scan on ab_a2_b2_a_idx (never executed) - Index Cond: (a = (InitPlan 1).col1) - -> Bitmap Heap Scan on ab_a2_b3 ab_6 (never executed) - Recheck Cond: (a = (InitPlan 1).col1) - Filter: (b = (InitPlan 2).col1) - -> Bitmap Index Scan on ab_a2_b3_a_idx (never executed) - Index Cond: (a = (InitPlan 1).col1) - -> Bitmap Heap Scan on ab_a3_b1 ab_7 (never executed) - Recheck Cond: (a = (InitPlan 1).col1) - Filter: (b = (InitPlan 2).col1) - -> Bitmap Index Scan on ab_a3_b1_a_idx (never executed) - Index Cond: (a = (InitPlan 1).col1) - -> Bitmap Heap Scan on ab_a3_b2 ab_8 (actual rows=0 loops=1) - Recheck Cond: (a = (InitPlan 1).col1) - Filter: (b = (InitPlan 2).col1) - -> Bitmap Index Scan on ab_a3_b2_a_idx (actual rows=0 loops=1) - Index Cond: (a = (InitPlan 1).col1) - -> Bitmap Heap Scan on ab_a3_b3 ab_9 (never executed) - Recheck Cond: (a = (InitPlan 1).col1) - Filter: (b = (InitPlan 2).col1) - -> Bitmap Index Scan on ab_a3_b3_a_idx (never executed) - Index Cond: (a = (InitPlan 1).col1) -(52 rows) - --- Test run-time partition pruning with UNION ALL parents -explain (analyze, costs off, summary off, timing off, buffers off) -select * from (select * from ab where a = 1 union all select * from ab) ab where b = (select 1); - QUERY PLAN -------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - InitPlan 1 - -> Result (actual rows=1 loops=1) - -> Append (actual rows=0 loops=1) - -> Bitmap Heap Scan on ab_a1_b1 ab_11 (actual rows=0 loops=1) - Recheck Cond: (a = 1) - Filter: (b = (InitPlan 1).col1) - -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b2 ab_12 (never executed) - Recheck Cond: (a = 1) - Filter: (b = (InitPlan 1).col1) - -> Bitmap Index Scan on ab_a1_b2_a_idx (never executed) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b3 ab_13 (never executed) - Recheck Cond: (a = 1) - Filter: (b = (InitPlan 1).col1) - -> Bitmap Index Scan on ab_a1_b3_a_idx (never executed) - Index Cond: (a = 1) - -> Seq Scan on ab_a1_b1 ab_1 (actual rows=0 loops=1) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a1_b2 ab_2 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a1_b3 ab_3 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a2_b1 ab_4 (actual rows=0 loops=1) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on 
ab_a2_b2 ab_5 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a2_b3 ab_6 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a3_b1 ab_7 (actual rows=0 loops=1) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a3_b2 ab_8 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a3_b3 ab_9 (never executed) - Filter: (b = (InitPlan 1).col1) -(37 rows) - --- A case containing a UNION ALL with a non-partitioned child. -explain (analyze, costs off, summary off, timing off, buffers off) -select * from (select * from ab where a = 1 union all (values(10,5)) union all select * from ab) ab where b = (select 1); - QUERY PLAN -------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - InitPlan 1 - -> Result (actual rows=1 loops=1) - -> Append (actual rows=0 loops=1) - -> Bitmap Heap Scan on ab_a1_b1 ab_11 (actual rows=0 loops=1) - Recheck Cond: (a = 1) - Filter: (b = (InitPlan 1).col1) - -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b2 ab_12 (never executed) - Recheck Cond: (a = 1) - Filter: (b = (InitPlan 1).col1) - -> Bitmap Index Scan on ab_a1_b2_a_idx (never executed) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b3 ab_13 (never executed) - Recheck Cond: (a = 1) - Filter: (b = (InitPlan 1).col1) - -> Bitmap Index Scan on ab_a1_b3_a_idx (never executed) - Index Cond: (a = 1) - -> Result (actual rows=0 loops=1) - One-Time Filter: (5 = (InitPlan 1).col1) - -> Seq Scan on ab_a1_b1 ab_1 (actual rows=0 loops=1) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a1_b2 ab_2 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a1_b3 ab_3 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a2_b1 ab_4 (actual rows=0 loops=1) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a2_b2 ab_5 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a2_b3 ab_6 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a3_b1 ab_7 (actual rows=0 loops=1) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a3_b2 ab_8 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a3_b3 ab_9 (never executed) - Filter: (b = (InitPlan 1).col1) -(39 rows) - --- Another UNION ALL test, but containing a mix of exec init and exec run-time pruning. -create table xy_1 (x int, y int); -insert into xy_1 values(100,-10); -set enable_bitmapscan = 0; -set enable_indexscan = 0; -prepare ab_q6 as -select * from ( - select tableoid::regclass,a,b from ab -union all - select tableoid::regclass,x,y from xy_1 -union all - select tableoid::regclass,a,b from ab -) ab where a = $1 and b = (select -10); --- Ensure the xy_1 subplan is not pruned. 
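One point worth noting about prepared-statement tests such as ab_q6 below: run-time pruning on $1 is only visible when the plan cache uses a generic plan; a custom plan folds the parameter into a constant and prunes at plan time instead, so no "Subplans Removed" line would appear. On PostgreSQL 12 and later the choice can be forced explicitly; a sketch, assuming some prepared statement q:

    set plan_cache_mode = force_generic_plan;  -- keep $1 as a Param in the plan
    explain (costs off) execute q (1);         -- generic plan, pruned at run time
    reset plan_cache_mode;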
-explain (analyze, costs off, summary off, timing off, buffers off) execute ab_q6(1); - QUERY PLAN --------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 12 - InitPlan 1 - -> Result (actual rows=1 loops=1) - -> Seq Scan on ab_a1_b1 ab_1 (never executed) - Filter: ((a = $1) AND (b = (InitPlan 1).col1)) - -> Seq Scan on ab_a1_b2 ab_2 (never executed) - Filter: ((a = $1) AND (b = (InitPlan 1).col1)) - -> Seq Scan on ab_a1_b3 ab_3 (never executed) - Filter: ((a = $1) AND (b = (InitPlan 1).col1)) - -> Seq Scan on xy_1 (actual rows=0 loops=1) - Filter: ((x = $1) AND (y = (InitPlan 1).col1)) - Rows Removed by Filter: 1 - -> Seq Scan on ab_a1_b1 ab_4 (never executed) - Filter: ((a = $1) AND (b = (InitPlan 1).col1)) - -> Seq Scan on ab_a1_b2 ab_5 (never executed) - Filter: ((a = $1) AND (b = (InitPlan 1).col1)) - -> Seq Scan on ab_a1_b3 ab_6 (never executed) - Filter: ((a = $1) AND (b = (InitPlan 1).col1)) -(19 rows) - --- Ensure we see just the xy_1 row. -execute ab_q6(100); - tableoid | a | b -----------+-----+----- - xy_1 | 100 | -10 -(1 row) - -reset enable_bitmapscan; -reset enable_indexscan; -deallocate ab_q1; -deallocate ab_q2; -deallocate ab_q3; -deallocate ab_q4; -deallocate ab_q5; -deallocate ab_q6; --- UPDATE on a partition subtree has been seen to have problems. -insert into ab values (1,2); -select explain_analyze(' -update ab_a1 set b = 3 from ab where ab.a = 1 and ab.a = ab_a1.a;'); - explain_analyze -------------------------------------------------------------------------------------------- - Update on ab_a1 (actual rows=0 loops=1) - Update on ab_a1_b1 ab_a1_1 - Update on ab_a1_b2 ab_a1_2 - Update on ab_a1_b3 ab_a1_3 - -> Nested Loop (actual rows=1 loops=1) - -> Append (actual rows=1 loops=1) - -> Bitmap Heap Scan on ab_a1_b1 ab_a1_1 (actual rows=0 loops=1) - Recheck Cond: (a = 1) - -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b2 ab_a1_2 (actual rows=1 loops=1) - Recheck Cond: (a = 1) - Heap Blocks: exact=1 - -> Bitmap Index Scan on ab_a1_b2_a_idx (actual rows=1 loops=1) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b3 ab_a1_3 (actual rows=0 loops=1) - Recheck Cond: (a = 1) - Heap Blocks: exact=1 - -> Bitmap Index Scan on ab_a1_b3_a_idx (actual rows=1 loops=1) - Index Cond: (a = 1) - -> Materialize (actual rows=1 loops=1) - Storage: Memory Maximum Storage: NkB - -> Append (actual rows=1 loops=1) - -> Bitmap Heap Scan on ab_a1_b1 ab_1 (actual rows=0 loops=1) - Recheck Cond: (a = 1) - -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b2 ab_2 (actual rows=1 loops=1) - Recheck Cond: (a = 1) - Heap Blocks: exact=1 - -> Bitmap Index Scan on ab_a1_b2_a_idx (actual rows=1 loops=1) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b3 ab_3 (actual rows=0 loops=1) - Recheck Cond: (a = 1) - Heap Blocks: exact=1 - -> Bitmap Index Scan on ab_a1_b3_a_idx (actual rows=1 loops=1) - Index Cond: (a = 1) -(37 rows) - -table ab; - a | b ----+--- - 1 | 3 -(1 row) - --- Test UPDATE where source relation has run-time pruning enabled -truncate ab; -insert into ab values (1, 1), (1, 2), (1, 3), (2, 1); -select explain_analyze(' -update ab_a1 set b = 3 from ab_a2 where ab_a2.b = (select 1);'); - explain_analyze ------------------------------------------------------------------------------- - Update on ab_a1 (actual rows=0 loops=1) - Update on ab_a1_b1 ab_a1_1 - Update on ab_a1_b2 ab_a1_2 - Update on ab_a1_b3 
ab_a1_3 - InitPlan 1 - -> Result (actual rows=1 loops=1) - -> Nested Loop (actual rows=3 loops=1) - -> Append (actual rows=3 loops=1) - -> Seq Scan on ab_a1_b1 ab_a1_1 (actual rows=1 loops=1) - -> Seq Scan on ab_a1_b2 ab_a1_2 (actual rows=1 loops=1) - -> Seq Scan on ab_a1_b3 ab_a1_3 (actual rows=1 loops=1) - -> Materialize (actual rows=1 loops=3) - Storage: Memory Maximum Storage: NkB - -> Append (actual rows=1 loops=1) - -> Seq Scan on ab_a2_b1 ab_a2_1 (actual rows=1 loops=1) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a2_b2 ab_a2_2 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a2_b3 ab_a2_3 (never executed) - Filter: (b = (InitPlan 1).col1) -(20 rows) - -select tableoid::regclass, * from ab; - tableoid | a | b -----------+---+--- - ab_a1_b3 | 1 | 3 - ab_a1_b3 | 1 | 3 - ab_a1_b3 | 1 | 3 - ab_a2_b1 | 2 | 1 -(4 rows) - -drop table ab, lprt_a; --- Join -create table tbl1(col1 int); -insert into tbl1 values (501), (505); --- Basic table -create table tprt (col1 int) partition by range (col1); -create table tprt_1 partition of tprt for values from (1) to (501); -create table tprt_2 partition of tprt for values from (501) to (1001); -create table tprt_3 partition of tprt for values from (1001) to (2001); -create table tprt_4 partition of tprt for values from (2001) to (3001); -create table tprt_5 partition of tprt for values from (3001) to (4001); -create table tprt_6 partition of tprt for values from (4001) to (5001); -create index tprt1_idx on tprt_1 (col1); -create index tprt2_idx on tprt_2 (col1); -create index tprt3_idx on tprt_3 (col1); -create index tprt4_idx on tprt_4 (col1); -create index tprt5_idx on tprt_5 (col1); -create index tprt6_idx on tprt_6 (col1); -insert into tprt values (10), (20), (501), (502), (505), (1001), (4500); -set enable_hashjoin = off; -set enable_mergejoin = off; -explain (analyze, costs off, summary off, timing off, buffers off) -select * from tbl1 join tprt on tbl1.col1 > tprt.col1; - QUERY PLAN --------------------------------------------------------------------------- - Nested Loop (actual rows=6 loops=1) - -> Seq Scan on tbl1 (actual rows=2 loops=1) - -> Append (actual rows=3 loops=2) - -> Index Scan using tprt1_idx on tprt_1 (actual rows=2 loops=2) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt2_idx on tprt_2 (actual rows=2 loops=1) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt3_idx on tprt_3 (never executed) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt4_idx on tprt_4 (never executed) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt5_idx on tprt_5 (never executed) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt6_idx on tprt_6 (never executed) - Index Cond: (col1 < tbl1.col1) -(15 rows) - -explain (analyze, costs off, summary off, timing off, buffers off) -select * from tbl1 join tprt on tbl1.col1 = tprt.col1; - QUERY PLAN --------------------------------------------------------------------------- - Nested Loop (actual rows=2 loops=1) - -> Seq Scan on tbl1 (actual rows=2 loops=1) - -> Append (actual rows=1 loops=2) - -> Index Scan using tprt1_idx on tprt_1 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt2_idx on tprt_2 (actual rows=1 loops=2) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt3_idx on tprt_3 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt4_idx on tprt_4 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt5_idx on tprt_5 (never executed) - Index Cond: (col1 
= tbl1.col1) - -> Index Scan using tprt6_idx on tprt_6 (never executed) - Index Cond: (col1 = tbl1.col1) -(15 rows) - -select tbl1.col1, tprt.col1 from tbl1 -inner join tprt on tbl1.col1 > tprt.col1 -order by tbl1.col1, tprt.col1; - col1 | col1 -------+------ - 501 | 10 - 501 | 20 - 505 | 10 - 505 | 20 - 505 | 501 - 505 | 502 -(6 rows) - -select tbl1.col1, tprt.col1 from tbl1 -inner join tprt on tbl1.col1 = tprt.col1 -order by tbl1.col1, tprt.col1; - col1 | col1 -------+------ - 501 | 501 - 505 | 505 -(2 rows) - --- Multiple partitions -insert into tbl1 values (1001), (1010), (1011); -explain (analyze, costs off, summary off, timing off, buffers off) -select * from tbl1 inner join tprt on tbl1.col1 > tprt.col1; - QUERY PLAN --------------------------------------------------------------------------- - Nested Loop (actual rows=23 loops=1) - -> Seq Scan on tbl1 (actual rows=5 loops=1) - -> Append (actual rows=5 loops=5) - -> Index Scan using tprt1_idx on tprt_1 (actual rows=2 loops=5) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt2_idx on tprt_2 (actual rows=3 loops=4) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt3_idx on tprt_3 (actual rows=1 loops=2) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt4_idx on tprt_4 (never executed) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt5_idx on tprt_5 (never executed) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt6_idx on tprt_6 (never executed) - Index Cond: (col1 < tbl1.col1) -(15 rows) - -explain (analyze, costs off, summary off, timing off, buffers off) -select * from tbl1 inner join tprt on tbl1.col1 = tprt.col1; - QUERY PLAN --------------------------------------------------------------------------- - Nested Loop (actual rows=3 loops=1) - -> Seq Scan on tbl1 (actual rows=5 loops=1) - -> Append (actual rows=1 loops=5) - -> Index Scan using tprt1_idx on tprt_1 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt2_idx on tprt_2 (actual rows=1 loops=2) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt3_idx on tprt_3 (actual rows=0 loops=3) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt4_idx on tprt_4 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt5_idx on tprt_5 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt6_idx on tprt_6 (never executed) - Index Cond: (col1 = tbl1.col1) -(15 rows) - -select tbl1.col1, tprt.col1 from tbl1 -inner join tprt on tbl1.col1 > tprt.col1 -order by tbl1.col1, tprt.col1; - col1 | col1 -------+------ - 501 | 10 - 501 | 20 - 505 | 10 - 505 | 20 - 505 | 501 - 505 | 502 - 1001 | 10 - 1001 | 20 - 1001 | 501 - 1001 | 502 - 1001 | 505 - 1010 | 10 - 1010 | 20 - 1010 | 501 - 1010 | 502 - 1010 | 505 - 1010 | 1001 - 1011 | 10 - 1011 | 20 - 1011 | 501 - 1011 | 502 - 1011 | 505 - 1011 | 1001 -(23 rows) - -select tbl1.col1, tprt.col1 from tbl1 -inner join tprt on tbl1.col1 = tprt.col1 -order by tbl1.col1, tprt.col1; - col1 | col1 -------+------ - 501 | 501 - 505 | 505 - 1001 | 1001 -(3 rows) - --- Last partition -delete from tbl1; -insert into tbl1 values (4400); -explain (analyze, costs off, summary off, timing off, buffers off) -select * from tbl1 join tprt on tbl1.col1 < tprt.col1; - QUERY PLAN --------------------------------------------------------------------------- - Nested Loop (actual rows=1 loops=1) - -> Seq Scan on tbl1 (actual rows=1 loops=1) - -> Append (actual rows=1 loops=1) - -> Index Scan using tprt1_idx on tprt_1 (never executed) - Index Cond: (col1 > 
tbl1.col1) - -> Index Scan using tprt2_idx on tprt_2 (never executed) - Index Cond: (col1 > tbl1.col1) - -> Index Scan using tprt3_idx on tprt_3 (never executed) - Index Cond: (col1 > tbl1.col1) - -> Index Scan using tprt4_idx on tprt_4 (never executed) - Index Cond: (col1 > tbl1.col1) - -> Index Scan using tprt5_idx on tprt_5 (never executed) - Index Cond: (col1 > tbl1.col1) - -> Index Scan using tprt6_idx on tprt_6 (actual rows=1 loops=1) - Index Cond: (col1 > tbl1.col1) -(15 rows) - -select tbl1.col1, tprt.col1 from tbl1 -inner join tprt on tbl1.col1 < tprt.col1 -order by tbl1.col1, tprt.col1; - col1 | col1 -------+------ - 4400 | 4500 -(1 row) - --- No matching partition -delete from tbl1; -insert into tbl1 values (10000); -explain (analyze, costs off, summary off, timing off, buffers off) -select * from tbl1 join tprt on tbl1.col1 = tprt.col1; - QUERY PLAN -------------------------------------------------------------------- - Nested Loop (actual rows=0 loops=1) - -> Seq Scan on tbl1 (actual rows=1 loops=1) - -> Append (actual rows=0 loops=1) - -> Index Scan using tprt1_idx on tprt_1 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt2_idx on tprt_2 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt3_idx on tprt_3 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt4_idx on tprt_4 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt5_idx on tprt_5 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt6_idx on tprt_6 (never executed) - Index Cond: (col1 = tbl1.col1) -(15 rows) - -select tbl1.col1, tprt.col1 from tbl1 -inner join tprt on tbl1.col1 = tprt.col1 -order by tbl1.col1, tprt.col1; - col1 | col1 -------+------ -(0 rows) - -drop table tbl1, tprt; --- Test with columns defined in varying orders between each level -create table part_abc (a int not null, b int not null, c int not null) partition by list (a); -create table part_bac (b int not null, a int not null, c int not null) partition by list (b); -create table part_cab (c int not null, a int not null, b int not null) partition by list (c); -create table part_abc_p1 (a int not null, b int not null, c int not null); -alter table part_abc attach partition part_bac for values in(1); -alter table part_bac attach partition part_cab for values in(2); -alter table part_cab attach partition part_abc_p1 for values in(3); -prepare part_abc_q1 (int, int, int) as -select * from part_abc where a = $1 and b = $2 and c = $3; --- Single partition should be scanned. -explain (analyze, costs off, summary off, timing off, buffers off) execute part_abc_q1 (1, 2, 3); - QUERY PLAN ----------------------------------------------------------- - Seq Scan on part_abc_p1 part_abc (actual rows=0 loops=1) - Filter: ((a = $1) AND (b = $2) AND (c = $3)) -(2 rows) - -deallocate part_abc_q1; -drop table part_abc; --- Ensure that an Append node properly handles a sub-partitioned table --- matching without any of its leaf partitions matching the clause. 
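For multi-level layouts like the listp hierarchy created next, the partition tree can be inspected directly with pg_partition_tree (available since PostgreSQL 12). A sketch, runnable once the tables below exist:

    select relid, parentrelid, isleaf, level
    from pg_partition_tree('listp');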
-create table listp (a int, b int) partition by list (a); -create table listp_1 partition of listp for values in(1) partition by list (b); -create table listp_1_1 partition of listp_1 for values in(1); -create table listp_2 partition of listp for values in(2) partition by list (b); -create table listp_2_1 partition of listp_2 for values in(2); -select * from listp where b = 1; - a | b ----+--- -(0 rows) - --- Ensure that an Append node properly handles selection of all first level --- partitions before finally detecting the correct set of 2nd level partitions --- which match the given parameter. -prepare q1 (int,int) as select * from listp where b in ($1,$2); -explain (analyze, costs off, summary off, timing off, buffers off) execute q1 (1,1); - QUERY PLAN -------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 1 - -> Seq Scan on listp_1_1 listp_1 (actual rows=0 loops=1) - Filter: (b = ANY (ARRAY[$1, $2])) -(4 rows) - -explain (analyze, costs off, summary off, timing off, buffers off) execute q1 (2,2); - QUERY PLAN -------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 1 - -> Seq Scan on listp_2_1 listp_1 (actual rows=0 loops=1) - Filter: (b = ANY (ARRAY[$1, $2])) -(4 rows) - --- Try with no matching partitions. -explain (analyze, costs off, summary off, timing off, buffers off) execute q1 (0,0); - QUERY PLAN --------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 2 -(2 rows) - -deallocate q1; --- Test more complex cases where a not-equal condition further eliminates partitions. -prepare q1 (int,int,int,int) as select * from listp where b in($1,$2) and $3 <> b and $4 <> b; --- Both partitions allowed by IN clause, but one disallowed by <> clause -explain (analyze, costs off, summary off, timing off, buffers off) execute q1 (1,2,2,0); - QUERY PLAN -------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 1 - -> Seq Scan on listp_1_1 listp_1 (actual rows=0 loops=1) - Filter: ((b = ANY (ARRAY[$1, $2])) AND ($3 <> b) AND ($4 <> b)) -(4 rows) - --- Both partitions allowed by IN clause, then both excluded again by <> clauses. 
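To see why both subplans vanish in the EXPLAIN that follows: with arguments (1,2,2,1) the quals reduce to b IN (1,2) AND b <> 2 AND b <> 1, which no value of b can satisfy, so run-time pruning discards every partition. The contradiction is easy to verify standalone:

    select b from (values (1),(2)) v(b)
    where b in (1,2) and 2 <> b and 1 <> b;  -- returns no rows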
-explain (analyze, costs off, summary off, timing off, buffers off) execute q1 (1,2,2,1); - QUERY PLAN --------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 2 -(2 rows) - --- Ensure Params that evaluate to NULL properly prune away all partitions -explain (analyze, costs off, summary off, timing off, buffers off) -select * from listp where a = (select null::int); - QUERY PLAN ------------------------------------------------------- - Append (actual rows=0 loops=1) - InitPlan 1 - -> Result (actual rows=1 loops=1) - -> Seq Scan on listp_1_1 listp_1 (never executed) - Filter: (a = (InitPlan 1).col1) - -> Seq Scan on listp_2_1 listp_2 (never executed) - Filter: (a = (InitPlan 1).col1) -(7 rows) - -drop table listp; --- --- check that stable query clauses are only used in run-time pruning --- -create table stable_qual_pruning (a timestamp) partition by range (a); -create table stable_qual_pruning1 partition of stable_qual_pruning - for values from ('2000-01-01') to ('2000-02-01'); -create table stable_qual_pruning2 partition of stable_qual_pruning - for values from ('2000-02-01') to ('2000-03-01'); -create table stable_qual_pruning3 partition of stable_qual_pruning - for values from ('3000-02-01') to ('3000-03-01'); --- comparison against a stable value requires run-time pruning -explain (analyze, costs off, summary off, timing off, buffers off) -select * from stable_qual_pruning where a < localtimestamp; - QUERY PLAN --------------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 1 - -> Seq Scan on stable_qual_pruning1 stable_qual_pruning_1 (actual rows=0 loops=1) - Filter: (a < LOCALTIMESTAMP) - -> Seq Scan on stable_qual_pruning2 stable_qual_pruning_2 (actual rows=0 loops=1) - Filter: (a < LOCALTIMESTAMP) -(6 rows) - --- timestamp < timestamptz comparison is only stable, not immutable -explain (analyze, costs off, summary off, timing off, buffers off) -select * from stable_qual_pruning where a < '2000-02-01'::timestamptz; - QUERY PLAN --------------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 2 - -> Seq Scan on stable_qual_pruning1 stable_qual_pruning_1 (actual rows=0 loops=1) - Filter: (a < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) -(4 rows) - --- check ScalarArrayOp cases -explain (analyze, costs off, summary off, timing off, buffers off) -select * from stable_qual_pruning - where a = any(array['2010-02-01', '2020-01-01']::timestamp[]); - QUERY PLAN --------------------------------- - Result (actual rows=0 loops=1) - One-Time Filter: false -(2 rows) - -explain (analyze, costs off, summary off, timing off, buffers off) -select * from stable_qual_pruning - where a = any(array['2000-02-01', '2010-01-01']::timestamp[]); - QUERY PLAN ----------------------------------------------------------------------------------------------------------------- - Seq Scan on stable_qual_pruning2 stable_qual_pruning (actual rows=0 loops=1) - Filter: (a = ANY ('{"Tue Feb 01 00:00:00 2000","Fri Jan 01 00:00:00 2010"}'::timestamp without time zone[])) -(2 rows) - -explain (analyze, costs off, summary off, timing off, buffers off) -select * from stable_qual_pruning - where a = any(array['2000-02-01', localtimestamp]::timestamp[]); - QUERY PLAN ------------------------------------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 2 - -> 
Seq Scan on stable_qual_pruning2 stable_qual_pruning_1 (actual rows=0 loops=1) - Filter: (a = ANY (ARRAY['Tue Feb 01 00:00:00 2000'::timestamp without time zone, LOCALTIMESTAMP])) -(4 rows) - -explain (analyze, costs off, summary off, timing off, buffers off) -select * from stable_qual_pruning - where a = any(array['2010-02-01', '2020-01-01']::timestamptz[]); - QUERY PLAN --------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 3 -(2 rows) - -explain (analyze, costs off, summary off, timing off, buffers off) -select * from stable_qual_pruning - where a = any(array['2000-02-01', '2010-01-01']::timestamptz[]); - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 2 - -> Seq Scan on stable_qual_pruning2 stable_qual_pruning_1 (actual rows=0 loops=1) - Filter: (a = ANY ('{"Tue Feb 01 00:00:00 2000 PST","Fri Jan 01 00:00:00 2010 PST"}'::timestamp with time zone[])) -(4 rows) - -explain (analyze, costs off, summary off, timing off, buffers off) -select * from stable_qual_pruning - where a = any(null::timestamptz[]); - QUERY PLAN --------------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - -> Seq Scan on stable_qual_pruning1 stable_qual_pruning_1 (actual rows=0 loops=1) - Filter: (a = ANY (NULL::timestamp with time zone[])) - -> Seq Scan on stable_qual_pruning2 stable_qual_pruning_2 (actual rows=0 loops=1) - Filter: (a = ANY (NULL::timestamp with time zone[])) - -> Seq Scan on stable_qual_pruning3 stable_qual_pruning_3 (actual rows=0 loops=1) - Filter: (a = ANY (NULL::timestamp with time zone[])) -(7 rows) - -drop table stable_qual_pruning; --- --- Check that pruning with composite range partitioning works correctly when --- it must ignore clauses for trailing keys once it has seen a clause with --- non-inclusive operator for an earlier key --- -create table mc3p (a int, b int, c int) partition by range (a, abs(b), c); -create table mc3p0 partition of mc3p - for values from (0, 0, 0) to (0, maxvalue, maxvalue); -create table mc3p1 partition of mc3p - for values from (1, 1, 1) to (2, minvalue, minvalue); -create table mc3p2 partition of mc3p - for values from (2, minvalue, minvalue) to (3, maxvalue, maxvalue); -insert into mc3p values (0, 1, 1), (1, 1, 1), (2, 1, 1); -explain (analyze, costs off, summary off, timing off, buffers off) -select * from mc3p where a < 3 and abs(b) = 1; - QUERY PLAN --------------------------------------------------------- - Append (actual rows=3 loops=1) - -> Seq Scan on mc3p0 mc3p_1 (actual rows=1 loops=1) - Filter: ((a < 3) AND (abs(b) = 1)) - -> Seq Scan on mc3p1 mc3p_2 (actual rows=1 loops=1) - Filter: ((a < 3) AND (abs(b) = 1)) - -> Seq Scan on mc3p2 mc3p_3 (actual rows=1 loops=1) - Filter: ((a < 3) AND (abs(b) = 1)) -(7 rows) - --- --- Check that pruning with composite range partitioning works correctly when --- a combination of runtime parameters is specified, not all of whose values --- are available at the same time --- -prepare ps1 as - select * from mc3p where a = $1 and abs(b) < (select 3); -explain (analyze, costs off, summary off, timing off, buffers off) -execute ps1(1); - QUERY PLAN -------------------------------------------------------------- - Append (actual rows=1 loops=1) - Subplans Removed: 2 - InitPlan 1 - -> Result (actual rows=1 loops=1) - -> Seq Scan on mc3p1 mc3p_1 (actual rows=1 loops=1) - Filter: ((a = $1) AND 
(abs(b) < (InitPlan 1).col1)) -(6 rows) - -deallocate ps1; -prepare ps2 as - select * from mc3p where a <= $1 and abs(b) < (select 3); -explain (analyze, costs off, summary off, timing off, buffers off) -execute ps2(1); - QUERY PLAN --------------------------------------------------------------- - Append (actual rows=2 loops=1) - Subplans Removed: 1 - InitPlan 1 - -> Result (actual rows=1 loops=1) - -> Seq Scan on mc3p0 mc3p_1 (actual rows=1 loops=1) - Filter: ((a <= $1) AND (abs(b) < (InitPlan 1).col1)) - -> Seq Scan on mc3p1 mc3p_2 (actual rows=1 loops=1) - Filter: ((a <= $1) AND (abs(b) < (InitPlan 1).col1)) -(8 rows) - -deallocate ps2; -drop table mc3p; --- Ensure runtime pruning works with initplans params with boolean types -create table boolvalues (value bool not null); -insert into boolvalues values('t'),('f'); -create table boolp (a bool) partition by list (a); -create table boolp_t partition of boolp for values in('t'); -create table boolp_f partition of boolp for values in('f'); -explain (analyze, costs off, summary off, timing off, buffers off) -select * from boolp where a = (select value from boolvalues where value); - QUERY PLAN ------------------------------------------------------------ - Append (actual rows=0 loops=1) - InitPlan 1 - -> Seq Scan on boolvalues (actual rows=1 loops=1) - Filter: value - Rows Removed by Filter: 1 - -> Seq Scan on boolp_f boolp_1 (never executed) - Filter: (a = (InitPlan 1).col1) - -> Seq Scan on boolp_t boolp_2 (actual rows=0 loops=1) - Filter: (a = (InitPlan 1).col1) -(9 rows) - -explain (analyze, costs off, summary off, timing off, buffers off) -select * from boolp where a = (select value from boolvalues where not value); - QUERY PLAN ------------------------------------------------------------ - Append (actual rows=0 loops=1) - InitPlan 1 - -> Seq Scan on boolvalues (actual rows=1 loops=1) - Filter: (NOT value) - Rows Removed by Filter: 1 - -> Seq Scan on boolp_f boolp_1 (actual rows=0 loops=1) - Filter: (a = (InitPlan 1).col1) - -> Seq Scan on boolp_t boolp_2 (never executed) - Filter: (a = (InitPlan 1).col1) -(9 rows) - -drop table boolp; --- --- Test run-time pruning of MergeAppend subnodes --- -set enable_seqscan = off; -set enable_sort = off; -create table ma_test (a int, b int) partition by range (a); -create table ma_test_p1 partition of ma_test for values from (0) to (10); -create table ma_test_p2 partition of ma_test for values from (10) to (20); -create table ma_test_p3 partition of ma_test for values from (20) to (30); -insert into ma_test select x,x from generate_series(0,29) t(x); -create index on ma_test (b); -analyze ma_test; -prepare mt_q1 (int) as select a from ma_test where a >= $1 and a % 10 = 5 order by b; -explain (analyze, costs off, summary off, timing off, buffers off) execute mt_q1(15); - QUERY PLAN ------------------------------------------------------------------------------------------ - Merge Append (actual rows=2 loops=1) - Sort Key: ma_test.b - Subplans Removed: 1 - -> Index Scan using ma_test_p2_b_idx on ma_test_p2 ma_test_1 (actual rows=1 loops=1) - Filter: ((a >= $1) AND ((a % 10) = 5)) - Rows Removed by Filter: 9 - -> Index Scan using ma_test_p3_b_idx on ma_test_p3 ma_test_2 (actual rows=1 loops=1) - Filter: ((a >= $1) AND ((a % 10) = 5)) - Rows Removed by Filter: 9 -(9 rows) - -execute mt_q1(15); - a ----- - 15 - 25 -(2 rows) - -explain (analyze, costs off, summary off, timing off, buffers off) execute mt_q1(25); - QUERY PLAN 
------------------------------------------------------------------------------------------ - Merge Append (actual rows=1 loops=1) - Sort Key: ma_test.b - Subplans Removed: 2 - -> Index Scan using ma_test_p3_b_idx on ma_test_p3 ma_test_1 (actual rows=1 loops=1) - Filter: ((a >= $1) AND ((a % 10) = 5)) - Rows Removed by Filter: 9 -(6 rows) - -execute mt_q1(25); - a ----- - 25 -(1 row) - --- Ensure MergeAppend behaves correctly when no subplans match -explain (analyze, costs off, summary off, timing off, buffers off) execute mt_q1(35); - QUERY PLAN --------------------------------------- - Merge Append (actual rows=0 loops=1) - Sort Key: ma_test.b - Subplans Removed: 3 -(3 rows) - -execute mt_q1(35); - a ---- -(0 rows) - -deallocate mt_q1; -prepare mt_q2 (int) as select * from ma_test where a >= $1 order by b limit 1; --- Ensure output list looks sane when the MergeAppend has no subplans. -explain (analyze, verbose, costs off, summary off, timing off, buffers off) execute mt_q2 (35); - QUERY PLAN --------------------------------------------- - Limit (actual rows=0 loops=1) - Output: ma_test.a, ma_test.b - -> Merge Append (actual rows=0 loops=1) - Sort Key: ma_test.b - Subplans Removed: 3 -(5 rows) - -deallocate mt_q2; --- ensure initplan params properly prune partitions -explain (analyze, costs off, summary off, timing off, buffers off) select * from ma_test where a >= (select min(b) from ma_test_p2) order by b; - QUERY PLAN ------------------------------------------------------------------------------------------------ - Merge Append (actual rows=20 loops=1) - Sort Key: ma_test.b - InitPlan 2 - -> Result (actual rows=1 loops=1) - InitPlan 1 - -> Limit (actual rows=1 loops=1) - -> Index Scan using ma_test_p2_b_idx on ma_test_p2 (actual rows=1 loops=1) - Index Cond: (b IS NOT NULL) - -> Index Scan using ma_test_p1_b_idx on ma_test_p1 ma_test_1 (never executed) - Filter: (a >= (InitPlan 2).col1) - -> Index Scan using ma_test_p2_b_idx on ma_test_p2 ma_test_2 (actual rows=10 loops=1) - Filter: (a >= (InitPlan 2).col1) - -> Index Scan using ma_test_p3_b_idx on ma_test_p3 ma_test_3 (actual rows=10 loops=1) - Filter: (a >= (InitPlan 2).col1) -(14 rows) - -reset enable_seqscan; -reset enable_sort; -drop table ma_test; -reset enable_indexonlyscan; --- --- check that pruning works properly when the partition key is of a --- pseudotype --- --- array type list partition key -create table pp_arrpart (a int[]) partition by list (a); -create table pp_arrpart1 partition of pp_arrpart for values in ('{1}'); -create table pp_arrpart2 partition of pp_arrpart for values in ('{2, 3}', '{4, 5}'); -explain (costs off) select * from pp_arrpart where a = '{1}'; - QUERY PLAN ------------------------------------- - Seq Scan on pp_arrpart1 pp_arrpart - Filter: (a = '{1}'::integer[]) -(2 rows) - -explain (costs off) select * from pp_arrpart where a = '{1, 2}'; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -explain (costs off) select * from pp_arrpart where a in ('{4, 5}', '{1}'); - QUERY PLAN ----------------------------------------------------------------------- - Append - -> Seq Scan on pp_arrpart1 pp_arrpart_1 - Filter: ((a = '{4,5}'::integer[]) OR (a = '{1}'::integer[])) - -> Seq Scan on pp_arrpart2 pp_arrpart_2 - Filter: ((a = '{4,5}'::integer[]) OR (a = '{1}'::integer[])) -(5 rows) - -explain (costs off) update pp_arrpart set a = a where a = '{1}'; - QUERY PLAN --------------------------------------------- - Update on pp_arrpart - Update on pp_arrpart1 pp_arrpart_1 - -> Seq 
Scan on pp_arrpart1 pp_arrpart_1 - Filter: (a = '{1}'::integer[]) -(4 rows) - -explain (costs off) delete from pp_arrpart where a = '{1}'; - QUERY PLAN --------------------------------------------- - Delete on pp_arrpart - Delete on pp_arrpart1 pp_arrpart_1 - -> Seq Scan on pp_arrpart1 pp_arrpart_1 - Filter: (a = '{1}'::integer[]) -(4 rows) - -drop table pp_arrpart; --- array type hash partition key -create table pph_arrpart (a int[]) partition by hash (a); -create table pph_arrpart1 partition of pph_arrpart for values with (modulus 2, remainder 0); -create table pph_arrpart2 partition of pph_arrpart for values with (modulus 2, remainder 1); -insert into pph_arrpart values ('{1}'), ('{1, 2}'), ('{4, 5}'); -select tableoid::regclass, * from pph_arrpart order by 1; - tableoid | a ---------------+------- - pph_arrpart1 | {1,2} - pph_arrpart1 | {4,5} - pph_arrpart2 | {1} -(3 rows) - -explain (costs off) select * from pph_arrpart where a = '{1}'; - QUERY PLAN --------------------------------------- - Seq Scan on pph_arrpart2 pph_arrpart - Filter: (a = '{1}'::integer[]) -(2 rows) - -explain (costs off) select * from pph_arrpart where a = '{1, 2}'; - QUERY PLAN --------------------------------------- - Seq Scan on pph_arrpart1 pph_arrpart - Filter: (a = '{1,2}'::integer[]) -(2 rows) - -explain (costs off) select * from pph_arrpart where a in ('{4, 5}', '{1}'); - QUERY PLAN ----------------------------------------------------------------------- - Append - -> Seq Scan on pph_arrpart1 pph_arrpart_1 - Filter: ((a = '{4,5}'::integer[]) OR (a = '{1}'::integer[])) - -> Seq Scan on pph_arrpart2 pph_arrpart_2 - Filter: ((a = '{4,5}'::integer[]) OR (a = '{1}'::integer[])) -(5 rows) - -drop table pph_arrpart; --- enum type list partition key -create type pp_colors as enum ('green', 'blue', 'black'); -create table pp_enumpart (a pp_colors) partition by list (a); -create table pp_enumpart_green partition of pp_enumpart for values in ('green'); -create table pp_enumpart_blue partition of pp_enumpart for values in ('blue'); -explain (costs off) select * from pp_enumpart where a = 'blue'; - QUERY PLAN ------------------------------------------- - Seq Scan on pp_enumpart_blue pp_enumpart - Filter: (a = 'blue'::pp_colors) -(2 rows) - -explain (costs off) select * from pp_enumpart where a = 'black'; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -drop table pp_enumpart; -drop type pp_colors; --- record type as partition key -create type pp_rectype as (a int, b int); -create table pp_recpart (a pp_rectype) partition by list (a); -create table pp_recpart_11 partition of pp_recpart for values in ('(1,1)'); -create table pp_recpart_23 partition of pp_recpart for values in ('(2,3)'); -explain (costs off) select * from pp_recpart where a = '(1,1)'::pp_rectype; - QUERY PLAN --------------------------------------- - Seq Scan on pp_recpart_11 pp_recpart - Filter: (a = '(1,1)'::pp_rectype) -(2 rows) - -explain (costs off) select * from pp_recpart where a = '(1,2)'::pp_rectype; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -drop table pp_recpart; -drop type pp_rectype; --- range type partition key -create table pp_intrangepart (a int4range) partition by list (a); -create table pp_intrangepart12 partition of pp_intrangepart for values in ('[1,2]'); -create table pp_intrangepart2inf partition of pp_intrangepart for values in ('[2,)'); -explain (costs off) select * from pp_intrangepart where a = '[1,2]'::int4range; - QUERY PLAN 
------------------------------------------------ - Seq Scan on pp_intrangepart12 pp_intrangepart - Filter: (a = '[1,3)'::int4range) -(2 rows) - -explain (costs off) select * from pp_intrangepart where a = '(1,2)'::int4range; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -drop table pp_intrangepart; --- --- Ensure the enable_partition_prune GUC properly disables partition pruning. --- -create table pp_lp (a int, value int) partition by list (a); -create table pp_lp1 partition of pp_lp for values in(1); -create table pp_lp2 partition of pp_lp for values in(2); -explain (costs off) select * from pp_lp where a = 1; - QUERY PLAN --------------------------- - Seq Scan on pp_lp1 pp_lp - Filter: (a = 1) -(2 rows) - -explain (costs off) update pp_lp set value = 10 where a = 1; - QUERY PLAN ----------------------------------- - Update on pp_lp - Update on pp_lp1 pp_lp_1 - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) -(4 rows) - -explain (costs off) delete from pp_lp where a = 1; - QUERY PLAN ----------------------------------- - Delete on pp_lp - Delete on pp_lp1 pp_lp_1 - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) -(4 rows) - -set enable_partition_pruning = off; -set constraint_exclusion = 'partition'; -- this should not affect the result. -explain (costs off) select * from pp_lp where a = 1; - QUERY PLAN ----------------------------------- - Append - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) - -> Seq Scan on pp_lp2 pp_lp_2 - Filter: (a = 1) -(5 rows) - -explain (costs off) update pp_lp set value = 10 where a = 1; - QUERY PLAN ----------------------------------------- - Update on pp_lp - Update on pp_lp1 pp_lp_1 - Update on pp_lp2 pp_lp_2 - -> Append - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) - -> Seq Scan on pp_lp2 pp_lp_2 - Filter: (a = 1) -(8 rows) - -explain (costs off) delete from pp_lp where a = 1; - QUERY PLAN ----------------------------------------- - Delete on pp_lp - Delete on pp_lp1 pp_lp_1 - Delete on pp_lp2 pp_lp_2 - -> Append - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) - -> Seq Scan on pp_lp2 pp_lp_2 - Filter: (a = 1) -(8 rows) - -set constraint_exclusion = 'off'; -- this should not affect the result. -explain (costs off) select * from pp_lp where a = 1; - QUERY PLAN ----------------------------------- - Append - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) - -> Seq Scan on pp_lp2 pp_lp_2 - Filter: (a = 1) -(5 rows) - -explain (costs off) update pp_lp set value = 10 where a = 1; - QUERY PLAN ----------------------------------------- - Update on pp_lp - Update on pp_lp1 pp_lp_1 - Update on pp_lp2 pp_lp_2 - -> Append - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) - -> Seq Scan on pp_lp2 pp_lp_2 - Filter: (a = 1) -(8 rows) - -explain (costs off) delete from pp_lp where a = 1; - QUERY PLAN ----------------------------------------- - Delete on pp_lp - Delete on pp_lp1 pp_lp_1 - Delete on pp_lp2 pp_lp_2 - -> Append - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) - -> Seq Scan on pp_lp2 pp_lp_2 - Filter: (a = 1) -(8 rows) - -drop table pp_lp; --- Ensure enable_partition_prune does not affect non-partitioned tables. 
-create table inh_lp (a int, value int); -create table inh_lp1 (a int, value int, check(a = 1)) inherits (inh_lp); -NOTICE: merging column "a" with inherited definition -NOTICE: merging column "value" with inherited definition -create table inh_lp2 (a int, value int, check(a = 2)) inherits (inh_lp); -NOTICE: merging column "a" with inherited definition -NOTICE: merging column "value" with inherited definition -set constraint_exclusion = 'partition'; --- inh_lp2 should be removed in the following 3 cases. -explain (costs off) select * from inh_lp where a = 1; - QUERY PLAN ------------------------------------- - Append - -> Seq Scan on inh_lp inh_lp_1 - Filter: (a = 1) - -> Seq Scan on inh_lp1 inh_lp_2 - Filter: (a = 1) -(5 rows) - -explain (costs off) update inh_lp set value = 10 where a = 1; - QUERY PLAN ------------------------------------------------- - Update on inh_lp - Update on inh_lp inh_lp_1 - Update on inh_lp1 inh_lp_2 - -> Result - -> Append - -> Seq Scan on inh_lp inh_lp_1 - Filter: (a = 1) - -> Seq Scan on inh_lp1 inh_lp_2 - Filter: (a = 1) -(9 rows) - -explain (costs off) delete from inh_lp where a = 1; - QUERY PLAN ------------------------------------------- - Delete on inh_lp - Delete on inh_lp inh_lp_1 - Delete on inh_lp1 inh_lp_2 - -> Append - -> Seq Scan on inh_lp inh_lp_1 - Filter: (a = 1) - -> Seq Scan on inh_lp1 inh_lp_2 - Filter: (a = 1) -(8 rows) - --- Ensure we don't exclude normal relations when we only expect to exclude --- inheritance children -explain (costs off) update inh_lp1 set value = 10 where a = 2; - QUERY PLAN ---------------------------- - Update on inh_lp1 - -> Seq Scan on inh_lp1 - Filter: (a = 2) -(3 rows) - -drop table inh_lp cascade; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table inh_lp1 -drop cascades to table inh_lp2 -reset enable_partition_pruning; -reset constraint_exclusion; --- Check pruning for a partition tree containing only temporary relations -create temp table pp_temp_parent (a int) partition by list (a); -create temp table pp_temp_part_1 partition of pp_temp_parent for values in (1); -create temp table pp_temp_part_def partition of pp_temp_parent default; -explain (costs off) select * from pp_temp_parent where true; - QUERY PLAN ------------------------------------------------------ - Append - -> Seq Scan on pp_temp_part_1 pp_temp_parent_1 - -> Seq Scan on pp_temp_part_def pp_temp_parent_2 -(3 rows) - -explain (costs off) select * from pp_temp_parent where a = 2; - QUERY PLAN ---------------------------------------------- - Seq Scan on pp_temp_part_def pp_temp_parent - Filter: (a = 2) -(2 rows) - -drop table pp_temp_parent; --- Stress run-time partition pruning a bit more, per bug reports -create temp table p (a int, b int, c int) partition by list (a); -create temp table p1 partition of p for values in (1); -create temp table p2 partition of p for values in (2); -create temp table q (a int, b int, c int) partition by list (a); -create temp table q1 partition of q for values in (1) partition by list (b); -create temp table q11 partition of q1 for values in (1) partition by list (c); -create temp table q111 partition of q11 for values in (1); -create temp table q2 partition of q for values in (2) partition by list (b); -create temp table q21 partition of q2 for values in (1); -create temp table q22 partition of q2 for values in (2); -insert into q22 values (2, 2, 3); -explain (costs off) -select * -from ( - select * from p - union all - select * from q1 - union all - select 1, 1, 1 - ) s(a, b, c) -where s.a = 
1 and s.b = 1 and s.c = (select 1); - QUERY PLAN -------------------------------------------------------------------- - Append - InitPlan 1 - -> Result - -> Seq Scan on p1 p - Filter: ((a = 1) AND (b = 1) AND (c = (InitPlan 1).col1)) - -> Seq Scan on q111 q1 - Filter: ((a = 1) AND (b = 1) AND (c = (InitPlan 1).col1)) - -> Result - One-Time Filter: (1 = (InitPlan 1).col1) -(9 rows) - -select * -from ( - select * from p - union all - select * from q1 - union all - select 1, 1, 1 - ) s(a, b, c) -where s.a = 1 and s.b = 1 and s.c = (select 1); - a | b | c ----+---+--- - 1 | 1 | 1 -(1 row) - -prepare q (int, int) as -select * -from ( - select * from p - union all - select * from q1 - union all - select 1, 1, 1 - ) s(a, b, c) -where s.a = $1 and s.b = $2 and s.c = (select 1); -explain (costs off) execute q (1, 1); - QUERY PLAN ------------------------------------------------------------------------------- - Append - Subplans Removed: 1 - InitPlan 1 - -> Result - -> Seq Scan on p1 p - Filter: ((a = $1) AND (b = $2) AND (c = (InitPlan 1).col1)) - -> Seq Scan on q111 q1 - Filter: ((a = $1) AND (b = $2) AND (c = (InitPlan 1).col1)) - -> Result - One-Time Filter: ((1 = $1) AND (1 = $2) AND (1 = (InitPlan 1).col1)) -(10 rows) - -execute q (1, 1); - a | b | c ----+---+--- - 1 | 1 | 1 -(1 row) - -drop table p, q; --- Ensure run-time pruning works correctly when we match a partitioned table --- on the first level but find no matching partitions on the second level. -create table listp (a int, b int) partition by list (a); -create table listp1 partition of listp for values in(1); -create table listp2 partition of listp for values in(2) partition by list(b); -create table listp2_10 partition of listp2 for values in (10); -explain (analyze, costs off, summary off, timing off, buffers off) -select * from listp where a = (select 2) and b <> 10; - QUERY PLAN ---------------------------------------------------- - Seq Scan on listp1 listp (actual rows=0 loops=1) - Filter: ((b <> 10) AND (a = (InitPlan 1).col1)) - InitPlan 1 - -> Result (never executed) -(4 rows) - --- --- check that a partition directly accessed in a query is excluded with --- constraint_exclusion = on --- --- turn off partition pruning, so that it doesn't interfere -set enable_partition_pruning to off; --- setting constraint_exclusion to 'partition' disables exclusion -set constraint_exclusion to 'partition'; -explain (costs off) select * from listp1 where a = 2; - QUERY PLAN --------------------- - Seq Scan on listp1 - Filter: (a = 2) -(2 rows) - -explain (costs off) update listp1 set a = 1 where a = 2; - QUERY PLAN --------------------------- - Update on listp1 - -> Seq Scan on listp1 - Filter: (a = 2) -(3 rows) - --- constraint exclusion enabled -set constraint_exclusion to 'on'; -explain (costs off) select * from listp1 where a = 2; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -explain (costs off) update listp1 set a = 1 where a = 2; - QUERY PLAN --------------------------------- - Update on listp1 - -> Result - One-Time Filter: false -(3 rows) - -reset constraint_exclusion; -reset enable_partition_pruning; -drop table listp; --- Ensure run-time pruning works correctly for nested Append nodes -set parallel_setup_cost to 0; -set parallel_tuple_cost to 0; -create table listp (a int) partition by list(a); -create table listp_12 partition of listp for values in(1,2) partition by list(a); -create table listp_12_1 partition of listp_12 for values in(1); -create table listp_12_2 partition of listp_12 for 
values in(2); --- Force the 2nd subnode of the Append to be non-parallel. This results in --- a nested Append node because the mixed parallel / non-parallel paths cannot --- be pulled into the top-level Append. -alter table listp_12_1 set (parallel_workers = 0); --- Ensure that listp_12_2 is not scanned. (The nested Append is not seen in --- the plan as it's pulled in setref.c due to having just a single subnode). -select explain_parallel_append('select * from listp where a = (select 1);'); - explain_parallel_append ----------------------------------------------------------------------- - Gather (actual rows=N loops=N) - Workers Planned: 2 - Workers Launched: N - InitPlan 1 - -> Result (actual rows=N loops=N) - -> Parallel Append (actual rows=N loops=N) - -> Seq Scan on listp_12_1 listp_1 (actual rows=N loops=N) - Filter: (a = (InitPlan 1).col1) - -> Parallel Seq Scan on listp_12_2 listp_2 (never executed) - Filter: (a = (InitPlan 1).col1) -(10 rows) - --- Like the above but throw some more complexity at the planner by adding --- a UNION ALL. We expect both sides of the union not to scan the --- non-required partitions. -select explain_parallel_append( -'select * from listp where a = (select 1) - union all -select * from listp where a = (select 2);'); - explain_parallel_append ------------------------------------------------------------------------------------ - Gather (actual rows=N loops=N) - Workers Planned: 2 - Workers Launched: N - -> Parallel Append (actual rows=N loops=N) - -> Parallel Append (actual rows=N loops=N) - InitPlan 2 - -> Result (actual rows=N loops=N) - -> Seq Scan on listp_12_1 listp_1 (never executed) - Filter: (a = (InitPlan 2).col1) - -> Parallel Seq Scan on listp_12_2 listp_2 (actual rows=N loops=N) - Filter: (a = (InitPlan 2).col1) - -> Parallel Append (actual rows=N loops=N) - InitPlan 1 - -> Result (actual rows=N loops=N) - -> Seq Scan on listp_12_1 listp_4 (actual rows=N loops=N) - Filter: (a = (InitPlan 1).col1) - -> Parallel Seq Scan on listp_12_2 listp_5 (never executed) - Filter: (a = (InitPlan 1).col1) -(18 rows) - -drop table listp; -reset parallel_tuple_cost; -reset parallel_setup_cost; --- Test case for run-time pruning with a nested Merge Append -set enable_sort to 0; -create table rangep (a int, b int) partition by range (a); -create table rangep_0_to_100 partition of rangep for values from (0) to (100) partition by list (b); --- We need 3 sub-partitions. 1 to validate pruning worked and another two --- because a single remaining partition would be pulled up to the main Append. 
-create table rangep_0_to_100_1 partition of rangep_0_to_100 for values in(1); -create table rangep_0_to_100_2 partition of rangep_0_to_100 for values in(2); -create table rangep_0_to_100_3 partition of rangep_0_to_100 for values in(3); -create table rangep_100_to_200 partition of rangep for values from (100) to (200); -create index on rangep (a); --- Ensure run-time pruning works on the nested Merge Append -explain (analyze on, costs off, timing off, summary off, buffers off) -select * from rangep where b IN((select 1),(select 2)) order by a; - QUERY PLAN ------------------------------------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - InitPlan 1 - -> Result (actual rows=1 loops=1) - InitPlan 2 - -> Result (actual rows=1 loops=1) - -> Merge Append (actual rows=0 loops=1) - Sort Key: rangep_2.a - -> Index Scan using rangep_0_to_100_1_a_idx on rangep_0_to_100_1 rangep_2 (actual rows=0 loops=1) - Filter: (b = ANY (ARRAY[(InitPlan 1).col1, (InitPlan 2).col1])) - -> Index Scan using rangep_0_to_100_2_a_idx on rangep_0_to_100_2 rangep_3 (actual rows=0 loops=1) - Filter: (b = ANY (ARRAY[(InitPlan 1).col1, (InitPlan 2).col1])) - -> Index Scan using rangep_0_to_100_3_a_idx on rangep_0_to_100_3 rangep_4 (never executed) - Filter: (b = ANY (ARRAY[(InitPlan 1).col1, (InitPlan 2).col1])) - -> Index Scan using rangep_100_to_200_a_idx on rangep_100_to_200 rangep_5 (actual rows=0 loops=1) - Filter: (b = ANY (ARRAY[(InitPlan 1).col1, (InitPlan 2).col1])) -(15 rows) - -reset enable_sort; -drop table rangep; --- --- Check that gen_prune_steps_from_opexps() works well for various cases of --- clauses for different partition keys --- -create table rp_prefix_test1 (a int, b varchar) partition by range(a, b); -create table rp_prefix_test1_p1 partition of rp_prefix_test1 for values from (1, 'a') to (1, 'b'); -create table rp_prefix_test1_p2 partition of rp_prefix_test1 for values from (2, 'a') to (2, 'b'); --- Don't call get_steps_using_prefix() with the last partition key b plus --- an empty prefix -explain (costs off) select * from rp_prefix_test1 where a <= 1 and b = 'a'; - QUERY PLAN --------------------------------------------------- - Seq Scan on rp_prefix_test1_p1 rp_prefix_test1 - Filter: ((a <= 1) AND ((b)::text = 'a'::text)) -(2 rows) - -create table rp_prefix_test2 (a int, b int, c int) partition by range(a, b, c); -create table rp_prefix_test2_p1 partition of rp_prefix_test2 for values from (1, 1, 0) to (1, 1, 10); -create table rp_prefix_test2_p2 partition of rp_prefix_test2 for values from (2, 2, 0) to (2, 2, 10); --- Don't call get_steps_using_prefix() with the last partition key c plus --- an invalid prefix (ie, b = 1) -explain (costs off) select * from rp_prefix_test2 where a <= 1 and b = 1 and c >= 0; - QUERY PLAN ------------------------------------------------- - Seq Scan on rp_prefix_test2_p1 rp_prefix_test2 - Filter: ((a <= 1) AND (c >= 0) AND (b = 1)) -(2 rows) - -create table rp_prefix_test3 (a int, b int, c int, d int) partition by range(a, b, c, d); -create table rp_prefix_test3_p1 partition of rp_prefix_test3 for values from (1, 1, 1, 0) to (1, 1, 1, 10); -create table rp_prefix_test3_p2 partition of rp_prefix_test3 for values from (2, 2, 2, 0) to (2, 2, 2, 10); --- Test that get_steps_using_prefix() handles a prefix that contains multiple --- clauses for the partition key b (ie, b >= 1 and b >= 2) -explain (costs off) select * from rp_prefix_test3 where a >= 1 and b >= 1 and b >= 2 and c >= 2 and d >= 0; - QUERY PLAN 
--------------------------------------------------------------------------- - Seq Scan on rp_prefix_test3_p2 rp_prefix_test3 - Filter: ((a >= 1) AND (b >= 1) AND (b >= 2) AND (c >= 2) AND (d >= 0)) -(2 rows) - --- Test that get_steps_using_prefix() handles a prefix that contains multiple --- clauses for the partition key b (ie, b >= 1 and b = 2) (This also tests --- that the caller arranges clauses in that prefix in the required order) -explain (costs off) select * from rp_prefix_test3 where a >= 1 and b >= 1 and b = 2 and c = 2 and d >= 0; - QUERY PLAN ------------------------------------------------------------------------- - Seq Scan on rp_prefix_test3_p2 rp_prefix_test3 - Filter: ((a >= 1) AND (b >= 1) AND (d >= 0) AND (b = 2) AND (c = 2)) -(2 rows) - -drop table rp_prefix_test1; -drop table rp_prefix_test2; -drop table rp_prefix_test3; --- --- Test that get_steps_using_prefix() handles IS NULL clauses correctly --- -create table hp_prefix_test (a int, b int, c int, d int) - partition by hash (a part_test_int4_ops, b part_test_int4_ops, c part_test_int4_ops, d part_test_int4_ops); --- create 8 partitions -select 'create table hp_prefix_test_p' || x::text || ' partition of hp_prefix_test for values with (modulus 8, remainder ' || x::text || ');' -from generate_Series(0,7) x; - ?column? ------------------------------------------------------------------------------------------------------- - create table hp_prefix_test_p0 partition of hp_prefix_test for values with (modulus 8, remainder 0); - create table hp_prefix_test_p1 partition of hp_prefix_test for values with (modulus 8, remainder 1); - create table hp_prefix_test_p2 partition of hp_prefix_test for values with (modulus 8, remainder 2); - create table hp_prefix_test_p3 partition of hp_prefix_test for values with (modulus 8, remainder 3); - create table hp_prefix_test_p4 partition of hp_prefix_test for values with (modulus 8, remainder 4); - create table hp_prefix_test_p5 partition of hp_prefix_test for values with (modulus 8, remainder 5); - create table hp_prefix_test_p6 partition of hp_prefix_test for values with (modulus 8, remainder 6); - create table hp_prefix_test_p7 partition of hp_prefix_test for values with (modulus 8, remainder 7); -(8 rows) - -\gexec -create table hp_prefix_test_p0 partition of hp_prefix_test for values with (modulus 8, remainder 0); -create table hp_prefix_test_p1 partition of hp_prefix_test for values with (modulus 8, remainder 1); -create table hp_prefix_test_p2 partition of hp_prefix_test for values with (modulus 8, remainder 2); -create table hp_prefix_test_p3 partition of hp_prefix_test for values with (modulus 8, remainder 3); -create table hp_prefix_test_p4 partition of hp_prefix_test for values with (modulus 8, remainder 4); -create table hp_prefix_test_p5 partition of hp_prefix_test for values with (modulus 8, remainder 5); -create table hp_prefix_test_p6 partition of hp_prefix_test for values with (modulus 8, remainder 6); -create table hp_prefix_test_p7 partition of hp_prefix_test for values with (modulus 8, remainder 7); --- insert 16 rows, one row for each test to perform. -insert into hp_prefix_test -select - case a when 0 then null else 1 end, - case b when 0 then null else 2 end, - case c when 0 then null else 3 end, - case d when 0 then null else 4 end -from - generate_series(0,1) a, - generate_series(0,1) b, - generate_Series(0,1) c, - generate_Series(0,1) d; --- Ensure partition pruning works correctly for each combination of IS NULL --- and equality quals. 
This may seem a little excessive, but there have been --- a number of bugs in this area over the years. We make use of row only --- output to reduce the size of the expected results. -\t on -select - 'explain (costs off) select tableoid::regclass,* from hp_prefix_test where ' || - string_agg(c.colname || case when g.s & (1 << c.colpos) = 0 then ' is null' else ' = ' || (colpos+1)::text end, ' and ' order by c.colpos) -from (values('a',0),('b',1),('c',2),('d',3)) c(colname, colpos), generate_Series(0,15) g(s) -group by g.s -order by g.s; - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d = 4 - -\gexec -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d is null - Seq Scan on hp_prefix_test_p0 hp_prefix_test - Filter: ((a IS NULL) AND (b IS NULL) AND (c IS NULL) AND (d IS NULL)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d is null - Seq Scan on hp_prefix_test_p1 hp_prefix_test - Filter: ((b IS NULL) AND (c IS NULL) AND (d IS NULL) AND (a = 1)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d is null - Seq Scan on hp_prefix_test_p2 hp_prefix_test - Filter: ((a IS NULL) AND (c IS NULL) AND (d IS NULL) AND (b = 2)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d is null - Seq Scan on hp_prefix_test_p4 hp_prefix_test - Filter: ((c IS NULL) AND (d IS NULL) AND (a = 1) AND (b = 2)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d is 
null - Seq Scan on hp_prefix_test_p3 hp_prefix_test - Filter: ((a IS NULL) AND (b IS NULL) AND (d IS NULL) AND (c = 3)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d is null - Seq Scan on hp_prefix_test_p7 hp_prefix_test - Filter: ((b IS NULL) AND (d IS NULL) AND (a = 1) AND (c = 3)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d is null - Seq Scan on hp_prefix_test_p4 hp_prefix_test - Filter: ((a IS NULL) AND (d IS NULL) AND (b = 2) AND (c = 3)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d is null - Seq Scan on hp_prefix_test_p5 hp_prefix_test - Filter: ((d IS NULL) AND (a = 1) AND (b = 2) AND (c = 3)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d = 4 - Seq Scan on hp_prefix_test_p4 hp_prefix_test - Filter: ((a IS NULL) AND (b IS NULL) AND (c IS NULL) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d = 4 - Seq Scan on hp_prefix_test_p6 hp_prefix_test - Filter: ((b IS NULL) AND (c IS NULL) AND (a = 1) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d = 4 - Seq Scan on hp_prefix_test_p5 hp_prefix_test - Filter: ((a IS NULL) AND (c IS NULL) AND (b = 2) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d = 4 - Seq Scan on hp_prefix_test_p6 hp_prefix_test - Filter: ((c IS NULL) AND (a = 1) AND (b = 2) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d = 4 - Seq Scan on hp_prefix_test_p4 hp_prefix_test - Filter: ((a IS NULL) AND (b IS NULL) AND (c = 3) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d = 4 - Seq Scan on hp_prefix_test_p5 hp_prefix_test - Filter: ((b IS NULL) AND (a = 1) AND (c = 3) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d = 4 - Seq Scan on hp_prefix_test_p6 hp_prefix_test - Filter: ((a IS NULL) AND (b = 2) AND (c = 3) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d = 4 - Seq Scan on hp_prefix_test_p4 hp_prefix_test - Filter: ((a = 1) AND (b = 2) AND (c = 3) AND (d = 4)) - --- And ensure we get exactly 1 row from each. Again, all 16 possible combinations. 
-select - 'select tableoid::regclass,* from hp_prefix_test where ' || - string_agg(c.colname || case when g.s & (1 << c.colpos) = 0 then ' is null' else ' = ' || (colpos+1)::text end, ' and ' order by c.colpos) -from (values('a',0),('b',1),('c',2),('d',3)) c(colname, colpos), generate_Series(0,15) g(s) -group by g.s -order by g.s; - select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d is null - select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d is null - select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d is null - select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d is null - select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d is null - select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d is null - select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d is null - select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d is null - select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d = 4 - select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d = 4 - select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d = 4 - select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d = 4 - select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d = 4 - select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d = 4 - select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d = 4 - select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d = 4 - -\gexec -select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d is null - hp_prefix_test_p0 | | | | - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d is null - hp_prefix_test_p1 | 1 | | | - -select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d is null - hp_prefix_test_p2 | | 2 | | - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d is null - hp_prefix_test_p4 | 1 | 2 | | - -select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d is null - hp_prefix_test_p3 | | | 3 | - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d is null - hp_prefix_test_p7 | 1 | | 3 | - -select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d is null - hp_prefix_test_p4 | | 2 | 3 | - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d is null - hp_prefix_test_p5 | 1 | 2 | 3 | - -select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d = 4 - hp_prefix_test_p4 | | | | 4 - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d = 4 - hp_prefix_test_p6 | 1 | | | 4 - -select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d = 4 - hp_prefix_test_p5 | | 2 | | 4 - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d = 4 - hp_prefix_test_p6 | 1 | 2 | | 4 - -select 
tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d = 4 - hp_prefix_test_p4 | | | 3 | 4 - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d = 4 - hp_prefix_test_p5 | 1 | | 3 | 4 - -select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d = 4 - hp_prefix_test_p6 | | 2 | 3 | 4 - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d = 4 - hp_prefix_test_p4 | 1 | 2 | 3 | 4 - -\t off -drop table hp_prefix_test; --- --- Check that gen_partprune_steps() detects self-contradiction from clauses --- regardless of the order of the clauses (Here we use a custom operator to --- prevent the equivclass.c machinery from reordering the clauses) --- -create operator === ( - leftarg = int4, - rightarg = int4, - procedure = int4eq, - commutator = ===, - hashes -); -create operator class part_test_int4_ops2 -for type int4 -using hash as -operator 1 ===, -function 2 part_hashint4_noop(int4, int8); -create table hp_contradict_test (a int, b int) partition by hash (a part_test_int4_ops2, b part_test_int4_ops2); -create table hp_contradict_test_p1 partition of hp_contradict_test for values with (modulus 2, remainder 0); -create table hp_contradict_test_p2 partition of hp_contradict_test for values with (modulus 2, remainder 1); -explain (costs off) select * from hp_contradict_test where a is null and a === 1 and b === 1; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -explain (costs off) select * from hp_contradict_test where a === 1 and b === 1 and a is null; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -drop table hp_contradict_test; -drop operator class part_test_int4_ops2 using hash; -drop operator ===(int4, int4); -drop function explain_analyze(text); +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/reloptions.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/reloptions.out --- /Users/admin/pgsql/src/test/regress/expected/reloptions.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/reloptions.out 2024-12-13 13:20:13 @@ -1,237 +1,2 @@ --- Simple create -CREATE TABLE reloptions_test(i INT) WITH (FiLLFaCToR=30, - autovacuum_enabled = false, autovacuum_analyze_scale_factor = 0.2); -SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; - reloptions ------------------------------------------------------------------------------- - {fillfactor=30,autovacuum_enabled=false,autovacuum_analyze_scale_factor=0.2} -(1 row) - --- Fail min/max values check -CREATE TABLE reloptions_test2(i INT) WITH (fillfactor=2); -ERROR: value 2 out of bounds for option "fillfactor" -DETAIL: Valid values are between "10" and "100". -CREATE TABLE reloptions_test2(i INT) WITH (fillfactor=110); -ERROR: value 110 out of bounds for option "fillfactor" -DETAIL: Valid values are between "10" and "100". -CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_analyze_scale_factor = -10.0); -ERROR: value -10.0 out of bounds for option "autovacuum_analyze_scale_factor" -DETAIL: Valid values are between "0.000000" and "100.000000". 
-CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_analyze_scale_factor = 110.0); -ERROR: value 110.0 out of bounds for option "autovacuum_analyze_scale_factor" -DETAIL: Valid values are between "0.000000" and "100.000000". --- Fail when option and namespace do not exist -CREATE TABLE reloptions_test2(i INT) WITH (not_existing_option=2); -ERROR: unrecognized parameter "not_existing_option" -CREATE TABLE reloptions_test2(i INT) WITH (not_existing_namespace.fillfactor=2); -ERROR: unrecognized parameter namespace "not_existing_namespace" --- Fail while setting improper values -CREATE TABLE reloptions_test2(i INT) WITH (fillfactor=-30.1); -ERROR: value -30.1 out of bounds for option "fillfactor" -DETAIL: Valid values are between "10" and "100". -CREATE TABLE reloptions_test2(i INT) WITH (fillfactor='string'); -ERROR: invalid value for integer option "fillfactor": string -CREATE TABLE reloptions_test2(i INT) WITH (fillfactor=true); -ERROR: invalid value for integer option "fillfactor": true -CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_enabled=12); -ERROR: invalid value for boolean option "autovacuum_enabled": 12 -CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_enabled=30.5); -ERROR: invalid value for boolean option "autovacuum_enabled": 30.5 -CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_enabled='string'); -ERROR: invalid value for boolean option "autovacuum_enabled": string -CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_analyze_scale_factor='string'); -ERROR: invalid value for floating point option "autovacuum_analyze_scale_factor": string -CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_analyze_scale_factor=true); -ERROR: invalid value for floating point option "autovacuum_analyze_scale_factor": true --- Fail if option is specified twice -CREATE TABLE reloptions_test2(i INT) WITH (fillfactor=30, fillfactor=40); -ERROR: parameter "fillfactor" specified more than once --- Specifying name only for a non-Boolean option should fail -CREATE TABLE reloptions_test2(i INT) WITH (fillfactor); -ERROR: invalid value for integer option "fillfactor": true --- Simple ALTER TABLE -ALTER TABLE reloptions_test SET (fillfactor=31, - autovacuum_analyze_scale_factor = 0.3); -SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; - reloptions ------------------------------------------------------------------------------- - {autovacuum_enabled=false,fillfactor=31,autovacuum_analyze_scale_factor=0.3} -(1 row) - --- Set boolean option to true without specifying value -ALTER TABLE reloptions_test SET (autovacuum_enabled, fillfactor=32); -SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; - reloptions ------------------------------------------------------------------------------ - {autovacuum_analyze_scale_factor=0.3,autovacuum_enabled=true,fillfactor=32} -(1 row) - --- Check that RESET works well -ALTER TABLE reloptions_test RESET (fillfactor); -SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; - reloptions ---------------------------------------------------------------- - {autovacuum_analyze_scale_factor=0.3,autovacuum_enabled=true} -(1 row) - --- Resetting all values causes the column to become null -ALTER TABLE reloptions_test RESET (autovacuum_enabled, - autovacuum_analyze_scale_factor); -SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass AND - reloptions IS NULL; - reloptions ------------- - -(1 row) - --- RESET fails if a value is specified -ALTER TABLE reloptions_test RESET 
(fillfactor=12); -ERROR: RESET must not include values for parameters --- We can RESET an invalid option which for some reason is already set -UPDATE pg_class - SET reloptions = '{fillfactor=13,autovacuum_enabled=false,illegal_option=4}' - WHERE oid = 'reloptions_test'::regclass; -ALTER TABLE reloptions_test RESET (illegal_option); -SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; - reloptions ------------------------------------------- - {fillfactor=13,autovacuum_enabled=false} -(1 row) - --- Test vacuum_truncate option -DROP TABLE reloptions_test; -CREATE TEMP TABLE reloptions_test(i INT NOT NULL, j text) - WITH (vacuum_truncate=false, - toast.vacuum_truncate=false, - autovacuum_enabled=false); -SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; - reloptions --------------------------------------------------- - {vacuum_truncate=false,autovacuum_enabled=false} -(1 row) - -INSERT INTO reloptions_test VALUES (1, NULL), (NULL, NULL); -ERROR: null value in column "i" of relation "reloptions_test" violates not-null constraint -DETAIL: Failing row contains (null, null). --- Do an aggressive vacuum to prevent page-skipping. -VACUUM (FREEZE, DISABLE_PAGE_SKIPPING) reloptions_test; -SELECT pg_relation_size('reloptions_test') > 0; - ?column? ----------- - t -(1 row) - -SELECT reloptions FROM pg_class WHERE oid = - (SELECT reltoastrelid FROM pg_class - WHERE oid = 'reloptions_test'::regclass); - reloptions -------------------------- - {vacuum_truncate=false} -(1 row) - -ALTER TABLE reloptions_test RESET (vacuum_truncate); -SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; - reloptions ----------------------------- - {autovacuum_enabled=false} -(1 row) - -INSERT INTO reloptions_test VALUES (1, NULL), (NULL, NULL); -ERROR: null value in column "i" of relation "reloptions_test" violates not-null constraint -DETAIL: Failing row contains (null, null). --- Do an aggressive vacuum to prevent page-skipping. -VACUUM (FREEZE, DISABLE_PAGE_SKIPPING) reloptions_test; -SELECT pg_relation_size('reloptions_test') = 0; - ?column? 
----------- - t -(1 row) - --- Test toast.* options -DROP TABLE reloptions_test; -CREATE TABLE reloptions_test (s VARCHAR) - WITH (toast.autovacuum_vacuum_cost_delay = 23); -SELECT reltoastrelid as toast_oid - FROM pg_class WHERE oid = 'reloptions_test'::regclass \gset -SELECT reloptions FROM pg_class WHERE oid = :toast_oid; - reloptions ------------------------------------ - {autovacuum_vacuum_cost_delay=23} -(1 row) - -ALTER TABLE reloptions_test SET (toast.autovacuum_vacuum_cost_delay = 24); -SELECT reloptions FROM pg_class WHERE oid = :toast_oid; - reloptions ------------------------------------ - {autovacuum_vacuum_cost_delay=24} -(1 row) - -ALTER TABLE reloptions_test RESET (toast.autovacuum_vacuum_cost_delay); -SELECT reloptions FROM pg_class WHERE oid = :toast_oid; - reloptions ------------- - -(1 row) - --- Fail on non-existent options in toast namespace -CREATE TABLE reloptions_test2 (i int) WITH (toast.not_existing_option = 42); -ERROR: unrecognized parameter "not_existing_option" --- Mix TOAST & heap -DROP TABLE reloptions_test; -CREATE TABLE reloptions_test (s VARCHAR) WITH - (toast.autovacuum_vacuum_cost_delay = 23, - autovacuum_vacuum_cost_delay = 24, fillfactor = 40); -SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; - reloptions -------------------------------------------------- - {autovacuum_vacuum_cost_delay=24,fillfactor=40} -(1 row) - -SELECT reloptions FROM pg_class WHERE oid = ( - SELECT reltoastrelid FROM pg_class WHERE oid = 'reloptions_test'::regclass); - reloptions ------------------------------------ - {autovacuum_vacuum_cost_delay=23} -(1 row) - --- --- CREATE INDEX, ALTER INDEX for btrees --- -CREATE INDEX reloptions_test_idx ON reloptions_test (s) WITH (fillfactor=30); -SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test_idx'::regclass; - reloptions ------------------ - {fillfactor=30} -(1 row) - --- Fail when option and namespace do not exist -CREATE INDEX reloptions_test_idx ON reloptions_test (s) - WITH (not_existing_option=2); -ERROR: unrecognized parameter "not_existing_option" -CREATE INDEX reloptions_test_idx ON reloptions_test (s) - WITH (not_existing_ns.fillfactor=2); -ERROR: unrecognized parameter namespace "not_existing_ns" --- Check allowed ranges -CREATE INDEX reloptions_test_idx2 ON reloptions_test (s) WITH (fillfactor=1); -ERROR: value 1 out of bounds for option "fillfactor" -DETAIL: Valid values are between "10" and "100". -CREATE INDEX reloptions_test_idx2 ON reloptions_test (s) WITH (fillfactor=130); -ERROR: value 130 out of bounds for option "fillfactor" -DETAIL: Valid values are between "10" and "100". --- Check ALTER -ALTER INDEX reloptions_test_idx SET (fillfactor=40); -SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test_idx'::regclass; - reloptions ------------------ - {fillfactor=40} -(1 row) - --- Check ALTER on empty reloption list -CREATE INDEX reloptions_test_idx3 ON reloptions_test (s); -ALTER INDEX reloptions_test_idx3 SET (fillfactor=40); -SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test_idx3'::regclass; - reloptions ------------------ - {fillfactor=40} -(1 row) - +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? 
diff -U3 /Users/admin/pgsql/src/test/regress/expected/hash_part.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/hash_part.out --- /Users/admin/pgsql/src/test/regress/expected/hash_part.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/hash_part.out 2024-12-13 13:20:13 @@ -1,114 +1,2 @@ --- --- Hash partitioning. --- --- Use hand-rolled hash functions and operator classes to get predictable --- result on different machines. See the definitions of --- part_test_int4_ops and part_test_text_ops in test_setup.sql. -CREATE TABLE mchash (a int, b text, c jsonb) - PARTITION BY HASH (a part_test_int4_ops, b part_test_text_ops); -CREATE TABLE mchash1 - PARTITION OF mchash FOR VALUES WITH (MODULUS 4, REMAINDER 0); --- invalid OID, no such table -SELECT satisfies_hash_partition(0, 4, 0, NULL); -ERROR: could not open relation with OID 0 --- not partitioned -SELECT satisfies_hash_partition('tenk1'::regclass, 4, 0, NULL); -ERROR: "tenk1" is not a hash partitioned table --- partition rather than the parent -SELECT satisfies_hash_partition('mchash1'::regclass, 4, 0, NULL); -ERROR: "mchash1" is not a hash partitioned table --- invalid modulus -SELECT satisfies_hash_partition('mchash'::regclass, 0, 0, NULL); -ERROR: modulus for hash partition must be an integer value greater than zero --- remainder too small -SELECT satisfies_hash_partition('mchash'::regclass, 1, -1, NULL); -ERROR: remainder for hash partition must be an integer value greater than or equal to zero --- remainder too large -SELECT satisfies_hash_partition('mchash'::regclass, 1, 1, NULL); -ERROR: remainder for hash partition must be less than modulus --- modulus is null -SELECT satisfies_hash_partition('mchash'::regclass, NULL, 0, NULL); - satisfies_hash_partition --------------------------- - f -(1 row) - --- remainder is null -SELECT satisfies_hash_partition('mchash'::regclass, 4, NULL, NULL); - satisfies_hash_partition --------------------------- - f -(1 row) - --- too many arguments -SELECT satisfies_hash_partition('mchash'::regclass, 4, 0, NULL::int, NULL::text, NULL::json); -ERROR: number of partitioning columns (2) does not match number of partition keys provided (3) --- too few arguments -SELECT satisfies_hash_partition('mchash'::regclass, 3, 1, NULL::int); -ERROR: number of partitioning columns (2) does not match number of partition keys provided (1) --- wrong argument type -SELECT satisfies_hash_partition('mchash'::regclass, 2, 1, NULL::int, NULL::int); -ERROR: column 2 of the partition key has type text, but supplied value is of type integer --- ok, should be false -SELECT satisfies_hash_partition('mchash'::regclass, 4, 0, 0, ''::text); - satisfies_hash_partition --------------------------- - f -(1 row) - --- ok, should be true -SELECT satisfies_hash_partition('mchash'::regclass, 4, 0, 2, ''::text); - satisfies_hash_partition --------------------------- - t -(1 row) - --- argument via variadic syntax, should fail because not all partitioning --- columns are of the correct type -SELECT satisfies_hash_partition('mchash'::regclass, 2, 1, - variadic array[1,2]::int[]); -ERROR: column 2 of the partition key has type "text", but supplied value is of type "integer" --- multiple partitioning columns of the same type -CREATE TABLE mcinthash (a int, b int, c jsonb) - PARTITION BY HASH (a part_test_int4_ops, b part_test_int4_ops); --- now variadic should work, should be false -SELECT satisfies_hash_partition('mcinthash'::regclass, 4, 0, - variadic array[0, 0]); - 
satisfies_hash_partition --------------------------- - f -(1 row) - --- should be true -SELECT satisfies_hash_partition('mcinthash'::regclass, 4, 0, - variadic array[0, 1]); - satisfies_hash_partition --------------------------- - t -(1 row) - --- wrong length -SELECT satisfies_hash_partition('mcinthash'::regclass, 4, 0, - variadic array[]::int[]); -ERROR: number of partitioning columns (2) does not match number of partition keys provided (0) --- wrong type -SELECT satisfies_hash_partition('mcinthash'::regclass, 4, 0, - variadic array[now(), now()]); -ERROR: column 1 of the partition key has type "integer", but supplied value is of type "timestamp with time zone" --- check satisfies_hash_partition passes correct collation -create table text_hashp (a text) partition by hash (a); -create table text_hashp0 partition of text_hashp for values with (modulus 2, remainder 0); -create table text_hashp1 partition of text_hashp for values with (modulus 2, remainder 1); --- The result here should always be true, because 'xxx' must belong to --- one of the two defined partitions -select satisfies_hash_partition('text_hashp'::regclass, 2, 0, 'xxx'::text) OR - satisfies_hash_partition('text_hashp'::regclass, 2, 1, 'xxx'::text) AS satisfies; - satisfies ------------ - t -(1 row) - --- cleanup -DROP TABLE mchash; -DROP TABLE mcinthash; -DROP TABLE text_hashp; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/indexing.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/indexing.out --- /Users/admin/pgsql/src/test/regress/expected/indexing.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/indexing.out 2024-12-13 13:20:13 @@ -1,1671 +1,2 @@ --- Creating an index on a partitioned table makes the partitions --- automatically get the index -create table idxpart (a int, b int, c text) partition by range (a); --- relhassubclass of a partitioned index is false before creating any partition. --- It will be set after the first partition is created. -create index idxpart_idx on idxpart (a); -select relhassubclass from pg_class where relname = 'idxpart_idx'; - relhassubclass ----------------- - f -(1 row) - --- Check that partitioned indexes are present in pg_indexes. -select indexdef from pg_indexes where indexname like 'idxpart_idx%'; - indexdef ------------------------------------------------------------------ - CREATE INDEX idxpart_idx ON ONLY public.idxpart USING btree (a) -(1 row) - -drop index idxpart_idx; -create table idxpart1 partition of idxpart for values from (0) to (10); -create table idxpart2 partition of idxpart for values from (10) to (100) - partition by range (b); -create table idxpart21 partition of idxpart2 for values from (0) to (100); --- Even with partitions, relhassubclass should not be set if a partitioned --- index is created only on the parent. 
-create index idxpart_idx on only idxpart(a); -select relhassubclass from pg_class where relname = 'idxpart_idx'; - relhassubclass ----------------- - f -(1 row) - -drop index idxpart_idx; -create index on idxpart (a); -select relname, relkind, relhassubclass, inhparent::regclass - from pg_class left join pg_index ix on (indexrelid = oid) - left join pg_inherits on (ix.indexrelid = inhrelid) - where relname like 'idxpart%' order by relname; - relname | relkind | relhassubclass | inhparent ------------------+---------+----------------+---------------- - idxpart | p | t | - idxpart1 | r | f | - idxpart1_a_idx | i | f | idxpart_a_idx - idxpart2 | p | t | - idxpart21 | r | f | - idxpart21_a_idx | i | f | idxpart2_a_idx - idxpart2_a_idx | I | t | idxpart_a_idx - idxpart_a_idx | I | t | -(8 rows) - -drop table idxpart; --- Some unsupported features -create table idxpart (a int, b int, c text) partition by range (a); -create table idxpart1 partition of idxpart for values from (0) to (10); -create index concurrently on idxpart (a); -ERROR: cannot create index on partitioned table "idxpart" concurrently -drop table idxpart; --- Verify bugfix with query on indexed partitioned table with no partitions --- https://postgr.es/m/20180124162006.pmapfiznhgngwtjf@alvherre.pgsql -CREATE TABLE idxpart (col1 INT) PARTITION BY RANGE (col1); -CREATE INDEX ON idxpart (col1); -CREATE TABLE idxpart_two (col2 INT); -SELECT col2 FROM idxpart_two fk LEFT OUTER JOIN idxpart pk ON (col1 = col2); - col2 ------- -(0 rows) - -DROP table idxpart, idxpart_two; --- Verify bugfix with index rewrite on ALTER TABLE / SET DATA TYPE --- https://postgr.es/m/CAKcux6mxNCGsgATwf5CGMF8g4WSupCXicCVMeKUTuWbyxHOMsQ@mail.gmail.com -CREATE TABLE idxpart (a INT, b TEXT, c INT) PARTITION BY RANGE(a); -CREATE TABLE idxpart1 PARTITION OF idxpart FOR VALUES FROM (MINVALUE) TO (MAXVALUE); -CREATE INDEX partidx_abc_idx ON idxpart (a, b, c); -INSERT INTO idxpart (a, b, c) SELECT i, i, i FROM generate_series(1, 50) i; -ALTER TABLE idxpart ALTER COLUMN c TYPE numeric; -DROP TABLE idxpart; --- If a table without index is attached as partition to a table with --- an index, the index is automatically created -create table idxpart (a int, b int, c text) partition by range (a); -create index idxparti on idxpart (a); -create index idxparti2 on idxpart (b, c); -create table idxpart1 (like idxpart); -\d idxpart1 - Table "public.idxpart1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | - c | text | | | - -alter table idxpart attach partition idxpart1 for values from (0) to (10); -\d idxpart1 - Table "public.idxpart1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | - c | text | | | -Partition of: idxpart FOR VALUES FROM (0) TO (10) -Indexes: - "idxpart1_a_idx" btree (a) - "idxpart1_b_c_idx" btree (b, c) - -\d+ idxpart1_a_idx - Index "public.idxpart1_a_idx" - Column | Type | Key? | Definition | Storage | Stats target ---------+---------+------+------------+---------+-------------- - a | integer | yes | a | plain | -Partition of: idxparti -No partition constraint -btree, for table "public.idxpart1" - -\d+ idxpart1_b_c_idx - Index "public.idxpart1_b_c_idx" - Column | Type | Key? 
| Definition | Storage | Stats target
---------+---------+------+------------+----------+--------------
- b | integer | yes | b | plain |
- c | text | yes | c | extended |
-Partition of: idxparti2
-No partition constraint
-btree, for table "public.idxpart1"
-
--- Forbid ALTER TABLE when attaching or detaching an index to a partition.
-create index idxpart_c on only idxpart (c);
-create index idxpart1_c on idxpart1 (c);
-alter table idxpart_c attach partition idxpart1_c for values from (10) to (20);
-ERROR: "idxpart_c" is not a partitioned table
-alter index idxpart_c attach partition idxpart1_c;
-select relname, relpartbound from pg_class
- where relname in ('idxpart_c', 'idxpart1_c')
- order by relname;
- relname | relpartbound
-------------+--------------
- idxpart1_c |
- idxpart_c |
-(2 rows)
-
-alter table idxpart_c detach partition idxpart1_c;
-ERROR: ALTER action DETACH PARTITION cannot be performed on relation "idxpart_c"
-DETAIL: This operation is not supported for partitioned indexes.
-drop table idxpart;
--- If a partition already has an index, don't create a duplicative one
-create table idxpart (a int, b int) partition by range (a, b);
-create table idxpart1 partition of idxpart for values from (0, 0) to (10, 10);
-create index on idxpart1 (a, b);
-create index on idxpart (a, b);
-\d idxpart1
- Table "public.idxpart1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
-Partition of: idxpart FOR VALUES FROM (0, 0) TO (10, 10)
-Indexes:
- "idxpart1_a_b_idx" btree (a, b)
-
-select relname, relkind, relhassubclass, inhparent::regclass
- from pg_class left join pg_index ix on (indexrelid = oid)
- left join pg_inherits on (ix.indexrelid = inhrelid)
- where relname like 'idxpart%' order by relname;
- relname | relkind | relhassubclass | inhparent
-------------------+---------+----------------+-----------------
- idxpart | p | t |
- idxpart1 | r | f |
- idxpart1_a_b_idx | i | f | idxpart_a_b_idx
- idxpart_a_b_idx | I | t |
-(4 rows)
-
-drop table idxpart;
--- DROP behavior for partitioned indexes
-create table idxpart (a int) partition by range (a);
-create index on idxpart (a);
-create table idxpart1 partition of idxpart for values from (0) to (10);
-drop index idxpart1_a_idx; -- no way
-ERROR: cannot drop index idxpart1_a_idx because index idxpart_a_idx requires it
-HINT: You can drop index idxpart_a_idx instead.
-drop index concurrently idxpart_a_idx; -- unsupported
-ERROR: cannot drop partitioned index "idxpart_a_idx" concurrently
-drop index idxpart_a_idx; -- both indexes go away
-select relname, relkind from pg_class
- where relname like 'idxpart%' order by relname;
- relname | relkind
----------+---------
- idxpart | p
- idxpart1 | r
-(2 rows)
-
-create index on idxpart (a);
-drop table idxpart1; -- the index on partition goes away too
-select relname, relkind from pg_class
- where relname like 'idxpart%' order by relname;
- relname | relkind
---------------+---------
- idxpart | p
- idxpart_a_idx | I
-(2 rows)
-
-drop table idxpart;
--- DROP behavior with temporary partitioned indexes
-create temp table idxpart_temp (a int) partition by range (a);
-create index on idxpart_temp(a);
-create temp table idxpart1_temp partition of idxpart_temp
- for values from (0) to (10);
-drop index idxpart1_temp_a_idx; -- error
-ERROR: cannot drop index idxpart1_temp_a_idx because index idxpart_temp_a_idx requires it
-HINT: You can drop index idxpart_temp_a_idx instead.
--- non-concurrent drop is enforced here, so it is a valid case.
-drop index concurrently idxpart_temp_a_idx;
-select relname, relkind from pg_class
- where relname like 'idxpart_temp%' order by relname;
- relname | relkind
---------------+---------
- idxpart_temp | p
-(1 row)
-
-drop table idxpart_temp;
--- ALTER INDEX .. ATTACH, error cases
-create table idxpart (a int, b int) partition by range (a, b);
-create table idxpart1 partition of idxpart for values from (0, 0) to (10, 10);
-create index idxpart_a_b_idx on only idxpart (a, b);
-create index idxpart1_a_b_idx on idxpart1 (a, b);
-create index idxpart1_tst1 on idxpart1 (b, a);
-create index idxpart1_tst2 on idxpart1 using hash (a);
-create index idxpart1_tst3 on idxpart1 (a, b) where a > 10;
-alter index idxpart attach partition idxpart1;
-ERROR: "idxpart" is not an index
-alter index idxpart_a_b_idx attach partition idxpart1;
-ERROR: "idxpart1" is not an index
-alter index idxpart_a_b_idx attach partition idxpart_a_b_idx;
-ERROR: cannot attach index "idxpart_a_b_idx" as a partition of index "idxpart_a_b_idx"
-DETAIL: Index "idxpart_a_b_idx" is not an index on any partition of table "idxpart".
-alter index idxpart_a_b_idx attach partition idxpart1_b_idx;
-ERROR: relation "idxpart1_b_idx" does not exist
-alter index idxpart_a_b_idx attach partition idxpart1_tst1;
-ERROR: cannot attach index "idxpart1_tst1" as a partition of index "idxpart_a_b_idx"
-DETAIL: The index definitions do not match.
-alter index idxpart_a_b_idx attach partition idxpart1_tst2;
-ERROR: cannot attach index "idxpart1_tst2" as a partition of index "idxpart_a_b_idx"
-DETAIL: The index definitions do not match.
-alter index idxpart_a_b_idx attach partition idxpart1_tst3;
-ERROR: cannot attach index "idxpart1_tst3" as a partition of index "idxpart_a_b_idx"
-DETAIL: The index definitions do not match.
--- OK
-alter index idxpart_a_b_idx attach partition idxpart1_a_b_idx;
-alter index idxpart_a_b_idx attach partition idxpart1_a_b_idx; -- quiet
--- reject dupe
-create index idxpart1_2_a_b on idxpart1 (a, b);
-alter index idxpart_a_b_idx attach partition idxpart1_2_a_b;
-ERROR: cannot attach index "idxpart1_2_a_b" as a partition of index "idxpart_a_b_idx"
-DETAIL: Another index is already attached for partition "idxpart1".
-drop table idxpart;
--- make sure everything's gone
-select indexrelid::regclass, indrelid::regclass
- from pg_index where indexrelid::regclass::text like 'idxpart%';
- indexrelid | indrelid
-------------+----------
-(0 rows)
-
--- Don't auto-attach incompatible indexes
-create table idxpart (a int, b int) partition by range (a);
-create table idxpart1 (a int, b int);
-create index on idxpart1 using hash (a);
-create index on idxpart1 (a) where b > 1;
-create index on idxpart1 ((a + 0));
-create index on idxpart1 (a, a);
-create index on idxpart (a);
-alter table idxpart attach partition idxpart1 for values from (0) to (1000);
-\d idxpart1
- Table "public.idxpart1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
-Partition of: idxpart FOR VALUES FROM (0) TO (1000)
-Indexes:
- "idxpart1_a_a1_idx" btree (a, a)
- "idxpart1_a_idx" hash (a)
- "idxpart1_a_idx1" btree (a) WHERE b > 1
- "idxpart1_a_idx2" btree (a)
- "idxpart1_expr_idx" btree ((a + 0))
-
-drop table idxpart;
--- If CREATE INDEX ONLY, don't create indexes on partitions; and existing
--- indexes on partitions don't change parent. ALTER INDEX ATTACH can change
--- the parent after the fact.
-create table idxpart (a int) partition by range (a);
-create table idxpart1 partition of idxpart for values from (0) to (100);
-create table idxpart2 partition of idxpart for values from (100) to (1000)
- partition by range (a);
-create table idxpart21 partition of idxpart2 for values from (100) to (200);
-create table idxpart22 partition of idxpart2 for values from (200) to (300);
-create index on idxpart22 (a);
-create index on only idxpart2 (a);
-create index on idxpart (a);
--- Here we expect that idxpart1 and idxpart2 have a new index, but idxpart21
--- does not; also, idxpart22 is not attached.
-\d idxpart1
- Table "public.idxpart1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-Partition of: idxpart FOR VALUES FROM (0) TO (100)
-Indexes:
- "idxpart1_a_idx" btree (a)
-
-\d idxpart2
- Partitioned table "public.idxpart2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-Partition of: idxpart FOR VALUES FROM (100) TO (1000)
-Partition key: RANGE (a)
-Indexes:
- "idxpart2_a_idx" btree (a) INVALID
-Number of partitions: 2 (Use \d+ to list them.)
-
-\d idxpart21
- Table "public.idxpart21"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-Partition of: idxpart2 FOR VALUES FROM (100) TO (200)
-
-select indexrelid::regclass, indrelid::regclass, inhparent::regclass
- from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid)
-where indexrelid::regclass::text like 'idxpart%'
- order by indexrelid::regclass::text collate "C";
- indexrelid | indrelid | inhparent
------------------+-----------+---------------
- idxpart1_a_idx | idxpart1 | idxpart_a_idx
- idxpart22_a_idx | idxpart22 |
- idxpart2_a_idx | idxpart2 | idxpart_a_idx
- idxpart_a_idx | idxpart |
-(4 rows)
-
-alter index idxpart2_a_idx attach partition idxpart22_a_idx;
-select indexrelid::regclass, indrelid::regclass, inhparent::regclass
- from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid)
-where indexrelid::regclass::text like 'idxpart%'
- order by indexrelid::regclass::text collate "C";
- indexrelid | indrelid | inhparent
------------------+-----------+----------------
- idxpart1_a_idx | idxpart1 | idxpart_a_idx
- idxpart22_a_idx | idxpart22 | idxpart2_a_idx
- idxpart2_a_idx | idxpart2 | idxpart_a_idx
- idxpart_a_idx | idxpart |
-(4 rows)
-
--- attaching idxpart22 is not enough to set idxpart22_a_idx valid ...
-alter index idxpart2_a_idx attach partition idxpart22_a_idx;
-\d idxpart2
- Partitioned table "public.idxpart2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-Partition of: idxpart FOR VALUES FROM (100) TO (1000)
-Partition key: RANGE (a)
-Indexes:
- "idxpart2_a_idx" btree (a) INVALID
-Number of partitions: 2 (Use \d+ to list them.)
-
--- ... but this one is.
-create index on idxpart21 (a);
-alter index idxpart2_a_idx attach partition idxpart21_a_idx;
-\d idxpart2
- Partitioned table "public.idxpart2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-Partition of: idxpart FOR VALUES FROM (100) TO (1000)
-Partition key: RANGE (a)
-Indexes:
- "idxpart2_a_idx" btree (a)
-Number of partitions: 2 (Use \d+ to list them.)
-
-drop table idxpart;
--- When a table is attached a partition and it already has an index, a
--- duplicate index should not get created, but rather the index becomes
--- attached to the parent's index.
-create table idxpart (a int, b int, c text, d bool) partition by range (a);
-create index idxparti on idxpart (a);
-create index idxparti2 on idxpart (b, c);
-create table idxpart1 (like idxpart including indexes);
-\d idxpart1
- Table "public.idxpart1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
- c | text | | |
- d | boolean | | |
-Indexes:
- "idxpart1_a_idx" btree (a)
- "idxpart1_b_c_idx" btree (b, c)
-
-select relname, relkind, inhparent::regclass
- from pg_class left join pg_index ix on (indexrelid = oid)
- left join pg_inherits on (ix.indexrelid = inhrelid)
- where relname like 'idxpart%' order by relname;
- relname | relkind | inhparent
-------------------+---------+-----------
- idxpart | p |
- idxpart1 | r |
- idxpart1_a_idx | i |
- idxpart1_b_c_idx | i |
- idxparti | I |
- idxparti2 | I |
-(6 rows)
-
-alter table idxpart attach partition idxpart1 for values from (0) to (10);
-\d idxpart1
- Table "public.idxpart1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
- c | text | | |
- d | boolean | | |
-Partition of: idxpart FOR VALUES FROM (0) TO (10)
-Indexes:
- "idxpart1_a_idx" btree (a)
- "idxpart1_b_c_idx" btree (b, c)
-
-select relname, relkind, inhparent::regclass
- from pg_class left join pg_index ix on (indexrelid = oid)
- left join pg_inherits on (ix.indexrelid = inhrelid)
- where relname like 'idxpart%' order by relname;
- relname | relkind | inhparent
-------------------+---------+-----------
- idxpart | p |
- idxpart1 | r |
- idxpart1_a_idx | i | idxparti
- idxpart1_b_c_idx | i | idxparti2
- idxparti | I |
- idxparti2 | I |
-(6 rows)
-
--- While here, also check matching when creating an index after the fact.
-create index on idxpart1 ((a+b)) where d = true;
-\d idxpart1
- Table "public.idxpart1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
- c | text | | |
- d | boolean | | |
-Partition of: idxpart FOR VALUES FROM (0) TO (10)
-Indexes:
- "idxpart1_a_idx" btree (a)
- "idxpart1_b_c_idx" btree (b, c)
- "idxpart1_expr_idx" btree ((a + b)) WHERE d = true
-
-select relname, relkind, inhparent::regclass
- from pg_class left join pg_index ix on (indexrelid = oid)
- left join pg_inherits on (ix.indexrelid = inhrelid)
- where relname like 'idxpart%' order by relname;
- relname | relkind | inhparent
--------------------+---------+-----------
- idxpart | p |
- idxpart1 | r |
- idxpart1_a_idx | i | idxparti
- idxpart1_b_c_idx | i | idxparti2
- idxpart1_expr_idx | i |
- idxparti | I |
- idxparti2 | I |
-(7 rows)
-
-create index idxparti3 on idxpart ((a+b)) where d = true;
-\d idxpart1
- Table "public.idxpart1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
- c | text | | |
- d | boolean | | |
-Partition of: idxpart FOR VALUES FROM (0) TO (10)
-Indexes:
- "idxpart1_a_idx" btree (a)
- "idxpart1_b_c_idx" btree (b, c)
- "idxpart1_expr_idx" btree ((a + b)) WHERE d = true
-
-select relname, relkind, inhparent::regclass
- from pg_class left join pg_index ix on (indexrelid = oid)
- left join pg_inherits on (ix.indexrelid = inhrelid)
- where relname like 'idxpart%' order by relname;
- relname | relkind | inhparent
--------------------+---------+-----------
- idxpart | p |
- idxpart1 | r |
- idxpart1_a_idx | i | idxparti
- idxpart1_b_c_idx | i | idxparti2
- idxpart1_expr_idx | i | idxparti3
- idxparti | I |
- idxparti2 | I |
- idxparti3 | I |
-(8 rows)
-
-drop table idxpart;
--- Verify that attaching an invalid index does not mark the parent index valid.
--- On the other hand, attaching a valid index marks not only its direct
--- ancestor valid, but also any indirect ancestor that was only missing the one
--- that was just made valid
-create table idxpart (a int, b int) partition by range (a);
-create table idxpart1 partition of idxpart for values from (1) to (1000) partition by range (a);
-create table idxpart11 partition of idxpart1 for values from (1) to (100);
-create index on only idxpart1 (a);
-create index on only idxpart (a);
--- this results in two invalid indexes:
-select relname, indisvalid from pg_class join pg_index on indexrelid = oid
- where relname like 'idxpart%' order by relname;
- relname | indisvalid
----------------+------------
- idxpart1_a_idx | f
- idxpart_a_idx | f
-(2 rows)
-
--- idxpart1_a_idx is not valid, so idxpart_a_idx should not become valid:
-alter index idxpart_a_idx attach partition idxpart1_a_idx;
-select relname, indisvalid from pg_class join pg_index on indexrelid = oid
- where relname like 'idxpart%' order by relname;
- relname | indisvalid
----------------+------------
- idxpart1_a_idx | f
- idxpart_a_idx | f
-(2 rows)
-
--- after creating and attaching this, both idxpart1_a_idx and idxpart_a_idx
--- should become valid
-create index on idxpart11 (a);
-alter index idxpart1_a_idx attach partition idxpart11_a_idx;
-select relname, indisvalid from pg_class join pg_index on indexrelid = oid
- where relname like 'idxpart%' order by relname;
- relname | indisvalid
-----------------+------------
- idxpart11_a_idx | t
- idxpart1_a_idx | t
- idxpart_a_idx | t
-(3 rows)
-
-drop table idxpart;
--- verify dependency handling during ALTER TABLE DETACH PARTITION
-create table idxpart (a int) partition by range (a);
-create table idxpart1 (like idxpart);
-create index on idxpart1 (a);
-create index on idxpart (a);
-create table idxpart2 (like idxpart);
-alter table idxpart attach partition idxpart1 for values from (0000) to (1000);
-alter table idxpart attach partition idxpart2 for values from (1000) to (2000);
-create table idxpart3 partition of idxpart for values from (2000) to (3000);
-select relname, relkind from pg_class where relname like 'idxpart%' order by relname;
- relname | relkind
----------------+---------
- idxpart | p
- idxpart1 | r
- idxpart1_a_idx | i
- idxpart2 | r
- idxpart2_a_idx | i
- idxpart3 | r
- idxpart3_a_idx | i
- idxpart_a_idx | I
-(8 rows)
-
--- a) after detaching partitions, the indexes can be dropped independently
-alter table idxpart detach partition idxpart1;
-alter table idxpart detach partition idxpart2;
-alter table idxpart detach partition idxpart3;
-drop index idxpart1_a_idx;
-drop index idxpart2_a_idx;
-drop index idxpart3_a_idx;
-select relname, relkind from pg_class where relname like 'idxpart%' order by relname;
- relname | relkind
---------------+---------
- idxpart | p
- idxpart1 | r
- idxpart2 | r
- idxpart3 | r
- idxpart_a_idx | I
-(5 rows)
-
-drop table idxpart, idxpart1, idxpart2, idxpart3;
-select relname, relkind from pg_class where relname like 'idxpart%' order by relname;
- relname | relkind
----------+---------
-(0 rows)
-
-create table idxpart (a int) partition by range (a);
-create table idxpart1 (like idxpart);
-create index on idxpart1 (a);
-create index on idxpart (a);
-create table idxpart2 (like idxpart);
-alter table idxpart attach partition idxpart1 for values from (0000) to (1000);
-alter table idxpart attach partition idxpart2 for values from (1000) to (2000);
-create table idxpart3 partition of idxpart for values from (2000) to (3000);
--- b) after detaching, dropping the index on parent does not remove the others
-select relname, relkind from pg_class where relname like 'idxpart%' order by relname;
- relname | relkind
----------------+---------
- idxpart | p
- idxpart1 | r
- idxpart1_a_idx | i
- idxpart2 | r
- idxpart2_a_idx | i
- idxpart3 | r
- idxpart3_a_idx | i
- idxpart_a_idx | I
-(8 rows)
-
-alter table idxpart detach partition idxpart1;
-alter table idxpart detach partition idxpart2;
-alter table idxpart detach partition idxpart3;
-drop index idxpart_a_idx;
-select relname, relkind from pg_class where relname like 'idxpart%' order by relname;
- relname | relkind
----------------+---------
- idxpart | p
- idxpart1 | r
- idxpart1_a_idx | i
- idxpart2 | r
- idxpart2_a_idx | i
- idxpart3 | r
- idxpart3_a_idx | i
-(7 rows)
-
-drop table idxpart, idxpart1, idxpart2, idxpart3;
-select relname, relkind from pg_class where relname like 'idxpart%' order by relname;
- relname | relkind
----------+---------
-(0 rows)
-
-create table idxpart (a int, b int, c int) partition by range(a);
-create index on idxpart(c);
-create table idxpart1 partition of idxpart for values from (0) to (250);
-create table idxpart2 partition of idxpart for values from (250) to (500);
-alter table idxpart detach partition idxpart2;
-\d idxpart2
- Table "public.idxpart2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
- c | integer | | |
-Indexes:
- "idxpart2_c_idx" btree (c)
-
-alter table idxpart2 drop column c;
-\d idxpart2
- Table "public.idxpart2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
-
-drop table idxpart, idxpart2;
--- Verify that expression indexes inherit correctly
-create table idxpart (a int, b int) partition by range (a);
-create table idxpart1 (like idxpart);
-create index on idxpart1 ((a + b));
-create index on idxpart ((a + b));
-create table idxpart2 (like idxpart);
-alter table idxpart attach partition idxpart1 for values from (0000) to (1000);
-alter table idxpart attach partition idxpart2 for values from (1000) to (2000);
-create table idxpart3 partition of idxpart for values from (2000) to (3000);
-select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef
- from pg_class join pg_inherits on inhrelid = oid,
- lateral pg_get_indexdef(pg_class.oid)
- where relkind in ('i', 'I') and relname like 'idxpart%' order by relname;
- child | parent | childdef
--------------------+------------------+---------------------------------------------------------------------------
- idxpart1_expr_idx | idxpart_expr_idx | CREATE INDEX idxpart1_expr_idx ON public.idxpart1 USING btree (((a + b)))
- idxpart2_expr_idx | idxpart_expr_idx | CREATE INDEX idxpart2_expr_idx ON public.idxpart2 USING btree (((a + b)))
- idxpart3_expr_idx | idxpart_expr_idx | CREATE INDEX idxpart3_expr_idx ON public.idxpart3 USING btree (((a + b)))
-(3 rows)
-
-drop table idxpart;
--- Verify behavior for collation (mis)matches
-create table idxpart (a text) partition by range (a);
-create table idxpart1 (like idxpart);
-create table idxpart2 (like idxpart);
-create index on idxpart2 (a collate "POSIX");
-create index on idxpart2 (a);
-create index on idxpart2 (a collate "C");
-alter table idxpart attach partition idxpart1 for values from ('aaa') to ('bbb');
-alter table idxpart attach partition idxpart2 for values from ('bbb') to ('ccc');
-create table idxpart3 partition of idxpart for values from ('ccc') to ('ddd');
-create index on idxpart (a collate "C");
-create table idxpart4 partition of idxpart for values from ('ddd') to ('eee');
-select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef
- from pg_class left join pg_inherits on inhrelid = oid,
- lateral pg_get_indexdef(pg_class.oid)
- where relkind in ('i', 'I') and relname like 'idxpart%' order by relname;
- child | parent | childdef
------------------+---------------+--------------------------------------------------------------------------------
- idxpart1_a_idx | idxpart_a_idx | CREATE INDEX idxpart1_a_idx ON public.idxpart1 USING btree (a COLLATE "C")
- idxpart2_a_idx | | CREATE INDEX idxpart2_a_idx ON public.idxpart2 USING btree (a COLLATE "POSIX")
- idxpart2_a_idx1 | | CREATE INDEX idxpart2_a_idx1 ON public.idxpart2 USING btree (a)
- idxpart2_a_idx2 | idxpart_a_idx | CREATE INDEX idxpart2_a_idx2 ON public.idxpart2 USING btree (a COLLATE "C")
- idxpart3_a_idx | idxpart_a_idx | CREATE INDEX idxpart3_a_idx ON public.idxpart3 USING btree (a COLLATE "C")
- idxpart4_a_idx | idxpart_a_idx | CREATE INDEX idxpart4_a_idx ON public.idxpart4 USING btree (a COLLATE "C")
- idxpart_a_idx | | CREATE INDEX idxpart_a_idx ON ONLY public.idxpart USING btree (a COLLATE "C")
-(7 rows)
-
-drop table idxpart;
--- Verify behavior for opclass (mis)matches
-create table idxpart (a text) partition by range (a);
-create table idxpart1 (like idxpart);
-create table idxpart2 (like idxpart);
-create index on idxpart2 (a);
-alter table idxpart attach partition idxpart1 for values from ('aaa') to ('bbb');
-alter table idxpart attach partition idxpart2 for values from ('bbb') to ('ccc');
-create table idxpart3 partition of idxpart for values from ('ccc') to ('ddd');
-create index on idxpart (a text_pattern_ops);
-create table idxpart4 partition of idxpart for values from ('ddd') to ('eee');
--- must *not* have attached the index we created on idxpart2
-select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef
- from pg_class left join pg_inherits on inhrelid = oid,
- lateral pg_get_indexdef(pg_class.oid)
- where relkind in ('i', 'I') and relname like 'idxpart%' order by relname;
- child | parent | childdef
------------------+---------------+------------------------------------------------------------------------------------
- idxpart1_a_idx | idxpart_a_idx | CREATE INDEX idxpart1_a_idx ON public.idxpart1 USING btree (a text_pattern_ops)
- idxpart2_a_idx | | CREATE INDEX idxpart2_a_idx ON public.idxpart2 USING btree (a)
- idxpart2_a_idx1 | idxpart_a_idx | CREATE INDEX idxpart2_a_idx1 ON public.idxpart2 USING btree (a text_pattern_ops)
- idxpart3_a_idx | idxpart_a_idx | CREATE INDEX idxpart3_a_idx ON public.idxpart3 USING btree (a text_pattern_ops)
- idxpart4_a_idx | idxpart_a_idx | CREATE INDEX idxpart4_a_idx ON public.idxpart4 USING btree (a text_pattern_ops)
- idxpart_a_idx | | CREATE INDEX idxpart_a_idx ON ONLY public.idxpart USING btree (a text_pattern_ops)
-(6 rows)
-
-drop index idxpart_a_idx;
-create index on only idxpart (a text_pattern_ops);
--- must reject
-alter index idxpart_a_idx attach partition idxpart2_a_idx;
-ERROR: cannot attach index "idxpart2_a_idx" as a partition of index "idxpart_a_idx"
-DETAIL: The index definitions do not match.
-drop table idxpart;
--- Verify that attaching indexes maps attribute numbers correctly
-create table idxpart (col1 int, a int, col2 int, b int) partition by range (a);
-create table idxpart1 (b int, col1 int, col2 int, col3 int, a int);
-alter table idxpart drop column col1, drop column col2;
-alter table idxpart1 drop column col1, drop column col2, drop column col3;
-alter table idxpart attach partition idxpart1 for values from (0) to (1000);
-create index idxpart_1_idx on only idxpart (b, a);
-create index idxpart1_1_idx on idxpart1 (b, a);
-create index idxpart1_1b_idx on idxpart1 (b);
--- test expressions and partial-index predicate, too
-create index idxpart_2_idx on only idxpart ((b + a)) where a > 1;
-create index idxpart1_2_idx on idxpart1 ((b + a)) where a > 1;
-create index idxpart1_2b_idx on idxpart1 ((a + b)) where a > 1;
-create index idxpart1_2c_idx on idxpart1 ((b + a)) where b > 1;
-alter index idxpart_1_idx attach partition idxpart1_1b_idx; -- fail
-ERROR: cannot attach index "idxpart1_1b_idx" as a partition of index "idxpart_1_idx"
-DETAIL: The index definitions do not match.
-alter index idxpart_1_idx attach partition idxpart1_1_idx;
-alter index idxpart_2_idx attach partition idxpart1_2b_idx; -- fail
-ERROR: cannot attach index "idxpart1_2b_idx" as a partition of index "idxpart_2_idx"
-DETAIL: The index definitions do not match.
-alter index idxpart_2_idx attach partition idxpart1_2c_idx; -- fail
-ERROR: cannot attach index "idxpart1_2c_idx" as a partition of index "idxpart_2_idx"
-DETAIL: The index definitions do not match.
-alter index idxpart_2_idx attach partition idxpart1_2_idx; -- ok
-select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef
- from pg_class left join pg_inherits on inhrelid = oid,
- lateral pg_get_indexdef(pg_class.oid)
- where relkind in ('i', 'I') and relname like 'idxpart%' order by relname;
- child | parent | childdef
------------------+---------------+-----------------------------------------------------------------------------------------
- idxpart1_1_idx | idxpart_1_idx | CREATE INDEX idxpart1_1_idx ON public.idxpart1 USING btree (b, a)
- idxpart1_1b_idx | | CREATE INDEX idxpart1_1b_idx ON public.idxpart1 USING btree (b)
- idxpart1_2_idx | idxpart_2_idx | CREATE INDEX idxpart1_2_idx ON public.idxpart1 USING btree (((b + a))) WHERE (a > 1)
- idxpart1_2b_idx | | CREATE INDEX idxpart1_2b_idx ON public.idxpart1 USING btree (((a + b))) WHERE (a > 1)
- idxpart1_2c_idx | | CREATE INDEX idxpart1_2c_idx ON public.idxpart1 USING btree (((b + a))) WHERE (b > 1)
- idxpart_1_idx | | CREATE INDEX idxpart_1_idx ON ONLY public.idxpart USING btree (b, a)
- idxpart_2_idx | | CREATE INDEX idxpart_2_idx ON ONLY public.idxpart USING btree (((b + a))) WHERE (a > 1)
-(7 rows)
-
-drop table idxpart;
--- Make sure the partition columns are mapped correctly
-create table idxpart (a int, b int, c text) partition by range (a);
-create index idxparti on idxpart (a);
-create index idxparti2 on idxpart (c, b);
-create table idxpart1 (c text, a int, b int);
-alter table idxpart attach partition idxpart1 for values from (0) to (10);
-create table idxpart2 (c text, a int, b int);
-create index on idxpart2 (a);
-create index on idxpart2 (c, b);
-alter table idxpart attach partition idxpart2 for values from (10) to (20);
-select c.relname, pg_get_indexdef(indexrelid)
- from pg_class c join pg_index i on c.oid = i.indexrelid
- where indrelid::regclass::text like 'idxpart%'
- order by indexrelid::regclass::text collate "C";
- relname | pg_get_indexdef
-------------------+---------------------------------------------------------------------
- idxpart1_a_idx | CREATE INDEX idxpart1_a_idx ON public.idxpart1 USING btree (a)
- idxpart1_c_b_idx | CREATE INDEX idxpart1_c_b_idx ON public.idxpart1 USING btree (c, b)
- idxpart2_a_idx | CREATE INDEX idxpart2_a_idx ON public.idxpart2 USING btree (a)
- idxpart2_c_b_idx | CREATE INDEX idxpart2_c_b_idx ON public.idxpart2 USING btree (c, b)
- idxparti | CREATE INDEX idxparti ON ONLY public.idxpart USING btree (a)
- idxparti2 | CREATE INDEX idxparti2 ON ONLY public.idxpart USING btree (c, b)
-(6 rows)
-
-drop table idxpart;
--- Verify that columns are mapped correctly in expression indexes
-create table idxpart (col1 int, col2 int, a int, b int) partition by range (a);
-create table idxpart1 (col2 int, b int, col1 int, a int);
-create table idxpart2 (col1 int, col2 int, b int, a int);
-alter table idxpart drop column col1, drop column col2;
-alter table idxpart1 drop column col1, drop column col2;
-alter table idxpart2 drop column col1, drop column col2;
-create index on idxpart2 (abs(b));
-alter table idxpart attach partition idxpart2 for values from (0) to (1);
-create index on idxpart (abs(b));
-create index on idxpart ((b + 1));
-alter table idxpart attach partition idxpart1 for values from (1) to (2);
-select c.relname, pg_get_indexdef(indexrelid)
- from pg_class c join pg_index i on c.oid = i.indexrelid
- where indrelid::regclass::text like 'idxpart%'
- order by indexrelid::regclass::text collate "C";
- relname | pg_get_indexdef
--------------------+------------------------------------------------------------------------------
- idxpart1_abs_idx | CREATE INDEX idxpart1_abs_idx ON public.idxpart1 USING btree (abs(b))
- idxpart1_expr_idx | CREATE INDEX idxpart1_expr_idx ON public.idxpart1 USING btree (((b + 1)))
- idxpart2_abs_idx | CREATE INDEX idxpart2_abs_idx ON public.idxpart2 USING btree (abs(b))
- idxpart2_expr_idx | CREATE INDEX idxpart2_expr_idx ON public.idxpart2 USING btree (((b + 1)))
- idxpart_abs_idx | CREATE INDEX idxpart_abs_idx ON ONLY public.idxpart USING btree (abs(b))
- idxpart_expr_idx | CREATE INDEX idxpart_expr_idx ON ONLY public.idxpart USING btree (((b + 1)))
-(6 rows)
-
-drop table idxpart;
--- Verify that columns are mapped correctly for WHERE in a partial index
-create table idxpart (col1 int, a int, col3 int, b int) partition by range (a);
-alter table idxpart drop column col1, drop column col3;
-create table idxpart1 (col1 int, col2 int, col3 int, col4 int, b int, a int);
-alter table idxpart1 drop column col1, drop column col2, drop column col3, drop column col4;
-alter table idxpart attach partition idxpart1 for values from (0) to (1000);
-create table idxpart2 (col1 int, col2 int, b int, a int);
-create index on idxpart2 (a) where b > 1000;
-alter table idxpart2 drop column col1, drop column col2;
-alter table idxpart attach partition idxpart2 for values from (1000) to (2000);
-create index on idxpart (a) where b > 1000;
-select c.relname, pg_get_indexdef(indexrelid)
- from pg_class c join pg_index i on c.oid = i.indexrelid
- where indrelid::regclass::text like 'idxpart%'
- order by indexrelid::regclass::text collate "C";
- relname | pg_get_indexdef
----------------+------------------------------------------------------------------------------------
- idxpart1_a_idx | CREATE INDEX idxpart1_a_idx ON public.idxpart1 USING btree (a) WHERE (b > 1000)
- idxpart2_a_idx | CREATE INDEX idxpart2_a_idx ON public.idxpart2 USING btree (a) WHERE (b > 1000)
- idxpart_a_idx | CREATE INDEX idxpart_a_idx ON ONLY public.idxpart USING btree (a) WHERE (b > 1000)
-(3 rows)
-
-drop table idxpart;
--- Column number mapping: dropped columns in the partition
-create table idxpart1 (drop_1 int, drop_2 int, col_keep int, drop_3 int);
-alter table idxpart1 drop column drop_1;
-alter table idxpart1 drop column drop_2;
-alter table idxpart1 drop column drop_3;
-create index on idxpart1 (col_keep);
-create table idxpart (col_keep int) partition by range (col_keep);
-create index on idxpart (col_keep);
-alter table idxpart attach partition idxpart1 for values from (0) to (1000);
-\d idxpart
- Partitioned table "public.idxpart"
- Column | Type | Collation | Nullable | Default
-----------+---------+-----------+----------+---------
- col_keep | integer | | |
-Partition key: RANGE (col_keep)
-Indexes:
- "idxpart_col_keep_idx" btree (col_keep)
-Number of partitions: 1 (Use \d+ to list them.)
-
-\d idxpart1
- Table "public.idxpart1"
- Column | Type | Collation | Nullable | Default
-----------+---------+-----------+----------+---------
- col_keep | integer | | |
-Partition of: idxpart FOR VALUES FROM (0) TO (1000)
-Indexes:
- "idxpart1_col_keep_idx" btree (col_keep)
-
-select attrelid::regclass, attname, attnum from pg_attribute
- where attrelid::regclass::text like 'idxpart%' and attnum > 0
- order by attrelid::regclass, attnum;
- attrelid | attname | attnum
------------------------+------------------------------+--------
- idxpart1 | ........pg.dropped.1........ | 1
- idxpart1 | ........pg.dropped.2........ | 2
- idxpart1 | col_keep | 3
- idxpart1 | ........pg.dropped.4........ | 4
- idxpart1_col_keep_idx | col_keep | 1
- idxpart | col_keep | 1
- idxpart_col_keep_idx | col_keep | 1
-(7 rows)
-
-drop table idxpart;
--- Column number mapping: dropped columns in the parent table
-create table idxpart(drop_1 int, drop_2 int, col_keep int, drop_3 int) partition by range (col_keep);
-alter table idxpart drop column drop_1;
-alter table idxpart drop column drop_2;
-alter table idxpart drop column drop_3;
-create table idxpart1 (col_keep int);
-create index on idxpart1 (col_keep);
-create index on idxpart (col_keep);
-alter table idxpart attach partition idxpart1 for values from (0) to (1000);
-\d idxpart
- Partitioned table "public.idxpart"
- Column | Type | Collation | Nullable | Default
-----------+---------+-----------+----------+---------
- col_keep | integer | | |
-Partition key: RANGE (col_keep)
-Indexes:
- "idxpart_col_keep_idx" btree (col_keep)
-Number of partitions: 1 (Use \d+ to list them.)
-
-\d idxpart1
- Table "public.idxpart1"
- Column | Type | Collation | Nullable | Default
-----------+---------+-----------+----------+---------
- col_keep | integer | | |
-Partition of: idxpart FOR VALUES FROM (0) TO (1000)
-Indexes:
- "idxpart1_col_keep_idx" btree (col_keep)
-
-select attrelid::regclass, attname, attnum from pg_attribute
- where attrelid::regclass::text like 'idxpart%' and attnum > 0
- order by attrelid::regclass, attnum;
- attrelid | attname | attnum
------------------------+------------------------------+--------
- idxpart | ........pg.dropped.1........ | 1
- idxpart | ........pg.dropped.2........ | 2
- idxpart | col_keep | 3
- idxpart | ........pg.dropped.4........ | 4
- idxpart1 | col_keep | 1
- idxpart1_col_keep_idx | col_keep | 1
- idxpart_col_keep_idx | col_keep | 1
-(7 rows)
-
-drop table idxpart;
---
--- Constraint-related indexes
---
--- Verify that it works to add primary key / unique to partitioned tables
-create table idxpart (a int primary key, b int) partition by range (a);
-\d idxpart
- Partitioned table "public.idxpart"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | not null |
- b | integer | | |
-Partition key: RANGE (a)
-Indexes:
- "idxpart_pkey" PRIMARY KEY, btree (a)
-Number of partitions: 0
-
--- multiple primary key on child should fail
-create table failpart partition of idxpart (b primary key) for values from (0) to (100);
-ERROR: multiple primary keys for table "failpart" are not allowed
-drop table idxpart;
--- primary key on child is okay if there's no PK in the parent, though
-create table idxpart (a int) partition by range (a);
-create table idxpart1pk partition of idxpart (a primary key) for values from (0) to (100);
-\d idxpart1pk
- Table "public.idxpart1pk"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | not null |
-Partition of: idxpart FOR VALUES FROM (0) TO (100)
-Indexes:
- "idxpart1pk_pkey" PRIMARY KEY, btree (a)
-
-drop table idxpart;
--- Failing to use the full partition key is not allowed
-create table idxpart (a int unique, b int) partition by range (a, b);
-ERROR: unique constraint on partitioned table must include all partitioning columns
-DETAIL: UNIQUE constraint on table "idxpart" lacks column "b" which is part of the partition key.
-create table idxpart (a int, b int unique) partition by range (a, b);
-ERROR: unique constraint on partitioned table must include all partitioning columns
-DETAIL: UNIQUE constraint on table "idxpart" lacks column "a" which is part of the partition key.
-create table idxpart (a int primary key, b int) partition by range (b, a);
-ERROR: unique constraint on partitioned table must include all partitioning columns
-DETAIL: PRIMARY KEY constraint on table "idxpart" lacks column "b" which is part of the partition key.
-create table idxpart (a int, b int primary key) partition by range (b, a);
-ERROR: unique constraint on partitioned table must include all partitioning columns
-DETAIL: PRIMARY KEY constraint on table "idxpart" lacks column "a" which is part of the partition key.
--- OK if you use them in some other order
-create table idxpart (a int, b int, c text, primary key (a, b, c)) partition by range (b, c, a);
-drop table idxpart;
--- OK to add an exclusion constraint if partitioning by its equal column
-create table idxpart (a int4range, exclude USING GIST (a with = )) partition by range (a);
-drop table idxpart;
--- OK more than one equal column
-create table idxpart (a int4range, b int4range, exclude USING GIST (a with =, b with =)) partition by range (a, b);
-drop table idxpart;
--- OK with more than one equal column: constraint is a proper superset of partition key
-create table idxpart (a int4range, b int4range, exclude USING GIST (a with =, b with =)) partition by range (a);
-drop table idxpart;
--- Not OK more than one equal column: partition keys are a proper superset of constraint
-create table idxpart (a int4range, b int4range, exclude USING GIST (a with = )) partition by range (a, b);
-ERROR: unique constraint on partitioned table must include all partitioning columns
-DETAIL: EXCLUDE constraint on table "idxpart" lacks column "b" which is part of the partition key.
--- Not OK with just -|-
-create table idxpart (a int4range, exclude USING GIST (a with -|- )) partition by range (a);
-ERROR: cannot match partition key to index on column "a" using non-equal operator "-|-"
--- OK with equals and &&, and equals is the partition key
-create table idxpart (a int4range, b int4range, exclude USING GIST (a with =, b with &&)) partition by range (a);
-drop table idxpart;
--- Not OK with equals and &&, and equals is not the partition key
-create table idxpart (a int4range, b int4range, c int4range, exclude USING GIST (b with =, c with &&)) partition by range (a);
-ERROR: unique constraint on partitioned table must include all partitioning columns
-DETAIL: EXCLUDE constraint on table "idxpart" lacks column "a" which is part of the partition key.
--- OK more than one equal column and a && column
-create table idxpart (a int4range, b int4range, c int4range, exclude USING GIST (a with =, b with =, c with &&)) partition by range (a, b);
-drop table idxpart;
--- no expressions in partition key for PK/UNIQUE
-create table idxpart (a int primary key, b int) partition by range ((b + a));
-ERROR: unsupported PRIMARY KEY constraint with partition key definition
-DETAIL: PRIMARY KEY constraints cannot be used when partition keys include expressions.
-create table idxpart (a int unique, b int) partition by range ((b + a));
-ERROR: unsupported UNIQUE constraint with partition key definition
-DETAIL: UNIQUE constraints cannot be used when partition keys include expressions.
--- use ALTER TABLE to add a primary key
-create table idxpart (a int, b int, c text) partition by range (a, b);
-alter table idxpart add primary key (a); -- not an incomplete one though
-ERROR: unique constraint on partitioned table must include all partitioning columns
-DETAIL: PRIMARY KEY constraint on table "idxpart" lacks column "b" which is part of the partition key.
-alter table idxpart add primary key (a, b); -- this works
-\d idxpart
- Partitioned table "public.idxpart"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | not null |
- b | integer | | not null |
- c | text | | |
-Partition key: RANGE (a, b)
-Indexes:
- "idxpart_pkey" PRIMARY KEY, btree (a, b)
-Number of partitions: 0
-
-create table idxpart1 partition of idxpart for values from (0, 0) to (1000, 1000);
-\d idxpart1
- Table "public.idxpart1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | not null |
- b | integer | | not null |
- c | text | | |
-Partition of: idxpart FOR VALUES FROM (0, 0) TO (1000, 1000)
-Indexes:
- "idxpart1_pkey" PRIMARY KEY, btree (a, b)
-
-drop table idxpart;
--- use ALTER TABLE to add a unique constraint
-create table idxpart (a int, b int) partition by range (a, b);
-alter table idxpart add unique (a); -- not an incomplete one though
-ERROR: unique constraint on partitioned table must include all partitioning columns
-DETAIL: UNIQUE constraint on table "idxpart" lacks column "b" which is part of the partition key.
-alter table idxpart add unique (b, a); -- this works
-\d idxpart
- Partitioned table "public.idxpart"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
-Partition key: RANGE (a, b)
-Indexes:
- "idxpart_b_a_key" UNIQUE CONSTRAINT, btree (b, a)
-Number of partitions: 0
-
-drop table idxpart;
--- Exclusion constraints can be added if partitioning by their equal column
-create table idxpart (a int4range, b int4range) partition by range (a);
-alter table idxpart add exclude USING GIST (a with =);
-drop table idxpart;
--- OK more than one equal column
-create table idxpart (a int4range, b int4range) partition by range (a, b);
-alter table idxpart add exclude USING GIST (a with =, b with =);
-drop table idxpart;
--- OK with more than one equal column: constraint is a proper superset of partition key
-create table idxpart (a int4range, b int4range) partition by range (a);
-alter table idxpart add exclude USING GIST (a with =, b with =);
-drop table idxpart;
--- Not OK more than one equal column: partition keys are a proper superset of constraint
-create table idxpart (a int4range, b int4range) partition by range (a, b);
-alter table idxpart add exclude USING GIST (a with =);
-ERROR: unique constraint on partitioned table must include all partitioning columns
-DETAIL: EXCLUDE constraint on table "idxpart" lacks column "b" which is part of the partition key.
-drop table idxpart;
--- Not OK with just -|-
-create table idxpart (a int4range, b int4range) partition by range (a, b);
-alter table idxpart add exclude USING GIST (a with -|-);
-ERROR: cannot match partition key to index on column "a" using non-equal operator "-|-"
-drop table idxpart;
--- OK with equals and &&, and equals is the partition key
-create table idxpart (a int4range, b int4range) partition by range (a);
-alter table idxpart add exclude USING GIST (a with =, b with &&);
-drop table idxpart;
--- Not OK with equals and &&, and equals is not the partition key
-create table idxpart (a int4range, b int4range, c int4range) partition by range (a);
-alter table idxpart add exclude USING GIST (b with =, c with &&);
-ERROR: unique constraint on partitioned table must include all partitioning columns
-DETAIL: EXCLUDE constraint on table "idxpart" lacks column "a" which is part of the partition key.
-drop table idxpart;
--- OK more than one equal column and a && column
-create table idxpart (a int4range, b int4range, c int4range) partition by range (a, b);
-alter table idxpart add exclude USING GIST (a with =, b with =, c with &&);
-drop table idxpart;
--- When (sub)partitions are created, they also contain the constraint
-create table idxpart (a int, b int, primary key (a, b)) partition by range (a, b);
-create table idxpart1 partition of idxpart for values from (1, 1) to (10, 10);
-create table idxpart2 partition of idxpart for values from (10, 10) to (20, 20)
- partition by range (b);
-create table idxpart21 partition of idxpart2 for values from (10) to (15);
-create table idxpart22 partition of idxpart2 for values from (15) to (20);
-create table idxpart3 (b int not null, a int not null);
-alter table idxpart attach partition idxpart3 for values from (20, 20) to (30, 30);
-select conname, contype, conrelid::regclass, conindid::regclass, conkey
- from pg_constraint where conrelid::regclass::text like 'idxpart%'
- order by conrelid::regclass::text, conname;
- conname | contype | conrelid | conindid | conkey
----------------------+---------+-----------+----------------+--------
- idxpart_a_not_null | n | idxpart | - | {1}
- idxpart_b_not_null | n | idxpart | - | {2}
- idxpart_pkey | p | idxpart | idxpart_pkey | {1,2}
- idxpart1_pkey | p | idxpart1 | idxpart1_pkey | {1,2}
- idxpart_a_not_null | n | idxpart1 | - | {1}
- idxpart_b_not_null | n | idxpart1 | - | {2}
- idxpart2_pkey | p | idxpart2 | idxpart2_pkey | {1,2}
- idxpart_a_not_null | n | idxpart2 | - | {1}
- idxpart_b_not_null | n | idxpart2 | - | {2}
- idxpart21_pkey | p | idxpart21 | idxpart21_pkey | {1,2}
- idxpart_a_not_null | n | idxpart21 | - | {1}
- idxpart_b_not_null | n | idxpart21 | - | {2}
- idxpart22_pkey | p | idxpart22 | idxpart22_pkey | {1,2}
- idxpart_a_not_null | n | idxpart22 | - | {1}
- idxpart_b_not_null | n | idxpart22 | - | {2}
- idxpart3_a_not_null | n | idxpart3 | - | {2}
- idxpart3_b_not_null | n | idxpart3 | - | {1}
- idxpart3_pkey | p | idxpart3 | idxpart3_pkey | {2,1}
-(18 rows)
-
-drop table idxpart;
--- Verify that multi-layer partitioning honors the requirement that all
--- columns in the partition key must appear in primary/unique key
-create table idxpart (a int, b int, primary key (a)) partition by range (a);
-create table idxpart2 partition of idxpart
-for values from (0) to (1000) partition by range (b); -- fail
-ERROR: unique constraint on partitioned table must include all partitioning columns
-DETAIL: PRIMARY KEY constraint on table "idxpart2" lacks column "b" which is part of the partition key.
-drop table idxpart;
--- Ditto for the ATTACH PARTITION case
-create table idxpart (a int unique, b int) partition by range (a);
-create table idxpart1 (a int not null, b int, unique (a, b))
- partition by range (a, b);
-alter table idxpart attach partition idxpart1 for values from (1) to (1000);
-ERROR: unique constraint on partitioned table must include all partitioning columns
-DETAIL: UNIQUE constraint on table "idxpart1" lacks column "b" which is part of the partition key.
-DROP TABLE idxpart, idxpart1;
--- Multi-layer partitioning works correctly in this case:
-create table idxpart (a int, b int, primary key (a, b)) partition by range (a);
-create table idxpart2 partition of idxpart for values from (0) to (1000) partition by range (b);
-create table idxpart21 partition of idxpart2 for values from (0) to (1000);
-select conname, contype, conrelid::regclass, conindid::regclass, conkey
- from pg_constraint where conrelid::regclass::text like 'idxpart%'
- order by conrelid::regclass::text, conname;
- conname | contype | conrelid | conindid | conkey
---------------------+---------+-----------+----------------+--------
- idxpart_a_not_null | n | idxpart | - | {1}
- idxpart_b_not_null | n | idxpart | - | {2}
- idxpart_pkey | p | idxpart | idxpart_pkey | {1,2}
- idxpart2_pkey | p | idxpart2 | idxpart2_pkey | {1,2}
- idxpart_a_not_null | n | idxpart2 | - | {1}
- idxpart_b_not_null | n | idxpart2 | - | {2}
- idxpart21_pkey | p | idxpart21 | idxpart21_pkey | {1,2}
- idxpart_a_not_null | n | idxpart21 | - | {1}
- idxpart_b_not_null | n | idxpart21 | - | {2}
-(9 rows)
-
-drop table idxpart;
--- If a partitioned table has a unique/PK constraint, then it's not possible
--- to drop the corresponding constraint in the children; nor it's possible
--- to drop the indexes individually. Dropping the constraint in the parent
--- gets rid of the lot.
-create table idxpart (i int) partition by hash (i);
-create table idxpart0 partition of idxpart (i) for values with (modulus 2, remainder 0);
-create table idxpart1 partition of idxpart (i) for values with (modulus 2, remainder 1);
-alter table idxpart0 add primary key(i);
-alter table idxpart add primary key(i);
-select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid,
- conname, conislocal, coninhcount, connoinherit, convalidated
- from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid)
- left join pg_constraint con on (idx.indexrelid = con.conindid)
- where indrelid::regclass::text like 'idxpart%'
- order by indexrelid::regclass::text collate "C";
- indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated
-----------+---------------+--------------+------------+---------------+------------+-------------+--------------+--------------
- idxpart0 | idxpart0_pkey | idxpart_pkey | t | idxpart0_pkey | f | 1 | t | t
- idxpart1 | idxpart1_pkey | idxpart_pkey | t | idxpart1_pkey | f | 1 | f | t
- idxpart | idxpart_pkey | | t | idxpart_pkey | t | 0 | t | t
-(3 rows)
-
-drop index idxpart0_pkey; -- fail
-ERROR: cannot drop index idxpart0_pkey because index idxpart_pkey requires it
-HINT: You can drop index idxpart_pkey instead.
-drop index idxpart1_pkey; -- fail
-ERROR: cannot drop index idxpart1_pkey because index idxpart_pkey requires it
-HINT: You can drop index idxpart_pkey instead.
-alter table idxpart0 drop constraint idxpart0_pkey; -- fail
-ERROR: cannot drop inherited constraint "idxpart0_pkey" of relation "idxpart0"
-alter table idxpart1 drop constraint idxpart1_pkey; -- fail
-ERROR: cannot drop inherited constraint "idxpart1_pkey" of relation "idxpart1"
-alter table idxpart drop constraint idxpart_pkey; -- ok
-select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid,
- conname, conislocal, coninhcount, connoinherit, convalidated
- from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid)
- left join pg_constraint con on (idx.indexrelid = con.conindid)
- where indrelid::regclass::text like 'idxpart%'
- order by indexrelid::regclass::text collate "C";
- indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated
-----------+------------+-----------+------------+---------+------------+-------------+--------------+--------------
-(0 rows)
-
-drop table idxpart;
--- If the partition to be attached already has a primary key, fail if
--- it doesn't match the parent's PK.
-CREATE TABLE idxpart (c1 INT PRIMARY KEY, c2 INT, c3 VARCHAR(10)) PARTITION BY RANGE(c1);
-CREATE TABLE idxpart1 (LIKE idxpart);
-ALTER TABLE idxpart1 ADD PRIMARY KEY (c1, c2);
-ALTER TABLE idxpart ATTACH PARTITION idxpart1 FOR VALUES FROM (100) TO (200);
-ERROR: multiple primary keys for table "idxpart1" are not allowed
-DROP TABLE idxpart, idxpart1;
--- Ditto if there is some distance between the PKs (subpartitioning)
-create table idxpart (a int, b int, primary key (a)) partition by range (a);
-create table idxpart1 (a int not null, b int) partition by range (a);
-create table idxpart11 (a int not null, b int primary key);
-alter table idxpart1 attach partition idxpart11 for values from (0) to (1000);
-alter table idxpart attach partition idxpart1 for values from (0) to (10000);
-ERROR: multiple primary keys for table "idxpart11" are not allowed
-drop table idxpart, idxpart1, idxpart11;
--- If a partitioned table has a constraint whose index is not valid,
--- attaching a missing partition makes it valid.
-create table idxpart (a int) partition by range (a);
-create table idxpart0 (like idxpart);
-alter table idxpart0 add primary key (a);
-alter table idxpart attach partition idxpart0 for values from (0) to (1000);
-alter table only idxpart add primary key (a);
-select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid,
- conname, conislocal, coninhcount, connoinherit, convalidated
- from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid)
- left join pg_constraint con on (idx.indexrelid = con.conindid)
- where indrelid::regclass::text like 'idxpart%'
- order by indexrelid::regclass::text collate "C";
- indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated
-----------+---------------+-----------+------------+---------------+------------+-------------+--------------+--------------
- idxpart0 | idxpart0_pkey | | t | idxpart0_pkey | t | 0 | t | t
- idxpart | idxpart_pkey | | f | idxpart_pkey | t | 0 | t | t
-(2 rows)
-
-alter index idxpart_pkey attach partition idxpart0_pkey;
-select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid,
- conname, conislocal, coninhcount, connoinherit, convalidated
- from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid)
- left join pg_constraint con on (idx.indexrelid = con.conindid)
- where indrelid::regclass::text like 'idxpart%'
- order by indexrelid::regclass::text collate "C";
- indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated
-----------+---------------+--------------+------------+---------------+------------+-------------+--------------+--------------
- idxpart0 | idxpart0_pkey | idxpart_pkey | t | idxpart0_pkey | f | 1 | t | t
- idxpart | idxpart_pkey | | t | idxpart_pkey | t | 0 | t | t
-(2 rows)
-
-drop table idxpart;
--- Related to the above scenario: ADD PRIMARY KEY on the parent mustn't
--- automatically propagate NOT NULL to child columns.
-create table idxpart (a int) partition by range (a);
-create table idxpart0 (like idxpart);
-alter table idxpart0 add unique (a);
-alter table idxpart attach partition idxpart0 default;
-alter table only idxpart add primary key (a); -- fail, no not-null constraint
-ERROR: column "a" of table "idxpart0" is not marked NOT NULL
-alter table idxpart0 alter column a set not null;
-alter table only idxpart add primary key (a); -- now it works
-alter index idxpart_pkey attach partition idxpart0_a_key;
-drop table idxpart;
--- if a partition has a unique index without a constraint, does not attach
--- automatically; creates a new index instead.
-create table idxpart (a int, b int) partition by range (a);
-create table idxpart1 (a int not null, b int);
-create unique index on idxpart1 (a);
-alter table idxpart add primary key (a);
-alter table idxpart attach partition idxpart1 for values from (1) to (1000);
-select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid,
- conname, conislocal, coninhcount, connoinherit, convalidated
- from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid)
- left join pg_constraint con on (idx.indexrelid = con.conindid)
- where indrelid::regclass::text like 'idxpart%'
- order by indexrelid::regclass::text collate "C";
- indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated
-----------+----------------+--------------+------------+---------------+------------+-------------+--------------+--------------
- idxpart1 | idxpart1_a_idx | | t | | | | |
- idxpart1 | idxpart1_pkey | idxpart_pkey | t | idxpart1_pkey | f | 1 | f | t
- idxpart | idxpart_pkey | | t | idxpart_pkey | t | 0 | t | t
-(3 rows)
-
-drop table idxpart;
--- Can't attach an index without a corresponding constraint
-create table idxpart (a int, b int) partition by range (a);
-create table idxpart1 (a int not null, b int);
-create unique index on idxpart1 (a);
-alter table idxpart attach partition idxpart1 for values from (1) to (1000);
-alter table only idxpart add primary key (a);
-alter index idxpart_pkey attach partition idxpart1_a_idx; -- fail
-ERROR: cannot attach index "idxpart1_a_idx" as a partition of index "idxpart_pkey"
-DETAIL: The index "idxpart_pkey" belongs to a constraint in table "idxpart" but no constraint exists for index "idxpart1_a_idx".
-drop table idxpart;
--- Test that unique constraints are working
-create table idxpart (a int, b text, primary key (a, b)) partition by range (a);
-create table idxpart1 partition of idxpart for values from (0) to (100000);
-create table idxpart2 (c int, like idxpart);
-insert into idxpart2 (c, a, b) values (42, 572814, 'inserted first');
-alter table idxpart2 drop column c;
-create unique index on idxpart (a);
-alter table idxpart attach partition idxpart2 for values from (100000) to (1000000);
-insert into idxpart values (0, 'zero'), (42, 'life'), (2^16, 'sixteen');
-insert into idxpart select 2^g, format('two to power of %s', g) from generate_series(15, 17) g;
-ERROR: duplicate key value violates unique constraint "idxpart1_a_idx"
-DETAIL: Key (a)=(65536) already exists.
-insert into idxpart values (16, 'sixteen');
-insert into idxpart (b, a) values ('one', 142857), ('two', 285714);
-insert into idxpart select a * 2, b || b from idxpart where a between 2^16 and 2^19;
-ERROR: duplicate key value violates unique constraint "idxpart2_a_idx"
-DETAIL: Key (a)=(285714) already exists.
-insert into idxpart values (572814, 'five');
-ERROR: duplicate key value violates unique constraint "idxpart2_a_idx"
-DETAIL: Key (a)=(572814) already exists.
-insert into idxpart values (857142, 'six');
-select tableoid::regclass, * from idxpart order by a;
- tableoid | a | b
-----------+--------+----------------
- idxpart1 | 0 | zero
- idxpart1 | 16 | sixteen
- idxpart1 | 42 | life
- idxpart1 | 65536 | sixteen
- idxpart2 | 142857 | one
- idxpart2 | 285714 | two
- idxpart2 | 572814 | inserted first
- idxpart2 | 857142 | six
-(8 rows)
-
-drop table idxpart;
--- Test some other non-btree index types
-create table idxpart (a int, b text, c int[]) partition by range (a);
-create table idxpart1 partition of idxpart for values from (0) to (100000);
-set enable_seqscan to off;
-create index idxpart_brin on idxpart using brin(b);
-explain (costs off) select * from idxpart where b = 'abcd';
- QUERY PLAN
--------------------------------------------
- Bitmap Heap Scan on idxpart1 idxpart
- Recheck Cond: (b = 'abcd'::text)
- -> Bitmap Index Scan on idxpart1_b_idx
- Index Cond: (b = 'abcd'::text)
-(4 rows)
-
-drop index idxpart_brin;
-create index idxpart_spgist on idxpart using spgist(b);
-explain (costs off) select * from idxpart where b = 'abcd';
- QUERY PLAN
--------------------------------------------
- Bitmap Heap Scan on idxpart1 idxpart
- Recheck Cond: (b = 'abcd'::text)
- -> Bitmap Index Scan on idxpart1_b_idx
- Index Cond: (b = 'abcd'::text)
-(4 rows)
-
-drop index idxpart_spgist;
-create index idxpart_gin on idxpart using gin(c);
-explain (costs off) select * from idxpart where c @> array[42];
- QUERY PLAN
----------------------------------------------
- Bitmap Heap Scan on idxpart1 idxpart
- Recheck Cond: (c @> '{42}'::integer[])
- -> Bitmap Index Scan on idxpart1_c_idx
- Index Cond: (c @> '{42}'::integer[])
-(4 rows)
-
-drop index idxpart_gin;
-reset enable_seqscan;
-drop table idxpart;
--- intentionally leave some objects around
-create table idxpart (a int) partition by range (a);
-create table idxpart1 partition of idxpart for values from (0) to (100);
-create table idxpart2 partition of idxpart for values from (100) to (1000)
- partition by range (a);
-create table idxpart21 partition of idxpart2 for values from (100) to (200);
-create table idxpart22 partition of idxpart2 for values from (200) to (300);
-create index on idxpart22 (a);
-create index on only idxpart2 (a);
-alter index idxpart2_a_idx attach partition idxpart22_a_idx;
-create index on idxpart (a);
-create table idxpart_another (a int, b int, primary key (a, b)) partition by range (a);
-create table idxpart_another_1 partition of idxpart_another for values from (0) to (100);
-create table idxpart3 (c int, b int, a int) partition by range (a);
-alter table idxpart3 drop column b, drop column c;
-create table idxpart31 partition of idxpart3 for values from (1000) to (1200);
-create table idxpart32 partition of idxpart3 for values from (1200) to (1400);
-alter table idxpart attach partition idxpart3 for values from (1000) to (2000);
--- More objects intentionally left behind, to verify some pg_dump/pg_upgrade
--- behavior; see https://postgr.es/m/20190321204928.GA17535@alvherre.pgsql
-create schema regress_indexing;
-set search_path to regress_indexing;
-create table pk (a int primary key) partition by range (a);
-create table pk1 partition of pk for values from (0) to (1000);
-create table pk2 (b int, a int);
-alter table pk2 drop column b;
-alter table pk2 alter a set not null;
-alter table pk attach partition pk2 for values from (1000) to (2000);
-create table pk3 partition of pk for values from (2000) to (3000);
-create table pk4 (like pk);
-alter table pk attach partition pk4 for values from (3000) to (4000);
-create table pk5 (like pk) partition by range (a);
-create table pk51 partition of pk5 for values from (4000) to (4500);
-create table pk52 partition of pk5 for values from (4500) to (5000);
-alter table pk attach partition pk5 for values from (4000) to (5000);
-reset search_path;
--- Test that covering partitioned indexes work in various cases
-create table covidxpart (a int, b int) partition by list (a);
-create unique index on covidxpart (a) include (b);
-create table covidxpart1 partition of covidxpart for values in (1);
-create table covidxpart2 partition of covidxpart for values in (2);
-insert into covidxpart values (1, 1);
-insert into covidxpart values (1, 1);
-ERROR: duplicate key value violates unique constraint "covidxpart1_a_b_idx"
-DETAIL: Key (a)=(1) already exists.
-create table covidxpart3 (b int, c int, a int);
-alter table covidxpart3 drop c;
-alter table covidxpart attach partition covidxpart3 for values in (3);
-insert into covidxpart values (3, 1);
-insert into covidxpart values (3, 1);
-ERROR: duplicate key value violates unique constraint "covidxpart3_a_b_idx"
-DETAIL: Key (a)=(3) already exists.
-create table covidxpart4 (b int, a int);
-create unique index on covidxpart4 (a) include (b);
-create unique index on covidxpart4 (a);
-alter table covidxpart attach partition covidxpart4 for values in (4);
-insert into covidxpart values (4, 1);
-insert into covidxpart values (4, 1);
-ERROR: duplicate key value violates unique constraint "covidxpart4_a_b_idx"
-DETAIL: Key (a)=(4) already exists.
-create unique index on covidxpart (b) include (a); -- should fail
-ERROR: unique constraint on partitioned table must include all partitioning columns
-DETAIL: UNIQUE constraint on table "covidxpart" lacks column "a" which is part of the partition key.
--- check that detaching a partition also detaches the primary key constraint
-create table parted_pk_detach_test (a int primary key) partition by list (a);
-create table parted_pk_detach_test1 partition of parted_pk_detach_test for values in (1);
-alter table parted_pk_detach_test1 drop constraint parted_pk_detach_test1_pkey; -- should fail
-ERROR: cannot drop inherited constraint "parted_pk_detach_test1_pkey" of relation "parted_pk_detach_test1"
-alter table parted_pk_detach_test detach partition parted_pk_detach_test1;
-alter table parted_pk_detach_test1 drop constraint parted_pk_detach_test1_pkey;
-drop table parted_pk_detach_test, parted_pk_detach_test1;
-create table parted_uniq_detach_test (a int unique) partition by list (a);
-create table parted_uniq_detach_test1 partition of parted_uniq_detach_test for values in (1);
-alter table parted_uniq_detach_test1 drop constraint parted_uniq_detach_test1_a_key; -- should fail
-ERROR: cannot drop inherited constraint "parted_uniq_detach_test1_a_key" of relation "parted_uniq_detach_test1"
-alter table parted_uniq_detach_test detach partition parted_uniq_detach_test1;
-alter table parted_uniq_detach_test1 drop constraint parted_uniq_detach_test1_a_key;
-drop table parted_uniq_detach_test, parted_uniq_detach_test1;
--- check that dropping a column takes with it any partitioned indexes
--- depending on it.
-create table parted_index_col_drop(a int, b int, c int)
- partition by list (a);
-create table parted_index_col_drop1 partition of parted_index_col_drop
- for values in (1) partition by list (a);
--- leave this partition without children.
-create table parted_index_col_drop2 partition of parted_index_col_drop
- for values in (2) partition by list (a);
-create table parted_index_col_drop11 partition of parted_index_col_drop1
- for values in (1);
-create index on parted_index_col_drop (b);
-create index on parted_index_col_drop (c);
-create index on parted_index_col_drop (b, c);
-alter table parted_index_col_drop drop column c;
-\d parted_index_col_drop
- Partitioned table "public.parted_index_col_drop"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
-Partition key: LIST (a)
-Indexes:
- "parted_index_col_drop_b_idx" btree (b)
-Number of partitions: 2 (Use \d+ to list them.)
-
-\d parted_index_col_drop1
- Partitioned table "public.parted_index_col_drop1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
-Partition of: parted_index_col_drop FOR VALUES IN (1)
-Partition key: LIST (a)
-Indexes:
- "parted_index_col_drop1_b_idx" btree (b)
-Number of partitions: 1 (Use \d+ to list them.)
-
-\d parted_index_col_drop2
- Partitioned table "public.parted_index_col_drop2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
-Partition of: parted_index_col_drop FOR VALUES IN (2)
-Partition key: LIST (a)
-Indexes:
- "parted_index_col_drop2_b_idx" btree (b)
-Number of partitions: 0
-
-\d parted_index_col_drop11
- Table "public.parted_index_col_drop11"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
-Partition of: parted_index_col_drop1 FOR VALUES IN (1)
-Indexes:
- "parted_index_col_drop11_b_idx" btree (b)
-
-drop table parted_index_col_drop;
--- Check that invalid indexes are not selected when attaching a partition.
-create table parted_inval_tab (a int) partition by range (a);
-create index parted_inval_idx on parted_inval_tab (a);
-create table parted_inval_tab_1 (a int) partition by range (a);
-create table parted_inval_tab_1_1 partition of parted_inval_tab_1
- for values from (0) to (10);
-create table parted_inval_tab_1_2 partition of parted_inval_tab_1
- for values from (10) to (20);
--- this creates an invalid index.
-create index parted_inval_ixd_1 on only parted_inval_tab_1 (a);
--- this creates new indexes for all the partitions of parted_inval_tab_1,
--- discarding the invalid index created previously as what is chosen.
-alter table parted_inval_tab attach partition parted_inval_tab_1
- for values from (1) to (100);
-select indexrelid::regclass, indisvalid,
- indrelid::regclass, inhparent::regclass
- from pg_index idx left join
- pg_inherits inh on (idx.indexrelid = inh.inhrelid)
- where indexrelid::regclass::text like 'parted_inval%'
- order by indexrelid::regclass::text collate "C";
- indexrelid | indisvalid | indrelid | inhparent
-----------------------------+------------+----------------------+--------------------------
- parted_inval_idx | t | parted_inval_tab |
- parted_inval_ixd_1 | f | parted_inval_tab_1 |
- parted_inval_tab_1_1_a_idx | t | parted_inval_tab_1_1 | parted_inval_tab_1_a_idx
- parted_inval_tab_1_2_a_idx | t | parted_inval_tab_1_2 | parted_inval_tab_1_a_idx
- parted_inval_tab_1_a_idx | t | parted_inval_tab_1 | parted_inval_idx
-(5 rows)
-
-drop table parted_inval_tab;
--- Check setup of indisvalid across a complex partition tree on index
--- creation. If one index in a partition index is invalid, so should its
--- partitioned index.
-create table parted_isvalid_tab (a int, b int) partition by range (a);
-create table parted_isvalid_tab_1 partition of parted_isvalid_tab
- for values from (1) to (10) partition by range (a);
-create table parted_isvalid_tab_2 partition of parted_isvalid_tab
- for values from (10) to (20) partition by range (a);
-create table parted_isvalid_tab_11 partition of parted_isvalid_tab_1
- for values from (1) to (5);
-create table parted_isvalid_tab_12 partition of parted_isvalid_tab_1
- for values from (5) to (10);
--- create an invalid index on one of the partitions.
-insert into parted_isvalid_tab_11 values (1, 0);
-create index concurrently parted_isvalid_idx_11 on parted_isvalid_tab_11 ((a/b));
-ERROR: division by zero
--- The previous invalid index is selected, invalidating all the indexes up to
--- the top-most parent.
-create index parted_isvalid_idx on parted_isvalid_tab ((a/b));
-select indexrelid::regclass, indisvalid,
- indrelid::regclass, inhparent::regclass
- from pg_index idx left join
- pg_inherits inh on (idx.indexrelid = inh.inhrelid)
- where indexrelid::regclass::text like 'parted_isvalid%'
- order by indexrelid::regclass::text collate "C";
- indexrelid | indisvalid | indrelid | inhparent
---------------------------------+------------+-----------------------+-------------------------------
- parted_isvalid_idx | f | parted_isvalid_tab |
- parted_isvalid_idx_11 | f | parted_isvalid_tab_11 | parted_isvalid_tab_1_expr_idx
- parted_isvalid_tab_12_expr_idx | t | parted_isvalid_tab_12 | parted_isvalid_tab_1_expr_idx
- parted_isvalid_tab_1_expr_idx | f | parted_isvalid_tab_1 | parted_isvalid_idx
- parted_isvalid_tab_2_expr_idx | t | parted_isvalid_tab_2 | parted_isvalid_idx
-(5 rows)
-
-drop table parted_isvalid_tab;
--- Check state of replica indexes when attaching a partition.
-begin;
-create table parted_replica_tab (id int not null) partition by range (id);
-create table parted_replica_tab_1 partition of parted_replica_tab
- for values from (1) to (10) partition by range (id);
-create table parted_replica_tab_11 partition of parted_replica_tab_1
- for values from (1) to (5);
-create unique index parted_replica_idx
- on only parted_replica_tab using btree (id);
-create unique index parted_replica_idx_1
- on only parted_replica_tab_1 using btree (id);
--- This triggers an update of pg_index.indisreplident for parted_replica_idx.
-alter table only parted_replica_tab_1 replica identity - using index parted_replica_idx_1; -create unique index parted_replica_idx_11 on parted_replica_tab_11 USING btree (id); -select indexrelid::regclass, indisvalid, indisreplident, - indrelid::regclass, inhparent::regclass - from pg_index idx left join - pg_inherits inh on (idx.indexrelid = inh.inhrelid) - where indexrelid::regclass::text like 'parted_replica%' - order by indexrelid::regclass::text collate "C"; - indexrelid | indisvalid | indisreplident | indrelid | inhparent ------------------------+------------+----------------+-----------------------+----------- - parted_replica_idx | f | f | parted_replica_tab | - parted_replica_idx_1 | f | t | parted_replica_tab_1 | - parted_replica_idx_11 | t | f | parted_replica_tab_11 | -(3 rows) - --- parted_replica_idx is not valid yet here, because parted_replica_idx_1 --- is not valid. -alter index parted_replica_idx ATTACH PARTITION parted_replica_idx_1; -select indexrelid::regclass, indisvalid, indisreplident, - indrelid::regclass, inhparent::regclass - from pg_index idx left join - pg_inherits inh on (idx.indexrelid = inh.inhrelid) - where indexrelid::regclass::text like 'parted_replica%' - order by indexrelid::regclass::text collate "C"; - indexrelid | indisvalid | indisreplident | indrelid | inhparent ------------------------+------------+----------------+-----------------------+-------------------- - parted_replica_idx | f | f | parted_replica_tab | - parted_replica_idx_1 | f | t | parted_replica_tab_1 | parted_replica_idx - parted_replica_idx_11 | t | f | parted_replica_tab_11 | -(3 rows) - --- parted_replica_idx becomes valid here. -alter index parted_replica_idx_1 ATTACH PARTITION parted_replica_idx_11; -alter table only parted_replica_tab_1 replica identity - using index parted_replica_idx_1; -commit; -select indexrelid::regclass, indisvalid, indisreplident, - indrelid::regclass, inhparent::regclass - from pg_index idx left join - pg_inherits inh on (idx.indexrelid = inh.inhrelid) - where indexrelid::regclass::text like 'parted_replica%' - order by indexrelid::regclass::text collate "C"; - indexrelid | indisvalid | indisreplident | indrelid | inhparent ------------------------+------------+----------------+-----------------------+---------------------- - parted_replica_idx | t | f | parted_replica_tab | - parted_replica_idx_1 | t | t | parted_replica_tab_1 | parted_replica_idx - parted_replica_idx_11 | t | f | parted_replica_tab_11 | parted_replica_idx_1 -(3 rows) - -drop table parted_replica_tab; --- test that indexing commands work with TOASTed values in pg_index -create table test_pg_index_toast_table (a int); -create or replace function test_pg_index_toast_func (a int, b int[]) - returns bool as $$ select true $$ language sql immutable; -select array_agg(n) b from generate_series(1, 10000) n \gset -create index concurrently test_pg_index_toast_index - on test_pg_index_toast_table (test_pg_index_toast_func(a, :'b')); -reindex index concurrently test_pg_index_toast_index; -drop index concurrently test_pg_index_toast_index; -create index test_pg_index_toast_index - on test_pg_index_toast_table (test_pg_index_toast_func(a, :'b')); -reindex index test_pg_index_toast_index; -drop index test_pg_index_toast_index; -drop function test_pg_index_toast_func; -drop table test_pg_index_toast_table; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket?
diff -U3 /Users/admin/pgsql/src/test/regress/expected/partition_aggregate.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/partition_aggregate.out --- /Users/admin/pgsql/src/test/regress/expected/partition_aggregate.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/partition_aggregate.out 2024-12-13 13:20:13 @@ -1,1520 +1,2 @@ --- --- PARTITION_AGGREGATE --- Test partitionwise aggregation on partitioned tables --- --- Note: to ensure plan stability, it's a good idea to make the partitions of --- any one partitioned table in this test all have different numbers of rows. --- --- Enable partitionwise aggregate, which by default is disabled. -SET enable_partitionwise_aggregate TO true; --- Enable partitionwise join, which by default is disabled. -SET enable_partitionwise_join TO true; --- Disable parallel plans. -SET max_parallel_workers_per_gather TO 0; --- Disable incremental sort, which can influence selected plans due to fuzz factor. -SET enable_incremental_sort TO off; --- --- Tests for list partitioned tables. --- -CREATE TABLE pagg_tab (a int, b int, c text, d int) PARTITION BY LIST(c); -CREATE TABLE pagg_tab_p1 PARTITION OF pagg_tab FOR VALUES IN ('0000', '0001', '0002', '0003', '0004'); -CREATE TABLE pagg_tab_p2 PARTITION OF pagg_tab FOR VALUES IN ('0005', '0006', '0007', '0008'); -CREATE TABLE pagg_tab_p3 PARTITION OF pagg_tab FOR VALUES IN ('0009', '0010', '0011'); -INSERT INTO pagg_tab SELECT i % 20, i % 30, to_char(i % 12, 'FM0000'), i % 30 FROM generate_series(0, 2999) i; -ANALYZE pagg_tab; --- When GROUP BY clause matches; full aggregation is performed for each partition. -EXPLAIN (COSTS OFF) -SELECT c, sum(a), avg(b), count(*), min(a), max(b) FROM pagg_tab GROUP BY c HAVING avg(d) < 15 ORDER BY 1, 2, 3; - QUERY PLAN --------------------------------------------------------------- - Sort - Sort Key: pagg_tab.c, (sum(pagg_tab.a)), (avg(pagg_tab.b)) - -> Append - -> HashAggregate - Group Key: pagg_tab.c - Filter: (avg(pagg_tab.d) < '15'::numeric) - -> Seq Scan on pagg_tab_p1 pagg_tab - -> HashAggregate - Group Key: pagg_tab_1.c - Filter: (avg(pagg_tab_1.d) < '15'::numeric) - -> Seq Scan on pagg_tab_p2 pagg_tab_1 - -> HashAggregate - Group Key: pagg_tab_2.c - Filter: (avg(pagg_tab_2.d) < '15'::numeric) - -> Seq Scan on pagg_tab_p3 pagg_tab_2 -(15 rows) - -SELECT c, sum(a), avg(b), count(*), min(a), max(b) FROM pagg_tab GROUP BY c HAVING avg(d) < 15 ORDER BY 1, 2, 3; - c | sum | avg | count | min | max -------+------+---------------------+-------+-----+----- - 0000 | 2000 | 12.0000000000000000 | 250 | 0 | 24 - 0001 | 2250 | 13.0000000000000000 | 250 | 1 | 25 - 0002 | 2500 | 14.0000000000000000 | 250 | 2 | 26 - 0006 | 2500 | 12.0000000000000000 | 250 | 2 | 24 - 0007 | 2750 | 13.0000000000000000 | 250 | 3 | 25 - 0008 | 2000 | 14.0000000000000000 | 250 | 0 | 26 -(6 rows) - --- When GROUP BY clause does not match; partial aggregation is performed for each partition.
-EXPLAIN (COSTS OFF) -SELECT a, sum(b), avg(b), count(*), min(a), max(b) FROM pagg_tab GROUP BY a HAVING avg(d) < 15 ORDER BY 1, 2, 3; - QUERY PLAN --------------------------------------------------------------- - Sort - Sort Key: pagg_tab.a, (sum(pagg_tab.b)), (avg(pagg_tab.b)) - -> Finalize HashAggregate - Group Key: pagg_tab.a - Filter: (avg(pagg_tab.d) < '15'::numeric) - -> Append - -> Partial HashAggregate - Group Key: pagg_tab.a - -> Seq Scan on pagg_tab_p1 pagg_tab - -> Partial HashAggregate - Group Key: pagg_tab_1.a - -> Seq Scan on pagg_tab_p2 pagg_tab_1 - -> Partial HashAggregate - Group Key: pagg_tab_2.a - -> Seq Scan on pagg_tab_p3 pagg_tab_2 -(15 rows) - -SELECT a, sum(b), avg(b), count(*), min(a), max(b) FROM pagg_tab GROUP BY a HAVING avg(d) < 15 ORDER BY 1, 2, 3; - a | sum | avg | count | min | max -----+------+---------------------+-------+-----+----- - 0 | 1500 | 10.0000000000000000 | 150 | 0 | 20 - 1 | 1650 | 11.0000000000000000 | 150 | 1 | 21 - 2 | 1800 | 12.0000000000000000 | 150 | 2 | 22 - 3 | 1950 | 13.0000000000000000 | 150 | 3 | 23 - 4 | 2100 | 14.0000000000000000 | 150 | 4 | 24 - 10 | 1500 | 10.0000000000000000 | 150 | 10 | 20 - 11 | 1650 | 11.0000000000000000 | 150 | 11 | 21 - 12 | 1800 | 12.0000000000000000 | 150 | 12 | 22 - 13 | 1950 | 13.0000000000000000 | 150 | 13 | 23 - 14 | 2100 | 14.0000000000000000 | 150 | 14 | 24 -(10 rows) - --- Check with multiple columns in GROUP BY -EXPLAIN (COSTS OFF) -SELECT a, c, count(*) FROM pagg_tab GROUP BY a, c; - QUERY PLAN ------------------------------------------------- - Append - -> HashAggregate - Group Key: pagg_tab.a, pagg_tab.c - -> Seq Scan on pagg_tab_p1 pagg_tab - -> HashAggregate - Group Key: pagg_tab_1.a, pagg_tab_1.c - -> Seq Scan on pagg_tab_p2 pagg_tab_1 - -> HashAggregate - Group Key: pagg_tab_2.a, pagg_tab_2.c - -> Seq Scan on pagg_tab_p3 pagg_tab_2 -(10 rows) - --- Check with multiple columns in GROUP BY, order in GROUP BY is reversed -EXPLAIN (COSTS OFF) -SELECT a, c, count(*) FROM pagg_tab GROUP BY c, a; - QUERY PLAN ------------------------------------------------- - Append - -> HashAggregate - Group Key: pagg_tab.c, pagg_tab.a - -> Seq Scan on pagg_tab_p1 pagg_tab - -> HashAggregate - Group Key: pagg_tab_1.c, pagg_tab_1.a - -> Seq Scan on pagg_tab_p2 pagg_tab_1 - -> HashAggregate - Group Key: pagg_tab_2.c, pagg_tab_2.a - -> Seq Scan on pagg_tab_p3 pagg_tab_2 -(10 rows) - --- Check with multiple columns in GROUP BY, order in target-list is reversed -EXPLAIN (COSTS OFF) -SELECT c, a, count(*) FROM pagg_tab GROUP BY a, c; - QUERY PLAN ------------------------------------------------- - Append - -> HashAggregate - Group Key: pagg_tab.a, pagg_tab.c - -> Seq Scan on pagg_tab_p1 pagg_tab - -> HashAggregate - Group Key: pagg_tab_1.a, pagg_tab_1.c - -> Seq Scan on pagg_tab_p2 pagg_tab_1 - -> HashAggregate - Group Key: pagg_tab_2.a, pagg_tab_2.c - -> Seq Scan on pagg_tab_p3 pagg_tab_2 -(10 rows) - --- Test when input relation for grouping is dummy -EXPLAIN (COSTS OFF) -SELECT c, sum(a) FROM pagg_tab WHERE 1 = 2 GROUP BY c; - QUERY PLAN --------------------------------- - HashAggregate - Group Key: c - -> Result - One-Time Filter: false -(4 rows) - -SELECT c, sum(a) FROM pagg_tab WHERE 1 = 2 GROUP BY c; - c | sum ----+----- -(0 rows) - -EXPLAIN (COSTS OFF) -SELECT c, sum(a) FROM pagg_tab WHERE c = 'x' GROUP BY c; - QUERY PLAN --------------------------------- - GroupAggregate - -> Result - One-Time Filter: false -(3 rows) - -SELECT c, sum(a) FROM pagg_tab WHERE c = 'x' GROUP BY c; - c | sum ----+----- -(0 rows) - 
--- Test GroupAggregate paths by disabling hash aggregates. -SET enable_hashagg TO false; --- When GROUP BY clause matches full aggregation is performed for each partition. -EXPLAIN (COSTS OFF) -SELECT c, sum(a), avg(b), count(*) FROM pagg_tab GROUP BY 1 HAVING avg(d) < 15 ORDER BY 1, 2, 3; - QUERY PLAN --------------------------------------------------------------- - Sort - Sort Key: pagg_tab.c, (sum(pagg_tab.a)), (avg(pagg_tab.b)) - -> Append - -> GroupAggregate - Group Key: pagg_tab.c - Filter: (avg(pagg_tab.d) < '15'::numeric) - -> Sort - Sort Key: pagg_tab.c - -> Seq Scan on pagg_tab_p1 pagg_tab - -> GroupAggregate - Group Key: pagg_tab_1.c - Filter: (avg(pagg_tab_1.d) < '15'::numeric) - -> Sort - Sort Key: pagg_tab_1.c - -> Seq Scan on pagg_tab_p2 pagg_tab_1 - -> GroupAggregate - Group Key: pagg_tab_2.c - Filter: (avg(pagg_tab_2.d) < '15'::numeric) - -> Sort - Sort Key: pagg_tab_2.c - -> Seq Scan on pagg_tab_p3 pagg_tab_2 -(21 rows) - -SELECT c, sum(a), avg(b), count(*) FROM pagg_tab GROUP BY 1 HAVING avg(d) < 15 ORDER BY 1, 2, 3; - c | sum | avg | count -------+------+---------------------+------- - 0000 | 2000 | 12.0000000000000000 | 250 - 0001 | 2250 | 13.0000000000000000 | 250 - 0002 | 2500 | 14.0000000000000000 | 250 - 0006 | 2500 | 12.0000000000000000 | 250 - 0007 | 2750 | 13.0000000000000000 | 250 - 0008 | 2000 | 14.0000000000000000 | 250 -(6 rows) - --- When GROUP BY clause does not match; partial aggregation is performed for each partition. -EXPLAIN (COSTS OFF) -SELECT a, sum(b), avg(b), count(*) FROM pagg_tab GROUP BY 1 HAVING avg(d) < 15 ORDER BY 1, 2, 3; - QUERY PLAN ------------------------------------------------------------------- - Sort - Sort Key: pagg_tab.a, (sum(pagg_tab.b)), (avg(pagg_tab.b)) - -> Finalize GroupAggregate - Group Key: pagg_tab.a - Filter: (avg(pagg_tab.d) < '15'::numeric) - -> Merge Append - Sort Key: pagg_tab.a - -> Partial GroupAggregate - Group Key: pagg_tab.a - -> Sort - Sort Key: pagg_tab.a - -> Seq Scan on pagg_tab_p1 pagg_tab - -> Partial GroupAggregate - Group Key: pagg_tab_1.a - -> Sort - Sort Key: pagg_tab_1.a - -> Seq Scan on pagg_tab_p2 pagg_tab_1 - -> Partial GroupAggregate - Group Key: pagg_tab_2.a - -> Sort - Sort Key: pagg_tab_2.a - -> Seq Scan on pagg_tab_p3 pagg_tab_2 -(22 rows) - -SELECT a, sum(b), avg(b), count(*) FROM pagg_tab GROUP BY 1 HAVING avg(d) < 15 ORDER BY 1, 2, 3; - a | sum | avg | count -----+------+---------------------+------- - 0 | 1500 | 10.0000000000000000 | 150 - 1 | 1650 | 11.0000000000000000 | 150 - 2 | 1800 | 12.0000000000000000 | 150 - 3 | 1950 | 13.0000000000000000 | 150 - 4 | 2100 | 14.0000000000000000 | 150 - 10 | 1500 | 10.0000000000000000 | 150 - 11 | 1650 | 11.0000000000000000 | 150 - 12 | 1800 | 12.0000000000000000 | 150 - 13 | 1950 | 13.0000000000000000 | 150 - 14 | 2100 | 14.0000000000000000 | 150 -(10 rows) - --- Test partitionwise grouping without any aggregates -EXPLAIN (COSTS OFF) -SELECT c FROM pagg_tab GROUP BY c ORDER BY 1; - QUERY PLAN ------------------------------------------------------- - Merge Append - Sort Key: pagg_tab.c - -> Group - Group Key: pagg_tab.c - -> Sort - Sort Key: pagg_tab.c - -> Seq Scan on pagg_tab_p1 pagg_tab - -> Group - Group Key: pagg_tab_1.c - -> Sort - Sort Key: pagg_tab_1.c - -> Seq Scan on pagg_tab_p2 pagg_tab_1 - -> Group - Group Key: pagg_tab_2.c - -> Sort - Sort Key: pagg_tab_2.c - -> Seq Scan on pagg_tab_p3 pagg_tab_2 -(17 rows) - -SELECT c FROM pagg_tab GROUP BY c ORDER BY 1; - c ------- - 0000 - 0001 - 0002 - 0003 - 0004 - 0005 - 0006 - 0007 - 0008 - 0009 
- 0010 - 0011 -(12 rows) - -EXPLAIN (COSTS OFF) -SELECT a FROM pagg_tab WHERE a < 3 GROUP BY a ORDER BY 1; - QUERY PLAN ------------------------------------------------------------- - Group - Group Key: pagg_tab.a - -> Merge Append - Sort Key: pagg_tab.a - -> Group - Group Key: pagg_tab.a - -> Sort - Sort Key: pagg_tab.a - -> Seq Scan on pagg_tab_p1 pagg_tab - Filter: (a < 3) - -> Group - Group Key: pagg_tab_1.a - -> Sort - Sort Key: pagg_tab_1.a - -> Seq Scan on pagg_tab_p2 pagg_tab_1 - Filter: (a < 3) - -> Group - Group Key: pagg_tab_2.a - -> Sort - Sort Key: pagg_tab_2.a - -> Seq Scan on pagg_tab_p3 pagg_tab_2 - Filter: (a < 3) -(22 rows) - -SELECT a FROM pagg_tab WHERE a < 3 GROUP BY a ORDER BY 1; - a ---- - 0 - 1 - 2 -(3 rows) - -RESET enable_hashagg; --- ROLLUP, partitionwise aggregation does not apply -EXPLAIN (COSTS OFF) -SELECT c, sum(a) FROM pagg_tab GROUP BY rollup(c) ORDER BY 1, 2; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: pagg_tab.c, (sum(pagg_tab.a)) - -> MixedAggregate - Hash Key: pagg_tab.c - Group Key: () - -> Append - -> Seq Scan on pagg_tab_p1 pagg_tab_1 - -> Seq Scan on pagg_tab_p2 pagg_tab_2 - -> Seq Scan on pagg_tab_p3 pagg_tab_3 -(9 rows) - --- ORDERED SET within the aggregate. --- Full aggregation; since all the rows that belong to the same group come --- from the same partition, having an ORDER BY within the aggregate doesn't --- make any difference. -EXPLAIN (COSTS OFF) -SELECT c, sum(b order by a) FROM pagg_tab GROUP BY c ORDER BY 1, 2; - QUERY PLAN ---------------------------------------------------------------- - Sort - Sort Key: pagg_tab.c, (sum(pagg_tab.b ORDER BY pagg_tab.a)) - -> Append - -> GroupAggregate - Group Key: pagg_tab.c - -> Sort - Sort Key: pagg_tab.c, pagg_tab.a - -> Seq Scan on pagg_tab_p1 pagg_tab - -> GroupAggregate - Group Key: pagg_tab_1.c - -> Sort - Sort Key: pagg_tab_1.c, pagg_tab_1.a - -> Seq Scan on pagg_tab_p2 pagg_tab_1 - -> GroupAggregate - Group Key: pagg_tab_2.c - -> Sort - Sort Key: pagg_tab_2.c, pagg_tab_2.a - -> Seq Scan on pagg_tab_p3 pagg_tab_2 -(18 rows) - --- Since GROUP BY clause does not match with PARTITION KEY; we need to do --- partial aggregation. However, ORDERED SET are not partial safe and thus --- partitionwise aggregation plan is not generated. 
-EXPLAIN (COSTS OFF) -SELECT a, sum(b order by a) FROM pagg_tab GROUP BY a ORDER BY 1, 2; - QUERY PLAN ---------------------------------------------------------------- - Sort - Sort Key: pagg_tab.a, (sum(pagg_tab.b ORDER BY pagg_tab.a)) - -> GroupAggregate - Group Key: pagg_tab.a - -> Sort - Sort Key: pagg_tab.a - -> Append - -> Seq Scan on pagg_tab_p1 pagg_tab_1 - -> Seq Scan on pagg_tab_p2 pagg_tab_2 - -> Seq Scan on pagg_tab_p3 pagg_tab_3 -(10 rows) - --- JOIN query -CREATE TABLE pagg_tab1(x int, y int) PARTITION BY RANGE(x); -CREATE TABLE pagg_tab1_p1 PARTITION OF pagg_tab1 FOR VALUES FROM (0) TO (10); -CREATE TABLE pagg_tab1_p2 PARTITION OF pagg_tab1 FOR VALUES FROM (10) TO (20); -CREATE TABLE pagg_tab1_p3 PARTITION OF pagg_tab1 FOR VALUES FROM (20) TO (30); -CREATE TABLE pagg_tab2(x int, y int) PARTITION BY RANGE(y); -CREATE TABLE pagg_tab2_p1 PARTITION OF pagg_tab2 FOR VALUES FROM (0) TO (10); -CREATE TABLE pagg_tab2_p2 PARTITION OF pagg_tab2 FOR VALUES FROM (10) TO (20); -CREATE TABLE pagg_tab2_p3 PARTITION OF pagg_tab2 FOR VALUES FROM (20) TO (30); -INSERT INTO pagg_tab1 SELECT i % 30, i % 20 FROM generate_series(0, 299, 2) i; -INSERT INTO pagg_tab2 SELECT i % 20, i % 30 FROM generate_series(0, 299, 3) i; -ANALYZE pagg_tab1; -ANALYZE pagg_tab2; --- When GROUP BY clause matches; full aggregation is performed for each partition. -EXPLAIN (COSTS OFF) -SELECT t1.x, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------- - Sort - Sort Key: t1.x, (sum(t1.y)), (count(*)) - -> Append - -> HashAggregate - Group Key: t1.x - -> Hash Join - Hash Cond: (t1.x = t2.y) - -> Seq Scan on pagg_tab1_p1 t1 - -> Hash - -> Seq Scan on pagg_tab2_p1 t2 - -> HashAggregate - Group Key: t1_1.x - -> Hash Join - Hash Cond: (t1_1.x = t2_1.y) - -> Seq Scan on pagg_tab1_p2 t1_1 - -> Hash - -> Seq Scan on pagg_tab2_p2 t2_1 - -> HashAggregate - Group Key: t1_2.x - -> Hash Join - Hash Cond: (t2_2.y = t1_2.x) - -> Seq Scan on pagg_tab2_p3 t2_2 - -> Hash - -> Seq Scan on pagg_tab1_p3 t1_2 -(24 rows) - -SELECT t1.x, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; - x | sum | count -----+------+------- - 0 | 500 | 100 - 6 | 1100 | 100 - 12 | 700 | 100 - 18 | 1300 | 100 - 24 | 900 | 100 -(5 rows) - --- Check with whole-row reference; partitionwise aggregation does not apply -EXPLAIN (COSTS OFF) -SELECT t1.x, sum(t1.y), count(t1) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------- - Sort - Sort Key: t1.x, (sum(t1.y)), (count(((t1.*)::pagg_tab1))) - -> HashAggregate - Group Key: t1.x - -> Hash Join - Hash Cond: (t1.x = t2.y) - -> Append - -> Seq Scan on pagg_tab1_p1 t1_1 - -> Seq Scan on pagg_tab1_p2 t1_2 - -> Seq Scan on pagg_tab1_p3 t1_3 - -> Hash - -> Append - -> Seq Scan on pagg_tab2_p1 t2_1 - -> Seq Scan on pagg_tab2_p2 t2_2 - -> Seq Scan on pagg_tab2_p3 t2_3 -(15 rows) - -SELECT t1.x, sum(t1.y), count(t1) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; - x | sum | count -----+------+------- - 0 | 500 | 100 - 6 | 1100 | 100 - 12 | 700 | 100 - 18 | 1300 | 100 - 24 | 900 | 100 -(5 rows) - --- GROUP BY having other matching key -EXPLAIN (COSTS OFF) -SELECT t2.y, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t2.y ORDER BY 1, 2, 3; - QUERY PLAN 
-------------------------------------------------------------- - Sort - Sort Key: t2.y, (sum(t1.y)), (count(*)) - -> Append - -> HashAggregate - Group Key: t2.y - -> Hash Join - Hash Cond: (t1.x = t2.y) - -> Seq Scan on pagg_tab1_p1 t1 - -> Hash - -> Seq Scan on pagg_tab2_p1 t2 - -> HashAggregate - Group Key: t2_1.y - -> Hash Join - Hash Cond: (t1_1.x = t2_1.y) - -> Seq Scan on pagg_tab1_p2 t1_1 - -> Hash - -> Seq Scan on pagg_tab2_p2 t2_1 - -> HashAggregate - Group Key: t2_2.y - -> Hash Join - Hash Cond: (t2_2.y = t1_2.x) - -> Seq Scan on pagg_tab2_p3 t2_2 - -> Hash - -> Seq Scan on pagg_tab1_p3 t1_2 -(24 rows) - --- When GROUP BY clause does not match; partial aggregation is performed for each partition. --- Also test GroupAggregate paths by disabling hash aggregates. -SET enable_hashagg TO false; -EXPLAIN (COSTS OFF) -SELECT t1.y, sum(t1.x), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.y HAVING avg(t1.x) > 10 ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------------------- - Sort - Sort Key: t1.y, (sum(t1.x)), (count(*)) - -> Finalize GroupAggregate - Group Key: t1.y - Filter: (avg(t1.x) > '10'::numeric) - -> Merge Append - Sort Key: t1.y - -> Partial GroupAggregate - Group Key: t1.y - -> Sort - Sort Key: t1.y - -> Hash Join - Hash Cond: (t1.x = t2.y) - -> Seq Scan on pagg_tab1_p1 t1 - -> Hash - -> Seq Scan on pagg_tab2_p1 t2 - -> Partial GroupAggregate - Group Key: t1_1.y - -> Sort - Sort Key: t1_1.y - -> Hash Join - Hash Cond: (t1_1.x = t2_1.y) - -> Seq Scan on pagg_tab1_p2 t1_1 - -> Hash - -> Seq Scan on pagg_tab2_p2 t2_1 - -> Partial GroupAggregate - Group Key: t1_2.y - -> Sort - Sort Key: t1_2.y - -> Hash Join - Hash Cond: (t2_2.y = t1_2.x) - -> Seq Scan on pagg_tab2_p3 t2_2 - -> Hash - -> Seq Scan on pagg_tab1_p3 t1_2 -(34 rows) - -SELECT t1.y, sum(t1.x), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.y HAVING avg(t1.x) > 10 ORDER BY 1, 2, 3; - y | sum | count -----+------+------- - 2 | 600 | 50 - 4 | 1200 | 50 - 8 | 900 | 50 - 12 | 600 | 50 - 14 | 1200 | 50 - 18 | 900 | 50 -(6 rows) - -RESET enable_hashagg; --- Check with LEFT/RIGHT/FULL OUTER JOINs which produces NULL values for --- aggregation --- LEFT JOIN, should produce partial partitionwise aggregation plan as --- GROUP BY is on nullable column -EXPLAIN (COSTS OFF) -SELECT b.y, sum(a.y) FROM pagg_tab1 a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; - QUERY PLAN ------------------------------------------------------------------- - Finalize GroupAggregate - Group Key: b.y - -> Sort - Sort Key: b.y - -> Append - -> Partial HashAggregate - Group Key: b.y - -> Hash Left Join - Hash Cond: (a.x = b.y) - -> Seq Scan on pagg_tab1_p1 a - -> Hash - -> Seq Scan on pagg_tab2_p1 b - -> Partial HashAggregate - Group Key: b_1.y - -> Hash Left Join - Hash Cond: (a_1.x = b_1.y) - -> Seq Scan on pagg_tab1_p2 a_1 - -> Hash - -> Seq Scan on pagg_tab2_p2 b_1 - -> Partial HashAggregate - Group Key: b_2.y - -> Hash Right Join - Hash Cond: (b_2.y = a_2.x) - -> Seq Scan on pagg_tab2_p3 b_2 - -> Hash - -> Seq Scan on pagg_tab1_p3 a_2 -(26 rows) - -SELECT b.y, sum(a.y) FROM pagg_tab1 a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; - y | sum -----+------ - 0 | 500 - 6 | 1100 - 12 | 700 - 18 | 1300 - 24 | 900 - | 900 -(6 rows) - --- RIGHT JOIN, should produce full partitionwise aggregation plan as --- GROUP BY is on non-nullable column -EXPLAIN (COSTS OFF) -SELECT b.y, sum(a.y) FROM pagg_tab1 a RIGHT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST;
- QUERY PLAN ------------------------------------------------------------- - Sort - Sort Key: b.y - -> Append - -> HashAggregate - Group Key: b.y - -> Hash Right Join - Hash Cond: (a.x = b.y) - -> Seq Scan on pagg_tab1_p1 a - -> Hash - -> Seq Scan on pagg_tab2_p1 b - -> HashAggregate - Group Key: b_1.y - -> Hash Right Join - Hash Cond: (a_1.x = b_1.y) - -> Seq Scan on pagg_tab1_p2 a_1 - -> Hash - -> Seq Scan on pagg_tab2_p2 b_1 - -> HashAggregate - Group Key: b_2.y - -> Hash Left Join - Hash Cond: (b_2.y = a_2.x) - -> Seq Scan on pagg_tab2_p3 b_2 - -> Hash - -> Seq Scan on pagg_tab1_p3 a_2 -(24 rows) - -SELECT b.y, sum(a.y) FROM pagg_tab1 a RIGHT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; - y | sum -----+------ - 0 | 500 - 3 | - 6 | 1100 - 9 | - 12 | 700 - 15 | - 18 | 1300 - 21 | - 24 | 900 - 27 | -(10 rows) - --- FULL JOIN, should produce partial partitionwise aggregation plan as --- GROUP BY is on nullable column -EXPLAIN (COSTS OFF) -SELECT a.x, sum(b.x) FROM pagg_tab1 a FULL OUTER JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x ORDER BY 1 NULLS LAST; - QUERY PLAN ------------------------------------------------------------------- - Finalize GroupAggregate - Group Key: a.x - -> Sort - Sort Key: a.x - -> Append - -> Partial HashAggregate - Group Key: a.x - -> Hash Full Join - Hash Cond: (a.x = b.y) - -> Seq Scan on pagg_tab1_p1 a - -> Hash - -> Seq Scan on pagg_tab2_p1 b - -> Partial HashAggregate - Group Key: a_1.x - -> Hash Full Join - Hash Cond: (a_1.x = b_1.y) - -> Seq Scan on pagg_tab1_p2 a_1 - -> Hash - -> Seq Scan on pagg_tab2_p2 b_1 - -> Partial HashAggregate - Group Key: a_2.x - -> Hash Full Join - Hash Cond: (b_2.y = a_2.x) - -> Seq Scan on pagg_tab2_p3 b_2 - -> Hash - -> Seq Scan on pagg_tab1_p3 a_2 -(26 rows) - -SELECT a.x, sum(b.x) FROM pagg_tab1 a FULL OUTER JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x ORDER BY 1 NULLS LAST; - x | sum -----+------ - 0 | 500 - 2 | - 4 | - 6 | 1100 - 8 | - 10 | - 12 | 700 - 14 | - 16 | - 18 | 1300 - 20 | - 22 | - 24 | 900 - 26 | - 28 | - | 500 -(16 rows) - --- LEFT JOIN, with dummy relation on right side, ideally --- should produce full partitionwise aggregation plan as GROUP BY is on --- non-nullable columns. --- But right now we are unable to do partitionwise join in this case.
-EXPLAIN (COSTS OFF) -SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a LEFT JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: pagg_tab1.x, pagg_tab2.y - -> HashAggregate - Group Key: pagg_tab1.x, pagg_tab2.y - -> Hash Left Join - Hash Cond: (pagg_tab1.x = pagg_tab2.y) - Filter: ((pagg_tab1.x > 5) OR (pagg_tab2.y < 20)) - -> Append - -> Seq Scan on pagg_tab1_p1 pagg_tab1_1 - Filter: (x < 20) - -> Seq Scan on pagg_tab1_p2 pagg_tab1_2 - Filter: (x < 20) - -> Hash - -> Append - -> Seq Scan on pagg_tab2_p2 pagg_tab2_1 - Filter: (y > 10) - -> Seq Scan on pagg_tab2_p3 pagg_tab2_2 - Filter: (y > 10) -(18 rows) - -SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a LEFT JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; - x | y | count -----+----+------- - 6 | | 10 - 8 | | 10 - 10 | | 10 - 12 | 12 | 100 - 14 | | 10 - 16 | | 10 - 18 | 18 | 100 -(7 rows) - --- FULL JOIN, with dummy relations on both sides, ideally --- should produce partial partitionwise aggregation plan as GROUP BY is on --- nullable columns. --- But right now we are unable to do partitionwise join in this case. -EXPLAIN (COSTS OFF) -SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a FULL JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: pagg_tab1.x, pagg_tab2.y - -> HashAggregate - Group Key: pagg_tab1.x, pagg_tab2.y - -> Hash Full Join - Hash Cond: (pagg_tab1.x = pagg_tab2.y) - Filter: ((pagg_tab1.x > 5) OR (pagg_tab2.y < 20)) - -> Append - -> Seq Scan on pagg_tab1_p1 pagg_tab1_1 - Filter: (x < 20) - -> Seq Scan on pagg_tab1_p2 pagg_tab1_2 - Filter: (x < 20) - -> Hash - -> Append - -> Seq Scan on pagg_tab2_p2 pagg_tab2_1 - Filter: (y > 10) - -> Seq Scan on pagg_tab2_p3 pagg_tab2_2 - Filter: (y > 10) -(18 rows) - -SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a FULL JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; - x | y | count -----+----+------- - 6 | | 10 - 8 | | 10 - 10 | | 10 - 12 | 12 | 100 - 14 | | 10 - 16 | | 10 - 18 | 18 | 100 - | 15 | 10 -(8 rows) - --- Empty join relation because of empty outer side, no partitionwise agg plan -EXPLAIN (COSTS OFF) -SELECT a.x, a.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x = 1 AND x = 2) a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x, a.y ORDER BY 1, 2; - QUERY PLAN --------------------------------------- - GroupAggregate - Group Key: pagg_tab1.y - -> Sort - Sort Key: pagg_tab1.y - -> Result - One-Time Filter: false -(6 rows) - -SELECT a.x, a.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x = 1 AND x = 2) a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x, a.y ORDER BY 1, 2; - x | y | count ----+---+------- -(0 rows) - --- Partition by multiple columns -CREATE TABLE pagg_tab_m (a int, b int, c int) PARTITION BY RANGE(a, ((a+b)/2)); -CREATE TABLE pagg_tab_m_p1 PARTITION OF pagg_tab_m FOR VALUES FROM (0, 0) TO (12, 12); -CREATE TABLE pagg_tab_m_p2 PARTITION OF pagg_tab_m FOR VALUES FROM (12, 12) TO (22, 22); -CREATE TABLE pagg_tab_m_p3 PARTITION OF pagg_tab_m FOR VALUES FROM (22, 22) TO (30, 30); -INSERT INTO pagg_tab_m SELECT i % 30, i % 40, i % 50 FROM generate_series(0, 2999) i;
-ANALYZE pagg_tab_m; --- Partial aggregation as GROUP BY clause does not match with PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a HAVING avg(c) < 22 ORDER BY 1, 2, 3; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_m.a, (sum(pagg_tab_m.b)), (avg(pagg_tab_m.c)) - -> Finalize HashAggregate - Group Key: pagg_tab_m.a - Filter: (avg(pagg_tab_m.c) < '22'::numeric) - -> Append - -> Partial HashAggregate - Group Key: pagg_tab_m.a - -> Seq Scan on pagg_tab_m_p1 pagg_tab_m - -> Partial HashAggregate - Group Key: pagg_tab_m_1.a - -> Seq Scan on pagg_tab_m_p2 pagg_tab_m_1 - -> Partial HashAggregate - Group Key: pagg_tab_m_2.a - -> Seq Scan on pagg_tab_m_p3 pagg_tab_m_2 -(15 rows) - -SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a HAVING avg(c) < 22 ORDER BY 1, 2, 3; - a | sum | avg | count -----+------+---------------------+------- - 0 | 1500 | 20.0000000000000000 | 100 - 1 | 1600 | 21.0000000000000000 | 100 - 10 | 1500 | 20.0000000000000000 | 100 - 11 | 1600 | 21.0000000000000000 | 100 - 20 | 1500 | 20.0000000000000000 | 100 - 21 | 1600 | 21.0000000000000000 | 100 -(6 rows) - --- Full aggregation as GROUP BY clause matches with PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a, (a+b)/2 HAVING sum(b) < 50 ORDER BY 1, 2, 3; - QUERY PLAN ----------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_m.a, (sum(pagg_tab_m.b)), (avg(pagg_tab_m.c)) - -> Append - -> HashAggregate - Group Key: pagg_tab_m.a, ((pagg_tab_m.a + pagg_tab_m.b) / 2) - Filter: (sum(pagg_tab_m.b) < 50) - -> Seq Scan on pagg_tab_m_p1 pagg_tab_m - -> HashAggregate - Group Key: pagg_tab_m_1.a, ((pagg_tab_m_1.a + pagg_tab_m_1.b) / 2) - Filter: (sum(pagg_tab_m_1.b) < 50) - -> Seq Scan on pagg_tab_m_p2 pagg_tab_m_1 - -> HashAggregate - Group Key: pagg_tab_m_2.a, ((pagg_tab_m_2.a + pagg_tab_m_2.b) / 2) - Filter: (sum(pagg_tab_m_2.b) < 50) - -> Seq Scan on pagg_tab_m_p3 pagg_tab_m_2 -(15 rows) - -SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a, (a+b)/2 HAVING sum(b) < 50 ORDER BY 1, 2, 3; - a | sum | avg | count -----+-----+---------------------+------- - 0 | 0 | 20.0000000000000000 | 25 - 1 | 25 | 21.0000000000000000 | 25 - 10 | 0 | 20.0000000000000000 | 25 - 11 | 25 | 21.0000000000000000 | 25 - 20 | 0 | 20.0000000000000000 | 25 - 21 | 25 | 21.0000000000000000 | 25 -(6 rows) - --- Full aggregation as PARTITION KEY is part of GROUP BY clause -EXPLAIN (COSTS OFF) -SELECT a, c, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY (a+b)/2, 2, 1 HAVING sum(b) = 50 AND avg(c) > 25 ORDER BY 1, 2, 3; - QUERY PLAN --------------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_m.a, pagg_tab_m.c, (sum(pagg_tab_m.b)) - -> Append - -> HashAggregate - Group Key: pagg_tab_m.a, pagg_tab_m.c, ((pagg_tab_m.a + pagg_tab_m.b) / 2) - Filter: ((sum(pagg_tab_m.b) = 50) AND (avg(pagg_tab_m.c) > '25'::numeric)) - -> Seq Scan on pagg_tab_m_p1 pagg_tab_m - -> HashAggregate - Group Key: pagg_tab_m_1.a, pagg_tab_m_1.c, ((pagg_tab_m_1.a + pagg_tab_m_1.b) / 2) - Filter: ((sum(pagg_tab_m_1.b) = 50) AND (avg(pagg_tab_m_1.c) > '25'::numeric)) - -> Seq Scan on pagg_tab_m_p2 pagg_tab_m_1 - -> HashAggregate - Group Key: pagg_tab_m_2.a, pagg_tab_m_2.c, ((pagg_tab_m_2.a + pagg_tab_m_2.b) / 2) - Filter: ((sum(pagg_tab_m_2.b) = 50) AND (avg(pagg_tab_m_2.c) > '25'::numeric))
- -> Seq Scan on pagg_tab_m_p3 pagg_tab_m_2 -(15 rows) - -SELECT a, c, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY (a+b)/2, 2, 1 HAVING sum(b) = 50 AND avg(c) > 25 ORDER BY 1, 2, 3; - a | c | sum | avg | count -----+----+-----+---------------------+------- - 0 | 30 | 50 | 30.0000000000000000 | 5 - 0 | 40 | 50 | 40.0000000000000000 | 5 - 10 | 30 | 50 | 30.0000000000000000 | 5 - 10 | 40 | 50 | 40.0000000000000000 | 5 - 20 | 30 | 50 | 30.0000000000000000 | 5 - 20 | 40 | 50 | 40.0000000000000000 | 5 -(6 rows) - --- Test with multi-level partitioning scheme -CREATE TABLE pagg_tab_ml (a int, b int, c text) PARTITION BY RANGE(a); -CREATE TABLE pagg_tab_ml_p1 PARTITION OF pagg_tab_ml FOR VALUES FROM (0) TO (12); -CREATE TABLE pagg_tab_ml_p2 PARTITION OF pagg_tab_ml FOR VALUES FROM (12) TO (20) PARTITION BY LIST (c); -CREATE TABLE pagg_tab_ml_p2_s1 PARTITION OF pagg_tab_ml_p2 FOR VALUES IN ('0000', '0001', '0002'); -CREATE TABLE pagg_tab_ml_p2_s2 PARTITION OF pagg_tab_ml_p2 FOR VALUES IN ('0003'); --- This level of partitioning has different column positions than the parent -CREATE TABLE pagg_tab_ml_p3(b int, c text, a int) PARTITION BY RANGE (b); -CREATE TABLE pagg_tab_ml_p3_s1(c text, a int, b int); -CREATE TABLE pagg_tab_ml_p3_s2 PARTITION OF pagg_tab_ml_p3 FOR VALUES FROM (7) TO (10); -ALTER TABLE pagg_tab_ml_p3 ATTACH PARTITION pagg_tab_ml_p3_s1 FOR VALUES FROM (0) TO (7); -ALTER TABLE pagg_tab_ml ATTACH PARTITION pagg_tab_ml_p3 FOR VALUES FROM (20) TO (30); -INSERT INTO pagg_tab_ml SELECT i % 30, i % 10, to_char(i % 4, 'FM0000') FROM generate_series(0, 29999) i; -ANALYZE pagg_tab_ml; --- For Parallel Append -SET max_parallel_workers_per_gather TO 2; -SET parallel_setup_cost = 0; --- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY --- for level 1 only. For subpartitions, GROUP BY clause does not match with --- PARTITION KEY, but still we do not see a partial aggregation as array_agg() --- is not partial agg safe.
-EXPLAIN (COSTS OFF) -SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - QUERY PLAN --------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (array_agg(DISTINCT pagg_tab_ml.c)) - -> Gather - Workers Planned: 2 - -> Parallel Append - -> GroupAggregate - Group Key: pagg_tab_ml.a - Filter: (avg(pagg_tab_ml.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml.a, pagg_tab_ml.c - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> GroupAggregate - Group Key: pagg_tab_ml_5.a - Filter: (avg(pagg_tab_ml_5.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_5.a, pagg_tab_ml_5.c - -> Append - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5 - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6 - -> GroupAggregate - Group Key: pagg_tab_ml_2.a - Filter: (avg(pagg_tab_ml_2.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_2.a, pagg_tab_ml_2.c - -> Append - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2 - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3 -(27 rows) - -SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - a | sum | array_agg | count -----+------+-------------+------- - 0 | 0 | {0000,0002} | 1000 - 1 | 1000 | {0001,0003} | 1000 - 2 | 2000 | {0000,0002} | 1000 - 10 | 0 | {0000,0002} | 1000 - 11 | 1000 | {0001,0003} | 1000 - 12 | 2000 | {0000,0002} | 1000 - 20 | 0 | {0000,0002} | 1000 - 21 | 1000 | {0001,0003} | 1000 - 22 | 2000 | {0000,0002} | 1000 -(9 rows) - --- Without ORDER BY clause, to test Gather at top-most path -EXPLAIN (COSTS OFF) -SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3; - QUERY PLAN ---------------------------------------------------------------------------- - Gather - Workers Planned: 2 - -> Parallel Append - -> GroupAggregate - Group Key: pagg_tab_ml.a - Filter: (avg(pagg_tab_ml.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml.a, pagg_tab_ml.c - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> GroupAggregate - Group Key: pagg_tab_ml_5.a - Filter: (avg(pagg_tab_ml_5.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_5.a, pagg_tab_ml_5.c - -> Append - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5 - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6 - -> GroupAggregate - Group Key: pagg_tab_ml_2.a - Filter: (avg(pagg_tab_ml_2.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_2.a, pagg_tab_ml_2.c - -> Append - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2 - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3 -(25 rows) - -RESET parallel_setup_cost; --- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY --- for level 1 only. For subpartitions, GROUP BY clause does not match with --- PARTITION KEY, thus we will have a partial aggregation for them. 
-EXPLAIN (COSTS OFF) -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - QUERY PLAN ---------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (count(*)) - -> Append - -> HashAggregate - Group Key: pagg_tab_ml.a - Filter: (avg(pagg_tab_ml.b) < '3'::numeric) - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> Finalize GroupAggregate - Group Key: pagg_tab_ml_2.a - Filter: (avg(pagg_tab_ml_2.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_2.a - -> Append - -> Partial HashAggregate - Group Key: pagg_tab_ml_2.a - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2 - -> Partial HashAggregate - Group Key: pagg_tab_ml_3.a - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3 - -> Finalize GroupAggregate - Group Key: pagg_tab_ml_5.a - Filter: (avg(pagg_tab_ml_5.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_5.a - -> Append - -> Partial HashAggregate - Group Key: pagg_tab_ml_5.a - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5 - -> Partial HashAggregate - Group Key: pagg_tab_ml_6.a - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6 -(31 rows) - -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - a | sum | count -----+------+------- - 0 | 0 | 1000 - 1 | 1000 | 1000 - 2 | 2000 | 1000 - 10 | 0 | 1000 - 11 | 1000 | 1000 - 12 | 2000 | 1000 - 20 | 0 | 1000 - 21 | 1000 | 1000 - 22 | 2000 | 1000 -(9 rows) - --- Partial aggregation at all levels as GROUP BY clause does not match with --- PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b ORDER BY 1, 2, 3; - QUERY PLAN ---------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.b, (sum(pagg_tab_ml.a)), (count(*)) - -> Finalize GroupAggregate - Group Key: pagg_tab_ml.b - -> Sort - Sort Key: pagg_tab_ml.b - -> Append - -> Partial HashAggregate - Group Key: pagg_tab_ml.b - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> Partial HashAggregate - Group Key: pagg_tab_ml_1.b - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1 - -> Partial HashAggregate - Group Key: pagg_tab_ml_2.b - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2 - -> Partial HashAggregate - Group Key: pagg_tab_ml_3.b - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3 - -> Partial HashAggregate - Group Key: pagg_tab_ml_4.b - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4 -(22 rows) - -SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b HAVING avg(a) < 15 ORDER BY 1, 2, 3; - b | sum | count ----+-------+------- - 0 | 30000 | 3000 - 1 | 33000 | 3000 - 2 | 36000 | 3000 - 3 | 39000 | 3000 - 4 | 42000 | 3000 -(5 rows) - --- Full aggregation at all levels as GROUP BY clause matches with PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; - QUERY PLAN ----------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (count(*)) - -> Append - -> HashAggregate - Group Key: pagg_tab_ml.a, pagg_tab_ml.b, pagg_tab_ml.c - Filter: (avg(pagg_tab_ml.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> HashAggregate - Group Key: pagg_tab_ml_1.a, pagg_tab_ml_1.b, pagg_tab_ml_1.c - Filter: (avg(pagg_tab_ml_1.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1 - -> HashAggregate - Group Key: pagg_tab_ml_2.a, pagg_tab_ml_2.b, pagg_tab_ml_2.c - Filter: (avg(pagg_tab_ml_2.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2
- -> HashAggregate - Group Key: pagg_tab_ml_3.a, pagg_tab_ml_3.b, pagg_tab_ml_3.c - Filter: (avg(pagg_tab_ml_3.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3 - -> HashAggregate - Group Key: pagg_tab_ml_4.a, pagg_tab_ml_4.b, pagg_tab_ml_4.c - Filter: (avg(pagg_tab_ml_4.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4 -(23 rows) - -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; - a | sum | count -----+------+------- - 8 | 4000 | 500 - 8 | 4000 | 500 - 9 | 4500 | 500 - 9 | 4500 | 500 - 18 | 4000 | 500 - 18 | 4000 | 500 - 19 | 4500 | 500 - 19 | 4500 | 500 - 28 | 4000 | 500 - 28 | 4000 | 500 - 29 | 4500 | 500 - 29 | 4500 | 500 -(12 rows) - --- Parallelism within partitionwise aggregates -SET min_parallel_table_scan_size TO '8kB'; -SET parallel_setup_cost TO 0; --- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY --- for level 1 only. For subpartitions, GROUP BY clause does not match with --- PARTITION KEY, thus we will have a partial aggregation for them. -EXPLAIN (COSTS OFF) -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - QUERY PLAN ------------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (count(*)) - -> Append - -> Finalize GroupAggregate - Group Key: pagg_tab_ml.a - Filter: (avg(pagg_tab_ml.b) < '3'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_ml.a - -> Partial HashAggregate - Group Key: pagg_tab_ml.a - -> Parallel Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> Finalize GroupAggregate - Group Key: pagg_tab_ml_2.a - Filter: (avg(pagg_tab_ml_2.b) < '3'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_ml_2.a - -> Parallel Append - -> Partial HashAggregate - Group Key: pagg_tab_ml_2.a - -> Parallel Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2 - -> Partial HashAggregate - Group Key: pagg_tab_ml_3.a - -> Parallel Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3 - -> Finalize GroupAggregate - Group Key: pagg_tab_ml_5.a - Filter: (avg(pagg_tab_ml_5.b) < '3'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_ml_5.a - -> Parallel Append - -> Partial HashAggregate - Group Key: pagg_tab_ml_5.a - -> Parallel Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5 - -> Partial HashAggregate - Group Key: pagg_tab_ml_6.a - -> Parallel Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6 -(41 rows) - -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - a | sum | count -----+------+------- - 0 | 0 | 1000 - 1 | 1000 | 1000 - 2 | 2000 | 1000 - 10 | 0 | 1000 - 11 | 1000 | 1000 - 12 | 2000 | 1000 - 20 | 0 | 1000 - 21 | 1000 | 1000 - 22 | 2000 | 1000 -(9 rows) - --- Partial aggregation at all levels as GROUP BY clause does not match with --- PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b ORDER BY 1, 2, 3; - QUERY PLAN ------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.b, (sum(pagg_tab_ml.a)), (count(*)) - -> Finalize GroupAggregate - Group Key: pagg_tab_ml.b - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_ml.b - -> Parallel Append - -> Partial HashAggregate - Group Key: pagg_tab_ml.b - -> Parallel Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> Partial HashAggregate - Group Key: pagg_tab_ml_3.b
- -> Parallel Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3 - -> Partial HashAggregate - Group Key: pagg_tab_ml_1.b - -> Parallel Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1 - -> Partial HashAggregate - Group Key: pagg_tab_ml_4.b - -> Parallel Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4 - -> Partial HashAggregate - Group Key: pagg_tab_ml_2.b - -> Parallel Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2 -(24 rows) - -SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b HAVING avg(a) < 15 ORDER BY 1, 2, 3; - b | sum | count ----+-------+------- - 0 | 30000 | 3000 - 1 | 33000 | 3000 - 2 | 36000 | 3000 - 3 | 39000 | 3000 - 4 | 42000 | 3000 -(5 rows) - --- Full aggregation at all levels as GROUP BY clause matches with PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; - QUERY PLAN ----------------------------------------------------------------------------------- - Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (count(*)) - -> Parallel Append - -> HashAggregate - Group Key: pagg_tab_ml.a, pagg_tab_ml.b, pagg_tab_ml.c - Filter: (avg(pagg_tab_ml.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> HashAggregate - Group Key: pagg_tab_ml_3.a, pagg_tab_ml_3.b, pagg_tab_ml_3.c - Filter: (avg(pagg_tab_ml_3.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3 - -> HashAggregate - Group Key: pagg_tab_ml_1.a, pagg_tab_ml_1.b, pagg_tab_ml_1.c - Filter: (avg(pagg_tab_ml_1.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1 - -> HashAggregate - Group Key: pagg_tab_ml_4.a, pagg_tab_ml_4.b, pagg_tab_ml_4.c - Filter: (avg(pagg_tab_ml_4.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4 - -> HashAggregate - Group Key: pagg_tab_ml_2.a, pagg_tab_ml_2.b, pagg_tab_ml_2.c - Filter: (avg(pagg_tab_ml_2.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2 -(25 rows) - -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; - a | sum | count -----+------+------- - 8 | 4000 | 500 - 8 | 4000 | 500 - 9 | 4500 | 500 - 9 | 4500 | 500 - 18 | 4000 | 500 - 18 | 4000 | 500 - 19 | 4500 | 500 - 19 | 4500 | 500 - 28 | 4000 | 500 - 28 | 4000 | 500 - 29 | 4500 | 500 - 29 | 4500 | 500 -(12 rows) - --- Parallelism within partitionwise aggregates (single level) --- Add few parallel setup cost, so that we will see a plan which gathers --- partially created paths even for full aggregation and sticks a single Gather --- followed by finalization step. --- Without this, the cost of doing partial aggregation + Gather + finalization --- for each partition and then Append over it turns out to be same and this --- wins as we add it first. This parallel_setup_cost plays a vital role in --- costing such plans. -SET parallel_setup_cost TO 10; -CREATE TABLE pagg_tab_para(x int, y int) PARTITION BY RANGE(x); -CREATE TABLE pagg_tab_para_p1 PARTITION OF pagg_tab_para FOR VALUES FROM (0) TO (12); -CREATE TABLE pagg_tab_para_p2 PARTITION OF pagg_tab_para FOR VALUES FROM (12) TO (22); -CREATE TABLE pagg_tab_para_p3 PARTITION OF pagg_tab_para FOR VALUES FROM (22) TO (30); -INSERT INTO pagg_tab_para SELECT i % 30, i % 20 FROM generate_series(0, 29999) i; -ANALYZE pagg_tab_para; --- When GROUP BY clause matches; full aggregation is performed for each partition. 
-EXPLAIN (COSTS OFF) -SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_para.x, (sum(pagg_tab_para.y)), (avg(pagg_tab_para.y)) - -> Finalize GroupAggregate - Group Key: pagg_tab_para.x - Filter: (avg(pagg_tab_para.y) < '7'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_para.x - -> Parallel Append - -> Partial HashAggregate - Group Key: pagg_tab_para.x - -> Parallel Seq Scan on pagg_tab_para_p1 pagg_tab_para - -> Partial HashAggregate - Group Key: pagg_tab_para_1.x - -> Parallel Seq Scan on pagg_tab_para_p2 pagg_tab_para_1 - -> Partial HashAggregate - Group Key: pagg_tab_para_2.x - -> Parallel Seq Scan on pagg_tab_para_p3 pagg_tab_para_2 -(19 rows) - -SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - x | sum | avg | count -----+------+--------------------+------- - 0 | 5000 | 5.0000000000000000 | 1000 - 1 | 6000 | 6.0000000000000000 | 1000 - 10 | 5000 | 5.0000000000000000 | 1000 - 11 | 6000 | 6.0000000000000000 | 1000 - 20 | 5000 | 5.0000000000000000 | 1000 - 21 | 6000 | 6.0000000000000000 | 1000 -(6 rows) - --- When GROUP BY clause does not match; partial aggregation is performed for each partition. -EXPLAIN (COSTS OFF) -SELECT y, sum(x), avg(x), count(*) FROM pagg_tab_para GROUP BY y HAVING avg(x) < 12 ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_para.y, (sum(pagg_tab_para.x)), (avg(pagg_tab_para.x)) - -> Finalize GroupAggregate - Group Key: pagg_tab_para.y - Filter: (avg(pagg_tab_para.x) < '12'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_para.y - -> Parallel Append - -> Partial HashAggregate - Group Key: pagg_tab_para.y - -> Parallel Seq Scan on pagg_tab_para_p1 pagg_tab_para - -> Partial HashAggregate - Group Key: pagg_tab_para_1.y - -> Parallel Seq Scan on pagg_tab_para_p2 pagg_tab_para_1 - -> Partial HashAggregate - Group Key: pagg_tab_para_2.y - -> Parallel Seq Scan on pagg_tab_para_p3 pagg_tab_para_2 -(19 rows) - -SELECT y, sum(x), avg(x), count(*) FROM pagg_tab_para GROUP BY y HAVING avg(x) < 12 ORDER BY 1, 2, 3; - y | sum | avg | count -----+-------+---------------------+------- - 0 | 15000 | 10.0000000000000000 | 1500 - 1 | 16500 | 11.0000000000000000 | 1500 - 10 | 15000 | 10.0000000000000000 | 1500 - 11 | 16500 | 11.0000000000000000 | 1500 -(4 rows) - --- Test when parent can produce parallel paths but not any (or some) of its children --- (Use one more aggregate to tilt the cost estimates for the plan we want) -ALTER TABLE pagg_tab_para_p1 SET (parallel_workers = 0); -ALTER TABLE pagg_tab_para_p3 SET (parallel_workers = 0); -ANALYZE pagg_tab_para; -EXPLAIN (COSTS OFF) -SELECT x, sum(y), avg(y), sum(x+y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_para.x, (sum(pagg_tab_para.y)), (avg(pagg_tab_para.y)) - -> Finalize GroupAggregate - Group Key: pagg_tab_para.x - Filter: (avg(pagg_tab_para.y) < '7'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_para.x - -> Partial HashAggregate - Group Key: pagg_tab_para.x - -> Parallel Append - -> Seq Scan on pagg_tab_para_p1 pagg_tab_para_1 - -> 
Seq Scan on pagg_tab_para_p3 pagg_tab_para_3 - -> Parallel Seq Scan on pagg_tab_para_p2 pagg_tab_para_2 -(15 rows) - -SELECT x, sum(y), avg(y), sum(x+y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - x | sum | avg | sum | count -----+------+--------------------+-------+------- - 0 | 5000 | 5.0000000000000000 | 5000 | 1000 - 1 | 6000 | 6.0000000000000000 | 7000 | 1000 - 10 | 5000 | 5.0000000000000000 | 15000 | 1000 - 11 | 6000 | 6.0000000000000000 | 17000 | 1000 - 20 | 5000 | 5.0000000000000000 | 25000 | 1000 - 21 | 6000 | 6.0000000000000000 | 27000 | 1000 -(6 rows) - -ALTER TABLE pagg_tab_para_p2 SET (parallel_workers = 0); -ANALYZE pagg_tab_para; -EXPLAIN (COSTS OFF) -SELECT x, sum(y), avg(y), sum(x+y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - QUERY PLAN ----------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_para.x, (sum(pagg_tab_para.y)), (avg(pagg_tab_para.y)) - -> Finalize GroupAggregate - Group Key: pagg_tab_para.x - Filter: (avg(pagg_tab_para.y) < '7'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_para.x - -> Partial HashAggregate - Group Key: pagg_tab_para.x - -> Parallel Append - -> Seq Scan on pagg_tab_para_p1 pagg_tab_para_1 - -> Seq Scan on pagg_tab_para_p2 pagg_tab_para_2 - -> Seq Scan on pagg_tab_para_p3 pagg_tab_para_3 -(15 rows) - -SELECT x, sum(y), avg(y), sum(x+y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - x | sum | avg | sum | count -----+------+--------------------+-------+------- - 0 | 5000 | 5.0000000000000000 | 5000 | 1000 - 1 | 6000 | 6.0000000000000000 | 7000 | 1000 - 10 | 5000 | 5.0000000000000000 | 15000 | 1000 - 11 | 6000 | 6.0000000000000000 | 17000 | 1000 - 20 | 5000 | 5.0000000000000000 | 25000 | 1000 - 21 | 6000 | 6.0000000000000000 | 27000 | 1000 -(6 rows) - --- Reset parallelism parameters to get partitionwise aggregation plan. -RESET min_parallel_table_scan_size; -RESET parallel_setup_cost; -EXPLAIN (COSTS OFF) -SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - QUERY PLAN ------------------------------------------------------------------------------ - Sort - Sort Key: pagg_tab_para.x, (sum(pagg_tab_para.y)), (avg(pagg_tab_para.y)) - -> Append - -> HashAggregate - Group Key: pagg_tab_para.x - Filter: (avg(pagg_tab_para.y) < '7'::numeric) - -> Seq Scan on pagg_tab_para_p1 pagg_tab_para - -> HashAggregate - Group Key: pagg_tab_para_1.x - Filter: (avg(pagg_tab_para_1.y) < '7'::numeric) - -> Seq Scan on pagg_tab_para_p2 pagg_tab_para_1 - -> HashAggregate - Group Key: pagg_tab_para_2.x - Filter: (avg(pagg_tab_para_2.y) < '7'::numeric) - -> Seq Scan on pagg_tab_para_p3 pagg_tab_para_2 -(15 rows) - -SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - x | sum | avg | count -----+------+--------------------+------- - 0 | 5000 | 5.0000000000000000 | 1000 - 1 | 6000 | 6.0000000000000000 | 1000 - 10 | 5000 | 5.0000000000000000 | 1000 - 11 | 6000 | 6.0000000000000000 | 1000 - 20 | 5000 | 5.0000000000000000 | 1000 - 21 | 6000 | 6.0000000000000000 | 1000 -(6 rows) - +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? 
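[Editor's note] Every hunk from here on collapses the same way: the entire expected output is deleted and replaced by the psql "connection to server on socket ... failed: No such file or directory" error. The server was already gone before these scripts ran, so the partition_info, tuplesort, and explain diffs below are downstream symptoms of one earlier crash, not independent regressions in those test files. For orientation, the partitionwise-aggregate plan shapes above hinge on a few GUCs; a minimal sketch to reproduce them by hand (pagg_demo is a hypothetical partitioned table, not part of the test suite):

    SET enable_partitionwise_aggregate = on;  -- allow per-partition (partial) aggregation
    SET parallel_setup_cost = 0;              -- make parallel plans cheap enough to pick
    SET min_parallel_table_scan_size = 0;     -- parallelize even tiny partitions
    EXPLAIN (COSTS OFF)
    SELECT x, sum(y) FROM pagg_demo GROUP BY x;
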
diff -U3 /Users/admin/pgsql/src/test/regress/expected/partition_info.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/partition_info.out --- /Users/admin/pgsql/src/test/regress/expected/partition_info.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/partition_info.out 2024-12-13 13:20:13 @@ -1,351 +1,2 @@ --- --- Tests for functions providing information about partitions --- -SELECT * FROM pg_partition_tree(NULL); - relid | parentrelid | isleaf | level --------+-------------+--------+------- -(0 rows) - -SELECT * FROM pg_partition_tree(0); - relid | parentrelid | isleaf | level --------+-------------+--------+------- -(0 rows) - -SELECT * FROM pg_partition_ancestors(NULL); - relid -------- -(0 rows) - -SELECT * FROM pg_partition_ancestors(0); - relid -------- -(0 rows) - -SELECT pg_partition_root(NULL); - pg_partition_root -------------------- - -(1 row) - -SELECT pg_partition_root(0); - pg_partition_root -------------------- - -(1 row) - --- Test table partition trees -CREATE TABLE ptif_test (a int, b int) PARTITION BY range (a); -CREATE TABLE ptif_test0 PARTITION OF ptif_test - FOR VALUES FROM (minvalue) TO (0) PARTITION BY list (b); -CREATE TABLE ptif_test01 PARTITION OF ptif_test0 FOR VALUES IN (1); -CREATE TABLE ptif_test1 PARTITION OF ptif_test - FOR VALUES FROM (0) TO (100) PARTITION BY list (b); -CREATE TABLE ptif_test11 PARTITION OF ptif_test1 FOR VALUES IN (1); -CREATE TABLE ptif_test2 PARTITION OF ptif_test - FOR VALUES FROM (100) TO (200); --- This partitioned table should remain with no partitions. -CREATE TABLE ptif_test3 PARTITION OF ptif_test - FOR VALUES FROM (200) TO (maxvalue) PARTITION BY list (b); --- Test pg_partition_root for tables -SELECT pg_partition_root('ptif_test'); - pg_partition_root -------------------- - ptif_test -(1 row) - -SELECT pg_partition_root('ptif_test0'); - pg_partition_root -------------------- - ptif_test -(1 row) - -SELECT pg_partition_root('ptif_test01'); - pg_partition_root -------------------- - ptif_test -(1 row) - -SELECT pg_partition_root('ptif_test3'); - pg_partition_root -------------------- - ptif_test -(1 row) - --- Test index partition tree -CREATE INDEX ptif_test_index ON ONLY ptif_test (a); -CREATE INDEX ptif_test0_index ON ONLY ptif_test0 (a); -ALTER INDEX ptif_test_index ATTACH PARTITION ptif_test0_index; -CREATE INDEX ptif_test01_index ON ptif_test01 (a); -ALTER INDEX ptif_test0_index ATTACH PARTITION ptif_test01_index; -CREATE INDEX ptif_test1_index ON ONLY ptif_test1 (a); -ALTER INDEX ptif_test_index ATTACH PARTITION ptif_test1_index; -CREATE INDEX ptif_test11_index ON ptif_test11 (a); -ALTER INDEX ptif_test1_index ATTACH PARTITION ptif_test11_index; -CREATE INDEX ptif_test2_index ON ptif_test2 (a); -ALTER INDEX ptif_test_index ATTACH PARTITION ptif_test2_index; -CREATE INDEX ptif_test3_index ON ptif_test3 (a); -ALTER INDEX ptif_test_index ATTACH PARTITION ptif_test3_index; --- Test pg_partition_root for indexes -SELECT pg_partition_root('ptif_test_index'); - pg_partition_root -------------------- - ptif_test_index -(1 row) - -SELECT pg_partition_root('ptif_test0_index'); - pg_partition_root -------------------- - ptif_test_index -(1 row) - -SELECT pg_partition_root('ptif_test01_index'); - pg_partition_root -------------------- - ptif_test_index -(1 row) - -SELECT pg_partition_root('ptif_test3_index'); - pg_partition_root -------------------- - ptif_test_index -(1 row) - --- List all tables members of the tree -SELECT relid, parentrelid, level, 
isleaf - FROM pg_partition_tree('ptif_test'); - relid | parentrelid | level | isleaf --------------+-------------+-------+-------- - ptif_test | | 0 | f - ptif_test0 | ptif_test | 1 | f - ptif_test1 | ptif_test | 1 | f - ptif_test2 | ptif_test | 1 | t - ptif_test3 | ptif_test | 1 | f - ptif_test01 | ptif_test0 | 2 | t - ptif_test11 | ptif_test1 | 2 | t -(7 rows) - --- List tables from an intermediate level -SELECT relid, parentrelid, level, isleaf - FROM pg_partition_tree('ptif_test0') p - JOIN pg_class c ON (p.relid = c.oid); - relid | parentrelid | level | isleaf --------------+-------------+-------+-------- - ptif_test0 | ptif_test | 0 | f - ptif_test01 | ptif_test0 | 1 | t -(2 rows) - --- List from leaf table -SELECT relid, parentrelid, level, isleaf - FROM pg_partition_tree('ptif_test01') p - JOIN pg_class c ON (p.relid = c.oid); - relid | parentrelid | level | isleaf --------------+-------------+-------+-------- - ptif_test01 | ptif_test0 | 0 | t -(1 row) - --- List from partitioned table with no partitions -SELECT relid, parentrelid, level, isleaf - FROM pg_partition_tree('ptif_test3') p - JOIN pg_class c ON (p.relid = c.oid); - relid | parentrelid | level | isleaf -------------+-------------+-------+-------- - ptif_test3 | ptif_test | 0 | f -(1 row) - --- List all ancestors of root and leaf tables -SELECT * FROM pg_partition_ancestors('ptif_test01'); - relid -------------- - ptif_test01 - ptif_test0 - ptif_test -(3 rows) - -SELECT * FROM pg_partition_ancestors('ptif_test'); - relid ------------ - ptif_test -(1 row) - --- List all members using pg_partition_root with leaf table reference -SELECT relid, parentrelid, level, isleaf - FROM pg_partition_tree(pg_partition_root('ptif_test01')) p - JOIN pg_class c ON (p.relid = c.oid); - relid | parentrelid | level | isleaf --------------+-------------+-------+-------- - ptif_test | | 0 | f - ptif_test0 | ptif_test | 1 | f - ptif_test1 | ptif_test | 1 | f - ptif_test2 | ptif_test | 1 | t - ptif_test3 | ptif_test | 1 | f - ptif_test01 | ptif_test0 | 2 | t - ptif_test11 | ptif_test1 | 2 | t -(7 rows) - --- List all indexes members of the tree -SELECT relid, parentrelid, level, isleaf - FROM pg_partition_tree('ptif_test_index'); - relid | parentrelid | level | isleaf --------------------+------------------+-------+-------- - ptif_test_index | | 0 | f - ptif_test0_index | ptif_test_index | 1 | f - ptif_test1_index | ptif_test_index | 1 | f - ptif_test2_index | ptif_test_index | 1 | t - ptif_test3_index | ptif_test_index | 1 | f - ptif_test01_index | ptif_test0_index | 2 | t - ptif_test11_index | ptif_test1_index | 2 | t -(7 rows) - --- List indexes from an intermediate level -SELECT relid, parentrelid, level, isleaf - FROM pg_partition_tree('ptif_test0_index') p - JOIN pg_class c ON (p.relid = c.oid); - relid | parentrelid | level | isleaf --------------------+------------------+-------+-------- - ptif_test0_index | ptif_test_index | 0 | f - ptif_test01_index | ptif_test0_index | 1 | t -(2 rows) - --- List from leaf index -SELECT relid, parentrelid, level, isleaf - FROM pg_partition_tree('ptif_test01_index') p - JOIN pg_class c ON (p.relid = c.oid); - relid | parentrelid | level | isleaf --------------------+------------------+-------+-------- - ptif_test01_index | ptif_test0_index | 0 | t -(1 row) - --- List from partitioned index with no partitions -SELECT relid, parentrelid, level, isleaf - FROM pg_partition_tree('ptif_test3_index') p - JOIN pg_class c ON (p.relid = c.oid); - relid | parentrelid | level | isleaf 
-------------------+-----------------+-------+-------- - ptif_test3_index | ptif_test_index | 0 | f -(1 row) - --- List all members using pg_partition_root with leaf index reference -SELECT relid, parentrelid, level, isleaf - FROM pg_partition_tree(pg_partition_root('ptif_test01_index')) p - JOIN pg_class c ON (p.relid = c.oid); - relid | parentrelid | level | isleaf --------------------+------------------+-------+-------- - ptif_test_index | | 0 | f - ptif_test0_index | ptif_test_index | 1 | f - ptif_test1_index | ptif_test_index | 1 | f - ptif_test2_index | ptif_test_index | 1 | t - ptif_test3_index | ptif_test_index | 1 | f - ptif_test01_index | ptif_test0_index | 2 | t - ptif_test11_index | ptif_test1_index | 2 | t -(7 rows) - --- List all ancestors of root and leaf indexes -SELECT * FROM pg_partition_ancestors('ptif_test01_index'); - relid -------------------- - ptif_test01_index - ptif_test0_index - ptif_test_index -(3 rows) - -SELECT * FROM pg_partition_ancestors('ptif_test_index'); - relid ------------------ - ptif_test_index -(1 row) - -DROP TABLE ptif_test; --- Table that is not part of any partition tree is not listed. -CREATE TABLE ptif_normal_table(a int); -SELECT relid, parentrelid, level, isleaf - FROM pg_partition_tree('ptif_normal_table'); - relid | parentrelid | level | isleaf --------+-------------+-------+-------- -(0 rows) - -SELECT * FROM pg_partition_ancestors('ptif_normal_table'); - relid -------- -(0 rows) - -SELECT pg_partition_root('ptif_normal_table'); - pg_partition_root -------------------- - -(1 row) - -DROP TABLE ptif_normal_table; --- Various partitioning-related functions return empty/NULL if passed relations --- of types that cannot be part of a partition tree; for example, views, --- materialized views, legacy inheritance children or parents, etc. 
-CREATE VIEW ptif_test_view AS SELECT 1; -CREATE MATERIALIZED VIEW ptif_test_matview AS SELECT 1; -CREATE TABLE ptif_li_parent (); -CREATE TABLE ptif_li_child () INHERITS (ptif_li_parent); -SELECT * FROM pg_partition_tree('ptif_test_view'); - relid | parentrelid | isleaf | level --------+-------------+--------+------- -(0 rows) - -SELECT * FROM pg_partition_tree('ptif_test_matview'); - relid | parentrelid | isleaf | level --------+-------------+--------+------- -(0 rows) - -SELECT * FROM pg_partition_tree('ptif_li_parent'); - relid | parentrelid | isleaf | level --------+-------------+--------+------- -(0 rows) - -SELECT * FROM pg_partition_tree('ptif_li_child'); - relid | parentrelid | isleaf | level --------+-------------+--------+------- -(0 rows) - -SELECT * FROM pg_partition_ancestors('ptif_test_view'); - relid -------- -(0 rows) - -SELECT * FROM pg_partition_ancestors('ptif_test_matview'); - relid -------- -(0 rows) - -SELECT * FROM pg_partition_ancestors('ptif_li_parent'); - relid -------- -(0 rows) - -SELECT * FROM pg_partition_ancestors('ptif_li_child'); - relid -------- -(0 rows) - -SELECT pg_partition_root('ptif_test_view'); - pg_partition_root -------------------- - -(1 row) - -SELECT pg_partition_root('ptif_test_matview'); - pg_partition_root -------------------- - -(1 row) - -SELECT pg_partition_root('ptif_li_parent'); - pg_partition_root -------------------- - -(1 row) - -SELECT pg_partition_root('ptif_li_child'); - pg_partition_root -------------------- - -(1 row) - -DROP VIEW ptif_test_view; -DROP MATERIALIZED VIEW ptif_test_matview; -DROP TABLE ptif_li_parent, ptif_li_child; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/tuplesort.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/tuplesort.out --- /Users/admin/pgsql/src/test/regress/expected/tuplesort.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/tuplesort.out 2024-12-13 13:20:13 @@ -1,705 +1,2 @@ --- only use parallelism when explicitly intending to do so -SET max_parallel_maintenance_workers = 0; -SET max_parallel_workers = 0; --- A table with contents that, when sorted, triggers abbreviated --- key aborts. One easy way to achieve that is to use uuids that all --- have the same prefix, as abbreviated keys for uuids just use the --- first sizeof(Datum) bytes. 
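-- [Editor's aside, not part of the diff] On 64-bit builds sizeof(Datum) is 8
-- bytes, i.e. the first 16 hex digits of the uuid text form, so values sharing
-- that prefix all abbreviate to the same key; once the sorter has seen enough
-- of those it aborts abbreviation and falls back to full comparisons. The
-- generator used just below produces exactly such values, e.g.:
--   SELECT ('00000000-0000-0000-0000-'||to_char(42, '000000000000FM'))::uuid;
--   => 00000000-0000-0000-0000-000000000042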
-CREATE TEMP TABLE abbrev_abort_uuids ( - id serial not null, - abort_increasing uuid, - abort_decreasing uuid, - noabort_increasing uuid, - noabort_decreasing uuid); -INSERT INTO abbrev_abort_uuids (abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing) - SELECT - ('00000000-0000-0000-0000-'||to_char(g.i, '000000000000FM'))::uuid abort_increasing, - ('00000000-0000-0000-0000-'||to_char(20000 - g.i, '000000000000FM'))::uuid abort_decreasing, - (to_char(g.i % 10009, '00000000FM')||'-0000-0000-0000-'||to_char(g.i, '000000000000FM'))::uuid noabort_increasing, - (to_char(((20000 - g.i) % 10009), '00000000FM')||'-0000-0000-0000-'||to_char(20000 - g.i, '000000000000FM'))::uuid noabort_decreasing - FROM generate_series(0, 20000, 1) g(i); --- and a few NULLs -INSERT INTO abbrev_abort_uuids(id) VALUES(0); -INSERT INTO abbrev_abort_uuids DEFAULT VALUES; -INSERT INTO abbrev_abort_uuids DEFAULT VALUES; --- add just a few duplicates -INSERT INTO abbrev_abort_uuids (abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing) - SELECT abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing - FROM abbrev_abort_uuids - WHERE (id < 10 OR id > 19990) AND id % 3 = 0 AND abort_increasing is not null; ----- --- Check sort node uses of tuplesort wrt. abbreviated keys ----- --- plain sort triggering abbreviated abort -SELECT abort_increasing, abort_decreasing FROM abbrev_abort_uuids ORDER BY abort_increasing OFFSET 20000 - 4; - abort_increasing | abort_decreasing ---------------------------------------+-------------------------------------- - 00000000-0000-0000-0000-000000019992 | 00000000-0000-0000-0000-000000000008 - 00000000-0000-0000-0000-000000019993 | 00000000-0000-0000-0000-000000000007 - 00000000-0000-0000-0000-000000019994 | 00000000-0000-0000-0000-000000000006 - 00000000-0000-0000-0000-000000019994 | 00000000-0000-0000-0000-000000000006 - 00000000-0000-0000-0000-000000019995 | 00000000-0000-0000-0000-000000000005 - 00000000-0000-0000-0000-000000019996 | 00000000-0000-0000-0000-000000000004 - 00000000-0000-0000-0000-000000019997 | 00000000-0000-0000-0000-000000000003 - 00000000-0000-0000-0000-000000019997 | 00000000-0000-0000-0000-000000000003 - 00000000-0000-0000-0000-000000019998 | 00000000-0000-0000-0000-000000000002 - 00000000-0000-0000-0000-000000019999 | 00000000-0000-0000-0000-000000000001 - 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 - 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 - | - | - | -(15 rows) - -SELECT abort_increasing, abort_decreasing FROM abbrev_abort_uuids ORDER BY abort_decreasing NULLS FIRST OFFSET 20000 - 4; - abort_increasing | abort_decreasing ---------------------------------------+-------------------------------------- - 00000000-0000-0000-0000-000000000011 | 00000000-0000-0000-0000-000000019989 - 00000000-0000-0000-0000-000000000010 | 00000000-0000-0000-0000-000000019990 - 00000000-0000-0000-0000-000000000009 | 00000000-0000-0000-0000-000000019991 - 00000000-0000-0000-0000-000000000008 | 00000000-0000-0000-0000-000000019992 - 00000000-0000-0000-0000-000000000008 | 00000000-0000-0000-0000-000000019992 - 00000000-0000-0000-0000-000000000007 | 00000000-0000-0000-0000-000000019993 - 00000000-0000-0000-0000-000000000006 | 00000000-0000-0000-0000-000000019994 - 00000000-0000-0000-0000-000000000005 | 00000000-0000-0000-0000-000000019995 - 00000000-0000-0000-0000-000000000005 | 00000000-0000-0000-0000-000000019995 - 00000000-0000-0000-0000-000000000004 | 
00000000-0000-0000-0000-000000019996 - 00000000-0000-0000-0000-000000000003 | 00000000-0000-0000-0000-000000019997 - 00000000-0000-0000-0000-000000000002 | 00000000-0000-0000-0000-000000019998 - 00000000-0000-0000-0000-000000000002 | 00000000-0000-0000-0000-000000019998 - 00000000-0000-0000-0000-000000000001 | 00000000-0000-0000-0000-000000019999 - 00000000-0000-0000-0000-000000000000 | 00000000-0000-0000-0000-000000020000 -(15 rows) - --- plain sort not triggering abbreviated abort -SELECT noabort_increasing, noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_increasing OFFSET 20000 - 4; - noabort_increasing | noabort_decreasing ---------------------------------------+-------------------------------------- - 00009997-0000-0000-0000-000000009997 | 00010003-0000-0000-0000-000000010003 - 00009998-0000-0000-0000-000000009998 | 00010002-0000-0000-0000-000000010002 - 00009999-0000-0000-0000-000000009999 | 00010001-0000-0000-0000-000000010001 - 00010000-0000-0000-0000-000000010000 | 00010000-0000-0000-0000-000000010000 - 00010001-0000-0000-0000-000000010001 | 00009999-0000-0000-0000-000000009999 - 00010002-0000-0000-0000-000000010002 | 00009998-0000-0000-0000-000000009998 - 00010003-0000-0000-0000-000000010003 | 00009997-0000-0000-0000-000000009997 - 00010004-0000-0000-0000-000000010004 | 00009996-0000-0000-0000-000000009996 - 00010005-0000-0000-0000-000000010005 | 00009995-0000-0000-0000-000000009995 - 00010006-0000-0000-0000-000000010006 | 00009994-0000-0000-0000-000000009994 - 00010007-0000-0000-0000-000000010007 | 00009993-0000-0000-0000-000000009993 - 00010008-0000-0000-0000-000000010008 | 00009992-0000-0000-0000-000000009992 - | - | - | -(15 rows) - -SELECT noabort_increasing, noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_decreasing NULLS FIRST OFFSET 20000 - 4; - noabort_increasing | noabort_decreasing ---------------------------------------+-------------------------------------- - 00010006-0000-0000-0000-000000010006 | 00009994-0000-0000-0000-000000009994 - 00010005-0000-0000-0000-000000010005 | 00009995-0000-0000-0000-000000009995 - 00010004-0000-0000-0000-000000010004 | 00009996-0000-0000-0000-000000009996 - 00010003-0000-0000-0000-000000010003 | 00009997-0000-0000-0000-000000009997 - 00010002-0000-0000-0000-000000010002 | 00009998-0000-0000-0000-000000009998 - 00010001-0000-0000-0000-000000010001 | 00009999-0000-0000-0000-000000009999 - 00010000-0000-0000-0000-000000010000 | 00010000-0000-0000-0000-000000010000 - 00009999-0000-0000-0000-000000009999 | 00010001-0000-0000-0000-000000010001 - 00009998-0000-0000-0000-000000009998 | 00010002-0000-0000-0000-000000010002 - 00009997-0000-0000-0000-000000009997 | 00010003-0000-0000-0000-000000010003 - 00009996-0000-0000-0000-000000009996 | 00010004-0000-0000-0000-000000010004 - 00009995-0000-0000-0000-000000009995 | 00010005-0000-0000-0000-000000010005 - 00009994-0000-0000-0000-000000009994 | 00010006-0000-0000-0000-000000010006 - 00009993-0000-0000-0000-000000009993 | 00010007-0000-0000-0000-000000010007 - 00009992-0000-0000-0000-000000009992 | 00010008-0000-0000-0000-000000010008 -(15 rows) - --- bounded sort (disables abbreviated keys) -SELECT abort_increasing, noabort_increasing FROM abbrev_abort_uuids ORDER BY abort_increasing LIMIT 5; - abort_increasing | noabort_increasing ---------------------------------------+-------------------------------------- - 00000000-0000-0000-0000-000000000000 | 00000000-0000-0000-0000-000000000000 - 00000000-0000-0000-0000-000000000001 | 00000001-0000-0000-0000-000000000001 - 
00000000-0000-0000-0000-000000000002 | 00000002-0000-0000-0000-000000000002 - 00000000-0000-0000-0000-000000000002 | 00000002-0000-0000-0000-000000000002 - 00000000-0000-0000-0000-000000000003 | 00000003-0000-0000-0000-000000000003 -(5 rows) - -SELECT abort_increasing, noabort_increasing FROM abbrev_abort_uuids ORDER BY noabort_increasing NULLS FIRST LIMIT 5; - abort_increasing | noabort_increasing ---------------------------------------+-------------------------------------- - | - | - | - 00000000-0000-0000-0000-000000000000 | 00000000-0000-0000-0000-000000000000 - 00000000-0000-0000-0000-000000010009 | 00000000-0000-0000-0000-000000010009 -(5 rows) - ----- --- Check index creation uses of tuplesort wrt. abbreviated keys ----- --- index creation using abbreviated keys successfully -CREATE INDEX abbrev_abort_uuids__noabort_increasing_idx ON abbrev_abort_uuids (noabort_increasing); -CREATE INDEX abbrev_abort_uuids__noabort_decreasing_idx ON abbrev_abort_uuids (noabort_decreasing); --- verify -EXPLAIN (COSTS OFF) -SELECT id, noabort_increasing, noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_increasing LIMIT 5; - QUERY PLAN ------------------------------------------------------------------------------------------ - Limit - -> Index Scan using abbrev_abort_uuids__noabort_increasing_idx on abbrev_abort_uuids -(2 rows) - -SELECT id, noabort_increasing, noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_increasing LIMIT 5; - id | noabort_increasing | noabort_decreasing --------+--------------------------------------+-------------------------------------- - 1 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 - 10010 | 00000000-0000-0000-0000-000000010009 | 00009991-0000-0000-0000-000000009991 - 2 | 00000001-0000-0000-0000-000000000001 | 00009990-0000-0000-0000-000000019999 - 10011 | 00000001-0000-0000-0000-000000010010 | 00009990-0000-0000-0000-000000009990 - 3 | 00000002-0000-0000-0000-000000000002 | 00009989-0000-0000-0000-000000019998 -(5 rows) - -EXPLAIN (COSTS OFF) -SELECT id, noabort_increasing, noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_decreasing LIMIT 5; - QUERY PLAN ------------------------------------------------------------------------------------------ - Limit - -> Index Scan using abbrev_abort_uuids__noabort_decreasing_idx on abbrev_abort_uuids -(2 rows) - -SELECT id, noabort_increasing, noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_decreasing LIMIT 5; - id | noabort_increasing | noabort_decreasing --------+--------------------------------------+-------------------------------------- - 20001 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 - 20010 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 - 9992 | 00009991-0000-0000-0000-000000009991 | 00000000-0000-0000-0000-000000010009 - 20000 | 00009990-0000-0000-0000-000000019999 | 00000001-0000-0000-0000-000000000001 - 9991 | 00009990-0000-0000-0000-000000009990 | 00000001-0000-0000-0000-000000010010 -(5 rows) - --- index creation using abbreviated keys, hitting abort -CREATE INDEX abbrev_abort_uuids__abort_increasing_idx ON abbrev_abort_uuids (abort_increasing); -CREATE INDEX abbrev_abort_uuids__abort_decreasing_idx ON abbrev_abort_uuids (abort_decreasing); --- verify -EXPLAIN (COSTS OFF) -SELECT id, abort_increasing, abort_decreasing FROM abbrev_abort_uuids ORDER BY abort_increasing LIMIT 5; - QUERY PLAN ---------------------------------------------------------------------------------------- - Limit 
- -> Index Scan using abbrev_abort_uuids__abort_increasing_idx on abbrev_abort_uuids -(2 rows) - -SELECT id, abort_increasing, abort_decreasing FROM abbrev_abort_uuids ORDER BY abort_increasing LIMIT 5; - id | abort_increasing | abort_decreasing --------+--------------------------------------+-------------------------------------- - 1 | 00000000-0000-0000-0000-000000000000 | 00000000-0000-0000-0000-000000020000 - 2 | 00000000-0000-0000-0000-000000000001 | 00000000-0000-0000-0000-000000019999 - 3 | 00000000-0000-0000-0000-000000000002 | 00000000-0000-0000-0000-000000019998 - 20004 | 00000000-0000-0000-0000-000000000002 | 00000000-0000-0000-0000-000000019998 - 4 | 00000000-0000-0000-0000-000000000003 | 00000000-0000-0000-0000-000000019997 -(5 rows) - -EXPLAIN (COSTS OFF) -SELECT id, abort_increasing, abort_decreasing FROM abbrev_abort_uuids ORDER BY abort_decreasing LIMIT 5; - QUERY PLAN ---------------------------------------------------------------------------------------- - Limit - -> Index Scan using abbrev_abort_uuids__abort_decreasing_idx on abbrev_abort_uuids -(2 rows) - -SELECT id, abort_increasing, abort_decreasing FROM abbrev_abort_uuids ORDER BY abort_decreasing LIMIT 5; - id | abort_increasing | abort_decreasing --------+--------------------------------------+-------------------------------------- - 20001 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 - 20010 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 - 20000 | 00000000-0000-0000-0000-000000019999 | 00000000-0000-0000-0000-000000000001 - 19999 | 00000000-0000-0000-0000-000000019998 | 00000000-0000-0000-0000-000000000002 - 19998 | 00000000-0000-0000-0000-000000019997 | 00000000-0000-0000-0000-000000000003 -(5 rows) - ----- --- Check CLUSTER uses of tuplesort wrt. 
abbreviated keys ----- --- when aborting, increasing order -BEGIN; -SET LOCAL enable_indexscan = false; -CLUSTER abbrev_abort_uuids USING abbrev_abort_uuids__abort_increasing_idx; --- head -SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing -FROM abbrev_abort_uuids -ORDER BY ctid LIMIT 5; - id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing --------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- - 1 | 00000000-0000-0000-0000-000000000000 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 - 2 | 00000000-0000-0000-0000-000000000001 | 00000000-0000-0000-0000-000000019999 | 00000001-0000-0000-0000-000000000001 | 00009990-0000-0000-0000-000000019999 - 3 | 00000000-0000-0000-0000-000000000002 | 00000000-0000-0000-0000-000000019998 | 00000002-0000-0000-0000-000000000002 | 00009989-0000-0000-0000-000000019998 - 20004 | 00000000-0000-0000-0000-000000000002 | 00000000-0000-0000-0000-000000019998 | 00000002-0000-0000-0000-000000000002 | 00009989-0000-0000-0000-000000019998 - 4 | 00000000-0000-0000-0000-000000000003 | 00000000-0000-0000-0000-000000019997 | 00000003-0000-0000-0000-000000000003 | 00009988-0000-0000-0000-000000019997 -(5 rows) - --- tail -SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing -FROM abbrev_abort_uuids -ORDER BY ctid DESC LIMIT 5; - id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing --------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- - 0 | | | | - 20002 | | | | - 20003 | | | | - 20001 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 - 20010 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 -(5 rows) - -ROLLBACK; --- when aborting, decreasing order -BEGIN; -SET LOCAL enable_indexscan = false; -CLUSTER abbrev_abort_uuids USING abbrev_abort_uuids__abort_decreasing_idx; --- head -SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing -FROM abbrev_abort_uuids -ORDER BY ctid LIMIT 5; - id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing --------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- - 20010 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 - 20001 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 - 20000 | 00000000-0000-0000-0000-000000019999 | 00000000-0000-0000-0000-000000000001 | 00009990-0000-0000-0000-000000019999 | 00000001-0000-0000-0000-000000000001 - 19999 | 00000000-0000-0000-0000-000000019998 | 00000000-0000-0000-0000-000000000002 | 00009989-0000-0000-0000-000000019998 | 00000002-0000-0000-0000-000000000002 - 20009 | 00000000-0000-0000-0000-000000019997 | 00000000-0000-0000-0000-000000000003 | 00009988-0000-0000-0000-000000019997 | 
00000003-0000-0000-0000-000000000003 -(5 rows) - --- tail -SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing -FROM abbrev_abort_uuids -ORDER BY ctid DESC LIMIT 5; - id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing --------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- - 0 | | | | - 20002 | | | | - 20003 | | | | - 1 | 00000000-0000-0000-0000-000000000000 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 - 2 | 00000000-0000-0000-0000-000000000001 | 00000000-0000-0000-0000-000000019999 | 00000001-0000-0000-0000-000000000001 | 00009990-0000-0000-0000-000000019999 -(5 rows) - -ROLLBACK; --- when not aborting, increasing order -BEGIN; -SET LOCAL enable_indexscan = false; -CLUSTER abbrev_abort_uuids USING abbrev_abort_uuids__noabort_increasing_idx; --- head -SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing -FROM abbrev_abort_uuids -ORDER BY ctid LIMIT 5; - id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing --------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- - 1 | 00000000-0000-0000-0000-000000000000 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 - 10010 | 00000000-0000-0000-0000-000000010009 | 00000000-0000-0000-0000-000000009991 | 00000000-0000-0000-0000-000000010009 | 00009991-0000-0000-0000-000000009991 - 2 | 00000000-0000-0000-0000-000000000001 | 00000000-0000-0000-0000-000000019999 | 00000001-0000-0000-0000-000000000001 | 00009990-0000-0000-0000-000000019999 - 10011 | 00000000-0000-0000-0000-000000010010 | 00000000-0000-0000-0000-000000009990 | 00000001-0000-0000-0000-000000010010 | 00009990-0000-0000-0000-000000009990 - 20004 | 00000000-0000-0000-0000-000000000002 | 00000000-0000-0000-0000-000000019998 | 00000002-0000-0000-0000-000000000002 | 00009989-0000-0000-0000-000000019998 -(5 rows) - --- tail -SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing -FROM abbrev_abort_uuids -ORDER BY ctid DESC LIMIT 5; - id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing --------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- - 0 | | | | - 20002 | | | | - 20003 | | | | - 10009 | 00000000-0000-0000-0000-000000010008 | 00000000-0000-0000-0000-000000009992 | 00010008-0000-0000-0000-000000010008 | 00009992-0000-0000-0000-000000009992 - 10008 | 00000000-0000-0000-0000-000000010007 | 00000000-0000-0000-0000-000000009993 | 00010007-0000-0000-0000-000000010007 | 00009993-0000-0000-0000-000000009993 -(5 rows) - -ROLLBACK; --- when no aborting, decreasing order -BEGIN; -SET LOCAL enable_indexscan = false; -CLUSTER abbrev_abort_uuids USING abbrev_abort_uuids__noabort_decreasing_idx; --- head -SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing -FROM abbrev_abort_uuids -ORDER BY ctid LIMIT 5; - id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing 
--------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- - 20010 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 - 20001 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 - 9992 | 00000000-0000-0000-0000-000000009991 | 00000000-0000-0000-0000-000000010009 | 00009991-0000-0000-0000-000000009991 | 00000000-0000-0000-0000-000000010009 - 20000 | 00000000-0000-0000-0000-000000019999 | 00000000-0000-0000-0000-000000000001 | 00009990-0000-0000-0000-000000019999 | 00000001-0000-0000-0000-000000000001 - 9991 | 00000000-0000-0000-0000-000000009990 | 00000000-0000-0000-0000-000000010010 | 00009990-0000-0000-0000-000000009990 | 00000001-0000-0000-0000-000000010010 -(5 rows) - --- tail -SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing -FROM abbrev_abort_uuids -ORDER BY ctid DESC LIMIT 5; - id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing --------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- - 0 | | | | - 20003 | | | | - 20002 | | | | - 9993 | 00000000-0000-0000-0000-000000009992 | 00000000-0000-0000-0000-000000010008 | 00009992-0000-0000-0000-000000009992 | 00010008-0000-0000-0000-000000010008 - 9994 | 00000000-0000-0000-0000-000000009993 | 00000000-0000-0000-0000-000000010007 | 00009993-0000-0000-0000-000000009993 | 00010007-0000-0000-0000-000000010007 -(5 rows) - -ROLLBACK; ----- --- test sorting of large datums VALUES ----- --- Ensure the order is correct and values look intact -SELECT LEFT(a,10),b FROM - (VALUES(REPEAT('a', 512 * 1024),1),(REPEAT('b', 512 * 1024),2)) v(a,b) -ORDER BY v.a DESC; - left | b -------------+--- - bbbbbbbbbb | 2 - aaaaaaaaaa | 1 -(2 rows) - ----- --- test forward and backward scans for in-memory and disk based tuplesort ----- --- in-memory -BEGIN; -SET LOCAL enable_indexscan = false; --- unfortunately can't show analyze output confirming sort method, --- the memory used output wouldn't be stable -EXPLAIN (COSTS OFF) DECLARE c SCROLL CURSOR FOR SELECT noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_decreasing; - QUERY PLAN --------------------------------------- - Sort - Sort Key: noabort_decreasing - -> Seq Scan on abbrev_abort_uuids -(3 rows) - -DECLARE c SCROLL CURSOR FOR SELECT noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_decreasing; --- first and second -FETCH NEXT FROM c; - noabort_decreasing --------------------------------------- - 00000000-0000-0000-0000-000000000000 -(1 row) - -FETCH NEXT FROM c; - noabort_decreasing --------------------------------------- - 00000000-0000-0000-0000-000000000000 -(1 row) - --- scroll beyond beginning -FETCH BACKWARD FROM c; - noabort_decreasing --------------------------------------- - 00000000-0000-0000-0000-000000000000 -(1 row) - -FETCH BACKWARD FROM c; - noabort_decreasing --------------------- -(0 rows) - -FETCH BACKWARD FROM c; - noabort_decreasing --------------------- -(0 rows) - -FETCH BACKWARD FROM c; - noabort_decreasing --------------------- -(0 rows) - -FETCH NEXT FROM c; - noabort_decreasing --------------------------------------- - 00000000-0000-0000-0000-000000000000 -(1 row) - --- scroll 
beyond end -FETCH LAST FROM c; - noabort_decreasing --------------------- - -(1 row) - -FETCH BACKWARD FROM c; - noabort_decreasing --------------------- - -(1 row) - -FETCH NEXT FROM c; - noabort_decreasing --------------------- - -(1 row) - -FETCH NEXT FROM c; - noabort_decreasing --------------------- -(0 rows) - -FETCH NEXT FROM c; - noabort_decreasing --------------------- -(0 rows) - -FETCH BACKWARD FROM c; - noabort_decreasing --------------------- - -(1 row) - -FETCH NEXT FROM c; - noabort_decreasing --------------------- -(0 rows) - -COMMIT; --- disk based -BEGIN; -SET LOCAL enable_indexscan = false; -SET LOCAL work_mem = '100kB'; --- unfortunately can't show analyze output confirming sort method, --- the memory used output wouldn't be stable -EXPLAIN (COSTS OFF) DECLARE c SCROLL CURSOR FOR SELECT noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_decreasing; - QUERY PLAN --------------------------------------- - Sort - Sort Key: noabort_decreasing - -> Seq Scan on abbrev_abort_uuids -(3 rows) - -DECLARE c SCROLL CURSOR FOR SELECT noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_decreasing; --- first and second -FETCH NEXT FROM c; - noabort_decreasing --------------------------------------- - 00000000-0000-0000-0000-000000000000 -(1 row) - -FETCH NEXT FROM c; - noabort_decreasing --------------------------------------- - 00000000-0000-0000-0000-000000000000 -(1 row) - --- scroll beyond beginning -FETCH BACKWARD FROM c; - noabort_decreasing --------------------------------------- - 00000000-0000-0000-0000-000000000000 -(1 row) - -FETCH BACKWARD FROM c; - noabort_decreasing --------------------- -(0 rows) - -FETCH BACKWARD FROM c; - noabort_decreasing --------------------- -(0 rows) - -FETCH BACKWARD FROM c; - noabort_decreasing --------------------- -(0 rows) - -FETCH NEXT FROM c; - noabort_decreasing --------------------------------------- - 00000000-0000-0000-0000-000000000000 -(1 row) - --- scroll beyond end -FETCH LAST FROM c; - noabort_decreasing --------------------- - -(1 row) - -FETCH BACKWARD FROM c; - noabort_decreasing --------------------- - -(1 row) - -FETCH NEXT FROM c; - noabort_decreasing --------------------- - -(1 row) - -FETCH NEXT FROM c; - noabort_decreasing --------------------- -(0 rows) - -FETCH NEXT FROM c; - noabort_decreasing --------------------- -(0 rows) - -FETCH BACKWARD FROM c; - noabort_decreasing --------------------- - -(1 row) - -FETCH NEXT FROM c; - noabort_decreasing --------------------- -(0 rows) - -COMMIT; ----- --- test tuplesort using both in-memory and disk sort ---- --- memory based -SELECT - -- fixed-width by-value datum - (array_agg(id ORDER BY id DESC NULLS FIRST))[0:5], - -- fixed-width by-ref datum - (array_agg(abort_increasing ORDER BY abort_increasing DESC NULLS LAST))[0:5], - -- variable-width datum - (array_agg(id::text ORDER BY id::text DESC NULLS LAST))[0:5], - -- fixed width by-value datum tuplesort - percentile_disc(0.99) WITHIN GROUP (ORDER BY id), - -- ensure state is shared - percentile_disc(0.01) WITHIN GROUP (ORDER BY id), - -- fixed width by-ref datum tuplesort - percentile_disc(0.8) WITHIN GROUP (ORDER BY abort_increasing), - -- variable width by-ref datum tuplesort - percentile_disc(0.2) WITHIN GROUP (ORDER BY id::text), - -- multi-column tuplesort - rank('00000000-0000-0000-0000-000000000000', '2', '2') WITHIN GROUP (ORDER BY noabort_increasing, id, id::text) -FROM ( - SELECT * FROM abbrev_abort_uuids - UNION ALL - SELECT NULL, NULL, NULL, NULL, NULL) s; - array_agg | array_agg | array_agg | 
percentile_disc | percentile_disc | percentile_disc | percentile_disc | rank ---------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+-----------------+-----------------+--------------------------------------+-----------------+------ - {NULL,20010,20009,20008,20007} | {00000000-0000-0000-0000-000000020000,00000000-0000-0000-0000-000000020000,00000000-0000-0000-0000-000000019999,00000000-0000-0000-0000-000000019998,00000000-0000-0000-0000-000000019997} | {9999,9998,9997,9996,9995} | 19810 | 200 | 00000000-0000-0000-0000-000000016003 | 136 | 2 -(1 row) - --- disk based (see also above) -BEGIN; -SET LOCAL work_mem = '100kB'; -SELECT - (array_agg(id ORDER BY id DESC NULLS FIRST))[0:5], - (array_agg(abort_increasing ORDER BY abort_increasing DESC NULLS LAST))[0:5], - (array_agg(id::text ORDER BY id::text DESC NULLS LAST))[0:5], - percentile_disc(0.99) WITHIN GROUP (ORDER BY id), - percentile_disc(0.01) WITHIN GROUP (ORDER BY id), - percentile_disc(0.8) WITHIN GROUP (ORDER BY abort_increasing), - percentile_disc(0.2) WITHIN GROUP (ORDER BY id::text), - rank('00000000-0000-0000-0000-000000000000', '2', '2') WITHIN GROUP (ORDER BY noabort_increasing, id, id::text) -FROM ( - SELECT * FROM abbrev_abort_uuids - UNION ALL - SELECT NULL, NULL, NULL, NULL, NULL) s; - array_agg | array_agg | array_agg | percentile_disc | percentile_disc | percentile_disc | percentile_disc | rank ---------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+-----------------+-----------------+--------------------------------------+-----------------+------ - {NULL,20010,20009,20008,20007} | {00000000-0000-0000-0000-000000020000,00000000-0000-0000-0000-000000020000,00000000-0000-0000-0000-000000019999,00000000-0000-0000-0000-000000019998,00000000-0000-0000-0000-000000019997} | {9999,9998,9997,9996,9995} | 19810 | 200 | 00000000-0000-0000-0000-000000016003 | 136 | 2 -(1 row) - -ROLLBACK; ----- --- test tuplesort mark/restore ---- -CREATE TEMP TABLE test_mark_restore(col1 int, col2 int, col12 int); --- need a few duplicates for mark/restore to matter -INSERT INTO test_mark_restore(col1, col2, col12) - SELECT a.i, b.i, a.i * b.i FROM generate_series(1, 500) a(i), generate_series(1, 5) b(i); -BEGIN; -SET LOCAL enable_nestloop = off; -SET LOCAL enable_hashjoin = off; -SET LOCAL enable_material = off; --- set query into variable once, to avoid repetition of the fairly long query -SELECT $$ - SELECT col12, count(distinct a.col1), count(distinct a.col2), count(distinct b.col1), count(distinct b.col2), count(*) - FROM test_mark_restore a - JOIN test_mark_restore b USING(col12) - GROUP BY 1 - HAVING count(*) > 1 - ORDER BY 2 DESC, 1 DESC, 3 DESC, 4 DESC, 5 DESC, 6 DESC - LIMIT 10 -$$ AS qry \gset --- test mark/restore with in-memory sorts -EXPLAIN (COSTS OFF) :qry; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - Limit - -> Sort - Sort Key: (count(DISTINCT a.col1)) DESC, a.col12 DESC, (count(DISTINCT a.col2)) DESC, (count(DISTINCT b.col1)) DESC, (count(DISTINCT b.col2)) DESC, (count(*)) DESC - -> GroupAggregate - 
Group Key: a.col12 - Filter: (count(*) > 1) - -> Incremental Sort - Sort Key: a.col12 DESC, a.col1 - Presorted Key: a.col12 - -> Merge Join - Merge Cond: (a.col12 = b.col12) - -> Sort - Sort Key: a.col12 DESC - -> Seq Scan on test_mark_restore a - -> Sort - Sort Key: b.col12 DESC - -> Seq Scan on test_mark_restore b -(17 rows) - -:qry; - col12 | count | count | count | count | count --------+-------+-------+-------+-------+------- - 480 | 5 | 5 | 5 | 5 | 25 - 420 | 5 | 5 | 5 | 5 | 25 - 360 | 5 | 5 | 5 | 5 | 25 - 300 | 5 | 5 | 5 | 5 | 25 - 240 | 5 | 5 | 5 | 5 | 25 - 180 | 5 | 5 | 5 | 5 | 25 - 120 | 5 | 5 | 5 | 5 | 25 - 60 | 5 | 5 | 5 | 5 | 25 - 960 | 4 | 4 | 4 | 4 | 16 - 900 | 4 | 4 | 4 | 4 | 16 -(10 rows) - --- test mark/restore with on-disk sorts -SET LOCAL work_mem = '100kB'; -EXPLAIN (COSTS OFF) :qry; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - Limit - -> Sort - Sort Key: (count(DISTINCT a.col1)) DESC, a.col12 DESC, (count(DISTINCT a.col2)) DESC, (count(DISTINCT b.col1)) DESC, (count(DISTINCT b.col2)) DESC, (count(*)) DESC - -> GroupAggregate - Group Key: a.col12 - Filter: (count(*) > 1) - -> Incremental Sort - Sort Key: a.col12 DESC, a.col1 - Presorted Key: a.col12 - -> Merge Join - Merge Cond: (a.col12 = b.col12) - -> Sort - Sort Key: a.col12 DESC - -> Seq Scan on test_mark_restore a - -> Sort - Sort Key: b.col12 DESC - -> Seq Scan on test_mark_restore b -(17 rows) - -:qry; - col12 | count | count | count | count | count --------+-------+-------+-------+-------+------- - 480 | 5 | 5 | 5 | 5 | 25 - 420 | 5 | 5 | 5 | 5 | 25 - 360 | 5 | 5 | 5 | 5 | 25 - 300 | 5 | 5 | 5 | 5 | 25 - 240 | 5 | 5 | 5 | 5 | 25 - 180 | 5 | 5 | 5 | 5 | 25 - 120 | 5 | 5 | 5 | 5 | 25 - 60 | 5 | 5 | 5 | 5 | 25 - 960 | 4 | 4 | 4 | 4 | 16 - 900 | 4 | 4 | 4 | 4 | 16 -(10 rows) - -COMMIT; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/explain.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/explain.out --- /Users/admin/pgsql/src/test/regress/expected/explain.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/explain.out 2024-12-13 13:20:13 @@ -1,777 +1,2 @@ --- --- EXPLAIN --- --- There are many test cases elsewhere that use EXPLAIN as a vehicle for --- checking something else (usually planner behavior). This file is --- concerned with testing EXPLAIN in its own right. --- --- To produce stable regression test output, it's usually necessary to --- ignore details such as exact costs or row counts. These filter --- functions replace changeable output details with fixed strings. 
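-- [Editor's aside, not part of the diff] The core of the filter defined just
-- below is a word-boundary digit substitution; the same normalization can be
-- tried standalone:
--   SELECT regexp_replace('cost=0.29..8.31 rows=1 width=8', '-?\m\d+\M', 'N', 'g');
--   => cost=N.N..N.N rows=N width=N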
-create function explain_filter(text) returns setof text -language plpgsql as -$$ -declare - ln text; -begin - for ln in execute $1 - loop - -- Replace any numeric word with just 'N' - ln := regexp_replace(ln, '-?\m\d+\M', 'N', 'g'); - -- In sort output, the above won't match units-suffixed numbers - ln := regexp_replace(ln, '\m\d+kB', 'NkB', 'g'); - -- Ignore text-mode buffers output because it varies depending - -- on the system state - CONTINUE WHEN (ln ~ ' +Buffers: .*'); - -- Ignore text-mode "Planning:" line because whether it's output - -- varies depending on the system state - CONTINUE WHEN (ln = 'Planning:'); - return next ln; - end loop; -end; -$$; --- To produce valid JSON output, replace numbers with "0" or "0.0" not "N" -create function explain_filter_to_json(text) returns jsonb -language plpgsql as -$$ -declare - data text := ''; - ln text; -begin - for ln in execute $1 - loop - -- Replace any numeric word with just '0' - ln := regexp_replace(ln, '\m\d+\M', '0', 'g'); - data := data || ln; - end loop; - return data::jsonb; -end; -$$; --- Disable JIT, or we'll get different output on machines where that's been --- forced on -set jit = off; --- Similarly, disable track_io_timing, to avoid output differences when --- enabled. -set track_io_timing = off; --- Simple cases -select explain_filter('explain select * from int8_tbl i8'); - explain_filter ---------------------------------------------------------- - Seq Scan on int8_tbl i8 (cost=N.N..N.N rows=N width=N) -(1 row) - -select explain_filter('explain (analyze, buffers off) select * from int8_tbl i8'); - explain_filter ------------------------------------------------------------------------------------------------ - Seq Scan on int8_tbl i8 (cost=N.N..N.N rows=N width=N) (actual time=N.N..N.N rows=N loops=N) - Planning Time: N.N ms - Execution Time: N.N ms -(3 rows) - -select explain_filter('explain (analyze, buffers off, verbose) select * from int8_tbl i8'); - explain_filter ------------------------------------------------------------------------------------------------------- - Seq Scan on public.int8_tbl i8 (cost=N.N..N.N rows=N width=N) (actual time=N.N..N.N rows=N loops=N) - Output: q1, q2 - Planning Time: N.N ms - Execution Time: N.N ms -(4 rows) - -select explain_filter('explain (analyze, buffers, format text) select * from int8_tbl i8'); - explain_filter ------------------------------------------------------------------------------------------------ - Seq Scan on int8_tbl i8 (cost=N.N..N.N rows=N width=N) (actual time=N.N..N.N rows=N loops=N) - Planning Time: N.N ms - Execution Time: N.N ms -(3 rows) - -select explain_filter('explain (analyze, buffers, format xml) select * from int8_tbl i8'); - explain_filter --------------------------------------------------------- - + - + - + - Seq Scan + - false + - false + - int8_tbl + - i8 + - N.N + - N.N + - N + - N + - N.N + - N.N + - N + - N + - false + - N + - N + - N+ - N+ - N + - N + - N + - N + - N + - N + - + - + - N + - N + - N+ - N+ - N + - N + - N + - N + - N + - N + - + - N.N + - + - + - N.N + - + - -(1 row) - -select explain_filter('explain (analyze, serialize, buffers, format yaml) select * from int8_tbl i8'); - explain_filter -------------------------------- - - Plan: + - Node Type: "Seq Scan" + - Parallel Aware: false + - Async Capable: false + - Relation Name: "int8_tbl"+ - Alias: "i8" + - Startup Cost: N.N + - Total Cost: N.N + - Plan Rows: N + - Plan Width: N + - Actual Startup Time: N.N + - Actual Total Time: N.N + - Actual Rows: N + - Actual Loops: N + - 
Disabled: false + - Shared Hit Blocks: N + - Shared Read Blocks: N + - Shared Dirtied Blocks: N + - Shared Written Blocks: N + - Local Hit Blocks: N + - Local Read Blocks: N + - Local Dirtied Blocks: N + - Local Written Blocks: N + - Temp Read Blocks: N + - Temp Written Blocks: N + - Planning: + - Shared Hit Blocks: N + - Shared Read Blocks: N + - Shared Dirtied Blocks: N + - Shared Written Blocks: N + - Local Hit Blocks: N + - Local Read Blocks: N + - Local Dirtied Blocks: N + - Local Written Blocks: N + - Temp Read Blocks: N + - Temp Written Blocks: N + - Planning Time: N.N + - Triggers: + - Serialization: + - Time: N.N + - Output Volume: N + - Format: "text" + - Shared Hit Blocks: N + - Shared Read Blocks: N + - Shared Dirtied Blocks: N + - Shared Written Blocks: N + - Local Hit Blocks: N + - Local Read Blocks: N + - Local Dirtied Blocks: N + - Local Written Blocks: N + - Temp Read Blocks: N + - Temp Written Blocks: N + - Execution Time: N.N -(1 row) - -select explain_filter('explain (buffers, format text) select * from int8_tbl i8'); - explain_filter ---------------------------------------------------------- - Seq Scan on int8_tbl i8 (cost=N.N..N.N rows=N width=N) -(1 row) - -select explain_filter('explain (buffers, format json) select * from int8_tbl i8'); - explain_filter ------------------------------------- - [ + - { + - "Plan": { + - "Node Type": "Seq Scan", + - "Parallel Aware": false, + - "Async Capable": false, + - "Relation Name": "int8_tbl",+ - "Alias": "i8", + - "Startup Cost": N.N, + - "Total Cost": N.N, + - "Plan Rows": N, + - "Plan Width": N, + - "Disabled": false, + - "Shared Hit Blocks": N, + - "Shared Read Blocks": N, + - "Shared Dirtied Blocks": N, + - "Shared Written Blocks": N, + - "Local Hit Blocks": N, + - "Local Read Blocks": N, + - "Local Dirtied Blocks": N, + - "Local Written Blocks": N, + - "Temp Read Blocks": N, + - "Temp Written Blocks": N + - }, + - "Planning": { + - "Shared Hit Blocks": N, + - "Shared Read Blocks": N, + - "Shared Dirtied Blocks": N, + - "Shared Written Blocks": N, + - "Local Hit Blocks": N, + - "Local Read Blocks": N, + - "Local Dirtied Blocks": N, + - "Local Written Blocks": N, + - "Temp Read Blocks": N, + - "Temp Written Blocks": N + - } + - } + - ] -(1 row) - --- Check output including I/O timings. These fields are conditional --- but always set in JSON format, so check them only in this case. 
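-- [Editor's aside, not part of the diff] In text format the I/O-timing lines
-- appear only when there is something to report, whereas the structured
-- formats always emit the keys, which is why only the JSON shape is asserted.
-- A standalone equivalent of the probe that follows:
--   SET track_io_timing = on;
--   EXPLAIN (ANALYZE, BUFFERS, FORMAT JSON) SELECT count(*) FROM pg_class;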
-set track_io_timing = on; -select explain_filter('explain (analyze, buffers, format json) select * from int8_tbl i8'); - explain_filter -------------------------------------- - [ + - { + - "Plan": { + - "Node Type": "Seq Scan", + - "Parallel Aware": false, + - "Async Capable": false, + - "Relation Name": "int8_tbl", + - "Alias": "i8", + - "Startup Cost": N.N, + - "Total Cost": N.N, + - "Plan Rows": N, + - "Plan Width": N, + - "Actual Startup Time": N.N, + - "Actual Total Time": N.N, + - "Actual Rows": N, + - "Actual Loops": N, + - "Disabled": false, + - "Shared Hit Blocks": N, + - "Shared Read Blocks": N, + - "Shared Dirtied Blocks": N, + - "Shared Written Blocks": N, + - "Local Hit Blocks": N, + - "Local Read Blocks": N, + - "Local Dirtied Blocks": N, + - "Local Written Blocks": N, + - "Temp Read Blocks": N, + - "Temp Written Blocks": N, + - "Shared I/O Read Time": N.N, + - "Shared I/O Write Time": N.N,+ - "Local I/O Read Time": N.N, + - "Local I/O Write Time": N.N, + - "Temp I/O Read Time": N.N, + - "Temp I/O Write Time": N.N + - }, + - "Planning": { + - "Shared Hit Blocks": N, + - "Shared Read Blocks": N, + - "Shared Dirtied Blocks": N, + - "Shared Written Blocks": N, + - "Local Hit Blocks": N, + - "Local Read Blocks": N, + - "Local Dirtied Blocks": N, + - "Local Written Blocks": N, + - "Temp Read Blocks": N, + - "Temp Written Blocks": N, + - "Shared I/O Read Time": N.N, + - "Shared I/O Write Time": N.N,+ - "Local I/O Read Time": N.N, + - "Local I/O Write Time": N.N, + - "Temp I/O Read Time": N.N, + - "Temp I/O Write Time": N.N + - }, + - "Planning Time": N.N, + - "Triggers": [ + - ], + - "Execution Time": N.N + - } + - ] -(1 row) - -set track_io_timing = off; --- SETTINGS option --- We have to ignore other settings that might be imposed by the environment, --- so printing the whole Settings field unfortunately won't do. -begin; -set local plan_cache_mode = force_generic_plan; -select true as "OK" - from explain_filter('explain (settings) select * from int8_tbl i8') ln - where ln ~ '^ *Settings: .*plan_cache_mode = ''force_generic_plan'''; - OK ----- - t -(1 row) - -select explain_filter_to_json('explain (settings, format json) select * from int8_tbl i8') #> '{0,Settings,plan_cache_mode}'; - ?column? 
----------------------- - "force_generic_plan" -(1 row) - -rollback; --- GENERIC_PLAN option -select explain_filter('explain (generic_plan) select unique1 from tenk1 where thousand = $1'); - explain_filter ---------------------------------------------------------------------------------- - Bitmap Heap Scan on tenk1 (cost=N.N..N.N rows=N width=N) - Recheck Cond: (thousand = $N) - -> Bitmap Index Scan on tenk1_thous_tenthous (cost=N.N..N.N rows=N width=N) - Index Cond: (thousand = $N) -(4 rows) - --- should fail -select explain_filter('explain (analyze, generic_plan) select unique1 from tenk1 where thousand = $1'); -ERROR: EXPLAIN options ANALYZE and GENERIC_PLAN cannot be used together -CONTEXT: PL/pgSQL function explain_filter(text) line 5 at FOR over EXECUTE statement --- MEMORY option -select explain_filter('explain (memory) select * from int8_tbl i8'); - explain_filter ---------------------------------------------------------- - Seq Scan on int8_tbl i8 (cost=N.N..N.N rows=N width=N) - Memory: used=NkB allocated=NkB -(2 rows) - -select explain_filter('explain (memory, analyze, buffers off) select * from int8_tbl i8'); - explain_filter ------------------------------------------------------------------------------------------------ - Seq Scan on int8_tbl i8 (cost=N.N..N.N rows=N width=N) (actual time=N.N..N.N rows=N loops=N) - Memory: used=NkB allocated=NkB - Planning Time: N.N ms - Execution Time: N.N ms -(4 rows) - -select explain_filter('explain (memory, summary, format yaml) select * from int8_tbl i8'); - explain_filter -------------------------------- - - Plan: + - Node Type: "Seq Scan" + - Parallel Aware: false + - Async Capable: false + - Relation Name: "int8_tbl"+ - Alias: "i8" + - Startup Cost: N.N + - Total Cost: N.N + - Plan Rows: N + - Plan Width: N + - Disabled: false + - Planning: + - Memory Used: N + - Memory Allocated: N + - Planning Time: N.N -(1 row) - -select explain_filter('explain (memory, analyze, format json) select * from int8_tbl i8'); - explain_filter ------------------------------------- - [ + - { + - "Plan": { + - "Node Type": "Seq Scan", + - "Parallel Aware": false, + - "Async Capable": false, + - "Relation Name": "int8_tbl",+ - "Alias": "i8", + - "Startup Cost": N.N, + - "Total Cost": N.N, + - "Plan Rows": N, + - "Plan Width": N, + - "Actual Startup Time": N.N, + - "Actual Total Time": N.N, + - "Actual Rows": N, + - "Actual Loops": N, + - "Disabled": false, + - "Shared Hit Blocks": N, + - "Shared Read Blocks": N, + - "Shared Dirtied Blocks": N, + - "Shared Written Blocks": N, + - "Local Hit Blocks": N, + - "Local Read Blocks": N, + - "Local Dirtied Blocks": N, + - "Local Written Blocks": N, + - "Temp Read Blocks": N, + - "Temp Written Blocks": N + - }, + - "Planning": { + - "Shared Hit Blocks": N, + - "Shared Read Blocks": N, + - "Shared Dirtied Blocks": N, + - "Shared Written Blocks": N, + - "Local Hit Blocks": N, + - "Local Read Blocks": N, + - "Local Dirtied Blocks": N, + - "Local Written Blocks": N, + - "Temp Read Blocks": N, + - "Temp Written Blocks": N, + - "Memory Used": N, + - "Memory Allocated": N + - }, + - "Planning Time": N.N, + - "Triggers": [ + - ], + - "Execution Time": N.N + - } + - ] -(1 row) - -prepare int8_query as select * from int8_tbl i8; -select explain_filter('explain (memory) execute int8_query'); - explain_filter ---------------------------------------------------------- - Seq Scan on int8_tbl i8 (cost=N.N..N.N rows=N width=N) - Memory: used=NkB allocated=NkB -(2 rows) - --- Test EXPLAIN (GENERIC_PLAN) with partition pruning --- 
partitions should be pruned at plan time, based on constants, --- but there should be no pruning based on parameter placeholders -create table gen_part ( - key1 integer not null, - key2 integer not null -) partition by list (key1); -create table gen_part_1 - partition of gen_part for values in (1) - partition by range (key2); -create table gen_part_1_1 - partition of gen_part_1 for values from (1) to (2); -create table gen_part_1_2 - partition of gen_part_1 for values from (2) to (3); -create table gen_part_2 - partition of gen_part for values in (2); --- should scan gen_part_1_1 and gen_part_1_2, but not gen_part_2 -select explain_filter('explain (generic_plan) select key1, key2 from gen_part where key1 = 1 and key2 = $1'); - explain_filter ---------------------------------------------------------------------------- - Append (cost=N.N..N.N rows=N width=N) - -> Seq Scan on gen_part_1_1 gen_part_1 (cost=N.N..N.N rows=N width=N) - Filter: ((key1 = N) AND (key2 = $N)) - -> Seq Scan on gen_part_1_2 gen_part_2 (cost=N.N..N.N rows=N width=N) - Filter: ((key1 = N) AND (key2 = $N)) -(5 rows) - -drop table gen_part; --- --- Test production of per-worker data --- --- Unfortunately, because we don't know how many worker processes we'll --- actually get (maybe none at all), we can't examine the "Workers" output --- in any detail. We can check that it parses correctly as JSON, and then --- remove it from the displayed results. -begin; --- encourage use of parallel plans -set parallel_setup_cost=0; -set parallel_tuple_cost=0; -set min_parallel_table_scan_size=0; -set max_parallel_workers_per_gather=4; -select jsonb_pretty( - explain_filter_to_json('explain (analyze, verbose, buffers, format json) - select * from tenk1 order by tenthous') - -- remove "Workers" node of the Seq Scan plan node - #- '{0,Plan,Plans,0,Plans,0,Workers}' - -- remove "Workers" node of the Sort plan node - #- '{0,Plan,Plans,0,Workers}' - -- Also remove its sort-type fields, as those aren't 100% stable - #- '{0,Plan,Plans,0,Sort Method}' - #- '{0,Plan,Plans,0,Sort Space Type}' -); - jsonb_pretty -------------------------------------------------------------- - [ + - { + - "Plan": { + - "Plans": [ + - { + - "Plans": [ + - { + - "Alias": "tenk1", + - "Output": [ + - "unique1", + - "unique2", + - "two", + - "four", + - "ten", + - "twenty", + - "hundred", + - "thousand", + - "twothousand", + - "fivethous", + - "tenthous", + - "odd", + - "even", + - "stringu1", + - "stringu2", + - "string4" + - ], + - "Schema": "public", + - "Disabled": false, + - "Node Type": "Seq Scan", + - "Plan Rows": 0, + - "Plan Width": 0, + - "Total Cost": 0.0, + - "Actual Rows": 0, + - "Actual Loops": 0, + - "Startup Cost": 0.0, + - "Async Capable": false, + - "Relation Name": "tenk1", + - "Parallel Aware": true, + - "Local Hit Blocks": 0, + - "Temp Read Blocks": 0, + - "Actual Total Time": 0.0, + - "Local Read Blocks": 0, + - "Shared Hit Blocks": 0, + - "Shared Read Blocks": 0, + - "Actual Startup Time": 0.0, + - "Parent Relationship": "Outer",+ - "Temp Written Blocks": 0, + - "Local Dirtied Blocks": 0, + - "Local Written Blocks": 0, + - "Shared Dirtied Blocks": 0, + - "Shared Written Blocks": 0 + - } + - ], + - "Output": [ + - "unique1", + - "unique2", + - "two", + - "four", + - "ten", + - "twenty", + - "hundred", + - "thousand", + - "twothousand", + - "fivethous", + - "tenthous", + - "odd", + - "even", + - "stringu1", + - "stringu2", + - "string4" + - ], + - "Disabled": false, + - "Sort Key": [ + - "tenk1.tenthous" + - ], + - "Node Type": "Sort", + - "Plan 
Rows": 0, + - "Plan Width": 0, + - "Total Cost": 0.0, + - "Actual Rows": 0, + - "Actual Loops": 0, + - "Startup Cost": 0.0, + - "Async Capable": false, + - "Parallel Aware": false, + - "Sort Space Used": 0, + - "Local Hit Blocks": 0, + - "Temp Read Blocks": 0, + - "Actual Total Time": 0.0, + - "Local Read Blocks": 0, + - "Shared Hit Blocks": 0, + - "Shared Read Blocks": 0, + - "Actual Startup Time": 0.0, + - "Parent Relationship": "Outer", + - "Temp Written Blocks": 0, + - "Local Dirtied Blocks": 0, + - "Local Written Blocks": 0, + - "Shared Dirtied Blocks": 0, + - "Shared Written Blocks": 0 + - } + - ], + - "Output": [ + - "unique1", + - "unique2", + - "two", + - "four", + - "ten", + - "twenty", + - "hundred", + - "thousand", + - "twothousand", + - "fivethous", + - "tenthous", + - "odd", + - "even", + - "stringu1", + - "stringu2", + - "string4" + - ], + - "Disabled": false, + - "Node Type": "Gather Merge", + - "Plan Rows": 0, + - "Plan Width": 0, + - "Total Cost": 0.0, + - "Actual Rows": 0, + - "Actual Loops": 0, + - "Startup Cost": 0.0, + - "Async Capable": false, + - "Parallel Aware": false, + - "Workers Planned": 0, + - "Local Hit Blocks": 0, + - "Temp Read Blocks": 0, + - "Workers Launched": 0, + - "Actual Total Time": 0.0, + - "Local Read Blocks": 0, + - "Shared Hit Blocks": 0, + - "Shared Read Blocks": 0, + - "Actual Startup Time": 0.0, + - "Temp Written Blocks": 0, + - "Local Dirtied Blocks": 0, + - "Local Written Blocks": 0, + - "Shared Dirtied Blocks": 0, + - "Shared Written Blocks": 0 + - }, + - "Planning": { + - "Local Hit Blocks": 0, + - "Temp Read Blocks": 0, + - "Local Read Blocks": 0, + - "Shared Hit Blocks": 0, + - "Shared Read Blocks": 0, + - "Temp Written Blocks": 0, + - "Local Dirtied Blocks": 0, + - "Local Written Blocks": 0, + - "Shared Dirtied Blocks": 0, + - "Shared Written Blocks": 0 + - }, + - "Triggers": [ + - ], + - "Planning Time": 0.0, + - "Execution Time": 0.0 + - } + - ] -(1 row) - -rollback; --- Test display of temporary objects -create temp table t1(f1 float8); -create function pg_temp.mysin(float8) returns float8 language plpgsql -as 'begin return sin($1); end'; -select explain_filter('explain (verbose) select * from t1 where pg_temp.mysin(f1) < 0.5'); - explain_filter ------------------------------------------------------------- - Seq Scan on pg_temp.t1 (cost=N.N..N.N rows=N width=N) - Output: f1 - Filter: (pg_temp.mysin(t1.f1) < 'N.N'::double precision) -(3 rows) - --- Test compute_query_id -set compute_query_id = on; -select explain_filter('explain (verbose) select * from int8_tbl i8'); - explain_filter ----------------------------------------------------------------- - Seq Scan on public.int8_tbl i8 (cost=N.N..N.N rows=N width=N) - Output: q1, q2 - Query Identifier: N -(3 rows) - --- Test compute_query_id with utility statements containing plannable query -select explain_filter('explain (verbose) declare test_cur cursor for select * from int8_tbl'); - explain_filter -------------------------------------------------------------- - Seq Scan on public.int8_tbl (cost=N.N..N.N rows=N width=N) - Output: q1, q2 - Query Identifier: N -(3 rows) - -select explain_filter('explain (verbose) create table test_ctas as select 1'); - explain_filter ----------------------------------------- - Result (cost=N.N..N.N rows=N width=N) - Output: N - Query Identifier: N -(3 rows) - --- Test SERIALIZE option -select explain_filter('explain (analyze,buffers off,serialize) select * from int8_tbl i8'); - explain_filter 
------------------------------------------------------------------------------------------------ - Seq Scan on int8_tbl i8 (cost=N.N..N.N rows=N width=N) (actual time=N.N..N.N rows=N loops=N) - Planning Time: N.N ms - Serialization: time=N.N ms output=NkB format=text - Execution Time: N.N ms -(4 rows) - -select explain_filter('explain (analyze,serialize text,buffers,timing off) select * from int8_tbl i8'); - explain_filter ---------------------------------------------------------------------------------- - Seq Scan on int8_tbl i8 (cost=N.N..N.N rows=N width=N) (actual rows=N loops=N) - Planning Time: N.N ms - Serialization: output=NkB format=text - Execution Time: N.N ms -(4 rows) - -select explain_filter('explain (analyze,serialize binary,buffers,timing) select * from int8_tbl i8'); - explain_filter ------------------------------------------------------------------------------------------------ - Seq Scan on int8_tbl i8 (cost=N.N..N.N rows=N width=N) (actual time=N.N..N.N rows=N loops=N) - Planning Time: N.N ms - Serialization: time=N.N ms output=NkB format=binary - Execution Time: N.N ms -(4 rows) - --- this tests an edge case where we have no data to return -select explain_filter('explain (analyze,buffers off,serialize) create temp table explain_temp as select * from int8_tbl i8'); - explain_filter ------------------------------------------------------------------------------------------------ - Seq Scan on int8_tbl i8 (cost=N.N..N.N rows=N width=N) (actual time=N.N..N.N rows=N loops=N) - Planning Time: N.N ms - Serialization: time=N.N ms output=NkB format=text - Execution Time: N.N ms -(4 rows) - --- Test tuplestore storage usage in Window aggregate (memory case) -select explain_filter('explain (analyze,buffers off,costs off) select sum(n) over() from generate_series(1,10) a(n)'); - explain_filter --------------------------------------------------------------------------------- - WindowAgg (actual time=N.N..N.N rows=N loops=N) - Storage: Memory Maximum Storage: NkB - -> Function Scan on generate_series a (actual time=N.N..N.N rows=N loops=N) - Planning Time: N.N ms - Execution Time: N.N ms -(5 rows) - --- Test tuplestore storage usage in Window aggregate (disk case) -set work_mem to 64; -select explain_filter('explain (analyze,buffers off,costs off) select sum(n) over() from generate_series(1,2000) a(n)'); - explain_filter --------------------------------------------------------------------------------- - WindowAgg (actual time=N.N..N.N rows=N loops=N) - Storage: Disk Maximum Storage: NkB - -> Function Scan on generate_series a (actual time=N.N..N.N rows=N loops=N) - Planning Time: N.N ms - Execution Time: N.N ms -(5 rows) - --- Test tuplestore storage usage in Window aggregate (memory and disk case, final result is disk) -select explain_filter('explain (analyze,buffers off,costs off) select sum(n) over(partition by m) from (SELECT n < 3 as m, n from generate_series(1,2000) a(n))'); - explain_filter --------------------------------------------------------------------------------------- - WindowAgg (actual time=N.N..N.N rows=N loops=N) - Storage: Disk Maximum Storage: NkB - -> Sort (actual time=N.N..N.N rows=N loops=N) - Sort Key: ((a.n < N)) - Sort Method: external merge Disk: NkB - -> Function Scan on generate_series a (actual time=N.N..N.N rows=N loops=N) - Planning Time: N.N ms - Execution Time: N.N ms -(8 rows) - -reset work_mem; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or 
directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/compression_1.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/compression.out --- /Users/admin/pgsql/src/test/regress/expected/compression_1.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/compression.out 2024-12-13 13:20:13 @@ -1,360 +1,2 @@ -\set HIDE_TOAST_COMPRESSION false --- ensure we get stable results regardless of installation's default -SET default_toast_compression = 'pglz'; --- test creating table with compression method -CREATE TABLE cmdata(f1 text COMPRESSION pglz); -CREATE INDEX idx ON cmdata(f1); -INSERT INTO cmdata VALUES(repeat('1234567890', 1000)); -\d+ cmdata - Table "public.cmdata" - Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ---------+------+-----------+----------+---------+----------+-------------+--------------+------------- - f1 | text | | | | extended | pglz | | -Indexes: - "idx" btree (f1) - -CREATE TABLE cmdata1(f1 TEXT COMPRESSION lz4); -ERROR: compression method lz4 not supported -DETAIL: This functionality requires the server to be built with lz4 support. -INSERT INTO cmdata1 VALUES(repeat('1234567890', 1004)); -ERROR: relation "cmdata1" does not exist -LINE 1: INSERT INTO cmdata1 VALUES(repeat('1234567890', 1004)); - ^ -\d+ cmdata1 --- verify stored compression method in the data -SELECT pg_column_compression(f1) FROM cmdata; - pg_column_compression ------------------------ - pglz -(1 row) - -SELECT pg_column_compression(f1) FROM cmdata1; -ERROR: relation "cmdata1" does not exist -LINE 1: SELECT pg_column_compression(f1) FROM cmdata1; - ^ --- decompress data slice -SELECT SUBSTR(f1, 200, 5) FROM cmdata; - substr --------- - 01234 -(1 row) - -SELECT SUBSTR(f1, 2000, 50) FROM cmdata1; -ERROR: relation "cmdata1" does not exist -LINE 1: SELECT SUBSTR(f1, 2000, 50) FROM cmdata1; - ^ --- copy with table creation -SELECT * INTO cmmove1 FROM cmdata; -\d+ cmmove1 - Table "public.cmmove1" - Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ---------+------+-----------+----------+---------+----------+-------------+--------------+------------- - f1 | text | | | | extended | | | - -SELECT pg_column_compression(f1) FROM cmmove1; - pg_column_compression ------------------------ - pglz -(1 row) - --- copy to existing table -CREATE TABLE cmmove3(f1 text COMPRESSION pglz); -INSERT INTO cmmove3 SELECT * FROM cmdata; -INSERT INTO cmmove3 SELECT * FROM cmdata1; -ERROR: relation "cmdata1" does not exist -LINE 1: INSERT INTO cmmove3 SELECT * FROM cmdata1; - ^ -SELECT pg_column_compression(f1) FROM cmmove3; - pg_column_compression ------------------------ - pglz -(1 row) - --- test LIKE INCLUDING COMPRESSION -CREATE TABLE cmdata2 (LIKE cmdata1 INCLUDING COMPRESSION); -ERROR: relation "cmdata1" does not exist -LINE 1: CREATE TABLE cmdata2 (LIKE cmdata1 INCLUDING COMPRESSION); - ^ -\d+ cmdata2 -DROP TABLE cmdata2; -ERROR: table "cmdata2" does not exist --- try setting compression for incompressible data type -CREATE TABLE cmdata2 (f1 int COMPRESSION pglz); -ERROR: column data type integer does not support compression --- update using datum from different table -CREATE TABLE cmmove2(f1 text COMPRESSION pglz); -INSERT INTO cmmove2 VALUES (repeat('1234567890', 1004)); -SELECT pg_column_compression(f1) FROM cmmove2; - pg_column_compression ------------------------ - 
pglz -(1 row) - -UPDATE cmmove2 SET f1 = cmdata1.f1 FROM cmdata1; -ERROR: relation "cmdata1" does not exist -LINE 1: UPDATE cmmove2 SET f1 = cmdata1.f1 FROM cmdata1; - ^ -SELECT pg_column_compression(f1) FROM cmmove2; - pg_column_compression ------------------------ - pglz -(1 row) - --- test externally stored compressed data -CREATE OR REPLACE FUNCTION large_val() RETURNS TEXT LANGUAGE SQL AS -'select array_agg(fipshash(g::text))::text from generate_series(1, 256) g'; -CREATE TABLE cmdata2 (f1 text COMPRESSION pglz); -INSERT INTO cmdata2 SELECT large_val() || repeat('a', 4000); -SELECT pg_column_compression(f1) FROM cmdata2; - pg_column_compression ------------------------ - pglz -(1 row) - -INSERT INTO cmdata1 SELECT large_val() || repeat('a', 4000); -ERROR: relation "cmdata1" does not exist -LINE 1: INSERT INTO cmdata1 SELECT large_val() || repeat('a', 4000); - ^ -SELECT pg_column_compression(f1) FROM cmdata1; -ERROR: relation "cmdata1" does not exist -LINE 1: SELECT pg_column_compression(f1) FROM cmdata1; - ^ -SELECT SUBSTR(f1, 200, 5) FROM cmdata1; -ERROR: relation "cmdata1" does not exist -LINE 1: SELECT SUBSTR(f1, 200, 5) FROM cmdata1; - ^ -SELECT SUBSTR(f1, 200, 5) FROM cmdata2; - substr --------- - 79026 -(1 row) - -DROP TABLE cmdata2; ---test column type update varlena/non-varlena -CREATE TABLE cmdata2 (f1 int); -\d+ cmdata2 - Table "public.cmdata2" - Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ---------+---------+-----------+----------+---------+---------+-------------+--------------+------------- - f1 | integer | | | | plain | | | - -ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar; -\d+ cmdata2 - Table "public.cmdata2" - Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ---------+-------------------+-----------+----------+---------+----------+-------------+--------------+------------- - f1 | character varying | | | | extended | | | - -ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE int USING f1::integer; -\d+ cmdata2 - Table "public.cmdata2" - Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ---------+---------+-----------+----------+---------+---------+-------------+--------------+------------- - f1 | integer | | | | plain | | | - ---changing column storage should not impact the compression method ---but the data should not be compressed -ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar; -ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION pglz; -\d+ cmdata2 - Table "public.cmdata2" - Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ---------+-------------------+-----------+----------+---------+----------+-------------+--------------+------------- - f1 | character varying | | | | extended | pglz | | - -ALTER TABLE cmdata2 ALTER COLUMN f1 SET STORAGE plain; -\d+ cmdata2 - Table "public.cmdata2" - Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ---------+-------------------+-----------+----------+---------+---------+-------------+--------------+------------- - f1 | character varying | | | | plain | pglz | | - -INSERT INTO cmdata2 VALUES (repeat('123456789', 800)); -SELECT pg_column_compression(f1) FROM cmdata2; - pg_column_compression ------------------------ - -(1 row) - --- test compression with materialized view -CREATE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata1; -ERROR: relation "cmdata1" does not exist -LINE 1: 
...TE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata1; - ^ -\d+ compressmv -SELECT pg_column_compression(f1) FROM cmdata1; -ERROR: relation "cmdata1" does not exist -LINE 1: SELECT pg_column_compression(f1) FROM cmdata1; - ^ -SELECT pg_column_compression(x) FROM compressmv; -ERROR: relation "compressmv" does not exist -LINE 1: SELECT pg_column_compression(x) FROM compressmv; - ^ --- test compression with partition -CREATE TABLE cmpart(f1 text COMPRESSION lz4) PARTITION BY HASH(f1); -ERROR: compression method lz4 not supported -DETAIL: This functionality requires the server to be built with lz4 support. -CREATE TABLE cmpart1 PARTITION OF cmpart FOR VALUES WITH (MODULUS 2, REMAINDER 0); -ERROR: relation "cmpart" does not exist -CREATE TABLE cmpart2(f1 text COMPRESSION pglz); -ALTER TABLE cmpart ATTACH PARTITION cmpart2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); -ERROR: relation "cmpart" does not exist -INSERT INTO cmpart VALUES (repeat('123456789', 1004)); -ERROR: relation "cmpart" does not exist -LINE 1: INSERT INTO cmpart VALUES (repeat('123456789', 1004)); - ^ -INSERT INTO cmpart VALUES (repeat('123456789', 4004)); -ERROR: relation "cmpart" does not exist -LINE 1: INSERT INTO cmpart VALUES (repeat('123456789', 4004)); - ^ -SELECT pg_column_compression(f1) FROM cmpart1; -ERROR: relation "cmpart1" does not exist -LINE 1: SELECT pg_column_compression(f1) FROM cmpart1; - ^ -SELECT pg_column_compression(f1) FROM cmpart2; - pg_column_compression ------------------------ -(0 rows) - --- test compression with inheritance -CREATE TABLE cminh() INHERITS(cmdata, cmdata1); -- error -ERROR: relation "cmdata1" does not exist -CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata); -- error -NOTICE: merging column "f1" with inherited definition -ERROR: column "f1" has a compression method conflict -DETAIL: pglz versus lz4 -CREATE TABLE cmdata3(f1 text); -CREATE TABLE cminh() INHERITS (cmdata, cmdata3); -NOTICE: merging multiple inherited definitions of column "f1" --- test default_toast_compression GUC -SET default_toast_compression = ''; -ERROR: invalid value for parameter "default_toast_compression": "" -HINT: Available values: pglz. -SET default_toast_compression = 'I do not exist compression'; -ERROR: invalid value for parameter "default_toast_compression": "I do not exist compression" -HINT: Available values: pglz. -SET default_toast_compression = 'lz4'; -ERROR: invalid value for parameter "default_toast_compression": "lz4" -HINT: Available values: pglz. -SET default_toast_compression = 'pglz'; --- test alter compression method -ALTER TABLE cmdata ALTER COLUMN f1 SET COMPRESSION lz4; -ERROR: compression method lz4 not supported -DETAIL: This functionality requires the server to be built with lz4 support. 
-INSERT INTO cmdata VALUES (repeat('123456789', 4004)); -\d+ cmdata - Table "public.cmdata" - Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ---------+------+-----------+----------+---------+----------+-------------+--------------+------------- - f1 | text | | | | extended | pglz | | -Indexes: - "idx" btree (f1) -Child tables: cminh - -SELECT pg_column_compression(f1) FROM cmdata; - pg_column_compression ------------------------ - pglz - pglz -(2 rows) - -ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION default; -\d+ cmdata2 - Table "public.cmdata2" - Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description ---------+-------------------+-----------+----------+---------+---------+-------------+--------------+------------- - f1 | character varying | | | | plain | | | - --- test alter compression method for materialized views -ALTER MATERIALIZED VIEW compressmv ALTER COLUMN x SET COMPRESSION lz4; -ERROR: relation "compressmv" does not exist -\d+ compressmv --- test alter compression method for partitioned tables -ALTER TABLE cmpart1 ALTER COLUMN f1 SET COMPRESSION pglz; -ERROR: relation "cmpart1" does not exist -ALTER TABLE cmpart2 ALTER COLUMN f1 SET COMPRESSION lz4; -ERROR: compression method lz4 not supported -DETAIL: This functionality requires the server to be built with lz4 support. --- new data should be compressed with the current compression method -INSERT INTO cmpart VALUES (repeat('123456789', 1004)); -ERROR: relation "cmpart" does not exist -LINE 1: INSERT INTO cmpart VALUES (repeat('123456789', 1004)); - ^ -INSERT INTO cmpart VALUES (repeat('123456789', 4004)); -ERROR: relation "cmpart" does not exist -LINE 1: INSERT INTO cmpart VALUES (repeat('123456789', 4004)); - ^ -SELECT pg_column_compression(f1) FROM cmpart1; -ERROR: relation "cmpart1" does not exist -LINE 1: SELECT pg_column_compression(f1) FROM cmpart1; - ^ -SELECT pg_column_compression(f1) FROM cmpart2; - pg_column_compression ------------------------ -(0 rows) - --- VACUUM FULL does not recompress -SELECT pg_column_compression(f1) FROM cmdata; - pg_column_compression ------------------------ - pglz - pglz -(2 rows) - -VACUUM FULL cmdata; -SELECT pg_column_compression(f1) FROM cmdata; - pg_column_compression ------------------------ - pglz - pglz -(2 rows) - --- test expression index -DROP TABLE cmdata2; -CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4); -ERROR: compression method lz4 not supported -DETAIL: This functionality requires the server to be built with lz4 support. -CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2)); -ERROR: relation "cmdata2" does not exist -INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM -generate_series(1, 50) g), VERSION()); -ERROR: relation "cmdata2" does not exist -LINE 1: INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEX... 
- ^ --- check data is ok -SELECT length(f1) FROM cmdata; - length --------- - 10000 - 36036 -(2 rows) - -SELECT length(f1) FROM cmdata1; -ERROR: relation "cmdata1" does not exist -LINE 1: SELECT length(f1) FROM cmdata1; - ^ -SELECT length(f1) FROM cmmove1; - length --------- - 10000 -(1 row) - -SELECT length(f1) FROM cmmove2; - length --------- - 10040 -(1 row) - -SELECT length(f1) FROM cmmove3; - length --------- - 10000 -(1 row) - -CREATE TABLE badcompresstbl (a text COMPRESSION I_Do_Not_Exist_Compression); -- fails -ERROR: invalid compression method "i_do_not_exist_compression" -CREATE TABLE badcompresstbl (a text); -ALTER TABLE badcompresstbl ALTER a SET COMPRESSION I_Do_Not_Exist_Compression; -- fails -ERROR: invalid compression method "i_do_not_exist_compression" -DROP TABLE badcompresstbl; -\set HIDE_TOAST_COMPRESSION true +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/memoize.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/memoize.out --- /Users/admin/pgsql/src/test/regress/expected/memoize.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/memoize.out 2024-12-13 13:20:13 @@ -1,481 +1,2 @@ --- Perform tests on the Memoize node. --- The cache hits/misses/evictions from the Memoize node can vary between --- machines. Let's just replace the number with an 'N'. In order to allow us --- to perform validation when the measure was zero, we replace a zero value --- with "Zero". All other numbers are replaced with 'N'. -create function explain_memoize(query text, hide_hitmiss bool) returns setof text -language plpgsql as -$$ -declare - ln text; -begin - for ln in - execute format('explain (analyze, costs off, summary off, timing off, buffers off) %s', - query) - loop - if hide_hitmiss = true then - ln := regexp_replace(ln, 'Hits: 0', 'Hits: Zero'); - ln := regexp_replace(ln, 'Hits: \d+', 'Hits: N'); - ln := regexp_replace(ln, 'Misses: 0', 'Misses: Zero'); - ln := regexp_replace(ln, 'Misses: \d+', 'Misses: N'); - end if; - ln := regexp_replace(ln, 'Evictions: 0', 'Evictions: Zero'); - ln := regexp_replace(ln, 'Evictions: \d+', 'Evictions: N'); - ln := regexp_replace(ln, 'Memory Usage: \d+', 'Memory Usage: N'); - ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N'); - ln := regexp_replace(ln, 'loops=\d+', 'loops=N'); - return next ln; - end loop; -end; -$$; --- Ensure we get a memoize node on the inner side of the nested loop -SET enable_hashjoin TO off; -SET enable_bitmapscan TO off; -SELECT explain_memoize(' -SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1 -INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty -WHERE t2.unique1 < 1000;', false); - explain_memoize -------------------------------------------------------------------------------------------- - Aggregate (actual rows=1 loops=N) - -> Nested Loop (actual rows=1000 loops=N) - -> Seq Scan on tenk1 t2 (actual rows=1000 loops=N) - Filter: (unique1 < 1000) - Rows Removed by Filter: 9000 - -> Memoize (actual rows=1 loops=N) - Cache Key: t2.twenty - Cache Mode: logical - Hits: 980 Misses: 20 Evictions: Zero Overflows: 0 Memory Usage: NkB - -> Index Only Scan using tenk1_unique1 on tenk1 t1 (actual rows=1 loops=N) - Index Cond: (unique1 = t2.twenty) - Heap Fetches: N -(12 rows) - --- And check we get the expected results. 
-SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1 -INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty -WHERE t2.unique1 < 1000; - count | avg --------+-------------------- - 1000 | 9.5000000000000000 -(1 row) - --- Try with LATERAL joins -SELECT explain_memoize(' -SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1, -LATERAL (SELECT t2.unique1 FROM tenk1 t2 - WHERE t1.twenty = t2.unique1 OFFSET 0) t2 -WHERE t1.unique1 < 1000;', false); - explain_memoize -------------------------------------------------------------------------------------------- - Aggregate (actual rows=1 loops=N) - -> Nested Loop (actual rows=1000 loops=N) - -> Seq Scan on tenk1 t1 (actual rows=1000 loops=N) - Filter: (unique1 < 1000) - Rows Removed by Filter: 9000 - -> Memoize (actual rows=1 loops=N) - Cache Key: t1.twenty - Cache Mode: binary - Hits: 980 Misses: 20 Evictions: Zero Overflows: 0 Memory Usage: NkB - -> Index Only Scan using tenk1_unique1 on tenk1 t2 (actual rows=1 loops=N) - Index Cond: (unique1 = t1.twenty) - Heap Fetches: N -(12 rows) - --- And check we get the expected results. -SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1, -LATERAL (SELECT t2.unique1 FROM tenk1 t2 - WHERE t1.twenty = t2.unique1 OFFSET 0) t2 -WHERE t1.unique1 < 1000; - count | avg --------+-------------------- - 1000 | 9.5000000000000000 -(1 row) - --- Try with LATERAL joins -SELECT explain_memoize(' -SELECT COUNT(*),AVG(t2.t1two) FROM tenk1 t1 LEFT JOIN -LATERAL ( - SELECT t1.two as t1two, * FROM tenk1 t2 WHERE t2.unique1 < 4 OFFSET 0 -) t2 -ON t1.two = t2.two -WHERE t1.unique1 < 10;', false); - explain_memoize ----------------------------------------------------------------------------------------------- - Aggregate (actual rows=1 loops=N) - -> Nested Loop Left Join (actual rows=20 loops=N) - -> Index Scan using tenk1_unique1 on tenk1 t1 (actual rows=10 loops=N) - Index Cond: (unique1 < 10) - -> Memoize (actual rows=2 loops=N) - Cache Key: t1.two - Cache Mode: binary - Hits: 8 Misses: 2 Evictions: Zero Overflows: 0 Memory Usage: NkB - -> Subquery Scan on t2 (actual rows=2 loops=N) - Filter: (t1.two = t2.two) - Rows Removed by Filter: 2 - -> Index Scan using tenk1_unique1 on tenk1 t2_1 (actual rows=4 loops=N) - Index Cond: (unique1 < 4) -(13 rows) - --- And check we get the expected results. -SELECT COUNT(*),AVG(t2.t1two) FROM tenk1 t1 LEFT JOIN -LATERAL ( - SELECT t1.two as t1two, * FROM tenk1 t2 WHERE t2.unique1 < 4 OFFSET 0 -) t2 -ON t1.two = t2.two -WHERE t1.unique1 < 10; - count | avg --------+------------------------ - 20 | 0.50000000000000000000 -(1 row) - --- Try with LATERAL references within PlaceHolderVars -SELECT explain_memoize(' -SELECT COUNT(*), AVG(t1.twenty) FROM tenk1 t1 LEFT JOIN -LATERAL (SELECT t1.two+1 AS c1, t2.unique1 AS c2 FROM tenk1 t2) s ON TRUE -WHERE s.c1 = s.c2 AND t1.unique1 < 1000;', false); - explain_memoize -------------------------------------------------------------------------------------------- - Aggregate (actual rows=1 loops=N) - -> Nested Loop (actual rows=1000 loops=N) - -> Seq Scan on tenk1 t1 (actual rows=1000 loops=N) - Filter: (unique1 < 1000) - Rows Removed by Filter: 9000 - -> Memoize (actual rows=1 loops=N) - Cache Key: (t1.two + 1) - Cache Mode: binary - Hits: 998 Misses: 2 Evictions: Zero Overflows: 0 Memory Usage: NkB - -> Index Only Scan using tenk1_unique1 on tenk1 t2 (actual rows=1 loops=N) - Filter: ((t1.two + 1) = unique1) - Rows Removed by Filter: 9999 - Heap Fetches: N -(13 rows) - --- And check we get the expected results. 
-SELECT COUNT(*), AVG(t1.twenty) FROM tenk1 t1 LEFT JOIN -LATERAL (SELECT t1.two+1 AS c1, t2.unique1 AS c2 FROM tenk1 t2) s ON TRUE -WHERE s.c1 = s.c2 AND t1.unique1 < 1000; - count | avg --------+-------------------- - 1000 | 9.5000000000000000 -(1 row) - --- Ensure we do not omit the cache keys from PlaceHolderVars -SELECT explain_memoize(' -SELECT COUNT(*), AVG(t1.twenty) FROM tenk1 t1 LEFT JOIN -LATERAL (SELECT t1.twenty AS c1, t2.unique1 AS c2, t2.two FROM tenk1 t2) s -ON t1.two = s.two -WHERE s.c1 = s.c2 AND t1.unique1 < 1000;', false); - explain_memoize ---------------------------------------------------------------------------------------- - Aggregate (actual rows=1 loops=N) - -> Nested Loop (actual rows=1000 loops=N) - -> Seq Scan on tenk1 t1 (actual rows=1000 loops=N) - Filter: (unique1 < 1000) - Rows Removed by Filter: 9000 - -> Memoize (actual rows=1 loops=N) - Cache Key: t1.two, t1.twenty - Cache Mode: binary - Hits: 980 Misses: 20 Evictions: Zero Overflows: 0 Memory Usage: NkB - -> Seq Scan on tenk1 t2 (actual rows=1 loops=N) - Filter: ((t1.twenty = unique1) AND (t1.two = two)) - Rows Removed by Filter: 9999 -(12 rows) - --- And check we get the expected results. -SELECT COUNT(*), AVG(t1.twenty) FROM tenk1 t1 LEFT JOIN -LATERAL (SELECT t1.twenty AS c1, t2.unique1 AS c2, t2.two FROM tenk1 t2) s -ON t1.two = s.two -WHERE s.c1 = s.c2 AND t1.unique1 < 1000; - count | avg --------+-------------------- - 1000 | 9.5000000000000000 -(1 row) - -SET enable_mergejoin TO off; --- Test for varlena datatype with expr evaluation -CREATE TABLE expr_key (x numeric, t text); -INSERT INTO expr_key (x, t) -SELECT d1::numeric, d1::text FROM ( - SELECT round((d / pi())::numeric, 7) AS d1 FROM generate_series(1, 20) AS d -) t; --- duplicate rows so we get some cache hits -INSERT INTO expr_key SELECT * FROM expr_key; -CREATE INDEX expr_key_idx_x_t ON expr_key (x, t); -VACUUM ANALYZE expr_key; --- Ensure we get we get a cache miss and hit for each of the 20 distinct values -SELECT explain_memoize(' -SELECT * FROM expr_key t1 INNER JOIN expr_key t2 -ON t1.x = t2.t::numeric AND t1.t::numeric = t2.x;', false); - explain_memoize -------------------------------------------------------------------------------------------- - Nested Loop (actual rows=80 loops=N) - -> Seq Scan on expr_key t1 (actual rows=40 loops=N) - -> Memoize (actual rows=2 loops=N) - Cache Key: t1.x, (t1.t)::numeric - Cache Mode: logical - Hits: 20 Misses: 20 Evictions: Zero Overflows: 0 Memory Usage: NkB - -> Index Only Scan using expr_key_idx_x_t on expr_key t2 (actual rows=2 loops=N) - Index Cond: (x = (t1.t)::numeric) - Filter: (t1.x = (t)::numeric) - Heap Fetches: N -(10 rows) - -DROP TABLE expr_key; --- Reduce work_mem and hash_mem_multiplier so that we see some cache evictions -SET work_mem TO '64kB'; -SET hash_mem_multiplier TO 1.0; --- Ensure we get some evictions. We're unable to validate the hits and misses --- here as the number of entries that fit in the cache at once will vary --- between different machines. 
-SELECT explain_memoize(' -SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1 -INNER JOIN tenk1 t2 ON t1.unique1 = t2.thousand -WHERE t2.unique1 < 1200;', true); - explain_memoize -------------------------------------------------------------------------------------------- - Aggregate (actual rows=1 loops=N) - -> Nested Loop (actual rows=1200 loops=N) - -> Seq Scan on tenk1 t2 (actual rows=1200 loops=N) - Filter: (unique1 < 1200) - Rows Removed by Filter: 8800 - -> Memoize (actual rows=1 loops=N) - Cache Key: t2.thousand - Cache Mode: logical - Hits: N Misses: N Evictions: N Overflows: 0 Memory Usage: NkB - -> Index Only Scan using tenk1_unique1 on tenk1 t1 (actual rows=1 loops=N) - Index Cond: (unique1 = t2.thousand) - Heap Fetches: N -(12 rows) - -CREATE TABLE flt (f float); -CREATE INDEX flt_f_idx ON flt (f); -INSERT INTO flt VALUES('-0.0'::float),('+0.0'::float); -ANALYZE flt; -SET enable_seqscan TO off; --- Ensure memoize operates in logical mode -SELECT explain_memoize(' -SELECT * FROM flt f1 INNER JOIN flt f2 ON f1.f = f2.f;', false); - explain_memoize -------------------------------------------------------------------------------- - Nested Loop (actual rows=4 loops=N) - -> Index Only Scan using flt_f_idx on flt f1 (actual rows=2 loops=N) - Heap Fetches: N - -> Memoize (actual rows=2 loops=N) - Cache Key: f1.f - Cache Mode: logical - Hits: 1 Misses: 1 Evictions: Zero Overflows: 0 Memory Usage: NkB - -> Index Only Scan using flt_f_idx on flt f2 (actual rows=2 loops=N) - Index Cond: (f = f1.f) - Heap Fetches: N -(10 rows) - --- Ensure memoize operates in binary mode -SELECT explain_memoize(' -SELECT * FROM flt f1 INNER JOIN flt f2 ON f1.f >= f2.f;', false); - explain_memoize -------------------------------------------------------------------------------- - Nested Loop (actual rows=4 loops=N) - -> Index Only Scan using flt_f_idx on flt f1 (actual rows=2 loops=N) - Heap Fetches: N - -> Memoize (actual rows=2 loops=N) - Cache Key: f1.f - Cache Mode: binary - Hits: 0 Misses: 2 Evictions: Zero Overflows: 0 Memory Usage: NkB - -> Index Only Scan using flt_f_idx on flt f2 (actual rows=2 loops=N) - Index Cond: (f <= f1.f) - Heap Fetches: N -(10 rows) - -DROP TABLE flt; --- Exercise Memoize in binary mode with a large fixed width type and a --- varlena type. 
-CREATE TABLE strtest (n name, t text); -CREATE INDEX strtest_n_idx ON strtest (n); -CREATE INDEX strtest_t_idx ON strtest (t); -INSERT INTO strtest VALUES('one','one'),('two','two'),('three',repeat(fipshash('three'),100)); --- duplicate rows so we get some cache hits -INSERT INTO strtest SELECT * FROM strtest; -ANALYZE strtest; --- Ensure we get 3 hits and 3 misses -SELECT explain_memoize(' -SELECT * FROM strtest s1 INNER JOIN strtest s2 ON s1.n >= s2.n;', false); - explain_memoize ----------------------------------------------------------------------------------- - Nested Loop (actual rows=24 loops=N) - -> Seq Scan on strtest s1 (actual rows=6 loops=N) - Disabled: true - -> Memoize (actual rows=4 loops=N) - Cache Key: s1.n - Cache Mode: binary - Hits: 3 Misses: 3 Evictions: Zero Overflows: 0 Memory Usage: NkB - -> Index Scan using strtest_n_idx on strtest s2 (actual rows=4 loops=N) - Index Cond: (n <= s1.n) -(9 rows) - --- Ensure we get 3 hits and 3 misses -SELECT explain_memoize(' -SELECT * FROM strtest s1 INNER JOIN strtest s2 ON s1.t >= s2.t;', false); - explain_memoize ----------------------------------------------------------------------------------- - Nested Loop (actual rows=24 loops=N) - -> Seq Scan on strtest s1 (actual rows=6 loops=N) - Disabled: true - -> Memoize (actual rows=4 loops=N) - Cache Key: s1.t - Cache Mode: binary - Hits: 3 Misses: 3 Evictions: Zero Overflows: 0 Memory Usage: NkB - -> Index Scan using strtest_t_idx on strtest s2 (actual rows=4 loops=N) - Index Cond: (t <= s1.t) -(9 rows) - -DROP TABLE strtest; --- Ensure memoize works with partitionwise join -SET enable_partitionwise_join TO on; -CREATE TABLE prt (a int) PARTITION BY RANGE(a); -CREATE TABLE prt_p1 PARTITION OF prt FOR VALUES FROM (0) TO (10); -CREATE TABLE prt_p2 PARTITION OF prt FOR VALUES FROM (10) TO (20); -INSERT INTO prt VALUES (0), (0), (0), (0); -INSERT INTO prt VALUES (10), (10), (10), (10); -CREATE INDEX iprt_p1_a ON prt_p1 (a); -CREATE INDEX iprt_p2_a ON prt_p2 (a); -ANALYZE prt; -SELECT explain_memoize(' -SELECT * FROM prt t1 INNER JOIN prt t2 ON t1.a = t2.a;', false); - explain_memoize ------------------------------------------------------------------------------------------- - Append (actual rows=32 loops=N) - -> Nested Loop (actual rows=16 loops=N) - -> Index Only Scan using iprt_p1_a on prt_p1 t1_1 (actual rows=4 loops=N) - Heap Fetches: N - -> Memoize (actual rows=4 loops=N) - Cache Key: t1_1.a - Cache Mode: logical - Hits: 3 Misses: 1 Evictions: Zero Overflows: 0 Memory Usage: NkB - -> Index Only Scan using iprt_p1_a on prt_p1 t2_1 (actual rows=4 loops=N) - Index Cond: (a = t1_1.a) - Heap Fetches: N - -> Nested Loop (actual rows=16 loops=N) - -> Index Only Scan using iprt_p2_a on prt_p2 t1_2 (actual rows=4 loops=N) - Heap Fetches: N - -> Memoize (actual rows=4 loops=N) - Cache Key: t1_2.a - Cache Mode: logical - Hits: 3 Misses: 1 Evictions: Zero Overflows: 0 Memory Usage: NkB - -> Index Only Scan using iprt_p2_a on prt_p2 t2_2 (actual rows=4 loops=N) - Index Cond: (a = t1_2.a) - Heap Fetches: N -(21 rows) - --- Ensure memoize works with parameterized union-all Append path -SET enable_partitionwise_join TO off; -SELECT explain_memoize(' -SELECT * FROM prt_p1 t1 INNER JOIN -(SELECT * FROM prt_p1 UNION ALL SELECT * FROM prt_p2) t2 -ON t1.a = t2.a;', false); - explain_memoize -------------------------------------------------------------------------------------- - Nested Loop (actual rows=16 loops=N) - -> Index Only Scan using iprt_p1_a on prt_p1 t1 (actual rows=4 loops=N) - Heap 
Fetches: N - -> Memoize (actual rows=4 loops=N) - Cache Key: t1.a - Cache Mode: logical - Hits: 3 Misses: 1 Evictions: Zero Overflows: 0 Memory Usage: NkB - -> Append (actual rows=4 loops=N) - -> Index Only Scan using iprt_p1_a on prt_p1 (actual rows=4 loops=N) - Index Cond: (a = t1.a) - Heap Fetches: N - -> Index Only Scan using iprt_p2_a on prt_p2 (actual rows=0 loops=N) - Index Cond: (a = t1.a) - Heap Fetches: N -(14 rows) - -DROP TABLE prt; -RESET enable_partitionwise_join; --- Exercise Memoize code that flushes the cache when a parameter changes which --- is not part of the cache key. --- Ensure we get a Memoize plan -EXPLAIN (COSTS OFF) -SELECT unique1 FROM tenk1 t0 -WHERE unique1 < 3 - AND EXISTS ( - SELECT 1 FROM tenk1 t1 - INNER JOIN tenk1 t2 ON t1.unique1 = t2.hundred - WHERE t0.ten = t1.twenty AND t0.two <> t2.four OFFSET 0); - QUERY PLAN ----------------------------------------------------------------- - Index Scan using tenk1_unique1 on tenk1 t0 - Index Cond: (unique1 < 3) - Filter: EXISTS(SubPlan 1) - SubPlan 1 - -> Nested Loop - -> Index Scan using tenk1_hundred on tenk1 t2 - Filter: (t0.two <> four) - -> Memoize - Cache Key: t2.hundred - Cache Mode: logical - -> Index Scan using tenk1_unique1 on tenk1 t1 - Index Cond: (unique1 = t2.hundred) - Filter: (t0.ten = twenty) -(13 rows) - --- Ensure the above query returns the correct result -SELECT unique1 FROM tenk1 t0 -WHERE unique1 < 3 - AND EXISTS ( - SELECT 1 FROM tenk1 t1 - INNER JOIN tenk1 t2 ON t1.unique1 = t2.hundred - WHERE t0.ten = t1.twenty AND t0.two <> t2.four OFFSET 0); - unique1 ---------- - 2 -(1 row) - -RESET enable_seqscan; -RESET enable_mergejoin; -RESET work_mem; -RESET hash_mem_multiplier; -RESET enable_bitmapscan; -RESET enable_hashjoin; --- Test parallel plans with Memoize -SET min_parallel_table_scan_size TO 0; -SET parallel_setup_cost TO 0; -SET parallel_tuple_cost TO 0; -SET max_parallel_workers_per_gather TO 2; --- Ensure we get a parallel plan. -EXPLAIN (COSTS OFF) -SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1, -LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2 -WHERE t1.unique1 < 1000; - QUERY PLAN -------------------------------------------------------------------------------- - Finalize Aggregate - -> Gather - Workers Planned: 2 - -> Partial Aggregate - -> Nested Loop - -> Parallel Bitmap Heap Scan on tenk1 t1 - Recheck Cond: (unique1 < 1000) - -> Bitmap Index Scan on tenk1_unique1 - Index Cond: (unique1 < 1000) - -> Memoize - Cache Key: t1.twenty - Cache Mode: logical - -> Index Only Scan using tenk1_unique1 on tenk1 t2 - Index Cond: (unique1 = t1.twenty) -(14 rows) - --- And ensure the parallel plan gives us the correct results. -SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1, -LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2 -WHERE t1.unique1 < 1000; - count | avg --------+-------------------- - 1000 | 9.5000000000000000 -(1 row) - -RESET max_parallel_workers_per_gather; -RESET parallel_tuple_cost; -RESET parallel_setup_cost; -RESET min_parallel_table_scan_size; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? 
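Aside, not part of the diff output: the memoize.out expectations above lean on the explain_memoize() helper, visible in the deleted lines, to stabilize machine-dependent counters with regexp_replace -- a zero-valued counter is rewritten to "Zero" so it stays checkable, and any other count collapses to "N". A minimal standalone sketch of that normalization, runnable in any psql session; the sample input line here is invented purely for illustration:

-- Illustrative only; mirrors the two-step replacement explain_memoize applies
-- to each EXPLAIN line: map the literal zero first, then collapse other counts.
SELECT regexp_replace(
         regexp_replace(ln, 'Evictions: 0', 'Evictions: Zero'),
         'Evictions: \d+', 'Evictions: N') AS normalized
FROM (VALUES ('Hits: 980  Misses: 20  Evictions: 0  Memory Usage: 4kB')) v(ln);
-- returns: Hits: 980  Misses: 20  Evictions: Zero  Memory Usage: 4kB

Applying the zero case first matters: once "Evictions: 0" has become "Evictions: Zero", the \d+ pattern no longer matches it, so zero and nonzero results remain distinguishable in the stabilized output.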
diff -U3 /Users/admin/pgsql/src/test/regress/expected/stats.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/stats.out --- /Users/admin/pgsql/src/test/regress/expected/stats.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/stats.out 2024-12-13 13:20:13 @@ -1,1649 +1,2 @@ --- --- Test cumulative stats system --- --- Must be run after tenk2 has been created (by create_table), --- populated (by create_misc) and indexed (by create_index). --- --- conditio sine qua non -SHOW track_counts; -- must be on - track_counts --------------- - on -(1 row) - --- ensure that both seqscan and indexscan plans are allowed -SET enable_seqscan TO on; -SET enable_indexscan TO on; --- for the moment, we don't want index-only scans here -SET enable_indexonlyscan TO off; --- not enabled by default, but we want to test it... -SET track_functions TO 'all'; --- record dboid for later use -SELECT oid AS dboid from pg_database where datname = current_database() \gset --- save counters -BEGIN; -SET LOCAL stats_fetch_consistency = snapshot; -CREATE TABLE prevstats AS -SELECT t.seq_scan, t.seq_tup_read, t.idx_scan, t.idx_tup_fetch, - (b.heap_blks_read + b.heap_blks_hit) AS heap_blks, - (b.idx_blks_read + b.idx_blks_hit) AS idx_blks, - pg_stat_get_snapshot_timestamp() as snap_ts - FROM pg_catalog.pg_stat_user_tables AS t, - pg_catalog.pg_statio_user_tables AS b - WHERE t.relname='tenk2' AND b.relname='tenk2'; -COMMIT; --- test effects of TRUNCATE on n_live_tup/n_dead_tup counters -CREATE TABLE trunc_stats_test(id serial); -CREATE TABLE trunc_stats_test1(id serial, stuff text); -CREATE TABLE trunc_stats_test2(id serial); -CREATE TABLE trunc_stats_test3(id serial, stuff text); -CREATE TABLE trunc_stats_test4(id serial); --- check that n_live_tup is reset to 0 after truncate -INSERT INTO trunc_stats_test DEFAULT VALUES; -INSERT INTO trunc_stats_test DEFAULT VALUES; -INSERT INTO trunc_stats_test DEFAULT VALUES; -TRUNCATE trunc_stats_test; --- test involving a truncate in a transaction; 4 ins but only 1 live -INSERT INTO trunc_stats_test1 DEFAULT VALUES; -INSERT INTO trunc_stats_test1 DEFAULT VALUES; -INSERT INTO trunc_stats_test1 DEFAULT VALUES; -UPDATE trunc_stats_test1 SET id = id + 10 WHERE id IN (1, 2); -DELETE FROM trunc_stats_test1 WHERE id = 3; -BEGIN; -UPDATE trunc_stats_test1 SET id = id + 100; -TRUNCATE trunc_stats_test1; -INSERT INTO trunc_stats_test1 DEFAULT VALUES; -COMMIT; --- use a savepoint: 1 insert, 1 live -BEGIN; -INSERT INTO trunc_stats_test2 DEFAULT VALUES; -INSERT INTO trunc_stats_test2 DEFAULT VALUES; -SAVEPOINT p1; -INSERT INTO trunc_stats_test2 DEFAULT VALUES; -TRUNCATE trunc_stats_test2; -INSERT INTO trunc_stats_test2 DEFAULT VALUES; -RELEASE SAVEPOINT p1; -COMMIT; --- rollback a savepoint: this should count 4 inserts and have 2 --- live tuples after commit (and 2 dead ones due to aborted subxact) -BEGIN; -INSERT INTO trunc_stats_test3 DEFAULT VALUES; -INSERT INTO trunc_stats_test3 DEFAULT VALUES; -SAVEPOINT p1; -INSERT INTO trunc_stats_test3 DEFAULT VALUES; -INSERT INTO trunc_stats_test3 DEFAULT VALUES; -TRUNCATE trunc_stats_test3; -INSERT INTO trunc_stats_test3 DEFAULT VALUES; -ROLLBACK TO SAVEPOINT p1; -COMMIT; --- rollback a truncate: this should count 2 inserts and produce 2 dead tuples -BEGIN; -INSERT INTO trunc_stats_test4 DEFAULT VALUES; -INSERT INTO trunc_stats_test4 DEFAULT VALUES; -TRUNCATE trunc_stats_test4; -INSERT INTO trunc_stats_test4 DEFAULT VALUES; -ROLLBACK; --- do a seqscan -SELECT count(*) FROM tenk2; - 
count -------- - 10000 -(1 row) - --- do an indexscan --- make sure it is not a bitmap scan, which might skip fetching heap tuples -SET enable_bitmapscan TO off; -SELECT count(*) FROM tenk2 WHERE unique1 = 1; - count -------- - 1 -(1 row) - -RESET enable_bitmapscan; --- ensure pending stats are flushed -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - --- check effects -BEGIN; -SET LOCAL stats_fetch_consistency = snapshot; -SELECT relname, n_tup_ins, n_tup_upd, n_tup_del, n_live_tup, n_dead_tup - FROM pg_stat_user_tables - WHERE relname like 'trunc_stats_test%' order by relname; - relname | n_tup_ins | n_tup_upd | n_tup_del | n_live_tup | n_dead_tup --------------------+-----------+-----------+-----------+------------+------------ - trunc_stats_test | 3 | 0 | 0 | 0 | 0 - trunc_stats_test1 | 4 | 2 | 1 | 1 | 0 - trunc_stats_test2 | 1 | 0 | 0 | 1 | 0 - trunc_stats_test3 | 4 | 0 | 0 | 2 | 2 - trunc_stats_test4 | 2 | 0 | 0 | 0 | 2 -(5 rows) - -SELECT st.seq_scan >= pr.seq_scan + 1, - st.seq_tup_read >= pr.seq_tup_read + cl.reltuples, - st.idx_scan >= pr.idx_scan + 1, - st.idx_tup_fetch >= pr.idx_tup_fetch + 1 - FROM pg_stat_user_tables AS st, pg_class AS cl, prevstats AS pr - WHERE st.relname='tenk2' AND cl.relname='tenk2'; - ?column? | ?column? | ?column? | ?column? -----------+----------+----------+---------- - t | t | t | t -(1 row) - -SELECT st.heap_blks_read + st.heap_blks_hit >= pr.heap_blks + cl.relpages, - st.idx_blks_read + st.idx_blks_hit >= pr.idx_blks + 1 - FROM pg_statio_user_tables AS st, pg_class AS cl, prevstats AS pr - WHERE st.relname='tenk2' AND cl.relname='tenk2'; - ?column? | ?column? -----------+---------- - t | t -(1 row) - -SELECT pr.snap_ts < pg_stat_get_snapshot_timestamp() as snapshot_newer -FROM prevstats AS pr; - snapshot_newer ----------------- - t -(1 row) - -COMMIT; ----- --- Basic tests for track_functions ---- -CREATE FUNCTION stats_test_func1() RETURNS VOID LANGUAGE plpgsql AS $$BEGIN END;$$; -SELECT 'stats_test_func1()'::regprocedure::oid AS stats_test_func1_oid \gset -CREATE FUNCTION stats_test_func2() RETURNS VOID LANGUAGE plpgsql AS $$BEGIN END;$$; -SELECT 'stats_test_func2()'::regprocedure::oid AS stats_test_func2_oid \gset --- test that stats are accumulated -BEGIN; -SET LOCAL stats_fetch_consistency = none; -SELECT pg_stat_get_function_calls(:stats_test_func1_oid); - pg_stat_get_function_calls ----------------------------- - -(1 row) - -SELECT pg_stat_get_xact_function_calls(:stats_test_func1_oid); - pg_stat_get_xact_function_calls ---------------------------------- - -(1 row) - -SELECT stats_test_func1(); - stats_test_func1 ------------------- - -(1 row) - -SELECT pg_stat_get_xact_function_calls(:stats_test_func1_oid); - pg_stat_get_xact_function_calls ---------------------------------- - 1 -(1 row) - -SELECT stats_test_func1(); - stats_test_func1 ------------------- - -(1 row) - -SELECT pg_stat_get_xact_function_calls(:stats_test_func1_oid); - pg_stat_get_xact_function_calls ---------------------------------- - 2 -(1 row) - -SELECT pg_stat_get_function_calls(:stats_test_func1_oid); - pg_stat_get_function_calls ----------------------------- - 0 -(1 row) - -COMMIT; --- Verify that function stats are not transactional --- rolled back savepoint in committing transaction -BEGIN; -SELECT stats_test_func2(); - stats_test_func2 ------------------- - -(1 row) - -SAVEPOINT foo; -SELECT stats_test_func2(); - stats_test_func2 ------------------- - -(1 row) - -ROLLBACK TO SAVEPOINT foo; -SELECT 
pg_stat_get_xact_function_calls(:stats_test_func2_oid); - pg_stat_get_xact_function_calls ---------------------------------- - 2 -(1 row) - -SELECT stats_test_func2(); - stats_test_func2 ------------------- - -(1 row) - -COMMIT; --- rolled back transaction -BEGIN; -SELECT stats_test_func2(); - stats_test_func2 ------------------- - -(1 row) - -ROLLBACK; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - --- check collected stats -SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func1_oid; - funcname | calls -------------------+------- - stats_test_func1 | 2 -(1 row) - -SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func2_oid; - funcname | calls -------------------+------- - stats_test_func2 | 4 -(1 row) - --- check that a rolled back drop function stats leaves stats alive -BEGIN; -SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func1_oid; - funcname | calls -------------------+------- - stats_test_func1 | 2 -(1 row) - -DROP FUNCTION stats_test_func1(); --- shouldn't be visible via view -SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func1_oid; - funcname | calls -----------+------- -(0 rows) - --- but still via oid access -SELECT pg_stat_get_function_calls(:stats_test_func1_oid); - pg_stat_get_function_calls ----------------------------- - 2 -(1 row) - -ROLLBACK; -SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func1_oid; - funcname | calls -------------------+------- - stats_test_func1 | 2 -(1 row) - -SELECT pg_stat_get_function_calls(:stats_test_func1_oid); - pg_stat_get_function_calls ----------------------------- - 2 -(1 row) - --- check that function dropped in main transaction leaves no stats behind -BEGIN; -DROP FUNCTION stats_test_func1(); -COMMIT; -SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func1_oid; - funcname | calls -----------+------- -(0 rows) - -SELECT pg_stat_get_function_calls(:stats_test_func1_oid); - pg_stat_get_function_calls ----------------------------- - -(1 row) - --- check that function dropped in a subtransaction leaves no stats behind -BEGIN; -SELECT stats_test_func2(); - stats_test_func2 ------------------- - -(1 row) - -SAVEPOINT a; -SELECT stats_test_func2(); - stats_test_func2 ------------------- - -(1 row) - -SAVEPOINT b; -DROP FUNCTION stats_test_func2(); -COMMIT; -SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func2_oid; - funcname | calls -----------+------- -(0 rows) - -SELECT pg_stat_get_function_calls(:stats_test_func2_oid); - pg_stat_get_function_calls ----------------------------- - -(1 row) - --- Check that stats for relations are dropped. For that we need to access stats --- by oid after the DROP TABLE. Save oids. 
-CREATE TABLE drop_stats_test(); -INSERT INTO drop_stats_test DEFAULT VALUES; -SELECT 'drop_stats_test'::regclass::oid AS drop_stats_test_oid \gset -CREATE TABLE drop_stats_test_xact(); -INSERT INTO drop_stats_test_xact DEFAULT VALUES; -SELECT 'drop_stats_test_xact'::regclass::oid AS drop_stats_test_xact_oid \gset -CREATE TABLE drop_stats_test_subxact(); -INSERT INTO drop_stats_test_subxact DEFAULT VALUES; -SELECT 'drop_stats_test_subxact'::regclass::oid AS drop_stats_test_subxact_oid \gset -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT pg_stat_get_live_tuples(:drop_stats_test_oid); - pg_stat_get_live_tuples -------------------------- - 1 -(1 row) - -DROP TABLE drop_stats_test; -SELECT pg_stat_get_live_tuples(:drop_stats_test_oid); - pg_stat_get_live_tuples -------------------------- - 0 -(1 row) - -SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_oid); - pg_stat_get_xact_tuples_inserted ----------------------------------- - 0 -(1 row) - --- check that rollback protects against having stats dropped and that local --- modifications don't pose a problem -SELECT pg_stat_get_live_tuples(:drop_stats_test_xact_oid); - pg_stat_get_live_tuples -------------------------- - 1 -(1 row) - -SELECT pg_stat_get_tuples_inserted(:drop_stats_test_xact_oid); - pg_stat_get_tuples_inserted ------------------------------ - 1 -(1 row) - -SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_xact_oid); - pg_stat_get_xact_tuples_inserted ----------------------------------- - 0 -(1 row) - -BEGIN; -INSERT INTO drop_stats_test_xact DEFAULT VALUES; -SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_xact_oid); - pg_stat_get_xact_tuples_inserted ----------------------------------- - 1 -(1 row) - -DROP TABLE drop_stats_test_xact; -SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_xact_oid); - pg_stat_get_xact_tuples_inserted ----------------------------------- - 0 -(1 row) - -ROLLBACK; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT pg_stat_get_live_tuples(:drop_stats_test_xact_oid); - pg_stat_get_live_tuples -------------------------- - 1 -(1 row) - -SELECT pg_stat_get_tuples_inserted(:drop_stats_test_xact_oid); - pg_stat_get_tuples_inserted ------------------------------ - 2 -(1 row) - --- transactional drop -SELECT pg_stat_get_live_tuples(:drop_stats_test_xact_oid); - pg_stat_get_live_tuples -------------------------- - 1 -(1 row) - -SELECT pg_stat_get_tuples_inserted(:drop_stats_test_xact_oid); - pg_stat_get_tuples_inserted ------------------------------ - 2 -(1 row) - -BEGIN; -INSERT INTO drop_stats_test_xact DEFAULT VALUES; -SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_xact_oid); - pg_stat_get_xact_tuples_inserted ----------------------------------- - 1 -(1 row) - -DROP TABLE drop_stats_test_xact; -SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_xact_oid); - pg_stat_get_xact_tuples_inserted ----------------------------------- - 0 -(1 row) - -COMMIT; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT pg_stat_get_live_tuples(:drop_stats_test_xact_oid); - pg_stat_get_live_tuples -------------------------- - 0 -(1 row) - -SELECT pg_stat_get_tuples_inserted(:drop_stats_test_xact_oid); - pg_stat_get_tuples_inserted ------------------------------ - 0 -(1 row) - --- savepoint rollback (2 levels) -SELECT pg_stat_get_live_tuples(:drop_stats_test_subxact_oid); - pg_stat_get_live_tuples 
-------------------------- - 1 -(1 row) - -BEGIN; -INSERT INTO drop_stats_test_subxact DEFAULT VALUES; -SAVEPOINT sp1; -INSERT INTO drop_stats_test_subxact DEFAULT VALUES; -SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_subxact_oid); - pg_stat_get_xact_tuples_inserted ----------------------------------- - 2 -(1 row) - -SAVEPOINT sp2; -DROP TABLE drop_stats_test_subxact; -ROLLBACK TO SAVEPOINT sp2; -SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_subxact_oid); - pg_stat_get_xact_tuples_inserted ----------------------------------- - 2 -(1 row) - -COMMIT; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT pg_stat_get_live_tuples(:drop_stats_test_subxact_oid); - pg_stat_get_live_tuples -------------------------- - 3 -(1 row) - --- savepoint rolback (1 level) -SELECT pg_stat_get_live_tuples(:drop_stats_test_subxact_oid); - pg_stat_get_live_tuples -------------------------- - 3 -(1 row) - -BEGIN; -SAVEPOINT sp1; -DROP TABLE drop_stats_test_subxact; -SAVEPOINT sp2; -ROLLBACK TO SAVEPOINT sp1; -COMMIT; -SELECT pg_stat_get_live_tuples(:drop_stats_test_subxact_oid); - pg_stat_get_live_tuples -------------------------- - 3 -(1 row) - --- and now actually drop -SELECT pg_stat_get_live_tuples(:drop_stats_test_subxact_oid); - pg_stat_get_live_tuples -------------------------- - 3 -(1 row) - -BEGIN; -SAVEPOINT sp1; -DROP TABLE drop_stats_test_subxact; -SAVEPOINT sp2; -RELEASE SAVEPOINT sp1; -COMMIT; -SELECT pg_stat_get_live_tuples(:drop_stats_test_subxact_oid); - pg_stat_get_live_tuples -------------------------- - 0 -(1 row) - -DROP TABLE trunc_stats_test, trunc_stats_test1, trunc_stats_test2, trunc_stats_test3, trunc_stats_test4; -DROP TABLE prevstats; ------ --- Test that last_seq_scan, last_idx_scan are correctly maintained --- --- Perform test using a temporary table. That way autovacuum etc won't --- interfere. To be able to check that timestamps increase, we sleep for 100ms --- between tests, assuming that there aren't systems with a coarser timestamp --- granularity. 
------ -BEGIN; -CREATE TEMPORARY TABLE test_last_scan(idx_col int primary key, noidx_col int); -INSERT INTO test_last_scan(idx_col, noidx_col) VALUES(1, 1); -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT last_seq_scan, last_idx_scan FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass; - last_seq_scan | last_idx_scan ----------------+--------------- - | -(1 row) - -COMMIT; -SELECT pg_stat_reset_single_table_counters('test_last_scan'::regclass); - pg_stat_reset_single_table_counters -------------------------------------- - -(1 row) - -SELECT seq_scan, idx_scan FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass; - seq_scan | idx_scan -----------+---------- - 0 | 0 -(1 row) - --- ensure we start out with exactly one index and sequential scan -BEGIN; -SET LOCAL enable_seqscan TO on; -SET LOCAL enable_indexscan TO on; -SET LOCAL enable_bitmapscan TO off; -EXPLAIN (COSTS off) SELECT count(*) FROM test_last_scan WHERE noidx_col = 1; - QUERY PLAN ----------------------------------- - Aggregate - -> Seq Scan on test_last_scan - Filter: (noidx_col = 1) -(3 rows) - -SELECT count(*) FROM test_last_scan WHERE noidx_col = 1; - count -------- - 1 -(1 row) - -SET LOCAL enable_seqscan TO off; -EXPLAIN (COSTS off) SELECT count(*) FROM test_last_scan WHERE idx_col = 1; - QUERY PLAN --------------------------------------------------------------- - Aggregate - -> Index Scan using test_last_scan_pkey on test_last_scan - Index Cond: (idx_col = 1) -(3 rows) - -SELECT count(*) FROM test_last_scan WHERE idx_col = 1; - count -------- - 1 -(1 row) - -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -COMMIT; --- fetch timestamps from before the next test -SELECT last_seq_scan AS test_last_seq, last_idx_scan AS test_last_idx -FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass \gset -SELECT pg_sleep(0.1); -- assume a minimum timestamp granularity of 100ms - pg_sleep ----------- - -(1 row) - --- cause one sequential scan -BEGIN; -SET LOCAL enable_seqscan TO on; -SET LOCAL enable_indexscan TO off; -SET LOCAL enable_bitmapscan TO off; -EXPLAIN (COSTS off) SELECT count(*) FROM test_last_scan WHERE noidx_col = 1; - QUERY PLAN ----------------------------------- - Aggregate - -> Seq Scan on test_last_scan - Filter: (noidx_col = 1) -(3 rows) - -SELECT count(*) FROM test_last_scan WHERE noidx_col = 1; - count -------- - 1 -(1 row) - -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -COMMIT; --- check that just sequential scan stats were incremented -SELECT seq_scan, :'test_last_seq' < last_seq_scan AS seq_ok, idx_scan, :'test_last_idx' = last_idx_scan AS idx_ok -FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass; - seq_scan | seq_ok | idx_scan | idx_ok -----------+--------+----------+-------- - 2 | t | 1 | t -(1 row) - --- fetch timestamps from before the next test -SELECT last_seq_scan AS test_last_seq, last_idx_scan AS test_last_idx -FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass \gset -SELECT pg_sleep(0.1); - pg_sleep ----------- - -(1 row) - --- cause one index scan -BEGIN; -SET LOCAL enable_seqscan TO off; -SET LOCAL enable_indexscan TO on; -SET LOCAL enable_bitmapscan TO off; -EXPLAIN (COSTS off) SELECT count(*) FROM test_last_scan WHERE idx_col = 1; - QUERY PLAN --------------------------------------------------------------- - Aggregate - -> Index Scan using test_last_scan_pkey on 
test_last_scan - Index Cond: (idx_col = 1) -(3 rows) - -SELECT count(*) FROM test_last_scan WHERE idx_col = 1; - count -------- - 1 -(1 row) - -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -COMMIT; --- check that just index scan stats were incremented -SELECT seq_scan, :'test_last_seq' = last_seq_scan AS seq_ok, idx_scan, :'test_last_idx' < last_idx_scan AS idx_ok -FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass; - seq_scan | seq_ok | idx_scan | idx_ok -----------+--------+----------+-------- - 2 | t | 2 | t -(1 row) - --- fetch timestamps from before the next test -SELECT last_seq_scan AS test_last_seq, last_idx_scan AS test_last_idx -FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass \gset -SELECT pg_sleep(0.1); - pg_sleep ----------- - -(1 row) - --- cause one bitmap index scan -BEGIN; -SET LOCAL enable_seqscan TO off; -SET LOCAL enable_indexscan TO off; -SET LOCAL enable_bitmapscan TO on; -EXPLAIN (COSTS off) SELECT count(*) FROM test_last_scan WHERE idx_col = 1; - QUERY PLAN ------------------------------------------------------- - Aggregate - -> Bitmap Heap Scan on test_last_scan - Recheck Cond: (idx_col = 1) - -> Bitmap Index Scan on test_last_scan_pkey - Index Cond: (idx_col = 1) -(5 rows) - -SELECT count(*) FROM test_last_scan WHERE idx_col = 1; - count -------- - 1 -(1 row) - -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -COMMIT; --- check that just index scan stats were incremented -SELECT seq_scan, :'test_last_seq' = last_seq_scan AS seq_ok, idx_scan, :'test_last_idx' < last_idx_scan AS idx_ok -FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass; - seq_scan | seq_ok | idx_scan | idx_ok -----------+--------+----------+-------- - 2 | t | 3 | t -(1 row) - ------ --- Test reset of some stats for shared table ------ --- This updates the comment of the database currently in use in --- pg_shdescription with a fake value, then sets it back to its --- original value. -SELECT shobj_description(d.oid, 'pg_database') as description_before - FROM pg_database d WHERE datname = current_database() \gset --- force some stats in pg_shdescription. -BEGIN; -SELECT current_database() as datname \gset -COMMENT ON DATABASE :"datname" IS 'This is a test comment'; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -COMMIT; --- check that the stats are reset. 
-SELECT (n_tup_ins + n_tup_upd) > 0 AS has_data FROM pg_stat_all_tables - WHERE relid = 'pg_shdescription'::regclass; - has_data ----------- - t -(1 row) - -SELECT pg_stat_reset_single_table_counters('pg_shdescription'::regclass); - pg_stat_reset_single_table_counters -------------------------------------- - -(1 row) - -SELECT (n_tup_ins + n_tup_upd) > 0 AS has_data FROM pg_stat_all_tables - WHERE relid = 'pg_shdescription'::regclass; - has_data ----------- - f -(1 row) - --- set back comment -\if :{?description_before} - COMMENT ON DATABASE :"datname" IS :'description_before'; -\else - COMMENT ON DATABASE :"datname" IS NULL; -\endif ------ --- Test that various stats views are being properly populated ------ --- Test that sessions is incremented when a new session is started in pg_stat_database -SELECT sessions AS db_stat_sessions FROM pg_stat_database WHERE datname = (SELECT current_database()) \gset -\c -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sessions > :db_stat_sessions FROM pg_stat_database WHERE datname = (SELECT current_database()); - ?column? ----------- - t -(1 row) - --- Test pg_stat_checkpointer checkpointer-related stats, together with pg_stat_wal -SELECT num_requested AS rqst_ckpts_before FROM pg_stat_checkpointer \gset --- Test pg_stat_wal (and make a temp table so our temp schema exists) -SELECT wal_bytes AS wal_bytes_before FROM pg_stat_wal \gset -CREATE TEMP TABLE test_stats_temp AS SELECT 17; -DROP TABLE test_stats_temp; --- Checkpoint twice: The checkpointer reports stats after reporting completion --- of the checkpoint. But after a second checkpoint we'll see at least the --- results of the first. -CHECKPOINT; -CHECKPOINT; -SELECT num_requested > :rqst_ckpts_before FROM pg_stat_checkpointer; - ?column? ----------- - t -(1 row) - -SELECT wal_bytes > :wal_bytes_before FROM pg_stat_wal; - ?column? ----------- - t -(1 row) - --- Test pg_stat_get_backend_idset() and some allied functions. --- In particular, verify that their notion of backend ID matches --- our temp schema index. -SELECT (current_schemas(true))[1] = ('pg_temp_' || beid::text) AS match -FROM pg_stat_get_backend_idset() beid -WHERE pg_stat_get_backend_pid(beid) = pg_backend_pid(); - match -------- - t -(1 row) - ------ --- Test that resetting stats works for reset timestamp ------ --- Test that reset_slru with a specified SLRU works. -SELECT stats_reset AS slru_commit_ts_reset_ts FROM pg_stat_slru WHERE name = 'commit_timestamp' \gset -SELECT stats_reset AS slru_notify_reset_ts FROM pg_stat_slru WHERE name = 'notify' \gset -SELECT pg_stat_reset_slru('commit_timestamp'); - pg_stat_reset_slru --------------------- - -(1 row) - -SELECT stats_reset > :'slru_commit_ts_reset_ts'::timestamptz FROM pg_stat_slru WHERE name = 'commit_timestamp'; - ?column? ----------- - t -(1 row) - -SELECT stats_reset AS slru_commit_ts_reset_ts FROM pg_stat_slru WHERE name = 'commit_timestamp' \gset --- Test that multiple SLRUs are reset when no specific SLRU provided to reset function -SELECT pg_stat_reset_slru(); - pg_stat_reset_slru --------------------- - -(1 row) - -SELECT stats_reset > :'slru_commit_ts_reset_ts'::timestamptz FROM pg_stat_slru WHERE name = 'commit_timestamp'; - ?column? ----------- - t -(1 row) - -SELECT stats_reset > :'slru_notify_reset_ts'::timestamptz FROM pg_stat_slru WHERE name = 'notify'; - ?column? 
----------- - t -(1 row) - --- Test that reset_shared with archiver specified as the stats type works -SELECT stats_reset AS archiver_reset_ts FROM pg_stat_archiver \gset -SELECT pg_stat_reset_shared('archiver'); - pg_stat_reset_shared ----------------------- - -(1 row) - -SELECT stats_reset > :'archiver_reset_ts'::timestamptz FROM pg_stat_archiver; - ?column? ----------- - t -(1 row) - --- Test that reset_shared with bgwriter specified as the stats type works -SELECT stats_reset AS bgwriter_reset_ts FROM pg_stat_bgwriter \gset -SELECT pg_stat_reset_shared('bgwriter'); - pg_stat_reset_shared ----------------------- - -(1 row) - -SELECT stats_reset > :'bgwriter_reset_ts'::timestamptz FROM pg_stat_bgwriter; - ?column? ----------- - t -(1 row) - --- Test that reset_shared with checkpointer specified as the stats type works -SELECT stats_reset AS checkpointer_reset_ts FROM pg_stat_checkpointer \gset -SELECT pg_stat_reset_shared('checkpointer'); - pg_stat_reset_shared ----------------------- - -(1 row) - -SELECT stats_reset > :'checkpointer_reset_ts'::timestamptz FROM pg_stat_checkpointer; - ?column? ----------- - t -(1 row) - --- Test that reset_shared with recovery_prefetch specified as the stats type works -SELECT stats_reset AS recovery_prefetch_reset_ts FROM pg_stat_recovery_prefetch \gset -SELECT pg_stat_reset_shared('recovery_prefetch'); - pg_stat_reset_shared ----------------------- - -(1 row) - -SELECT stats_reset > :'recovery_prefetch_reset_ts'::timestamptz FROM pg_stat_recovery_prefetch; - ?column? ----------- - t -(1 row) - --- Test that reset_shared with slru specified as the stats type works -SELECT max(stats_reset) AS slru_reset_ts FROM pg_stat_slru \gset -SELECT pg_stat_reset_shared('slru'); - pg_stat_reset_shared ----------------------- - -(1 row) - -SELECT max(stats_reset) > :'slru_reset_ts'::timestamptz FROM pg_stat_slru; - ?column? ----------- - t -(1 row) - --- Test that reset_shared with wal specified as the stats type works -SELECT stats_reset AS wal_reset_ts FROM pg_stat_wal \gset -SELECT pg_stat_reset_shared('wal'); - pg_stat_reset_shared ----------------------- - -(1 row) - -SELECT stats_reset > :'wal_reset_ts'::timestamptz FROM pg_stat_wal; - ?column? ----------- - t -(1 row) - --- Test error case for reset_shared with unknown stats type -SELECT pg_stat_reset_shared('unknown'); -ERROR: unrecognized reset target: "unknown" -HINT: Target must be "archiver", "bgwriter", "checkpointer", "io", "recovery_prefetch", "slru", or "wal". --- Test that reset works for pg_stat_database --- Since pg_stat_database stats_reset starts out as NULL, reset it once first so we have something to compare it to -SELECT pg_stat_reset(); - pg_stat_reset ---------------- - -(1 row) - -SELECT stats_reset AS db_reset_ts FROM pg_stat_database WHERE datname = (SELECT current_database()) \gset -SELECT pg_stat_reset(); - pg_stat_reset ---------------- - -(1 row) - -SELECT stats_reset > :'db_reset_ts'::timestamptz FROM pg_stat_database WHERE datname = (SELECT current_database()); - ?column? ----------- - t -(1 row) - ----- --- pg_stat_get_snapshot_timestamp behavior ----- -BEGIN; -SET LOCAL stats_fetch_consistency = snapshot; --- no snapshot yet, return NULL -SELECT pg_stat_get_snapshot_timestamp(); - pg_stat_get_snapshot_timestamp --------------------------------- - -(1 row) - --- any attempt at accessing stats will build snapshot -SELECT pg_stat_get_function_calls(0); - pg_stat_get_function_calls ----------------------------- - -(1 row) - -SELECT pg_stat_get_snapshot_timestamp() >= NOW(); - ?column? 
----------- - t -(1 row) - --- shows NULL again after clearing -SELECT pg_stat_clear_snapshot(); - pg_stat_clear_snapshot ------------------------- - -(1 row) - -SELECT pg_stat_get_snapshot_timestamp(); - pg_stat_get_snapshot_timestamp --------------------------------- - -(1 row) - -COMMIT; ----- --- Changing stats_fetch_consistency in a transaction. ----- -BEGIN; --- Stats filled under the cache mode -SET LOCAL stats_fetch_consistency = cache; -SELECT pg_stat_get_function_calls(0); - pg_stat_get_function_calls ----------------------------- - -(1 row) - -SELECT pg_stat_get_snapshot_timestamp() IS NOT NULL AS snapshot_ok; - snapshot_ok -------------- - f -(1 row) - --- Success in accessing pre-existing snapshot data. -SET LOCAL stats_fetch_consistency = snapshot; -SELECT pg_stat_get_snapshot_timestamp() IS NOT NULL AS snapshot_ok; - snapshot_ok -------------- - f -(1 row) - -SELECT pg_stat_get_function_calls(0); - pg_stat_get_function_calls ----------------------------- - -(1 row) - -SELECT pg_stat_get_snapshot_timestamp() IS NOT NULL AS snapshot_ok; - snapshot_ok -------------- - t -(1 row) - --- Snapshot cleared. -SET LOCAL stats_fetch_consistency = none; -SELECT pg_stat_get_snapshot_timestamp() IS NOT NULL AS snapshot_ok; - snapshot_ok -------------- - f -(1 row) - -SELECT pg_stat_get_function_calls(0); - pg_stat_get_function_calls ----------------------------- - -(1 row) - -SELECT pg_stat_get_snapshot_timestamp() IS NOT NULL AS snapshot_ok; - snapshot_ok -------------- - f -(1 row) - -ROLLBACK; ----- --- pg_stat_have_stats behavior ----- --- fixed-numbered stats exist -SELECT pg_stat_have_stats('bgwriter', 0, 0); - pg_stat_have_stats --------------------- - t -(1 row) - --- unknown stats kinds error out -SELECT pg_stat_have_stats('zaphod', 0, 0); -ERROR: invalid statistics kind: "zaphod" --- db stats have objid 0 -SELECT pg_stat_have_stats('database', :dboid, 1); - pg_stat_have_stats --------------------- - f -(1 row) - -SELECT pg_stat_have_stats('database', :dboid, 0); - pg_stat_have_stats --------------------- - t -(1 row) - --- pg_stat_have_stats returns true for committed index creation -CREATE table stats_test_tab1 as select generate_series(1,10) a; -CREATE index stats_test_idx1 on stats_test_tab1(a); -SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid \gset -SET enable_seqscan TO off; -select a from stats_test_tab1 where a = 3; - a ---- - 3 -(1 row) - -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - t -(1 row) - --- pg_stat_have_stats returns false for dropped index with stats -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - t -(1 row) - -DROP index stats_test_idx1; -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - f -(1 row) - --- pg_stat_have_stats returns false for rolled back index creation -BEGIN; -CREATE index stats_test_idx1 on stats_test_tab1(a); -SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid \gset -select a from stats_test_tab1 where a = 3; - a ---- - 3 -(1 row) - -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - t -(1 row) - -ROLLBACK; -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - f -(1 row) - --- pg_stat_have_stats returns true for reindex CONCURRENTLY -CREATE index stats_test_idx1 on stats_test_tab1(a); -SELECT 
'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid \gset -select a from stats_test_tab1 where a = 3; - a ---- - 3 -(1 row) - -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - t -(1 row) - -REINDEX index CONCURRENTLY stats_test_idx1; --- false for previous oid -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - f -(1 row) - --- true for new oid -SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid \gset -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - t -(1 row) - --- pg_stat_have_stats returns true for a rolled back drop index with stats -BEGIN; -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - t -(1 row) - -DROP index stats_test_idx1; -ROLLBACK; -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - t -(1 row) - --- put enable_seqscan back to on -SET enable_seqscan TO on; --- ensure that stats accessors handle NULL input correctly -SELECT pg_stat_get_replication_slot(NULL); - pg_stat_get_replication_slot ------------------------------- - -(1 row) - -SELECT pg_stat_get_subscription_stats(NULL); - pg_stat_get_subscription_stats --------------------------------- - -(1 row) - --- Test that the following operations are tracked in pg_stat_io: --- - reads of target blocks into shared buffers --- - writes of shared buffers to permanent storage --- - extends of relations using shared buffers --- - fsyncs done to ensure the durability of data dirtying shared buffers --- - shared buffer hits --- There is no test for blocks evicted from shared buffers, because we cannot --- be sure of the state of shared buffers at the point the test is run. --- Create a regular table and insert some data to generate IOCONTEXT_NORMAL --- extends. -SELECT sum(extends) AS io_sum_shared_before_extends - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset -SELECT sum(writes) AS writes, sum(fsyncs) AS fsyncs - FROM pg_stat_io - WHERE object = 'relation' \gset io_sum_shared_before_ -CREATE TABLE test_io_shared(a int); -INSERT INTO test_io_shared SELECT i FROM generate_series(1,100)i; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(extends) AS io_sum_shared_after_extends - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset -SELECT :io_sum_shared_after_extends > :io_sum_shared_before_extends; - ?column? ----------- - t -(1 row) - --- After a checkpoint, there should be some additional IOCONTEXT_NORMAL writes --- and fsyncs. --- See comment above for rationale for two explicit CHECKPOINTs. -CHECKPOINT; -CHECKPOINT; -SELECT sum(writes) AS writes, sum(fsyncs) AS fsyncs - FROM pg_stat_io - WHERE object = 'relation' \gset io_sum_shared_after_ -SELECT :io_sum_shared_after_writes > :io_sum_shared_before_writes; - ?column? ----------- - t -(1 row) - -SELECT current_setting('fsync') = 'off' - OR :io_sum_shared_after_fsyncs > :io_sum_shared_before_fsyncs; - ?column? ----------- - t -(1 row) - --- Change the tablespace so that the table is rewritten directly, then SELECT --- from it to cause it to be read back into shared buffers. 
-SELECT sum(reads) AS io_sum_shared_before_reads - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset --- Do this in a transaction to prevent spurious failures due to concurrent accesses to our newly --- rewritten table, e.g. by autovacuum. -BEGIN; -ALTER TABLE test_io_shared SET TABLESPACE regress_tblspace; --- SELECT from the table so that the data is read into shared buffers and --- context 'normal', object 'relation' reads are counted. -SELECT COUNT(*) FROM test_io_shared; - count -------- - 100 -(1 row) - -COMMIT; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(reads) AS io_sum_shared_after_reads - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset -SELECT :io_sum_shared_after_reads > :io_sum_shared_before_reads; - ?column? ----------- - t -(1 row) - -SELECT sum(hits) AS io_sum_shared_before_hits - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset --- Select from the table again to count hits. --- Ensure we generate hits by forcing a nested loop self-join with no --- materialize node. The outer side's buffer will stay pinned, preventing its --- eviction, while we loop through the inner side and generate hits. -BEGIN; -SET LOCAL enable_nestloop TO on; SET LOCAL enable_mergejoin TO off; -SET LOCAL enable_hashjoin TO off; SET LOCAL enable_material TO off; --- ensure plan stays as we expect it to -EXPLAIN (COSTS OFF) SELECT COUNT(*) FROM test_io_shared t1 INNER JOIN test_io_shared t2 USING (a); - QUERY PLAN -------------------------------------------- - Aggregate - -> Nested Loop - Join Filter: (t1.a = t2.a) - -> Seq Scan on test_io_shared t1 - -> Seq Scan on test_io_shared t2 -(5 rows) - -SELECT COUNT(*) FROM test_io_shared t1 INNER JOIN test_io_shared t2 USING (a); - count -------- - 100 -(1 row) - -COMMIT; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(hits) AS io_sum_shared_after_hits - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset -SELECT :io_sum_shared_after_hits > :io_sum_shared_before_hits; - ?column? ----------- - t -(1 row) - -DROP TABLE test_io_shared; --- Test that the following IOCONTEXT_LOCAL IOOps are tracked in pg_stat_io: --- - eviction of local buffers in order to reuse them --- - reads of temporary table blocks into local buffers --- - writes of local buffers to permanent storage --- - extends of temporary tables --- Set temp_buffers to its minimum so that we can trigger writes with fewer --- inserted tuples. Do so in a new session in case temporary tables have been --- accessed by previous tests in this session. -\c -SET temp_buffers TO 100; -CREATE TEMPORARY TABLE test_io_local(a int, b TEXT); -SELECT sum(extends) AS extends, sum(evictions) AS evictions, sum(writes) AS writes - FROM pg_stat_io - WHERE context = 'normal' AND object = 'temp relation' \gset io_sum_local_before_ --- Insert tuples into the temporary table, generating extends in the stats. --- Insert enough values that we need to reuse and write out dirty local --- buffers, generating evictions and writes. -INSERT INTO test_io_local SELECT generate_series(1, 5000) as id, repeat('a', 200); --- Ensure the table is large enough to exceed our temp_buffers setting. -SELECT pg_relation_size('test_io_local') / current_setting('block_size')::int8 > 100; - ?column?
----------- - t -(1 row) - -SELECT sum(reads) AS io_sum_local_before_reads - FROM pg_stat_io WHERE context = 'normal' AND object = 'temp relation' \gset --- Read in evicted buffers, generating reads. -SELECT COUNT(*) FROM test_io_local; - count -------- - 5000 -(1 row) - -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(evictions) AS evictions, - sum(reads) AS reads, - sum(writes) AS writes, - sum(extends) AS extends - FROM pg_stat_io - WHERE context = 'normal' AND object = 'temp relation' \gset io_sum_local_after_ -SELECT :io_sum_local_after_evictions > :io_sum_local_before_evictions, - :io_sum_local_after_reads > :io_sum_local_before_reads, - :io_sum_local_after_writes > :io_sum_local_before_writes, - :io_sum_local_after_extends > :io_sum_local_before_extends; - ?column? | ?column? | ?column? | ?column? -----------+----------+----------+---------- - t | t | t | t -(1 row) - --- Change the tablespaces so that the temporary table is rewritten to other --- local buffers, exercising a different codepath than standard local buffer --- writes. -ALTER TABLE test_io_local SET TABLESPACE regress_tblspace; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(writes) AS io_sum_local_new_tblspc_writes - FROM pg_stat_io WHERE context = 'normal' AND object = 'temp relation' \gset -SELECT :io_sum_local_new_tblspc_writes > :io_sum_local_after_writes; - ?column? ----------- - t -(1 row) - -RESET temp_buffers; --- Test that reuse of strategy buffers and reads of blocks into these reused --- buffers while VACUUMing are tracked in pg_stat_io. If there is sufficient --- demand for shared buffers from concurrent queries, some buffers may be --- pinned by other backends before they can be reused. In such cases, the --- backend will evict a buffer from outside the ring and add it to the --- ring. This is considered an eviction and not a reuse. --- Set wal_skip_threshold smaller than the expected size of --- test_io_vac_strategy so that, even if wal_level is minimal, VACUUM FULL will --- fsync the newly rewritten test_io_vac_strategy instead of writing it to WAL. --- Writing it to WAL will result in the newly written relation pages being in --- shared buffers -- preventing us from testing BAS_VACUUM BufferAccessStrategy --- reads. -SET wal_skip_threshold = '1 kB'; -SELECT sum(reuses) AS reuses, sum(reads) AS reads, sum(evictions) AS evictions - FROM pg_stat_io WHERE context = 'vacuum' \gset io_sum_vac_strategy_before_ -CREATE TABLE test_io_vac_strategy(a int, b int) WITH (autovacuum_enabled = 'false'); -INSERT INTO test_io_vac_strategy SELECT i, i from generate_series(1, 4500)i; --- Ensure that the next VACUUM will need to perform IO by rewriting the table --- first with VACUUM (FULL). -VACUUM (FULL) test_io_vac_strategy; --- Use the minimum BUFFER_USAGE_LIMIT to cause reuses or evictions with the --- smallest table possible. -VACUUM (PARALLEL 0, BUFFER_USAGE_LIMIT 128) test_io_vac_strategy; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(reuses) AS reuses, sum(reads) AS reads, sum(evictions) AS evictions - FROM pg_stat_io WHERE context = 'vacuum' \gset io_sum_vac_strategy_after_ -SELECT :io_sum_vac_strategy_after_reads > :io_sum_vac_strategy_before_reads; - ?column? 
----------- - t -(1 row) - -SELECT (:io_sum_vac_strategy_after_reuses + :io_sum_vac_strategy_after_evictions) > - (:io_sum_vac_strategy_before_reuses + :io_sum_vac_strategy_before_evictions); - ?column? ----------- - t -(1 row) - -RESET wal_skip_threshold; --- Test that extends done by a CTAS, which uses a BAS_BULKWRITE --- BufferAccessStrategy, are tracked in pg_stat_io. -SELECT sum(extends) AS io_sum_bulkwrite_strategy_extends_before - FROM pg_stat_io WHERE context = 'bulkwrite' \gset -CREATE TABLE test_io_bulkwrite_strategy AS SELECT i FROM generate_series(1,100)i; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(extends) AS io_sum_bulkwrite_strategy_extends_after - FROM pg_stat_io WHERE context = 'bulkwrite' \gset -SELECT :io_sum_bulkwrite_strategy_extends_after > :io_sum_bulkwrite_strategy_extends_before; - ?column? ----------- - t -(1 row) - --- Test IO stats reset -SELECT pg_stat_have_stats('io', 0, 0); - pg_stat_have_stats --------------------- - t -(1 row) - -SELECT sum(evictions) + sum(reuses) + sum(extends) + sum(fsyncs) + sum(reads) + sum(writes) + sum(writebacks) + sum(hits) AS io_stats_pre_reset - FROM pg_stat_io \gset -SELECT pg_stat_reset_shared('io'); - pg_stat_reset_shared ----------------------- - -(1 row) - -SELECT sum(evictions) + sum(reuses) + sum(extends) + sum(fsyncs) + sum(reads) + sum(writes) + sum(writebacks) + sum(hits) AS io_stats_post_reset - FROM pg_stat_io \gset -SELECT :io_stats_post_reset < :io_stats_pre_reset; - ?column? ----------- - t -(1 row) - --- test BRIN index doesn't block HOT update -CREATE TABLE brin_hot ( - id integer PRIMARY KEY, - val integer NOT NULL -) WITH (autovacuum_enabled = off, fillfactor = 70); -INSERT INTO brin_hot SELECT *, 0 FROM generate_series(1, 235); -CREATE INDEX val_brin ON brin_hot using brin(val); -CREATE FUNCTION wait_for_hot_stats() RETURNS void AS $$ -DECLARE - start_time timestamptz := clock_timestamp(); - updated bool; -BEGIN - -- we don't want to wait forever; loop will exit after 30 seconds - FOR i IN 1 .. 300 LOOP - SELECT (pg_stat_get_tuples_hot_updated('brin_hot'::regclass::oid) > 0) INTO updated; - EXIT WHEN updated; - - -- wait a little - PERFORM pg_sleep_for('100 milliseconds'); - -- reset stats snapshot so we can test again - PERFORM pg_stat_clear_snapshot(); - END LOOP; - -- report time waited in postmaster log (where it won't change test output) - RAISE log 'wait_for_hot_stats delayed % seconds', - EXTRACT(epoch FROM clock_timestamp() - start_time); -END -$$ LANGUAGE plpgsql; -UPDATE brin_hot SET val = -3 WHERE id = 42; --- We can't just call wait_for_hot_stats() at this point, because we only --- transmit stats when the session goes idle, and we probably didn't --- transmit the last couple of counts yet thanks to the rate-limiting logic --- in pgstat_report_stat(). But instead of waiting for the rate limiter's --- timeout to elapse, let's just start a new session. The old one will --- then send its stats before dying. -\c - -SELECT wait_for_hot_stats(); - wait_for_hot_stats --------------------- - -(1 row) - -SELECT pg_stat_get_tuples_hot_updated('brin_hot'::regclass::oid); - pg_stat_get_tuples_hot_updated --------------------------------- - 1 -(1 row) - -DROP TABLE brin_hot; -DROP FUNCTION wait_for_hot_stats(); --- Test handling of index predicates - updating attributes in predicates --- should not block HOT when summarizing indexes are involved.
We update --- a row that was not indexed due to the index predicate, and becomes --- indexable - the HOT-updated tuple is forwarded to the BRIN index. -CREATE TABLE brin_hot_2 (a int, b int); -INSERT INTO brin_hot_2 VALUES (1, 100); -CREATE INDEX ON brin_hot_2 USING brin (b) WHERE a = 2; -UPDATE brin_hot_2 SET a = 2; -EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_2 WHERE a = 2 AND b = 100; - QUERY PLAN ------------------------------------ - Seq Scan on brin_hot_2 - Filter: ((a = 2) AND (b = 100)) -(2 rows) - -SELECT COUNT(*) FROM brin_hot_2 WHERE a = 2 AND b = 100; - count -------- - 1 -(1 row) - -SET enable_seqscan = off; -EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_2 WHERE a = 2 AND b = 100; - QUERY PLAN ---------------------------------------------- - Bitmap Heap Scan on brin_hot_2 - Recheck Cond: ((b = 100) AND (a = 2)) - -> Bitmap Index Scan on brin_hot_2_b_idx - Index Cond: (b = 100) -(4 rows) - -SELECT COUNT(*) FROM brin_hot_2 WHERE a = 2 AND b = 100; - count -------- - 1 -(1 row) - -DROP TABLE brin_hot_2; --- Test that updates to indexed columns are still propagated to the --- BRIN column. --- https://postgr.es/m/05ebcb44-f383-86e3-4f31-0a97a55634cf@enterprisedb.com -CREATE TABLE brin_hot_3 (a int, filler text) WITH (fillfactor = 10); -INSERT INTO brin_hot_3 SELECT 1, repeat(' ', 500) FROM generate_series(1, 20); -CREATE INDEX ON brin_hot_3 USING brin (a) WITH (pages_per_range = 1); -UPDATE brin_hot_3 SET a = 2; -EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_3 WHERE a = 2; - QUERY PLAN ---------------------------------------------- - Bitmap Heap Scan on brin_hot_3 - Recheck Cond: (a = 2) - -> Bitmap Index Scan on brin_hot_3_a_idx - Index Cond: (a = 2) -(4 rows) - -SELECT COUNT(*) FROM brin_hot_3 WHERE a = 2; - count -------- - 20 -(1 row) - -DROP TABLE brin_hot_3; -SET enable_seqscan = on; --- End of Stats Test +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/predicate.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/predicate.out --- /Users/admin/pgsql/src/test/regress/expected/predicate.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/predicate.out 2024-12-13 13:20:13 @@ -1,320 +1,2 @@ --- --- Tests for predicate handling --- --- --- Test that restrictions that are always true are ignored, and that are always --- false are replaced with constant-FALSE --- --- Currently we only check for NullTest quals and OR clauses that include --- NullTest quals. We may extend it in the future. 
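(The NullTest reduction that predicate.out verifies can be seen interactively on a build that has it, i.e. master at the time of this run; a minimal sketch with a made-up table name:

CREATE TABLE nulltest_sketch (a int NOT NULL, b int);
-- IS NOT NULL on a non-nullable column is provably true, so the qual is dropped:
EXPLAIN (COSTS OFF) SELECT * FROM nulltest_sketch WHERE a IS NOT NULL;
-- IS NULL on the same column is provably false and is reduced to a constant:
EXPLAIN (COSTS OFF) SELECT * FROM nulltest_sketch WHERE a IS NULL;

The expected plans are the ones quoted below: a bare Seq Scan for the first query, and a Result node with "One-Time Filter: false" for the second.)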
--- -CREATE TABLE pred_tab (a int NOT NULL, b int, c int NOT NULL); --- --- Test restriction clauses --- --- Ensure the IS_NOT_NULL qual is ignored when the column is non-nullable -EXPLAIN (COSTS OFF) -SELECT * FROM pred_tab t WHERE t.a IS NOT NULL; - QUERY PLAN ------------------------- - Seq Scan on pred_tab t -(1 row) - --- Ensure the IS_NOT_NULL qual is not ignored on a nullable column -EXPLAIN (COSTS OFF) -SELECT * FROM pred_tab t WHERE t.b IS NOT NULL; - QUERY PLAN ---------------------------- - Seq Scan on pred_tab t - Filter: (b IS NOT NULL) -(2 rows) - --- Ensure the IS_NULL qual is reduced to constant-FALSE for non-nullable --- columns -EXPLAIN (COSTS OFF) -SELECT * FROM pred_tab t WHERE t.a IS NULL; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - --- Ensure the IS_NULL qual is not reduced to constant-FALSE on nullable --- columns -EXPLAIN (COSTS OFF) -SELECT * FROM pred_tab t WHERE t.b IS NULL; - QUERY PLAN ------------------------- - Seq Scan on pred_tab t - Filter: (b IS NULL) -(2 rows) - --- --- Tests for OR clauses in restriction clauses --- --- Ensure the OR clause is ignored when an OR branch is always true -EXPLAIN (COSTS OFF) -SELECT * FROM pred_tab t WHERE t.a IS NOT NULL OR t.b = 1; - QUERY PLAN ------------------------- - Seq Scan on pred_tab t -(1 row) - --- Ensure the OR clause is not ignored for NullTests that can't be proven --- always true -EXPLAIN (COSTS OFF) -SELECT * FROM pred_tab t WHERE t.b IS NOT NULL OR t.a = 1; - QUERY PLAN ----------------------------------------- - Seq Scan on pred_tab t - Filter: ((b IS NOT NULL) OR (a = 1)) -(2 rows) - --- Ensure the OR clause is reduced to constant-FALSE when all branches are --- provably false -EXPLAIN (COSTS OFF) -SELECT * FROM pred_tab t WHERE t.a IS NULL OR t.c IS NULL; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - --- Ensure the OR clause is not reduced to constant-FALSE when not all branches --- are provably false -EXPLAIN (COSTS OFF) -SELECT * FROM pred_tab t WHERE t.b IS NULL OR t.c IS NULL; - QUERY PLAN ----------------------------------------- - Seq Scan on pred_tab t - Filter: ((b IS NULL) OR (c IS NULL)) -(2 rows) - --- --- Test join clauses --- --- Ensure the IS_NOT_NULL qual is ignored, since a) it's on a NOT NULL column, --- and b) its Var is not nullable by any outer joins -EXPLAIN (COSTS OFF) -SELECT * FROM pred_tab t1 - LEFT JOIN pred_tab t2 ON TRUE - LEFT JOIN pred_tab t3 ON t2.a IS NOT NULL; - QUERY PLAN -------------------------------------------------- - Nested Loop Left Join - -> Seq Scan on pred_tab t1 - -> Materialize - -> Nested Loop Left Join - -> Seq Scan on pred_tab t2 - -> Materialize - -> Seq Scan on pred_tab t3 -(7 rows) - --- Ensure the IS_NOT_NULL qual is not ignored when columns are made nullable --- by an outer join -EXPLAIN (COSTS OFF) -SELECT * FROM pred_tab t1 - LEFT JOIN pred_tab t2 ON t1.a = 1 - LEFT JOIN pred_tab t3 ON t2.a IS NOT NULL; - QUERY PLAN -------------------------------------------- - Nested Loop Left Join - Join Filter: (t2.a IS NOT NULL) - -> Nested Loop Left Join - Join Filter: (t1.a = 1) - -> Seq Scan on pred_tab t1 - -> Materialize - -> Seq Scan on pred_tab t2 - -> Materialize - -> Seq Scan on pred_tab t3 -(9 rows) - --- Ensure the IS_NULL qual is reduced to constant-FALSE, since a) it's on a NOT --- NULL column, and b) its Var is not nullable by any outer joins -EXPLAIN (COSTS OFF) -SELECT * FROM pred_tab t1 - LEFT JOIN pred_tab t2 ON TRUE - LEFT JOIN pred_tab t3 ON t2.a IS NULL 
AND t2.b = 1; - QUERY PLAN ---------------------------------------------------- - Nested Loop Left Join - -> Seq Scan on pred_tab t1 - -> Materialize - -> Nested Loop Left Join - Join Filter: (false AND (t2.b = 1)) - -> Seq Scan on pred_tab t2 - -> Result - One-Time Filter: false -(8 rows) - --- Ensure the IS_NULL qual is not reduced to constant-FALSE when the column is --- nullable by an outer join -EXPLAIN (COSTS OFF) -SELECT * FROM pred_tab t1 - LEFT JOIN pred_tab t2 ON t1.a = 1 - LEFT JOIN pred_tab t3 ON t2.a IS NULL; - QUERY PLAN -------------------------------------------- - Nested Loop Left Join - Join Filter: (t2.a IS NULL) - -> Nested Loop Left Join - Join Filter: (t1.a = 1) - -> Seq Scan on pred_tab t1 - -> Materialize - -> Seq Scan on pred_tab t2 - -> Materialize - -> Seq Scan on pred_tab t3 -(9 rows) - --- --- Tests for OR clauses in join clauses --- --- Ensure the OR clause is ignored when an OR branch is provably always true -EXPLAIN (COSTS OFF) -SELECT * FROM pred_tab t1 - LEFT JOIN pred_tab t2 ON TRUE - LEFT JOIN pred_tab t3 ON t2.a IS NOT NULL OR t2.b = 1; - QUERY PLAN -------------------------------------------------- - Nested Loop Left Join - -> Seq Scan on pred_tab t1 - -> Materialize - -> Nested Loop Left Join - -> Seq Scan on pred_tab t2 - -> Materialize - -> Seq Scan on pred_tab t3 -(7 rows) - --- Ensure the NullTest is not ignored when the column is nullable by an outer --- join -EXPLAIN (COSTS OFF) -SELECT * FROM pred_tab t1 - LEFT JOIN pred_tab t2 ON t1.a = 1 - LEFT JOIN pred_tab t3 ON t2.a IS NOT NULL OR t2.b = 1; - QUERY PLAN ---------------------------------------------------- - Nested Loop Left Join - Join Filter: ((t2.a IS NOT NULL) OR (t2.b = 1)) - -> Nested Loop Left Join - Join Filter: (t1.a = 1) - -> Seq Scan on pred_tab t1 - -> Materialize - -> Seq Scan on pred_tab t2 - -> Materialize - -> Seq Scan on pred_tab t3 -(9 rows) - --- Ensure the OR clause is reduced to constant-FALSE when all OR branches are --- provably false -EXPLAIN (COSTS OFF) -SELECT * FROM pred_tab t1 - LEFT JOIN pred_tab t2 ON TRUE - LEFT JOIN pred_tab t3 ON (t2.a IS NULL OR t2.c IS NULL) AND t2.b = 1; - QUERY PLAN ---------------------------------------------------- - Nested Loop Left Join - -> Seq Scan on pred_tab t1 - -> Materialize - -> Nested Loop Left Join - Join Filter: (false AND (t2.b = 1)) - -> Seq Scan on pred_tab t2 - -> Result - One-Time Filter: false -(8 rows) - --- Ensure the OR clause is not reduced to constant-FALSE when a column is --- made nullable from an outer join -EXPLAIN (COSTS OFF) -SELECT * FROM pred_tab t1 - LEFT JOIN pred_tab t2 ON t1.a = 1 - LEFT JOIN pred_tab t3 ON t2.a IS NULL OR t2.c IS NULL; - QUERY PLAN ---------------------------------------------------- - Nested Loop Left Join - Join Filter: ((t2.a IS NULL) OR (t2.c IS NULL)) - -> Nested Loop Left Join - Join Filter: (t1.a = 1) - -> Seq Scan on pred_tab t1 - -> Materialize - -> Seq Scan on pred_tab t2 - -> Materialize - -> Seq Scan on pred_tab t3 -(9 rows) - -DROP TABLE pred_tab; --- Validate we handle IS NULL and IS NOT NULL quals correctly with inheritance --- parents. -CREATE TABLE pred_parent (a int); -CREATE TABLE pred_child () INHERITS (pred_parent); -ALTER TABLE ONLY pred_parent ALTER a SET NOT NULL; --- Ensure that the scan on pred_child contains the IS NOT NULL qual. 
-EXPLAIN (COSTS OFF) -SELECT * FROM pred_parent WHERE a IS NOT NULL; - QUERY PLAN ---------------------------------------------- - Append - -> Seq Scan on pred_parent pred_parent_1 - -> Seq Scan on pred_child pred_parent_2 - Filter: (a IS NOT NULL) -(4 rows) - --- Ensure we only scan pred_child and not pred_parent -EXPLAIN (COSTS OFF) -SELECT * FROM pred_parent WHERE a IS NULL; - QUERY PLAN ------------------------------------- - Seq Scan on pred_child pred_parent - Filter: (a IS NULL) -(2 rows) - -ALTER TABLE pred_parent ALTER a DROP NOT NULL; -ALTER TABLE pred_child ALTER a SET NOT NULL; --- Ensure the IS NOT NULL qual is removed from the pred_child scan. -EXPLAIN (COSTS OFF) -SELECT * FROM pred_parent WHERE a IS NOT NULL; - QUERY PLAN ---------------------------------------------- - Append - -> Seq Scan on pred_parent pred_parent_1 - Filter: (a IS NOT NULL) - -> Seq Scan on pred_child pred_parent_2 -(4 rows) - --- Ensure we only scan pred_parent and not pred_child -EXPLAIN (COSTS OFF) -SELECT * FROM pred_parent WHERE a IS NULL; - QUERY PLAN -------------------------- - Seq Scan on pred_parent - Filter: (a IS NULL) -(2 rows) - -DROP TABLE pred_parent, pred_child; --- Validate the additional constant-FALSE qual does not cause inconsistent --- RestrictInfo serial numbers -CREATE TABLE pred_tab (a int PRIMARY KEY, b int); -INSERT INTO pred_tab SELECT i, i FROM generate_series(1, 10)i; -ANALYZE pred_tab; -EXPLAIN (COSTS OFF) -SELECT 1 FROM pred_tab t1 - LEFT JOIN - (pred_tab t2 LEFT JOIN pred_tab t3 ON t2.a = t3.a) ON TRUE - LEFT JOIN pred_tab t4 ON t1.a IS NULL AND t1.b = 1 - RIGHT JOIN pred_tab t5 ON t1.b = t5.b; - QUERY PLAN ---------------------------------------------------- - Hash Right Join - Hash Cond: (t1.b = t5.b) - -> Nested Loop Left Join - -> Nested Loop Left Join - Join Filter: (false AND (t1.b = 1)) - -> Seq Scan on pred_tab t1 - -> Result - One-Time Filter: false - -> Materialize - -> Seq Scan on pred_tab t2 - -> Hash - -> Seq Scan on pred_tab t5 -(12 rows) - -DROP TABLE pred_tab; +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /Users/admin/pgsql/src/test/regress/expected/oidjoins.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/oidjoins.out --- /Users/admin/pgsql/src/test/regress/expected/oidjoins.out 2024-12-13 13:17:12 +++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/oidjoins.out 2024-12-13 13:20:14 @@ -1,268 +1,2 @@ --- --- Verify system catalog foreign key relationships --- -DO $doblock$ -declare - fk record; - nkeys integer; - cmd text; - err record; -begin - for fk in select * from pg_get_catalog_foreign_keys() - loop - raise notice 'checking % % => % %', - fk.fktable, fk.fkcols, fk.pktable, fk.pkcols; - nkeys := array_length(fk.fkcols, 1); - cmd := 'SELECT ctid'; - for i in 1 .. nkeys loop - cmd := cmd || ', ' || quote_ident(fk.fkcols[i]); - end loop; - if fk.is_array then - cmd := cmd || ' FROM (SELECT ctid'; - for i in 1 .. nkeys-1 loop - cmd := cmd || ', ' || quote_ident(fk.fkcols[i]); - end loop; - cmd := cmd || ', unnest(' || quote_ident(fk.fkcols[nkeys]); - cmd := cmd || ') as ' || quote_ident(fk.fkcols[nkeys]); - cmd := cmd || ' FROM ' || fk.fktable::text || ') fk WHERE '; - else - cmd := cmd || ' FROM ' || fk.fktable::text || ' fk WHERE '; - end if; - if fk.is_opt then - for i in 1 .. 
nkeys loop - cmd := cmd || quote_ident(fk.fkcols[i]) || ' != 0 AND '; - end loop; - end if; - cmd := cmd || 'NOT EXISTS(SELECT 1 FROM ' || fk.pktable::text || ' pk WHERE '; - for i in 1 .. nkeys loop - if i > 1 then cmd := cmd || ' AND '; end if; - cmd := cmd || 'pk.' || quote_ident(fk.pkcols[i]); - cmd := cmd || ' = fk.' || quote_ident(fk.fkcols[i]); - end loop; - cmd := cmd || ')'; - -- raise notice 'cmd = %', cmd; - for err in execute cmd loop - raise warning 'FK VIOLATION IN %(%): %', fk.fktable, fk.fkcols, err; - end loop; - end loop; -end -$doblock$; -NOTICE: checking pg_proc {pronamespace} => pg_namespace {oid} -NOTICE: checking pg_proc {proowner} => pg_authid {oid} -NOTICE: checking pg_proc {prolang} => pg_language {oid} -NOTICE: checking pg_proc {provariadic} => pg_type {oid} -NOTICE: checking pg_proc {prosupport} => pg_proc {oid} -NOTICE: checking pg_proc {prorettype} => pg_type {oid} -NOTICE: checking pg_proc {proargtypes} => pg_type {oid} -NOTICE: checking pg_proc {proallargtypes} => pg_type {oid} -NOTICE: checking pg_proc {protrftypes} => pg_type {oid} -NOTICE: checking pg_type {typnamespace} => pg_namespace {oid} -NOTICE: checking pg_type {typowner} => pg_authid {oid} -NOTICE: checking pg_type {typrelid} => pg_class {oid} -NOTICE: checking pg_type {typsubscript} => pg_proc {oid} -NOTICE: checking pg_type {typelem} => pg_type {oid} -NOTICE: checking pg_type {typarray} => pg_type {oid} -NOTICE: checking pg_type {typinput} => pg_proc {oid} -NOTICE: checking pg_type {typoutput} => pg_proc {oid} -NOTICE: checking pg_type {typreceive} => pg_proc {oid} -NOTICE: checking pg_type {typsend} => pg_proc {oid} -NOTICE: checking pg_type {typmodin} => pg_proc {oid} -NOTICE: checking pg_type {typmodout} => pg_proc {oid} -NOTICE: checking pg_type {typanalyze} => pg_proc {oid} -NOTICE: checking pg_type {typbasetype} => pg_type {oid} -NOTICE: checking pg_type {typcollation} => pg_collation {oid} -NOTICE: checking pg_attribute {attrelid} => pg_class {oid} -NOTICE: checking pg_attribute {atttypid} => pg_type {oid} -NOTICE: checking pg_attribute {attcollation} => pg_collation {oid} -NOTICE: checking pg_class {relnamespace} => pg_namespace {oid} -NOTICE: checking pg_class {reltype} => pg_type {oid} -NOTICE: checking pg_class {reloftype} => pg_type {oid} -NOTICE: checking pg_class {relowner} => pg_authid {oid} -NOTICE: checking pg_class {relam} => pg_am {oid} -NOTICE: checking pg_class {reltablespace} => pg_tablespace {oid} -NOTICE: checking pg_class {reltoastrelid} => pg_class {oid} -NOTICE: checking pg_class {relrewrite} => pg_class {oid} -NOTICE: checking pg_attrdef {adrelid} => pg_class {oid} -NOTICE: checking pg_attrdef {adrelid,adnum} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_constraint {connamespace} => pg_namespace {oid} -NOTICE: checking pg_constraint {conrelid} => pg_class {oid} -NOTICE: checking pg_constraint {contypid} => pg_type {oid} -NOTICE: checking pg_constraint {conindid} => pg_class {oid} -NOTICE: checking pg_constraint {conparentid} => pg_constraint {oid} -NOTICE: checking pg_constraint {confrelid} => pg_class {oid} -NOTICE: checking pg_constraint {conpfeqop} => pg_operator {oid} -NOTICE: checking pg_constraint {conppeqop} => pg_operator {oid} -NOTICE: checking pg_constraint {conffeqop} => pg_operator {oid} -NOTICE: checking pg_constraint {conexclop} => pg_operator {oid} -NOTICE: checking pg_constraint {conrelid,conkey} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_constraint {confrelid,confkey} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_inherits 
{inhrelid} => pg_class {oid} -NOTICE: checking pg_inherits {inhparent} => pg_class {oid} -NOTICE: checking pg_index {indexrelid} => pg_class {oid} -NOTICE: checking pg_index {indrelid} => pg_class {oid} -NOTICE: checking pg_index {indcollation} => pg_collation {oid} -NOTICE: checking pg_index {indclass} => pg_opclass {oid} -NOTICE: checking pg_index {indrelid,indkey} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_operator {oprnamespace} => pg_namespace {oid} -NOTICE: checking pg_operator {oprowner} => pg_authid {oid} -NOTICE: checking pg_operator {oprleft} => pg_type {oid} -NOTICE: checking pg_operator {oprright} => pg_type {oid} -NOTICE: checking pg_operator {oprresult} => pg_type {oid} -NOTICE: checking pg_operator {oprcom} => pg_operator {oid} -NOTICE: checking pg_operator {oprnegate} => pg_operator {oid} -NOTICE: checking pg_operator {oprcode} => pg_proc {oid} -NOTICE: checking pg_operator {oprrest} => pg_proc {oid} -NOTICE: checking pg_operator {oprjoin} => pg_proc {oid} -NOTICE: checking pg_opfamily {opfmethod} => pg_am {oid} -NOTICE: checking pg_opfamily {opfnamespace} => pg_namespace {oid} -NOTICE: checking pg_opfamily {opfowner} => pg_authid {oid} -NOTICE: checking pg_opclass {opcmethod} => pg_am {oid} -NOTICE: checking pg_opclass {opcnamespace} => pg_namespace {oid} -NOTICE: checking pg_opclass {opcowner} => pg_authid {oid} -NOTICE: checking pg_opclass {opcfamily} => pg_opfamily {oid} -NOTICE: checking pg_opclass {opcintype} => pg_type {oid} -NOTICE: checking pg_opclass {opckeytype} => pg_type {oid} -NOTICE: checking pg_am {amhandler} => pg_proc {oid} -NOTICE: checking pg_amop {amopfamily} => pg_opfamily {oid} -NOTICE: checking pg_amop {amoplefttype} => pg_type {oid} -NOTICE: checking pg_amop {amoprighttype} => pg_type {oid} -NOTICE: checking pg_amop {amopopr} => pg_operator {oid} -NOTICE: checking pg_amop {amopmethod} => pg_am {oid} -NOTICE: checking pg_amop {amopsortfamily} => pg_opfamily {oid} -NOTICE: checking pg_amproc {amprocfamily} => pg_opfamily {oid} -NOTICE: checking pg_amproc {amproclefttype} => pg_type {oid} -NOTICE: checking pg_amproc {amprocrighttype} => pg_type {oid} -NOTICE: checking pg_amproc {amproc} => pg_proc {oid} -NOTICE: checking pg_language {lanowner} => pg_authid {oid} -NOTICE: checking pg_language {lanplcallfoid} => pg_proc {oid} -NOTICE: checking pg_language {laninline} => pg_proc {oid} -NOTICE: checking pg_language {lanvalidator} => pg_proc {oid} -NOTICE: checking pg_largeobject_metadata {lomowner} => pg_authid {oid} -NOTICE: checking pg_largeobject {loid} => pg_largeobject_metadata {oid} -NOTICE: checking pg_aggregate {aggfnoid} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggtransfn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggfinalfn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggcombinefn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggserialfn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggdeserialfn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggmtransfn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggminvtransfn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggmfinalfn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggsortop} => pg_operator {oid} -NOTICE: checking pg_aggregate {aggtranstype} => pg_type {oid} -NOTICE: checking pg_aggregate {aggmtranstype} => pg_type {oid} -NOTICE: checking pg_statistic {starelid} => pg_class {oid} -NOTICE: checking pg_statistic {staop1} => pg_operator {oid} -NOTICE: checking pg_statistic {staop2} => pg_operator {oid} -NOTICE: checking pg_statistic {staop3} => 
pg_operator {oid} -NOTICE: checking pg_statistic {staop4} => pg_operator {oid} -NOTICE: checking pg_statistic {staop5} => pg_operator {oid} -NOTICE: checking pg_statistic {stacoll1} => pg_collation {oid} -NOTICE: checking pg_statistic {stacoll2} => pg_collation {oid} -NOTICE: checking pg_statistic {stacoll3} => pg_collation {oid} -NOTICE: checking pg_statistic {stacoll4} => pg_collation {oid} -NOTICE: checking pg_statistic {stacoll5} => pg_collation {oid} -NOTICE: checking pg_statistic {starelid,staattnum} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_statistic_ext {stxrelid} => pg_class {oid} -NOTICE: checking pg_statistic_ext {stxnamespace} => pg_namespace {oid} -NOTICE: checking pg_statistic_ext {stxowner} => pg_authid {oid} -NOTICE: checking pg_statistic_ext {stxrelid,stxkeys} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_statistic_ext_data {stxoid} => pg_statistic_ext {oid} -NOTICE: checking pg_rewrite {ev_class} => pg_class {oid} -NOTICE: checking pg_trigger {tgrelid} => pg_class {oid} -NOTICE: checking pg_trigger {tgparentid} => pg_trigger {oid} -NOTICE: checking pg_trigger {tgfoid} => pg_proc {oid} -NOTICE: checking pg_trigger {tgconstrrelid} => pg_class {oid} -NOTICE: checking pg_trigger {tgconstrindid} => pg_class {oid} -NOTICE: checking pg_trigger {tgconstraint} => pg_constraint {oid} -NOTICE: checking pg_trigger {tgrelid,tgattr} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_event_trigger {evtowner} => pg_authid {oid} -NOTICE: checking pg_event_trigger {evtfoid} => pg_proc {oid} -NOTICE: checking pg_description {classoid} => pg_class {oid} -NOTICE: checking pg_cast {castsource} => pg_type {oid} -NOTICE: checking pg_cast {casttarget} => pg_type {oid} -NOTICE: checking pg_cast {castfunc} => pg_proc {oid} -NOTICE: checking pg_enum {enumtypid} => pg_type {oid} -NOTICE: checking pg_namespace {nspowner} => pg_authid {oid} -NOTICE: checking pg_conversion {connamespace} => pg_namespace {oid} -NOTICE: checking pg_conversion {conowner} => pg_authid {oid} -NOTICE: checking pg_conversion {conproc} => pg_proc {oid} -NOTICE: checking pg_depend {classid} => pg_class {oid} -NOTICE: checking pg_depend {refclassid} => pg_class {oid} -NOTICE: checking pg_database {datdba} => pg_authid {oid} -NOTICE: checking pg_database {dattablespace} => pg_tablespace {oid} -NOTICE: checking pg_db_role_setting {setdatabase} => pg_database {oid} -NOTICE: checking pg_db_role_setting {setrole} => pg_authid {oid} -NOTICE: checking pg_tablespace {spcowner} => pg_authid {oid} -NOTICE: checking pg_auth_members {roleid} => pg_authid {oid} -NOTICE: checking pg_auth_members {member} => pg_authid {oid} -NOTICE: checking pg_auth_members {grantor} => pg_authid {oid} -NOTICE: checking pg_shdepend {dbid} => pg_database {oid} -NOTICE: checking pg_shdepend {classid} => pg_class {oid} -NOTICE: checking pg_shdepend {refclassid} => pg_class {oid} -NOTICE: checking pg_shdescription {classoid} => pg_class {oid} -NOTICE: checking pg_ts_config {cfgnamespace} => pg_namespace {oid} -NOTICE: checking pg_ts_config {cfgowner} => pg_authid {oid} -NOTICE: checking pg_ts_config {cfgparser} => pg_ts_parser {oid} -NOTICE: checking pg_ts_config_map {mapcfg} => pg_ts_config {oid} -NOTICE: checking pg_ts_config_map {mapdict} => pg_ts_dict {oid} -NOTICE: checking pg_ts_dict {dictnamespace} => pg_namespace {oid} -NOTICE: checking pg_ts_dict {dictowner} => pg_authid {oid} -NOTICE: checking pg_ts_dict {dicttemplate} => pg_ts_template {oid} -NOTICE: checking pg_ts_parser {prsnamespace} => pg_namespace {oid} -NOTICE: checking 
pg_ts_parser {prsstart} => pg_proc {oid} -NOTICE: checking pg_ts_parser {prstoken} => pg_proc {oid} -NOTICE: checking pg_ts_parser {prsend} => pg_proc {oid} -NOTICE: checking pg_ts_parser {prsheadline} => pg_proc {oid} -NOTICE: checking pg_ts_parser {prslextype} => pg_proc {oid} -NOTICE: checking pg_ts_template {tmplnamespace} => pg_namespace {oid} -NOTICE: checking pg_ts_template {tmplinit} => pg_proc {oid} -NOTICE: checking pg_ts_template {tmpllexize} => pg_proc {oid} -NOTICE: checking pg_extension {extowner} => pg_authid {oid} -NOTICE: checking pg_extension {extnamespace} => pg_namespace {oid} -NOTICE: checking pg_extension {extconfig} => pg_class {oid} -NOTICE: checking pg_foreign_data_wrapper {fdwowner} => pg_authid {oid} -NOTICE: checking pg_foreign_data_wrapper {fdwhandler} => pg_proc {oid} -NOTICE: checking pg_foreign_data_wrapper {fdwvalidator} => pg_proc {oid} -NOTICE: checking pg_foreign_server {srvowner} => pg_authid {oid} -NOTICE: checking pg_foreign_server {srvfdw} => pg_foreign_data_wrapper {oid} -NOTICE: checking pg_user_mapping {umuser} => pg_authid {oid} -NOTICE: checking pg_user_mapping {umserver} => pg_foreign_server {oid} -NOTICE: checking pg_foreign_table {ftrelid} => pg_class {oid} -NOTICE: checking pg_foreign_table {ftserver} => pg_foreign_server {oid} -NOTICE: checking pg_policy {polrelid} => pg_class {oid} -NOTICE: checking pg_policy {polroles} => pg_authid {oid} -NOTICE: checking pg_default_acl {defaclrole} => pg_authid {oid} -NOTICE: checking pg_default_acl {defaclnamespace} => pg_namespace {oid} -NOTICE: checking pg_init_privs {classoid} => pg_class {oid} -NOTICE: checking pg_seclabel {classoid} => pg_class {oid} -NOTICE: checking pg_shseclabel {classoid} => pg_class {oid} -NOTICE: checking pg_collation {collnamespace} => pg_namespace {oid} -NOTICE: checking pg_collation {collowner} => pg_authid {oid} -NOTICE: checking pg_partitioned_table {partrelid} => pg_class {oid} -NOTICE: checking pg_partitioned_table {partdefid} => pg_class {oid} -NOTICE: checking pg_partitioned_table {partclass} => pg_opclass {oid} -NOTICE: checking pg_partitioned_table {partcollation} => pg_collation {oid} -NOTICE: checking pg_partitioned_table {partrelid,partattrs} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_range {rngtypid} => pg_type {oid} -NOTICE: checking pg_range {rngsubtype} => pg_type {oid} -NOTICE: checking pg_range {rngmultitypid} => pg_type {oid} -NOTICE: checking pg_range {rngcollation} => pg_collation {oid} -NOTICE: checking pg_range {rngsubopc} => pg_opclass {oid} -NOTICE: checking pg_range {rngcanonical} => pg_proc {oid} -NOTICE: checking pg_range {rngsubdiff} => pg_proc {oid} -NOTICE: checking pg_transform {trftype} => pg_type {oid} -NOTICE: checking pg_transform {trflang} => pg_language {oid} -NOTICE: checking pg_transform {trffromsql} => pg_proc {oid} -NOTICE: checking pg_transform {trftosql} => pg_proc {oid} -NOTICE: checking pg_sequence {seqrelid} => pg_class {oid} -NOTICE: checking pg_sequence {seqtypid} => pg_type {oid} -NOTICE: checking pg_publication {pubowner} => pg_authid {oid} -NOTICE: checking pg_publication_namespace {pnpubid} => pg_publication {oid} -NOTICE: checking pg_publication_namespace {pnnspid} => pg_namespace {oid} -NOTICE: checking pg_publication_rel {prpubid} => pg_publication {oid} -NOTICE: checking pg_publication_rel {prrelid} => pg_class {oid} -NOTICE: checking pg_subscription {subdbid} => pg_database {oid} -NOTICE: checking pg_subscription {subowner} => pg_authid {oid} -NOTICE: checking pg_subscription_rel {srsubid} => 
-NOTICE: checking pg_subscription_rel {srrelid} => pg_class {oid}
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+	Is the server running locally and accepting connections on that socket?
diff -U3 /Users/admin/pgsql/src/test/regress/expected/event_trigger.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/event_trigger.out
--- /Users/admin/pgsql/src/test/regress/expected/event_trigger.out	2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/event_trigger.out	2024-12-13 13:20:14
@@ -1,742 +1,2 @@
--- should fail, return type mismatch
-create event trigger regress_event_trigger
-   on ddl_command_start
-   execute procedure pg_backend_pid();
-ERROR: function pg_backend_pid must return type event_trigger
--- OK
-create function test_event_trigger() returns event_trigger as $$
-BEGIN
-    RAISE NOTICE 'test_event_trigger: % %', tg_event, tg_tag;
-END
-$$ language plpgsql;
--- should fail, can't call it as a plain function
-SELECT test_event_trigger();
-ERROR: trigger functions can only be called as triggers
-CONTEXT: compilation of PL/pgSQL function "test_event_trigger" near line 1
--- should fail, event triggers cannot have declared arguments
-create function test_event_trigger_arg(name text)
-returns event_trigger as $$ BEGIN RETURN 1; END $$ language plpgsql;
-ERROR: event trigger functions cannot have declared arguments
-CONTEXT: compilation of PL/pgSQL function "test_event_trigger_arg" near line 1
--- should fail, SQL functions cannot be event triggers
-create function test_event_trigger_sql() returns event_trigger as $$
-SELECT 1 $$ language sql;
-ERROR: SQL functions cannot return type event_trigger
--- should fail, no elephant_bootstrap entry point
-create event trigger regress_event_trigger on elephant_bootstrap
-   execute procedure test_event_trigger();
-ERROR: unrecognized event name "elephant_bootstrap"
--- OK
-create event trigger regress_event_trigger on ddl_command_start
-   execute procedure test_event_trigger();
--- OK
-create event trigger regress_event_trigger_end on ddl_command_end
-   execute function test_event_trigger();
--- should fail, food is not a valid filter variable
-create event trigger regress_event_trigger2 on ddl_command_start
-   when food in ('sandwich')
-   execute procedure test_event_trigger();
-ERROR: unrecognized filter variable "food"
--- should fail, sandwich is not a valid command tag
-create event trigger regress_event_trigger2 on ddl_command_start
-   when tag in ('sandwich')
-   execute procedure test_event_trigger();
-ERROR: filter value "sandwich" not recognized for filter variable "tag"
--- should fail, create skunkcabbage is not a valid command tag
-create event trigger regress_event_trigger2 on ddl_command_start
-   when tag in ('create table', 'create skunkcabbage')
-   execute procedure test_event_trigger();
-ERROR: filter value "create skunkcabbage" not recognized for filter variable "tag"
--- should fail, can't have event triggers on event triggers
-create event trigger regress_event_trigger2 on ddl_command_start
-   when tag in ('DROP EVENT TRIGGER')
-   execute procedure test_event_trigger();
-ERROR: event triggers are not supported for DROP EVENT TRIGGER
--- should fail, can't have event triggers on global objects
-create event trigger regress_event_trigger2 on ddl_command_start
-   when tag in ('CREATE ROLE')
-   execute procedure test_event_trigger();
-ERROR: event triggers are not supported for CREATE ROLE
--- should fail, can't have event triggers on global objects
-create event trigger regress_event_trigger2 on ddl_command_start
-   when tag in ('CREATE DATABASE')
-   execute procedure test_event_trigger();
-ERROR: event triggers are not supported for CREATE DATABASE
--- should fail, can't have event triggers on global objects
-create event trigger regress_event_trigger2 on ddl_command_start
-   when tag in ('CREATE TABLESPACE')
-   execute procedure test_event_trigger();
-ERROR: event triggers are not supported for CREATE TABLESPACE
--- should fail, can't have same filter variable twice
-create event trigger regress_event_trigger2 on ddl_command_start
-   when tag in ('create table') and tag in ('CREATE FUNCTION')
-   execute procedure test_event_trigger();
-ERROR: filter variable "tag" specified more than once
--- should fail, can't have arguments
-create event trigger regress_event_trigger2 on ddl_command_start
-   execute procedure test_event_trigger('argument not allowed');
-ERROR: syntax error at or near "'argument not allowed'"
-LINE 2:    execute procedure test_event_trigger('argument not allowe...
-                                                ^
--- OK
-create event trigger regress_event_trigger2 on ddl_command_start
-   when tag in ('create table', 'CREATE FUNCTION')
-   execute procedure test_event_trigger();
--- OK
-comment on event trigger regress_event_trigger is 'test comment';
--- drop as non-superuser should fail
-create role regress_evt_user;
-set role regress_evt_user;
-create event trigger regress_event_trigger_noperms on ddl_command_start
-   execute procedure test_event_trigger();
-ERROR: permission denied to create event trigger "regress_event_trigger_noperms"
-HINT: Must be superuser to create an event trigger.
-reset role;
--- test enabling and disabling
-alter event trigger regress_event_trigger disable;
--- fires _trigger2 and _trigger_end should fire, but not _trigger
-create table event_trigger_fire1 (a int);
-NOTICE: test_event_trigger: ddl_command_start CREATE TABLE
-NOTICE: test_event_trigger: ddl_command_end CREATE TABLE
-alter event trigger regress_event_trigger enable;
-set session_replication_role = replica;
--- fires nothing
-create table event_trigger_fire2 (a int);
-alter event trigger regress_event_trigger enable replica;
--- fires only _trigger
-create table event_trigger_fire3 (a int);
-NOTICE: test_event_trigger: ddl_command_start CREATE TABLE
-alter event trigger regress_event_trigger enable always;
--- fires only _trigger
-create table event_trigger_fire4 (a int);
-NOTICE: test_event_trigger: ddl_command_start CREATE TABLE
-reset session_replication_role;
--- fires all three
-create table event_trigger_fire5 (a int);
-NOTICE: test_event_trigger: ddl_command_start CREATE TABLE
-NOTICE: test_event_trigger: ddl_command_start CREATE TABLE
-NOTICE: test_event_trigger: ddl_command_end CREATE TABLE
--- non-top-level command
-create function f1() returns int
-language plpgsql
-as $$
-begin
-  create table event_trigger_fire6 (a int);
-  return 0;
-end $$;
-NOTICE: test_event_trigger: ddl_command_start CREATE FUNCTION
-NOTICE: test_event_trigger: ddl_command_start CREATE FUNCTION
-NOTICE: test_event_trigger: ddl_command_end CREATE FUNCTION
-select f1();
-NOTICE: test_event_trigger: ddl_command_start CREATE TABLE
-NOTICE: test_event_trigger: ddl_command_start CREATE TABLE
-NOTICE: test_event_trigger: ddl_command_end CREATE TABLE
- f1
-----
- 0
-(1 row)
-
--- non-top-level command
-create procedure p1()
-language plpgsql
-as $$
-begin
-  create table event_trigger_fire7 (a int);
-end $$;
-NOTICE: test_event_trigger: ddl_command_start CREATE PROCEDURE
-NOTICE: test_event_trigger: ddl_command_end CREATE PROCEDURE
-call p1();
-NOTICE: test_event_trigger: ddl_command_start CREATE TABLE
-NOTICE: test_event_trigger: ddl_command_start CREATE TABLE
-NOTICE: test_event_trigger: ddl_command_end CREATE TABLE
--- clean up
-alter event trigger regress_event_trigger disable;
-drop table event_trigger_fire2, event_trigger_fire3, event_trigger_fire4, event_trigger_fire5, event_trigger_fire6, event_trigger_fire7;
-NOTICE: test_event_trigger: ddl_command_end DROP TABLE
-drop routine f1(), p1();
-NOTICE: test_event_trigger: ddl_command_end DROP ROUTINE
--- regress_event_trigger_end should fire on these commands
-grant all on table event_trigger_fire1 to public;
-NOTICE: test_event_trigger: ddl_command_end GRANT
-comment on table event_trigger_fire1 is 'here is a comment';
-NOTICE: test_event_trigger: ddl_command_end COMMENT
-revoke all on table event_trigger_fire1 from public;
-NOTICE: test_event_trigger: ddl_command_end REVOKE
-drop table event_trigger_fire1;
-NOTICE: test_event_trigger: ddl_command_end DROP TABLE
-create foreign data wrapper useless;
-NOTICE: test_event_trigger: ddl_command_end CREATE FOREIGN DATA WRAPPER
-create server useless_server foreign data wrapper useless;
-NOTICE: test_event_trigger: ddl_command_end CREATE SERVER
-create user mapping for regress_evt_user server useless_server;
-NOTICE: test_event_trigger: ddl_command_end CREATE USER MAPPING
-alter default privileges for role regress_evt_user
-  revoke delete on tables from regress_evt_user;
-NOTICE: test_event_trigger: ddl_command_end ALTER DEFAULT PRIVILEGES
--- alter owner to non-superuser should fail
-alter event trigger regress_event_trigger owner to regress_evt_user;
-ERROR: permission denied to change owner of event trigger "regress_event_trigger"
-HINT: The owner of an event trigger must be a superuser.
--- alter owner to superuser should work
-alter role regress_evt_user superuser;
-alter event trigger regress_event_trigger owner to regress_evt_user;
--- should fail, name collision
-alter event trigger regress_event_trigger rename to regress_event_trigger2;
-ERROR: event trigger "regress_event_trigger2" already exists
--- OK
-alter event trigger regress_event_trigger rename to regress_event_trigger3;
--- should fail, doesn't exist any more
-drop event trigger regress_event_trigger;
-ERROR: event trigger "regress_event_trigger" does not exist
--- should fail, regress_evt_user owns some objects
-drop role regress_evt_user;
-ERROR: role "regress_evt_user" cannot be dropped because some objects depend on it
-DETAIL: owner of event trigger regress_event_trigger3
-owner of user mapping for regress_evt_user on server useless_server
-owner of default privileges on new relations belonging to role regress_evt_user
--- cleanup before next test
--- these are all OK; the second one should emit a NOTICE
-drop event trigger if exists regress_event_trigger2;
-drop event trigger if exists regress_event_trigger2;
-NOTICE: event trigger "regress_event_trigger2" does not exist, skipping
-drop event trigger regress_event_trigger3;
-drop event trigger regress_event_trigger_end;
--- test support for dropped objects
-CREATE SCHEMA schema_one authorization regress_evt_user;
-CREATE SCHEMA schema_two authorization regress_evt_user;
-CREATE SCHEMA audit_tbls authorization regress_evt_user;
-CREATE TEMP TABLE a_temp_tbl ();
-SET SESSION AUTHORIZATION regress_evt_user;
-CREATE TABLE schema_one.table_one(a int);
-CREATE TABLE schema_one."table two"(a int);
-CREATE TABLE schema_one.table_three(a int);
-CREATE TABLE audit_tbls.schema_one_table_two(the_value text);
-CREATE TABLE schema_two.table_two(a int);
-CREATE TABLE schema_two.table_three(a int, b text);
-CREATE TABLE audit_tbls.schema_two_table_three(the_value text);
-CREATE OR REPLACE FUNCTION schema_two.add(int, int) RETURNS int LANGUAGE plpgsql
-  CALLED ON NULL INPUT
-  AS $$ BEGIN RETURN coalesce($1,0) + coalesce($2,0); END; $$;
-CREATE AGGREGATE schema_two.newton
-  (BASETYPE = int, SFUNC = schema_two.add, STYPE = int);
-RESET SESSION AUTHORIZATION;
-CREATE TABLE undroppable_objs (
-  object_type text,
-  object_identity text
-);
-INSERT INTO undroppable_objs VALUES
-('table', 'schema_one.table_three'),
-('table', 'audit_tbls.schema_two_table_three');
-CREATE TABLE dropped_objects (
-  type text,
-  schema text,
-  object text
-);
--- This tests errors raised within event triggers; the one in audit_tbls
--- uses 2nd-level recursive invocation via test_evtrig_dropped_objects().
-CREATE OR REPLACE FUNCTION undroppable() RETURNS event_trigger
-LANGUAGE plpgsql AS $$
-DECLARE
-  obj record;
-BEGIN
-  PERFORM 1 FROM pg_tables WHERE tablename = 'undroppable_objs';
-  IF NOT FOUND THEN
-    RAISE NOTICE 'table undroppable_objs not found, skipping';
-    RETURN;
-  END IF;
-  FOR obj IN
-    SELECT * FROM pg_event_trigger_dropped_objects() JOIN
-      undroppable_objs USING (object_type, object_identity)
-  LOOP
-    RAISE EXCEPTION 'object % of type % cannot be dropped',
-      obj.object_identity, obj.object_type;
-  END LOOP;
-END;
-$$;
-CREATE EVENT TRIGGER undroppable ON sql_drop
-  EXECUTE PROCEDURE undroppable();
-CREATE OR REPLACE FUNCTION test_evtrig_dropped_objects() RETURNS event_trigger
-LANGUAGE plpgsql AS $$
-DECLARE
-  obj record;
-BEGIN
-  FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects()
-  LOOP
-    IF obj.object_type = 'table' THEN
-      EXECUTE format('DROP TABLE IF EXISTS audit_tbls.%I',
-        format('%s_%s', obj.schema_name, obj.object_name));
-    END IF;
-
-    INSERT INTO dropped_objects
-      (type, schema, object) VALUES
-      (obj.object_type, obj.schema_name, obj.object_identity);
-  END LOOP;
-END
-$$;
-CREATE EVENT TRIGGER regress_event_trigger_drop_objects ON sql_drop
-  WHEN TAG IN ('drop table', 'drop function', 'drop view',
-    'drop owned', 'drop schema', 'alter table')
-  EXECUTE PROCEDURE test_evtrig_dropped_objects();
-ALTER TABLE schema_one.table_one DROP COLUMN a;
-DROP SCHEMA schema_one, schema_two CASCADE;
-NOTICE: drop cascades to 7 other objects
-DETAIL: drop cascades to table schema_two.table_two
-drop cascades to table schema_two.table_three
-drop cascades to function schema_two.add(integer,integer)
-drop cascades to function schema_two.newton(integer)
-drop cascades to table schema_one.table_one
-drop cascades to table schema_one."table two"
-drop cascades to table schema_one.table_three
-NOTICE: table "schema_two_table_two" does not exist, skipping
-NOTICE: table "audit_tbls_schema_two_table_three" does not exist, skipping
-ERROR: object audit_tbls.schema_two_table_three of type table cannot be dropped
-CONTEXT: PL/pgSQL function undroppable() line 14 at RAISE
-SQL statement "DROP TABLE IF EXISTS audit_tbls.schema_two_table_three"
-PL/pgSQL function test_evtrig_dropped_objects() line 8 at EXECUTE
-DELETE FROM undroppable_objs WHERE object_identity = 'audit_tbls.schema_two_table_three';
-DROP SCHEMA schema_one, schema_two CASCADE;
-NOTICE: drop cascades to 7 other objects
-DETAIL: drop cascades to table schema_two.table_two
-drop cascades to table schema_two.table_three
-drop cascades to function schema_two.add(integer,integer)
-drop cascades to function schema_two.newton(integer)
-drop cascades to table schema_one.table_one
-drop cascades to table schema_one."table two"
-drop cascades to table schema_one.table_three
-NOTICE: table "schema_two_table_two" does not exist, skipping
-NOTICE: table "audit_tbls_schema_two_table_three" does not exist, skipping
-NOTICE: table "schema_one_table_one" does not exist, skipping
-NOTICE: table "schema_one_table two" does not exist, skipping
-NOTICE: table "schema_one_table_three" does not exist, skipping
-ERROR: object schema_one.table_three of type table cannot be dropped
-CONTEXT: PL/pgSQL function undroppable() line 14 at RAISE
-DELETE FROM undroppable_objs WHERE object_identity = 'schema_one.table_three';
-DROP SCHEMA schema_one, schema_two CASCADE;
-NOTICE: drop cascades to 7 other objects
-DETAIL: drop cascades to table schema_two.table_two
-drop cascades to table schema_two.table_three
-drop cascades to function schema_two.add(integer,integer)
-drop cascades to function schema_two.newton(integer)
-drop cascades to table schema_one.table_one
-drop cascades to table schema_one."table two"
-drop cascades to table schema_one.table_three
-NOTICE: table "schema_two_table_two" does not exist, skipping
-NOTICE: table "audit_tbls_schema_two_table_three" does not exist, skipping
-NOTICE: table "schema_one_table_one" does not exist, skipping
-NOTICE: table "schema_one_table two" does not exist, skipping
-NOTICE: table "schema_one_table_three" does not exist, skipping
-SELECT * FROM dropped_objects WHERE schema IS NULL OR schema <> 'pg_toast';
- type | schema | object
---------------+------------+-------------------------------------
- table column | schema_one | schema_one.table_one.a
- schema | | schema_two
- table | schema_two | schema_two.table_two
- type | schema_two | schema_two.table_two
- type | schema_two | schema_two.table_two[]
- table | audit_tbls | audit_tbls.schema_two_table_three
- type | audit_tbls | audit_tbls.schema_two_table_three
- type | audit_tbls | audit_tbls.schema_two_table_three[]
- table | schema_two | schema_two.table_three
- type | schema_two | schema_two.table_three
- type | schema_two | schema_two.table_three[]
- function | schema_two | schema_two.add(integer,integer)
- aggregate | schema_two | schema_two.newton(integer)
- schema | | schema_one
- table | schema_one | schema_one.table_one
- type | schema_one | schema_one.table_one
- type | schema_one | schema_one.table_one[]
- table | schema_one | schema_one."table two"
- type | schema_one | schema_one."table two"
- type | schema_one | schema_one."table two"[]
- table | schema_one | schema_one.table_three
- type | schema_one | schema_one.table_three
- type | schema_one | schema_one.table_three[]
-(23 rows)
-
-DROP OWNED BY regress_evt_user;
-NOTICE: schema "audit_tbls" does not exist, skipping
-SELECT * FROM dropped_objects WHERE type = 'schema';
- type | schema | object
---------+--------+------------
- schema | | schema_two
- schema | | schema_one
- schema | | audit_tbls
-(3 rows)
-
-DROP ROLE regress_evt_user;
-DROP EVENT TRIGGER regress_event_trigger_drop_objects;
-DROP EVENT TRIGGER undroppable;
--- Event triggers on relations.
-CREATE OR REPLACE FUNCTION event_trigger_report_dropped()
- RETURNS event_trigger
- LANGUAGE plpgsql
-AS $$
-DECLARE r record;
-BEGIN
-    FOR r IN SELECT * from pg_event_trigger_dropped_objects()
-    LOOP
-    IF NOT r.normal AND NOT r.original THEN
-        CONTINUE;
-    END IF;
-    RAISE NOTICE 'NORMAL: orig=% normal=% istemp=% type=% identity=% name=% args=%',
-        r.original, r.normal, r.is_temporary, r.object_type,
-        r.object_identity, r.address_names, r.address_args;
-    END LOOP;
-END; $$;
-CREATE EVENT TRIGGER regress_event_trigger_report_dropped ON sql_drop
-    EXECUTE PROCEDURE event_trigger_report_dropped();
-CREATE OR REPLACE FUNCTION event_trigger_report_end()
- RETURNS event_trigger
- LANGUAGE plpgsql
-AS $$
-DECLARE r RECORD;
-BEGIN
-    FOR r IN SELECT * FROM pg_event_trigger_ddl_commands()
-    LOOP
-        RAISE NOTICE 'END: command_tag=% type=% identity=%',
-            r.command_tag, r.object_type, r.object_identity;
-    END LOOP;
-END; $$;
-CREATE EVENT TRIGGER regress_event_trigger_report_end ON ddl_command_end
-  EXECUTE PROCEDURE event_trigger_report_end();
-CREATE SCHEMA evttrig
-  CREATE TABLE one (col_a SERIAL PRIMARY KEY, col_b text DEFAULT 'forty two', col_c SERIAL)
-  CREATE INDEX one_idx ON one (col_b)
-  CREATE TABLE two (col_c INTEGER CHECK (col_c > 0) REFERENCES one DEFAULT 42)
-  CREATE TABLE id (col_d int NOT NULL GENERATED ALWAYS AS IDENTITY);
-NOTICE: END: command_tag=CREATE SCHEMA type=schema identity=evttrig
-NOTICE: END: command_tag=CREATE SEQUENCE type=sequence identity=evttrig.one_col_a_seq
-NOTICE: END: command_tag=CREATE SEQUENCE type=sequence identity=evttrig.one_col_c_seq
-NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.one
-NOTICE: END: command_tag=CREATE INDEX type=index identity=evttrig.one_pkey
-NOTICE: END: command_tag=ALTER SEQUENCE type=sequence identity=evttrig.one_col_a_seq
-NOTICE: END: command_tag=ALTER SEQUENCE type=sequence identity=evttrig.one_col_c_seq
-NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.two
-NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.two
-NOTICE: END: command_tag=CREATE SEQUENCE type=sequence identity=evttrig.id_col_d_seq
-NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.id
-NOTICE: END: command_tag=ALTER SEQUENCE type=sequence identity=evttrig.id_col_d_seq
-NOTICE: END: command_tag=CREATE INDEX type=index identity=evttrig.one_idx
--- Partitioned tables with a partitioned index
-CREATE TABLE evttrig.parted (
-  id int PRIMARY KEY)
-  PARTITION BY RANGE (id);
-NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.parted
-NOTICE: END: command_tag=CREATE INDEX type=index identity=evttrig.parted_pkey
-CREATE TABLE evttrig.part_1_10 PARTITION OF evttrig.parted (id)
-  FOR VALUES FROM (1) TO (10);
-NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.part_1_10
-CREATE TABLE evttrig.part_10_20 PARTITION OF evttrig.parted (id)
-  FOR VALUES FROM (10) TO (20) PARTITION BY RANGE (id);
-NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.part_10_20
-CREATE TABLE evttrig.part_10_15 PARTITION OF evttrig.part_10_20 (id)
-  FOR VALUES FROM (10) TO (15);
-NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.part_10_15
-CREATE TABLE evttrig.part_15_20 PARTITION OF evttrig.part_10_20 (id)
-  FOR VALUES FROM (15) TO (20);
-NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.part_15_20
-ALTER TABLE evttrig.two DROP COLUMN col_c;
-NOTICE: NORMAL: orig=t normal=f istemp=f type=table column identity=evttrig.two.col_c name={evttrig,two,col_c} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table constraint identity=two_col_c_check on evttrig.two name={evttrig,two,two_col_c_check} args={}
-NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.two
-ALTER TABLE evttrig.one ALTER COLUMN col_b DROP DEFAULT;
-NOTICE: NORMAL: orig=t normal=f istemp=f type=default value identity=for evttrig.one.col_b name={evttrig,one,col_b} args={}
-NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.one
-ALTER TABLE evttrig.one DROP CONSTRAINT one_pkey;
-NOTICE: NORMAL: orig=t normal=f istemp=f type=table constraint identity=one_pkey on evttrig.one name={evttrig,one,one_pkey} args={}
-NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.one
-ALTER TABLE evttrig.one DROP COLUMN col_c;
-NOTICE: NORMAL: orig=t normal=f istemp=f type=table column identity=evttrig.one.col_c name={evttrig,one,col_c} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=default value identity=for evttrig.one.col_c name={evttrig,one,col_c} args={}
-NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.one
-ALTER TABLE evttrig.id ALTER COLUMN col_d SET DATA TYPE bigint;
-NOTICE: END: command_tag=ALTER SEQUENCE type=sequence identity=evttrig.id_col_d_seq
-NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.id
-ALTER TABLE evttrig.id ALTER COLUMN col_d DROP IDENTITY,
-  ALTER COLUMN col_d SET DATA TYPE int;
-NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.id
-DROP INDEX evttrig.one_idx;
-NOTICE: NORMAL: orig=t normal=f istemp=f type=index identity=evttrig.one_idx name={evttrig,one_idx} args={}
-DROP SCHEMA evttrig CASCADE;
-NOTICE: drop cascades to 4 other objects
-DETAIL: drop cascades to table evttrig.one
-drop cascades to table evttrig.two
-drop cascades to table evttrig.id
-drop cascades to table evttrig.parted
-NOTICE: NORMAL: orig=t normal=f istemp=f type=schema identity=evttrig name={evttrig} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.one name={evttrig,one} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=sequence identity=evttrig.one_col_a_seq name={evttrig,one_col_a_seq} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=default value identity=for evttrig.one.col_a name={evttrig,one,col_a} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.two name={evttrig,two} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.id name={evttrig,id} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.parted name={evttrig,parted} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_1_10 name={evttrig,part_1_10} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_10_20 name={evttrig,part_10_20} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_10_15 name={evttrig,part_10_15} args={}
-NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_15_20 name={evttrig,part_15_20} args={}
-DROP TABLE a_temp_tbl;
-NOTICE: NORMAL: orig=t normal=f istemp=t type=table identity=pg_temp.a_temp_tbl name={pg_temp,a_temp_tbl} args={}
--- CREATE OPERATOR CLASS without FAMILY clause should report
--- both CREATE OPERATOR FAMILY and CREATE OPERATOR CLASS
-CREATE OPERATOR CLASS evttrigopclass FOR TYPE int USING btree AS STORAGE int;
-NOTICE: END: command_tag=CREATE OPERATOR FAMILY type=operator family identity=public.evttrigopclass USING btree
-NOTICE: END: command_tag=CREATE OPERATOR CLASS type=operator class identity=public.evttrigopclass USING btree
-DROP EVENT TRIGGER regress_event_trigger_report_dropped;
-DROP EVENT TRIGGER regress_event_trigger_report_end;
--- only allowed from within an event trigger function, should fail
-select pg_event_trigger_table_rewrite_oid();
-ERROR: pg_event_trigger_table_rewrite_oid() can only be called in a table_rewrite event trigger function
--- test Table Rewrite Event Trigger
-CREATE OR REPLACE FUNCTION test_evtrig_no_rewrite() RETURNS event_trigger
-LANGUAGE plpgsql AS $$
-BEGIN
-  RAISE EXCEPTION 'rewrites not allowed';
-END;
-$$;
-create event trigger no_rewrite_allowed on table_rewrite
-  execute procedure test_evtrig_no_rewrite();
-create table rewriteme (id serial primary key, foo float, bar timestamptz);
-insert into rewriteme
-  select x * 1.001 from generate_series(1, 500) as t(x);
-alter table rewriteme alter column foo type numeric;
-ERROR: rewrites not allowed
-CONTEXT: PL/pgSQL function test_evtrig_no_rewrite() line 3 at RAISE
-alter table rewriteme add column baz int default 0;
--- test with more than one reason to rewrite a single table
-CREATE OR REPLACE FUNCTION test_evtrig_no_rewrite() RETURNS event_trigger
-LANGUAGE plpgsql AS $$
-BEGIN
-  RAISE NOTICE 'Table ''%'' is being rewritten (reason = %)',
-    pg_event_trigger_table_rewrite_oid()::regclass,
-    pg_event_trigger_table_rewrite_reason();
-END;
-$$;
-alter table rewriteme
-  add column onemore int default 0,
-  add column another int default -1,
-  alter column foo type numeric(10,4);
-NOTICE: Table 'rewriteme' is being rewritten (reason = 4)
--- matview rewrite when changing access method
-CREATE MATERIALIZED VIEW heapmv USING heap AS SELECT 1 AS a;
-ALTER MATERIALIZED VIEW heapmv SET ACCESS METHOD heap2;
-NOTICE: Table 'heapmv' is being rewritten (reason = 8)
-DROP MATERIALIZED VIEW heapmv;
--- shouldn't trigger a table_rewrite event
-alter table rewriteme alter column foo type numeric(12,4);
-begin;
-set timezone to 'UTC';
-alter table rewriteme alter column bar type timestamp;
-set timezone to '0';
-alter table rewriteme alter column bar type timestamptz;
-set timezone to 'Europe/London';
-alter table rewriteme alter column bar type timestamp; -- does rewrite
-NOTICE: Table 'rewriteme' is being rewritten (reason = 4)
-rollback;
--- typed tables are rewritten when their type changes. Don't emit table
--- name, because firing order is not stable.
-CREATE OR REPLACE FUNCTION test_evtrig_no_rewrite() RETURNS event_trigger
-LANGUAGE plpgsql AS $$
-BEGIN
-  RAISE NOTICE 'Table is being rewritten (reason = %)',
-    pg_event_trigger_table_rewrite_reason();
-END;
-$$;
-create type rewritetype as (a int);
-create table rewritemetoo1 of rewritetype;
-create table rewritemetoo2 of rewritetype;
-alter type rewritetype alter attribute a type text cascade;
-NOTICE: Table is being rewritten (reason = 4)
-NOTICE: Table is being rewritten (reason = 4)
--- but this doesn't work
-create table rewritemetoo3 (a rewritetype);
-alter type rewritetype alter attribute a type varchar cascade;
-ERROR: cannot alter type "rewritetype" because column "rewritemetoo3.a" uses it
-drop table rewriteme;
-drop event trigger no_rewrite_allowed;
-drop function test_evtrig_no_rewrite();
--- Tests for REINDEX
-CREATE OR REPLACE FUNCTION reindex_start_command()
-RETURNS event_trigger AS $$
-BEGIN
-    RAISE NOTICE 'REINDEX START: % %', tg_event, tg_tag;
-END;
-$$ LANGUAGE plpgsql;
-CREATE EVENT TRIGGER regress_reindex_start ON ddl_command_start
-    WHEN TAG IN ('REINDEX')
-    EXECUTE PROCEDURE reindex_start_command();
-CREATE FUNCTION reindex_end_command()
-RETURNS event_trigger AS $$
-DECLARE
-    obj record;
-BEGIN
-    FOR obj IN SELECT * FROM pg_event_trigger_ddl_commands()
-    LOOP
-        RAISE NOTICE 'REINDEX END: command_tag=% type=% identity=%',
-            obj.command_tag, obj.object_type, obj.object_identity;
-    END LOOP;
-END;
-$$ LANGUAGE plpgsql;
-CREATE EVENT TRIGGER regress_reindex_end ON ddl_command_end
-    WHEN TAG IN ('REINDEX')
-    EXECUTE PROCEDURE reindex_end_command();
--- Extra event to force the use of a snapshot.
-CREATE FUNCTION reindex_end_command_snap() RETURNS EVENT_TRIGGER
-    AS $$ BEGIN PERFORM 1; END $$ LANGUAGE plpgsql;
-CREATE EVENT TRIGGER regress_reindex_end_snap ON ddl_command_end
-    EXECUTE FUNCTION reindex_end_command_snap();
--- With simple relation
-CREATE TABLE concur_reindex_tab (c1 int);
-CREATE INDEX concur_reindex_ind ON concur_reindex_tab (c1);
--- Both start and end triggers enabled.
-REINDEX INDEX concur_reindex_ind;
-NOTICE: REINDEX START: ddl_command_start REINDEX
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_ind
-REINDEX TABLE concur_reindex_tab;
-NOTICE: REINDEX START: ddl_command_start REINDEX
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_ind
-REINDEX INDEX CONCURRENTLY concur_reindex_ind;
-NOTICE: REINDEX START: ddl_command_start REINDEX
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_ind
-REINDEX TABLE CONCURRENTLY concur_reindex_tab;
-NOTICE: REINDEX START: ddl_command_start REINDEX
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_ind
--- with start trigger disabled.
-ALTER EVENT TRIGGER regress_reindex_start DISABLE;
-REINDEX INDEX concur_reindex_ind;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_ind
-REINDEX INDEX CONCURRENTLY concur_reindex_ind;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_ind
--- without an index
-DROP INDEX concur_reindex_ind;
-REINDEX TABLE concur_reindex_tab;
-NOTICE: table "concur_reindex_tab" has no indexes to reindex
-REINDEX TABLE CONCURRENTLY concur_reindex_tab;
-NOTICE: table "concur_reindex_tab" has no indexes that can be reindexed concurrently
--- With a Schema
-CREATE SCHEMA concur_reindex_schema;
--- No indexes
-REINDEX SCHEMA concur_reindex_schema;
-REINDEX SCHEMA CONCURRENTLY concur_reindex_schema;
-CREATE TABLE concur_reindex_schema.tab (a int);
-CREATE INDEX ind ON concur_reindex_schema.tab (a);
--- One index reported
-REINDEX SCHEMA concur_reindex_schema;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=concur_reindex_schema.ind
-REINDEX SCHEMA CONCURRENTLY concur_reindex_schema;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=concur_reindex_schema.ind
--- One table on schema but no indexes
-DROP INDEX concur_reindex_schema.ind;
-REINDEX SCHEMA concur_reindex_schema;
-REINDEX SCHEMA CONCURRENTLY concur_reindex_schema;
-DROP SCHEMA concur_reindex_schema CASCADE;
-NOTICE: drop cascades to table concur_reindex_schema.tab
--- With a partitioned table, and nothing else.
-CREATE TABLE concur_reindex_part (id int) PARTITION BY RANGE (id);
-REINDEX TABLE concur_reindex_part;
-REINDEX TABLE CONCURRENTLY concur_reindex_part;
--- Partition that would be reindexed, still nothing.
-CREATE TABLE concur_reindex_child PARTITION OF concur_reindex_part
-  FOR VALUES FROM (0) TO (10);
-REINDEX TABLE concur_reindex_part;
-REINDEX TABLE CONCURRENTLY concur_reindex_part;
--- Now add some indexes.
-CREATE INDEX concur_reindex_partidx ON concur_reindex_part (id);
-REINDEX INDEX concur_reindex_partidx;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_child_id_idx
-REINDEX INDEX CONCURRENTLY concur_reindex_partidx;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_child_id_idx
-REINDEX TABLE concur_reindex_part;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_child_id_idx
-REINDEX TABLE CONCURRENTLY concur_reindex_part;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_child_id_idx
-DROP TABLE concur_reindex_part;
--- Clean up
-DROP EVENT TRIGGER regress_reindex_start;
-DROP EVENT TRIGGER regress_reindex_end;
-DROP EVENT TRIGGER regress_reindex_end_snap;
-DROP FUNCTION reindex_end_command();
-DROP FUNCTION reindex_end_command_snap();
-DROP FUNCTION reindex_start_command();
-DROP TABLE concur_reindex_tab;
--- test Row Security Event Trigger
-RESET SESSION AUTHORIZATION;
-CREATE TABLE event_trigger_test (a integer, b text);
-CREATE OR REPLACE FUNCTION start_command()
-RETURNS event_trigger AS $$
-BEGIN
-RAISE NOTICE '% - ddl_command_start', tg_tag;
-END;
-$$ LANGUAGE plpgsql;
-CREATE OR REPLACE FUNCTION end_command()
-RETURNS event_trigger AS $$
-BEGIN
-RAISE NOTICE '% - ddl_command_end', tg_tag;
-END;
-$$ LANGUAGE plpgsql;
-CREATE OR REPLACE FUNCTION drop_sql_command()
-RETURNS event_trigger AS $$
-BEGIN
-RAISE NOTICE '% - sql_drop', tg_tag;
-END;
-$$ LANGUAGE plpgsql;
-CREATE EVENT TRIGGER start_rls_command ON ddl_command_start
-    WHEN TAG IN ('CREATE POLICY', 'ALTER POLICY', 'DROP POLICY') EXECUTE PROCEDURE start_command();
-CREATE EVENT TRIGGER end_rls_command ON ddl_command_end
-    WHEN TAG IN ('CREATE POLICY', 'ALTER POLICY', 'DROP POLICY') EXECUTE PROCEDURE end_command();
-CREATE EVENT TRIGGER sql_drop_command ON sql_drop
-    WHEN TAG IN ('DROP POLICY') EXECUTE PROCEDURE drop_sql_command();
-CREATE POLICY p1 ON event_trigger_test USING (FALSE);
-NOTICE: CREATE POLICY - ddl_command_start
-NOTICE: CREATE POLICY - ddl_command_end
-ALTER POLICY p1 ON event_trigger_test USING (TRUE);
-NOTICE: ALTER POLICY - ddl_command_start
-NOTICE: ALTER POLICY - ddl_command_end
-ALTER POLICY p1 ON event_trigger_test RENAME TO p2;
-NOTICE: ALTER POLICY - ddl_command_start
-NOTICE: ALTER POLICY - ddl_command_end
-DROP POLICY p2 ON event_trigger_test;
-NOTICE: DROP POLICY - ddl_command_start
-NOTICE: DROP POLICY - sql_drop
-NOTICE: DROP POLICY - ddl_command_end
--- Check the object addresses of all the event triggers.
-SELECT
-    e.evtname,
-    pg_describe_object('pg_event_trigger'::regclass, e.oid, 0) as descr,
-    b.type, b.object_names, b.object_args,
-    pg_identify_object(a.classid, a.objid, a.objsubid) as ident
-  FROM pg_event_trigger as e,
-    LATERAL pg_identify_object_as_address('pg_event_trigger'::regclass, e.oid, 0) as b,
-    LATERAL pg_get_object_address(b.type, b.object_names, b.object_args) as a
-  ORDER BY e.evtname;
- evtname | descr | type | object_names | object_args | ident
--------------------+---------------------------------+---------------+---------------------+-------------+--------------------------------------------------------
- end_rls_command | event trigger end_rls_command | event trigger | {end_rls_command} | {} | ("event trigger",,end_rls_command,end_rls_command)
- sql_drop_command | event trigger sql_drop_command | event trigger | {sql_drop_command} | {} | ("event trigger",,sql_drop_command,sql_drop_command)
- start_rls_command | event trigger start_rls_command | event trigger | {start_rls_command} | {} | ("event trigger",,start_rls_command,start_rls_command)
-(3 rows)
-
-DROP EVENT TRIGGER start_rls_command;
-DROP EVENT TRIGGER end_rls_command;
-DROP EVENT TRIGGER sql_drop_command;
--- Check the GUC for disabling event triggers
-CREATE FUNCTION test_event_trigger_guc() RETURNS event_trigger
-LANGUAGE plpgsql AS $$
-DECLARE
-    obj record;
-BEGIN
-    FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects()
-    LOOP
-        RAISE NOTICE '% dropped %', tg_tag, obj.object_type;
-    END LOOP;
-END;
-$$;
-CREATE EVENT TRIGGER test_event_trigger_guc
-    ON sql_drop
-    WHEN TAG IN ('DROP POLICY') EXECUTE FUNCTION test_event_trigger_guc();
-SET event_triggers = 'on';
-CREATE POLICY pguc ON event_trigger_test USING (FALSE);
-DROP POLICY pguc ON event_trigger_test;
-NOTICE: DROP POLICY dropped policy
-CREATE POLICY pguc ON event_trigger_test USING (FALSE);
-SET event_triggers = 'off';
-DROP POLICY pguc ON event_trigger_test;
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+	Is the server running locally and accepting connections on that socket?
diff -U3 /Users/admin/pgsql/src/test/regress/expected/event_trigger_login.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/event_trigger_login.out
--- /Users/admin/pgsql/src/test/regress/expected/event_trigger_login.out	2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/event_trigger_login.out	2024-12-13 13:20:14
@@ -1,39 +1,2 @@
--- Login event triggers
-CREATE TABLE user_logins(id serial, who text);
-GRANT SELECT ON user_logins TO public;
-CREATE FUNCTION on_login_proc() RETURNS event_trigger AS $$
-BEGIN
-  INSERT INTO user_logins (who) VALUES (SESSION_USER);
-  RAISE NOTICE 'You are welcome!';
-END;
-$$ LANGUAGE plpgsql;
-CREATE EVENT TRIGGER on_login_trigger ON login EXECUTE PROCEDURE on_login_proc();
-ALTER EVENT TRIGGER on_login_trigger ENABLE ALWAYS;
-\c
-NOTICE: You are welcome!
-SELECT COUNT(*) FROM user_logins;
- count
-------
- 1
-(1 row)
-
-\c
-NOTICE: You are welcome!
-SELECT COUNT(*) FROM user_logins;
- count
-------
- 2
-(1 row)
-
--- Check dathasloginevt in system catalog
-SELECT dathasloginevt FROM pg_database WHERE datname= :'DBNAME';
- dathasloginevt
----------------
- t
-(1 row)
-
--- Cleanup
-DROP TABLE user_logins;
-DROP EVENT TRIGGER on_login_trigger;
-DROP FUNCTION on_login_proc();
-\c
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+	Is the server running locally and accepting connections on that socket?
diff -U3 /Users/admin/pgsql/src/test/regress/expected/fast_default.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/fast_default.out
--- /Users/admin/pgsql/src/test/regress/expected/fast_default.out	2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/fast_default.out	2024-12-13 13:20:14
@@ -1,861 +1,2 @@
---
--- ALTER TABLE ADD COLUMN DEFAULT test
---
-SET search_path = fast_default;
-CREATE SCHEMA fast_default;
-CREATE TABLE m(id OID);
-INSERT INTO m VALUES (NULL::OID);
-CREATE FUNCTION set(tabname name) RETURNS VOID
-AS $$
-BEGIN
-  UPDATE m
-  SET id = (SELECT c.relfilenode
-            FROM pg_class AS c, pg_namespace AS s
-            WHERE c.relname = tabname
-                AND c.relnamespace = s.oid
-                AND s.nspname = 'fast_default');
-END;
-$$ LANGUAGE 'plpgsql';
-CREATE FUNCTION comp() RETURNS TEXT
-AS $$
-BEGIN
-  RETURN (SELECT CASE
-               WHEN m.id = c.relfilenode THEN 'Unchanged'
-               ELSE 'Rewritten'
-               END
-          FROM m, pg_class AS c, pg_namespace AS s
-          WHERE c.relname = 't'
-              AND c.relnamespace = s.oid
-              AND s.nspname = 'fast_default');
-END;
-$$ LANGUAGE 'plpgsql';
-CREATE FUNCTION log_rewrite() RETURNS event_trigger
-LANGUAGE plpgsql as
-$func$
-
-declare
-    this_schema text;
-begin
-    select into this_schema relnamespace::regnamespace::text
-    from pg_class
-    where oid = pg_event_trigger_table_rewrite_oid();
-    if this_schema = 'fast_default'
-    then
-        RAISE NOTICE 'rewriting table % for reason %',
-          pg_event_trigger_table_rewrite_oid()::regclass,
-          pg_event_trigger_table_rewrite_reason();
-    end if;
-end;
-$func$;
-CREATE TABLE has_volatile AS
-SELECT * FROM generate_series(1,10) id;
-CREATE EVENT TRIGGER has_volatile_rewrite
-    ON table_rewrite
-    EXECUTE PROCEDURE log_rewrite();
--- only the last of these should trigger a rewrite
-ALTER TABLE has_volatile ADD col1 int;
-ALTER TABLE has_volatile ADD col2 int DEFAULT 1;
-ALTER TABLE has_volatile ADD col3 timestamptz DEFAULT current_timestamp;
-ALTER TABLE has_volatile ADD col4 int DEFAULT (random() * 10000)::int;
-NOTICE: rewriting table has_volatile for reason 2
--- Test a large sample of different datatypes
-CREATE TABLE T(pk INT NOT NULL PRIMARY KEY, c_int INT DEFAULT 1);
-SELECT set('t');
- set
-----
-
-(1 row)
-
-INSERT INTO T VALUES (1), (2);
-ALTER TABLE T ADD COLUMN c_bpchar BPCHAR(5) DEFAULT 'hello',
-              ALTER COLUMN c_int SET DEFAULT 2;
-INSERT INTO T VALUES (3), (4);
-ALTER TABLE T ADD COLUMN c_text TEXT DEFAULT 'world',
-              ALTER COLUMN c_bpchar SET DEFAULT 'dog';
-INSERT INTO T VALUES (5), (6);
-ALTER TABLE T ADD COLUMN c_date DATE DEFAULT '2016-06-02',
-              ALTER COLUMN c_text SET DEFAULT 'cat';
-INSERT INTO T VALUES (7), (8);
-ALTER TABLE T ADD COLUMN c_timestamp TIMESTAMP DEFAULT '2016-09-01 12:00:00',
-              ADD COLUMN c_timestamp_null TIMESTAMP,
-              ALTER COLUMN c_date SET DEFAULT '2010-01-01';
-INSERT INTO T VALUES (9), (10);
-ALTER TABLE T ADD COLUMN c_array TEXT[]
-                  DEFAULT '{"This", "is", "the", "real", "world"}',
-              ALTER COLUMN c_timestamp SET DEFAULT '1970-12-31 11:12:13',
-              ALTER COLUMN c_timestamp_null SET DEFAULT '2016-09-29 12:00:00';
-INSERT INTO T VALUES (11), (12);
-ALTER TABLE T ADD COLUMN c_small SMALLINT DEFAULT -5,
-              ADD COLUMN c_small_null SMALLINT,
-              ALTER COLUMN c_array
-                  SET DEFAULT '{"This", "is", "no", "fantasy"}';
-INSERT INTO T VALUES (13), (14);
-ALTER TABLE T ADD COLUMN c_big BIGINT DEFAULT 180000000000018,
-              ALTER COLUMN c_small SET DEFAULT 9,
-              ALTER COLUMN c_small_null SET DEFAULT 13;
-INSERT INTO T VALUES (15), (16);
-ALTER TABLE T ADD COLUMN c_num NUMERIC DEFAULT 1.00000000001,
-              ALTER COLUMN c_big SET DEFAULT -9999999999999999;
-INSERT INTO T VALUES (17), (18);
-ALTER TABLE T ADD COLUMN c_time TIME DEFAULT '12:00:00',
-              ALTER COLUMN c_num SET DEFAULT 2.000000000000002;
-INSERT INTO T VALUES (19), (20);
-ALTER TABLE T ADD COLUMN c_interval INTERVAL DEFAULT '1 day',
-              ALTER COLUMN c_time SET DEFAULT '23:59:59';
-INSERT INTO T VALUES (21), (22);
-ALTER TABLE T ADD COLUMN c_hugetext TEXT DEFAULT repeat('abcdefg',1000),
-              ALTER COLUMN c_interval SET DEFAULT '3 hours';
-INSERT INTO T VALUES (23), (24);
-ALTER TABLE T ALTER COLUMN c_interval DROP DEFAULT,
-              ALTER COLUMN c_hugetext SET DEFAULT repeat('poiuyt', 1000);
-INSERT INTO T VALUES (25), (26);
-ALTER TABLE T ALTER COLUMN c_bpchar DROP DEFAULT,
-              ALTER COLUMN c_date DROP DEFAULT,
-              ALTER COLUMN c_text DROP DEFAULT,
-              ALTER COLUMN c_timestamp DROP DEFAULT,
-              ALTER COLUMN c_array DROP DEFAULT,
-              ALTER COLUMN c_small DROP DEFAULT,
-              ALTER COLUMN c_big DROP DEFAULT,
-              ALTER COLUMN c_num DROP DEFAULT,
-              ALTER COLUMN c_time DROP DEFAULT,
-              ALTER COLUMN c_hugetext DROP DEFAULT;
-INSERT INTO T VALUES (27), (28);
-SELECT pk, c_int, c_bpchar, c_text, c_date, c_timestamp,
-       c_timestamp_null, c_array, c_small, c_small_null,
-       c_big, c_num, c_time, c_interval,
-       c_hugetext = repeat('abcdefg',1000) as c_hugetext_origdef,
-       c_hugetext = repeat('poiuyt', 1000) as c_hugetext_newdef
-FROM T ORDER BY pk;
- pk | c_int | c_bpchar | c_text | c_date | c_timestamp | c_timestamp_null | c_array | c_small | c_small_null | c_big | c_num | c_time | c_interval | c_hugetext_origdef | c_hugetext_newdef
-----+-------+----------+--------+------------+--------------------------+--------------------------+--------------------------+---------+--------------+-------------------+-------------------+----------+------------+--------------------+-------------------
- 1 | 1 | hello | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f
- 2 | 1 | hello | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f
- 3 | 2 | hello | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f
- 4 | 2 | hello | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f
- 5 | 2 | dog | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f
- 6 | 2 | dog | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f
- 7 | 2 | dog | cat | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f
- 8 | 2 | dog | cat | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f
- 9 | 2 | dog | cat | 01-01-2010 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f
- 10 | 2 | dog | cat | 01-01-2010 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f
- 11 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f
- 12 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f
- 13 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f
- 14 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f
- 15 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f
- 16 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f
- 17 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 1.00000000001 | 12:00:00 | @ 1 day | t | f
- 18 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 1.00000000001 | 12:00:00 | @ 1 day | t | f
- 19 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 12:00:00 | @ 1 day | t | f
- 20 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 12:00:00 | @ 1 day | t | f
- 21 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | @ 1 day | t | f
- 22 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | @ 1 day | t | f
- 23 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | @ 3 hours | t | f
- 24 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | @ 3 hours | t | f
- 25 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | | f | t
- 26 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | | f | t
- 27 | 2 | | | | | Thu Sep 29 12:00:00 2016 | | | 13 | | | | | |
- 28 | 2 | | | | | Thu Sep 29 12:00:00 2016 | | | 13 | | | | | |
-(28 rows)
-
-SELECT comp();
- comp
-----------
- Unchanged
-(1 row)
-
-DROP TABLE T;
--- Test expressions in the defaults
-CREATE OR REPLACE FUNCTION foo(a INT) RETURNS TEXT AS $$
-DECLARE res TEXT := '';
-        i INT;
-BEGIN
-  i := 0;
-  WHILE (i < a) LOOP
-    res := res || chr(ascii('a') + i);
-    i := i + 1;
-  END LOOP;
-  RETURN res;
-END; $$ LANGUAGE PLPGSQL STABLE;
-CREATE TABLE T(pk INT NOT NULL PRIMARY KEY, c_int INT DEFAULT LENGTH(foo(6)));
-SELECT set('t');
- set
-----
-
-(1 row)
-
-INSERT INTO T VALUES (1), (2);
-ALTER TABLE T ADD COLUMN c_bpchar BPCHAR(5) DEFAULT foo(4),
-              ALTER COLUMN c_int SET DEFAULT LENGTH(foo(8));
-INSERT INTO T VALUES (3), (4);
-ALTER TABLE T ADD COLUMN c_text TEXT DEFAULT foo(6),
-              ALTER COLUMN c_bpchar SET DEFAULT foo(3);
-INSERT INTO T VALUES (5), (6);
-ALTER TABLE T ADD COLUMN c_date DATE
-                  DEFAULT '2016-06-02'::DATE + LENGTH(foo(10)),
-              ALTER COLUMN c_text SET DEFAULT foo(12);
-INSERT INTO T VALUES (7), (8);
-ALTER TABLE T ADD COLUMN c_timestamp TIMESTAMP
-                  DEFAULT '2016-09-01'::DATE + LENGTH(foo(10)),
-              ALTER COLUMN c_date
-                  SET DEFAULT '2010-01-01'::DATE - LENGTH(foo(4));
-INSERT INTO T VALUES (9), (10);
-ALTER TABLE T ADD COLUMN c_array TEXT[]
-                  DEFAULT ('{"This", "is", "' || foo(4) ||
-                  '","the", "real", "world"}')::TEXT[],
-              ALTER COLUMN c_timestamp
-                  SET DEFAULT '1970-12-31'::DATE + LENGTH(foo(30));
-INSERT INTO T VALUES (11), (12);
-ALTER TABLE T ALTER COLUMN c_int DROP DEFAULT,
-              ALTER COLUMN c_array
-                  SET DEFAULT ('{"This", "is", "' || foo(1) ||
-                  '", "fantasy"}')::text[];
-INSERT INTO T VALUES (13), (14);
-ALTER TABLE T ALTER COLUMN c_bpchar DROP DEFAULT,
-              ALTER COLUMN c_date DROP DEFAULT,
-              ALTER COLUMN c_text DROP DEFAULT,
-              ALTER COLUMN c_timestamp DROP DEFAULT,
-              ALTER COLUMN c_array DROP DEFAULT;
-INSERT INTO T VALUES (15), (16);
-SELECT * FROM T;
- pk | c_int | c_bpchar | c_text | c_date | c_timestamp | c_array
-----+-------+----------+--------------+------------+--------------------------+-------------------------------
- 1 | 6 | abcd | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world}
- 2 | 6 | abcd | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world}
- 3 | 8 | abcd | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world}
- 4 | 8 | abcd | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world}
- 5 | 8 | abc | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world}
- 6 | 8 | abc | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world}
- 7 | 8 | abc | abcdefghijkl | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world}
- 8 | 8 | abc | abcdefghijkl | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world}
- 9 | 8 | abc | abcdefghijkl | 12-28-2009 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world}
- 10 | 8 | abc | abcdefghijkl | 12-28-2009 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world}
- 11 | 8 | abc | abcdefghijkl | 12-28-2009 | Sat Jan 30 00:00:00 1971 | {This,is,abcd,the,real,world}
- 12 | 8 | abc | abcdefghijkl | 12-28-2009 | Sat Jan 30 00:00:00 1971 | {This,is,abcd,the,real,world}
- 13 | | abc | abcdefghijkl | 12-28-2009 | Sat Jan 30 00:00:00 1971 | {This,is,a,fantasy}
- 14 | | abc | abcdefghijkl | 12-28-2009 | Sat Jan 30 00:00:00 1971 | {This,is,a,fantasy}
- 15 | | | | | |
- 16 | | | | | |
-(16 rows)
-
-SELECT comp();
- comp
-----------
- Unchanged
-(1 row)
-
-DROP TABLE T;
-DROP FUNCTION foo(INT);
--- Fall back to full rewrite for volatile expressions
-CREATE TABLE T(pk INT NOT NULL PRIMARY KEY);
-INSERT INTO T VALUES (1);
-SELECT set('t');
- set
-----
-
-(1 row)
-
--- now() is stable, because it returns the transaction timestamp
-ALTER TABLE T ADD COLUMN c1 TIMESTAMP DEFAULT now();
-SELECT comp();
- comp
-----------
- Unchanged
-(1 row)
-
--- clock_timestamp() is volatile
-ALTER TABLE T ADD COLUMN c2 TIMESTAMP DEFAULT clock_timestamp();
-NOTICE: rewriting table t for reason 2
-SELECT comp();
- comp
-----------
- Rewritten
-(1 row)
-
--- check that we notice insertion of a volatile default argument
-CREATE FUNCTION foolme(timestamptz DEFAULT clock_timestamp())
-    RETURNS timestamptz
-    IMMUTABLE AS 'select $1' LANGUAGE sql;
-ALTER TABLE T ADD COLUMN c3 timestamptz DEFAULT foolme();
-NOTICE: rewriting table t for reason 2
-SELECT attname, atthasmissing, attmissingval FROM pg_attribute
-    WHERE attrelid = 't'::regclass AND attnum > 0
-    ORDER BY attnum;
- attname | atthasmissing | attmissingval
----------+---------------+---------------
- pk | f |
- c1 | f |
- c2 | f |
- c3 | f |
-(4 rows)
-
-DROP TABLE T;
-DROP FUNCTION foolme(timestamptz);
--- Simple querie
-CREATE TABLE T (pk INT NOT NULL PRIMARY KEY);
-SELECT set('t');
- set
-----
-
-(1 row)
-
-INSERT INTO T SELECT * FROM generate_series(1, 10) a;
-ALTER TABLE T ADD COLUMN c_bigint BIGINT NOT NULL DEFAULT -1;
-INSERT INTO T SELECT b, b - 10 FROM generate_series(11, 20) a(b);
-ALTER TABLE T ADD COLUMN c_text TEXT DEFAULT 'hello';
-INSERT INTO T SELECT b, b - 10, (b + 10)::text FROM generate_series(21, 30) a(b);
--- WHERE clause
-SELECT c_bigint, c_text FROM T WHERE c_bigint = -1 LIMIT 1;
- c_bigint | c_text
-----------+--------
- -1 | hello
-(1 row)
-
-EXPLAIN (VERBOSE TRUE, COSTS FALSE)
-SELECT c_bigint, c_text FROM T WHERE c_bigint = -1 LIMIT 1;
- QUERY PLAN
-----------------------------------------------
- Limit
- Output: c_bigint, c_text
- -> Seq Scan on fast_default.t
- Output: c_bigint, c_text
- Filter: (t.c_bigint = '-1'::integer)
-(5 rows)
-
-SELECT c_bigint, c_text FROM T WHERE c_text = 'hello' LIMIT 1;
- c_bigint | c_text
-----------+--------
- -1 | hello
-(1 row)
-
-EXPLAIN (VERBOSE TRUE, COSTS FALSE) SELECT c_bigint, c_text FROM T WHERE c_text = 'hello' LIMIT 1;
- QUERY PLAN
---------------------------------------------
- Limit
- Output: c_bigint, c_text
- -> Seq Scan on fast_default.t
- Output: c_bigint, c_text
- Filter: (t.c_text = 'hello'::text)
-(5 rows)
-
--- COALESCE
-SELECT COALESCE(c_bigint, pk), COALESCE(c_text, pk::text)
-FROM T
-ORDER BY pk LIMIT 10;
- coalesce | coalesce
-----------+----------
- -1 | hello
- -1 | hello
- -1 | hello
- -1 | hello
- -1 | hello
- -1 | hello
- -1 | hello
- -1 | hello
- -1 | hello
- -1 | hello
-(10 rows)
-
--- Aggregate function
-SELECT SUM(c_bigint), MAX(c_text COLLATE "C" ), MIN(c_text COLLATE "C") FROM T;
- sum | max | min
------+-------+-----
- 200 | hello | 31
-(1 row)
-
--- ORDER BY
-SELECT * FROM T ORDER BY c_bigint, c_text, pk LIMIT 10;
- pk | c_bigint | c_text
-----+----------+--------
- 1 | -1 | hello
- 2 | -1 | hello
- 3 | -1 | hello
- 4 | -1 | hello
- 5 | -1 | hello
- 6 | -1 | hello
- 7 | -1 | hello
- 8 | -1 | hello
- 9 | -1 | hello
- 10 | -1 | hello
-(10 rows)
-
-EXPLAIN (VERBOSE TRUE, COSTS FALSE)
-SELECT * FROM T ORDER BY c_bigint, c_text, pk LIMIT 10;
- QUERY PLAN
-----------------------------------------------
- Limit
- Output: pk, c_bigint, c_text
- -> Sort
- Output: pk, c_bigint, c_text
- Sort Key: t.c_bigint, t.c_text, t.pk
- -> Seq Scan on fast_default.t
- Output: pk, c_bigint, c_text
-(7 rows)
-
--- LIMIT
-SELECT * FROM T WHERE c_bigint > -1 ORDER BY c_bigint, c_text, pk LIMIT 10;
- pk | c_bigint | c_text
-----+----------+--------
- 11 | 1 | hello
- 12 | 2 | hello
- 13 | 3 | hello
- 14 | 4 | hello
- 15 | 5 | hello
- 16 | 6 | hello
- 17 | 7 | hello
- 18 | 8 | hello
- 19 | 9 | hello
- 20 | 10 | hello
-(10 rows)
-
-EXPLAIN (VERBOSE TRUE, COSTS FALSE)
-SELECT * FROM T WHERE c_bigint > -1 ORDER BY c_bigint, c_text, pk LIMIT 10;
- QUERY PLAN
-----------------------------------------------------
- Limit
- Output: pk, c_bigint, c_text
- -> Sort
- Output: pk, c_bigint, c_text
- Sort Key: t.c_bigint, t.c_text, t.pk
- -> Seq Scan on fast_default.t
- Output: pk, c_bigint, c_text
- Filter: (t.c_bigint > '-1'::integer)
-(8 rows)
-
--- DELETE with RETURNING
-DELETE FROM T WHERE pk BETWEEN 10 AND 20 RETURNING *;
- pk | c_bigint | c_text
-----+----------+--------
- 10 | -1 | hello
- 11 | 1 | hello
- 12 | 2 | hello
- 13 | 3 | hello
- 14 | 4 | hello
- 15 | 5 | hello
- 16 | 6 | hello
- 17 | 7 | hello
- 18 | 8 | hello
- 19 | 9 | hello
- 20 | 10 | hello
-(11 rows)
-
-EXPLAIN (VERBOSE TRUE, COSTS FALSE)
-DELETE FROM T WHERE pk BETWEEN 10 AND 20 RETURNING *;
- QUERY PLAN
------------------------------------------------------------
- Delete on fast_default.t
- Output: pk, c_bigint, c_text
- -> Bitmap Heap Scan on fast_default.t
- Output: ctid
- Recheck Cond: ((t.pk >= 10) AND (t.pk <= 20))
- -> Bitmap Index Scan on t_pkey
- Index Cond: ((t.pk >= 10) AND (t.pk <= 20))
-(7 rows)
-
--- UPDATE
-UPDATE T SET c_text = '"' || c_text || '"' WHERE pk < 10;
-SELECT * FROM T WHERE c_text LIKE '"%"' ORDER BY PK;
- pk | c_bigint | c_text
-----+----------+---------
- 1 | -1 | "hello"
- 2 | -1 | "hello"
- 3 | -1 | "hello"
- 4 | -1 | "hello"
- 5 | -1 | "hello"
- 6 | -1 | "hello"
- 7 | -1 | "hello"
- 8 | -1 | "hello"
- 9 | -1 | "hello"
-(9 rows)
-
-SELECT comp();
- comp
-----------
- Unchanged
-(1 row)
-
-DROP TABLE T;
--- Combine with other DDL
-CREATE TABLE T(pk INT NOT NULL PRIMARY KEY);
-SELECT set('t');
- set
-----
-
-(1 row)
-
-INSERT INTO T VALUES (1), (2);
-ALTER TABLE T ADD COLUMN c_int INT NOT NULL DEFAULT -1;
-INSERT INTO T VALUES (3), (4);
-ALTER TABLE T ADD COLUMN c_text TEXT DEFAULT 'Hello';
-INSERT INTO T VALUES (5), (6);
-ALTER TABLE T ALTER COLUMN c_text SET DEFAULT 'world',
-              ALTER COLUMN c_int SET DEFAULT 1;
-INSERT INTO T VALUES (7), (8);
-SELECT * FROM T ORDER BY pk;
- pk | c_int | c_text
-----+-------+--------
- 1 | -1 | Hello
- 2 | -1 | Hello
- 3 | -1 | Hello
- 4 | -1 | Hello
- 5 | -1 | Hello
- 6 | -1 | Hello
- 7 | 1 | world
- 8 | 1 | world
-(8 rows)
-
--- Add an index
-CREATE INDEX i ON T(c_int, c_text);
-SELECT c_text FROM T WHERE c_int = -1;
- c_text
---------
- Hello
- Hello
- Hello
- Hello
- Hello
- Hello
-(6 rows)
-
-SELECT comp();
- comp
-----------
- Unchanged
-(1 row)
-
--- query to exercise expand_tuple function
-CREATE TABLE t1 AS
-SELECT 1::int AS a , 2::int AS b
-FROM generate_series(1,20) q;
-ALTER TABLE t1 ADD COLUMN c text;
-SELECT a,
-       stddev(cast((SELECT sum(1) FROM generate_series(1,20) x) AS float4))
-       OVER (PARTITION BY a,b,c ORDER BY b)
-       AS z
-FROM t1;
- a | z
----+---
- 1 | 0
- 1 | 0
- 1 | 0
- 1 | 0
- 1 | 0
- 1 | 0
- 1 | 0
- 1 | 0
- 1 | 0
- 1 | 0
- 1 | 0
- 1 | 0
- 1 | 0
- 1 | 0
- 1 | 0
- 1 | 0
- 1 | 0
- 1 | 0
- 1 | 0
- 1 | 0
-(20 rows)
-
-DROP TABLE T;
--- test that we account for missing columns without defaults correctly
--- in expand_tuple, and that rows are correctly expanded for triggers
-CREATE FUNCTION test_trigger()
-RETURNS trigger
-LANGUAGE plpgsql
-AS $$
- -begin - raise notice 'old tuple: %', to_json(OLD)::text; - if TG_OP = 'DELETE' - then - return OLD; - else - return NEW; - end if; -end; - -$$; --- 2 new columns, both have defaults -CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); -INSERT INTO t (a,b,c) VALUES (1,2,3); -ALTER TABLE t ADD COLUMN x int NOT NULL DEFAULT 4; -ALTER TABLE t ADD COLUMN y int NOT NULL DEFAULT 5; -CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | 3 | 4 | 5 -(1 row) - -UPDATE t SET y = 2; -NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":3,"x":4,"y":5} -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | 3 | 4 | 2 -(1 row) - -DROP TABLE t; --- 2 new columns, first has default -CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); -INSERT INTO t (a,b,c) VALUES (1,2,3); -ALTER TABLE t ADD COLUMN x int NOT NULL DEFAULT 4; -ALTER TABLE t ADD COLUMN y int; -CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | 3 | 4 | -(1 row) - -UPDATE t SET y = 2; -NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":3,"x":4,"y":null} -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | 3 | 4 | 2 -(1 row) - -DROP TABLE t; --- 2 new columns, second has default -CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); -INSERT INTO t (a,b,c) VALUES (1,2,3); -ALTER TABLE t ADD COLUMN x int; -ALTER TABLE t ADD COLUMN y int NOT NULL DEFAULT 5; -CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | 3 | | 5 -(1 row) - -UPDATE t SET y = 2; -NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":3,"x":null,"y":5} -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | 3 | | 2 -(1 row) - -DROP TABLE t; --- 2 new columns, neither has default -CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); -INSERT INTO t (a,b,c) VALUES (1,2,3); -ALTER TABLE t ADD COLUMN x int; -ALTER TABLE t ADD COLUMN y int; -CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | 3 | | -(1 row) - -UPDATE t SET y = 2; -NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":3,"x":null,"y":null} -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | 3 | | 2 -(1 row) - -DROP TABLE t; --- same as last 4 tests but here the last original column has a NULL value --- 2 new columns, both have defaults -CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); -INSERT INTO t (a,b,c) VALUES (1,2,NULL); -ALTER TABLE t ADD COLUMN x int NOT NULL DEFAULT 4; -ALTER TABLE t ADD COLUMN y int NOT NULL DEFAULT 5; -CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | | 4 | 5 -(1 row) - -UPDATE t SET y = 2; -NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":null,"x":4,"y":5} -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | | 4 | 2 -(1 row) - -DROP TABLE t; --- 2 new columns, first has default -CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); -INSERT INTO t (a,b,c) VALUES (1,2,NULL); -ALTER TABLE t ADD COLUMN x int NOT NULL DEFAULT 4; -ALTER TABLE t ADD COLUMN y int; -CREATE TRIGGER a BEFORE 
UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | | 4 | -(1 row) - -UPDATE t SET y = 2; -NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":null,"x":4,"y":null} -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | | 4 | 2 -(1 row) - -DROP TABLE t; --- 2 new columns, second has default -CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); -INSERT INTO t (a,b,c) VALUES (1,2,NULL); -ALTER TABLE t ADD COLUMN x int; -ALTER TABLE t ADD COLUMN y int NOT NULL DEFAULT 5; -CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | | | 5 -(1 row) - -UPDATE t SET y = 2; -NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":null,"x":null,"y":5} -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | | | 2 -(1 row) - -DROP TABLE t; --- 2 new columns, neither has default -CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); -INSERT INTO t (a,b,c) VALUES (1,2,NULL); -ALTER TABLE t ADD COLUMN x int; -ALTER TABLE t ADD COLUMN y int; -CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | | | -(1 row) - -UPDATE t SET y = 2; -NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":null,"x":null,"y":null} -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | | | 2 -(1 row) - -DROP TABLE t; --- make sure expanded tuple has correct self pointer --- it will be required by the RI trigger doing the cascading delete -CREATE TABLE leader (a int PRIMARY KEY, b int); -CREATE TABLE follower (a int REFERENCES leader ON DELETE CASCADE, b int); -INSERT INTO leader VALUES (1, 1), (2, 2); -ALTER TABLE leader ADD c int; -ALTER TABLE leader DROP c; -DELETE FROM leader; --- check that ALTER TABLE ... 
ALTER TYPE does the right thing -CREATE TABLE vtype( a integer); -INSERT INTO vtype VALUES (1); -ALTER TABLE vtype ADD COLUMN b DOUBLE PRECISION DEFAULT 0.2; -ALTER TABLE vtype ADD COLUMN c BOOLEAN DEFAULT true; -SELECT * FROM vtype; - a | b | c ----+-----+--- - 1 | 0.2 | t -(1 row) - -ALTER TABLE vtype - ALTER b TYPE text USING b::text, - ALTER c TYPE text USING c::text; -NOTICE: rewriting table vtype for reason 4 -SELECT * FROM vtype; - a | b | c ----+-----+------ - 1 | 0.2 | true -(1 row) - --- also check the case that doesn't rewrite the table -CREATE TABLE vtype2 (a int); -INSERT INTO vtype2 VALUES (1); -ALTER TABLE vtype2 ADD COLUMN b varchar(10) DEFAULT 'xxx'; -ALTER TABLE vtype2 ALTER COLUMN b SET DEFAULT 'yyy'; -INSERT INTO vtype2 VALUES (2); -ALTER TABLE vtype2 ALTER COLUMN b TYPE varchar(20) USING b::varchar(20); -SELECT * FROM vtype2; - a | b ----+----- - 1 | xxx - 2 | yyy -(2 rows) - --- Ensure that defaults are checked when evaluating whether HOT update --- is possible, this was broken for a while: --- https://postgr.es/m/20190202133521.ylauh3ckqa7colzj%40alap3.anarazel.de -BEGIN; -CREATE TABLE t(); -INSERT INTO t DEFAULT VALUES; -ALTER TABLE t ADD COLUMN a int DEFAULT 1; -CREATE INDEX ON t(a); --- set column with a default 1 to NULL, due to a bug that wasn't --- noticed has heap_getattr buggily returned NULL for default columns -UPDATE t SET a = NULL; --- verify that index and non-index scans show the same result -SET LOCAL enable_seqscan = true; -SELECT * FROM t WHERE a IS NULL; - a ---- - -(1 row) - -SET LOCAL enable_seqscan = false; -SELECT * FROM t WHERE a IS NULL; - a ---- - -(1 row) - -ROLLBACK; --- verify that a default set on a non-plain table doesn't set a missing --- value on the attribute -CREATE FOREIGN DATA WRAPPER dummy; -CREATE SERVER s0 FOREIGN DATA WRAPPER dummy; -CREATE FOREIGN TABLE ft1 (c1 integer NOT NULL) SERVER s0; -ALTER FOREIGN TABLE ft1 ADD COLUMN c8 integer DEFAULT 0; -ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE char(10); -SELECT count(*) - FROM pg_attribute - WHERE attrelid = 'ft1'::regclass AND - (attmissingval IS NOT NULL OR atthasmissing); - count -------- - 0 -(1 row) - --- cleanup -DROP FOREIGN TABLE ft1; -DROP SERVER s0; -DROP FOREIGN DATA WRAPPER dummy; -DROP TABLE vtype; -DROP TABLE vtype2; -DROP TABLE follower; -DROP TABLE leader; -DROP FUNCTION test_trigger(); -DROP TABLE t1; -DROP FUNCTION set(name); -DROP FUNCTION comp(); -DROP TABLE m; -DROP TABLE has_volatile; -DROP EVENT TRIGGER has_volatile_rewrite; -DROP FUNCTION log_rewrite; -DROP SCHEMA fast_default; --- Leave a table with an active fast default in place, for pg_upgrade testing -set search_path = public; -create table has_fast_default(f1 int); -insert into has_fast_default values(1); -alter table has_fast_default add column f2 int default 42; -table has_fast_default; - f1 | f2 -----+---- - 1 | 42 -(1 row) - +psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory + Is the server running locally and accepting connections on that socket? 
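Note that the fast_default.out hunk above is reported as missing because the server crashed mid-run, not because the fast-default behavior itself regressed. For readers unfamiliar with the mechanism under test, the sketch below reproduces it standalone; the table and column names are illustrative and not taken from the suite. Adding a column with a non-NULL default does not rewrite the heap; the default is stashed in pg_attribute.attmissingval and applied when old tuples are read:

-- A minimal sketch of the "fast default" path the lost hunk exercises
-- (illustrative names; run against a scratch database).
CREATE TABLE fast_default_demo (id int);
INSERT INTO fast_default_demo VALUES (1);
-- No table rewrite here: the default is recorded in the catalog instead.
ALTER TABLE fast_default_demo ADD COLUMN flag int NOT NULL DEFAULT 42;
-- atthasmissing/attmissingval show the stored "missing" value.
SELECT attname, atthasmissing, attmissingval
  FROM pg_attribute
 WHERE attrelid = 'fast_default_demo'::regclass AND attnum > 0;
-- The pre-existing row reads back as (1, 42) even though the stored
-- heap tuple still has only one attribute.
SELECT * FROM fast_default_demo;
DROP TABLE fast_default_demo;

The final SELECT is served by expanding the short heap tuple on access, which is the same code path (expand_tuple) that the trigger tests in the hunk above probe with every combination of defaulted and non-defaulted added columns.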
diff -U3 /Users/admin/pgsql/src/test/regress/expected/tablespace.out /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/tablespace.out
--- /Users/admin/pgsql/src/test/regress/expected/tablespace.out	2024-12-13 13:17:12
+++ /Users/admin/pgsql/build/testrun/pg_upgrade/002_pg_upgrade/data/results/tablespace.out	2024-12-13 13:20:14
@@ -1,973 +1,2 @@
--- relative tablespace locations are not allowed
-CREATE TABLESPACE regress_tblspace LOCATION 'relative'; -- fail
-ERROR: tablespace location must be an absolute path
--- empty tablespace locations are not usually allowed
-CREATE TABLESPACE regress_tblspace LOCATION ''; -- fail
-ERROR: tablespace location must be an absolute path
--- as a special developer-only option to allow us to use tablespaces
--- with streaming replication on the same server, an empty location
--- can be allowed as a way to say that the tablespace should be created
--- as a directory in pg_tblspc, rather than being a symlink
-SET allow_in_place_tablespaces = true;
--- create a tablespace using WITH clause
-CREATE TABLESPACE regress_tblspacewith LOCATION '' WITH (some_nonexistent_parameter = true); -- fail
-ERROR: unrecognized parameter "some_nonexistent_parameter"
-CREATE TABLESPACE regress_tblspacewith LOCATION '' WITH (random_page_cost = 3.0); -- ok
--- check to see the parameter was used
-SELECT spcoptions FROM pg_tablespace WHERE spcname = 'regress_tblspacewith';
-       spcoptions       
-------------------------
- {random_page_cost=3.0}
-(1 row)
-
--- drop the tablespace so we can re-use the location
-DROP TABLESPACE regress_tblspacewith;
--- This returns a relative path as of an effect of allow_in_place_tablespaces,
--- masking the tablespace OID used in the path name.
-SELECT regexp_replace(pg_tablespace_location(oid), '(pg_tblspc)/(\d+)', '\1/NNN')
-  FROM pg_tablespace WHERE spcname = 'regress_tblspace';
- regexp_replace 
-----------------
- pg_tblspc/NNN
-(1 row)
-
--- try setting and resetting some properties for the new tablespace
-ALTER TABLESPACE regress_tblspace SET (random_page_cost = 1.0, seq_page_cost = 1.1);
-ALTER TABLESPACE regress_tblspace SET (some_nonexistent_parameter = true); -- fail
-ERROR: unrecognized parameter "some_nonexistent_parameter"
-ALTER TABLESPACE regress_tblspace RESET (random_page_cost = 2.0); -- fail
-ERROR: RESET must not include values for parameters
-ALTER TABLESPACE regress_tblspace RESET (random_page_cost, effective_io_concurrency); -- ok
--- REINDEX (TABLESPACE)
--- catalogs and system tablespaces
--- system catalog, fail
-REINDEX (TABLESPACE regress_tblspace) TABLE pg_am;
-ERROR: cannot move system relation "pg_am_name_index"
-REINDEX (TABLESPACE regress_tblspace) TABLE CONCURRENTLY pg_am;
-ERROR: cannot reindex system catalogs concurrently
--- shared catalog, fail
-REINDEX (TABLESPACE regress_tblspace) TABLE pg_authid;
-ERROR: cannot move system relation "pg_authid_rolname_index"
-REINDEX (TABLESPACE regress_tblspace) TABLE CONCURRENTLY pg_authid;
-ERROR: cannot reindex system catalogs concurrently
--- toast relations, fail
-REINDEX (TABLESPACE regress_tblspace) INDEX pg_toast.pg_toast_1262_index;
-ERROR: cannot move system relation "pg_toast_1262_index"
-REINDEX (TABLESPACE regress_tblspace) INDEX CONCURRENTLY pg_toast.pg_toast_1262_index;
-ERROR: cannot reindex system catalogs concurrently
-REINDEX (TABLESPACE regress_tblspace) TABLE pg_toast.pg_toast_1262;
-ERROR: cannot move system relation "pg_toast_1262_index"
-REINDEX (TABLESPACE regress_tblspace) TABLE CONCURRENTLY pg_toast.pg_toast_1262;
-ERROR: cannot reindex system catalogs concurrently
--- system catalog, fail
-REINDEX (TABLESPACE pg_global) TABLE pg_authid;
-ERROR: cannot move system relation "pg_authid_rolname_index"
-REINDEX (TABLESPACE pg_global) TABLE CONCURRENTLY pg_authid;
-ERROR: cannot reindex system catalogs concurrently
--- table with toast relation
-CREATE TABLE regress_tblspace_test_tbl (num1 bigint, num2 double precision, t text);
-INSERT INTO regress_tblspace_test_tbl (num1, num2, t)
-  SELECT round(random()*100), random(), 'text'
-  FROM generate_series(1, 10) s(i);
-CREATE INDEX regress_tblspace_test_tbl_idx ON regress_tblspace_test_tbl (num1);
--- move to global tablespace, fail
-REINDEX (TABLESPACE pg_global) INDEX regress_tblspace_test_tbl_idx;
-ERROR: only shared relations can be placed in pg_global tablespace
-REINDEX (TABLESPACE pg_global) INDEX CONCURRENTLY regress_tblspace_test_tbl_idx;
-ERROR: cannot move non-shared relation to tablespace "pg_global"
--- check transactional behavior of REINDEX (TABLESPACE)
-BEGIN;
-REINDEX (TABLESPACE regress_tblspace) INDEX regress_tblspace_test_tbl_idx;
-REINDEX (TABLESPACE regress_tblspace) TABLE regress_tblspace_test_tbl;
-ROLLBACK;
--- no relation moved to the new tablespace
-SELECT c.relname FROM pg_class c, pg_tablespace s
-  WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace';
- relname 
----------
-(0 rows)
-
--- check that all indexes are moved to a new tablespace with different
--- relfilenode.
--- Save first the existing relfilenode for the toast and main relations.
-SELECT relfilenode as main_filenode FROM pg_class
-  WHERE relname = 'regress_tblspace_test_tbl_idx' \gset
-SELECT relfilenode as toast_filenode FROM pg_class
-  WHERE oid =
-    (SELECT i.indexrelid
-       FROM pg_class c,
-            pg_index i
-       WHERE i.indrelid = c.reltoastrelid AND
-             c.relname = 'regress_tblspace_test_tbl') \gset
-REINDEX (TABLESPACE regress_tblspace) TABLE regress_tblspace_test_tbl;
-SELECT c.relname FROM pg_class c, pg_tablespace s
-  WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace'
-  ORDER BY c.relname;
-            relname            
--------------------------------
- regress_tblspace_test_tbl_idx
-(1 row)
-
-ALTER TABLE regress_tblspace_test_tbl SET TABLESPACE regress_tblspace;
-ALTER TABLE regress_tblspace_test_tbl SET TABLESPACE pg_default;
-SELECT c.relname FROM pg_class c, pg_tablespace s
-  WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace'
-  ORDER BY c.relname;
-            relname            
--------------------------------
- regress_tblspace_test_tbl_idx
-(1 row)
-
--- Move back to the default tablespace.
-ALTER INDEX regress_tblspace_test_tbl_idx SET TABLESPACE pg_default;
-SELECT c.relname FROM pg_class c, pg_tablespace s
-  WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace'
-  ORDER BY c.relname;
- relname 
----------
-(0 rows)
-
-REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE regress_tblspace_test_tbl;
-SELECT c.relname FROM pg_class c, pg_tablespace s
-  WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace'
-  ORDER BY c.relname;
-            relname            
--------------------------------
- regress_tblspace_test_tbl_idx
-(1 row)
-
-SELECT relfilenode = :main_filenode AS main_same FROM pg_class
-  WHERE relname = 'regress_tblspace_test_tbl_idx';
- main_same 
------------
- f
-(1 row)
-
-SELECT relfilenode = :toast_filenode as toast_same FROM pg_class
-  WHERE oid =
-    (SELECT i.indexrelid
-       FROM pg_class c,
-            pg_index i
-       WHERE i.indrelid = c.reltoastrelid AND
-             c.relname = 'regress_tblspace_test_tbl');
- toast_same 
-------------
- f
-(1 row)
-
-DROP TABLE regress_tblspace_test_tbl;
--- REINDEX (TABLESPACE) with partitions
--- Create a partition tree and check the set of relations reindexed
--- with their new tablespace.
-CREATE TABLE tbspace_reindex_part (c1 int, c2 int) PARTITION BY RANGE (c1);
-CREATE TABLE tbspace_reindex_part_0 PARTITION OF tbspace_reindex_part
-  FOR VALUES FROM (0) TO (10) PARTITION BY list (c2);
-CREATE TABLE tbspace_reindex_part_0_1 PARTITION OF tbspace_reindex_part_0
-  FOR VALUES IN (1);
-CREATE TABLE tbspace_reindex_part_0_2 PARTITION OF tbspace_reindex_part_0
-  FOR VALUES IN (2);
--- This partitioned table will have no partitions.
-CREATE TABLE tbspace_reindex_part_10 PARTITION OF tbspace_reindex_part
-  FOR VALUES FROM (10) TO (20) PARTITION BY list (c2);
--- Create some partitioned indexes
-CREATE INDEX tbspace_reindex_part_index ON ONLY tbspace_reindex_part (c1);
-CREATE INDEX tbspace_reindex_part_index_0 ON ONLY tbspace_reindex_part_0 (c1);
-ALTER INDEX tbspace_reindex_part_index ATTACH PARTITION tbspace_reindex_part_index_0;
--- This partitioned index will have no partitions.
-CREATE INDEX tbspace_reindex_part_index_10 ON ONLY tbspace_reindex_part_10 (c1);
-ALTER INDEX tbspace_reindex_part_index ATTACH PARTITION tbspace_reindex_part_index_10;
-CREATE INDEX tbspace_reindex_part_index_0_1 ON ONLY tbspace_reindex_part_0_1 (c1);
-ALTER INDEX tbspace_reindex_part_index_0 ATTACH PARTITION tbspace_reindex_part_index_0_1;
-CREATE INDEX tbspace_reindex_part_index_0_2 ON ONLY tbspace_reindex_part_0_2 (c1);
-ALTER INDEX tbspace_reindex_part_index_0 ATTACH PARTITION tbspace_reindex_part_index_0_2;
-SELECT relid, parentrelid, level FROM pg_partition_tree('tbspace_reindex_part_index')
-  ORDER BY relid, level;
-             relid              |         parentrelid          | level 
---------------------------------+------------------------------+-------
- tbspace_reindex_part_index     |                              |     0
- tbspace_reindex_part_index_0   | tbspace_reindex_part_index   |     1
- tbspace_reindex_part_index_10  | tbspace_reindex_part_index   |     1
- tbspace_reindex_part_index_0_1 | tbspace_reindex_part_index_0 |     2
- tbspace_reindex_part_index_0_2 | tbspace_reindex_part_index_0 |     2
-(5 rows)
-
--- Track the original tablespace, relfilenode and OID of each index
--- in the tree.
-CREATE TEMP TABLE reindex_temp_before AS
-  SELECT oid, relname, relfilenode, reltablespace
-    FROM pg_class
-    WHERE relname ~ 'tbspace_reindex_part_index';
-REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE tbspace_reindex_part;
--- REINDEX CONCURRENTLY changes the OID of the old relation, hence a check
--- based on the relation name below.
-SELECT b.relname,
-       CASE WHEN a.relfilenode = b.relfilenode THEN 'relfilenode is unchanged'
-            ELSE 'relfilenode has changed' END AS filenode,
-       CASE WHEN a.reltablespace = b.reltablespace THEN 'reltablespace is unchanged'
-            ELSE 'reltablespace has changed' END AS tbspace
-  FROM reindex_temp_before b JOIN pg_class a ON b.relname = a.relname
-  ORDER BY 1;
-            relname             |         filenode         |          tbspace           
---------------------------------+--------------------------+----------------------------
- tbspace_reindex_part_index     | relfilenode is unchanged | reltablespace is unchanged
- tbspace_reindex_part_index_0   | relfilenode is unchanged | reltablespace is unchanged
- tbspace_reindex_part_index_0_1 | relfilenode has changed  | reltablespace has changed
- tbspace_reindex_part_index_0_2 | relfilenode has changed  | reltablespace has changed
- tbspace_reindex_part_index_10  | relfilenode is unchanged | reltablespace is unchanged
-(5 rows)
-
-DROP TABLE tbspace_reindex_part;
--- create a schema we can use
-CREATE SCHEMA testschema;
--- try a table
-CREATE TABLE testschema.foo (i int) TABLESPACE regress_tblspace;
-SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c
-  where c.reltablespace = t.oid AND c.relname = 'foo';
- relname |     spcname      
----------+------------------
- foo     | regress_tblspace
-(1 row)
-
-INSERT INTO testschema.foo VALUES(1);
-INSERT INTO testschema.foo VALUES(2);
--- tables from dynamic sources
-CREATE TABLE testschema.asselect TABLESPACE regress_tblspace AS SELECT 1;
-SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c
-  where c.reltablespace = t.oid AND c.relname = 'asselect';
- relname  |     spcname      
-----------+------------------
- asselect | regress_tblspace
-(1 row)
-
-PREPARE selectsource(int) AS SELECT $1;
-CREATE TABLE testschema.asexecute TABLESPACE regress_tblspace
-  AS EXECUTE selectsource(2);
-SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c
-  where c.reltablespace = t.oid AND c.relname = 'asexecute';
-  relname  |     spcname      
------------+------------------
- asexecute | regress_tblspace
-(1 row)
-
--- index
-CREATE INDEX foo_idx on testschema.foo(i) TABLESPACE regress_tblspace;
-SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c
-  where c.reltablespace = t.oid AND c.relname = 'foo_idx';
- relname |     spcname      
----------+------------------
- foo_idx | regress_tblspace
-(1 row)
-
--- check \d output
-\d testschema.foo
-              Table "testschema.foo"
- Column |  Type   | Collation | Nullable | Default 
---------+---------+-----------+----------+---------
- i      | integer |           |          | 
-Indexes:
-    "foo_idx" btree (i), tablespace "regress_tblspace"
-Tablespace: "regress_tblspace"
-
-\d testschema.foo_idx
-      Index "testschema.foo_idx"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- i      | integer | yes  | i
-btree, for table "testschema.foo"
-Tablespace: "regress_tblspace"
-
---
--- partitioned table
---
-CREATE TABLE testschema.part (a int) PARTITION BY LIST (a);
-SET default_tablespace TO pg_global;
-CREATE TABLE testschema.part_1 PARTITION OF testschema.part FOR VALUES IN (1);
-ERROR: only shared relations can be placed in pg_global tablespace
-RESET default_tablespace;
-CREATE TABLE testschema.part_1 PARTITION OF testschema.part FOR VALUES IN (1);
-SET default_tablespace TO regress_tblspace;
-CREATE TABLE testschema.part_2 PARTITION OF testschema.part FOR VALUES IN (2);
-SET default_tablespace TO pg_global;
-CREATE TABLE testschema.part_3 PARTITION OF testschema.part FOR VALUES IN (3);
-ERROR: only shared relations can be placed in pg_global tablespace
-ALTER TABLE testschema.part SET TABLESPACE regress_tblspace;
-CREATE TABLE testschema.part_3 PARTITION OF testschema.part FOR VALUES IN (3);
-CREATE TABLE testschema.part_4 PARTITION OF testschema.part FOR VALUES IN (4)
-  TABLESPACE pg_default;
-CREATE TABLE testschema.part_56 PARTITION OF testschema.part FOR VALUES IN (5, 6)
-  PARTITION BY LIST (a);
-ALTER TABLE testschema.part SET TABLESPACE pg_default;
-CREATE TABLE testschema.part_78 PARTITION OF testschema.part FOR VALUES IN (7, 8)
-  PARTITION BY LIST (a);
-ERROR: only shared relations can be placed in pg_global tablespace
-CREATE TABLE testschema.part_910 PARTITION OF testschema.part FOR VALUES IN (9, 10)
-  PARTITION BY LIST (a) TABLESPACE regress_tblspace;
-RESET default_tablespace;
-CREATE TABLE testschema.part_78 PARTITION OF testschema.part FOR VALUES IN (7, 8)
-  PARTITION BY LIST (a);
-SELECT relname, spcname FROM pg_catalog.pg_class c
-  JOIN pg_catalog.pg_namespace n ON (c.relnamespace = n.oid)
-  LEFT JOIN pg_catalog.pg_tablespace t ON c.reltablespace = t.oid
-  where c.relname LIKE 'part%' AND n.nspname = 'testschema' order by relname;
- relname  |     spcname      
-----------+------------------
- part     | 
- part_1   | 
- part_2   | regress_tblspace
- part_3   | regress_tblspace
- part_4   | 
- part_56  | regress_tblspace
- part_78  | 
- part_910 | regress_tblspace
-(8 rows)
-
-RESET default_tablespace;
-DROP TABLE testschema.part;
--- partitioned index
-CREATE TABLE testschema.part (a int) PARTITION BY LIST (a);
-CREATE TABLE testschema.part1 PARTITION OF testschema.part FOR VALUES IN (1);
-CREATE INDEX part_a_idx ON testschema.part (a) TABLESPACE regress_tblspace;
-CREATE TABLE testschema.part2 PARTITION OF testschema.part FOR VALUES IN (2);
-SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c
-  where c.reltablespace = t.oid AND c.relname LIKE 'part%_idx' ORDER BY relname;
-   relname   |     spcname      
--------------+------------------
- part1_a_idx | regress_tblspace
- part2_a_idx | regress_tblspace
- part_a_idx  | regress_tblspace
-(3 rows)
-
-\d testschema.part
-      Partitioned table "testschema.part"
- Column |  Type   | Collation | Nullable | Default 
---------+---------+-----------+----------+---------
- a      | integer |           |          | 
-Partition key: LIST (a)
-Indexes:
-    "part_a_idx" btree (a), tablespace "regress_tblspace"
-Number of partitions: 2 (Use \d+ to list them.)
-
-\d+ testschema.part
-                           Partitioned table "testschema.part"
- Column |  Type   | Collation | Nullable | Default | Storage | Stats target | Description 
---------+---------+-----------+----------+---------+---------+--------------+-------------
- a      | integer |           |          |         | plain   |              | 
-Partition key: LIST (a)
-Indexes:
-    "part_a_idx" btree (a), tablespace "regress_tblspace"
-Partitions: testschema.part1 FOR VALUES IN (1),
-            testschema.part2 FOR VALUES IN (2)
-
-\d testschema.part1
-             Table "testschema.part1"
- Column |  Type   | Collation | Nullable | Default 
---------+---------+-----------+----------+---------
- a      | integer |           |          | 
-Partition of: testschema.part FOR VALUES IN (1)
-Indexes:
-    "part1_a_idx" btree (a), tablespace "regress_tblspace"
-
-\d+ testschema.part1
-                                Table "testschema.part1"
- Column |  Type   | Collation | Nullable | Default | Storage | Stats target | Description 
---------+---------+-----------+----------+---------+---------+--------------+-------------
- a      | integer |           |          |         | plain   |              | 
-Partition of: testschema.part FOR VALUES IN (1)
-Partition constraint: ((a IS NOT NULL) AND (a = 1))
-Indexes:
-    "part1_a_idx" btree (a), tablespace "regress_tblspace"
-
-\d testschema.part_a_idx
-Partitioned index "testschema.part_a_idx"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- a      | integer | yes  | a
-btree, for table "testschema.part"
-Number of partitions: 2 (Use \d+ to list them.)
-Tablespace: "regress_tblspace"
-
-\d+ testschema.part_a_idx
-            Partitioned index "testschema.part_a_idx"
- Column |  Type   | Key? | Definition | Storage | Stats target 
---------+---------+------+------------+---------+--------------
- a      | integer | yes  | a          | plain   | 
-btree, for table "testschema.part"
-Partitions: testschema.part1_a_idx,
-            testschema.part2_a_idx
-Tablespace: "regress_tblspace"
-
--- partitioned rels cannot specify the default tablespace.  These fail:
-CREATE TABLE testschema.dflt (a int PRIMARY KEY) PARTITION BY LIST (a) TABLESPACE pg_default;
-ERROR: cannot specify default tablespace for partitioned relations
-CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE pg_default) PARTITION BY LIST (a);
-ERROR: cannot specify default tablespace for partitioned relations
-SET default_tablespace TO 'pg_default';
-CREATE TABLE testschema.dflt (a int PRIMARY KEY) PARTITION BY LIST (a) TABLESPACE regress_tblspace;
-ERROR: cannot specify default tablespace for partitioned relations
-CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE regress_tblspace) PARTITION BY LIST (a);
-ERROR: cannot specify default tablespace for partitioned relations
--- but these work:
-CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE regress_tblspace) PARTITION BY LIST (a) TABLESPACE regress_tblspace;
-SET default_tablespace TO '';
-CREATE TABLE testschema.dflt2 (a int PRIMARY KEY) PARTITION BY LIST (a);
-DROP TABLE testschema.dflt, testschema.dflt2;
--- check that default_tablespace doesn't affect ALTER TABLE index rebuilds
-CREATE TABLE testschema.test_default_tab(id bigint) TABLESPACE regress_tblspace;
-INSERT INTO testschema.test_default_tab VALUES (1);
-CREATE INDEX test_index1 on testschema.test_default_tab (id);
-CREATE INDEX test_index2 on testschema.test_default_tab (id) TABLESPACE regress_tblspace;
-ALTER TABLE testschema.test_default_tab ADD CONSTRAINT test_index3 PRIMARY KEY (id);
-ALTER TABLE testschema.test_default_tab ADD CONSTRAINT test_index4 UNIQUE (id) USING INDEX TABLESPACE regress_tblspace;
-\d testschema.test_index1
- Index "testschema.test_index1"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-btree, for table "testschema.test_default_tab"
-
-\d testschema.test_index2
- Index "testschema.test_index2"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-btree, for table "testschema.test_default_tab"
-Tablespace: "regress_tblspace"
-
-\d testschema.test_index3
- Index "testschema.test_index3"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-primary key, btree, for table "testschema.test_default_tab"
-
-\d testschema.test_index4
- Index "testschema.test_index4"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-unique, btree, for table "testschema.test_default_tab"
-Tablespace: "regress_tblspace"
-
--- use a custom tablespace for default_tablespace
-SET default_tablespace TO regress_tblspace;
--- tablespace should not change if no rewrite
-ALTER TABLE testschema.test_default_tab ALTER id TYPE bigint;
-\d testschema.test_index1
- Index "testschema.test_index1"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-btree, for table "testschema.test_default_tab"
-
-\d testschema.test_index2
- Index "testschema.test_index2"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-btree, for table "testschema.test_default_tab"
-Tablespace: "regress_tblspace"
-
-\d testschema.test_index3
- Index "testschema.test_index3"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-primary key, btree, for table "testschema.test_default_tab"
-
-\d testschema.test_index4
- Index "testschema.test_index4"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-unique, btree, for table "testschema.test_default_tab"
-Tablespace: "regress_tblspace"
-
-SELECT * FROM testschema.test_default_tab;
- id 
-----
-  1
-(1 row)
-
--- tablespace should not change even if there is an index rewrite
-ALTER TABLE testschema.test_default_tab ALTER id TYPE int;
-\d testschema.test_index1
- Index "testschema.test_index1"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- id     | integer | yes  | id
-btree, for table "testschema.test_default_tab"
-
-\d testschema.test_index2
- Index "testschema.test_index2"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- id     | integer | yes  | id
-btree, for table "testschema.test_default_tab"
-Tablespace: "regress_tblspace"
-
-\d testschema.test_index3
- Index "testschema.test_index3"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- id     | integer | yes  | id
-primary key, btree, for table "testschema.test_default_tab"
-
-\d testschema.test_index4
- Index "testschema.test_index4"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- id     | integer | yes  | id
-unique, btree, for table "testschema.test_default_tab"
-Tablespace: "regress_tblspace"
-
-SELECT * FROM testschema.test_default_tab;
- id 
-----
-  1
-(1 row)
-
--- now use the default tablespace for default_tablespace
-SET default_tablespace TO '';
--- tablespace should not change if no rewrite
-ALTER TABLE testschema.test_default_tab ALTER id TYPE int;
-\d testschema.test_index1
- Index "testschema.test_index1"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- id     | integer | yes  | id
-btree, for table "testschema.test_default_tab"
-
-\d testschema.test_index2
- Index "testschema.test_index2"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- id     | integer | yes  | id
-btree, for table "testschema.test_default_tab"
-Tablespace: "regress_tblspace"
-
-\d testschema.test_index3
- Index "testschema.test_index3"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- id     | integer | yes  | id
-primary key, btree, for table "testschema.test_default_tab"
-
-\d testschema.test_index4
- Index "testschema.test_index4"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- id     | integer | yes  | id
-unique, btree, for table "testschema.test_default_tab"
-Tablespace: "regress_tblspace"
-
--- tablespace should not change even if there is an index rewrite
-ALTER TABLE testschema.test_default_tab ALTER id TYPE bigint;
-\d testschema.test_index1
- Index "testschema.test_index1"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-btree, for table "testschema.test_default_tab"
-
-\d testschema.test_index2
- Index "testschema.test_index2"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-btree, for table "testschema.test_default_tab"
-Tablespace: "regress_tblspace"
-
-\d testschema.test_index3
- Index "testschema.test_index3"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-primary key, btree, for table "testschema.test_default_tab"
-
-\d testschema.test_index4
- Index "testschema.test_index4"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-unique, btree, for table "testschema.test_default_tab"
-Tablespace: "regress_tblspace"
-
-DROP TABLE testschema.test_default_tab;
--- check that default_tablespace doesn't affect ALTER TABLE index rebuilds
--- (this time with a partitioned table)
-CREATE TABLE testschema.test_default_tab_p(id bigint, val bigint)
-  PARTITION BY LIST (id) TABLESPACE regress_tblspace;
-CREATE TABLE testschema.test_default_tab_p1 PARTITION OF testschema.test_default_tab_p
-  FOR VALUES IN (1);
-INSERT INTO testschema.test_default_tab_p VALUES (1);
-CREATE INDEX test_index1 on testschema.test_default_tab_p (val);
-CREATE INDEX test_index2 on testschema.test_default_tab_p (val) TABLESPACE regress_tblspace;
-ALTER TABLE testschema.test_default_tab_p ADD CONSTRAINT test_index3 PRIMARY KEY (id);
-ALTER TABLE testschema.test_default_tab_p ADD CONSTRAINT test_index4 UNIQUE (id) USING INDEX TABLESPACE regress_tblspace;
-\d testschema.test_index1
-Partitioned index "testschema.test_index1"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- val    | bigint | yes  | val
-btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-
-\d testschema.test_index2
-Partitioned index "testschema.test_index2"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- val    | bigint | yes  | val
-btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-Tablespace: "regress_tblspace"
-
-\d testschema.test_index3
-Partitioned index "testschema.test_index3"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-primary key, btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-
-\d testschema.test_index4
-Partitioned index "testschema.test_index4"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-unique, btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-Tablespace: "regress_tblspace"
-
--- use a custom tablespace for default_tablespace
-SET default_tablespace TO regress_tblspace;
--- tablespace should not change if no rewrite
-ALTER TABLE testschema.test_default_tab_p ALTER val TYPE bigint;
-\d testschema.test_index1
-Partitioned index "testschema.test_index1"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- val    | bigint | yes  | val
-btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-
-\d testschema.test_index2
-Partitioned index "testschema.test_index2"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- val    | bigint | yes  | val
-btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-Tablespace: "regress_tblspace"
-
-\d testschema.test_index3
-Partitioned index "testschema.test_index3"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-primary key, btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-
-\d testschema.test_index4
-Partitioned index "testschema.test_index4"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-unique, btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-Tablespace: "regress_tblspace"
-
-SELECT * FROM testschema.test_default_tab_p;
- id | val 
-----+-----
-  1 |    
-(1 row)
-
--- tablespace should not change even if there is an index rewrite
-ALTER TABLE testschema.test_default_tab_p ALTER val TYPE int;
-\d testschema.test_index1
-Partitioned index "testschema.test_index1"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- val    | integer | yes  | val
-btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-
-\d testschema.test_index2
-Partitioned index "testschema.test_index2"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- val    | integer | yes  | val
-btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-Tablespace: "regress_tblspace"
-
-\d testschema.test_index3
-Partitioned index "testschema.test_index3"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-primary key, btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-
-\d testschema.test_index4
-Partitioned index "testschema.test_index4"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-unique, btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-Tablespace: "regress_tblspace"
-
-SELECT * FROM testschema.test_default_tab_p;
- id | val 
-----+-----
-  1 |    
-(1 row)
-
--- now use the default tablespace for default_tablespace
-SET default_tablespace TO '';
--- tablespace should not change if no rewrite
-ALTER TABLE testschema.test_default_tab_p ALTER val TYPE int;
-\d testschema.test_index1
-Partitioned index "testschema.test_index1"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- val    | integer | yes  | val
-btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-
-\d testschema.test_index2
-Partitioned index "testschema.test_index2"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- val    | integer | yes  | val
-btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-Tablespace: "regress_tblspace"
-
-\d testschema.test_index3
-Partitioned index "testschema.test_index3"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-primary key, btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-
-\d testschema.test_index4
-Partitioned index "testschema.test_index4"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-unique, btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-Tablespace: "regress_tblspace"
-
--- tablespace should not change even if there is an index rewrite
-ALTER TABLE testschema.test_default_tab_p ALTER val TYPE bigint;
-\d testschema.test_index1
-Partitioned index "testschema.test_index1"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- val    | bigint | yes  | val
-btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-
-\d testschema.test_index2
-Partitioned index "testschema.test_index2"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- val    | bigint | yes  | val
-btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-Tablespace: "regress_tblspace"
-
-\d testschema.test_index3
-Partitioned index "testschema.test_index3"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-primary key, btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-
-\d testschema.test_index4
-Partitioned index "testschema.test_index4"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- id     | bigint | yes  | id
-unique, btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-Tablespace: "regress_tblspace"
-
-DROP TABLE testschema.test_default_tab_p;
--- check that default_tablespace affects index additions in ALTER TABLE
-CREATE TABLE testschema.test_tab(id int) TABLESPACE regress_tblspace;
-INSERT INTO testschema.test_tab VALUES (1);
-SET default_tablespace TO regress_tblspace;
-ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_unique UNIQUE (id);
-SET default_tablespace TO '';
-ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_pkey PRIMARY KEY (id);
-\d testschema.test_tab_unique
- Index "testschema.test_tab_unique"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- id     | integer | yes  | id
-unique, btree, for table "testschema.test_tab"
-Tablespace: "regress_tblspace"
-
-\d testschema.test_tab_pkey
- Index "testschema.test_tab_pkey"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- id     | integer | yes  | id
-primary key, btree, for table "testschema.test_tab"
-
-SELECT * FROM testschema.test_tab;
- id 
-----
-  1
-(1 row)
-
-DROP TABLE testschema.test_tab;
--- check that default_tablespace is handled correctly by multi-command
--- ALTER TABLE that includes a tablespace-preserving rewrite
-CREATE TABLE testschema.test_tab(a int, b int, c int);
-SET default_tablespace TO regress_tblspace;
-ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_unique UNIQUE (a);
-CREATE INDEX test_tab_a_idx ON testschema.test_tab (a);
-SET default_tablespace TO '';
-CREATE INDEX test_tab_b_idx ON testschema.test_tab (b);
-\d testschema.test_tab_unique
- Index "testschema.test_tab_unique"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- a      | integer | yes  | a
-unique, btree, for table "testschema.test_tab"
-Tablespace: "regress_tblspace"
-
-\d testschema.test_tab_a_idx
- Index "testschema.test_tab_a_idx"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- a      | integer | yes  | a
-btree, for table "testschema.test_tab"
-Tablespace: "regress_tblspace"
-
-\d testschema.test_tab_b_idx
- Index "testschema.test_tab_b_idx"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- b      | integer | yes  | b
-btree, for table "testschema.test_tab"
-
-ALTER TABLE testschema.test_tab ALTER b TYPE bigint, ADD UNIQUE (c);
-\d testschema.test_tab_unique
- Index "testschema.test_tab_unique"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- a      | integer | yes  | a
-unique, btree, for table "testschema.test_tab"
-Tablespace: "regress_tblspace"
-
-\d testschema.test_tab_a_idx
- Index "testschema.test_tab_a_idx"
- Column |  Type   | Key? | Definition 
---------+---------+------+------------
- a      | integer | yes  | a
-btree, for table "testschema.test_tab"
-Tablespace: "regress_tblspace"
-
-\d testschema.test_tab_b_idx
- Index "testschema.test_tab_b_idx"
- Column |  Type  | Key? | Definition 
---------+--------+------+------------
- b      | bigint | yes  | b
-btree, for table "testschema.test_tab"
-
-DROP TABLE testschema.test_tab;
--- let's try moving a table from one place to another
-CREATE TABLE testschema.atable AS VALUES (1), (2);
-CREATE UNIQUE INDEX anindex ON testschema.atable(column1);
-ALTER TABLE testschema.atable SET TABLESPACE regress_tblspace;
-ALTER INDEX testschema.anindex SET TABLESPACE regress_tblspace;
-ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_global;
-ERROR: only shared relations can be placed in pg_global tablespace
-ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_default;
-ALTER INDEX testschema.part_a_idx SET TABLESPACE regress_tblspace;
-INSERT INTO testschema.atable VALUES(3);	-- ok
-INSERT INTO testschema.atable VALUES(1);	-- fail (checks index)
-ERROR: duplicate key value violates unique constraint "anindex"
-DETAIL: Key (column1)=(1) already exists.
-SELECT COUNT(*) FROM testschema.atable;		-- checks heap
- count 
--------
-     3
-(1 row)
-
--- let's try moving a materialized view from one place to another
-CREATE MATERIALIZED VIEW testschema.amv AS SELECT * FROM testschema.atable;
-ALTER MATERIALIZED VIEW testschema.amv SET TABLESPACE regress_tblspace;
-REFRESH MATERIALIZED VIEW testschema.amv;
-SELECT COUNT(*) FROM testschema.amv;
- count 
--------
-     3
-(1 row)
-
--- Will fail with bad path
-CREATE TABLESPACE regress_badspace LOCATION '/no/such/location';
-ERROR: directory "/no/such/location" does not exist
--- No such tablespace
-CREATE TABLE bar (i int) TABLESPACE regress_nosuchspace;
-ERROR: tablespace "regress_nosuchspace" does not exist
--- Fail, in use for some partitioned object
-DROP TABLESPACE regress_tblspace;
-ERROR: tablespace "regress_tblspace" cannot be dropped because some objects depend on it
-DETAIL: tablespace for index testschema.part_a_idx
-ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_default;
--- Fail, not empty
-DROP TABLESPACE regress_tblspace;
-ERROR: tablespace "regress_tblspace" is not empty
--- Adequate cache initialization before GRANT
-\c -
-BEGIN;
-GRANT ALL ON TABLESPACE regress_tblspace TO PUBLIC;
-ROLLBACK;
-CREATE ROLE regress_tablespace_user1 login;
-CREATE ROLE regress_tablespace_user2 login;
-GRANT USAGE ON SCHEMA testschema TO regress_tablespace_user2;
-ALTER TABLESPACE regress_tblspace OWNER TO regress_tablespace_user1;
-CREATE TABLE testschema.tablespace_acl (c int);
--- new owner lacks permission to create this index from scratch
-CREATE INDEX k ON testschema.tablespace_acl (c) TABLESPACE regress_tblspace;
-ALTER TABLE testschema.tablespace_acl OWNER TO regress_tablespace_user2;
-SET SESSION ROLE regress_tablespace_user2;
-CREATE TABLE tablespace_table (i int) TABLESPACE regress_tblspace; -- fail
-ERROR: permission denied for tablespace regress_tblspace
-ALTER TABLE testschema.tablespace_acl ALTER c TYPE bigint;
-REINDEX (TABLESPACE regress_tblspace) TABLE tablespace_table; -- fail
-ERROR: permission denied for tablespace regress_tblspace
-REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE tablespace_table; -- fail
-ERROR: permission denied for tablespace regress_tblspace
-RESET ROLE;
-ALTER TABLESPACE regress_tblspace RENAME TO regress_tblspace_renamed;
-ALTER TABLE ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default;
-ALTER INDEX ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default;
-ALTER MATERIALIZED VIEW ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default;
--- Should show notice that nothing was done
-ALTER TABLE ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default;
-NOTICE: no matching relations in tablespace "regress_tblspace_renamed" found
-ALTER MATERIALIZED VIEW ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default;
-NOTICE: no matching relations in tablespace "regress_tblspace_renamed" found
--- Should succeed
-DROP TABLESPACE regress_tblspace_renamed;
-DROP SCHEMA testschema CASCADE;
-NOTICE: drop cascades to 7 other objects
-DETAIL: drop cascades to table testschema.foo
-drop cascades to table testschema.asselect
-drop cascades to table testschema.asexecute
-drop cascades to table testschema.part
-drop cascades to table testschema.atable
-drop cascades to materialized view testschema.amv
-drop cascades to table testschema.tablespace_acl
-DROP ROLE regress_tablespace_user1;
-DROP ROLE regress_tablespace_user2;
+psql: error: connection to server on socket "/var/folders/xf/_tm0f94d66n8kr12tqwrylrr0000gn/T/bWx31REu5z/.s.PGSQL.17844" failed: No such file or directory
+	Is the server running locally and accepting connections on that socket?
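As with the other files, the tablespace.out hunk was lost to the crash rather than failing on its own: the results file stops at the psql connection error after only two lines. Everything in this test file hinges on the developer-only in-place tablespace mode enabled near its top; the sketch below reproduces just that setup (the tablespace name is illustrative, and a superuser session is assumed):

-- Developer-only mode: an empty LOCATION creates the tablespace as a plain
-- directory under pg_tblspc instead of a symlink, so the test can run
-- without needing a writable absolute path on the build host.
SET allow_in_place_tablespaces = true;
CREATE TABLESPACE demo_tblspace LOCATION '';
-- Returns a relative pg_tblspc/<oid> path for an in-place tablespace.
SELECT pg_tablespace_location(oid)
  FROM pg_tablespace
 WHERE spcname = 'demo_tblspace';
DROP TABLESPACE demo_tblspace;

This is why the expected output above shows pg_tablespace_location() yielding a relative pg_tblspc/NNN path (with the OID masked by regexp_replace) instead of an absolute directory.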