diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/partition_join.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/partition_join.out --- /tmp/cirrus-ci-build/src/test/regress/expected/partition_join.out 2024-09-11 00:19:52.093233906 +0000 +++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/partition_join.out 2024-09-11 00:29:43.224157045 +0000 @@ -2262,2970 +2262,10 @@ CREATE TABLE prt2_n (a int, b int, c text) PARTITION BY LIST(c); CREATE TABLE prt2_n_p1 PARTITION OF prt2_n FOR VALUES IN ('0000', '0003', '0004', '0010', '0006', '0007'); CREATE TABLE prt2_n_p2 PARTITION OF prt2_n FOR VALUES IN ('0001', '0005', '0002', '0009', '0008', '0011'); -INSERT INTO prt2_n SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; -ANALYZE prt2_n; -CREATE TABLE prt3_n (a int, b int, c text) PARTITION BY LIST(c); -CREATE TABLE prt3_n_p1 PARTITION OF prt3_n FOR VALUES IN ('0000', '0004', '0006', '0007'); -CREATE TABLE prt3_n_p2 PARTITION OF prt3_n FOR VALUES IN ('0001', '0002', '0008', '0010'); -CREATE TABLE prt3_n_p3 PARTITION OF prt3_n FOR VALUES IN ('0003', '0005', '0009', '0011'); -INSERT INTO prt2_n SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; -ANALYZE prt3_n; -CREATE TABLE prt4_n (a int, b int, c text) PARTITION BY RANGE(a); -CREATE TABLE prt4_n_p1 PARTITION OF prt4_n FOR VALUES FROM (0) TO (300); -CREATE TABLE prt4_n_p2 PARTITION OF prt4_n FOR VALUES FROM (300) TO (500); -CREATE TABLE prt4_n_p3 PARTITION OF prt4_n FOR VALUES FROM (500) TO (600); -INSERT INTO prt4_n SELECT i, i, to_char(i, 'FM0000') FROM generate_series(0, 599, 2) i; -ANALYZE prt4_n; --- partitionwise join can not be applied if the partition ranges differ -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt4_n t2 WHERE t1.a = t2.a; - QUERY PLAN ----------------------------------------------- - Hash Join - Hash Cond: (t1.a = t2.a) - -> Append - -> Seq Scan on prt1_p1 t1_1 - -> Seq Scan on prt1_p2 t1_2 - -> Seq Scan on prt1_p3 t1_3 - -> Hash - -> Append - -> Seq Scan on prt4_n_p1 t2_1 - -> Seq Scan on prt4_n_p2 t2_2 - -> Seq Scan on prt4_n_p3 t2_3 -(11 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt4_n t2, prt2 t3 WHERE t1.a = t2.a and t1.a = t3.b; - QUERY PLAN --------------------------------------------------------- - Hash Join - Hash Cond: (t2.a = t1.a) - -> Append - -> Seq Scan on prt4_n_p1 t2_1 - -> Seq Scan on prt4_n_p2 t2_2 - -> Seq Scan on prt4_n_p3 t2_3 - -> Hash - -> Append - -> Hash Join - Hash Cond: (t1_1.a = t3_1.b) - -> Seq Scan on prt1_p1 t1_1 - -> Hash - -> Seq Scan on prt2_p1 t3_1 - -> Hash Join - Hash Cond: (t1_2.a = t3_2.b) - -> Seq Scan on prt1_p2 t1_2 - -> Hash - -> Seq Scan on prt2_p2 t3_2 - -> Hash Join - Hash Cond: (t1_3.a = t3_3.b) - -> Seq Scan on prt1_p3 t1_3 - -> Hash - -> Seq Scan on prt2_p3 t3_3 -(23 rows) - --- partitionwise join can not be applied if there are no equi-join conditions --- between partition keys -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1 LEFT JOIN prt2 t2 ON (t1.a < t2.b); - QUERY PLAN ---------------------------------------------------------- - Nested Loop Left Join - -> Append - -> Seq Scan on prt1_p1 t1_1 - -> Seq Scan on prt1_p2 t1_2 - -> Seq Scan on prt1_p3 t1_3 - -> Append - -> Index Scan using iprt2_p1_b on prt2_p1 t2_1 - Index Cond: (b > t1.a) - -> Index Scan using iprt2_p2_b on prt2_p2 t2_2 - Index Cond: (b > t1.a) - -> Index Scan using iprt2_p3_b on prt2_p3 t2_3 - Index Cond: (b > t1.a) -(12 rows) - 
--- equi-join with join condition on partial keys does not qualify for --- partitionwise join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_m t1, prt2_m t2 WHERE t1.a = (t2.b + t2.a)/2; - QUERY PLAN ----------------------------------------------- - Hash Join - Hash Cond: (((t2.b + t2.a) / 2) = t1.a) - -> Append - -> Seq Scan on prt2_m_p1 t2_1 - -> Seq Scan on prt2_m_p2 t2_2 - -> Seq Scan on prt2_m_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on prt1_m_p1 t1_1 - -> Seq Scan on prt1_m_p2 t1_2 - -> Seq Scan on prt1_m_p3 t1_3 -(11 rows) - --- equi-join between out-of-order partition key columns does not qualify for --- partitionwise join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_m t1 LEFT JOIN prt2_m t2 ON t1.a = t2.b; - QUERY PLAN ----------------------------------------------- - Hash Left Join - Hash Cond: (t1.a = t2.b) - -> Append - -> Seq Scan on prt1_m_p1 t1_1 - -> Seq Scan on prt1_m_p2 t1_2 - -> Seq Scan on prt1_m_p3 t1_3 - -> Hash - -> Append - -> Seq Scan on prt2_m_p1 t2_1 - -> Seq Scan on prt2_m_p2 t2_2 - -> Seq Scan on prt2_m_p3 t2_3 -(11 rows) - --- equi-join between non-key columns does not qualify for partitionwise join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_m t1 LEFT JOIN prt2_m t2 ON t1.c = t2.c; - QUERY PLAN ----------------------------------------------- - Hash Left Join - Hash Cond: (t1.c = t2.c) - -> Append - -> Seq Scan on prt1_m_p1 t1_1 - -> Seq Scan on prt1_m_p2 t1_2 - -> Seq Scan on prt1_m_p3 t1_3 - -> Hash - -> Append - -> Seq Scan on prt2_m_p1 t2_1 - -> Seq Scan on prt2_m_p2 t2_2 - -> Seq Scan on prt2_m_p3 t2_3 -(11 rows) - --- partitionwise join can not be applied for a join between list and range --- partitioned tables -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_n t1 LEFT JOIN prt2_n t2 ON (t1.c = t2.c); - QUERY PLAN ----------------------------------------------- - Hash Right Join - Hash Cond: (t2.c = (t1.c)::text) - -> Append - -> Seq Scan on prt2_n_p1 t2_1 - -> Seq Scan on prt2_n_p2 t2_2 - -> Hash - -> Append - -> Seq Scan on prt1_n_p1 t1_1 - -> Seq Scan on prt1_n_p2 t1_2 -(9 rows) - --- partitionwise join can not be applied between tables with different --- partition lists -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_n t1 JOIN prt2_n t2 ON (t1.c = t2.c) JOIN plt1 t3 ON (t1.c = t3.c); - QUERY PLAN ----------------------------------------------------------- - Hash Join - Hash Cond: (t2.c = (t1.c)::text) - -> Append - -> Seq Scan on prt2_n_p1 t2_1 - -> Seq Scan on prt2_n_p2 t2_2 - -> Hash - -> Hash Join - Hash Cond: (t3.c = (t1.c)::text) - -> Append - -> Seq Scan on plt1_p1 t3_1 - -> Seq Scan on plt1_p2 t3_2 - -> Seq Scan on plt1_p3 t3_3 - -> Hash - -> Append - -> Seq Scan on prt1_n_p1 t1_1 - -> Seq Scan on prt1_n_p2 t1_2 -(16 rows) - --- partitionwise join can not be applied for a join between key column and --- non-key column -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_n t1 FULL JOIN prt1 t2 ON (t1.c = t2.c); - QUERY PLAN ----------------------------------------------- - Hash Full Join - Hash Cond: ((t2.c)::text = (t1.c)::text) - -> Append - -> Seq Scan on prt1_p1 t2_1 - -> Seq Scan on prt1_p2 t2_2 - -> Seq Scan on prt1_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on prt1_n_p1 t1_1 - -> Seq Scan on prt1_n_p2 t1_2 -(10 rows) - --- --- Test some other plan types in a partitionwise join (unfortunately, --- we need larger tables to get the planner to choose these plan types) --- -create temp table prtx1 (a integer, b integer, c integer) - partition by range 
(a); -create temp table prtx1_1 partition of prtx1 for values from (1) to (11); -create temp table prtx1_2 partition of prtx1 for values from (11) to (21); -create temp table prtx1_3 partition of prtx1 for values from (21) to (31); -create temp table prtx2 (a integer, b integer, c integer) - partition by range (a); -create temp table prtx2_1 partition of prtx2 for values from (1) to (11); -create temp table prtx2_2 partition of prtx2 for values from (11) to (21); -create temp table prtx2_3 partition of prtx2 for values from (21) to (31); -insert into prtx1 select 1 + i%30, i, i - from generate_series(1,1000) i; -insert into prtx2 select 1 + i%30, i, i - from generate_series(1,500) i, generate_series(1,10) j; -create index on prtx2 (b); -create index on prtx2 (c); -analyze prtx1; -analyze prtx2; -explain (costs off) -select * from prtx1 -where not exists (select 1 from prtx2 - where prtx2.a=prtx1.a and prtx2.b=prtx1.b and prtx2.c=123) - and a<20 and c=120; - QUERY PLAN -------------------------------------------------------------- - Append - -> Nested Loop Anti Join - -> Seq Scan on prtx1_1 - Filter: ((a < 20) AND (c = 120)) - -> Bitmap Heap Scan on prtx2_1 - Recheck Cond: ((b = prtx1_1.b) AND (c = 123)) - Filter: (a = prtx1_1.a) - -> BitmapAnd - -> Bitmap Index Scan on prtx2_1_b_idx - Index Cond: (b = prtx1_1.b) - -> Bitmap Index Scan on prtx2_1_c_idx - Index Cond: (c = 123) - -> Nested Loop Anti Join - -> Seq Scan on prtx1_2 - Filter: ((a < 20) AND (c = 120)) - -> Bitmap Heap Scan on prtx2_2 - Recheck Cond: ((b = prtx1_2.b) AND (c = 123)) - Filter: (a = prtx1_2.a) - -> BitmapAnd - -> Bitmap Index Scan on prtx2_2_b_idx - Index Cond: (b = prtx1_2.b) - -> Bitmap Index Scan on prtx2_2_c_idx - Index Cond: (c = 123) -(23 rows) - -select * from prtx1 -where not exists (select 1 from prtx2 - where prtx2.a=prtx1.a and prtx2.b=prtx1.b and prtx2.c=123) - and a<20 and c=120; - a | b | c ----+-----+----- - 1 | 120 | 120 -(1 row) - -explain (costs off) -select * from prtx1 -where not exists (select 1 from prtx2 - where prtx2.a=prtx1.a and (prtx2.b=prtx1.b+1 or prtx2.c=99)) - and a<20 and c=91; - QUERY PLAN ------------------------------------------------------------------ - Append - -> Nested Loop Anti Join - -> Seq Scan on prtx1_1 - Filter: ((a < 20) AND (c = 91)) - -> Bitmap Heap Scan on prtx2_1 - Recheck Cond: ((b = (prtx1_1.b + 1)) OR (c = 99)) - Filter: (a = prtx1_1.a) - -> BitmapOr - -> Bitmap Index Scan on prtx2_1_b_idx - Index Cond: (b = (prtx1_1.b + 1)) - -> Bitmap Index Scan on prtx2_1_c_idx - Index Cond: (c = 99) - -> Nested Loop Anti Join - -> Seq Scan on prtx1_2 - Filter: ((a < 20) AND (c = 91)) - -> Bitmap Heap Scan on prtx2_2 - Recheck Cond: ((b = (prtx1_2.b + 1)) OR (c = 99)) - Filter: (a = prtx1_2.a) - -> BitmapOr - -> Bitmap Index Scan on prtx2_2_b_idx - Index Cond: (b = (prtx1_2.b + 1)) - -> Bitmap Index Scan on prtx2_2_c_idx - Index Cond: (c = 99) -(23 rows) - -select * from prtx1 -where not exists (select 1 from prtx2 - where prtx2.a=prtx1.a and (prtx2.b=prtx1.b+1 or prtx2.c=99)) - and a<20 and c=91; - a | b | c ----+----+---- - 2 | 91 | 91 -(1 row) - --- --- Test advanced partition-matching algorithm for partitioned join --- --- Tests for range-partitioned tables -CREATE TABLE prt1_adv (a int, b int, c varchar) PARTITION BY RANGE (a); -CREATE TABLE prt1_adv_p1 PARTITION OF prt1_adv FOR VALUES FROM (100) TO (200); -CREATE TABLE prt1_adv_p2 PARTITION OF prt1_adv FOR VALUES FROM (200) TO (300); -CREATE TABLE prt1_adv_p3 PARTITION OF prt1_adv FOR VALUES FROM (300) TO (400); -CREATE 
INDEX prt1_adv_a_idx ON prt1_adv (a); -INSERT INTO prt1_adv SELECT i, i % 25, to_char(i, 'FM0000') FROM generate_series(100, 399) i; -ANALYZE prt1_adv; -CREATE TABLE prt2_adv (a int, b int, c varchar) PARTITION BY RANGE (b); -CREATE TABLE prt2_adv_p1 PARTITION OF prt2_adv FOR VALUES FROM (100) TO (150); -CREATE TABLE prt2_adv_p2 PARTITION OF prt2_adv FOR VALUES FROM (200) TO (300); -CREATE TABLE prt2_adv_p3 PARTITION OF prt2_adv FOR VALUES FROM (350) TO (500); -CREATE INDEX prt2_adv_b_idx ON prt2_adv (b); -INSERT INTO prt2_adv_p1 SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(100, 149) i; -INSERT INTO prt2_adv_p2 SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(200, 299) i; -INSERT INTO prt2_adv_p3 SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(350, 499) i; -ANALYZE prt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Hash Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Hash Join - Hash Cond: (t2_3.b = t1_3.a) - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(21 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 100 | 0100 | 100 | 0100 - 125 | 0125 | 125 | 0125 - 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 - 350 | 0350 | 350 | 0350 - 375 | 0375 | 375 | 0375 -(8 rows) - --- semi join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt1_adv t1 WHERE EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Semi Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Hash Right Semi Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Hash Right Semi Join - Hash Cond: (t2_3.b = t1_3.a) - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(21 rows) - -SELECT t1.* FROM prt1_adv t1 WHERE EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - a | b | c ------+---+------ - 100 | 0 | 0100 - 125 | 0 | 0125 - 200 | 0 | 0200 - 225 | 0 | 0225 - 250 | 0 | 0250 - 275 | 0 | 0275 - 350 | 0 | 0350 - 375 | 0 | 0375 -(8 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a, t2.b - -> Append - -> Hash Right Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Hash Right Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Hash Right Join - Hash Cond: (t2_3.b = 
t1_3.a) - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(21 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 100 | 0100 | 100 | 0100 - 125 | 0125 | 125 | 0125 - 150 | 0150 | | - 175 | 0175 | | - 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 - 300 | 0300 | | - 325 | 0325 | | - 350 | 0350 | 350 | 0350 - 375 | 0375 | 375 | 0375 -(12 rows) - --- anti join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Anti Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Hash Right Anti Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Hash Right Anti Join - Hash Cond: (t2_3.b = t1_3.a) - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(21 rows) - -SELECT t1.* FROM prt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - a | b | c ------+---+------ - 150 | 0 | 0150 - 175 | 0 | 0175 - 300 | 0 | 0300 - 325 | 0 | 0325 -(4 rows) - --- full join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 175 phv, * FROM prt1_adv WHERE prt1_adv.b = 0) t1 FULL JOIN (SELECT 425 phv, * FROM prt2_adv WHERE prt2_adv.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b ORDER BY t1.a, t2.b; - QUERY PLAN --------------------------------------------------------------------------- - Sort - Sort Key: prt1_adv.a, prt2_adv.b - -> Append - -> Hash Full Join - Hash Cond: (prt1_adv_1.a = prt2_adv_1.b) - Filter: (((175) = prt1_adv_1.a) OR ((425) = prt2_adv_1.b)) - -> Seq Scan on prt1_adv_p1 prt1_adv_1 - Filter: (b = 0) - -> Hash - -> Seq Scan on prt2_adv_p1 prt2_adv_1 - Filter: (a = 0) - -> Hash Full Join - Hash Cond: (prt1_adv_2.a = prt2_adv_2.b) - Filter: (((175) = prt1_adv_2.a) OR ((425) = prt2_adv_2.b)) - -> Seq Scan on prt1_adv_p2 prt1_adv_2 - Filter: (b = 0) - -> Hash - -> Seq Scan on prt2_adv_p2 prt2_adv_2 - Filter: (a = 0) - -> Hash Full Join - Hash Cond: (prt2_adv_3.b = prt1_adv_3.a) - Filter: (((175) = prt1_adv_3.a) OR ((425) = prt2_adv_3.b)) - -> Seq Scan on prt2_adv_p3 prt2_adv_3 - Filter: (a = 0) - -> Hash - -> Seq Scan on prt1_adv_p3 prt1_adv_3 - Filter: (b = 0) -(27 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 175 phv, * FROM prt1_adv WHERE prt1_adv.b = 0) t1 FULL JOIN (SELECT 425 phv, * FROM prt2_adv WHERE prt2_adv.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 175 | 0175 | | - | | 425 | 0425 -(2 rows) - --- Test cases where one side has an extra partition -CREATE TABLE prt2_adv_extra PARTITION OF prt2_adv FOR VALUES FROM (500) TO (MAXVALUE); -INSERT INTO prt2_adv SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(500, 599) i; -ANALYZE prt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> 
Append - -> Hash Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Hash Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Hash Join - Hash Cond: (t2_3.b = t1_3.a) - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(21 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 100 | 0100 | 100 | 0100 - 125 | 0125 | 125 | 0125 - 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 - 350 | 0350 | 350 | 0350 - 375 | 0375 | 375 | 0375 -(8 rows) - --- semi join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt1_adv t1 WHERE EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Semi Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Hash Right Semi Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Hash Right Semi Join - Hash Cond: (t2_3.b = t1_3.a) - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(21 rows) - -SELECT t1.* FROM prt1_adv t1 WHERE EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - a | b | c ------+---+------ - 100 | 0 | 0100 - 125 | 0 | 0125 - 200 | 0 | 0200 - 225 | 0 | 0225 - 250 | 0 | 0250 - 275 | 0 | 0275 - 350 | 0 | 0350 - 375 | 0 | 0375 -(8 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a, t2.b - -> Append - -> Hash Right Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Hash Right Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Hash Right Join - Hash Cond: (t2_3.b = t1_3.a) - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(21 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 100 | 0100 | 100 | 0100 - 125 | 0125 | 125 | 0125 - 150 | 0150 | | - 175 | 0175 | | - 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 - 300 | 0300 | | - 325 | 0325 | | - 350 | 0350 | 350 | 0350 - 375 | 0375 | 375 | 0375 -(12 rows) - --- left join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.b, t1.c, t2.a, t2.c FROM prt2_adv t1 LEFT JOIN prt1_adv t2 ON (t1.b = t2.a) WHERE t1.a = 0 ORDER BY t1.b, t2.a; - QUERY PLAN ---------------------------------------------------------- - Sort - Sort Key: t1.b, t2.a - -> Hash Right Join - Hash Cond: (t2.a = t1.b) - -> Append - -> Seq Scan on prt1_adv_p1 t2_1 - -> Seq Scan on prt1_adv_p2 t2_2 - -> Seq Scan on prt1_adv_p3 
t2_3 - -> Hash - -> Append - -> Seq Scan on prt2_adv_p1 t1_1 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p2 t1_2 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p3 t1_3 - Filter: (a = 0) - -> Seq Scan on prt2_adv_extra t1_4 - Filter: (a = 0) -(18 rows) - --- anti join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Anti Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Hash Right Anti Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Hash Right Anti Join - Hash Cond: (t2_3.b = t1_3.a) - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(21 rows) - -SELECT t1.* FROM prt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - a | b | c ------+---+------ - 150 | 0 | 0150 - 175 | 0 | 0175 - 300 | 0 | 0300 - 325 | 0 | 0325 -(4 rows) - --- anti join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt2_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt1_adv t2 WHERE t1.b = t2.a) AND t1.a = 0 ORDER BY t1.b; - QUERY PLAN ---------------------------------------------------------- - Sort - Sort Key: t1.b - -> Hash Right Anti Join - Hash Cond: (t2.a = t1.b) - -> Append - -> Seq Scan on prt1_adv_p1 t2_1 - -> Seq Scan on prt1_adv_p2 t2_2 - -> Seq Scan on prt1_adv_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on prt2_adv_p1 t1_1 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p2 t1_2 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p3 t1_3 - Filter: (a = 0) - -> Seq Scan on prt2_adv_extra t1_4 - Filter: (a = 0) -(18 rows) - --- full join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 175 phv, * FROM prt1_adv WHERE prt1_adv.b = 0) t1 FULL JOIN (SELECT 425 phv, * FROM prt2_adv WHERE prt2_adv.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b ORDER BY t1.a, t2.b; - QUERY PLAN ----------------------------------------------------------------- - Sort - Sort Key: prt1_adv.a, prt2_adv.b - -> Hash Full Join - Hash Cond: (prt2_adv.b = prt1_adv.a) - Filter: (((175) = prt1_adv.a) OR ((425) = prt2_adv.b)) - -> Append - -> Seq Scan on prt2_adv_p1 prt2_adv_1 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p2 prt2_adv_2 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p3 prt2_adv_3 - Filter: (a = 0) - -> Seq Scan on prt2_adv_extra prt2_adv_4 - Filter: (a = 0) - -> Hash - -> Append - -> Seq Scan on prt1_adv_p1 prt1_adv_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p2 prt1_adv_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 prt1_adv_3 - Filter: (b = 0) -(22 rows) - --- 3-way join where not every pair of relations can do partitioned join -EXPLAIN (COSTS OFF) -SELECT t1.b, t1.c, t2.a, t2.c, t3.a, t3.c FROM prt2_adv t1 LEFT JOIN prt1_adv t2 ON (t1.b = t2.a) INNER JOIN prt1_adv t3 ON (t1.b = t3.a) WHERE t1.a = 0 ORDER BY t1.b, t2.a, t3.a; - QUERY PLAN --------------------------------------------------------------------------------- - Sort - Sort Key: t1.b, t2.a - -> Append - -> Nested Loop Left Join - -> Nested Loop - -> Seq Scan on prt2_adv_p1 t1_1 - Filter: (a = 0) - 
-> Index Scan using prt1_adv_p1_a_idx on prt1_adv_p1 t3_1 - Index Cond: (a = t1_1.b) - -> Index Scan using prt1_adv_p1_a_idx on prt1_adv_p1 t2_1 - Index Cond: (a = t1_1.b) - -> Hash Right Join - Hash Cond: (t2_2.a = t1_2.b) - -> Seq Scan on prt1_adv_p2 t2_2 - -> Hash - -> Hash Join - Hash Cond: (t3_2.a = t1_2.b) - -> Seq Scan on prt1_adv_p2 t3_2 - -> Hash - -> Seq Scan on prt2_adv_p2 t1_2 - Filter: (a = 0) - -> Hash Right Join - Hash Cond: (t2_3.a = t1_3.b) - -> Seq Scan on prt1_adv_p3 t2_3 - -> Hash - -> Hash Join - Hash Cond: (t3_3.a = t1_3.b) - -> Seq Scan on prt1_adv_p3 t3_3 - -> Hash - -> Seq Scan on prt2_adv_p3 t1_3 - Filter: (a = 0) -(31 rows) - -SELECT t1.b, t1.c, t2.a, t2.c, t3.a, t3.c FROM prt2_adv t1 LEFT JOIN prt1_adv t2 ON (t1.b = t2.a) INNER JOIN prt1_adv t3 ON (t1.b = t3.a) WHERE t1.a = 0 ORDER BY t1.b, t2.a, t3.a; - b | c | a | c | a | c ------+------+-----+------+-----+------ - 100 | 0100 | 100 | 0100 | 100 | 0100 - 125 | 0125 | 125 | 0125 | 125 | 0125 - 200 | 0200 | 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 | 275 | 0275 - 350 | 0350 | 350 | 0350 | 350 | 0350 - 375 | 0375 | 375 | 0375 | 375 | 0375 -(8 rows) - -DROP TABLE prt2_adv_extra; --- Test cases where a partition on one side matches multiple partitions on --- the other side; we currently can't do partitioned join in such cases -ALTER TABLE prt2_adv DETACH PARTITION prt2_adv_p3; --- Split prt2_adv_p3 into two partitions so that prt1_adv_p3 matches both -CREATE TABLE prt2_adv_p3_1 PARTITION OF prt2_adv FOR VALUES FROM (350) TO (375); -CREATE TABLE prt2_adv_p3_2 PARTITION OF prt2_adv FOR VALUES FROM (375) TO (500); -INSERT INTO prt2_adv SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(350, 499) i; -ANALYZE prt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: (t2.b = t1.a) - -> Append - -> Seq Scan on prt2_adv_p1 t2_1 - -> Seq Scan on prt2_adv_p2 t2_2 - -> Seq Scan on prt2_adv_p3_1 t2_3 - -> Seq Scan on prt2_adv_p3_2 t2_4 - -> Hash - -> Append - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(17 rows) - --- semi join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt1_adv t1 WHERE EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Semi Join - Hash Cond: (t2.b = t1.a) - -> Append - -> Seq Scan on prt2_adv_p1 t2_1 - -> Seq Scan on prt2_adv_p2 t2_2 - -> Seq Scan on prt2_adv_p3_1 t2_3 - -> Seq Scan on prt2_adv_p3_2 t2_4 - -> Hash - -> Append - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(17 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a, t2.b - -> Hash Right Join - Hash Cond: (t2.b = t1.a) - -> Append - -> Seq Scan on prt2_adv_p1 t2_1 - -> Seq Scan on prt2_adv_p2 t2_2 - -> Seq Scan on prt2_adv_p3_1 t2_3 - -> Seq Scan on prt2_adv_p3_2 t2_4 - -> 
Hash - -> Append - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(17 rows) - --- anti join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM prt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Anti Join - Hash Cond: (t2.b = t1.a) - -> Append - -> Seq Scan on prt2_adv_p1 t2_1 - -> Seq Scan on prt2_adv_p2 t2_2 - -> Seq Scan on prt2_adv_p3_1 t2_3 - -> Seq Scan on prt2_adv_p3_2 t2_4 - -> Hash - -> Append - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 t1_3 - Filter: (b = 0) -(17 rows) - --- full join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 175 phv, * FROM prt1_adv WHERE prt1_adv.b = 0) t1 FULL JOIN (SELECT 425 phv, * FROM prt2_adv WHERE prt2_adv.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b ORDER BY t1.a, t2.b; - QUERY PLAN ----------------------------------------------------------------- - Sort - Sort Key: prt1_adv.a, prt2_adv.b - -> Hash Full Join - Hash Cond: (prt2_adv.b = prt1_adv.a) - Filter: (((175) = prt1_adv.a) OR ((425) = prt2_adv.b)) - -> Append - -> Seq Scan on prt2_adv_p1 prt2_adv_1 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p2 prt2_adv_2 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p3_1 prt2_adv_3 - Filter: (a = 0) - -> Seq Scan on prt2_adv_p3_2 prt2_adv_4 - Filter: (a = 0) - -> Hash - -> Append - -> Seq Scan on prt1_adv_p1 prt1_adv_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p2 prt1_adv_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 prt1_adv_3 - Filter: (b = 0) -(22 rows) - -DROP TABLE prt2_adv_p3_1; -DROP TABLE prt2_adv_p3_2; -ANALYZE prt2_adv; --- Test default partitions -ALTER TABLE prt1_adv DETACH PARTITION prt1_adv_p1; --- Change prt1_adv_p1 to the default partition -ALTER TABLE prt1_adv ATTACH PARTITION prt1_adv_p1 DEFAULT; -ALTER TABLE prt1_adv DETACH PARTITION prt1_adv_p3; -ANALYZE prt1_adv; --- We can do partitioned join even if only one of relations has the default --- partition -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: (t2_1.b = t1_2.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_2 - Filter: (b = 0) - -> Hash Join - Hash Cond: (t2_2.b = t1_1.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_1 - Filter: (b = 0) -(15 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 100 | 0100 | 100 | 0100 - 125 | 0125 | 125 | 0125 - 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 -(6 rows) - --- Restore prt1_adv_p3 -ALTER TABLE prt1_adv ATTACH PARTITION prt1_adv_p3 FOR VALUES FROM (300) TO (400); -ANALYZE prt1_adv; --- Restore prt2_adv_p3 -ALTER TABLE prt2_adv ATTACH PARTITION prt2_adv_p3 FOR VALUES FROM (350) TO (500); -ANALYZE prt2_adv; --- Partitioned join can't be applied because the default partition of prt1_adv --- matches prt2_adv_p1 and prt2_adv_p3 -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN 
prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: (t2.b = t1.a) - -> Append - -> Seq Scan on prt2_adv_p1 t2_1 - -> Seq Scan on prt2_adv_p2 t2_2 - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on prt1_adv_p2 t1_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 t1_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p1 t1_3 - Filter: (b = 0) -(16 rows) - -ALTER TABLE prt2_adv DETACH PARTITION prt2_adv_p3; --- Change prt2_adv_p3 to the default partition -ALTER TABLE prt2_adv ATTACH PARTITION prt2_adv_p3 DEFAULT; -ANALYZE prt2_adv; --- Partitioned join can't be applied because the default partition of prt1_adv --- matches prt2_adv_p1 and prt2_adv_p3 -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: (t2.b = t1.a) - -> Append - -> Seq Scan on prt2_adv_p1 t2_1 - -> Seq Scan on prt2_adv_p2 t2_2 - -> Seq Scan on prt2_adv_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on prt1_adv_p2 t1_1 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p3 t1_2 - Filter: (b = 0) - -> Seq Scan on prt1_adv_p1 t1_3 - Filter: (b = 0) -(16 rows) - -DROP TABLE prt1_adv_p3; -ANALYZE prt1_adv; -DROP TABLE prt2_adv_p3; -ANALYZE prt2_adv; -CREATE TABLE prt3_adv (a int, b int, c varchar) PARTITION BY RANGE (a); -CREATE TABLE prt3_adv_p1 PARTITION OF prt3_adv FOR VALUES FROM (200) TO (300); -CREATE TABLE prt3_adv_p2 PARTITION OF prt3_adv FOR VALUES FROM (300) TO (400); -CREATE INDEX prt3_adv_a_idx ON prt3_adv (a); -INSERT INTO prt3_adv SELECT i, i % 25, to_char(i, 'FM0000') FROM generate_series(200, 399) i; -ANALYZE prt3_adv; --- 3-way join to test the default partition of a join relation -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c, t3.a, t3.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) LEFT JOIN prt3_adv t3 ON (t1.a = t3.a) WHERE t1.b = 0 ORDER BY t1.a, t2.b, t3.a; - QUERY PLAN ------------------------------------------------------------------- - Sort - Sort Key: t1.a, t2.b, t3.a - -> Append - -> Hash Right Join - Hash Cond: (t3_1.a = t1_1.a) - -> Seq Scan on prt3_adv_p1 t3_1 - -> Hash - -> Hash Right Join - Hash Cond: (t2_2.b = t1_1.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_1 - Filter: (b = 0) - -> Hash Right Join - Hash Cond: (t3_2.a = t1_2.a) - -> Seq Scan on prt3_adv_p2 t3_2 - -> Hash - -> Hash Right Join - Hash Cond: (t2_1.b = t1_2.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_2 - Filter: (b = 0) -(23 rows) - -SELECT t1.a, t1.c, t2.b, t2.c, t3.a, t3.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) LEFT JOIN prt3_adv t3 ON (t1.a = t3.a) WHERE t1.b = 0 ORDER BY t1.a, t2.b, t3.a; - a | c | b | c | a | c ------+------+-----+------+-----+------ - 100 | 0100 | 100 | 0100 | | - 125 | 0125 | 125 | 0125 | | - 150 | 0150 | | | | - 175 | 0175 | | | | - 200 | 0200 | 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 | 275 | 0275 -(8 rows) - -DROP TABLE prt1_adv; -DROP TABLE prt2_adv; -DROP TABLE prt3_adv; --- Test interaction of partitioned join with partition pruning -CREATE TABLE prt1_adv (a int, b int, c varchar) PARTITION BY RANGE (a); -CREATE TABLE prt1_adv_p1 PARTITION OF prt1_adv FOR VALUES FROM (100) TO 
(200); -CREATE TABLE prt1_adv_p2 PARTITION OF prt1_adv FOR VALUES FROM (200) TO (300); -CREATE TABLE prt1_adv_p3 PARTITION OF prt1_adv FOR VALUES FROM (300) TO (400); -CREATE INDEX prt1_adv_a_idx ON prt1_adv (a); -INSERT INTO prt1_adv SELECT i, i % 25, to_char(i, 'FM0000') FROM generate_series(100, 399) i; -ANALYZE prt1_adv; -CREATE TABLE prt2_adv (a int, b int, c varchar) PARTITION BY RANGE (b); -CREATE TABLE prt2_adv_p1 PARTITION OF prt2_adv FOR VALUES FROM (100) TO (200); -CREATE TABLE prt2_adv_p2 PARTITION OF prt2_adv FOR VALUES FROM (200) TO (400); -CREATE INDEX prt2_adv_b_idx ON prt2_adv (b); -INSERT INTO prt2_adv SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(100, 399) i; -ANALYZE prt2_adv; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.a < 300 AND t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN ------------------------------------------------------------ - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: ((a < 300) AND (b = 0)) - -> Hash Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: ((a < 300) AND (b = 0)) -(15 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.a < 300 AND t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 100 | 0100 | 100 | 0100 - 125 | 0125 | 125 | 0125 - 150 | 0150 | 150 | 0150 - 175 | 0175 | 175 | 0175 - 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 -(8 rows) - -DROP TABLE prt1_adv_p3; -CREATE TABLE prt1_adv_default PARTITION OF prt1_adv DEFAULT; -ANALYZE prt1_adv; -CREATE TABLE prt2_adv_default PARTITION OF prt2_adv DEFAULT; -ANALYZE prt2_adv; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.a >= 100 AND t1.a < 300 AND t1.b = 0 ORDER BY t1.a, t2.b; - QUERY PLAN --------------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: (t2_1.b = t1_1.a) - -> Seq Scan on prt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on prt1_adv_p1 t1_1 - Filter: ((a >= 100) AND (a < 300) AND (b = 0)) - -> Hash Join - Hash Cond: (t2_2.b = t1_2.a) - -> Seq Scan on prt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on prt1_adv_p2 t1_2 - Filter: ((a >= 100) AND (a < 300) AND (b = 0)) -(15 rows) - -SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.a >= 100 AND t1.a < 300 AND t1.b = 0 ORDER BY t1.a, t2.b; - a | c | b | c ------+------+-----+------ - 100 | 0100 | 100 | 0100 - 125 | 0125 | 125 | 0125 - 150 | 0150 | 150 | 0150 - 175 | 0175 | 175 | 0175 - 200 | 0200 | 200 | 0200 - 225 | 0225 | 225 | 0225 - 250 | 0250 | 250 | 0250 - 275 | 0275 | 275 | 0275 -(8 rows) - -DROP TABLE prt1_adv; -DROP TABLE prt2_adv; --- Tests for list-partitioned tables -CREATE TABLE plt1_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt1_adv_p1 PARTITION OF plt1_adv FOR VALUES IN ('0001', '0003'); -CREATE TABLE plt1_adv_p2 PARTITION OF plt1_adv FOR VALUES IN ('0004', '0006'); -CREATE TABLE plt1_adv_p3 PARTITION OF plt1_adv FOR VALUES IN ('0008', '0009'); -INSERT INTO plt1_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (1, 3, 4, 6, 8, 9); -ANALYZE plt1_adv; -CREATE TABLE plt2_adv (a int, 
b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt2_adv_p1 PARTITION OF plt2_adv FOR VALUES IN ('0002', '0003'); -CREATE TABLE plt2_adv_p2 PARTITION OF plt2_adv FOR VALUES IN ('0004', '0006'); -CREATE TABLE plt2_adv_p3 PARTITION OF plt2_adv FOR VALUES IN ('0007', '0009'); -INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -ANALYZE plt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 9 | 0009 | 9 | 0009 -(4 rows) - --- semi join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Semi Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Semi Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Semi Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - a | b | c ----+---+------ - 3 | 3 | 0003 - 4 | 4 | 0004 - 6 | 6 | 0006 - 9 | 9 | 0009 -(4 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c 
----+------+---+------ - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 -(6 rows) - --- anti join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Anti Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Anti Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Anti Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - a | b | c ----+---+------ - 1 | 1 | 0001 - 8 | 8 | 0008 -(2 rows) - --- full join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - QUERY PLAN ------------------------------------------------------------------------------------ - Sort - Sort Key: t1.a, t2.a - -> Append - -> Hash Full Join - Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.c = t2_1.c)) - Filter: ((COALESCE(t1_1.b, 0) < 10) AND (COALESCE(t2_1.b, 0) < 10)) - -> Seq Scan on plt1_adv_p1 t1_1 - -> Hash - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash Full Join - Hash Cond: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) - Filter: ((COALESCE(t1_2.b, 0) < 10) AND (COALESCE(t2_2.b, 0) < 10)) - -> Seq Scan on plt1_adv_p2 t1_2 - -> Hash - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash Full Join - Hash Cond: ((t1_3.a = t2_3.a) AND (t1_3.c = t2_3.c)) - Filter: ((COALESCE(t1_3.b, 0) < 10) AND (COALESCE(t2_3.b, 0) < 10)) - -> Seq Scan on plt1_adv_p3 t1_3 - -> Hash - -> Seq Scan on plt2_adv_p3 t2_3 -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - a | c | a | c ----+------+---+------ - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 - | | 2 | 0002 - | | 7 | 0007 -(8 rows) - --- Test cases where one side has an extra partition -CREATE TABLE plt2_adv_extra PARTITION OF plt2_adv FOR VALUES IN ('0000'); -INSERT INTO plt2_adv_extra VALUES (0, 0, '0000'); -ANALYZE plt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - 
-> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 9 | 0009 | 9 | 0009 -(4 rows) - --- semi join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Semi Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Semi Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Semi Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - a | b | c ----+---+------ - 3 | 3 | 0003 - 4 | 4 | 0004 - 6 | 6 | 0006 - 9 | 9 | 0009 -(4 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 -(6 rows) - --- left join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt2_adv t1 LEFT JOIN plt1_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ---------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt1_adv_p1 t2_1 - -> Seq Scan on plt1_adv_p2 t2_2 - -> Seq Scan on plt1_adv_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on plt2_adv_extra t1_1 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p1 t1_2 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p2 t1_3 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p3 t1_4 - Filter: (b < 10) -(18 rows) - --- anti join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN 
--------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Anti Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Anti Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Anti Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - a | b | c ----+---+------ - 1 | 1 | 0001 - 8 | 8 | 0008 -(2 rows) - --- anti join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt2_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt1_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN ---------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Anti Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt1_adv_p1 t2_1 - -> Seq Scan on plt1_adv_p2 t2_2 - -> Seq Scan on plt1_adv_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on plt2_adv_extra t1_1 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p1 t1_2 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p2 t1_3 - Filter: (b < 10) - -> Seq Scan on plt2_adv_p3 t1_4 - Filter: (b < 10) -(18 rows) - --- full join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - QUERY PLAN -------------------------------------------------------------------------- - Sort - Sort Key: t1.a, t2.a - -> Hash Full Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - Filter: ((COALESCE(t1.b, 0) < 10) AND (COALESCE(t2.b, 0) < 10)) - -> Append - -> Seq Scan on plt2_adv_extra t2_1 - -> Seq Scan on plt2_adv_p1 t2_2 - -> Seq Scan on plt2_adv_p2 t2_3 - -> Seq Scan on plt2_adv_p3 t2_4 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - -> Seq Scan on plt1_adv_p2 t1_2 - -> Seq Scan on plt1_adv_p3 t1_3 -(15 rows) - -DROP TABLE plt2_adv_extra; --- Test cases where a partition on one side matches multiple partitions on --- the other side; we currently can't do partitioned join in such cases -ALTER TABLE plt2_adv DETACH PARTITION plt2_adv_p2; --- Split plt2_adv_p2 into two partitions so that plt1_adv_p2 matches both -CREATE TABLE plt2_adv_p2_1 PARTITION OF plt2_adv FOR VALUES IN ('0004'); -CREATE TABLE plt2_adv_p2_2 PARTITION OF plt2_adv FOR VALUES IN ('0006'); -INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (4, 6); -ANALYZE plt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_1 t2_2 - -> Seq Scan on plt2_adv_p2_2 t2_3 - 
-> Seq Scan on plt2_adv_p3 t2_4 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(17 rows) - --- semi join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Semi Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_1 t2_2 - -> Seq Scan on plt2_adv_p2_2 t2_3 - -> Seq Scan on plt2_adv_p3 t2_4 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(17 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_1 t2_2 - -> Seq Scan on plt2_adv_p2_2 t2_3 - -> Seq Scan on plt2_adv_p3 t2_4 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(17 rows) - --- anti join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Anti Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_1 t2_2 - -> Seq Scan on plt2_adv_p2_2 t2_3 - -> Seq Scan on plt2_adv_p3 t2_4 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(17 rows) - --- full join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - QUERY PLAN -------------------------------------------------------------------------- - Sort - Sort Key: t1.a, t2.a - -> Hash Full Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - Filter: ((COALESCE(t1.b, 0) < 10) AND (COALESCE(t2.b, 0) < 10)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_1 t2_2 - -> Seq Scan on plt2_adv_p2_2 t2_3 - -> Seq Scan on plt2_adv_p3 t2_4 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - -> Seq Scan on plt1_adv_p2 t1_2 - -> Seq Scan on plt1_adv_p3 t1_3 -(15 rows) - -DROP TABLE plt2_adv_p2_1; -DROP TABLE plt2_adv_p2_2; --- Restore plt2_adv_p2 -ALTER TABLE plt2_adv ATTACH PARTITION plt2_adv_p2 FOR VALUES IN ('0004', '0006'); --- Test NULL partitions -ALTER TABLE plt1_adv DETACH PARTITION plt1_adv_p1; --- Change plt1_adv_p1 to the NULL partition -CREATE TABLE plt1_adv_p1_null PARTITION OF plt1_adv FOR VALUES IN (NULL, '0001', '0003'); -INSERT INTO plt1_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (1, 3); -INSERT INTO plt1_adv VALUES (-1, -1, 
NULL); -ANALYZE plt1_adv; -ALTER TABLE plt2_adv DETACH PARTITION plt2_adv_p3; --- Change plt2_adv_p3 to the NULL partition -CREATE TABLE plt2_adv_p3_null PARTITION OF plt2_adv FOR VALUES IN (NULL, '0007', '0009'); -INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (7, 9); -INSERT INTO plt2_adv VALUES (-1, -1, NULL); -ANALYZE plt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1_null t1_1 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3_null t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 9 | 0009 | 9 | 0009 -(4 rows) - --- semi join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Semi Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1_null t1_1 - Filter: (b < 10) - -> Hash Right Semi Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Semi Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3_null t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - a | b | c ----+---+------ - 3 | 3 | 0003 - 4 | 4 | 0004 - 6 | 6 | 0006 - 9 | 9 | 0009 -(4 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1_null t1_1 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3_null t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c 
-----+------+---+------ - -1 | | | - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 -(7 rows) - --- anti join -EXPLAIN (COSTS OFF) -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Anti Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1_null t1_1 - Filter: (b < 10) - -> Hash Right Anti Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Anti Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3_null t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; - a | b | c -----+----+------ - -1 | -1 | - 1 | 1 | 0001 - 8 | 8 | 0008 -(3 rows) - --- full join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - QUERY PLAN ------------------------------------------------------------------------------------ - Sort - Sort Key: t1.a, t2.a - -> Append - -> Hash Full Join - Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.c = t2_1.c)) - Filter: ((COALESCE(t1_1.b, 0) < 10) AND (COALESCE(t2_1.b, 0) < 10)) - -> Seq Scan on plt1_adv_p1_null t1_1 - -> Hash - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash Full Join - Hash Cond: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) - Filter: ((COALESCE(t1_2.b, 0) < 10) AND (COALESCE(t2_2.b, 0) < 10)) - -> Seq Scan on plt1_adv_p2 t1_2 - -> Hash - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash Full Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - Filter: ((COALESCE(t1_3.b, 0) < 10) AND (COALESCE(t2_3.b, 0) < 10)) - -> Seq Scan on plt2_adv_p3_null t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - a | c | a | c -----+------+----+------ - -1 | | | - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 - | | -1 | - | | 2 | 0002 - | | 7 | 0007 -(10 rows) - -DROP TABLE plt1_adv_p1_null; --- Restore plt1_adv_p1 -ALTER TABLE plt1_adv ATTACH PARTITION plt1_adv_p1 FOR VALUES IN ('0001', '0003'); --- Add to plt1_adv the extra NULL partition containing only NULL values as the --- key values -CREATE TABLE plt1_adv_extra PARTITION OF plt1_adv FOR VALUES IN (NULL); -INSERT INTO plt1_adv VALUES (-1, -1, NULL); -ANALYZE plt1_adv; -DROP TABLE plt2_adv_p3_null; --- Restore plt2_adv_p3 -ALTER TABLE plt2_adv ATTACH PARTITION plt2_adv_p3 FOR VALUES IN ('0007', '0009'); -ANALYZE plt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_1.a) AND 
(t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 9 | 0009 | 9 | 0009 -(4 rows) - --- left join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ---------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2 t2_2 - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) - -> Seq Scan on plt1_adv_extra t1_4 - Filter: (b < 10) -(18 rows) - --- full join; currently we can't do partitioned join if there are no matched --- partitions on the nullable side -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - QUERY PLAN -------------------------------------------------------------------------- - Sort - Sort Key: t1.a, t2.a - -> Hash Full Join - Hash Cond: ((t1.a = t2.a) AND (t1.c = t2.c)) - Filter: ((COALESCE(t1.b, 0) < 10) AND (COALESCE(t2.b, 0) < 10)) - -> Append - -> Seq Scan on plt1_adv_p1 t1_1 - -> Seq Scan on plt1_adv_p2 t1_2 - -> Seq Scan on plt1_adv_p3 t1_3 - -> Seq Scan on plt1_adv_extra t1_4 - -> Hash - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2 t2_2 - -> Seq Scan on plt2_adv_p3 t2_3 -(15 rows) - --- Add to plt2_adv the extra NULL partition containing only NULL values as the --- key values -CREATE TABLE plt2_adv_extra PARTITION OF plt2_adv FOR VALUES IN (NULL); -INSERT INTO plt2_adv VALUES (-1, -1, NULL); -ANALYZE plt2_adv; --- inner join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) -(21 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY 
t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 9 | 0009 | 9 | 0009 -(4 rows) - --- left join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ----------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) - -> Nested Loop Left Join - Join Filter: ((t1_4.a = t2_4.a) AND (t1_4.c = t2_4.c)) - -> Seq Scan on plt1_adv_extra t1_4 - Filter: (b < 10) - -> Seq Scan on plt2_adv_extra t2_4 -(26 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c -----+------+---+------ - -1 | | | - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 -(7 rows) - --- full join -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - QUERY PLAN ------------------------------------------------------------------------------------ - Sort - Sort Key: t1.a, t2.a - -> Append - -> Hash Full Join - Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.c = t2_1.c)) - Filter: ((COALESCE(t1_1.b, 0) < 10) AND (COALESCE(t2_1.b, 0) < 10)) - -> Seq Scan on plt1_adv_p1 t1_1 - -> Hash - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash Full Join - Hash Cond: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) - Filter: ((COALESCE(t1_2.b, 0) < 10) AND (COALESCE(t2_2.b, 0) < 10)) - -> Seq Scan on plt1_adv_p2 t1_2 - -> Hash - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash Full Join - Hash Cond: ((t1_3.a = t2_3.a) AND (t1_3.c = t2_3.c)) - Filter: ((COALESCE(t1_3.b, 0) < 10) AND (COALESCE(t2_3.b, 0) < 10)) - -> Seq Scan on plt1_adv_p3 t1_3 - -> Hash - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash Full Join - Hash Cond: ((t1_4.a = t2_4.a) AND (t1_4.c = t2_4.c)) - Filter: ((COALESCE(t1_4.b, 0) < 10) AND (COALESCE(t2_4.b, 0) < 10)) - -> Seq Scan on plt1_adv_extra t1_4 - -> Hash - -> Seq Scan on plt2_adv_extra t2_4 -(27 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; - a | c | a | c -----+------+----+------ - -1 | | | - 1 | 0001 | | - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 - 8 | 0008 | | - 9 | 0009 | 9 | 0009 - | | -1 | - | | 2 | 0002 - | | 7 | 0007 -(10 rows) - --- 3-way join to test the NULL partition of a join relation -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) LEFT JOIN plt1_adv t3 ON (t1.a = t3.a AND t1.c = t3.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash 
Right Join - Hash Cond: ((t3_1.a = t1_1.a) AND (t3_1.c = t1_1.c)) - -> Seq Scan on plt1_adv_p1 t3_1 - -> Hash - -> Hash Right Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_1 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t3_2.a = t1_2.a) AND (t3_2.c = t1_2.c)) - -> Seq Scan on plt1_adv_p2 t3_2 - -> Hash - -> Hash Right Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_2 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t3_3.a = t1_3.a) AND (t3_3.c = t1_3.c)) - -> Seq Scan on plt1_adv_p3 t3_3 - -> Hash - -> Hash Right Join - Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) - -> Seq Scan on plt2_adv_p3 t2_3 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_3 - Filter: (b < 10) - -> Nested Loop Left Join - Join Filter: ((t1_4.a = t3_4.a) AND (t1_4.c = t3_4.c)) - -> Nested Loop Left Join - Join Filter: ((t1_4.a = t2_4.a) AND (t1_4.c = t2_4.c)) - -> Seq Scan on plt1_adv_extra t1_4 - Filter: (b < 10) - -> Seq Scan on plt2_adv_extra t2_4 - -> Seq Scan on plt1_adv_extra t3_4 -(41 rows) - -SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) LEFT JOIN plt1_adv t3 ON (t1.a = t3.a AND t1.c = t3.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c | a | c -----+------+---+------+---+------ - -1 | | | | | - 1 | 0001 | | | 1 | 0001 - 3 | 0003 | 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 | 6 | 0006 - 8 | 0008 | | | 8 | 0008 - 9 | 0009 | 9 | 0009 | 9 | 0009 -(7 rows) - -DROP TABLE plt1_adv_extra; -DROP TABLE plt2_adv_extra; --- Test default partitions -ALTER TABLE plt1_adv DETACH PARTITION plt1_adv_p1; --- Change plt1_adv_p1 to the default partition -ALTER TABLE plt1_adv ATTACH PARTITION plt1_adv_p1 DEFAULT; -DROP TABLE plt1_adv_p3; -ANALYZE plt1_adv; -DROP TABLE plt2_adv_p3; -ANALYZE plt2_adv; --- We can do partitioned join even if only one of relations has the default --- partition -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_2.a) AND (t2_1.c = t1_2.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_1.a) AND (t2_2.c = t1_1.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_1 - Filter: (b < 10) -(15 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 -(3 rows) - -ALTER TABLE plt2_adv DETACH PARTITION plt2_adv_p2; --- Change plt2_adv_p2 to contain '0005' in addition to '0004' and '0006' as --- the key values -CREATE TABLE plt2_adv_p2_ext PARTITION OF plt2_adv FOR VALUES IN ('0004', '0005', '0006'); -INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (4, 5, 6); -ANALYZE plt2_adv; --- Partitioned join can't be applied because the default partition of plt1_adv --- matches plt2_adv_p1 and plt2_adv_p2_ext -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = 
t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_ext t2_2 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p2 t1_1 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p1 t1_2 - Filter: (b < 10) -(13 rows) - -ALTER TABLE plt2_adv DETACH PARTITION plt2_adv_p2_ext; --- Change plt2_adv_p2_ext to the default partition -ALTER TABLE plt2_adv ATTACH PARTITION plt2_adv_p2_ext DEFAULT; -ANALYZE plt2_adv; --- Partitioned join can't be applied because the default partition of plt1_adv --- matches plt2_adv_p1 and plt2_adv_p2_ext -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Append - -> Seq Scan on plt2_adv_p1 t2_1 - -> Seq Scan on plt2_adv_p2_ext t2_2 - -> Hash - -> Append - -> Seq Scan on plt1_adv_p2 t1_1 - Filter: (b < 10) - -> Seq Scan on plt1_adv_p1 t1_2 - Filter: (b < 10) -(13 rows) - -DROP TABLE plt2_adv_p2_ext; --- Restore plt2_adv_p2 -ALTER TABLE plt2_adv ATTACH PARTITION plt2_adv_p2 FOR VALUES IN ('0004', '0006'); -ANALYZE plt2_adv; -CREATE TABLE plt3_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt3_adv_p1 PARTITION OF plt3_adv FOR VALUES IN ('0004', '0006'); -CREATE TABLE plt3_adv_p2 PARTITION OF plt3_adv FOR VALUES IN ('0007', '0009'); -INSERT INTO plt3_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (4, 6, 7, 9); -ANALYZE plt3_adv; --- 3-way join to test the default partition of a join relation -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) LEFT JOIN plt3_adv t3 ON (t1.a = t3.a AND t1.c = t3.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Right Join - Hash Cond: ((t3_1.a = t1_1.a) AND (t3_1.c = t1_1.c)) - -> Seq Scan on plt3_adv_p1 t3_1 - -> Hash - -> Hash Right Join - Hash Cond: ((t2_2.a = t1_1.a) AND (t2_2.c = t1_1.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_1 - Filter: (b < 10) - -> Hash Right Join - Hash Cond: ((t3_2.a = t1_2.a) AND (t3_2.c = t1_2.c)) - -> Seq Scan on plt3_adv_p2 t3_2 - -> Hash - -> Hash Right Join - Hash Cond: ((t2_1.a = t1_2.a) AND (t2_1.c = t1_2.c)) - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_2 - Filter: (b < 10) -(23 rows) - -SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) LEFT JOIN plt3_adv t3 ON (t1.a = t3.a AND t1.c = t3.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c | a | c ----+------+---+------+---+------ - 1 | 0001 | | | | - 3 | 0003 | 3 | 0003 | | - 4 | 0004 | 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 | 6 | 0006 -(4 rows) - --- Test cases where one side has the default partition while the other side --- has the NULL partition -DROP TABLE plt2_adv_p1; --- Add the NULL partition to plt2_adv -CREATE TABLE plt2_adv_p1_null PARTITION OF plt2_adv FOR VALUES IN (NULL, '0001', '0003'); -INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 
299) i WHERE i % 10 IN (1, 3); -INSERT INTO plt2_adv VALUES (-1, -1, NULL); -ANALYZE plt2_adv; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_2.a) AND (t2_1.c = t1_2.c)) - -> Seq Scan on plt2_adv_p1_null t2_1 - -> Hash - -> Seq Scan on plt1_adv_p1 t1_2 - Filter: (b < 10) - -> Hash Join - Hash Cond: ((t2_2.a = t1_1.a) AND (t2_2.c = t1_1.c)) - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1_1 - Filter: (b < 10) -(15 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 1 | 0001 | 1 | 0001 - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 -(4 rows) - -DROP TABLE plt2_adv_p1_null; --- Add the NULL partition that contains only NULL values as the key values -CREATE TABLE plt2_adv_p1_null PARTITION OF plt2_adv FOR VALUES IN (NULL); -INSERT INTO plt2_adv VALUES (-1, -1, NULL); -ANALYZE plt2_adv; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Seq Scan on plt2_adv_p2 t2 - -> Hash - -> Seq Scan on plt1_adv_p2 t1 - Filter: (b < 10) -(8 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 4 | 0004 | 4 | 0004 - 6 | 0006 | 6 | 0006 -(2 rows) - -DROP TABLE plt1_adv; -DROP TABLE plt2_adv; -DROP TABLE plt3_adv; --- Test interaction of partitioned join with partition pruning -CREATE TABLE plt1_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt1_adv_p1 PARTITION OF plt1_adv FOR VALUES IN ('0001'); -CREATE TABLE plt1_adv_p2 PARTITION OF plt1_adv FOR VALUES IN ('0002'); -CREATE TABLE plt1_adv_p3 PARTITION OF plt1_adv FOR VALUES IN ('0003'); -CREATE TABLE plt1_adv_p4 PARTITION OF plt1_adv FOR VALUES IN (NULL, '0004', '0005'); -INSERT INTO plt1_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (1, 2, 3, 4, 5); -INSERT INTO plt1_adv VALUES (-1, -1, NULL); -ANALYZE plt1_adv; -CREATE TABLE plt2_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt2_adv_p1 PARTITION OF plt2_adv FOR VALUES IN ('0001', '0002'); -CREATE TABLE plt2_adv_p2 PARTITION OF plt2_adv FOR VALUES IN (NULL); -CREATE TABLE plt2_adv_p3 PARTITION OF plt2_adv FOR VALUES IN ('0003'); -CREATE TABLE plt2_adv_p4 PARTITION OF plt2_adv FOR VALUES IN ('0004', '0005'); -INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (1, 2, 3, 4, 5); -INSERT INTO plt2_adv VALUES (-1, -1, NULL); -ANALYZE plt2_adv; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IN ('0003', '0004', '0005') AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------------------------------------------ - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan 
on plt2_adv_p3 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_1 - Filter: ((b < 10) AND (c = ANY ('{0003,0004,0005}'::text[]))) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p4 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p4 t1_2 - Filter: ((b < 10) AND (c = ANY ('{0003,0004,0005}'::text[]))) -(15 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IN ('0003', '0004', '0005') AND t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 5 | 0005 | 5 | 0005 -(3 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IS NULL AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Seq Scan on plt2_adv_p4 t2 - -> Hash - -> Seq Scan on plt1_adv_p4 t1 - Filter: ((c IS NULL) AND (b < 10)) -(8 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IS NULL AND t1.b < 10 ORDER BY t1.a; - a | c | a | c -----+---+---+--- - -1 | | | -(1 row) - -CREATE TABLE plt1_adv_default PARTITION OF plt1_adv DEFAULT; -ANALYZE plt1_adv; -CREATE TABLE plt2_adv_default PARTITION OF plt2_adv DEFAULT; -ANALYZE plt2_adv; -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IN ('0003', '0004', '0005') AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN ------------------------------------------------------------------------------------------ - Sort - Sort Key: t1.a - -> Append - -> Hash Join - Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) - -> Seq Scan on plt2_adv_p3 t2_1 - -> Hash - -> Seq Scan on plt1_adv_p3 t1_1 - Filter: ((b < 10) AND (c = ANY ('{0003,0004,0005}'::text[]))) - -> Hash Join - Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) - -> Seq Scan on plt2_adv_p4 t2_2 - -> Hash - -> Seq Scan on plt1_adv_p4 t1_2 - Filter: ((b < 10) AND (c = ANY ('{0003,0004,0005}'::text[]))) -(15 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IN ('0003', '0004', '0005') AND t1.b < 10 ORDER BY t1.a; - a | c | a | c ----+------+---+------ - 3 | 0003 | 3 | 0003 - 4 | 0004 | 4 | 0004 - 5 | 0005 | 5 | 0005 -(3 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IS NULL AND t1.b < 10 ORDER BY t1.a; - QUERY PLAN --------------------------------------------------------- - Sort - Sort Key: t1.a - -> Hash Right Join - Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) - -> Seq Scan on plt2_adv_p4 t2 - -> Hash - -> Seq Scan on plt1_adv_p4 t1 - Filter: ((c IS NULL) AND (b < 10)) -(8 rows) - -SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IS NULL AND t1.b < 10 ORDER BY t1.a; - a | c | a | c -----+---+---+--- - -1 | | | -(1 row) - -DROP TABLE plt1_adv; -DROP TABLE plt2_adv; --- Test the process_outer_partition() code path -CREATE TABLE plt1_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt1_adv_p1 PARTITION OF plt1_adv FOR VALUES IN ('0000', '0001', '0002'); -CREATE TABLE plt1_adv_p2 PARTITION OF plt1_adv FOR VALUES IN ('0003', '0004'); -INSERT INTO plt1_adv SELECT i, 
i, to_char(i % 5, 'FM0000') FROM generate_series(0, 24) i; -ANALYZE plt1_adv; -CREATE TABLE plt2_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt2_adv_p1 PARTITION OF plt2_adv FOR VALUES IN ('0002'); -CREATE TABLE plt2_adv_p2 PARTITION OF plt2_adv FOR VALUES IN ('0003', '0004'); -INSERT INTO plt2_adv SELECT i, i, to_char(i % 5, 'FM0000') FROM generate_series(0, 24) i WHERE i % 5 IN (2, 3, 4); -ANALYZE plt2_adv; -CREATE TABLE plt3_adv (a int, b int, c text) PARTITION BY LIST (c); -CREATE TABLE plt3_adv_p1 PARTITION OF plt3_adv FOR VALUES IN ('0001'); -CREATE TABLE plt3_adv_p2 PARTITION OF plt3_adv FOR VALUES IN ('0003', '0004'); -INSERT INTO plt3_adv SELECT i, i, to_char(i % 5, 'FM0000') FROM generate_series(0, 24) i WHERE i % 5 IN (1, 3, 4); -ANALYZE plt3_adv; --- This tests that when merging partitions from plt1_adv and plt2_adv in --- merge_list_bounds(), process_outer_partition() returns an already-assigned --- merged partition when re-called with plt1_adv_p1 for the second list value --- '0001' of that partition -EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM (plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.c = t2.c)) FULL JOIN plt3_adv t3 ON (t1.c = t3.c) WHERE coalesce(t1.a, 0) % 5 != 3 AND coalesce(t1.a, 0) % 5 != 4 ORDER BY t1.c, t1.a, t2.a, t3.a; - QUERY PLAN ------------------------------------------------------------------------------------------------ - Sort - Sort Key: t1.c, t1.a, t2.a, t3.a - -> Append - -> Hash Full Join - Hash Cond: (t1_1.c = t3_1.c) - Filter: (((COALESCE(t1_1.a, 0) % 5) <> 3) AND ((COALESCE(t1_1.a, 0) % 5) <> 4)) - -> Hash Left Join - Hash Cond: (t1_1.c = t2_1.c) - -> Seq Scan on plt1_adv_p1 t1_1 - -> Hash - -> Seq Scan on plt2_adv_p1 t2_1 - -> Hash - -> Seq Scan on plt3_adv_p1 t3_1 - -> Hash Full Join - Hash Cond: (t1_2.c = t3_2.c) - Filter: (((COALESCE(t1_2.a, 0) % 5) <> 3) AND ((COALESCE(t1_2.a, 0) % 5) <> 4)) - -> Hash Left Join - Hash Cond: (t1_2.c = t2_2.c) - -> Seq Scan on plt1_adv_p2 t1_2 - -> Hash - -> Seq Scan on plt2_adv_p2 t2_2 - -> Hash - -> Seq Scan on plt3_adv_p2 t3_2 -(23 rows) - -SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM (plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.c = t2.c)) FULL JOIN plt3_adv t3 ON (t1.c = t3.c) WHERE coalesce(t1.a, 0) % 5 != 3 AND coalesce(t1.a, 0) % 5 != 4 ORDER BY t1.c, t1.a, t2.a, t3.a; - a | c | a | c | a | c -----+------+----+------+----+------ - 0 | 0000 | | | | - 5 | 0000 | | | | - 10 | 0000 | | | | - 15 | 0000 | | | | - 20 | 0000 | | | | - 1 | 0001 | | | 1 | 0001 - 1 | 0001 | | | 6 | 0001 - 1 | 0001 | | | 11 | 0001 - 1 | 0001 | | | 16 | 0001 - 1 | 0001 | | | 21 | 0001 - 6 | 0001 | | | 1 | 0001 - 6 | 0001 | | | 6 | 0001 - 6 | 0001 | | | 11 | 0001 - 6 | 0001 | | | 16 | 0001 - 6 | 0001 | | | 21 | 0001 - 11 | 0001 | | | 1 | 0001 - 11 | 0001 | | | 6 | 0001 - 11 | 0001 | | | 11 | 0001 - 11 | 0001 | | | 16 | 0001 - 11 | 0001 | | | 21 | 0001 - 16 | 0001 | | | 1 | 0001 - 16 | 0001 | | | 6 | 0001 - 16 | 0001 | | | 11 | 0001 - 16 | 0001 | | | 16 | 0001 - 16 | 0001 | | | 21 | 0001 - 21 | 0001 | | | 1 | 0001 - 21 | 0001 | | | 6 | 0001 - 21 | 0001 | | | 11 | 0001 - 21 | 0001 | | | 16 | 0001 - 21 | 0001 | | | 21 | 0001 - 2 | 0002 | 2 | 0002 | | - 2 | 0002 | 7 | 0002 | | - 2 | 0002 | 12 | 0002 | | - 2 | 0002 | 17 | 0002 | | - 2 | 0002 | 22 | 0002 | | - 7 | 0002 | 2 | 0002 | | - 7 | 0002 | 7 | 0002 | | - 7 | 0002 | 12 | 0002 | | - 7 | 0002 | 17 | 0002 | | - 7 | 0002 | 22 | 0002 | | - 12 | 0002 | 2 | 0002 | | - 12 | 0002 | 7 | 0002 | | - 12 | 0002 | 12 | 0002 | | - 12 | 0002 | 17 | 0002 | | - 12 
| 0002 | 22 | 0002 | | - 17 | 0002 | 2 | 0002 | | - 17 | 0002 | 7 | 0002 | | - 17 | 0002 | 12 | 0002 | | - 17 | 0002 | 17 | 0002 | | - 17 | 0002 | 22 | 0002 | | - 22 | 0002 | 2 | 0002 | | - 22 | 0002 | 7 | 0002 | | - 22 | 0002 | 12 | 0002 | | - 22 | 0002 | 17 | 0002 | | - 22 | 0002 | 22 | 0002 | | -(55 rows) - -DROP TABLE plt1_adv; -DROP TABLE plt2_adv; -DROP TABLE plt3_adv; --- Tests for multi-level partitioned tables -CREATE TABLE alpha (a double precision, b int, c text) PARTITION BY RANGE (a); -CREATE TABLE alpha_neg PARTITION OF alpha FOR VALUES FROM ('-Infinity') TO (0) PARTITION BY RANGE (b); -CREATE TABLE alpha_pos PARTITION OF alpha FOR VALUES FROM (0) TO (10.0) PARTITION BY LIST (c); -CREATE TABLE alpha_neg_p1 PARTITION OF alpha_neg FOR VALUES FROM (100) TO (200); -CREATE TABLE alpha_neg_p2 PARTITION OF alpha_neg FOR VALUES FROM (200) TO (300); -CREATE TABLE alpha_neg_p3 PARTITION OF alpha_neg FOR VALUES FROM (300) TO (400); -CREATE TABLE alpha_pos_p1 PARTITION OF alpha_pos FOR VALUES IN ('0001', '0003'); -CREATE TABLE alpha_pos_p2 PARTITION OF alpha_pos FOR VALUES IN ('0004', '0006'); -CREATE TABLE alpha_pos_p3 PARTITION OF alpha_pos FOR VALUES IN ('0008', '0009'); -INSERT INTO alpha_neg SELECT -1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(100, 399) i WHERE i % 10 IN (1, 3, 4, 6, 8, 9); -INSERT INTO alpha_pos SELECT 1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(100, 399) i WHERE i % 10 IN (1, 3, 4, 6, 8, 9); -ANALYZE alpha; -CREATE TABLE beta (a double precision, b int, c text) PARTITION BY RANGE (a); -CREATE TABLE beta_neg PARTITION OF beta FOR VALUES FROM (-10.0) TO (0) PARTITION BY RANGE (b); -CREATE TABLE beta_pos PARTITION OF beta FOR VALUES FROM (0) TO ('Infinity') PARTITION BY LIST (c); -CREATE TABLE beta_neg_p1 PARTITION OF beta_neg FOR VALUES FROM (100) TO (150); -CREATE TABLE beta_neg_p2 PARTITION OF beta_neg FOR VALUES FROM (200) TO (300); -CREATE TABLE beta_neg_p3 PARTITION OF beta_neg FOR VALUES FROM (350) TO (500); -CREATE TABLE beta_pos_p1 PARTITION OF beta_pos FOR VALUES IN ('0002', '0003'); -CREATE TABLE beta_pos_p2 PARTITION OF beta_pos FOR VALUES IN ('0004', '0006'); -CREATE TABLE beta_pos_p3 PARTITION OF beta_pos FOR VALUES IN ('0007', '0009'); -INSERT INTO beta_neg SELECT -1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(100, 149) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -INSERT INTO beta_neg SELECT -1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(200, 299) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -INSERT INTO beta_neg SELECT -1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(350, 499) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -INSERT INTO beta_pos SELECT 1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(100, 149) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -INSERT INTO beta_pos SELECT 1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(200, 299) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -INSERT INTO beta_pos SELECT 1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(350, 499) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); -ANALYZE beta; -EXPLAIN (COSTS OFF) -SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.b = t2.b) WHERE t1.b >= 125 AND t1.b < 225 ORDER BY t1.a, t1.b; - QUERY PLAN --------------------------------------------------------------------- - Sort - Sort Key: t1.a, t1.b - -> Append - -> Hash Join - Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.b = t2_1.b)) - -> Seq Scan on alpha_neg_p1 t1_1 - Filter: ((b >= 125) AND (b < 225)) - -> Hash - -> Seq Scan on beta_neg_p1 t2_1 - -> Hash Join - Hash Cond: ((t2_2.a = 
t1_2.a) AND (t2_2.b = t1_2.b)) - -> Seq Scan on beta_neg_p2 t2_2 - -> Hash - -> Seq Scan on alpha_neg_p2 t1_2 - Filter: ((b >= 125) AND (b < 225)) - -> Hash Join - Hash Cond: ((t2_4.a = t1_4.a) AND (t2_4.b = t1_4.b)) - -> Append - -> Seq Scan on beta_pos_p1 t2_4 - -> Seq Scan on beta_pos_p2 t2_5 - -> Seq Scan on beta_pos_p3 t2_6 - -> Hash - -> Append - -> Seq Scan on alpha_pos_p1 t1_4 - Filter: ((b >= 125) AND (b < 225)) - -> Seq Scan on alpha_pos_p2 t1_5 - Filter: ((b >= 125) AND (b < 225)) - -> Seq Scan on alpha_pos_p3 t1_6 - Filter: ((b >= 125) AND (b < 225)) -(29 rows) - -SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.b = t2.b) WHERE t1.b >= 125 AND t1.b < 225 ORDER BY t1.a, t1.b; - a | b | c | a | b | c -----+-----+------+----+-----+------ - -1 | 126 | 0006 | -1 | 126 | 0006 - -1 | 129 | 0009 | -1 | 129 | 0009 - -1 | 133 | 0003 | -1 | 133 | 0003 - -1 | 134 | 0004 | -1 | 134 | 0004 - -1 | 136 | 0006 | -1 | 136 | 0006 - -1 | 139 | 0009 | -1 | 139 | 0009 - -1 | 143 | 0003 | -1 | 143 | 0003 - -1 | 144 | 0004 | -1 | 144 | 0004 - -1 | 146 | 0006 | -1 | 146 | 0006 - -1 | 149 | 0009 | -1 | 149 | 0009 - -1 | 203 | 0003 | -1 | 203 | 0003 - -1 | 204 | 0004 | -1 | 204 | 0004 - -1 | 206 | 0006 | -1 | 206 | 0006 - -1 | 209 | 0009 | -1 | 209 | 0009 - -1 | 213 | 0003 | -1 | 213 | 0003 - -1 | 214 | 0004 | -1 | 214 | 0004 - -1 | 216 | 0006 | -1 | 216 | 0006 - -1 | 219 | 0009 | -1 | 219 | 0009 - -1 | 223 | 0003 | -1 | 223 | 0003 - -1 | 224 | 0004 | -1 | 224 | 0004 - 1 | 126 | 0006 | 1 | 126 | 0006 - 1 | 129 | 0009 | 1 | 129 | 0009 - 1 | 133 | 0003 | 1 | 133 | 0003 - 1 | 134 | 0004 | 1 | 134 | 0004 - 1 | 136 | 0006 | 1 | 136 | 0006 - 1 | 139 | 0009 | 1 | 139 | 0009 - 1 | 143 | 0003 | 1 | 143 | 0003 - 1 | 144 | 0004 | 1 | 144 | 0004 - 1 | 146 | 0006 | 1 | 146 | 0006 - 1 | 149 | 0009 | 1 | 149 | 0009 - 1 | 203 | 0003 | 1 | 203 | 0003 - 1 | 204 | 0004 | 1 | 204 | 0004 - 1 | 206 | 0006 | 1 | 206 | 0006 - 1 | 209 | 0009 | 1 | 209 | 0009 - 1 | 213 | 0003 | 1 | 213 | 0003 - 1 | 214 | 0004 | 1 | 214 | 0004 - 1 | 216 | 0006 | 1 | 216 | 0006 - 1 | 219 | 0009 | 1 | 219 | 0009 - 1 | 223 | 0003 | 1 | 223 | 0003 - 1 | 224 | 0004 | 1 | 224 | 0004 -(40 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE ((t1.b >= 100 AND t1.b < 110) OR (t1.b >= 200 AND t1.b < 210)) AND ((t2.b >= 100 AND t2.b < 110) OR (t2.b >= 200 AND t2.b < 210)) AND t1.c IN ('0004', '0009') ORDER BY t1.a, t1.b, t2.b; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------- - Sort - Sort Key: t1.a, t1.b, t2.b - -> Append - -> Hash Join - Hash Cond: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) - -> Append - -> Seq Scan on alpha_neg_p1 t1_2 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Seq Scan on alpha_neg_p2 t1_3 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Hash - -> Append - -> Seq Scan on beta_neg_p1 t2_2 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) - -> Seq Scan on beta_neg_p2 t2_3 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) - -> Nested Loop - Join Filter: ((t1_4.a = t2_4.a) AND (t1_4.c = t2_4.c)) - -> Seq Scan on alpha_pos_p2 t1_4 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Seq Scan on beta_pos_p2 t2_4 - Filter: (((b 
>= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) - -> Nested Loop - Join Filter: ((t1_5.a = t2_5.a) AND (t1_5.c = t2_5.c)) - -> Seq Scan on alpha_pos_p3 t1_5 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Seq Scan on beta_pos_p3 t2_5 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) -(28 rows) - -SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE ((t1.b >= 100 AND t1.b < 110) OR (t1.b >= 200 AND t1.b < 210)) AND ((t2.b >= 100 AND t2.b < 110) OR (t2.b >= 200 AND t2.b < 210)) AND t1.c IN ('0004', '0009') ORDER BY t1.a, t1.b, t2.b; - a | b | c | a | b | c -----+-----+------+----+-----+------ - -1 | 104 | 0004 | -1 | 104 | 0004 - -1 | 104 | 0004 | -1 | 204 | 0004 - -1 | 109 | 0009 | -1 | 109 | 0009 - -1 | 109 | 0009 | -1 | 209 | 0009 - -1 | 204 | 0004 | -1 | 104 | 0004 - -1 | 204 | 0004 | -1 | 204 | 0004 - -1 | 209 | 0009 | -1 | 109 | 0009 - -1 | 209 | 0009 | -1 | 209 | 0009 - 1 | 104 | 0004 | 1 | 104 | 0004 - 1 | 104 | 0004 | 1 | 204 | 0004 - 1 | 109 | 0009 | 1 | 109 | 0009 - 1 | 109 | 0009 | 1 | 209 | 0009 - 1 | 204 | 0004 | 1 | 104 | 0004 - 1 | 204 | 0004 | 1 | 204 | 0004 - 1 | 209 | 0009 | 1 | 109 | 0009 - 1 | 209 | 0009 | 1 | 209 | 0009 -(16 rows) - -EXPLAIN (COSTS OFF) -SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.b = t2.b AND t1.c = t2.c) WHERE ((t1.b >= 100 AND t1.b < 110) OR (t1.b >= 200 AND t1.b < 210)) AND ((t2.b >= 100 AND t2.b < 110) OR (t2.b >= 200 AND t2.b < 210)) AND t1.c IN ('0004', '0009') ORDER BY t1.a, t1.b; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------- - Sort - Sort Key: t1.a, t1.b - -> Append - -> Hash Join - Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.b = t2_1.b) AND (t1_1.c = t2_1.c)) - -> Seq Scan on alpha_neg_p1 t1_1 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Hash - -> Seq Scan on beta_neg_p1 t2_1 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) - -> Hash Join - Hash Cond: ((t1_2.a = t2_2.a) AND (t1_2.b = t2_2.b) AND (t1_2.c = t2_2.c)) - -> Seq Scan on alpha_neg_p2 t1_2 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Hash - -> Seq Scan on beta_neg_p2 t2_2 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) - -> Nested Loop - Join Filter: ((t1_3.a = t2_3.a) AND (t1_3.b = t2_3.b) AND (t1_3.c = t2_3.c)) - -> Seq Scan on alpha_pos_p2 t1_3 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Seq Scan on beta_pos_p2 t2_3 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) - -> Nested Loop - Join Filter: ((t1_4.a = t2_4.a) AND (t1_4.b = t2_4.b) AND (t1_4.c = t2_4.c)) - -> Seq Scan on alpha_pos_p3 t1_4 - Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) - -> Seq Scan on beta_pos_p3 t2_4 - Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) -(29 rows) - -SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.b = t2.b AND t1.c = t2.c) WHERE ((t1.b >= 100 AND t1.b < 110) OR (t1.b >= 200 AND t1.b < 210)) AND ((t2.b >= 100 AND t2.b < 110) OR (t2.b >= 200 AND t2.b < 210)) AND t1.c IN ('0004', '0009') ORDER BY t1.a, t1.b; - a | b | c | a | b | c -----+-----+------+----+-----+------ - -1 | 104 | 0004 
| -1 | 104 | 0004 - -1 | 109 | 0009 | -1 | 109 | 0009 - -1 | 204 | 0004 | -1 | 204 | 0004 - -1 | 209 | 0009 | -1 | 209 | 0009 - 1 | 104 | 0004 | 1 | 104 | 0004 - 1 | 109 | 0009 | 1 | 109 | 0009 - 1 | 204 | 0004 | 1 | 204 | 0004 - 1 | 209 | 0009 | 1 | 209 | 0009 -(8 rows) - --- partitionwise join with fractional paths -CREATE TABLE fract_t (id BIGINT, PRIMARY KEY (id)) PARTITION BY RANGE (id); -CREATE TABLE fract_t0 PARTITION OF fract_t FOR VALUES FROM ('0') TO ('1000'); -CREATE TABLE fract_t1 PARTITION OF fract_t FOR VALUES FROM ('1000') TO ('2000'); --- insert data -INSERT INTO fract_t (id) (SELECT generate_series(0, 1999)); -ANALYZE fract_t; --- verify plan; nested index only scans -SET max_parallel_workers_per_gather = 0; -SET enable_partitionwise_join = on; -EXPLAIN (COSTS OFF) -SELECT x.id, y.id FROM fract_t x LEFT JOIN fract_t y USING (id) ORDER BY x.id ASC LIMIT 10; - QUERY PLAN ------------------------------------------------------------------------ - Limit - -> Merge Append - Sort Key: x.id - -> Merge Left Join - Merge Cond: (x_1.id = y_1.id) - -> Index Only Scan using fract_t0_pkey on fract_t0 x_1 - -> Index Only Scan using fract_t0_pkey on fract_t0 y_1 - -> Merge Left Join - Merge Cond: (x_2.id = y_2.id) - -> Index Only Scan using fract_t1_pkey on fract_t1 x_2 - -> Index Only Scan using fract_t1_pkey on fract_t1 y_2 -(11 rows) - -EXPLAIN (COSTS OFF) -SELECT x.id, y.id FROM fract_t x LEFT JOIN fract_t y USING (id) ORDER BY x.id DESC LIMIT 10; - QUERY PLAN --------------------------------------------------------------------------------- - Limit - -> Merge Append - Sort Key: x.id DESC - -> Nested Loop Left Join - -> Index Only Scan Backward using fract_t0_pkey on fract_t0 x_1 - -> Index Only Scan using fract_t0_pkey on fract_t0 y_1 - Index Cond: (id = x_1.id) - -> Nested Loop Left Join - -> Index Only Scan Backward using fract_t1_pkey on fract_t1 x_2 - -> Index Only Scan using fract_t1_pkey on fract_t1 y_2 - Index Cond: (id = x_2.id) -(11 rows) - --- cleanup -DROP TABLE fract_t; -RESET max_parallel_workers_per_gather; -RESET enable_partitionwise_join; +WARNING: terminating connection because of crash of another server process +DETAIL: The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory. +HINT: In a moment you should be able to reconnect to the database and repeat your command. +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. 
+connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/partition_prune.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/partition_prune.out --- /tmp/cirrus-ci-build/src/test/regress/expected/partition_prune.out 2024-09-11 00:19:52.093233906 +0000 +++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/partition_prune.out 2024-09-11 00:29:43.220157051 +0000 @@ -2391,2052 +2391,10 @@ (13 rows) select explain_parallel_append('execute ab_q5 (2, 3, 3)'); - explain_parallel_append ------------------------------------------------------------------------------------- - Finalize Aggregate (actual rows=N loops=N) - -> Gather (actual rows=N loops=N) - Workers Planned: 2 - Workers Launched: N - -> Partial Aggregate (actual rows=N loops=N) - -> Parallel Append (actual rows=N loops=N) - Subplans Removed: 3 - -> Parallel Seq Scan on ab_a2_b1 ab_1 (actual rows=N loops=N) - Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) - -> Parallel Seq Scan on ab_a2_b2 ab_2 (actual rows=N loops=N) - Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) - -> Parallel Seq Scan on ab_a2_b3 ab_3 (actual rows=N loops=N) - Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) - -> Parallel Seq Scan on ab_a3_b1 ab_4 (actual rows=N loops=N) - Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) - -> Parallel Seq Scan on ab_a3_b2 ab_5 (actual rows=N loops=N) - Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) - -> Parallel Seq Scan on ab_a3_b3 ab_6 (actual rows=N loops=N) - Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) -(19 rows) - --- Try some params whose values do not belong to any partition. -select explain_parallel_append('execute ab_q5 (33, 44, 55)'); - explain_parallel_append ------------------------------------------------------------ - Finalize Aggregate (actual rows=N loops=N) - -> Gather (actual rows=N loops=N) - Workers Planned: 2 - Workers Launched: N - -> Partial Aggregate (actual rows=N loops=N) - -> Parallel Append (actual rows=N loops=N) - Subplans Removed: 9 -(7 rows) - --- Test Parallel Append with PARAM_EXEC Params -select explain_parallel_append('select count(*) from ab where (a = (select 1) or a = (select 3)) and b = 2'); - explain_parallel_append ------------------------------------------------------------------------------------------------- - Aggregate (actual rows=N loops=N) - InitPlan 1 - -> Result (actual rows=N loops=N) - InitPlan 2 - -> Result (actual rows=N loops=N) - -> Gather (actual rows=N loops=N) - Workers Planned: 2 - Workers Launched: N - -> Parallel Append (actual rows=N loops=N) - -> Parallel Seq Scan on ab_a1_b2 ab_1 (actual rows=N loops=N) - Filter: ((b = 2) AND ((a = (InitPlan 1).col1) OR (a = (InitPlan 2).col1))) - -> Parallel Seq Scan on ab_a2_b2 ab_2 (never executed) - Filter: ((b = 2) AND ((a = (InitPlan 1).col1) OR (a = (InitPlan 2).col1))) - -> Parallel Seq Scan on ab_a3_b2 ab_3 (actual rows=N loops=N) - Filter: ((b = 2) AND ((a = (InitPlan 1).col1) OR (a = (InitPlan 2).col1))) -(15 rows) - --- Test pruning during parallel nested loop query -create table lprt_a (a int not null); --- Insert some values we won't find in ab -insert into lprt_a select 0 from generate_series(1,100); --- and insert some values that we should find. 
-insert into lprt_a values(1),(1); -analyze lprt_a; -create index ab_a2_b1_a_idx on ab_a2_b1 (a); -create index ab_a2_b2_a_idx on ab_a2_b2 (a); -create index ab_a2_b3_a_idx on ab_a2_b3 (a); -create index ab_a1_b1_a_idx on ab_a1_b1 (a); -create index ab_a1_b2_a_idx on ab_a1_b2 (a); -create index ab_a1_b3_a_idx on ab_a1_b3 (a); -create index ab_a3_b1_a_idx on ab_a3_b1 (a); -create index ab_a3_b2_a_idx on ab_a3_b2 (a); -create index ab_a3_b3_a_idx on ab_a3_b3 (a); -set enable_hashjoin = 0; -set enable_mergejoin = 0; -set enable_memoize = 0; -select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(0, 0, 1)'); - explain_parallel_append --------------------------------------------------------------------------------------------------------- - Finalize Aggregate (actual rows=N loops=N) - -> Gather (actual rows=N loops=N) - Workers Planned: 1 - Workers Launched: N - -> Partial Aggregate (actual rows=N loops=N) - -> Nested Loop (actual rows=N loops=N) - -> Parallel Seq Scan on lprt_a a (actual rows=N loops=N) - Filter: (a = ANY ('{0,0,1}'::integer[])) - -> Append (actual rows=N loops=N) - -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 ab_1 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 ab_2 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 ab_3 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 ab_4 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 ab_5 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 ab_6 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 ab_7 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 ab_8 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 ab_9 (never executed) - Index Cond: (a = a.a) -(27 rows) - --- Ensure the same partitions are pruned when we make the nested loop --- parameter an Expr rather than a plain Param. 
-select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a + 0 where a.a in(0, 0, 1)'); - explain_parallel_append --------------------------------------------------------------------------------------------------------- - Finalize Aggregate (actual rows=N loops=N) - -> Gather (actual rows=N loops=N) - Workers Planned: 1 - Workers Launched: N - -> Partial Aggregate (actual rows=N loops=N) - -> Nested Loop (actual rows=N loops=N) - -> Parallel Seq Scan on lprt_a a (actual rows=N loops=N) - Filter: (a = ANY ('{0,0,1}'::integer[])) - -> Append (actual rows=N loops=N) - -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 ab_1 (actual rows=N loops=N) - Index Cond: (a = (a.a + 0)) - -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 ab_2 (actual rows=N loops=N) - Index Cond: (a = (a.a + 0)) - -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 ab_3 (actual rows=N loops=N) - Index Cond: (a = (a.a + 0)) - -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 ab_4 (never executed) - Index Cond: (a = (a.a + 0)) - -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 ab_5 (never executed) - Index Cond: (a = (a.a + 0)) - -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 ab_6 (never executed) - Index Cond: (a = (a.a + 0)) - -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 ab_7 (never executed) - Index Cond: (a = (a.a + 0)) - -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 ab_8 (never executed) - Index Cond: (a = (a.a + 0)) - -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 ab_9 (never executed) - Index Cond: (a = (a.a + 0)) -(27 rows) - -insert into lprt_a values(3),(3); -select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(1, 0, 3)'); - explain_parallel_append --------------------------------------------------------------------------------------------------------- - Finalize Aggregate (actual rows=N loops=N) - -> Gather (actual rows=N loops=N) - Workers Planned: 1 - Workers Launched: N - -> Partial Aggregate (actual rows=N loops=N) - -> Nested Loop (actual rows=N loops=N) - -> Parallel Seq Scan on lprt_a a (actual rows=N loops=N) - Filter: (a = ANY ('{1,0,3}'::integer[])) - -> Append (actual rows=N loops=N) - -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 ab_1 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 ab_2 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 ab_3 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 ab_4 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 ab_5 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 ab_6 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 ab_7 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 ab_8 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 ab_9 (actual rows=N loops=N) - Index Cond: (a = a.a) -(27 rows) - -select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(1, 0, 0)'); - explain_parallel_append --------------------------------------------------------------------------------------------------------- - Finalize Aggregate (actual rows=N loops=N) - -> Gather (actual rows=N loops=N) - Workers Planned: 1 - Workers Launched: N - -> Partial Aggregate (actual rows=N loops=N) - -> Nested Loop (actual rows=N loops=N) - 
-> Parallel Seq Scan on lprt_a a (actual rows=N loops=N) - Filter: (a = ANY ('{1,0,0}'::integer[])) - Rows Removed by Filter: N - -> Append (actual rows=N loops=N) - -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 ab_1 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 ab_2 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 ab_3 (actual rows=N loops=N) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 ab_4 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 ab_5 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 ab_6 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 ab_7 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 ab_8 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 ab_9 (never executed) - Index Cond: (a = a.a) -(28 rows) - -delete from lprt_a where a = 1; -select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(1, 0, 0)'); - explain_parallel_append -------------------------------------------------------------------------------------------------- - Finalize Aggregate (actual rows=N loops=N) - -> Gather (actual rows=N loops=N) - Workers Planned: 1 - Workers Launched: N - -> Partial Aggregate (actual rows=N loops=N) - -> Nested Loop (actual rows=N loops=N) - -> Parallel Seq Scan on lprt_a a (actual rows=N loops=N) - Filter: (a = ANY ('{1,0,0}'::integer[])) - Rows Removed by Filter: N - -> Append (actual rows=N loops=N) - -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 ab_1 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 ab_2 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 ab_3 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 ab_4 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 ab_5 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 ab_6 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 ab_7 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 ab_8 (never executed) - Index Cond: (a = a.a) - -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 ab_9 (never executed) - Index Cond: (a = a.a) -(28 rows) - -reset enable_hashjoin; -reset enable_mergejoin; -reset enable_memoize; -reset parallel_setup_cost; -reset parallel_tuple_cost; -reset min_parallel_table_scan_size; -reset max_parallel_workers_per_gather; --- Test run-time partition pruning with an initplan -explain (analyze, costs off, summary off, timing off) -select * from ab where a = (select max(a) from lprt_a) and b = (select max(a)-1 from lprt_a); - QUERY PLAN -------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - InitPlan 1 - -> Aggregate (actual rows=1 loops=1) - -> Seq Scan on lprt_a (actual rows=102 loops=1) - InitPlan 2 - -> Aggregate (actual rows=1 loops=1) - -> Seq Scan on lprt_a lprt_a_1 (actual rows=102 loops=1) - -> Bitmap Heap Scan on ab_a1_b1 ab_1 (never executed) - Recheck Cond: (a = (InitPlan 1).col1) - Filter: (b = (InitPlan 2).col1) - -> Bitmap Index Scan on ab_a1_b1_a_idx (never executed) - Index Cond: (a = (InitPlan 1).col1) - -> Bitmap 
Heap Scan on ab_a1_b2 ab_2 (never executed) - Recheck Cond: (a = (InitPlan 1).col1) - Filter: (b = (InitPlan 2).col1) - -> Bitmap Index Scan on ab_a1_b2_a_idx (never executed) - Index Cond: (a = (InitPlan 1).col1) - -> Bitmap Heap Scan on ab_a1_b3 ab_3 (never executed) - Recheck Cond: (a = (InitPlan 1).col1) - Filter: (b = (InitPlan 2).col1) - -> Bitmap Index Scan on ab_a1_b3_a_idx (never executed) - Index Cond: (a = (InitPlan 1).col1) - -> Bitmap Heap Scan on ab_a2_b1 ab_4 (never executed) - Recheck Cond: (a = (InitPlan 1).col1) - Filter: (b = (InitPlan 2).col1) - -> Bitmap Index Scan on ab_a2_b1_a_idx (never executed) - Index Cond: (a = (InitPlan 1).col1) - -> Bitmap Heap Scan on ab_a2_b2 ab_5 (never executed) - Recheck Cond: (a = (InitPlan 1).col1) - Filter: (b = (InitPlan 2).col1) - -> Bitmap Index Scan on ab_a2_b2_a_idx (never executed) - Index Cond: (a = (InitPlan 1).col1) - -> Bitmap Heap Scan on ab_a2_b3 ab_6 (never executed) - Recheck Cond: (a = (InitPlan 1).col1) - Filter: (b = (InitPlan 2).col1) - -> Bitmap Index Scan on ab_a2_b3_a_idx (never executed) - Index Cond: (a = (InitPlan 1).col1) - -> Bitmap Heap Scan on ab_a3_b1 ab_7 (never executed) - Recheck Cond: (a = (InitPlan 1).col1) - Filter: (b = (InitPlan 2).col1) - -> Bitmap Index Scan on ab_a3_b1_a_idx (never executed) - Index Cond: (a = (InitPlan 1).col1) - -> Bitmap Heap Scan on ab_a3_b2 ab_8 (actual rows=0 loops=1) - Recheck Cond: (a = (InitPlan 1).col1) - Filter: (b = (InitPlan 2).col1) - -> Bitmap Index Scan on ab_a3_b2_a_idx (actual rows=0 loops=1) - Index Cond: (a = (InitPlan 1).col1) - -> Bitmap Heap Scan on ab_a3_b3 ab_9 (never executed) - Recheck Cond: (a = (InitPlan 1).col1) - Filter: (b = (InitPlan 2).col1) - -> Bitmap Index Scan on ab_a3_b3_a_idx (never executed) - Index Cond: (a = (InitPlan 1).col1) -(52 rows) - --- Test run-time partition pruning with UNION ALL parents -explain (analyze, costs off, summary off, timing off) -select * from (select * from ab where a = 1 union all select * from ab) ab where b = (select 1); - QUERY PLAN -------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - InitPlan 1 - -> Result (actual rows=1 loops=1) - -> Append (actual rows=0 loops=1) - -> Bitmap Heap Scan on ab_a1_b1 ab_11 (actual rows=0 loops=1) - Recheck Cond: (a = 1) - Filter: (b = (InitPlan 1).col1) - -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b2 ab_12 (never executed) - Recheck Cond: (a = 1) - Filter: (b = (InitPlan 1).col1) - -> Bitmap Index Scan on ab_a1_b2_a_idx (never executed) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b3 ab_13 (never executed) - Recheck Cond: (a = 1) - Filter: (b = (InitPlan 1).col1) - -> Bitmap Index Scan on ab_a1_b3_a_idx (never executed) - Index Cond: (a = 1) - -> Seq Scan on ab_a1_b1 ab_1 (actual rows=0 loops=1) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a1_b2 ab_2 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a1_b3 ab_3 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a2_b1 ab_4 (actual rows=0 loops=1) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a2_b2 ab_5 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a2_b3 ab_6 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a3_b1 ab_7 (actual rows=0 loops=1) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a3_b2 ab_8 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a3_b3 ab_9 
(never executed) - Filter: (b = (InitPlan 1).col1) -(37 rows) - --- A case containing a UNION ALL with a non-partitioned child. -explain (analyze, costs off, summary off, timing off) -select * from (select * from ab where a = 1 union all (values(10,5)) union all select * from ab) ab where b = (select 1); - QUERY PLAN -------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - InitPlan 1 - -> Result (actual rows=1 loops=1) - -> Append (actual rows=0 loops=1) - -> Bitmap Heap Scan on ab_a1_b1 ab_11 (actual rows=0 loops=1) - Recheck Cond: (a = 1) - Filter: (b = (InitPlan 1).col1) - -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b2 ab_12 (never executed) - Recheck Cond: (a = 1) - Filter: (b = (InitPlan 1).col1) - -> Bitmap Index Scan on ab_a1_b2_a_idx (never executed) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b3 ab_13 (never executed) - Recheck Cond: (a = 1) - Filter: (b = (InitPlan 1).col1) - -> Bitmap Index Scan on ab_a1_b3_a_idx (never executed) - Index Cond: (a = 1) - -> Result (actual rows=0 loops=1) - One-Time Filter: (5 = (InitPlan 1).col1) - -> Seq Scan on ab_a1_b1 ab_1 (actual rows=0 loops=1) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a1_b2 ab_2 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a1_b3 ab_3 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a2_b1 ab_4 (actual rows=0 loops=1) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a2_b2 ab_5 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a2_b3 ab_6 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a3_b1 ab_7 (actual rows=0 loops=1) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a3_b2 ab_8 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a3_b3 ab_9 (never executed) - Filter: (b = (InitPlan 1).col1) -(39 rows) - --- Another UNION ALL test, but containing a mix of exec init and exec run-time pruning. -create table xy_1 (x int, y int); -insert into xy_1 values(100,-10); -set enable_bitmapscan = 0; -set enable_indexscan = 0; -prepare ab_q6 as -select * from ( - select tableoid::regclass,a,b from ab -union all - select tableoid::regclass,x,y from xy_1 -union all - select tableoid::regclass,a,b from ab -) ab where a = $1 and b = (select -10); --- Ensure the xy_1 subplan is not pruned. -explain (analyze, costs off, summary off, timing off) execute ab_q6(1); - QUERY PLAN --------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 12 - InitPlan 1 - -> Result (actual rows=1 loops=1) - -> Seq Scan on ab_a1_b1 ab_1 (never executed) - Filter: ((a = $1) AND (b = (InitPlan 1).col1)) - -> Seq Scan on ab_a1_b2 ab_2 (never executed) - Filter: ((a = $1) AND (b = (InitPlan 1).col1)) - -> Seq Scan on ab_a1_b3 ab_3 (never executed) - Filter: ((a = $1) AND (b = (InitPlan 1).col1)) - -> Seq Scan on xy_1 (actual rows=0 loops=1) - Filter: ((x = $1) AND (y = (InitPlan 1).col1)) - Rows Removed by Filter: 1 - -> Seq Scan on ab_a1_b1 ab_4 (never executed) - Filter: ((a = $1) AND (b = (InitPlan 1).col1)) - -> Seq Scan on ab_a1_b2 ab_5 (never executed) - Filter: ((a = $1) AND (b = (InitPlan 1).col1)) - -> Seq Scan on ab_a1_b3 ab_6 (never executed) - Filter: ((a = $1) AND (b = (InitPlan 1).col1)) -(19 rows) - --- Ensure we see just the xy_1 row. 
-execute ab_q6(100); - tableoid | a | b -----------+-----+----- - xy_1 | 100 | -10 -(1 row) - -reset enable_bitmapscan; -reset enable_indexscan; -deallocate ab_q1; -deallocate ab_q2; -deallocate ab_q3; -deallocate ab_q4; -deallocate ab_q5; -deallocate ab_q6; --- UPDATE on a partition subtree has been seen to have problems. -insert into ab values (1,2); -select explain_analyze(' -update ab_a1 set b = 3 from ab where ab.a = 1 and ab.a = ab_a1.a;'); - explain_analyze -------------------------------------------------------------------------------------------- - Update on ab_a1 (actual rows=0 loops=1) - Update on ab_a1_b1 ab_a1_1 - Update on ab_a1_b2 ab_a1_2 - Update on ab_a1_b3 ab_a1_3 - -> Nested Loop (actual rows=1 loops=1) - -> Append (actual rows=1 loops=1) - -> Bitmap Heap Scan on ab_a1_b1 ab_a1_1 (actual rows=0 loops=1) - Recheck Cond: (a = 1) - -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b2 ab_a1_2 (actual rows=1 loops=1) - Recheck Cond: (a = 1) - Heap Blocks: exact=1 - -> Bitmap Index Scan on ab_a1_b2_a_idx (actual rows=1 loops=1) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b3 ab_a1_3 (actual rows=0 loops=1) - Recheck Cond: (a = 1) - Heap Blocks: exact=1 - -> Bitmap Index Scan on ab_a1_b3_a_idx (actual rows=1 loops=1) - Index Cond: (a = 1) - -> Materialize (actual rows=1 loops=1) - Storage: Memory Maximum Storage: NkB - -> Append (actual rows=1 loops=1) - -> Bitmap Heap Scan on ab_a1_b1 ab_1 (actual rows=0 loops=1) - Recheck Cond: (a = 1) - -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b2 ab_2 (actual rows=1 loops=1) - Recheck Cond: (a = 1) - Heap Blocks: exact=1 - -> Bitmap Index Scan on ab_a1_b2_a_idx (actual rows=1 loops=1) - Index Cond: (a = 1) - -> Bitmap Heap Scan on ab_a1_b3 ab_3 (actual rows=0 loops=1) - Recheck Cond: (a = 1) - Heap Blocks: exact=1 - -> Bitmap Index Scan on ab_a1_b3_a_idx (actual rows=1 loops=1) - Index Cond: (a = 1) -(37 rows) - -table ab; - a | b ----+--- - 1 | 3 -(1 row) - --- Test UPDATE where source relation has run-time pruning enabled -truncate ab; -insert into ab values (1, 1), (1, 2), (1, 3), (2, 1); -select explain_analyze(' -update ab_a1 set b = 3 from ab_a2 where ab_a2.b = (select 1);'); - explain_analyze ------------------------------------------------------------------------------- - Update on ab_a1 (actual rows=0 loops=1) - Update on ab_a1_b1 ab_a1_1 - Update on ab_a1_b2 ab_a1_2 - Update on ab_a1_b3 ab_a1_3 - InitPlan 1 - -> Result (actual rows=1 loops=1) - -> Nested Loop (actual rows=3 loops=1) - -> Append (actual rows=3 loops=1) - -> Seq Scan on ab_a1_b1 ab_a1_1 (actual rows=1 loops=1) - -> Seq Scan on ab_a1_b2 ab_a1_2 (actual rows=1 loops=1) - -> Seq Scan on ab_a1_b3 ab_a1_3 (actual rows=1 loops=1) - -> Materialize (actual rows=1 loops=3) - Storage: Memory Maximum Storage: NkB - -> Append (actual rows=1 loops=1) - -> Seq Scan on ab_a2_b1 ab_a2_1 (actual rows=1 loops=1) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a2_b2 ab_a2_2 (never executed) - Filter: (b = (InitPlan 1).col1) - -> Seq Scan on ab_a2_b3 ab_a2_3 (never executed) - Filter: (b = (InitPlan 1).col1) -(20 rows) - -select tableoid::regclass, * from ab; - tableoid | a | b -----------+---+--- - ab_a1_b3 | 1 | 3 - ab_a1_b3 | 1 | 3 - ab_a1_b3 | 1 | 3 - ab_a2_b1 | 2 | 1 -(4 rows) - -drop table ab, lprt_a; --- Join -create table tbl1(col1 int); -insert into tbl1 values (501), (505); --- Basic table -create table tprt (col1 int) 
partition by range (col1); -create table tprt_1 partition of tprt for values from (1) to (501); -create table tprt_2 partition of tprt for values from (501) to (1001); -create table tprt_3 partition of tprt for values from (1001) to (2001); -create table tprt_4 partition of tprt for values from (2001) to (3001); -create table tprt_5 partition of tprt for values from (3001) to (4001); -create table tprt_6 partition of tprt for values from (4001) to (5001); -create index tprt1_idx on tprt_1 (col1); -create index tprt2_idx on tprt_2 (col1); -create index tprt3_idx on tprt_3 (col1); -create index tprt4_idx on tprt_4 (col1); -create index tprt5_idx on tprt_5 (col1); -create index tprt6_idx on tprt_6 (col1); -insert into tprt values (10), (20), (501), (502), (505), (1001), (4500); -set enable_hashjoin = off; -set enable_mergejoin = off; -explain (analyze, costs off, summary off, timing off) -select * from tbl1 join tprt on tbl1.col1 > tprt.col1; - QUERY PLAN --------------------------------------------------------------------------- - Nested Loop (actual rows=6 loops=1) - -> Seq Scan on tbl1 (actual rows=2 loops=1) - -> Append (actual rows=3 loops=2) - -> Index Scan using tprt1_idx on tprt_1 (actual rows=2 loops=2) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt2_idx on tprt_2 (actual rows=2 loops=1) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt3_idx on tprt_3 (never executed) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt4_idx on tprt_4 (never executed) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt5_idx on tprt_5 (never executed) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt6_idx on tprt_6 (never executed) - Index Cond: (col1 < tbl1.col1) -(15 rows) - -explain (analyze, costs off, summary off, timing off) -select * from tbl1 join tprt on tbl1.col1 = tprt.col1; - QUERY PLAN --------------------------------------------------------------------------- - Nested Loop (actual rows=2 loops=1) - -> Seq Scan on tbl1 (actual rows=2 loops=1) - -> Append (actual rows=1 loops=2) - -> Index Scan using tprt1_idx on tprt_1 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt2_idx on tprt_2 (actual rows=1 loops=2) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt3_idx on tprt_3 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt4_idx on tprt_4 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt5_idx on tprt_5 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt6_idx on tprt_6 (never executed) - Index Cond: (col1 = tbl1.col1) -(15 rows) - -select tbl1.col1, tprt.col1 from tbl1 -inner join tprt on tbl1.col1 > tprt.col1 -order by tbl1.col1, tprt.col1; - col1 | col1 -------+------ - 501 | 10 - 501 | 20 - 505 | 10 - 505 | 20 - 505 | 501 - 505 | 502 -(6 rows) - -select tbl1.col1, tprt.col1 from tbl1 -inner join tprt on tbl1.col1 = tprt.col1 -order by tbl1.col1, tprt.col1; - col1 | col1 -------+------ - 501 | 501 - 505 | 505 -(2 rows) - --- Multiple partitions -insert into tbl1 values (1001), (1010), (1011); -explain (analyze, costs off, summary off, timing off) -select * from tbl1 inner join tprt on tbl1.col1 > tprt.col1; - QUERY PLAN --------------------------------------------------------------------------- - Nested Loop (actual rows=23 loops=1) - -> Seq Scan on tbl1 (actual rows=5 loops=1) - -> Append (actual rows=5 loops=5) - -> Index Scan using tprt1_idx on tprt_1 (actual rows=2 loops=5) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using 
tprt2_idx on tprt_2 (actual rows=3 loops=4) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt3_idx on tprt_3 (actual rows=1 loops=2) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt4_idx on tprt_4 (never executed) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt5_idx on tprt_5 (never executed) - Index Cond: (col1 < tbl1.col1) - -> Index Scan using tprt6_idx on tprt_6 (never executed) - Index Cond: (col1 < tbl1.col1) -(15 rows) - -explain (analyze, costs off, summary off, timing off) -select * from tbl1 inner join tprt on tbl1.col1 = tprt.col1; - QUERY PLAN --------------------------------------------------------------------------- - Nested Loop (actual rows=3 loops=1) - -> Seq Scan on tbl1 (actual rows=5 loops=1) - -> Append (actual rows=1 loops=5) - -> Index Scan using tprt1_idx on tprt_1 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt2_idx on tprt_2 (actual rows=1 loops=2) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt3_idx on tprt_3 (actual rows=0 loops=3) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt4_idx on tprt_4 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt5_idx on tprt_5 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt6_idx on tprt_6 (never executed) - Index Cond: (col1 = tbl1.col1) -(15 rows) - -select tbl1.col1, tprt.col1 from tbl1 -inner join tprt on tbl1.col1 > tprt.col1 -order by tbl1.col1, tprt.col1; - col1 | col1 -------+------ - 501 | 10 - 501 | 20 - 505 | 10 - 505 | 20 - 505 | 501 - 505 | 502 - 1001 | 10 - 1001 | 20 - 1001 | 501 - 1001 | 502 - 1001 | 505 - 1010 | 10 - 1010 | 20 - 1010 | 501 - 1010 | 502 - 1010 | 505 - 1010 | 1001 - 1011 | 10 - 1011 | 20 - 1011 | 501 - 1011 | 502 - 1011 | 505 - 1011 | 1001 -(23 rows) - -select tbl1.col1, tprt.col1 from tbl1 -inner join tprt on tbl1.col1 = tprt.col1 -order by tbl1.col1, tprt.col1; - col1 | col1 -------+------ - 501 | 501 - 505 | 505 - 1001 | 1001 -(3 rows) - --- Last partition -delete from tbl1; -insert into tbl1 values (4400); -explain (analyze, costs off, summary off, timing off) -select * from tbl1 join tprt on tbl1.col1 < tprt.col1; - QUERY PLAN --------------------------------------------------------------------------- - Nested Loop (actual rows=1 loops=1) - -> Seq Scan on tbl1 (actual rows=1 loops=1) - -> Append (actual rows=1 loops=1) - -> Index Scan using tprt1_idx on tprt_1 (never executed) - Index Cond: (col1 > tbl1.col1) - -> Index Scan using tprt2_idx on tprt_2 (never executed) - Index Cond: (col1 > tbl1.col1) - -> Index Scan using tprt3_idx on tprt_3 (never executed) - Index Cond: (col1 > tbl1.col1) - -> Index Scan using tprt4_idx on tprt_4 (never executed) - Index Cond: (col1 > tbl1.col1) - -> Index Scan using tprt5_idx on tprt_5 (never executed) - Index Cond: (col1 > tbl1.col1) - -> Index Scan using tprt6_idx on tprt_6 (actual rows=1 loops=1) - Index Cond: (col1 > tbl1.col1) -(15 rows) - -select tbl1.col1, tprt.col1 from tbl1 -inner join tprt on tbl1.col1 < tprt.col1 -order by tbl1.col1, tprt.col1; - col1 | col1 -------+------ - 4400 | 4500 -(1 row) - --- No matching partition -delete from tbl1; -insert into tbl1 values (10000); -explain (analyze, costs off, summary off, timing off) -select * from tbl1 join tprt on tbl1.col1 = tprt.col1; - QUERY PLAN -------------------------------------------------------------------- - Nested Loop (actual rows=0 loops=1) - -> Seq Scan on tbl1 (actual rows=1 loops=1) - -> Append (actual rows=0 loops=1) - -> Index Scan using 
tprt1_idx on tprt_1 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt2_idx on tprt_2 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt3_idx on tprt_3 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt4_idx on tprt_4 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt5_idx on tprt_5 (never executed) - Index Cond: (col1 = tbl1.col1) - -> Index Scan using tprt6_idx on tprt_6 (never executed) - Index Cond: (col1 = tbl1.col1) -(15 rows) - -select tbl1.col1, tprt.col1 from tbl1 -inner join tprt on tbl1.col1 = tprt.col1 -order by tbl1.col1, tprt.col1; - col1 | col1 -------+------ -(0 rows) - -drop table tbl1, tprt; --- Test with columns defined in varying orders between each level -create table part_abc (a int not null, b int not null, c int not null) partition by list (a); -create table part_bac (b int not null, a int not null, c int not null) partition by list (b); -create table part_cab (c int not null, a int not null, b int not null) partition by list (c); -create table part_abc_p1 (a int not null, b int not null, c int not null); -alter table part_abc attach partition part_bac for values in(1); -alter table part_bac attach partition part_cab for values in(2); -alter table part_cab attach partition part_abc_p1 for values in(3); -prepare part_abc_q1 (int, int, int) as -select * from part_abc where a = $1 and b = $2 and c = $3; --- Single partition should be scanned. -explain (analyze, costs off, summary off, timing off) execute part_abc_q1 (1, 2, 3); - QUERY PLAN ----------------------------------------------------------- - Seq Scan on part_abc_p1 part_abc (actual rows=0 loops=1) - Filter: ((a = $1) AND (b = $2) AND (c = $3)) -(2 rows) - -deallocate part_abc_q1; -drop table part_abc; --- Ensure that an Append node properly handles a sub-partitioned table --- matching without any of its leaf partitions matching the clause. -create table listp (a int, b int) partition by list (a); -create table listp_1 partition of listp for values in(1) partition by list (b); -create table listp_1_1 partition of listp_1 for values in(1); -create table listp_2 partition of listp for values in(2) partition by list (b); -create table listp_2_1 partition of listp_2 for values in(2); -select * from listp where b = 1; - a | b ----+--- -(0 rows) - --- Ensure that an Append node properly can handle selection of all first level --- partitions before finally detecting the correct set of 2nd level partitions --- which match the given parameter. -prepare q1 (int,int) as select * from listp where b in ($1,$2); -explain (analyze, costs off, summary off, timing off) execute q1 (1,1); - QUERY PLAN -------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 1 - -> Seq Scan on listp_1_1 listp_1 (actual rows=0 loops=1) - Filter: (b = ANY (ARRAY[$1, $2])) -(4 rows) - -explain (analyze, costs off, summary off, timing off) execute q1 (2,2); - QUERY PLAN -------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 1 - -> Seq Scan on listp_2_1 listp_1 (actual rows=0 loops=1) - Filter: (b = ANY (ARRAY[$1, $2])) -(4 rows) - --- Try with no matching partitions. 
-explain (analyze, costs off, summary off, timing off) execute q1 (0,0); - QUERY PLAN --------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 2 -(2 rows) - -deallocate q1; --- Test more complex cases where a not-equal condition further eliminates partitions. -prepare q1 (int,int,int,int) as select * from listp where b in($1,$2) and $3 <> b and $4 <> b; --- Both partitions allowed by IN clause, but one disallowed by <> clause -explain (analyze, costs off, summary off, timing off) execute q1 (1,2,2,0); - QUERY PLAN -------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 1 - -> Seq Scan on listp_1_1 listp_1 (actual rows=0 loops=1) - Filter: ((b = ANY (ARRAY[$1, $2])) AND ($3 <> b) AND ($4 <> b)) -(4 rows) - --- Both partitions allowed by IN clause, then both excluded again by <> clauses. -explain (analyze, costs off, summary off, timing off) execute q1 (1,2,2,1); - QUERY PLAN --------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 2 -(2 rows) - --- Ensure Params that evaluate to NULL properly prune away all partitions -explain (analyze, costs off, summary off, timing off) -select * from listp where a = (select null::int); - QUERY PLAN ------------------------------------------------------- - Append (actual rows=0 loops=1) - InitPlan 1 - -> Result (actual rows=1 loops=1) - -> Seq Scan on listp_1_1 listp_1 (never executed) - Filter: (a = (InitPlan 1).col1) - -> Seq Scan on listp_2_1 listp_2 (never executed) - Filter: (a = (InitPlan 1).col1) -(7 rows) - -drop table listp; --- --- check that stable query clauses are only used in run-time pruning --- -create table stable_qual_pruning (a timestamp) partition by range (a); -create table stable_qual_pruning1 partition of stable_qual_pruning - for values from ('2000-01-01') to ('2000-02-01'); -create table stable_qual_pruning2 partition of stable_qual_pruning - for values from ('2000-02-01') to ('2000-03-01'); -create table stable_qual_pruning3 partition of stable_qual_pruning - for values from ('3000-02-01') to ('3000-03-01'); --- comparison against a stable value requires run-time pruning -explain (analyze, costs off, summary off, timing off) -select * from stable_qual_pruning where a < localtimestamp; - QUERY PLAN --------------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 1 - -> Seq Scan on stable_qual_pruning1 stable_qual_pruning_1 (actual rows=0 loops=1) - Filter: (a < LOCALTIMESTAMP) - -> Seq Scan on stable_qual_pruning2 stable_qual_pruning_2 (actual rows=0 loops=1) - Filter: (a < LOCALTIMESTAMP) -(6 rows) - --- timestamp < timestamptz comparison is only stable, not immutable -explain (analyze, costs off, summary off, timing off) -select * from stable_qual_pruning where a < '2000-02-01'::timestamptz; - QUERY PLAN --------------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 2 - -> Seq Scan on stable_qual_pruning1 stable_qual_pruning_1 (actual rows=0 loops=1) - Filter: (a < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) -(4 rows) - --- check ScalarArrayOp cases -explain (analyze, costs off, summary off, timing off) -select * from stable_qual_pruning - where a = any(array['2010-02-01', '2020-01-01']::timestamp[]); - QUERY PLAN --------------------------------- - Result (actual rows=0 loops=1) - One-Time Filter: false -(2 rows) - -explain (analyze, 
costs off, summary off, timing off) -select * from stable_qual_pruning - where a = any(array['2000-02-01', '2010-01-01']::timestamp[]); - QUERY PLAN ----------------------------------------------------------------------------------------------------------------- - Seq Scan on stable_qual_pruning2 stable_qual_pruning (actual rows=0 loops=1) - Filter: (a = ANY ('{"Tue Feb 01 00:00:00 2000","Fri Jan 01 00:00:00 2010"}'::timestamp without time zone[])) -(2 rows) - -explain (analyze, costs off, summary off, timing off) -select * from stable_qual_pruning - where a = any(array['2000-02-01', localtimestamp]::timestamp[]); - QUERY PLAN ------------------------------------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 2 - -> Seq Scan on stable_qual_pruning2 stable_qual_pruning_1 (actual rows=0 loops=1) - Filter: (a = ANY (ARRAY['Tue Feb 01 00:00:00 2000'::timestamp without time zone, LOCALTIMESTAMP])) -(4 rows) - -explain (analyze, costs off, summary off, timing off) -select * from stable_qual_pruning - where a = any(array['2010-02-01', '2020-01-01']::timestamptz[]); - QUERY PLAN --------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 3 -(2 rows) - -explain (analyze, costs off, summary off, timing off) -select * from stable_qual_pruning - where a = any(array['2000-02-01', '2010-01-01']::timestamptz[]); - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - Subplans Removed: 2 - -> Seq Scan on stable_qual_pruning2 stable_qual_pruning_1 (actual rows=0 loops=1) - Filter: (a = ANY ('{"Tue Feb 01 00:00:00 2000 PST","Fri Jan 01 00:00:00 2010 PST"}'::timestamp with time zone[])) -(4 rows) - -explain (analyze, costs off, summary off, timing off) -select * from stable_qual_pruning - where a = any(null::timestamptz[]); - QUERY PLAN --------------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - -> Seq Scan on stable_qual_pruning1 stable_qual_pruning_1 (actual rows=0 loops=1) - Filter: (a = ANY (NULL::timestamp with time zone[])) - -> Seq Scan on stable_qual_pruning2 stable_qual_pruning_2 (actual rows=0 loops=1) - Filter: (a = ANY (NULL::timestamp with time zone[])) - -> Seq Scan on stable_qual_pruning3 stable_qual_pruning_3 (actual rows=0 loops=1) - Filter: (a = ANY (NULL::timestamp with time zone[])) -(7 rows) - -drop table stable_qual_pruning; --- --- Check that pruning with composite range partitioning works correctly when --- it must ignore clauses for trailing keys once it has seen a clause with --- non-inclusive operator for an earlier key --- -create table mc3p (a int, b int, c int) partition by range (a, abs(b), c); -create table mc3p0 partition of mc3p - for values from (0, 0, 0) to (0, maxvalue, maxvalue); -create table mc3p1 partition of mc3p - for values from (1, 1, 1) to (2, minvalue, minvalue); -create table mc3p2 partition of mc3p - for values from (2, minvalue, minvalue) to (3, maxvalue, maxvalue); -insert into mc3p values (0, 1, 1), (1, 1, 1), (2, 1, 1); -explain (analyze, costs off, summary off, timing off) -select * from mc3p where a < 3 and abs(b) = 1; - QUERY PLAN --------------------------------------------------------- - Append (actual rows=3 loops=1) - -> Seq Scan on mc3p0 mc3p_1 (actual rows=1 loops=1) - Filter: ((a < 3) AND (abs(b) = 1)) - -> Seq Scan on mc3p1 mc3p_2 (actual rows=1 loops=1) - Filter: 
((a < 3) AND (abs(b) = 1)) - -> Seq Scan on mc3p2 mc3p_3 (actual rows=1 loops=1) - Filter: ((a < 3) AND (abs(b) = 1)) -(7 rows) - --- --- Check that pruning with composite range partitioning works correctly when --- a combination of runtime parameters is specified, not all of whose values --- are available at the same time --- -prepare ps1 as - select * from mc3p where a = $1 and abs(b) < (select 3); -explain (analyze, costs off, summary off, timing off) -execute ps1(1); - QUERY PLAN -------------------------------------------------------------- - Append (actual rows=1 loops=1) - Subplans Removed: 2 - InitPlan 1 - -> Result (actual rows=1 loops=1) - -> Seq Scan on mc3p1 mc3p_1 (actual rows=1 loops=1) - Filter: ((a = $1) AND (abs(b) < (InitPlan 1).col1)) -(6 rows) - -deallocate ps1; -prepare ps2 as - select * from mc3p where a <= $1 and abs(b) < (select 3); -explain (analyze, costs off, summary off, timing off) -execute ps2(1); - QUERY PLAN --------------------------------------------------------------- - Append (actual rows=2 loops=1) - Subplans Removed: 1 - InitPlan 1 - -> Result (actual rows=1 loops=1) - -> Seq Scan on mc3p0 mc3p_1 (actual rows=1 loops=1) - Filter: ((a <= $1) AND (abs(b) < (InitPlan 1).col1)) - -> Seq Scan on mc3p1 mc3p_2 (actual rows=1 loops=1) - Filter: ((a <= $1) AND (abs(b) < (InitPlan 1).col1)) -(8 rows) - -deallocate ps2; -drop table mc3p; --- Ensure runtime pruning works with initplans params with boolean types -create table boolvalues (value bool not null); -insert into boolvalues values('t'),('f'); -create table boolp (a bool) partition by list (a); -create table boolp_t partition of boolp for values in('t'); -create table boolp_f partition of boolp for values in('f'); -explain (analyze, costs off, summary off, timing off) -select * from boolp where a = (select value from boolvalues where value); - QUERY PLAN ------------------------------------------------------------ - Append (actual rows=0 loops=1) - InitPlan 1 - -> Seq Scan on boolvalues (actual rows=1 loops=1) - Filter: value - Rows Removed by Filter: 1 - -> Seq Scan on boolp_f boolp_1 (never executed) - Filter: (a = (InitPlan 1).col1) - -> Seq Scan on boolp_t boolp_2 (actual rows=0 loops=1) - Filter: (a = (InitPlan 1).col1) -(9 rows) - -explain (analyze, costs off, summary off, timing off) -select * from boolp where a = (select value from boolvalues where not value); - QUERY PLAN ------------------------------------------------------------ - Append (actual rows=0 loops=1) - InitPlan 1 - -> Seq Scan on boolvalues (actual rows=1 loops=1) - Filter: (NOT value) - Rows Removed by Filter: 1 - -> Seq Scan on boolp_f boolp_1 (actual rows=0 loops=1) - Filter: (a = (InitPlan 1).col1) - -> Seq Scan on boolp_t boolp_2 (never executed) - Filter: (a = (InitPlan 1).col1) -(9 rows) - -drop table boolp; --- --- Test run-time pruning of MergeAppend subnodes --- -set enable_seqscan = off; -set enable_sort = off; -create table ma_test (a int, b int) partition by range (a); -create table ma_test_p1 partition of ma_test for values from (0) to (10); -create table ma_test_p2 partition of ma_test for values from (10) to (20); -create table ma_test_p3 partition of ma_test for values from (20) to (30); -insert into ma_test select x,x from generate_series(0,29) t(x); -create index on ma_test (b); -analyze ma_test; -prepare mt_q1 (int) as select a from ma_test where a >= $1 and a % 10 = 5 order by b; -explain (analyze, costs off, summary off, timing off) execute mt_q1(15); - QUERY PLAN 
------------------------------------------------------------------------------------------ - Merge Append (actual rows=2 loops=1) - Sort Key: ma_test.b - Subplans Removed: 1 - -> Index Scan using ma_test_p2_b_idx on ma_test_p2 ma_test_1 (actual rows=1 loops=1) - Filter: ((a >= $1) AND ((a % 10) = 5)) - Rows Removed by Filter: 9 - -> Index Scan using ma_test_p3_b_idx on ma_test_p3 ma_test_2 (actual rows=1 loops=1) - Filter: ((a >= $1) AND ((a % 10) = 5)) - Rows Removed by Filter: 9 -(9 rows) - -execute mt_q1(15); - a ----- - 15 - 25 -(2 rows) - -explain (analyze, costs off, summary off, timing off) execute mt_q1(25); - QUERY PLAN ------------------------------------------------------------------------------------------ - Merge Append (actual rows=1 loops=1) - Sort Key: ma_test.b - Subplans Removed: 2 - -> Index Scan using ma_test_p3_b_idx on ma_test_p3 ma_test_1 (actual rows=1 loops=1) - Filter: ((a >= $1) AND ((a % 10) = 5)) - Rows Removed by Filter: 9 -(6 rows) - -execute mt_q1(25); - a ----- - 25 -(1 row) - --- Ensure MergeAppend behaves correctly when no subplans match -explain (analyze, costs off, summary off, timing off) execute mt_q1(35); - QUERY PLAN --------------------------------------- - Merge Append (actual rows=0 loops=1) - Sort Key: ma_test.b - Subplans Removed: 3 -(3 rows) - -execute mt_q1(35); - a ---- -(0 rows) - -deallocate mt_q1; -prepare mt_q2 (int) as select * from ma_test where a >= $1 order by b limit 1; --- Ensure output list looks sane when the MergeAppend has no subplans. -explain (analyze, verbose, costs off, summary off, timing off) execute mt_q2 (35); - QUERY PLAN --------------------------------------------- - Limit (actual rows=0 loops=1) - Output: ma_test.a, ma_test.b - -> Merge Append (actual rows=0 loops=1) - Sort Key: ma_test.b - Subplans Removed: 3 -(5 rows) - -deallocate mt_q2; --- ensure initplan params properly prune partitions -explain (analyze, costs off, summary off, timing off) select * from ma_test where a >= (select min(b) from ma_test_p2) order by b; - QUERY PLAN ------------------------------------------------------------------------------------------------ - Merge Append (actual rows=20 loops=1) - Sort Key: ma_test.b - InitPlan 2 - -> Result (actual rows=1 loops=1) - InitPlan 1 - -> Limit (actual rows=1 loops=1) - -> Index Scan using ma_test_p2_b_idx on ma_test_p2 (actual rows=1 loops=1) - Index Cond: (b IS NOT NULL) - -> Index Scan using ma_test_p1_b_idx on ma_test_p1 ma_test_1 (never executed) - Filter: (a >= (InitPlan 2).col1) - -> Index Scan using ma_test_p2_b_idx on ma_test_p2 ma_test_2 (actual rows=10 loops=1) - Filter: (a >= (InitPlan 2).col1) - -> Index Scan using ma_test_p3_b_idx on ma_test_p3 ma_test_3 (actual rows=10 loops=1) - Filter: (a >= (InitPlan 2).col1) -(14 rows) - -reset enable_seqscan; -reset enable_sort; -drop table ma_test; -reset enable_indexonlyscan; --- --- check that pruning works properly when the partition key is of a --- pseudotype --- --- array type list partition key -create table pp_arrpart (a int[]) partition by list (a); -create table pp_arrpart1 partition of pp_arrpart for values in ('{1}'); -create table pp_arrpart2 partition of pp_arrpart for values in ('{2, 3}', '{4, 5}'); -explain (costs off) select * from pp_arrpart where a = '{1}'; - QUERY PLAN ------------------------------------- - Seq Scan on pp_arrpart1 pp_arrpart - Filter: (a = '{1}'::integer[]) -(2 rows) - -explain (costs off) select * from pp_arrpart where a = '{1, 2}'; - QUERY PLAN --------------------------- - Result - One-Time Filter: false 
-(2 rows) - -explain (costs off) select * from pp_arrpart where a in ('{4, 5}', '{1}'); - QUERY PLAN ----------------------------------------------------------------------- - Append - -> Seq Scan on pp_arrpart1 pp_arrpart_1 - Filter: ((a = '{4,5}'::integer[]) OR (a = '{1}'::integer[])) - -> Seq Scan on pp_arrpart2 pp_arrpart_2 - Filter: ((a = '{4,5}'::integer[]) OR (a = '{1}'::integer[])) -(5 rows) - -explain (costs off) update pp_arrpart set a = a where a = '{1}'; - QUERY PLAN --------------------------------------------- - Update on pp_arrpart - Update on pp_arrpart1 pp_arrpart_1 - -> Seq Scan on pp_arrpart1 pp_arrpart_1 - Filter: (a = '{1}'::integer[]) -(4 rows) - -explain (costs off) delete from pp_arrpart where a = '{1}'; - QUERY PLAN --------------------------------------------- - Delete on pp_arrpart - Delete on pp_arrpart1 pp_arrpart_1 - -> Seq Scan on pp_arrpart1 pp_arrpart_1 - Filter: (a = '{1}'::integer[]) -(4 rows) - -drop table pp_arrpart; --- array type hash partition key -create table pph_arrpart (a int[]) partition by hash (a); -create table pph_arrpart1 partition of pph_arrpart for values with (modulus 2, remainder 0); -create table pph_arrpart2 partition of pph_arrpart for values with (modulus 2, remainder 1); -insert into pph_arrpart values ('{1}'), ('{1, 2}'), ('{4, 5}'); -select tableoid::regclass, * from pph_arrpart order by 1; - tableoid | a ---------------+------- - pph_arrpart1 | {1,2} - pph_arrpart1 | {4,5} - pph_arrpart2 | {1} -(3 rows) - -explain (costs off) select * from pph_arrpart where a = '{1}'; - QUERY PLAN --------------------------------------- - Seq Scan on pph_arrpart2 pph_arrpart - Filter: (a = '{1}'::integer[]) -(2 rows) - -explain (costs off) select * from pph_arrpart where a = '{1, 2}'; - QUERY PLAN --------------------------------------- - Seq Scan on pph_arrpart1 pph_arrpart - Filter: (a = '{1,2}'::integer[]) -(2 rows) - -explain (costs off) select * from pph_arrpart where a in ('{4, 5}', '{1}'); - QUERY PLAN ----------------------------------------------------------------------- - Append - -> Seq Scan on pph_arrpart1 pph_arrpart_1 - Filter: ((a = '{4,5}'::integer[]) OR (a = '{1}'::integer[])) - -> Seq Scan on pph_arrpart2 pph_arrpart_2 - Filter: ((a = '{4,5}'::integer[]) OR (a = '{1}'::integer[])) -(5 rows) - -drop table pph_arrpart; --- enum type list partition key -create type pp_colors as enum ('green', 'blue', 'black'); -create table pp_enumpart (a pp_colors) partition by list (a); -create table pp_enumpart_green partition of pp_enumpart for values in ('green'); -create table pp_enumpart_blue partition of pp_enumpart for values in ('blue'); -explain (costs off) select * from pp_enumpart where a = 'blue'; - QUERY PLAN ------------------------------------------- - Seq Scan on pp_enumpart_blue pp_enumpart - Filter: (a = 'blue'::pp_colors) -(2 rows) - -explain (costs off) select * from pp_enumpart where a = 'black'; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -drop table pp_enumpart; -drop type pp_colors; --- record type as partition key -create type pp_rectype as (a int, b int); -create table pp_recpart (a pp_rectype) partition by list (a); -create table pp_recpart_11 partition of pp_recpart for values in ('(1,1)'); -create table pp_recpart_23 partition of pp_recpart for values in ('(2,3)'); -explain (costs off) select * from pp_recpart where a = '(1,1)'::pp_rectype; - QUERY PLAN --------------------------------------- - Seq Scan on pp_recpart_11 pp_recpart - Filter: (a = '(1,1)'::pp_rectype) -(2 rows) 
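
An illustrative aside (not part of the expected output above): the pp_recpart test only exercises plan-time pruning on the record-type key. The same key could also be exercised for run-time pruning by reusing the prepared-statement pattern this file applies to integer keys; the statement name pp_rec_q and the use of plan_cache_mode below are illustrative assumptions, and no expected plan is shown because none exists in the source.

prepare pp_rec_q (pp_rectype) as
  select * from pp_recpart where a = $1;
set plan_cache_mode = force_generic_plan;  -- keep $1 as an external Param so pruning is deferred to run time
explain (costs off) execute pp_rec_q ('(1,1)');
reset plan_cache_mode;
deallocate pp_rec_q;
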
- -explain (costs off) select * from pp_recpart where a = '(1,2)'::pp_rectype; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -drop table pp_recpart; -drop type pp_rectype; --- range type partition key -create table pp_intrangepart (a int4range) partition by list (a); -create table pp_intrangepart12 partition of pp_intrangepart for values in ('[1,2]'); -create table pp_intrangepart2inf partition of pp_intrangepart for values in ('[2,)'); -explain (costs off) select * from pp_intrangepart where a = '[1,2]'::int4range; - QUERY PLAN ------------------------------------------------ - Seq Scan on pp_intrangepart12 pp_intrangepart - Filter: (a = '[1,3)'::int4range) -(2 rows) - -explain (costs off) select * from pp_intrangepart where a = '(1,2)'::int4range; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -drop table pp_intrangepart; --- --- Ensure the enable_partition_prune GUC properly disables partition pruning. --- -create table pp_lp (a int, value int) partition by list (a); -create table pp_lp1 partition of pp_lp for values in(1); -create table pp_lp2 partition of pp_lp for values in(2); -explain (costs off) select * from pp_lp where a = 1; - QUERY PLAN --------------------------- - Seq Scan on pp_lp1 pp_lp - Filter: (a = 1) -(2 rows) - -explain (costs off) update pp_lp set value = 10 where a = 1; - QUERY PLAN ----------------------------------- - Update on pp_lp - Update on pp_lp1 pp_lp_1 - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) -(4 rows) - -explain (costs off) delete from pp_lp where a = 1; - QUERY PLAN ----------------------------------- - Delete on pp_lp - Delete on pp_lp1 pp_lp_1 - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) -(4 rows) - -set enable_partition_pruning = off; -set constraint_exclusion = 'partition'; -- this should not affect the result. -explain (costs off) select * from pp_lp where a = 1; - QUERY PLAN ----------------------------------- - Append - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) - -> Seq Scan on pp_lp2 pp_lp_2 - Filter: (a = 1) -(5 rows) - -explain (costs off) update pp_lp set value = 10 where a = 1; - QUERY PLAN ----------------------------------------- - Update on pp_lp - Update on pp_lp1 pp_lp_1 - Update on pp_lp2 pp_lp_2 - -> Append - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) - -> Seq Scan on pp_lp2 pp_lp_2 - Filter: (a = 1) -(8 rows) - -explain (costs off) delete from pp_lp where a = 1; - QUERY PLAN ----------------------------------------- - Delete on pp_lp - Delete on pp_lp1 pp_lp_1 - Delete on pp_lp2 pp_lp_2 - -> Append - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) - -> Seq Scan on pp_lp2 pp_lp_2 - Filter: (a = 1) -(8 rows) - -set constraint_exclusion = 'off'; -- this should not affect the result. 
-explain (costs off) select * from pp_lp where a = 1; - QUERY PLAN ----------------------------------- - Append - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) - -> Seq Scan on pp_lp2 pp_lp_2 - Filter: (a = 1) -(5 rows) - -explain (costs off) update pp_lp set value = 10 where a = 1; - QUERY PLAN ----------------------------------------- - Update on pp_lp - Update on pp_lp1 pp_lp_1 - Update on pp_lp2 pp_lp_2 - -> Append - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) - -> Seq Scan on pp_lp2 pp_lp_2 - Filter: (a = 1) -(8 rows) - -explain (costs off) delete from pp_lp where a = 1; - QUERY PLAN ----------------------------------------- - Delete on pp_lp - Delete on pp_lp1 pp_lp_1 - Delete on pp_lp2 pp_lp_2 - -> Append - -> Seq Scan on pp_lp1 pp_lp_1 - Filter: (a = 1) - -> Seq Scan on pp_lp2 pp_lp_2 - Filter: (a = 1) -(8 rows) - -drop table pp_lp; --- Ensure enable_partition_prune does not affect non-partitioned tables. -create table inh_lp (a int, value int); -create table inh_lp1 (a int, value int, check(a = 1)) inherits (inh_lp); -NOTICE: merging column "a" with inherited definition -NOTICE: merging column "value" with inherited definition -create table inh_lp2 (a int, value int, check(a = 2)) inherits (inh_lp); -NOTICE: merging column "a" with inherited definition -NOTICE: merging column "value" with inherited definition -set constraint_exclusion = 'partition'; --- inh_lp2 should be removed in the following 3 cases. -explain (costs off) select * from inh_lp where a = 1; - QUERY PLAN ------------------------------------- - Append - -> Seq Scan on inh_lp inh_lp_1 - Filter: (a = 1) - -> Seq Scan on inh_lp1 inh_lp_2 - Filter: (a = 1) -(5 rows) - -explain (costs off) update inh_lp set value = 10 where a = 1; - QUERY PLAN ------------------------------------------------- - Update on inh_lp - Update on inh_lp inh_lp_1 - Update on inh_lp1 inh_lp_2 - -> Result - -> Append - -> Seq Scan on inh_lp inh_lp_1 - Filter: (a = 1) - -> Seq Scan on inh_lp1 inh_lp_2 - Filter: (a = 1) -(9 rows) - -explain (costs off) delete from inh_lp where a = 1; - QUERY PLAN ------------------------------------------- - Delete on inh_lp - Delete on inh_lp inh_lp_1 - Delete on inh_lp1 inh_lp_2 - -> Append - -> Seq Scan on inh_lp inh_lp_1 - Filter: (a = 1) - -> Seq Scan on inh_lp1 inh_lp_2 - Filter: (a = 1) -(8 rows) - --- Ensure we don't exclude normal relations when we only expect to exclude --- inheritance children -explain (costs off) update inh_lp1 set value = 10 where a = 2; - QUERY PLAN ---------------------------- - Update on inh_lp1 - -> Seq Scan on inh_lp1 - Filter: (a = 2) -(3 rows) - -drop table inh_lp cascade; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table inh_lp1 -drop cascades to table inh_lp2 -reset enable_partition_pruning; -reset constraint_exclusion; --- Check pruning for a partition tree containing only temporary relations -create temp table pp_temp_parent (a int) partition by list (a); -create temp table pp_temp_part_1 partition of pp_temp_parent for values in (1); -create temp table pp_temp_part_def partition of pp_temp_parent default; -explain (costs off) select * from pp_temp_parent where true; - QUERY PLAN ------------------------------------------------------ - Append - -> Seq Scan on pp_temp_part_1 pp_temp_parent_1 - -> Seq Scan on pp_temp_part_def pp_temp_parent_2 -(3 rows) - -explain (costs off) select * from pp_temp_parent where a = 2; - QUERY PLAN ---------------------------------------------- - Seq Scan on pp_temp_part_def pp_temp_parent - Filter: (a = 2) -(2 rows) 
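
An illustrative aside (not part of the expected output above): when reading plans like the pp_temp_parent ones, it can help to list the partition tree that pruning operates on. A minimal sketch using pg_partition_tree(), assuming pp_temp_parent has not yet been dropped; the result set is not shown because it is not part of the original test.

select relid, parentrelid, isleaf, level
from pg_partition_tree('pp_temp_parent');
-- lists pp_temp_parent itself (level 0) plus its leaf partitions pp_temp_part_1 and pp_temp_part_def
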
- -drop table pp_temp_parent; --- Stress run-time partition pruning a bit more, per bug reports -create temp table p (a int, b int, c int) partition by list (a); -create temp table p1 partition of p for values in (1); -create temp table p2 partition of p for values in (2); -create temp table q (a int, b int, c int) partition by list (a); -create temp table q1 partition of q for values in (1) partition by list (b); -create temp table q11 partition of q1 for values in (1) partition by list (c); -create temp table q111 partition of q11 for values in (1); -create temp table q2 partition of q for values in (2) partition by list (b); -create temp table q21 partition of q2 for values in (1); -create temp table q22 partition of q2 for values in (2); -insert into q22 values (2, 2, 3); -explain (costs off) -select * -from ( - select * from p - union all - select * from q1 - union all - select 1, 1, 1 - ) s(a, b, c) -where s.a = 1 and s.b = 1 and s.c = (select 1); - QUERY PLAN -------------------------------------------------------------------- - Append - InitPlan 1 - -> Result - -> Seq Scan on p1 p - Filter: ((a = 1) AND (b = 1) AND (c = (InitPlan 1).col1)) - -> Seq Scan on q111 q1 - Filter: ((a = 1) AND (b = 1) AND (c = (InitPlan 1).col1)) - -> Result - One-Time Filter: (1 = (InitPlan 1).col1) -(9 rows) - -select * -from ( - select * from p - union all - select * from q1 - union all - select 1, 1, 1 - ) s(a, b, c) -where s.a = 1 and s.b = 1 and s.c = (select 1); - a | b | c ----+---+--- - 1 | 1 | 1 -(1 row) - -prepare q (int, int) as -select * -from ( - select * from p - union all - select * from q1 - union all - select 1, 1, 1 - ) s(a, b, c) -where s.a = $1 and s.b = $2 and s.c = (select 1); -explain (costs off) execute q (1, 1); - QUERY PLAN ------------------------------------------------------------------------------- - Append - Subplans Removed: 1 - InitPlan 1 - -> Result - -> Seq Scan on p1 p - Filter: ((a = $1) AND (b = $2) AND (c = (InitPlan 1).col1)) - -> Seq Scan on q111 q1 - Filter: ((a = $1) AND (b = $2) AND (c = (InitPlan 1).col1)) - -> Result - One-Time Filter: ((1 = $1) AND (1 = $2) AND (1 = (InitPlan 1).col1)) -(10 rows) - -execute q (1, 1); - a | b | c ----+---+--- - 1 | 1 | 1 -(1 row) - -drop table p, q; --- Ensure run-time pruning works correctly when we match a partitioned table --- on the first level but find no matching partitions on the second level. 
-create table listp (a int, b int) partition by list (a); -create table listp1 partition of listp for values in(1); -create table listp2 partition of listp for values in(2) partition by list(b); -create table listp2_10 partition of listp2 for values in (10); -explain (analyze, costs off, summary off, timing off) -select * from listp where a = (select 2) and b <> 10; - QUERY PLAN ---------------------------------------------------- - Seq Scan on listp1 listp (actual rows=0 loops=1) - Filter: ((b <> 10) AND (a = (InitPlan 1).col1)) - InitPlan 1 - -> Result (never executed) -(4 rows) - --- --- check that a partition directly accessed in a query is excluded with --- constraint_exclusion = on --- --- turn off partition pruning, so that it doesn't interfere -set enable_partition_pruning to off; --- setting constraint_exclusion to 'partition' disables exclusion -set constraint_exclusion to 'partition'; -explain (costs off) select * from listp1 where a = 2; - QUERY PLAN --------------------- - Seq Scan on listp1 - Filter: (a = 2) -(2 rows) - -explain (costs off) update listp1 set a = 1 where a = 2; - QUERY PLAN --------------------------- - Update on listp1 - -> Seq Scan on listp1 - Filter: (a = 2) -(3 rows) - --- constraint exclusion enabled -set constraint_exclusion to 'on'; -explain (costs off) select * from listp1 where a = 2; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -explain (costs off) update listp1 set a = 1 where a = 2; - QUERY PLAN --------------------------------- - Update on listp1 - -> Result - One-Time Filter: false -(3 rows) - -reset constraint_exclusion; -reset enable_partition_pruning; -drop table listp; --- Ensure run-time pruning works correctly for nested Append nodes -set parallel_setup_cost to 0; -set parallel_tuple_cost to 0; -create table listp (a int) partition by list(a); -create table listp_12 partition of listp for values in(1,2) partition by list(a); -create table listp_12_1 partition of listp_12 for values in(1); -create table listp_12_2 partition of listp_12 for values in(2); --- Force the 2nd subnode of the Append to be non-parallel. This results in --- a nested Append node because the mixed parallel / non-parallel paths cannot --- be pulled into the top-level Append. -alter table listp_12_1 set (parallel_workers = 0); --- Ensure that listp_12_2 is not scanned. (The nested Append is not seen in --- the plan as it's pulled in setref.c due to having just a single subnode). -select explain_parallel_append('select * from listp where a = (select 1);'); - explain_parallel_append ----------------------------------------------------------------------- - Gather (actual rows=N loops=N) - Workers Planned: 2 - Workers Launched: N - InitPlan 1 - -> Result (actual rows=N loops=N) - -> Parallel Append (actual rows=N loops=N) - -> Seq Scan on listp_12_1 listp_1 (actual rows=N loops=N) - Filter: (a = (InitPlan 1).col1) - -> Parallel Seq Scan on listp_12_2 listp_2 (never executed) - Filter: (a = (InitPlan 1).col1) -(10 rows) - --- Like the above but throw some more complexity at the planner by adding --- a UNION ALL. We expect both sides of the union not to scan the --- non-required partitions. 
-select explain_parallel_append( -'select * from listp where a = (select 1) - union all -select * from listp where a = (select 2);'); - explain_parallel_append ------------------------------------------------------------------------------------ - Gather (actual rows=N loops=N) - Workers Planned: 2 - Workers Launched: N - -> Parallel Append (actual rows=N loops=N) - -> Parallel Append (actual rows=N loops=N) - InitPlan 2 - -> Result (actual rows=N loops=N) - -> Seq Scan on listp_12_1 listp_1 (never executed) - Filter: (a = (InitPlan 2).col1) - -> Parallel Seq Scan on listp_12_2 listp_2 (actual rows=N loops=N) - Filter: (a = (InitPlan 2).col1) - -> Parallel Append (actual rows=N loops=N) - InitPlan 1 - -> Result (actual rows=N loops=N) - -> Seq Scan on listp_12_1 listp_4 (actual rows=N loops=N) - Filter: (a = (InitPlan 1).col1) - -> Parallel Seq Scan on listp_12_2 listp_5 (never executed) - Filter: (a = (InitPlan 1).col1) -(18 rows) - -drop table listp; -reset parallel_tuple_cost; -reset parallel_setup_cost; --- Test case for run-time pruning with a nested Merge Append -set enable_sort to 0; -create table rangep (a int, b int) partition by range (a); -create table rangep_0_to_100 partition of rangep for values from (0) to (100) partition by list (b); --- We need 3 sub-partitions. 1 to validate pruning worked and another two --- because a single remaining partition would be pulled up to the main Append. -create table rangep_0_to_100_1 partition of rangep_0_to_100 for values in(1); -create table rangep_0_to_100_2 partition of rangep_0_to_100 for values in(2); -create table rangep_0_to_100_3 partition of rangep_0_to_100 for values in(3); -create table rangep_100_to_200 partition of rangep for values from (100) to (200); -create index on rangep (a); --- Ensure run-time pruning works on the nested Merge Append -explain (analyze on, costs off, timing off, summary off) -select * from rangep where b IN((select 1),(select 2)) order by a; - QUERY PLAN ------------------------------------------------------------------------------------------------------------- - Append (actual rows=0 loops=1) - InitPlan 1 - -> Result (actual rows=1 loops=1) - InitPlan 2 - -> Result (actual rows=1 loops=1) - -> Merge Append (actual rows=0 loops=1) - Sort Key: rangep_2.a - -> Index Scan using rangep_0_to_100_1_a_idx on rangep_0_to_100_1 rangep_2 (actual rows=0 loops=1) - Filter: (b = ANY (ARRAY[(InitPlan 1).col1, (InitPlan 2).col1])) - -> Index Scan using rangep_0_to_100_2_a_idx on rangep_0_to_100_2 rangep_3 (actual rows=0 loops=1) - Filter: (b = ANY (ARRAY[(InitPlan 1).col1, (InitPlan 2).col1])) - -> Index Scan using rangep_0_to_100_3_a_idx on rangep_0_to_100_3 rangep_4 (never executed) - Filter: (b = ANY (ARRAY[(InitPlan 1).col1, (InitPlan 2).col1])) - -> Index Scan using rangep_100_to_200_a_idx on rangep_100_to_200 rangep_5 (actual rows=0 loops=1) - Filter: (b = ANY (ARRAY[(InitPlan 1).col1, (InitPlan 2).col1])) -(15 rows) - -reset enable_sort; -drop table rangep; --- --- Check that gen_prune_steps_from_opexps() works well for various cases of --- clauses for different partition keys --- -create table rp_prefix_test1 (a int, b varchar) partition by range(a, b); -create table rp_prefix_test1_p1 partition of rp_prefix_test1 for values from (1, 'a') to (1, 'b'); -create table rp_prefix_test1_p2 partition of rp_prefix_test1 for values from (2, 'a') to (2, 'b'); --- Don't call get_steps_using_prefix() with the last partition key b plus --- an empty prefix -explain (costs off) select * from rp_prefix_test1 where a <= 1 and 
b = 'a'; - QUERY PLAN --------------------------------------------------- - Seq Scan on rp_prefix_test1_p1 rp_prefix_test1 - Filter: ((a <= 1) AND ((b)::text = 'a'::text)) -(2 rows) - -create table rp_prefix_test2 (a int, b int, c int) partition by range(a, b, c); -create table rp_prefix_test2_p1 partition of rp_prefix_test2 for values from (1, 1, 0) to (1, 1, 10); -create table rp_prefix_test2_p2 partition of rp_prefix_test2 for values from (2, 2, 0) to (2, 2, 10); --- Don't call get_steps_using_prefix() with the last partition key c plus --- an invalid prefix (ie, b = 1) -explain (costs off) select * from rp_prefix_test2 where a <= 1 and b = 1 and c >= 0; - QUERY PLAN ------------------------------------------------- - Seq Scan on rp_prefix_test2_p1 rp_prefix_test2 - Filter: ((a <= 1) AND (c >= 0) AND (b = 1)) -(2 rows) - -create table rp_prefix_test3 (a int, b int, c int, d int) partition by range(a, b, c, d); -create table rp_prefix_test3_p1 partition of rp_prefix_test3 for values from (1, 1, 1, 0) to (1, 1, 1, 10); -create table rp_prefix_test3_p2 partition of rp_prefix_test3 for values from (2, 2, 2, 0) to (2, 2, 2, 10); --- Test that get_steps_using_prefix() handles a prefix that contains multiple --- clauses for the partition key b (ie, b >= 1 and b >= 2) -explain (costs off) select * from rp_prefix_test3 where a >= 1 and b >= 1 and b >= 2 and c >= 2 and d >= 0; - QUERY PLAN --------------------------------------------------------------------------- - Seq Scan on rp_prefix_test3_p2 rp_prefix_test3 - Filter: ((a >= 1) AND (b >= 1) AND (b >= 2) AND (c >= 2) AND (d >= 0)) -(2 rows) - --- Test that get_steps_using_prefix() handles a prefix that contains multiple --- clauses for the partition key b (ie, b >= 1 and b = 2) (This also tests --- that the caller arranges clauses in that prefix in the required order) -explain (costs off) select * from rp_prefix_test3 where a >= 1 and b >= 1 and b = 2 and c = 2 and d >= 0; - QUERY PLAN ------------------------------------------------------------------------- - Seq Scan on rp_prefix_test3_p2 rp_prefix_test3 - Filter: ((a >= 1) AND (b >= 1) AND (d >= 0) AND (b = 2) AND (c = 2)) -(2 rows) - -drop table rp_prefix_test1; -drop table rp_prefix_test2; -drop table rp_prefix_test3; --- --- Test that get_steps_using_prefix() handles IS NULL clauses correctly --- -create table hp_prefix_test (a int, b int, c int, d int) - partition by hash (a part_test_int4_ops, b part_test_int4_ops, c part_test_int4_ops, d part_test_int4_ops); --- create 8 partitions -select 'create table hp_prefix_test_p' || x::text || ' partition of hp_prefix_test for values with (modulus 8, remainder ' || x::text || ');' -from generate_Series(0,7) x; - ?column? 
------------------------------------------------------------------------------------------------------- - create table hp_prefix_test_p0 partition of hp_prefix_test for values with (modulus 8, remainder 0); - create table hp_prefix_test_p1 partition of hp_prefix_test for values with (modulus 8, remainder 1); - create table hp_prefix_test_p2 partition of hp_prefix_test for values with (modulus 8, remainder 2); - create table hp_prefix_test_p3 partition of hp_prefix_test for values with (modulus 8, remainder 3); - create table hp_prefix_test_p4 partition of hp_prefix_test for values with (modulus 8, remainder 4); - create table hp_prefix_test_p5 partition of hp_prefix_test for values with (modulus 8, remainder 5); - create table hp_prefix_test_p6 partition of hp_prefix_test for values with (modulus 8, remainder 6); - create table hp_prefix_test_p7 partition of hp_prefix_test for values with (modulus 8, remainder 7); -(8 rows) - -\gexec -create table hp_prefix_test_p0 partition of hp_prefix_test for values with (modulus 8, remainder 0); -create table hp_prefix_test_p1 partition of hp_prefix_test for values with (modulus 8, remainder 1); -create table hp_prefix_test_p2 partition of hp_prefix_test for values with (modulus 8, remainder 2); -create table hp_prefix_test_p3 partition of hp_prefix_test for values with (modulus 8, remainder 3); -create table hp_prefix_test_p4 partition of hp_prefix_test for values with (modulus 8, remainder 4); -create table hp_prefix_test_p5 partition of hp_prefix_test for values with (modulus 8, remainder 5); -create table hp_prefix_test_p6 partition of hp_prefix_test for values with (modulus 8, remainder 6); -create table hp_prefix_test_p7 partition of hp_prefix_test for values with (modulus 8, remainder 7); --- insert 16 rows, one row for each test to perform. -insert into hp_prefix_test -select - case a when 0 then null else 1 end, - case b when 0 then null else 2 end, - case c when 0 then null else 3 end, - case d when 0 then null else 4 end -from - generate_series(0,1) a, - generate_series(0,1) b, - generate_Series(0,1) c, - generate_Series(0,1) d; --- Ensure partition pruning works correctly for each combination of IS NULL --- and equality quals. This may seem a little excessive, but there have been --- a number of bugs in this area over the years. We make use of row only --- output to reduce the size of the expected results. 
-\t on -select - 'explain (costs off) select tableoid::regclass,* from hp_prefix_test where ' || - string_agg(c.colname || case when g.s & (1 << c.colpos) = 0 then ' is null' else ' = ' || (colpos+1)::text end, ' and ' order by c.colpos) -from (values('a',0),('b',1),('c',2),('d',3)) c(colname, colpos), generate_Series(0,15) g(s) -group by g.s -order by g.s; - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d is null - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d = 4 - explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d = 4 - -\gexec -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d is null - Seq Scan on hp_prefix_test_p0 hp_prefix_test - Filter: ((a IS NULL) AND (b IS NULL) AND (c IS NULL) AND (d IS NULL)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d is null - Seq Scan on hp_prefix_test_p1 hp_prefix_test - Filter: ((b IS NULL) AND (c IS NULL) AND (d IS NULL) AND (a = 1)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d is null - Seq Scan on hp_prefix_test_p2 hp_prefix_test - Filter: ((a IS NULL) AND (c IS NULL) AND (d IS NULL) AND (b = 2)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d is null - Seq Scan on hp_prefix_test_p4 hp_prefix_test - Filter: ((c IS NULL) AND (d IS NULL) AND (a = 1) AND (b = 2)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d is null - Seq Scan on hp_prefix_test_p3 hp_prefix_test - Filter: ((a IS NULL) AND (b IS NULL) AND (d IS NULL) AND (c = 3)) - -explain (costs off) select tableoid::regclass,* from 
hp_prefix_test where a = 1 and b is null and c = 3 and d is null - Seq Scan on hp_prefix_test_p7 hp_prefix_test - Filter: ((b IS NULL) AND (d IS NULL) AND (a = 1) AND (c = 3)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d is null - Seq Scan on hp_prefix_test_p4 hp_prefix_test - Filter: ((a IS NULL) AND (d IS NULL) AND (b = 2) AND (c = 3)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d is null - Seq Scan on hp_prefix_test_p5 hp_prefix_test - Filter: ((d IS NULL) AND (a = 1) AND (b = 2) AND (c = 3)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d = 4 - Seq Scan on hp_prefix_test_p4 hp_prefix_test - Filter: ((a IS NULL) AND (b IS NULL) AND (c IS NULL) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d = 4 - Seq Scan on hp_prefix_test_p6 hp_prefix_test - Filter: ((b IS NULL) AND (c IS NULL) AND (a = 1) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d = 4 - Seq Scan on hp_prefix_test_p5 hp_prefix_test - Filter: ((a IS NULL) AND (c IS NULL) AND (b = 2) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d = 4 - Seq Scan on hp_prefix_test_p6 hp_prefix_test - Filter: ((c IS NULL) AND (a = 1) AND (b = 2) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d = 4 - Seq Scan on hp_prefix_test_p4 hp_prefix_test - Filter: ((a IS NULL) AND (b IS NULL) AND (c = 3) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d = 4 - Seq Scan on hp_prefix_test_p5 hp_prefix_test - Filter: ((b IS NULL) AND (a = 1) AND (c = 3) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d = 4 - Seq Scan on hp_prefix_test_p6 hp_prefix_test - Filter: ((a IS NULL) AND (b = 2) AND (c = 3) AND (d = 4)) - -explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d = 4 - Seq Scan on hp_prefix_test_p4 hp_prefix_test - Filter: ((a = 1) AND (b = 2) AND (c = 3) AND (d = 4)) - --- And ensure we get exactly 1 row from each. Again, all 16 possible combinations. 
-select - 'select tableoid::regclass,* from hp_prefix_test where ' || - string_agg(c.colname || case when g.s & (1 << c.colpos) = 0 then ' is null' else ' = ' || (colpos+1)::text end, ' and ' order by c.colpos) -from (values('a',0),('b',1),('c',2),('d',3)) c(colname, colpos), generate_Series(0,15) g(s) -group by g.s -order by g.s; - select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d is null - select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d is null - select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d is null - select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d is null - select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d is null - select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d is null - select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d is null - select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d is null - select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d = 4 - select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d = 4 - select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d = 4 - select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d = 4 - select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d = 4 - select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d = 4 - select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d = 4 - select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d = 4 - -\gexec -select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d is null - hp_prefix_test_p0 | | | | - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d is null - hp_prefix_test_p1 | 1 | | | - -select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d is null - hp_prefix_test_p2 | | 2 | | - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d is null - hp_prefix_test_p4 | 1 | 2 | | - -select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d is null - hp_prefix_test_p3 | | | 3 | - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d is null - hp_prefix_test_p7 | 1 | | 3 | - -select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d is null - hp_prefix_test_p4 | | 2 | 3 | - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d is null - hp_prefix_test_p5 | 1 | 2 | 3 | - -select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d = 4 - hp_prefix_test_p4 | | | | 4 - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d = 4 - hp_prefix_test_p6 | 1 | | | 4 - -select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d = 4 - hp_prefix_test_p5 | | 2 | | 4 - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d = 4 - hp_prefix_test_p6 | 1 | 2 | | 4 - -select 
tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d = 4 - hp_prefix_test_p4 | | | 3 | 4 - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d = 4 - hp_prefix_test_p5 | 1 | | 3 | 4 - -select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d = 4 - hp_prefix_test_p6 | | 2 | 3 | 4 - -select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d = 4 - hp_prefix_test_p4 | 1 | 2 | 3 | 4 - -\t off -drop table hp_prefix_test; --- --- Check that gen_partprune_steps() detects self-contradiction from clauses --- regardless of the order of the clauses (Here we use a custom operator to --- prevent the equivclass.c machinery from reordering the clauses) --- -create operator === ( - leftarg = int4, - rightarg = int4, - procedure = int4eq, - commutator = ===, - hashes -); -create operator class part_test_int4_ops2 -for type int4 -using hash as -operator 1 ===, -function 2 part_hashint4_noop(int4, int8); -create table hp_contradict_test (a int, b int) partition by hash (a part_test_int4_ops2, b part_test_int4_ops2); -create table hp_contradict_test_p1 partition of hp_contradict_test for values with (modulus 2, remainder 0); -create table hp_contradict_test_p2 partition of hp_contradict_test for values with (modulus 2, remainder 1); -explain (costs off) select * from hp_contradict_test where a is null and a === 1 and b === 1; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -explain (costs off) select * from hp_contradict_test where a === 1 and b === 1 and a is null; - QUERY PLAN --------------------------- - Result - One-Time Filter: false -(2 rows) - -drop table hp_contradict_test; -drop operator class part_test_int4_ops2 using hash; -drop operator ===(int4, int4); -drop function explain_analyze(text); +WARNING: terminating connection because of crash of another server process +DETAIL: The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory. +HINT: In a moment you should be able to reconnect to the database and repeat your command. +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. 
+connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/indexing.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/indexing.out --- /tmp/cirrus-ci-build/src/test/regress/expected/indexing.out 2024-09-11 00:19:52.065144842 +0000 +++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/indexing.out 2024-09-11 00:29:43.228157041 +0000 @@ -675,968 +675,10 @@ (3 rows) drop table idxpart; --- Verify behavior for collation (mis)matches -create table idxpart (a text) partition by range (a); -create table idxpart1 (like idxpart); -create table idxpart2 (like idxpart); -create index on idxpart2 (a collate "POSIX"); -create index on idxpart2 (a); -create index on idxpart2 (a collate "C"); -alter table idxpart attach partition idxpart1 for values from ('aaa') to ('bbb'); -alter table idxpart attach partition idxpart2 for values from ('bbb') to ('ccc'); -create table idxpart3 partition of idxpart for values from ('ccc') to ('ddd'); -create index on idxpart (a collate "C"); -create table idxpart4 partition of idxpart for values from ('ddd') to ('eee'); -select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef - from pg_class left join pg_inherits on inhrelid = oid, - lateral pg_get_indexdef(pg_class.oid) - where relkind in ('i', 'I') and relname like 'idxpart%' order by relname; - child | parent | childdef ------------------+---------------+-------------------------------------------------------------------------------- - idxpart1_a_idx | idxpart_a_idx | CREATE INDEX idxpart1_a_idx ON public.idxpart1 USING btree (a COLLATE "C") - idxpart2_a_idx | | CREATE INDEX idxpart2_a_idx ON public.idxpart2 USING btree (a COLLATE "POSIX") - idxpart2_a_idx1 | | CREATE INDEX idxpart2_a_idx1 ON public.idxpart2 USING btree (a) - idxpart2_a_idx2 | idxpart_a_idx | CREATE INDEX idxpart2_a_idx2 ON public.idxpart2 USING btree (a COLLATE "C") - idxpart3_a_idx | idxpart_a_idx | CREATE INDEX idxpart3_a_idx ON public.idxpart3 USING btree (a COLLATE "C") - idxpart4_a_idx | idxpart_a_idx | CREATE INDEX idxpart4_a_idx ON public.idxpart4 USING btree (a COLLATE "C") - idxpart_a_idx | | CREATE INDEX idxpart_a_idx ON ONLY public.idxpart USING btree (a COLLATE "C") -(7 rows) - -drop table idxpart; --- Verify behavior for opclass (mis)matches -create table idxpart (a text) partition by range (a); -create table idxpart1 (like idxpart); -create table idxpart2 (like idxpart); -create index on idxpart2 (a); -alter table idxpart attach partition idxpart1 for values from ('aaa') to ('bbb'); -alter table idxpart attach partition idxpart2 for values from ('bbb') to ('ccc'); -create table idxpart3 partition of idxpart for values from ('ccc') to ('ddd'); -create index on idxpart (a text_pattern_ops); -create table idxpart4 partition of idxpart for values from ('ddd') to ('eee'); --- must *not* have attached the index we created on idxpart2 -select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef - from pg_class left join pg_inherits on inhrelid = oid, - lateral pg_get_indexdef(pg_class.oid) - where relkind in ('i', 'I') and relname like 'idxpart%' order by relname; - child | parent | childdef ------------------+---------------+------------------------------------------------------------------------------------ - idxpart1_a_idx | idxpart_a_idx | CREATE INDEX idxpart1_a_idx ON public.idxpart1 USING btree (a text_pattern_ops) - idxpart2_a_idx | | CREATE INDEX idxpart2_a_idx ON public.idxpart2 USING btree (a) 
- idxpart2_a_idx1 | idxpart_a_idx | CREATE INDEX idxpart2_a_idx1 ON public.idxpart2 USING btree (a text_pattern_ops) - idxpart3_a_idx | idxpart_a_idx | CREATE INDEX idxpart3_a_idx ON public.idxpart3 USING btree (a text_pattern_ops) - idxpart4_a_idx | idxpart_a_idx | CREATE INDEX idxpart4_a_idx ON public.idxpart4 USING btree (a text_pattern_ops) - idxpart_a_idx | | CREATE INDEX idxpart_a_idx ON ONLY public.idxpart USING btree (a text_pattern_ops) -(6 rows) - -drop index idxpart_a_idx; -create index on only idxpart (a text_pattern_ops); --- must reject -alter index idxpart_a_idx attach partition idxpart2_a_idx; -ERROR: cannot attach index "idxpart2_a_idx" as a partition of index "idxpart_a_idx" -DETAIL: The index definitions do not match. -drop table idxpart; --- Verify that attaching indexes maps attribute numbers correctly -create table idxpart (col1 int, a int, col2 int, b int) partition by range (a); -create table idxpart1 (b int, col1 int, col2 int, col3 int, a int); -alter table idxpart drop column col1, drop column col2; -alter table idxpart1 drop column col1, drop column col2, drop column col3; -alter table idxpart attach partition idxpart1 for values from (0) to (1000); -create index idxpart_1_idx on only idxpart (b, a); -create index idxpart1_1_idx on idxpart1 (b, a); -create index idxpart1_1b_idx on idxpart1 (b); --- test expressions and partial-index predicate, too -create index idxpart_2_idx on only idxpart ((b + a)) where a > 1; -create index idxpart1_2_idx on idxpart1 ((b + a)) where a > 1; -create index idxpart1_2b_idx on idxpart1 ((a + b)) where a > 1; -create index idxpart1_2c_idx on idxpart1 ((b + a)) where b > 1; -alter index idxpart_1_idx attach partition idxpart1_1b_idx; -- fail -ERROR: cannot attach index "idxpart1_1b_idx" as a partition of index "idxpart_1_idx" -DETAIL: The index definitions do not match. -alter index idxpart_1_idx attach partition idxpart1_1_idx; -alter index idxpart_2_idx attach partition idxpart1_2b_idx; -- fail -ERROR: cannot attach index "idxpart1_2b_idx" as a partition of index "idxpart_2_idx" -DETAIL: The index definitions do not match. -alter index idxpart_2_idx attach partition idxpart1_2c_idx; -- fail -ERROR: cannot attach index "idxpart1_2c_idx" as a partition of index "idxpart_2_idx" -DETAIL: The index definitions do not match. 
-alter index idxpart_2_idx attach partition idxpart1_2_idx; -- ok -select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef - from pg_class left join pg_inherits on inhrelid = oid, - lateral pg_get_indexdef(pg_class.oid) - where relkind in ('i', 'I') and relname like 'idxpart%' order by relname; - child | parent | childdef ------------------+---------------+----------------------------------------------------------------------------------------- - idxpart1_1_idx | idxpart_1_idx | CREATE INDEX idxpart1_1_idx ON public.idxpart1 USING btree (b, a) - idxpart1_1b_idx | | CREATE INDEX idxpart1_1b_idx ON public.idxpart1 USING btree (b) - idxpart1_2_idx | idxpart_2_idx | CREATE INDEX idxpart1_2_idx ON public.idxpart1 USING btree (((b + a))) WHERE (a > 1) - idxpart1_2b_idx | | CREATE INDEX idxpart1_2b_idx ON public.idxpart1 USING btree (((a + b))) WHERE (a > 1) - idxpart1_2c_idx | | CREATE INDEX idxpart1_2c_idx ON public.idxpart1 USING btree (((b + a))) WHERE (b > 1) - idxpart_1_idx | | CREATE INDEX idxpart_1_idx ON ONLY public.idxpart USING btree (b, a) - idxpart_2_idx | | CREATE INDEX idxpart_2_idx ON ONLY public.idxpart USING btree (((b + a))) WHERE (a > 1) -(7 rows) - -drop table idxpart; --- Make sure the partition columns are mapped correctly -create table idxpart (a int, b int, c text) partition by range (a); -create index idxparti on idxpart (a); -create index idxparti2 on idxpart (c, b); -create table idxpart1 (c text, a int, b int); -alter table idxpart attach partition idxpart1 for values from (0) to (10); -create table idxpart2 (c text, a int, b int); -create index on idxpart2 (a); -create index on idxpart2 (c, b); -alter table idxpart attach partition idxpart2 for values from (10) to (20); -select c.relname, pg_get_indexdef(indexrelid) - from pg_class c join pg_index i on c.oid = i.indexrelid - where indrelid::regclass::text like 'idxpart%' - order by indexrelid::regclass::text collate "C"; - relname | pg_get_indexdef -------------------+--------------------------------------------------------------------- - idxpart1_a_idx | CREATE INDEX idxpart1_a_idx ON public.idxpart1 USING btree (a) - idxpart1_c_b_idx | CREATE INDEX idxpart1_c_b_idx ON public.idxpart1 USING btree (c, b) - idxpart2_a_idx | CREATE INDEX idxpart2_a_idx ON public.idxpart2 USING btree (a) - idxpart2_c_b_idx | CREATE INDEX idxpart2_c_b_idx ON public.idxpart2 USING btree (c, b) - idxparti | CREATE INDEX idxparti ON ONLY public.idxpart USING btree (a) - idxparti2 | CREATE INDEX idxparti2 ON ONLY public.idxpart USING btree (c, b) -(6 rows) - -drop table idxpart; --- Verify that columns are mapped correctly in expression indexes -create table idxpart (col1 int, col2 int, a int, b int) partition by range (a); -create table idxpart1 (col2 int, b int, col1 int, a int); -create table idxpart2 (col1 int, col2 int, b int, a int); -alter table idxpart drop column col1, drop column col2; -alter table idxpart1 drop column col1, drop column col2; -alter table idxpart2 drop column col1, drop column col2; -create index on idxpart2 (abs(b)); -alter table idxpart attach partition idxpart2 for values from (0) to (1); -create index on idxpart (abs(b)); -create index on idxpart ((b + 1)); -alter table idxpart attach partition idxpart1 for values from (1) to (2); -select c.relname, pg_get_indexdef(indexrelid) - from pg_class c join pg_index i on c.oid = i.indexrelid - where indrelid::regclass::text like 'idxpart%' - order by indexrelid::regclass::text collate "C"; - relname | pg_get_indexdef 
--------------------+------------------------------------------------------------------------------ - idxpart1_abs_idx | CREATE INDEX idxpart1_abs_idx ON public.idxpart1 USING btree (abs(b)) - idxpart1_expr_idx | CREATE INDEX idxpart1_expr_idx ON public.idxpart1 USING btree (((b + 1))) - idxpart2_abs_idx | CREATE INDEX idxpart2_abs_idx ON public.idxpart2 USING btree (abs(b)) - idxpart2_expr_idx | CREATE INDEX idxpart2_expr_idx ON public.idxpart2 USING btree (((b + 1))) - idxpart_abs_idx | CREATE INDEX idxpart_abs_idx ON ONLY public.idxpart USING btree (abs(b)) - idxpart_expr_idx | CREATE INDEX idxpart_expr_idx ON ONLY public.idxpart USING btree (((b + 1))) -(6 rows) - -drop table idxpart; --- Verify that columns are mapped correctly for WHERE in a partial index -create table idxpart (col1 int, a int, col3 int, b int) partition by range (a); -alter table idxpart drop column col1, drop column col3; -create table idxpart1 (col1 int, col2 int, col3 int, col4 int, b int, a int); -alter table idxpart1 drop column col1, drop column col2, drop column col3, drop column col4; -alter table idxpart attach partition idxpart1 for values from (0) to (1000); -create table idxpart2 (col1 int, col2 int, b int, a int); -create index on idxpart2 (a) where b > 1000; -alter table idxpart2 drop column col1, drop column col2; -alter table idxpart attach partition idxpart2 for values from (1000) to (2000); -create index on idxpart (a) where b > 1000; -select c.relname, pg_get_indexdef(indexrelid) - from pg_class c join pg_index i on c.oid = i.indexrelid - where indrelid::regclass::text like 'idxpart%' - order by indexrelid::regclass::text collate "C"; - relname | pg_get_indexdef -----------------+------------------------------------------------------------------------------------ - idxpart1_a_idx | CREATE INDEX idxpart1_a_idx ON public.idxpart1 USING btree (a) WHERE (b > 1000) - idxpart2_a_idx | CREATE INDEX idxpart2_a_idx ON public.idxpart2 USING btree (a) WHERE (b > 1000) - idxpart_a_idx | CREATE INDEX idxpart_a_idx ON ONLY public.idxpart USING btree (a) WHERE (b > 1000) -(3 rows) - -drop table idxpart; --- Column number mapping: dropped columns in the partition -create table idxpart1 (drop_1 int, drop_2 int, col_keep int, drop_3 int); -alter table idxpart1 drop column drop_1; -alter table idxpart1 drop column drop_2; -alter table idxpart1 drop column drop_3; -create index on idxpart1 (col_keep); -create table idxpart (col_keep int) partition by range (col_keep); -create index on idxpart (col_keep); -alter table idxpart attach partition idxpart1 for values from (0) to (1000); -\d idxpart - Partitioned table "public.idxpart" - Column | Type | Collation | Nullable | Default -----------+---------+-----------+----------+--------- - col_keep | integer | | | -Partition key: RANGE (col_keep) -Indexes: - "idxpart_col_keep_idx" btree (col_keep) -Number of partitions: 1 (Use \d+ to list them.) - -\d idxpart1 - Table "public.idxpart1" - Column | Type | Collation | Nullable | Default -----------+---------+-----------+----------+--------- - col_keep | integer | | | -Partition of: idxpart FOR VALUES FROM (0) TO (1000) -Indexes: - "idxpart1_col_keep_idx" btree (col_keep) - -select attrelid::regclass, attname, attnum from pg_attribute - where attrelid::regclass::text like 'idxpart%' and attnum > 0 - order by attrelid::regclass, attnum; - attrelid | attname | attnum ------------------------+------------------------------+-------- - idxpart1 | ........pg.dropped.1........ | 1 - idxpart1 | ........pg.dropped.2........ 
| 2 - idxpart1 | col_keep | 3 - idxpart1 | ........pg.dropped.4........ | 4 - idxpart1_col_keep_idx | col_keep | 1 - idxpart | col_keep | 1 - idxpart_col_keep_idx | col_keep | 1 -(7 rows) - -drop table idxpart; --- Column number mapping: dropped columns in the parent table -create table idxpart(drop_1 int, drop_2 int, col_keep int, drop_3 int) partition by range (col_keep); -alter table idxpart drop column drop_1; -alter table idxpart drop column drop_2; -alter table idxpart drop column drop_3; -create table idxpart1 (col_keep int); -create index on idxpart1 (col_keep); -create index on idxpart (col_keep); -alter table idxpart attach partition idxpart1 for values from (0) to (1000); -\d idxpart - Partitioned table "public.idxpart" - Column | Type | Collation | Nullable | Default -----------+---------+-----------+----------+--------- - col_keep | integer | | | -Partition key: RANGE (col_keep) -Indexes: - "idxpart_col_keep_idx" btree (col_keep) -Number of partitions: 1 (Use \d+ to list them.) - -\d idxpart1 - Table "public.idxpart1" - Column | Type | Collation | Nullable | Default -----------+---------+-----------+----------+--------- - col_keep | integer | | | -Partition of: idxpart FOR VALUES FROM (0) TO (1000) -Indexes: - "idxpart1_col_keep_idx" btree (col_keep) - -select attrelid::regclass, attname, attnum from pg_attribute - where attrelid::regclass::text like 'idxpart%' and attnum > 0 - order by attrelid::regclass, attnum; - attrelid | attname | attnum ------------------------+------------------------------+-------- - idxpart | ........pg.dropped.1........ | 1 - idxpart | ........pg.dropped.2........ | 2 - idxpart | col_keep | 3 - idxpart | ........pg.dropped.4........ | 4 - idxpart1 | col_keep | 1 - idxpart1_col_keep_idx | col_keep | 1 - idxpart_col_keep_idx | col_keep | 1 -(7 rows) - -drop table idxpart; --- --- Constraint-related indexes --- --- Verify that it works to add primary key / unique to partitioned tables -create table idxpart (a int primary key, b int) partition by range (a); -\d idxpart - Partitioned table "public.idxpart" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | not null | - b | integer | | | -Partition key: RANGE (a) -Indexes: - "idxpart_pkey" PRIMARY KEY, btree (a) -Number of partitions: 0 - --- multiple primary key on child should fail -create table failpart partition of idxpart (b primary key) for values from (0) to (100); -ERROR: multiple primary keys for table "failpart" are not allowed -drop table idxpart; --- primary key on child is okay if there's no PK in the parent, though -create table idxpart (a int) partition by range (a); -create table idxpart1pk partition of idxpart (a primary key) for values from (0) to (100); -\d idxpart1pk - Table "public.idxpart1pk" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | not null | -Partition of: idxpart FOR VALUES FROM (0) TO (100) -Indexes: - "idxpart1pk_pkey" PRIMARY KEY, btree (a) - -drop table idxpart; --- Failing to use the full partition key is not allowed -create table idxpart (a int unique, b int) partition by range (a, b); -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: UNIQUE constraint on table "idxpart" lacks column "b" which is part of the partition key. 
-create table idxpart (a int, b int unique) partition by range (a, b); -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: UNIQUE constraint on table "idxpart" lacks column "a" which is part of the partition key. -create table idxpart (a int primary key, b int) partition by range (b, a); -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: PRIMARY KEY constraint on table "idxpart" lacks column "b" which is part of the partition key. -create table idxpart (a int, b int primary key) partition by range (b, a); -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: PRIMARY KEY constraint on table "idxpart" lacks column "a" which is part of the partition key. --- OK if you use them in some other order -create table idxpart (a int, b int, c text, primary key (a, b, c)) partition by range (b, c, a); -drop table idxpart; --- OK to add an exclusion constraint if partitioning by its equal column -create table idxpart (a int4range, exclude USING GIST (a with = )) partition by range (a); -drop table idxpart; --- OK more than one equal column -create table idxpart (a int4range, b int4range, exclude USING GIST (a with =, b with =)) partition by range (a, b); -drop table idxpart; --- OK with more than one equal column: constraint is a proper superset of partition key -create table idxpart (a int4range, b int4range, exclude USING GIST (a with =, b with =)) partition by range (a); -drop table idxpart; --- Not OK more than one equal column: partition keys are a proper superset of constraint -create table idxpart (a int4range, b int4range, exclude USING GIST (a with = )) partition by range (a, b); -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: EXCLUDE constraint on table "idxpart" lacks column "b" which is part of the partition key. --- Not OK with just -|- -create table idxpart (a int4range, exclude USING GIST (a with -|- )) partition by range (a); -ERROR: cannot match partition key to index on column "a" using non-equal operator "-|-" --- OK with equals and &&, and equals is the partition key -create table idxpart (a int4range, b int4range, exclude USING GIST (a with =, b with &&)) partition by range (a); -drop table idxpart; --- Not OK with equals and &&, and equals is not the partition key -create table idxpart (a int4range, b int4range, c int4range, exclude USING GIST (b with =, c with &&)) partition by range (a); -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: EXCLUDE constraint on table "idxpart" lacks column "a" which is part of the partition key. --- OK more than one equal column and a && column -create table idxpart (a int4range, b int4range, c int4range, exclude USING GIST (a with =, b with =, c with &&)) partition by range (a, b); -drop table idxpart; --- no expressions in partition key for PK/UNIQUE -create table idxpart (a int primary key, b int) partition by range ((b + a)); -ERROR: unsupported PRIMARY KEY constraint with partition key definition -DETAIL: PRIMARY KEY constraints cannot be used when partition keys include expressions. -create table idxpart (a int unique, b int) partition by range ((b + a)); -ERROR: unsupported UNIQUE constraint with partition key definition -DETAIL: UNIQUE constraints cannot be used when partition keys include expressions. 
--- use ALTER TABLE to add a primary key -create table idxpart (a int, b int, c text) partition by range (a, b); -alter table idxpart add primary key (a); -- not an incomplete one though -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: PRIMARY KEY constraint on table "idxpart" lacks column "b" which is part of the partition key. -alter table idxpart add primary key (a, b); -- this works -\d idxpart - Partitioned table "public.idxpart" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | not null | - b | integer | | not null | - c | text | | | -Partition key: RANGE (a, b) -Indexes: - "idxpart_pkey" PRIMARY KEY, btree (a, b) -Number of partitions: 0 - -create table idxpart1 partition of idxpart for values from (0, 0) to (1000, 1000); -\d idxpart1 - Table "public.idxpart1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | not null | - b | integer | | not null | - c | text | | | -Partition of: idxpart FOR VALUES FROM (0, 0) TO (1000, 1000) -Indexes: - "idxpart1_pkey" PRIMARY KEY, btree (a, b) - -drop table idxpart; --- use ALTER TABLE to add a unique constraint -create table idxpart (a int, b int) partition by range (a, b); -alter table idxpart add unique (a); -- not an incomplete one though -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: UNIQUE constraint on table "idxpart" lacks column "b" which is part of the partition key. -alter table idxpart add unique (b, a); -- this works -\d idxpart - Partitioned table "public.idxpart" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Partition key: RANGE (a, b) -Indexes: - "idxpart_b_a_key" UNIQUE CONSTRAINT, btree (b, a) -Number of partitions: 0 - -drop table idxpart; --- Exclusion constraints can be added if partitioning by their equal column -create table idxpart (a int4range, b int4range) partition by range (a); -alter table idxpart add exclude USING GIST (a with =); -drop table idxpart; --- OK more than one equal column -create table idxpart (a int4range, b int4range) partition by range (a, b); -alter table idxpart add exclude USING GIST (a with =, b with =); -drop table idxpart; --- OK with more than one equal column: constraint is a proper superset of partition key -create table idxpart (a int4range, b int4range) partition by range (a); -alter table idxpart add exclude USING GIST (a with =, b with =); -drop table idxpart; --- Not OK more than one equal column: partition keys are a proper superset of constraint -create table idxpart (a int4range, b int4range) partition by range (a, b); -alter table idxpart add exclude USING GIST (a with =); -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: EXCLUDE constraint on table "idxpart" lacks column "b" which is part of the partition key. 
-drop table idxpart; --- Not OK with just -|- -create table idxpart (a int4range, b int4range) partition by range (a, b); -alter table idxpart add exclude USING GIST (a with -|-); -ERROR: cannot match partition key to index on column "a" using non-equal operator "-|-" -drop table idxpart; --- OK with equals and &&, and equals is the partition key -create table idxpart (a int4range, b int4range) partition by range (a); -alter table idxpart add exclude USING GIST (a with =, b with &&); -drop table idxpart; --- Not OK with equals and &&, and equals is not the partition key -create table idxpart (a int4range, b int4range, c int4range) partition by range (a); -alter table idxpart add exclude USING GIST (b with =, c with &&); -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: EXCLUDE constraint on table "idxpart" lacks column "a" which is part of the partition key. -drop table idxpart; --- OK more than one equal column and a && column -create table idxpart (a int4range, b int4range, c int4range) partition by range (a, b); -alter table idxpart add exclude USING GIST (a with =, b with =, c with &&); -drop table idxpart; --- When (sub)partitions are created, they also contain the constraint -create table idxpart (a int, b int, primary key (a, b)) partition by range (a, b); -create table idxpart1 partition of idxpart for values from (1, 1) to (10, 10); -create table idxpart2 partition of idxpart for values from (10, 10) to (20, 20) - partition by range (b); -create table idxpart21 partition of idxpart2 for values from (10) to (15); -create table idxpart22 partition of idxpart2 for values from (15) to (20); -create table idxpart3 (b int not null, a int not null); -alter table idxpart attach partition idxpart3 for values from (20, 20) to (30, 30); -select conname, contype, conrelid::regclass, conindid::regclass, conkey - from pg_constraint where conrelid::regclass::text like 'idxpart%' - order by conrelid::regclass::text, conname; - conname | contype | conrelid | conindid | conkey -----------------+---------+-----------+----------------+-------- - idxpart_pkey | p | idxpart | idxpart_pkey | {1,2} - idxpart1_pkey | p | idxpart1 | idxpart1_pkey | {1,2} - idxpart2_pkey | p | idxpart2 | idxpart2_pkey | {1,2} - idxpart21_pkey | p | idxpart21 | idxpart21_pkey | {1,2} - idxpart22_pkey | p | idxpart22 | idxpart22_pkey | {1,2} - idxpart3_pkey | p | idxpart3 | idxpart3_pkey | {2,1} -(6 rows) - -drop table idxpart; --- Verify that multi-layer partitioning honors the requirement that all --- columns in the partition key must appear in primary/unique key -create table idxpart (a int, b int, primary key (a)) partition by range (a); -create table idxpart2 partition of idxpart -for values from (0) to (1000) partition by range (b); -- fail -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: PRIMARY KEY constraint on table "idxpart2" lacks column "b" which is part of the partition key. -drop table idxpart; --- Ditto for the ATTACH PARTITION case -create table idxpart (a int unique, b int) partition by range (a); -create table idxpart1 (a int not null, b int, unique (a, b)) - partition by range (a, b); -alter table idxpart attach partition idxpart1 for values from (1) to (1000); -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: UNIQUE constraint on table "idxpart1" lacks column "b" which is part of the partition key. 
-DROP TABLE idxpart, idxpart1; --- Multi-layer partitioning works correctly in this case: -create table idxpart (a int, b int, primary key (a, b)) partition by range (a); -create table idxpart2 partition of idxpart for values from (0) to (1000) partition by range (b); -create table idxpart21 partition of idxpart2 for values from (0) to (1000); -select conname, contype, conrelid::regclass, conindid::regclass, conkey - from pg_constraint where conrelid::regclass::text like 'idxpart%' - order by conname; - conname | contype | conrelid | conindid | conkey -----------------+---------+-----------+----------------+-------- - idxpart21_pkey | p | idxpart21 | idxpart21_pkey | {1,2} - idxpart2_pkey | p | idxpart2 | idxpart2_pkey | {1,2} - idxpart_pkey | p | idxpart | idxpart_pkey | {1,2} -(3 rows) - -drop table idxpart; --- If a partitioned table has a unique/PK constraint, then it's not possible --- to drop the corresponding constraint in the children; nor it's possible --- to drop the indexes individually. Dropping the constraint in the parent --- gets rid of the lot. -create table idxpart (i int) partition by hash (i); -create table idxpart0 partition of idxpart (i) for values with (modulus 2, remainder 0); -create table idxpart1 partition of idxpart (i) for values with (modulus 2, remainder 1); -alter table idxpart0 add primary key(i); -alter table idxpart add primary key(i); -select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, - conname, conislocal, coninhcount, connoinherit, convalidated - from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) - left join pg_constraint con on (idx.indexrelid = con.conindid) - where indrelid::regclass::text like 'idxpart%' - order by indexrelid::regclass::text collate "C"; - indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated -----------+---------------+--------------+------------+---------------+------------+-------------+--------------+-------------- - idxpart0 | idxpart0_pkey | idxpart_pkey | t | idxpart0_pkey | f | 1 | t | t - idxpart1 | idxpart1_pkey | idxpart_pkey | t | idxpart1_pkey | f | 1 | f | t - idxpart | idxpart_pkey | | t | idxpart_pkey | t | 0 | t | t -(3 rows) - -drop index idxpart0_pkey; -- fail -ERROR: cannot drop index idxpart0_pkey because index idxpart_pkey requires it -HINT: You can drop index idxpart_pkey instead. -drop index idxpart1_pkey; -- fail -ERROR: cannot drop index idxpart1_pkey because index idxpart_pkey requires it -HINT: You can drop index idxpart_pkey instead. 
-alter table idxpart0 drop constraint idxpart0_pkey; -- fail -ERROR: cannot drop inherited constraint "idxpart0_pkey" of relation "idxpart0" -alter table idxpart1 drop constraint idxpart1_pkey; -- fail -ERROR: cannot drop inherited constraint "idxpart1_pkey" of relation "idxpart1" -alter table idxpart drop constraint idxpart_pkey; -- ok -select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, - conname, conislocal, coninhcount, connoinherit, convalidated - from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) - left join pg_constraint con on (idx.indexrelid = con.conindid) - where indrelid::regclass::text like 'idxpart%' - order by indexrelid::regclass::text collate "C"; - indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated -----------+------------+-----------+------------+---------+------------+-------------+--------------+-------------- -(0 rows) - -drop table idxpart; --- If the partition to be attached already has a primary key, fail if --- it doesn't match the parent's PK. -CREATE TABLE idxpart (c1 INT PRIMARY KEY, c2 INT, c3 VARCHAR(10)) PARTITION BY RANGE(c1); -CREATE TABLE idxpart1 (LIKE idxpart); -ALTER TABLE idxpart1 ADD PRIMARY KEY (c1, c2); -ALTER TABLE idxpart ATTACH PARTITION idxpart1 FOR VALUES FROM (100) TO (200); -ERROR: multiple primary keys for table "idxpart1" are not allowed -DROP TABLE idxpart, idxpart1; --- Ditto if there is some distance between the PKs (subpartitioning) -create table idxpart (a int, b int, primary key (a)) partition by range (a); -create table idxpart1 (a int not null, b int) partition by range (a); -create table idxpart11 (a int not null, b int primary key); -alter table idxpart1 attach partition idxpart11 for values from (0) to (1000); -alter table idxpart attach partition idxpart1 for values from (0) to (10000); -ERROR: multiple primary keys for table "idxpart11" are not allowed -drop table idxpart, idxpart1, idxpart11; --- If a partitioned table has a constraint whose index is not valid, --- attaching a missing partition makes it valid. 
-create table idxpart (a int) partition by range (a); -create table idxpart0 (like idxpart); -alter table idxpart0 add primary key (a); -alter table idxpart attach partition idxpart0 for values from (0) to (1000); -alter table only idxpart add primary key (a); -select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, - conname, conislocal, coninhcount, connoinherit, convalidated - from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) - left join pg_constraint con on (idx.indexrelid = con.conindid) - where indrelid::regclass::text like 'idxpart%' - order by indexrelid::regclass::text collate "C"; - indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated -----------+---------------+-----------+------------+---------------+------------+-------------+--------------+-------------- - idxpart0 | idxpart0_pkey | | t | idxpart0_pkey | t | 0 | t | t - idxpart | idxpart_pkey | | f | idxpart_pkey | t | 0 | t | t -(2 rows) - -alter index idxpart_pkey attach partition idxpart0_pkey; -select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, - conname, conislocal, coninhcount, connoinherit, convalidated - from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) - left join pg_constraint con on (idx.indexrelid = con.conindid) - where indrelid::regclass::text like 'idxpart%' - order by indexrelid::regclass::text collate "C"; - indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated -----------+---------------+--------------+------------+---------------+------------+-------------+--------------+-------------- - idxpart0 | idxpart0_pkey | idxpart_pkey | t | idxpart0_pkey | f | 1 | t | t - idxpart | idxpart_pkey | | t | idxpart_pkey | t | 0 | t | t -(2 rows) - -drop table idxpart; --- Related to the above scenario: ADD PRIMARY KEY on the parent mustn't --- automatically propagate NOT NULL to child columns. -create table idxpart (a int) partition by range (a); -create table idxpart0 (like idxpart); -alter table idxpart0 add unique (a); -alter table idxpart attach partition idxpart0 default; -alter table only idxpart add primary key (a); -- fail, no not-null constraint -ERROR: constraint must be added to child tables too -DETAIL: Column "a" of relation "idxpart0" is not already NOT NULL. -HINT: Do not specify the ONLY keyword. -alter table idxpart0 alter column a set not null; -alter table only idxpart add primary key (a); -- now it works -alter index idxpart_pkey attach partition idxpart0_a_key; -alter table idxpart0 alter column a drop not null; -- fail, pkey needs it -ERROR: column "a" is marked NOT NULL in parent table -drop table idxpart; --- if a partition has a unique index without a constraint, does not attach --- automatically; creates a new index instead. 
-create table idxpart (a int, b int) partition by range (a); -create table idxpart1 (a int not null, b int); -create unique index on idxpart1 (a); -alter table idxpart add primary key (a); -alter table idxpart attach partition idxpart1 for values from (1) to (1000); -select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, - conname, conislocal, coninhcount, connoinherit, convalidated - from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) - left join pg_constraint con on (idx.indexrelid = con.conindid) - where indrelid::regclass::text like 'idxpart%' - order by indexrelid::regclass::text collate "C"; - indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated -----------+----------------+--------------+------------+---------------+------------+-------------+--------------+-------------- - idxpart1 | idxpart1_a_idx | | t | | | | | - idxpart1 | idxpart1_pkey | idxpart_pkey | t | idxpart1_pkey | f | 1 | f | t - idxpart | idxpart_pkey | | t | idxpart_pkey | t | 0 | t | t -(3 rows) - -drop table idxpart; --- Can't attach an index without a corresponding constraint -create table idxpart (a int, b int) partition by range (a); -create table idxpart1 (a int not null, b int); -create unique index on idxpart1 (a); -alter table idxpart attach partition idxpart1 for values from (1) to (1000); -alter table only idxpart add primary key (a); -alter index idxpart_pkey attach partition idxpart1_a_idx; -- fail -ERROR: cannot attach index "idxpart1_a_idx" as a partition of index "idxpart_pkey" -DETAIL: The index "idxpart_pkey" belongs to a constraint in table "idxpart" but no constraint exists for index "idxpart1_a_idx". -drop table idxpart; --- Test that unique constraints are working -create table idxpart (a int, b text, primary key (a, b)) partition by range (a); -create table idxpart1 partition of idxpart for values from (0) to (100000); -create table idxpart2 (c int, like idxpart); -insert into idxpart2 (c, a, b) values (42, 572814, 'inserted first'); -alter table idxpart2 drop column c; -create unique index on idxpart (a); -alter table idxpart attach partition idxpart2 for values from (100000) to (1000000); -insert into idxpart values (0, 'zero'), (42, 'life'), (2^16, 'sixteen'); -insert into idxpart select 2^g, format('two to power of %s', g) from generate_series(15, 17) g; -ERROR: duplicate key value violates unique constraint "idxpart1_a_idx" -DETAIL: Key (a)=(65536) already exists. -insert into idxpart values (16, 'sixteen'); -insert into idxpart (b, a) values ('one', 142857), ('two', 285714); -insert into idxpart select a * 2, b || b from idxpart where a between 2^16 and 2^19; -ERROR: duplicate key value violates unique constraint "idxpart2_a_idx" -DETAIL: Key (a)=(285714) already exists. -insert into idxpart values (572814, 'five'); -ERROR: duplicate key value violates unique constraint "idxpart2_a_idx" -DETAIL: Key (a)=(572814) already exists. 
-insert into idxpart values (857142, 'six'); -select tableoid::regclass, * from idxpart order by a; - tableoid | a | b -----------+--------+---------------- - idxpart1 | 0 | zero - idxpart1 | 16 | sixteen - idxpart1 | 42 | life - idxpart1 | 65536 | sixteen - idxpart2 | 142857 | one - idxpart2 | 285714 | two - idxpart2 | 572814 | inserted first - idxpart2 | 857142 | six -(8 rows) - -drop table idxpart; --- Test some other non-btree index types -create table idxpart (a int, b text, c int[]) partition by range (a); -create table idxpart1 partition of idxpart for values from (0) to (100000); -set enable_seqscan to off; -create index idxpart_brin on idxpart using brin(b); -explain (costs off) select * from idxpart where b = 'abcd'; - QUERY PLAN -------------------------------------------- - Bitmap Heap Scan on idxpart1 idxpart - Recheck Cond: (b = 'abcd'::text) - -> Bitmap Index Scan on idxpart1_b_idx - Index Cond: (b = 'abcd'::text) -(4 rows) - -drop index idxpart_brin; -create index idxpart_spgist on idxpart using spgist(b); -explain (costs off) select * from idxpart where b = 'abcd'; - QUERY PLAN -------------------------------------------- - Bitmap Heap Scan on idxpart1 idxpart - Recheck Cond: (b = 'abcd'::text) - -> Bitmap Index Scan on idxpart1_b_idx - Index Cond: (b = 'abcd'::text) -(4 rows) - -drop index idxpart_spgist; -create index idxpart_gin on idxpart using gin(c); -explain (costs off) select * from idxpart where c @> array[42]; - QUERY PLAN ----------------------------------------------- - Bitmap Heap Scan on idxpart1 idxpart - Recheck Cond: (c @> '{42}'::integer[]) - -> Bitmap Index Scan on idxpart1_c_idx - Index Cond: (c @> '{42}'::integer[]) -(4 rows) - -drop index idxpart_gin; -reset enable_seqscan; -drop table idxpart; --- intentionally leave some objects around -create table idxpart (a int) partition by range (a); -create table idxpart1 partition of idxpart for values from (0) to (100); -create table idxpart2 partition of idxpart for values from (100) to (1000) - partition by range (a); -create table idxpart21 partition of idxpart2 for values from (100) to (200); -create table idxpart22 partition of idxpart2 for values from (200) to (300); -create index on idxpart22 (a); -create index on only idxpart2 (a); -alter index idxpart2_a_idx attach partition idxpart22_a_idx; -create index on idxpart (a); -create table idxpart_another (a int, b int, primary key (a, b)) partition by range (a); -create table idxpart_another_1 partition of idxpart_another for values from (0) to (100); -create table idxpart3 (c int, b int, a int) partition by range (a); -alter table idxpart3 drop column b, drop column c; -create table idxpart31 partition of idxpart3 for values from (1000) to (1200); -create table idxpart32 partition of idxpart3 for values from (1200) to (1400); -alter table idxpart attach partition idxpart3 for values from (1000) to (2000); --- More objects intentionally left behind, to verify some pg_dump/pg_upgrade --- behavior; see https://postgr.es/m/20190321204928.GA17535@alvherre.pgsql -create schema regress_indexing; -set search_path to regress_indexing; -create table pk (a int primary key) partition by range (a); -create table pk1 partition of pk for values from (0) to (1000); -create table pk2 (b int, a int); -alter table pk2 drop column b; -alter table pk2 alter a set not null; -alter table pk attach partition pk2 for values from (1000) to (2000); -create table pk3 partition of pk for values from (2000) to (3000); -create table pk4 (like pk); -alter table pk attach partition 
pk4 for values from (3000) to (4000); -create table pk5 (like pk) partition by range (a); -create table pk51 partition of pk5 for values from (4000) to (4500); -create table pk52 partition of pk5 for values from (4500) to (5000); -alter table pk attach partition pk5 for values from (4000) to (5000); -reset search_path; --- Test that covering partitioned indexes work in various cases -create table covidxpart (a int, b int) partition by list (a); -create unique index on covidxpart (a) include (b); -create table covidxpart1 partition of covidxpart for values in (1); -create table covidxpart2 partition of covidxpart for values in (2); -insert into covidxpart values (1, 1); -insert into covidxpart values (1, 1); -ERROR: duplicate key value violates unique constraint "covidxpart1_a_b_idx" -DETAIL: Key (a)=(1) already exists. -create table covidxpart3 (b int, c int, a int); -alter table covidxpart3 drop c; -alter table covidxpart attach partition covidxpart3 for values in (3); -insert into covidxpart values (3, 1); -insert into covidxpart values (3, 1); -ERROR: duplicate key value violates unique constraint "covidxpart3_a_b_idx" -DETAIL: Key (a)=(3) already exists. -create table covidxpart4 (b int, a int); -create unique index on covidxpart4 (a) include (b); -create unique index on covidxpart4 (a); -alter table covidxpart attach partition covidxpart4 for values in (4); -insert into covidxpart values (4, 1); -insert into covidxpart values (4, 1); -ERROR: duplicate key value violates unique constraint "covidxpart4_a_b_idx" -DETAIL: Key (a)=(4) already exists. -create unique index on covidxpart (b) include (a); -- should fail -ERROR: unique constraint on partitioned table must include all partitioning columns -DETAIL: UNIQUE constraint on table "covidxpart" lacks column "a" which is part of the partition key. --- check that detaching a partition also detaches the primary key constraint -create table parted_pk_detach_test (a int primary key) partition by list (a); -create table parted_pk_detach_test1 partition of parted_pk_detach_test for values in (1); -alter table parted_pk_detach_test1 drop constraint parted_pk_detach_test1_pkey; -- should fail -ERROR: cannot drop inherited constraint "parted_pk_detach_test1_pkey" of relation "parted_pk_detach_test1" -alter table parted_pk_detach_test detach partition parted_pk_detach_test1; -alter table parted_pk_detach_test1 drop constraint parted_pk_detach_test1_pkey; -drop table parted_pk_detach_test, parted_pk_detach_test1; -create table parted_uniq_detach_test (a int unique) partition by list (a); -create table parted_uniq_detach_test1 partition of parted_uniq_detach_test for values in (1); -alter table parted_uniq_detach_test1 drop constraint parted_uniq_detach_test1_a_key; -- should fail -ERROR: cannot drop inherited constraint "parted_uniq_detach_test1_a_key" of relation "parted_uniq_detach_test1" -alter table parted_uniq_detach_test detach partition parted_uniq_detach_test1; -alter table parted_uniq_detach_test1 drop constraint parted_uniq_detach_test1_a_key; -drop table parted_uniq_detach_test, parted_uniq_detach_test1; --- check that dropping a column takes with it any partitioned indexes --- depending on it. -create table parted_index_col_drop(a int, b int, c int) - partition by list (a); -create table parted_index_col_drop1 partition of parted_index_col_drop - for values in (1) partition by list (a); --- leave this partition without children. 
-create table parted_index_col_drop2 partition of parted_index_col_drop - for values in (2) partition by list (a); -create table parted_index_col_drop11 partition of parted_index_col_drop1 - for values in (1); -create index on parted_index_col_drop (b); -create index on parted_index_col_drop (c); -create index on parted_index_col_drop (b, c); -alter table parted_index_col_drop drop column c; -\d parted_index_col_drop - Partitioned table "public.parted_index_col_drop" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Partition key: LIST (a) -Indexes: - "parted_index_col_drop_b_idx" btree (b) -Number of partitions: 2 (Use \d+ to list them.) - -\d parted_index_col_drop1 - Partitioned table "public.parted_index_col_drop1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Partition of: parted_index_col_drop FOR VALUES IN (1) -Partition key: LIST (a) -Indexes: - "parted_index_col_drop1_b_idx" btree (b) -Number of partitions: 1 (Use \d+ to list them.) - -\d parted_index_col_drop2 - Partitioned table "public.parted_index_col_drop2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Partition of: parted_index_col_drop FOR VALUES IN (2) -Partition key: LIST (a) -Indexes: - "parted_index_col_drop2_b_idx" btree (b) -Number of partitions: 0 - -\d parted_index_col_drop11 - Table "public.parted_index_col_drop11" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | - b | integer | | | -Partition of: parted_index_col_drop1 FOR VALUES IN (1) -Indexes: - "parted_index_col_drop11_b_idx" btree (b) - -drop table parted_index_col_drop; --- Check that invalid indexes are not selected when attaching a partition. -create table parted_inval_tab (a int) partition by range (a); -create index parted_inval_idx on parted_inval_tab (a); -create table parted_inval_tab_1 (a int) partition by range (a); -create table parted_inval_tab_1_1 partition of parted_inval_tab_1 - for values from (0) to (10); -create table parted_inval_tab_1_2 partition of parted_inval_tab_1 - for values from (10) to (20); --- this creates an invalid index. -create index parted_inval_ixd_1 on only parted_inval_tab_1 (a); --- this creates new indexes for all the partitions of parted_inval_tab_1, --- discarding the invalid index created previously as what is chosen. 
-alter table parted_inval_tab attach partition parted_inval_tab_1 - for values from (1) to (100); -select indexrelid::regclass, indisvalid, - indrelid::regclass, inhparent::regclass - from pg_index idx left join - pg_inherits inh on (idx.indexrelid = inh.inhrelid) - where indexrelid::regclass::text like 'parted_inval%' - order by indexrelid::regclass::text collate "C"; - indexrelid | indisvalid | indrelid | inhparent -----------------------------+------------+----------------------+-------------------------- - parted_inval_idx | t | parted_inval_tab | - parted_inval_ixd_1 | f | parted_inval_tab_1 | - parted_inval_tab_1_1_a_idx | t | parted_inval_tab_1_1 | parted_inval_tab_1_a_idx - parted_inval_tab_1_2_a_idx | t | parted_inval_tab_1_2 | parted_inval_tab_1_a_idx - parted_inval_tab_1_a_idx | t | parted_inval_tab_1 | parted_inval_idx -(5 rows) - -drop table parted_inval_tab; --- Check setup of indisvalid across a complex partition tree on index --- creation. If one index in a partition index is invalid, so should its --- partitioned index. -create table parted_isvalid_tab (a int, b int) partition by range (a); -create table parted_isvalid_tab_1 partition of parted_isvalid_tab - for values from (1) to (10) partition by range (a); -create table parted_isvalid_tab_2 partition of parted_isvalid_tab - for values from (10) to (20) partition by range (a); -create table parted_isvalid_tab_11 partition of parted_isvalid_tab_1 - for values from (1) to (5); -create table parted_isvalid_tab_12 partition of parted_isvalid_tab_1 - for values from (5) to (10); --- create an invalid index on one of the partitions. -insert into parted_isvalid_tab_11 values (1, 0); -create index concurrently parted_isvalid_idx_11 on parted_isvalid_tab_11 ((a/b)); -ERROR: division by zero --- The previous invalid index is selected, invalidating all the indexes up to --- the top-most parent. -create index parted_isvalid_idx on parted_isvalid_tab ((a/b)); -select indexrelid::regclass, indisvalid, - indrelid::regclass, inhparent::regclass - from pg_index idx left join - pg_inherits inh on (idx.indexrelid = inh.inhrelid) - where indexrelid::regclass::text like 'parted_isvalid%' - order by indexrelid::regclass::text collate "C"; - indexrelid | indisvalid | indrelid | inhparent ---------------------------------+------------+-----------------------+------------------------------- - parted_isvalid_idx | f | parted_isvalid_tab | - parted_isvalid_idx_11 | f | parted_isvalid_tab_11 | parted_isvalid_tab_1_expr_idx - parted_isvalid_tab_12_expr_idx | t | parted_isvalid_tab_12 | parted_isvalid_tab_1_expr_idx - parted_isvalid_tab_1_expr_idx | f | parted_isvalid_tab_1 | parted_isvalid_idx - parted_isvalid_tab_2_expr_idx | t | parted_isvalid_tab_2 | parted_isvalid_idx -(5 rows) - -drop table parted_isvalid_tab; --- Check state of replica indexes when attaching a partition. -begin; -create table parted_replica_tab (id int not null) partition by range (id); -create table parted_replica_tab_1 partition of parted_replica_tab - for values from (1) to (10) partition by range (id); -create table parted_replica_tab_11 partition of parted_replica_tab_1 - for values from (1) to (5); -create unique index parted_replica_idx - on only parted_replica_tab using btree (id); -create unique index parted_replica_idx_1 - on only parted_replica_tab_1 using btree (id); --- This triggers an update of pg_index.indisreplident for parted_replica_idx. 
-alter table only parted_replica_tab_1 replica identity - using index parted_replica_idx_1; -create unique index parted_replica_idx_11 on parted_replica_tab_11 USING btree (id); -select indexrelid::regclass, indisvalid, indisreplident, - indrelid::regclass, inhparent::regclass - from pg_index idx left join - pg_inherits inh on (idx.indexrelid = inh.inhrelid) - where indexrelid::regclass::text like 'parted_replica%' - order by indexrelid::regclass::text collate "C"; - indexrelid | indisvalid | indisreplident | indrelid | inhparent ------------------------+------------+----------------+-----------------------+----------- - parted_replica_idx | f | f | parted_replica_tab | - parted_replica_idx_1 | f | t | parted_replica_tab_1 | - parted_replica_idx_11 | t | f | parted_replica_tab_11 | -(3 rows) - --- parted_replica_idx is not valid yet here, because parted_replica_idx_1 --- is not valid. -alter index parted_replica_idx ATTACH PARTITION parted_replica_idx_1; -select indexrelid::regclass, indisvalid, indisreplident, - indrelid::regclass, inhparent::regclass - from pg_index idx left join - pg_inherits inh on (idx.indexrelid = inh.inhrelid) - where indexrelid::regclass::text like 'parted_replica%' - order by indexrelid::regclass::text collate "C"; - indexrelid | indisvalid | indisreplident | indrelid | inhparent ------------------------+------------+----------------+-----------------------+-------------------- - parted_replica_idx | f | f | parted_replica_tab | - parted_replica_idx_1 | f | t | parted_replica_tab_1 | parted_replica_idx - parted_replica_idx_11 | t | f | parted_replica_tab_11 | -(3 rows) - --- parted_replica_idx becomes valid here. -alter index parted_replica_idx_1 ATTACH PARTITION parted_replica_idx_11; -alter table only parted_replica_tab_1 replica identity - using index parted_replica_idx_1; -commit; -select indexrelid::regclass, indisvalid, indisreplident, - indrelid::regclass, inhparent::regclass - from pg_index idx left join - pg_inherits inh on (idx.indexrelid = inh.inhrelid) - where indexrelid::regclass::text like 'parted_replica%' - order by indexrelid::regclass::text collate "C"; - indexrelid | indisvalid | indisreplident | indrelid | inhparent ------------------------+------------+----------------+-----------------------+---------------------- - parted_replica_idx | t | f | parted_replica_tab | - parted_replica_idx_1 | t | t | parted_replica_tab_1 | parted_replica_idx - parted_replica_idx_11 | t | f | parted_replica_tab_11 | parted_replica_idx_1 -(3 rows) - -drop table parted_replica_tab; +WARNING: terminating connection because of crash of another server process +DETAIL: The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory. +HINT: In a moment you should be able to reconnect to the database and repeat your command. +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. 
+connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/partition_aggregate.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/partition_aggregate.out --- /tmp/cirrus-ci-build/src/test/regress/expected/partition_aggregate.out 2024-09-11 00:19:52.093233906 +0000 +++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/partition_aggregate.out 2024-09-11 00:29:43.216157055 +0000 @@ -939,582 +939,10 @@ ALTER TABLE pagg_tab_ml ATTACH PARTITION pagg_tab_ml_p3 FOR VALUES FROM (20) TO (30); INSERT INTO pagg_tab_ml SELECT i % 30, i % 10, to_char(i % 4, 'FM0000') FROM generate_series(0, 29999) i; ANALYZE pagg_tab_ml; --- For Parallel Append -SET max_parallel_workers_per_gather TO 2; -SET parallel_setup_cost = 0; --- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY --- for level 1 only. For subpartitions, GROUP BY clause does not match with --- PARTITION KEY, but still we do not see a partial aggregation as array_agg() --- is not partial agg safe. -EXPLAIN (COSTS OFF) -SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - QUERY PLAN --------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (array_agg(DISTINCT pagg_tab_ml.c)) - -> Gather - Workers Planned: 2 - -> Parallel Append - -> GroupAggregate - Group Key: pagg_tab_ml.a - Filter: (avg(pagg_tab_ml.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml.a, pagg_tab_ml.c - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> GroupAggregate - Group Key: pagg_tab_ml_5.a - Filter: (avg(pagg_tab_ml_5.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_5.a, pagg_tab_ml_5.c - -> Append - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5 - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6 - -> GroupAggregate - Group Key: pagg_tab_ml_2.a - Filter: (avg(pagg_tab_ml_2.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_2.a, pagg_tab_ml_2.c - -> Append - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2 - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3 -(27 rows) - -SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - a | sum | array_agg | count -----+------+-------------+------- - 0 | 0 | {0000,0002} | 1000 - 1 | 1000 | {0001,0003} | 1000 - 2 | 2000 | {0000,0002} | 1000 - 10 | 0 | {0000,0002} | 1000 - 11 | 1000 | {0001,0003} | 1000 - 12 | 2000 | {0000,0002} | 1000 - 20 | 0 | {0000,0002} | 1000 - 21 | 1000 | {0001,0003} | 1000 - 22 | 2000 | {0000,0002} | 1000 -(9 rows) - --- Without ORDER BY clause, to test Gather at top-most path -EXPLAIN (COSTS OFF) -SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3; - QUERY PLAN ---------------------------------------------------------------------------- - Gather - Workers Planned: 2 - -> Parallel Append - -> GroupAggregate - Group Key: pagg_tab_ml.a - Filter: (avg(pagg_tab_ml.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml.a, pagg_tab_ml.c - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> GroupAggregate - Group Key: pagg_tab_ml_5.a - Filter: (avg(pagg_tab_ml_5.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_5.a, pagg_tab_ml_5.c - -> Append - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5 - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6 - -> GroupAggregate - Group Key: pagg_tab_ml_2.a - Filter: (avg(pagg_tab_ml_2.b) < '3'::numeric) - -> Sort - Sort Key: 
pagg_tab_ml_2.a, pagg_tab_ml_2.c - -> Append - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2 - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3 -(25 rows) - -RESET parallel_setup_cost; --- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY --- for level 1 only. For subpartitions, GROUP BY clause does not match with --- PARTITION KEY, thus we will have a partial aggregation for them. -EXPLAIN (COSTS OFF) -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - QUERY PLAN ---------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (count(*)) - -> Append - -> HashAggregate - Group Key: pagg_tab_ml.a - Filter: (avg(pagg_tab_ml.b) < '3'::numeric) - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> Finalize GroupAggregate - Group Key: pagg_tab_ml_2.a - Filter: (avg(pagg_tab_ml_2.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_2.a - -> Append - -> Partial HashAggregate - Group Key: pagg_tab_ml_2.a - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2 - -> Partial HashAggregate - Group Key: pagg_tab_ml_3.a - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3 - -> Finalize GroupAggregate - Group Key: pagg_tab_ml_5.a - Filter: (avg(pagg_tab_ml_5.b) < '3'::numeric) - -> Sort - Sort Key: pagg_tab_ml_5.a - -> Append - -> Partial HashAggregate - Group Key: pagg_tab_ml_5.a - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5 - -> Partial HashAggregate - Group Key: pagg_tab_ml_6.a - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6 -(31 rows) - -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - a | sum | count -----+------+------- - 0 | 0 | 1000 - 1 | 1000 | 1000 - 2 | 2000 | 1000 - 10 | 0 | 1000 - 11 | 1000 | 1000 - 12 | 2000 | 1000 - 20 | 0 | 1000 - 21 | 1000 | 1000 - 22 | 2000 | 1000 -(9 rows) - --- Partial aggregation at all levels as GROUP BY clause does not match with --- PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b ORDER BY 1, 2, 3; - QUERY PLAN ---------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.b, (sum(pagg_tab_ml.a)), (count(*)) - -> Finalize GroupAggregate - Group Key: pagg_tab_ml.b - -> Sort - Sort Key: pagg_tab_ml.b - -> Append - -> Partial HashAggregate - Group Key: pagg_tab_ml.b - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> Partial HashAggregate - Group Key: pagg_tab_ml_1.b - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1 - -> Partial HashAggregate - Group Key: pagg_tab_ml_2.b - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2 - -> Partial HashAggregate - Group Key: pagg_tab_ml_3.b - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3 - -> Partial HashAggregate - Group Key: pagg_tab_ml_4.b - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4 -(22 rows) - -SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b HAVING avg(a) < 15 ORDER BY 1, 2, 3; - b | sum | count ----+-------+------- - 0 | 30000 | 3000 - 1 | 33000 | 3000 - 2 | 36000 | 3000 - 3 | 39000 | 3000 - 4 | 42000 | 3000 -(5 rows) - --- Full aggregation at all levels as GROUP BY clause matches with PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; - QUERY PLAN ----------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (count(*)) - -> Append - -> HashAggregate - Group Key: pagg_tab_ml.a, pagg_tab_ml.b, pagg_tab_ml.c - 
Filter: (avg(pagg_tab_ml.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> HashAggregate - Group Key: pagg_tab_ml_1.a, pagg_tab_ml_1.b, pagg_tab_ml_1.c - Filter: (avg(pagg_tab_ml_1.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1 - -> HashAggregate - Group Key: pagg_tab_ml_2.a, pagg_tab_ml_2.b, pagg_tab_ml_2.c - Filter: (avg(pagg_tab_ml_2.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2 - -> HashAggregate - Group Key: pagg_tab_ml_3.a, pagg_tab_ml_3.b, pagg_tab_ml_3.c - Filter: (avg(pagg_tab_ml_3.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3 - -> HashAggregate - Group Key: pagg_tab_ml_4.a, pagg_tab_ml_4.b, pagg_tab_ml_4.c - Filter: (avg(pagg_tab_ml_4.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4 -(23 rows) - -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; - a | sum | count -----+------+------- - 8 | 4000 | 500 - 8 | 4000 | 500 - 9 | 4500 | 500 - 9 | 4500 | 500 - 18 | 4000 | 500 - 18 | 4000 | 500 - 19 | 4500 | 500 - 19 | 4500 | 500 - 28 | 4000 | 500 - 28 | 4000 | 500 - 29 | 4500 | 500 - 29 | 4500 | 500 -(12 rows) - --- Parallelism within partitionwise aggregates -SET min_parallel_table_scan_size TO '8kB'; -SET parallel_setup_cost TO 0; --- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY --- for level 1 only. For subpartitions, GROUP BY clause does not match with --- PARTITION KEY, thus we will have a partial aggregation for them. -EXPLAIN (COSTS OFF) -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - QUERY PLAN ------------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (count(*)) - -> Append - -> Finalize GroupAggregate - Group Key: pagg_tab_ml.a - Filter: (avg(pagg_tab_ml.b) < '3'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_ml.a - -> Partial HashAggregate - Group Key: pagg_tab_ml.a - -> Parallel Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> Finalize GroupAggregate - Group Key: pagg_tab_ml_2.a - Filter: (avg(pagg_tab_ml_2.b) < '3'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_ml_2.a - -> Parallel Append - -> Partial HashAggregate - Group Key: pagg_tab_ml_2.a - -> Parallel Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2 - -> Partial HashAggregate - Group Key: pagg_tab_ml_3.a - -> Parallel Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3 - -> Finalize GroupAggregate - Group Key: pagg_tab_ml_5.a - Filter: (avg(pagg_tab_ml_5.b) < '3'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_ml_5.a - -> Parallel Append - -> Partial HashAggregate - Group Key: pagg_tab_ml_5.a - -> Parallel Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5 - -> Partial HashAggregate - Group Key: pagg_tab_ml_6.a - -> Parallel Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6 -(41 rows) - -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; - a | sum | count -----+------+------- - 0 | 0 | 1000 - 1 | 1000 | 1000 - 2 | 2000 | 1000 - 10 | 0 | 1000 - 11 | 1000 | 1000 - 12 | 2000 | 1000 - 20 | 0 | 1000 - 21 | 1000 | 1000 - 22 | 2000 | 1000 -(9 rows) - --- Partial aggregation at all levels as GROUP BY clause does not match with --- PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b ORDER BY 1, 2, 3; - QUERY PLAN 
------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_ml.b, (sum(pagg_tab_ml.a)), (count(*)) - -> Finalize GroupAggregate - Group Key: pagg_tab_ml.b - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_ml.b - -> Parallel Append - -> Partial HashAggregate - Group Key: pagg_tab_ml.b - -> Parallel Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> Partial HashAggregate - Group Key: pagg_tab_ml_3.b - -> Parallel Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3 - -> Partial HashAggregate - Group Key: pagg_tab_ml_1.b - -> Parallel Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1 - -> Partial HashAggregate - Group Key: pagg_tab_ml_4.b - -> Parallel Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4 - -> Partial HashAggregate - Group Key: pagg_tab_ml_2.b - -> Parallel Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2 -(24 rows) - -SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b HAVING avg(a) < 15 ORDER BY 1, 2, 3; - b | sum | count ----+-------+------- - 0 | 30000 | 3000 - 1 | 33000 | 3000 - 2 | 36000 | 3000 - 3 | 39000 | 3000 - 4 | 42000 | 3000 -(5 rows) - --- Full aggregation at all levels as GROUP BY clause matches with PARTITION KEY -EXPLAIN (COSTS OFF) -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; - QUERY PLAN ----------------------------------------------------------------------------------- - Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (count(*)) - -> Parallel Append - -> HashAggregate - Group Key: pagg_tab_ml.a, pagg_tab_ml.b, pagg_tab_ml.c - Filter: (avg(pagg_tab_ml.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml - -> HashAggregate - Group Key: pagg_tab_ml_3.a, pagg_tab_ml_3.b, pagg_tab_ml_3.c - Filter: (avg(pagg_tab_ml_3.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3 - -> HashAggregate - Group Key: pagg_tab_ml_1.a, pagg_tab_ml_1.b, pagg_tab_ml_1.c - Filter: (avg(pagg_tab_ml_1.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1 - -> HashAggregate - Group Key: pagg_tab_ml_4.a, pagg_tab_ml_4.b, pagg_tab_ml_4.c - Filter: (avg(pagg_tab_ml_4.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4 - -> HashAggregate - Group Key: pagg_tab_ml_2.a, pagg_tab_ml_2.b, pagg_tab_ml_2.c - Filter: (avg(pagg_tab_ml_2.b) > '7'::numeric) - -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2 -(25 rows) - -SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; - a | sum | count -----+------+------- - 8 | 4000 | 500 - 8 | 4000 | 500 - 9 | 4500 | 500 - 9 | 4500 | 500 - 18 | 4000 | 500 - 18 | 4000 | 500 - 19 | 4500 | 500 - 19 | 4500 | 500 - 28 | 4000 | 500 - 28 | 4000 | 500 - 29 | 4500 | 500 - 29 | 4500 | 500 -(12 rows) - --- Parallelism within partitionwise aggregates (single level) --- Add few parallel setup cost, so that we will see a plan which gathers --- partially created paths even for full aggregation and sticks a single Gather --- followed by finalization step. --- Without this, the cost of doing partial aggregation + Gather + finalization --- for each partition and then Append over it turns out to be same and this --- wins as we add it first. This parallel_setup_cost plays a vital role in --- costing such plans. 
-SET parallel_setup_cost TO 10; -CREATE TABLE pagg_tab_para(x int, y int) PARTITION BY RANGE(x); -CREATE TABLE pagg_tab_para_p1 PARTITION OF pagg_tab_para FOR VALUES FROM (0) TO (12); -CREATE TABLE pagg_tab_para_p2 PARTITION OF pagg_tab_para FOR VALUES FROM (12) TO (22); -CREATE TABLE pagg_tab_para_p3 PARTITION OF pagg_tab_para FOR VALUES FROM (22) TO (30); -INSERT INTO pagg_tab_para SELECT i % 30, i % 20 FROM generate_series(0, 29999) i; -ANALYZE pagg_tab_para; --- When GROUP BY clause matches; full aggregation is performed for each partition. -EXPLAIN (COSTS OFF) -SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_para.x, (sum(pagg_tab_para.y)), (avg(pagg_tab_para.y)) - -> Finalize GroupAggregate - Group Key: pagg_tab_para.x - Filter: (avg(pagg_tab_para.y) < '7'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_para.x - -> Parallel Append - -> Partial HashAggregate - Group Key: pagg_tab_para.x - -> Parallel Seq Scan on pagg_tab_para_p1 pagg_tab_para - -> Partial HashAggregate - Group Key: pagg_tab_para_1.x - -> Parallel Seq Scan on pagg_tab_para_p2 pagg_tab_para_1 - -> Partial HashAggregate - Group Key: pagg_tab_para_2.x - -> Parallel Seq Scan on pagg_tab_para_p3 pagg_tab_para_2 -(19 rows) - -SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - x | sum | avg | count -----+------+--------------------+------- - 0 | 5000 | 5.0000000000000000 | 1000 - 1 | 6000 | 6.0000000000000000 | 1000 - 10 | 5000 | 5.0000000000000000 | 1000 - 11 | 6000 | 6.0000000000000000 | 1000 - 20 | 5000 | 5.0000000000000000 | 1000 - 21 | 6000 | 6.0000000000000000 | 1000 -(6 rows) - --- When GROUP BY clause does not match; partial aggregation is performed for each partition. 
-EXPLAIN (COSTS OFF) -SELECT y, sum(x), avg(x), count(*) FROM pagg_tab_para GROUP BY y HAVING avg(x) < 12 ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_para.y, (sum(pagg_tab_para.x)), (avg(pagg_tab_para.x)) - -> Finalize GroupAggregate - Group Key: pagg_tab_para.y - Filter: (avg(pagg_tab_para.x) < '12'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_para.y - -> Parallel Append - -> Partial HashAggregate - Group Key: pagg_tab_para.y - -> Parallel Seq Scan on pagg_tab_para_p1 pagg_tab_para - -> Partial HashAggregate - Group Key: pagg_tab_para_1.y - -> Parallel Seq Scan on pagg_tab_para_p2 pagg_tab_para_1 - -> Partial HashAggregate - Group Key: pagg_tab_para_2.y - -> Parallel Seq Scan on pagg_tab_para_p3 pagg_tab_para_2 -(19 rows) - -SELECT y, sum(x), avg(x), count(*) FROM pagg_tab_para GROUP BY y HAVING avg(x) < 12 ORDER BY 1, 2, 3; - y | sum | avg | count -----+-------+---------------------+------- - 0 | 15000 | 10.0000000000000000 | 1500 - 1 | 16500 | 11.0000000000000000 | 1500 - 10 | 15000 | 10.0000000000000000 | 1500 - 11 | 16500 | 11.0000000000000000 | 1500 -(4 rows) - --- Test when parent can produce parallel paths but not any (or some) of its children --- (Use one more aggregate to tilt the cost estimates for the plan we want) -ALTER TABLE pagg_tab_para_p1 SET (parallel_workers = 0); -ALTER TABLE pagg_tab_para_p3 SET (parallel_workers = 0); -ANALYZE pagg_tab_para; -EXPLAIN (COSTS OFF) -SELECT x, sum(y), avg(y), sum(x+y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - QUERY PLAN -------------------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_para.x, (sum(pagg_tab_para.y)), (avg(pagg_tab_para.y)) - -> Finalize GroupAggregate - Group Key: pagg_tab_para.x - Filter: (avg(pagg_tab_para.y) < '7'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_para.x - -> Partial HashAggregate - Group Key: pagg_tab_para.x - -> Parallel Append - -> Seq Scan on pagg_tab_para_p1 pagg_tab_para_1 - -> Seq Scan on pagg_tab_para_p3 pagg_tab_para_3 - -> Parallel Seq Scan on pagg_tab_para_p2 pagg_tab_para_2 -(15 rows) - -SELECT x, sum(y), avg(y), sum(x+y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - x | sum | avg | sum | count -----+------+--------------------+-------+------- - 0 | 5000 | 5.0000000000000000 | 5000 | 1000 - 1 | 6000 | 6.0000000000000000 | 7000 | 1000 - 10 | 5000 | 5.0000000000000000 | 15000 | 1000 - 11 | 6000 | 6.0000000000000000 | 17000 | 1000 - 20 | 5000 | 5.0000000000000000 | 25000 | 1000 - 21 | 6000 | 6.0000000000000000 | 27000 | 1000 -(6 rows) - -ALTER TABLE pagg_tab_para_p2 SET (parallel_workers = 0); -ANALYZE pagg_tab_para; -EXPLAIN (COSTS OFF) -SELECT x, sum(y), avg(y), sum(x+y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - QUERY PLAN ----------------------------------------------------------------------------------- - Sort - Sort Key: pagg_tab_para.x, (sum(pagg_tab_para.y)), (avg(pagg_tab_para.y)) - -> Finalize GroupAggregate - Group Key: pagg_tab_para.x - Filter: (avg(pagg_tab_para.y) < '7'::numeric) - -> Gather Merge - Workers Planned: 2 - -> Sort - Sort Key: pagg_tab_para.x - -> Partial HashAggregate - Group Key: pagg_tab_para.x - -> Parallel Append - -> Seq Scan on pagg_tab_para_p1 pagg_tab_para_1 - -> Seq Scan on pagg_tab_para_p2 pagg_tab_para_2 - -> Seq Scan on 
pagg_tab_para_p3 pagg_tab_para_3 -(15 rows) - -SELECT x, sum(y), avg(y), sum(x+y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - x | sum | avg | sum | count -----+------+--------------------+-------+------- - 0 | 5000 | 5.0000000000000000 | 5000 | 1000 - 1 | 6000 | 6.0000000000000000 | 7000 | 1000 - 10 | 5000 | 5.0000000000000000 | 15000 | 1000 - 11 | 6000 | 6.0000000000000000 | 17000 | 1000 - 20 | 5000 | 5.0000000000000000 | 25000 | 1000 - 21 | 6000 | 6.0000000000000000 | 27000 | 1000 -(6 rows) - --- Reset parallelism parameters to get partitionwise aggregation plan. -RESET min_parallel_table_scan_size; -RESET parallel_setup_cost; -EXPLAIN (COSTS OFF) -SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - QUERY PLAN ------------------------------------------------------------------------------ - Sort - Sort Key: pagg_tab_para.x, (sum(pagg_tab_para.y)), (avg(pagg_tab_para.y)) - -> Append - -> HashAggregate - Group Key: pagg_tab_para.x - Filter: (avg(pagg_tab_para.y) < '7'::numeric) - -> Seq Scan on pagg_tab_para_p1 pagg_tab_para - -> HashAggregate - Group Key: pagg_tab_para_1.x - Filter: (avg(pagg_tab_para_1.y) < '7'::numeric) - -> Seq Scan on pagg_tab_para_p2 pagg_tab_para_1 - -> HashAggregate - Group Key: pagg_tab_para_2.x - Filter: (avg(pagg_tab_para_2.y) < '7'::numeric) - -> Seq Scan on pagg_tab_para_p3 pagg_tab_para_2 -(15 rows) - -SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; - x | sum | avg | count -----+------+--------------------+------- - 0 | 5000 | 5.0000000000000000 | 1000 - 1 | 6000 | 6.0000000000000000 | 1000 - 10 | 5000 | 5.0000000000000000 | 1000 - 11 | 6000 | 6.0000000000000000 | 1000 - 20 | 5000 | 5.0000000000000000 | 1000 - 21 | 6000 | 6.0000000000000000 | 1000 -(6 rows) - +WARNING: terminating connection because of crash of another server process +DETAIL: The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory. +HINT: In a moment you should be able to reconnect to the database and repeat your command. +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. 
+connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/tuplesort.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/tuplesort.out --- /tmp/cirrus-ci-build/src/test/regress/expected/tuplesort.out 2024-09-11 00:19:52.129348415 +0000 +++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/tuplesort.out 2024-09-11 00:29:43.236157031 +0000 @@ -223,483 +223,10 @@ BEGIN; SET LOCAL enable_indexscan = false; CLUSTER abbrev_abort_uuids USING abbrev_abort_uuids__abort_increasing_idx; --- head -SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing -FROM abbrev_abort_uuids -ORDER BY ctid LIMIT 5; - id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing --------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- - 1 | 00000000-0000-0000-0000-000000000000 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 - 2 | 00000000-0000-0000-0000-000000000001 | 00000000-0000-0000-0000-000000019999 | 00000001-0000-0000-0000-000000000001 | 00009990-0000-0000-0000-000000019999 - 3 | 00000000-0000-0000-0000-000000000002 | 00000000-0000-0000-0000-000000019998 | 00000002-0000-0000-0000-000000000002 | 00009989-0000-0000-0000-000000019998 - 20004 | 00000000-0000-0000-0000-000000000002 | 00000000-0000-0000-0000-000000019998 | 00000002-0000-0000-0000-000000000002 | 00009989-0000-0000-0000-000000019998 - 4 | 00000000-0000-0000-0000-000000000003 | 00000000-0000-0000-0000-000000019997 | 00000003-0000-0000-0000-000000000003 | 00009988-0000-0000-0000-000000019997 -(5 rows) - --- tail -SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing -FROM abbrev_abort_uuids -ORDER BY ctid DESC LIMIT 5; - id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing --------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- - 0 | | | | - 20002 | | | | - 20003 | | | | - 20001 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 - 20010 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 -(5 rows) - -ROLLBACK; --- when aborting, decreasing order -BEGIN; -SET LOCAL enable_indexscan = false; -CLUSTER abbrev_abort_uuids USING abbrev_abort_uuids__abort_decreasing_idx; --- head -SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing -FROM abbrev_abort_uuids -ORDER BY ctid LIMIT 5; - id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing --------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- - 20010 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 - 20001 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 - 20000 | 00000000-0000-0000-0000-000000019999 | 00000000-0000-0000-0000-000000000001 | 
00009990-0000-0000-0000-000000019999 | 00000001-0000-0000-0000-000000000001 - 19999 | 00000000-0000-0000-0000-000000019998 | 00000000-0000-0000-0000-000000000002 | 00009989-0000-0000-0000-000000019998 | 00000002-0000-0000-0000-000000000002 - 20009 | 00000000-0000-0000-0000-000000019997 | 00000000-0000-0000-0000-000000000003 | 00009988-0000-0000-0000-000000019997 | 00000003-0000-0000-0000-000000000003 -(5 rows) - --- tail -SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing -FROM abbrev_abort_uuids -ORDER BY ctid DESC LIMIT 5; - id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing --------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- - 0 | | | | - 20002 | | | | - 20003 | | | | - 1 | 00000000-0000-0000-0000-000000000000 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 - 2 | 00000000-0000-0000-0000-000000000001 | 00000000-0000-0000-0000-000000019999 | 00000001-0000-0000-0000-000000000001 | 00009990-0000-0000-0000-000000019999 -(5 rows) - -ROLLBACK; --- when not aborting, increasing order -BEGIN; -SET LOCAL enable_indexscan = false; -CLUSTER abbrev_abort_uuids USING abbrev_abort_uuids__noabort_increasing_idx; --- head -SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing -FROM abbrev_abort_uuids -ORDER BY ctid LIMIT 5; - id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing --------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- - 1 | 00000000-0000-0000-0000-000000000000 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 - 10010 | 00000000-0000-0000-0000-000000010009 | 00000000-0000-0000-0000-000000009991 | 00000000-0000-0000-0000-000000010009 | 00009991-0000-0000-0000-000000009991 - 2 | 00000000-0000-0000-0000-000000000001 | 00000000-0000-0000-0000-000000019999 | 00000001-0000-0000-0000-000000000001 | 00009990-0000-0000-0000-000000019999 - 10011 | 00000000-0000-0000-0000-000000010010 | 00000000-0000-0000-0000-000000009990 | 00000001-0000-0000-0000-000000010010 | 00009990-0000-0000-0000-000000009990 - 20004 | 00000000-0000-0000-0000-000000000002 | 00000000-0000-0000-0000-000000019998 | 00000002-0000-0000-0000-000000000002 | 00009989-0000-0000-0000-000000019998 -(5 rows) - --- tail -SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing -FROM abbrev_abort_uuids -ORDER BY ctid DESC LIMIT 5; - id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing --------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- - 0 | | | | - 20002 | | | | - 20003 | | | | - 10009 | 00000000-0000-0000-0000-000000010008 | 00000000-0000-0000-0000-000000009992 | 00010008-0000-0000-0000-000000010008 | 00009992-0000-0000-0000-000000009992 - 10008 | 00000000-0000-0000-0000-000000010007 | 00000000-0000-0000-0000-000000009993 | 00010007-0000-0000-0000-000000010007 | 00009993-0000-0000-0000-000000009993 -(5 rows) - -ROLLBACK; --- when no aborting, decreasing order -BEGIN; -SET LOCAL enable_indexscan = false; -CLUSTER abbrev_abort_uuids USING abbrev_abort_uuids__noabort_decreasing_idx; --- head 
-SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing -FROM abbrev_abort_uuids -ORDER BY ctid LIMIT 5; - id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing --------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- - 20010 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 - 20001 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 - 9992 | 00000000-0000-0000-0000-000000009991 | 00000000-0000-0000-0000-000000010009 | 00009991-0000-0000-0000-000000009991 | 00000000-0000-0000-0000-000000010009 - 20000 | 00000000-0000-0000-0000-000000019999 | 00000000-0000-0000-0000-000000000001 | 00009990-0000-0000-0000-000000019999 | 00000001-0000-0000-0000-000000000001 - 9991 | 00000000-0000-0000-0000-000000009990 | 00000000-0000-0000-0000-000000010010 | 00009990-0000-0000-0000-000000009990 | 00000001-0000-0000-0000-000000010010 -(5 rows) - --- tail -SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing -FROM abbrev_abort_uuids -ORDER BY ctid DESC LIMIT 5; - id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing --------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- - 0 | | | | - 20003 | | | | - 20002 | | | | - 9993 | 00000000-0000-0000-0000-000000009992 | 00000000-0000-0000-0000-000000010008 | 00009992-0000-0000-0000-000000009992 | 00010008-0000-0000-0000-000000010008 - 9994 | 00000000-0000-0000-0000-000000009993 | 00000000-0000-0000-0000-000000010007 | 00009993-0000-0000-0000-000000009993 | 00010007-0000-0000-0000-000000010007 -(5 rows) - -ROLLBACK; ----- --- test sorting of large datums VALUES ----- --- Ensure the order is correct and values look intact -SELECT LEFT(a,10),b FROM - (VALUES(REPEAT('a', 512 * 1024),1),(REPEAT('b', 512 * 1024),2)) v(a,b) -ORDER BY v.a DESC; - left | b -------------+--- - bbbbbbbbbb | 2 - aaaaaaaaaa | 1 -(2 rows) - ----- --- test forward and backward scans for in-memory and disk based tuplesort ----- --- in-memory -BEGIN; -SET LOCAL enable_indexscan = false; --- unfortunately can't show analyze output confirming sort method, --- the memory used output wouldn't be stable -EXPLAIN (COSTS OFF) DECLARE c SCROLL CURSOR FOR SELECT noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_decreasing; - QUERY PLAN --------------------------------------- - Sort - Sort Key: noabort_decreasing - -> Seq Scan on abbrev_abort_uuids -(3 rows) - -DECLARE c SCROLL CURSOR FOR SELECT noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_decreasing; --- first and second -FETCH NEXT FROM c; - noabort_decreasing --------------------------------------- - 00000000-0000-0000-0000-000000000000 -(1 row) - -FETCH NEXT FROM c; - noabort_decreasing --------------------------------------- - 00000000-0000-0000-0000-000000000000 -(1 row) - --- scroll beyond beginning -FETCH BACKWARD FROM c; - noabort_decreasing --------------------------------------- - 00000000-0000-0000-0000-000000000000 -(1 row) - -FETCH BACKWARD FROM c; - noabort_decreasing --------------------- -(0 rows) - -FETCH BACKWARD FROM c; - noabort_decreasing --------------------- -(0 rows) - 
-FETCH BACKWARD FROM c; - noabort_decreasing --------------------- -(0 rows) - -FETCH NEXT FROM c; - noabort_decreasing --------------------------------------- - 00000000-0000-0000-0000-000000000000 -(1 row) - --- scroll beyond end -FETCH LAST FROM c; - noabort_decreasing --------------------- - -(1 row) - -FETCH BACKWARD FROM c; - noabort_decreasing --------------------- - -(1 row) - -FETCH NEXT FROM c; - noabort_decreasing --------------------- - -(1 row) - -FETCH NEXT FROM c; - noabort_decreasing --------------------- -(0 rows) - -FETCH NEXT FROM c; - noabort_decreasing --------------------- -(0 rows) - -FETCH BACKWARD FROM c; - noabort_decreasing --------------------- - -(1 row) - -FETCH NEXT FROM c; - noabort_decreasing --------------------- -(0 rows) - -COMMIT; --- disk based -BEGIN; -SET LOCAL enable_indexscan = false; -SET LOCAL work_mem = '100kB'; --- unfortunately can't show analyze output confirming sort method, --- the memory used output wouldn't be stable -EXPLAIN (COSTS OFF) DECLARE c SCROLL CURSOR FOR SELECT noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_decreasing; - QUERY PLAN --------------------------------------- - Sort - Sort Key: noabort_decreasing - -> Seq Scan on abbrev_abort_uuids -(3 rows) - -DECLARE c SCROLL CURSOR FOR SELECT noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_decreasing; --- first and second -FETCH NEXT FROM c; - noabort_decreasing --------------------------------------- - 00000000-0000-0000-0000-000000000000 -(1 row) - -FETCH NEXT FROM c; - noabort_decreasing --------------------------------------- - 00000000-0000-0000-0000-000000000000 -(1 row) - --- scroll beyond beginning -FETCH BACKWARD FROM c; - noabort_decreasing --------------------------------------- - 00000000-0000-0000-0000-000000000000 -(1 row) - -FETCH BACKWARD FROM c; - noabort_decreasing --------------------- -(0 rows) - -FETCH BACKWARD FROM c; - noabort_decreasing --------------------- -(0 rows) - -FETCH BACKWARD FROM c; - noabort_decreasing --------------------- -(0 rows) - -FETCH NEXT FROM c; - noabort_decreasing --------------------------------------- - 00000000-0000-0000-0000-000000000000 -(1 row) - --- scroll beyond end -FETCH LAST FROM c; - noabort_decreasing --------------------- - -(1 row) - -FETCH BACKWARD FROM c; - noabort_decreasing --------------------- - -(1 row) - -FETCH NEXT FROM c; - noabort_decreasing --------------------- - -(1 row) - -FETCH NEXT FROM c; - noabort_decreasing --------------------- -(0 rows) - -FETCH NEXT FROM c; - noabort_decreasing --------------------- -(0 rows) - -FETCH BACKWARD FROM c; - noabort_decreasing --------------------- - -(1 row) - -FETCH NEXT FROM c; - noabort_decreasing --------------------- -(0 rows) - -COMMIT; ----- --- test tuplesort using both in-memory and disk sort ---- --- memory based -SELECT - -- fixed-width by-value datum - (array_agg(id ORDER BY id DESC NULLS FIRST))[0:5], - -- fixed-width by-ref datum - (array_agg(abort_increasing ORDER BY abort_increasing DESC NULLS LAST))[0:5], - -- variable-width datum - (array_agg(id::text ORDER BY id::text DESC NULLS LAST))[0:5], - -- fixed width by-value datum tuplesort - percentile_disc(0.99) WITHIN GROUP (ORDER BY id), - -- ensure state is shared - percentile_disc(0.01) WITHIN GROUP (ORDER BY id), - -- fixed width by-ref datum tuplesort - percentile_disc(0.8) WITHIN GROUP (ORDER BY abort_increasing), - -- variable width by-ref datum tuplesort - percentile_disc(0.2) WITHIN GROUP (ORDER BY id::text), - -- multi-column tuplesort - 
rank('00000000-0000-0000-0000-000000000000', '2', '2') WITHIN GROUP (ORDER BY noabort_increasing, id, id::text) -FROM ( - SELECT * FROM abbrev_abort_uuids - UNION ALL - SELECT NULL, NULL, NULL, NULL, NULL) s; - array_agg | array_agg | array_agg | percentile_disc | percentile_disc | percentile_disc | percentile_disc | rank ---------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+-----------------+-----------------+--------------------------------------+-----------------+------ - {NULL,20010,20009,20008,20007} | {00000000-0000-0000-0000-000000020000,00000000-0000-0000-0000-000000020000,00000000-0000-0000-0000-000000019999,00000000-0000-0000-0000-000000019998,00000000-0000-0000-0000-000000019997} | {9999,9998,9997,9996,9995} | 19810 | 200 | 00000000-0000-0000-0000-000000016003 | 136 | 2 -(1 row) - --- disk based (see also above) -BEGIN; -SET LOCAL work_mem = '100kB'; -SELECT - (array_agg(id ORDER BY id DESC NULLS FIRST))[0:5], - (array_agg(abort_increasing ORDER BY abort_increasing DESC NULLS LAST))[0:5], - (array_agg(id::text ORDER BY id::text DESC NULLS LAST))[0:5], - percentile_disc(0.99) WITHIN GROUP (ORDER BY id), - percentile_disc(0.01) WITHIN GROUP (ORDER BY id), - percentile_disc(0.8) WITHIN GROUP (ORDER BY abort_increasing), - percentile_disc(0.2) WITHIN GROUP (ORDER BY id::text), - rank('00000000-0000-0000-0000-000000000000', '2', '2') WITHIN GROUP (ORDER BY noabort_increasing, id, id::text) -FROM ( - SELECT * FROM abbrev_abort_uuids - UNION ALL - SELECT NULL, NULL, NULL, NULL, NULL) s; - array_agg | array_agg | array_agg | percentile_disc | percentile_disc | percentile_disc | percentile_disc | rank ---------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+-----------------+-----------------+--------------------------------------+-----------------+------ - {NULL,20010,20009,20008,20007} | {00000000-0000-0000-0000-000000020000,00000000-0000-0000-0000-000000020000,00000000-0000-0000-0000-000000019999,00000000-0000-0000-0000-000000019998,00000000-0000-0000-0000-000000019997} | {9999,9998,9997,9996,9995} | 19810 | 200 | 00000000-0000-0000-0000-000000016003 | 136 | 2 -(1 row) - -ROLLBACK; ----- --- test tuplesort mark/restore ---- -CREATE TEMP TABLE test_mark_restore(col1 int, col2 int, col12 int); --- need a few duplicates for mark/restore to matter -INSERT INTO test_mark_restore(col1, col2, col12) - SELECT a.i, b.i, a.i * b.i FROM generate_series(1, 500) a(i), generate_series(1, 5) b(i); -BEGIN; -SET LOCAL enable_nestloop = off; -SET LOCAL enable_hashjoin = off; -SET LOCAL enable_material = off; --- set query into variable once, to avoid repetition of the fairly long query -SELECT $$ - SELECT col12, count(distinct a.col1), count(distinct a.col2), count(distinct b.col1), count(distinct b.col2), count(*) - FROM test_mark_restore a - JOIN test_mark_restore b USING(col12) - GROUP BY 1 - HAVING count(*) > 1 - ORDER BY 2 DESC, 1 DESC, 3 DESC, 4 DESC, 5 DESC, 6 DESC - LIMIT 10 -$$ AS qry \gset --- test mark/restore with in-memory sorts -EXPLAIN (COSTS OFF) :qry; - QUERY PLAN 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - Limit - -> Sort - Sort Key: (count(DISTINCT a.col1)) DESC, a.col12 DESC, (count(DISTINCT a.col2)) DESC, (count(DISTINCT b.col1)) DESC, (count(DISTINCT b.col2)) DESC, (count(*)) DESC - -> GroupAggregate - Group Key: a.col12 - Filter: (count(*) > 1) - -> Incremental Sort - Sort Key: a.col12 DESC, a.col1 - Presorted Key: a.col12 - -> Merge Join - Merge Cond: (a.col12 = b.col12) - -> Sort - Sort Key: a.col12 DESC - -> Seq Scan on test_mark_restore a - -> Sort - Sort Key: b.col12 DESC - -> Seq Scan on test_mark_restore b -(17 rows) - -:qry; - col12 | count | count | count | count | count --------+-------+-------+-------+-------+------- - 480 | 5 | 5 | 5 | 5 | 25 - 420 | 5 | 5 | 5 | 5 | 25 - 360 | 5 | 5 | 5 | 5 | 25 - 300 | 5 | 5 | 5 | 5 | 25 - 240 | 5 | 5 | 5 | 5 | 25 - 180 | 5 | 5 | 5 | 5 | 25 - 120 | 5 | 5 | 5 | 5 | 25 - 60 | 5 | 5 | 5 | 5 | 25 - 960 | 4 | 4 | 4 | 4 | 16 - 900 | 4 | 4 | 4 | 4 | 16 -(10 rows) - --- test mark/restore with on-disk sorts -SET LOCAL work_mem = '100kB'; -EXPLAIN (COSTS OFF) :qry; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - Limit - -> Sort - Sort Key: (count(DISTINCT a.col1)) DESC, a.col12 DESC, (count(DISTINCT a.col2)) DESC, (count(DISTINCT b.col1)) DESC, (count(DISTINCT b.col2)) DESC, (count(*)) DESC - -> GroupAggregate - Group Key: a.col12 - Filter: (count(*) > 1) - -> Incremental Sort - Sort Key: a.col12 DESC, a.col1 - Presorted Key: a.col12 - -> Merge Join - Merge Cond: (a.col12 = b.col12) - -> Sort - Sort Key: a.col12 DESC - -> Seq Scan on test_mark_restore a - -> Sort - Sort Key: b.col12 DESC - -> Seq Scan on test_mark_restore b -(17 rows) - -:qry; - col12 | count | count | count | count | count --------+-------+-------+-------+-------+------- - 480 | 5 | 5 | 5 | 5 | 25 - 420 | 5 | 5 | 5 | 5 | 25 - 360 | 5 | 5 | 5 | 5 | 25 - 300 | 5 | 5 | 5 | 5 | 25 - 240 | 5 | 5 | 5 | 5 | 25 - 180 | 5 | 5 | 5 | 5 | 25 - 120 | 5 | 5 | 5 | 5 | 25 - 60 | 5 | 5 | 5 | 5 | 25 - 960 | 4 | 4 | 4 | 4 | 16 - 900 | 4 | 4 | 4 | 4 | 16 -(10 rows) - -COMMIT; +WARNING: terminating connection because of crash of another server process +DETAIL: The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory. +HINT: In a moment you should be able to reconnect to the database and repeat your command. +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/stats.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/stats.out --- /tmp/cirrus-ci-build/src/test/regress/expected/stats.out 2024-09-11 00:19:52.117310245 +0000 +++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/stats.out 2024-09-11 00:29:43.180157098 +0000 @@ -820,873 +820,7 @@ SELECT lower = pg_lsn(0), upper > pg_lsn(0) FROM pg_stat_lsn_bounds_for_time(now() - make_interval(years=> 100)); - ?column? | ?column? 
-----------+---------- - t | t -(1 row) - --- An LSN range covering a time 100 years in the future should be from roughly --- the current time to FFFFFFFF/FFFFFFFF (UINT64_MAX). -SELECT lower > pg_lsn(0), - upper = pg_lsn('FFFFFFFF/FFFFFFFF') - FROM pg_stat_lsn_bounds_for_time(now() + make_interval(years=> 100)); - ?column? | ?column? -----------+---------- - t | t -(1 row) - --- A TimestampTz range covering LSN 0 should be from -infinity to a positive --- time (either the oldest time in the stream or the current time). -SELECT lower = timestamptz('-infinity'), - upper::time > 'allballs'::time - FROM pg_stat_time_bounds_for_lsn(pg_lsn(0)); - ?column? | ?column? -----------+---------- - t | t -(1 row) - --- A TimestampTz range covering an LSN 1 GB in the future should be from --- roughly the current time to infinity. -SELECT lower::time > 'allballs'::time, - upper = timestamptz('infinity') - FROM pg_stat_time_bounds_for_lsn( - pg_current_wal_insert_lsn() + 1000000000); - ?column? | ?column? -----------+---------- - t | t -(1 row) - --- Test that sessions is incremented when a new session is started in pg_stat_database -SELECT sessions AS db_stat_sessions FROM pg_stat_database WHERE datname = (SELECT current_database()) \gset -\c -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sessions > :db_stat_sessions FROM pg_stat_database WHERE datname = (SELECT current_database()); - ?column? ----------- - t -(1 row) - --- Test pg_stat_checkpointer checkpointer-related stats, together with pg_stat_wal -SELECT num_requested AS rqst_ckpts_before FROM pg_stat_checkpointer \gset --- Test pg_stat_wal (and make a temp table so our temp schema exists) -SELECT wal_bytes AS wal_bytes_before FROM pg_stat_wal \gset -CREATE TEMP TABLE test_stats_temp AS SELECT 17; -DROP TABLE test_stats_temp; --- Checkpoint twice: The checkpointer reports stats after reporting completion --- of the checkpoint. But after a second checkpoint we'll see at least the --- results of the first. -CHECKPOINT; -CHECKPOINT; -SELECT num_requested > :rqst_ckpts_before FROM pg_stat_checkpointer; - ?column? ----------- - t -(1 row) - -SELECT wal_bytes > :wal_bytes_before FROM pg_stat_wal; - ?column? ----------- - t -(1 row) - --- Test pg_stat_get_backend_idset() and some allied functions. --- In particular, verify that their notion of backend ID matches --- our temp schema index. -SELECT (current_schemas(true))[1] = ('pg_temp_' || beid::text) AS match -FROM pg_stat_get_backend_idset() beid -WHERE pg_stat_get_backend_pid(beid) = pg_backend_pid(); - match -------- - t -(1 row) - ------ --- Test that resetting stats works for reset timestamp ------ --- Test that reset_slru with a specified SLRU works. -SELECT stats_reset AS slru_commit_ts_reset_ts FROM pg_stat_slru WHERE name = 'commit_timestamp' \gset -SELECT stats_reset AS slru_notify_reset_ts FROM pg_stat_slru WHERE name = 'notify' \gset -SELECT pg_stat_reset_slru('commit_timestamp'); - pg_stat_reset_slru --------------------- - -(1 row) - -SELECT stats_reset > :'slru_commit_ts_reset_ts'::timestamptz FROM pg_stat_slru WHERE name = 'commit_timestamp'; - ?column? 
----------- - t -(1 row) - -SELECT stats_reset AS slru_commit_ts_reset_ts FROM pg_stat_slru WHERE name = 'commit_timestamp' \gset --- Test that multiple SLRUs are reset when no specific SLRU provided to reset function -SELECT pg_stat_reset_slru(); - pg_stat_reset_slru --------------------- - -(1 row) - -SELECT stats_reset > :'slru_commit_ts_reset_ts'::timestamptz FROM pg_stat_slru WHERE name = 'commit_timestamp'; - ?column? ----------- - t -(1 row) - -SELECT stats_reset > :'slru_notify_reset_ts'::timestamptz FROM pg_stat_slru WHERE name = 'notify'; - ?column? ----------- - t -(1 row) - --- Test that reset_shared with archiver specified as the stats type works -SELECT stats_reset AS archiver_reset_ts FROM pg_stat_archiver \gset -SELECT pg_stat_reset_shared('archiver'); - pg_stat_reset_shared ----------------------- - -(1 row) - -SELECT stats_reset > :'archiver_reset_ts'::timestamptz FROM pg_stat_archiver; - ?column? ----------- - t -(1 row) - --- Test that reset_shared with bgwriter specified as the stats type works -SELECT stats_reset AS bgwriter_reset_ts FROM pg_stat_bgwriter \gset -SELECT pg_stat_reset_shared('bgwriter'); - pg_stat_reset_shared ----------------------- - -(1 row) - -SELECT stats_reset > :'bgwriter_reset_ts'::timestamptz FROM pg_stat_bgwriter; - ?column? ----------- - t -(1 row) - --- Test that reset_shared with checkpointer specified as the stats type works -SELECT stats_reset AS checkpointer_reset_ts FROM pg_stat_checkpointer \gset -SELECT pg_stat_reset_shared('checkpointer'); - pg_stat_reset_shared ----------------------- - -(1 row) - -SELECT stats_reset > :'checkpointer_reset_ts'::timestamptz FROM pg_stat_checkpointer; - ?column? ----------- - t -(1 row) - --- Test that reset_shared with recovery_prefetch specified as the stats type works -SELECT stats_reset AS recovery_prefetch_reset_ts FROM pg_stat_recovery_prefetch \gset -SELECT pg_stat_reset_shared('recovery_prefetch'); - pg_stat_reset_shared ----------------------- - -(1 row) - -SELECT stats_reset > :'recovery_prefetch_reset_ts'::timestamptz FROM pg_stat_recovery_prefetch; - ?column? ----------- - t -(1 row) - --- Test that reset_shared with slru specified as the stats type works -SELECT max(stats_reset) AS slru_reset_ts FROM pg_stat_slru \gset -SELECT pg_stat_reset_shared('slru'); - pg_stat_reset_shared ----------------------- - -(1 row) - -SELECT max(stats_reset) > :'slru_reset_ts'::timestamptz FROM pg_stat_slru; - ?column? ----------- - t -(1 row) - --- Test that reset_shared with wal specified as the stats type works -SELECT stats_reset AS wal_reset_ts FROM pg_stat_wal \gset -SELECT pg_stat_reset_shared('wal'); - pg_stat_reset_shared ----------------------- - -(1 row) - -SELECT stats_reset > :'wal_reset_ts'::timestamptz FROM pg_stat_wal; - ?column? ----------- - t -(1 row) - --- Test error case for reset_shared with unknown stats type -SELECT pg_stat_reset_shared('unknown'); -ERROR: unrecognized reset target: "unknown" -HINT: Target must be "archiver", "bgwriter", "checkpointer", "io", "recovery_prefetch", "slru", or "wal". 
--- Test that reset works for pg_stat_database --- Since pg_stat_database stats_reset starts out as NULL, reset it once first so we have something to compare it to -SELECT pg_stat_reset(); - pg_stat_reset ---------------- - -(1 row) - -SELECT stats_reset AS db_reset_ts FROM pg_stat_database WHERE datname = (SELECT current_database()) \gset -SELECT pg_stat_reset(); - pg_stat_reset ---------------- - -(1 row) - -SELECT stats_reset > :'db_reset_ts'::timestamptz FROM pg_stat_database WHERE datname = (SELECT current_database()); - ?column? ----------- - t -(1 row) - ----- --- pg_stat_get_snapshot_timestamp behavior ----- -BEGIN; -SET LOCAL stats_fetch_consistency = snapshot; --- no snapshot yet, return NULL -SELECT pg_stat_get_snapshot_timestamp(); - pg_stat_get_snapshot_timestamp --------------------------------- - -(1 row) - --- any attempt at accessing stats will build snapshot -SELECT pg_stat_get_function_calls(0); - pg_stat_get_function_calls ----------------------------- - -(1 row) - -SELECT pg_stat_get_snapshot_timestamp() >= NOW(); - ?column? ----------- - t -(1 row) - --- shows NULL again after clearing -SELECT pg_stat_clear_snapshot(); - pg_stat_clear_snapshot ------------------------- - -(1 row) - -SELECT pg_stat_get_snapshot_timestamp(); - pg_stat_get_snapshot_timestamp --------------------------------- - -(1 row) - -COMMIT; ----- --- Changing stats_fetch_consistency in a transaction. ----- -BEGIN; --- Stats filled under the cache mode -SET LOCAL stats_fetch_consistency = cache; -SELECT pg_stat_get_function_calls(0); - pg_stat_get_function_calls ----------------------------- - -(1 row) - -SELECT pg_stat_get_snapshot_timestamp() IS NOT NULL AS snapshot_ok; - snapshot_ok -------------- - f -(1 row) - --- Success in accessing pre-existing snapshot data. -SET LOCAL stats_fetch_consistency = snapshot; -SELECT pg_stat_get_snapshot_timestamp() IS NOT NULL AS snapshot_ok; - snapshot_ok -------------- - f -(1 row) - -SELECT pg_stat_get_function_calls(0); - pg_stat_get_function_calls ----------------------------- - -(1 row) - -SELECT pg_stat_get_snapshot_timestamp() IS NOT NULL AS snapshot_ok; - snapshot_ok -------------- - t -(1 row) - --- Snapshot cleared. 
-SET LOCAL stats_fetch_consistency = none; -SELECT pg_stat_get_snapshot_timestamp() IS NOT NULL AS snapshot_ok; - snapshot_ok -------------- - f -(1 row) - -SELECT pg_stat_get_function_calls(0); - pg_stat_get_function_calls ----------------------------- - -(1 row) - -SELECT pg_stat_get_snapshot_timestamp() IS NOT NULL AS snapshot_ok; - snapshot_ok -------------- - f -(1 row) - -ROLLBACK; ----- --- pg_stat_have_stats behavior ----- --- fixed-numbered stats exist -SELECT pg_stat_have_stats('bgwriter', 0, 0); - pg_stat_have_stats --------------------- - t -(1 row) - --- unknown stats kinds error out -SELECT pg_stat_have_stats('zaphod', 0, 0); -ERROR: invalid statistics kind: "zaphod" --- db stats have objoid 0 -SELECT pg_stat_have_stats('database', :dboid, 1); - pg_stat_have_stats --------------------- - f -(1 row) - -SELECT pg_stat_have_stats('database', :dboid, 0); - pg_stat_have_stats --------------------- - t -(1 row) - --- pg_stat_have_stats returns true for committed index creation -CREATE table stats_test_tab1 as select generate_series(1,10) a; -CREATE index stats_test_idx1 on stats_test_tab1(a); -SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid \gset -SET enable_seqscan TO off; -select a from stats_test_tab1 where a = 3; - a ---- - 3 -(1 row) - -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - t -(1 row) - --- pg_stat_have_stats returns false for dropped index with stats -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - t -(1 row) - -DROP index stats_test_idx1; -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - f -(1 row) - --- pg_stat_have_stats returns false for rolled back index creation -BEGIN; -CREATE index stats_test_idx1 on stats_test_tab1(a); -SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid \gset -select a from stats_test_tab1 where a = 3; - a ---- - 3 -(1 row) - -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - t -(1 row) - -ROLLBACK; -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - f -(1 row) - --- pg_stat_have_stats returns true for reindex CONCURRENTLY -CREATE index stats_test_idx1 on stats_test_tab1(a); -SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid \gset -select a from stats_test_tab1 where a = 3; - a ---- - 3 -(1 row) - -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - t -(1 row) - -REINDEX index CONCURRENTLY stats_test_idx1; --- false for previous oid -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - f -(1 row) - --- true for new oid -SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid \gset -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - t -(1 row) - --- pg_stat_have_stats returns true for a rolled back drop index with stats -BEGIN; -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - t -(1 row) - -DROP index stats_test_idx1; -ROLLBACK; -SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); - pg_stat_have_stats --------------------- - t -(1 row) - --- put enable_seqscan back to on -SET enable_seqscan TO on; --- ensure that 
stats accessors handle NULL input correctly -SELECT pg_stat_get_replication_slot(NULL); - pg_stat_get_replication_slot ------------------------------- - -(1 row) - -SELECT pg_stat_get_subscription_stats(NULL); - pg_stat_get_subscription_stats --------------------------------- - -(1 row) - --- Test that the following operations are tracked in pg_stat_io: --- - reads of target blocks into shared buffers --- - writes of shared buffers to permanent storage --- - extends of relations using shared buffers --- - fsyncs done to ensure the durability of data dirtying shared buffers --- - shared buffer hits --- There is no test for blocks evicted from shared buffers, because we cannot --- be sure of the state of shared buffers at the point the test is run. --- Create a regular table and insert some data to generate IOCONTEXT_NORMAL --- extends. -SELECT sum(extends) AS io_sum_shared_before_extends - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset -SELECT sum(writes) AS writes, sum(fsyncs) AS fsyncs - FROM pg_stat_io - WHERE object = 'relation' \gset io_sum_shared_before_ -CREATE TABLE test_io_shared(a int); -INSERT INTO test_io_shared SELECT i FROM generate_series(1,100)i; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(extends) AS io_sum_shared_after_extends - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset -SELECT :io_sum_shared_after_extends > :io_sum_shared_before_extends; - ?column? ----------- - t -(1 row) - --- After a checkpoint, there should be some additional IOCONTEXT_NORMAL writes --- and fsyncs. --- See comment above for rationale for two explicit CHECKPOINTs. -CHECKPOINT; -CHECKPOINT; -SELECT sum(writes) AS writes, sum(fsyncs) AS fsyncs - FROM pg_stat_io - WHERE object = 'relation' \gset io_sum_shared_after_ -SELECT :io_sum_shared_after_writes > :io_sum_shared_before_writes; - ?column? ----------- - t -(1 row) - -SELECT current_setting('fsync') = 'off' - OR :io_sum_shared_after_fsyncs > :io_sum_shared_before_fsyncs; - ?column? ----------- - t -(1 row) - --- Change the tablespace so that the table is rewritten directly, then SELECT --- from it to cause it to be read back into shared buffers. -SELECT sum(reads) AS io_sum_shared_before_reads - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset --- Do this in a transaction to prevent spurious failures due to concurrent accesses to our newly --- rewritten table, e.g. by autovacuum. -BEGIN; -ALTER TABLE test_io_shared SET TABLESPACE regress_tblspace; --- SELECT from the table so that the data is read into shared buffers and --- context 'normal', object 'relation' reads are counted. -SELECT COUNT(*) FROM test_io_shared; - count -------- - 100 -(1 row) - -COMMIT; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(reads) AS io_sum_shared_after_reads - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset -SELECT :io_sum_shared_after_reads > :io_sum_shared_before_reads; - ?column? ----------- - t -(1 row) - -SELECT sum(hits) AS io_sum_shared_before_hits - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset --- Select from the table again to count hits. --- Ensure we generate hits by forcing a nested loop self-join with no --- materialize node. The outer side's buffer will stay pinned, preventing its --- eviction, while we loop through the inner side and generate hits. 
-BEGIN; -SET LOCAL enable_nestloop TO on; SET LOCAL enable_mergejoin TO off; -SET LOCAL enable_hashjoin TO off; SET LOCAL enable_material TO off; --- ensure plan stays as we expect it to -EXPLAIN (COSTS OFF) SELECT COUNT(*) FROM test_io_shared t1 INNER JOIN test_io_shared t2 USING (a); - QUERY PLAN -------------------------------------------- - Aggregate - -> Nested Loop - Join Filter: (t1.a = t2.a) - -> Seq Scan on test_io_shared t1 - -> Seq Scan on test_io_shared t2 -(5 rows) - -SELECT COUNT(*) FROM test_io_shared t1 INNER JOIN test_io_shared t2 USING (a); - count -------- - 100 -(1 row) - -COMMIT; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(hits) AS io_sum_shared_after_hits - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset -SELECT :io_sum_shared_after_hits > :io_sum_shared_before_hits; - ?column? ----------- - t -(1 row) - -DROP TABLE test_io_shared; --- Test that the follow IOCONTEXT_LOCAL IOOps are tracked in pg_stat_io: --- - eviction of local buffers in order to reuse them --- - reads of temporary table blocks into local buffers --- - writes of local buffers to permanent storage --- - extends of temporary tables --- Set temp_buffers to its minimum so that we can trigger writes with fewer --- inserted tuples. Do so in a new session in case temporary tables have been --- accessed by previous tests in this session. -\c -SET temp_buffers TO 100; -CREATE TEMPORARY TABLE test_io_local(a int, b TEXT); -SELECT sum(extends) AS extends, sum(evictions) AS evictions, sum(writes) AS writes - FROM pg_stat_io - WHERE context = 'normal' AND object = 'temp relation' \gset io_sum_local_before_ --- Insert tuples into the temporary table, generating extends in the stats. --- Insert enough values that we need to reuse and write out dirty local --- buffers, generating evictions and writes. -INSERT INTO test_io_local SELECT generate_series(1, 5000) as id, repeat('a', 200); --- Ensure the table is large enough to exceed our temp_buffers setting. -SELECT pg_relation_size('test_io_local') / current_setting('block_size')::int8 > 100; - ?column? ----------- - t -(1 row) - -SELECT sum(reads) AS io_sum_local_before_reads - FROM pg_stat_io WHERE context = 'normal' AND object = 'temp relation' \gset --- Read in evicted buffers, generating reads. -SELECT COUNT(*) FROM test_io_local; - count -------- - 5000 -(1 row) - -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(evictions) AS evictions, - sum(reads) AS reads, - sum(writes) AS writes, - sum(extends) AS extends - FROM pg_stat_io - WHERE context = 'normal' AND object = 'temp relation' \gset io_sum_local_after_ -SELECT :io_sum_local_after_evictions > :io_sum_local_before_evictions, - :io_sum_local_after_reads > :io_sum_local_before_reads, - :io_sum_local_after_writes > :io_sum_local_before_writes, - :io_sum_local_after_extends > :io_sum_local_before_extends; - ?column? | ?column? | ?column? | ?column? -----------+----------+----------+---------- - t | t | t | t -(1 row) - --- Change the tablespaces so that the temporary table is rewritten to other --- local buffers, exercising a different codepath than standard local buffer --- writes. 
-ALTER TABLE test_io_local SET TABLESPACE regress_tblspace; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(writes) AS io_sum_local_new_tblspc_writes - FROM pg_stat_io WHERE context = 'normal' AND object = 'temp relation' \gset -SELECT :io_sum_local_new_tblspc_writes > :io_sum_local_after_writes; - ?column? ----------- - t -(1 row) - -RESET temp_buffers; --- Test that reuse of strategy buffers and reads of blocks into these reused --- buffers while VACUUMing are tracked in pg_stat_io. If there is sufficient --- demand for shared buffers from concurrent queries, some buffers may be --- pinned by other backends before they can be reused. In such cases, the --- backend will evict a buffer from outside the ring and add it to the --- ring. This is considered an eviction and not a reuse. --- Set wal_skip_threshold smaller than the expected size of --- test_io_vac_strategy so that, even if wal_level is minimal, VACUUM FULL will --- fsync the newly rewritten test_io_vac_strategy instead of writing it to WAL. --- Writing it to WAL will result in the newly written relation pages being in --- shared buffers -- preventing us from testing BAS_VACUUM BufferAccessStrategy --- reads. -SET wal_skip_threshold = '1 kB'; -SELECT sum(reuses) AS reuses, sum(reads) AS reads, sum(evictions) AS evictions - FROM pg_stat_io WHERE context = 'vacuum' \gset io_sum_vac_strategy_before_ -CREATE TABLE test_io_vac_strategy(a int, b int) WITH (autovacuum_enabled = 'false'); -INSERT INTO test_io_vac_strategy SELECT i, i from generate_series(1, 4500)i; --- Ensure that the next VACUUM will need to perform IO by rewriting the table --- first with VACUUM (FULL). -VACUUM (FULL) test_io_vac_strategy; --- Use the minimum BUFFER_USAGE_LIMIT to cause reuses or evictions with the --- smallest table possible. -VACUUM (PARALLEL 0, BUFFER_USAGE_LIMIT 128) test_io_vac_strategy; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(reuses) AS reuses, sum(reads) AS reads, sum(evictions) AS evictions - FROM pg_stat_io WHERE context = 'vacuum' \gset io_sum_vac_strategy_after_ -SELECT :io_sum_vac_strategy_after_reads > :io_sum_vac_strategy_before_reads; - ?column? ----------- - t -(1 row) - -SELECT (:io_sum_vac_strategy_after_reuses + :io_sum_vac_strategy_after_evictions) > - (:io_sum_vac_strategy_before_reuses + :io_sum_vac_strategy_before_evictions); - ?column? ----------- - t -(1 row) - -RESET wal_skip_threshold; --- Test that extends done by a CTAS, which uses a BAS_BULKWRITE --- BufferAccessStrategy, are tracked in pg_stat_io. -SELECT sum(extends) AS io_sum_bulkwrite_strategy_extends_before - FROM pg_stat_io WHERE context = 'bulkwrite' \gset -CREATE TABLE test_io_bulkwrite_strategy AS SELECT i FROM generate_series(1,100)i; -SELECT pg_stat_force_next_flush(); - pg_stat_force_next_flush --------------------------- - -(1 row) - -SELECT sum(extends) AS io_sum_bulkwrite_strategy_extends_after - FROM pg_stat_io WHERE context = 'bulkwrite' \gset -SELECT :io_sum_bulkwrite_strategy_extends_after > :io_sum_bulkwrite_strategy_extends_before; - ?column? 
----------- - t -(1 row) - --- Test IO stats reset -SELECT pg_stat_have_stats('io', 0, 0); - pg_stat_have_stats --------------------- - t -(1 row) - -SELECT sum(evictions) + sum(reuses) + sum(extends) + sum(fsyncs) + sum(reads) + sum(writes) + sum(writebacks) + sum(hits) AS io_stats_pre_reset - FROM pg_stat_io \gset -SELECT pg_stat_reset_shared('io'); - pg_stat_reset_shared ----------------------- - -(1 row) - -SELECT sum(evictions) + sum(reuses) + sum(extends) + sum(fsyncs) + sum(reads) + sum(writes) + sum(writebacks) + sum(hits) AS io_stats_post_reset - FROM pg_stat_io \gset -SELECT :io_stats_post_reset < :io_stats_pre_reset; - ?column? ----------- - t -(1 row) - --- test BRIN index doesn't block HOT update -CREATE TABLE brin_hot ( - id integer PRIMARY KEY, - val integer NOT NULL -) WITH (autovacuum_enabled = off, fillfactor = 70); -INSERT INTO brin_hot SELECT *, 0 FROM generate_series(1, 235); -CREATE INDEX val_brin ON brin_hot using brin(val); -CREATE FUNCTION wait_for_hot_stats() RETURNS void AS $$ -DECLARE - start_time timestamptz := clock_timestamp(); - updated bool; -BEGIN - -- we don't want to wait forever; loop will exit after 30 seconds - FOR i IN 1 .. 300 LOOP - SELECT (pg_stat_get_tuples_hot_updated('brin_hot'::regclass::oid) > 0) INTO updated; - EXIT WHEN updated; - - -- wait a little - PERFORM pg_sleep_for('100 milliseconds'); - -- reset stats snapshot so we can test again - PERFORM pg_stat_clear_snapshot(); - END LOOP; - -- report time waited in postmaster log (where it won't change test output) - RAISE log 'wait_for_hot_stats delayed % seconds', - EXTRACT(epoch FROM clock_timestamp() - start_time); -END -$$ LANGUAGE plpgsql; -UPDATE brin_hot SET val = -3 WHERE id = 42; --- We can't just call wait_for_hot_stats() at this point, because we only --- transmit stats when the session goes idle, and we probably didn't --- transmit the last couple of counts yet thanks to the rate-limiting logic --- in pgstat_report_stat(). But instead of waiting for the rate limiter's --- timeout to elapse, let's just start a new session. The old one will --- then send its stats before dying. -\c - -SELECT wait_for_hot_stats(); - wait_for_hot_stats --------------------- - -(1 row) - -SELECT pg_stat_get_tuples_hot_updated('brin_hot'::regclass::oid); - pg_stat_get_tuples_hot_updated --------------------------------- - 1 -(1 row) - -DROP TABLE brin_hot; -DROP FUNCTION wait_for_hot_stats(); --- Test handling of index predicates - updating attributes in precicates --- should not block HOT when summarizing indexes are involved. We update --- a row that was not indexed due to the index predicate, and becomes --- indexable - the HOT-updated tuple is forwarded to the BRIN index. 
-CREATE TABLE brin_hot_2 (a int, b int); -INSERT INTO brin_hot_2 VALUES (1, 100); -CREATE INDEX ON brin_hot_2 USING brin (b) WHERE a = 2; -UPDATE brin_hot_2 SET a = 2; -EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_2 WHERE a = 2 AND b = 100; - QUERY PLAN ------------------------------------ - Seq Scan on brin_hot_2 - Filter: ((a = 2) AND (b = 100)) -(2 rows) - -SELECT COUNT(*) FROM brin_hot_2 WHERE a = 2 AND b = 100; - count -------- - 1 -(1 row) - -SET enable_seqscan = off; -EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_2 WHERE a = 2 AND b = 100; - QUERY PLAN ---------------------------------------------- - Bitmap Heap Scan on brin_hot_2 - Recheck Cond: ((b = 100) AND (a = 2)) - -> Bitmap Index Scan on brin_hot_2_b_idx - Index Cond: (b = 100) -(4 rows) - -SELECT COUNT(*) FROM brin_hot_2 WHERE a = 2 AND b = 100; - count -------- - 1 -(1 row) - -DROP TABLE brin_hot_2; --- Test that updates to indexed columns are still propagated to the --- BRIN column. --- https://postgr.es/m/05ebcb44-f383-86e3-4f31-0a97a55634cf@enterprisedb.com -CREATE TABLE brin_hot_3 (a int, filler text) WITH (fillfactor = 10); -INSERT INTO brin_hot_3 SELECT 1, repeat(' ', 500) FROM generate_series(1, 20); -CREATE INDEX ON brin_hot_3 USING brin (a) WITH (pages_per_range = 1); -UPDATE brin_hot_3 SET a = 2; -EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_3 WHERE a = 2; - QUERY PLAN ---------------------------------------------- - Bitmap Heap Scan on brin_hot_3 - Recheck Cond: (a = 2) - -> Bitmap Index Scan on brin_hot_3_a_idx - Index Cond: (a = 2) -(4 rows) - -SELECT COUNT(*) FROM brin_hot_3 WHERE a = 2; - count -------- - 20 -(1 row) - -DROP TABLE brin_hot_3; -SET enable_seqscan = on; --- End of Stats Test +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. +connection to server was lost diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/oidjoins.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/oidjoins.out --- /tmp/cirrus-ci-build/src/test/regress/expected/oidjoins.out 2024-09-11 00:19:52.089221182 +0000 +++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/oidjoins.out 2024-09-11 00:29:43.512156695 +0000 @@ -1,268 +1,2 @@ --- --- Verify system catalog foreign key relationships --- -DO $doblock$ -declare - fk record; - nkeys integer; - cmd text; - err record; -begin - for fk in select * from pg_get_catalog_foreign_keys() - loop - raise notice 'checking % % => % %', - fk.fktable, fk.fkcols, fk.pktable, fk.pkcols; - nkeys := array_length(fk.fkcols, 1); - cmd := 'SELECT ctid'; - for i in 1 .. nkeys loop - cmd := cmd || ', ' || quote_ident(fk.fkcols[i]); - end loop; - if fk.is_array then - cmd := cmd || ' FROM (SELECT ctid'; - for i in 1 .. nkeys-1 loop - cmd := cmd || ', ' || quote_ident(fk.fkcols[i]); - end loop; - cmd := cmd || ', unnest(' || quote_ident(fk.fkcols[nkeys]); - cmd := cmd || ') as ' || quote_ident(fk.fkcols[nkeys]); - cmd := cmd || ' FROM ' || fk.fktable::text || ') fk WHERE '; - else - cmd := cmd || ' FROM ' || fk.fktable::text || ' fk WHERE '; - end if; - if fk.is_opt then - for i in 1 .. nkeys loop - cmd := cmd || quote_ident(fk.fkcols[i]) || ' != 0 AND '; - end loop; - end if; - cmd := cmd || 'NOT EXISTS(SELECT 1 FROM ' || fk.pktable::text || ' pk WHERE '; - for i in 1 .. nkeys loop - if i > 1 then cmd := cmd || ' AND '; end if; - cmd := cmd || 'pk.' || quote_ident(fk.pkcols[i]); - cmd := cmd || ' = fk.' 
|| quote_ident(fk.fkcols[i]); - end loop; - cmd := cmd || ')'; - -- raise notice 'cmd = %', cmd; - for err in execute cmd loop - raise warning 'FK VIOLATION IN %(%): %', fk.fktable, fk.fkcols, err; - end loop; - end loop; -end -$doblock$; -NOTICE: checking pg_proc {pronamespace} => pg_namespace {oid} -NOTICE: checking pg_proc {proowner} => pg_authid {oid} -NOTICE: checking pg_proc {prolang} => pg_language {oid} -NOTICE: checking pg_proc {provariadic} => pg_type {oid} -NOTICE: checking pg_proc {prosupport} => pg_proc {oid} -NOTICE: checking pg_proc {prorettype} => pg_type {oid} -NOTICE: checking pg_proc {proargtypes} => pg_type {oid} -NOTICE: checking pg_proc {proallargtypes} => pg_type {oid} -NOTICE: checking pg_proc {protrftypes} => pg_type {oid} -NOTICE: checking pg_type {typnamespace} => pg_namespace {oid} -NOTICE: checking pg_type {typowner} => pg_authid {oid} -NOTICE: checking pg_type {typrelid} => pg_class {oid} -NOTICE: checking pg_type {typsubscript} => pg_proc {oid} -NOTICE: checking pg_type {typelem} => pg_type {oid} -NOTICE: checking pg_type {typarray} => pg_type {oid} -NOTICE: checking pg_type {typinput} => pg_proc {oid} -NOTICE: checking pg_type {typoutput} => pg_proc {oid} -NOTICE: checking pg_type {typreceive} => pg_proc {oid} -NOTICE: checking pg_type {typsend} => pg_proc {oid} -NOTICE: checking pg_type {typmodin} => pg_proc {oid} -NOTICE: checking pg_type {typmodout} => pg_proc {oid} -NOTICE: checking pg_type {typanalyze} => pg_proc {oid} -NOTICE: checking pg_type {typbasetype} => pg_type {oid} -NOTICE: checking pg_type {typcollation} => pg_collation {oid} -NOTICE: checking pg_attribute {attrelid} => pg_class {oid} -NOTICE: checking pg_attribute {atttypid} => pg_type {oid} -NOTICE: checking pg_attribute {attcollation} => pg_collation {oid} -NOTICE: checking pg_class {relnamespace} => pg_namespace {oid} -NOTICE: checking pg_class {reltype} => pg_type {oid} -NOTICE: checking pg_class {reloftype} => pg_type {oid} -NOTICE: checking pg_class {relowner} => pg_authid {oid} -NOTICE: checking pg_class {relam} => pg_am {oid} -NOTICE: checking pg_class {reltablespace} => pg_tablespace {oid} -NOTICE: checking pg_class {reltoastrelid} => pg_class {oid} -NOTICE: checking pg_class {relrewrite} => pg_class {oid} -NOTICE: checking pg_attrdef {adrelid} => pg_class {oid} -NOTICE: checking pg_attrdef {adrelid,adnum} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_constraint {connamespace} => pg_namespace {oid} -NOTICE: checking pg_constraint {conrelid} => pg_class {oid} -NOTICE: checking pg_constraint {contypid} => pg_type {oid} -NOTICE: checking pg_constraint {conindid} => pg_class {oid} -NOTICE: checking pg_constraint {conparentid} => pg_constraint {oid} -NOTICE: checking pg_constraint {confrelid} => pg_class {oid} -NOTICE: checking pg_constraint {conpfeqop} => pg_operator {oid} -NOTICE: checking pg_constraint {conppeqop} => pg_operator {oid} -NOTICE: checking pg_constraint {conffeqop} => pg_operator {oid} -NOTICE: checking pg_constraint {conexclop} => pg_operator {oid} -NOTICE: checking pg_constraint {conrelid,conkey} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_constraint {confrelid,confkey} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_inherits {inhrelid} => pg_class {oid} -NOTICE: checking pg_inherits {inhparent} => pg_class {oid} -NOTICE: checking pg_index {indexrelid} => pg_class {oid} -NOTICE: checking pg_index {indrelid} => pg_class {oid} -NOTICE: checking pg_index {indcollation} => pg_collation {oid} -NOTICE: checking pg_index {indclass} => pg_opclass 
{oid} -NOTICE: checking pg_index {indrelid,indkey} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_operator {oprnamespace} => pg_namespace {oid} -NOTICE: checking pg_operator {oprowner} => pg_authid {oid} -NOTICE: checking pg_operator {oprleft} => pg_type {oid} -NOTICE: checking pg_operator {oprright} => pg_type {oid} -NOTICE: checking pg_operator {oprresult} => pg_type {oid} -NOTICE: checking pg_operator {oprcom} => pg_operator {oid} -NOTICE: checking pg_operator {oprnegate} => pg_operator {oid} -NOTICE: checking pg_operator {oprcode} => pg_proc {oid} -NOTICE: checking pg_operator {oprrest} => pg_proc {oid} -NOTICE: checking pg_operator {oprjoin} => pg_proc {oid} -NOTICE: checking pg_opfamily {opfmethod} => pg_am {oid} -NOTICE: checking pg_opfamily {opfnamespace} => pg_namespace {oid} -NOTICE: checking pg_opfamily {opfowner} => pg_authid {oid} -NOTICE: checking pg_opclass {opcmethod} => pg_am {oid} -NOTICE: checking pg_opclass {opcnamespace} => pg_namespace {oid} -NOTICE: checking pg_opclass {opcowner} => pg_authid {oid} -NOTICE: checking pg_opclass {opcfamily} => pg_opfamily {oid} -NOTICE: checking pg_opclass {opcintype} => pg_type {oid} -NOTICE: checking pg_opclass {opckeytype} => pg_type {oid} -NOTICE: checking pg_am {amhandler} => pg_proc {oid} -NOTICE: checking pg_amop {amopfamily} => pg_opfamily {oid} -NOTICE: checking pg_amop {amoplefttype} => pg_type {oid} -NOTICE: checking pg_amop {amoprighttype} => pg_type {oid} -NOTICE: checking pg_amop {amopopr} => pg_operator {oid} -NOTICE: checking pg_amop {amopmethod} => pg_am {oid} -NOTICE: checking pg_amop {amopsortfamily} => pg_opfamily {oid} -NOTICE: checking pg_amproc {amprocfamily} => pg_opfamily {oid} -NOTICE: checking pg_amproc {amproclefttype} => pg_type {oid} -NOTICE: checking pg_amproc {amprocrighttype} => pg_type {oid} -NOTICE: checking pg_amproc {amproc} => pg_proc {oid} -NOTICE: checking pg_language {lanowner} => pg_authid {oid} -NOTICE: checking pg_language {lanplcallfoid} => pg_proc {oid} -NOTICE: checking pg_language {laninline} => pg_proc {oid} -NOTICE: checking pg_language {lanvalidator} => pg_proc {oid} -NOTICE: checking pg_largeobject_metadata {lomowner} => pg_authid {oid} -NOTICE: checking pg_largeobject {loid} => pg_largeobject_metadata {oid} -NOTICE: checking pg_aggregate {aggfnoid} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggtransfn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggfinalfn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggcombinefn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggserialfn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggdeserialfn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggmtransfn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggminvtransfn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggmfinalfn} => pg_proc {oid} -NOTICE: checking pg_aggregate {aggsortop} => pg_operator {oid} -NOTICE: checking pg_aggregate {aggtranstype} => pg_type {oid} -NOTICE: checking pg_aggregate {aggmtranstype} => pg_type {oid} -NOTICE: checking pg_statistic {starelid} => pg_class {oid} -NOTICE: checking pg_statistic {staop1} => pg_operator {oid} -NOTICE: checking pg_statistic {staop2} => pg_operator {oid} -NOTICE: checking pg_statistic {staop3} => pg_operator {oid} -NOTICE: checking pg_statistic {staop4} => pg_operator {oid} -NOTICE: checking pg_statistic {staop5} => pg_operator {oid} -NOTICE: checking pg_statistic {stacoll1} => pg_collation {oid} -NOTICE: checking pg_statistic {stacoll2} => pg_collation {oid} -NOTICE: checking pg_statistic {stacoll3} => 
pg_collation {oid} -NOTICE: checking pg_statistic {stacoll4} => pg_collation {oid} -NOTICE: checking pg_statistic {stacoll5} => pg_collation {oid} -NOTICE: checking pg_statistic {starelid,staattnum} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_statistic_ext {stxrelid} => pg_class {oid} -NOTICE: checking pg_statistic_ext {stxnamespace} => pg_namespace {oid} -NOTICE: checking pg_statistic_ext {stxowner} => pg_authid {oid} -NOTICE: checking pg_statistic_ext {stxrelid,stxkeys} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_statistic_ext_data {stxoid} => pg_statistic_ext {oid} -NOTICE: checking pg_rewrite {ev_class} => pg_class {oid} -NOTICE: checking pg_trigger {tgrelid} => pg_class {oid} -NOTICE: checking pg_trigger {tgparentid} => pg_trigger {oid} -NOTICE: checking pg_trigger {tgfoid} => pg_proc {oid} -NOTICE: checking pg_trigger {tgconstrrelid} => pg_class {oid} -NOTICE: checking pg_trigger {tgconstrindid} => pg_class {oid} -NOTICE: checking pg_trigger {tgconstraint} => pg_constraint {oid} -NOTICE: checking pg_trigger {tgrelid,tgattr} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_event_trigger {evtowner} => pg_authid {oid} -NOTICE: checking pg_event_trigger {evtfoid} => pg_proc {oid} -NOTICE: checking pg_description {classoid} => pg_class {oid} -NOTICE: checking pg_cast {castsource} => pg_type {oid} -NOTICE: checking pg_cast {casttarget} => pg_type {oid} -NOTICE: checking pg_cast {castfunc} => pg_proc {oid} -NOTICE: checking pg_enum {enumtypid} => pg_type {oid} -NOTICE: checking pg_namespace {nspowner} => pg_authid {oid} -NOTICE: checking pg_conversion {connamespace} => pg_namespace {oid} -NOTICE: checking pg_conversion {conowner} => pg_authid {oid} -NOTICE: checking pg_conversion {conproc} => pg_proc {oid} -NOTICE: checking pg_depend {classid} => pg_class {oid} -NOTICE: checking pg_depend {refclassid} => pg_class {oid} -NOTICE: checking pg_database {datdba} => pg_authid {oid} -NOTICE: checking pg_database {dattablespace} => pg_tablespace {oid} -NOTICE: checking pg_db_role_setting {setdatabase} => pg_database {oid} -NOTICE: checking pg_db_role_setting {setrole} => pg_authid {oid} -NOTICE: checking pg_tablespace {spcowner} => pg_authid {oid} -NOTICE: checking pg_auth_members {roleid} => pg_authid {oid} -NOTICE: checking pg_auth_members {member} => pg_authid {oid} -NOTICE: checking pg_auth_members {grantor} => pg_authid {oid} -NOTICE: checking pg_shdepend {dbid} => pg_database {oid} -NOTICE: checking pg_shdepend {classid} => pg_class {oid} -NOTICE: checking pg_shdepend {refclassid} => pg_class {oid} -NOTICE: checking pg_shdescription {classoid} => pg_class {oid} -NOTICE: checking pg_ts_config {cfgnamespace} => pg_namespace {oid} -NOTICE: checking pg_ts_config {cfgowner} => pg_authid {oid} -NOTICE: checking pg_ts_config {cfgparser} => pg_ts_parser {oid} -NOTICE: checking pg_ts_config_map {mapcfg} => pg_ts_config {oid} -NOTICE: checking pg_ts_config_map {mapdict} => pg_ts_dict {oid} -NOTICE: checking pg_ts_dict {dictnamespace} => pg_namespace {oid} -NOTICE: checking pg_ts_dict {dictowner} => pg_authid {oid} -NOTICE: checking pg_ts_dict {dicttemplate} => pg_ts_template {oid} -NOTICE: checking pg_ts_parser {prsnamespace} => pg_namespace {oid} -NOTICE: checking pg_ts_parser {prsstart} => pg_proc {oid} -NOTICE: checking pg_ts_parser {prstoken} => pg_proc {oid} -NOTICE: checking pg_ts_parser {prsend} => pg_proc {oid} -NOTICE: checking pg_ts_parser {prsheadline} => pg_proc {oid} -NOTICE: checking pg_ts_parser {prslextype} => pg_proc {oid} -NOTICE: checking pg_ts_template 
{tmplnamespace} => pg_namespace {oid} -NOTICE: checking pg_ts_template {tmplinit} => pg_proc {oid} -NOTICE: checking pg_ts_template {tmpllexize} => pg_proc {oid} -NOTICE: checking pg_extension {extowner} => pg_authid {oid} -NOTICE: checking pg_extension {extnamespace} => pg_namespace {oid} -NOTICE: checking pg_extension {extconfig} => pg_class {oid} -NOTICE: checking pg_foreign_data_wrapper {fdwowner} => pg_authid {oid} -NOTICE: checking pg_foreign_data_wrapper {fdwhandler} => pg_proc {oid} -NOTICE: checking pg_foreign_data_wrapper {fdwvalidator} => pg_proc {oid} -NOTICE: checking pg_foreign_server {srvowner} => pg_authid {oid} -NOTICE: checking pg_foreign_server {srvfdw} => pg_foreign_data_wrapper {oid} -NOTICE: checking pg_user_mapping {umuser} => pg_authid {oid} -NOTICE: checking pg_user_mapping {umserver} => pg_foreign_server {oid} -NOTICE: checking pg_foreign_table {ftrelid} => pg_class {oid} -NOTICE: checking pg_foreign_table {ftserver} => pg_foreign_server {oid} -NOTICE: checking pg_policy {polrelid} => pg_class {oid} -NOTICE: checking pg_policy {polroles} => pg_authid {oid} -NOTICE: checking pg_default_acl {defaclrole} => pg_authid {oid} -NOTICE: checking pg_default_acl {defaclnamespace} => pg_namespace {oid} -NOTICE: checking pg_init_privs {classoid} => pg_class {oid} -NOTICE: checking pg_seclabel {classoid} => pg_class {oid} -NOTICE: checking pg_shseclabel {classoid} => pg_class {oid} -NOTICE: checking pg_collation {collnamespace} => pg_namespace {oid} -NOTICE: checking pg_collation {collowner} => pg_authid {oid} -NOTICE: checking pg_partitioned_table {partrelid} => pg_class {oid} -NOTICE: checking pg_partitioned_table {partdefid} => pg_class {oid} -NOTICE: checking pg_partitioned_table {partclass} => pg_opclass {oid} -NOTICE: checking pg_partitioned_table {partcollation} => pg_collation {oid} -NOTICE: checking pg_partitioned_table {partrelid,partattrs} => pg_attribute {attrelid,attnum} -NOTICE: checking pg_range {rngtypid} => pg_type {oid} -NOTICE: checking pg_range {rngsubtype} => pg_type {oid} -NOTICE: checking pg_range {rngmultitypid} => pg_type {oid} -NOTICE: checking pg_range {rngcollation} => pg_collation {oid} -NOTICE: checking pg_range {rngsubopc} => pg_opclass {oid} -NOTICE: checking pg_range {rngcanonical} => pg_proc {oid} -NOTICE: checking pg_range {rngsubdiff} => pg_proc {oid} -NOTICE: checking pg_transform {trftype} => pg_type {oid} -NOTICE: checking pg_transform {trflang} => pg_language {oid} -NOTICE: checking pg_transform {trffromsql} => pg_proc {oid} -NOTICE: checking pg_transform {trftosql} => pg_proc {oid} -NOTICE: checking pg_sequence {seqrelid} => pg_class {oid} -NOTICE: checking pg_sequence {seqtypid} => pg_type {oid} -NOTICE: checking pg_publication {pubowner} => pg_authid {oid} -NOTICE: checking pg_publication_namespace {pnpubid} => pg_publication {oid} -NOTICE: checking pg_publication_namespace {pnnspid} => pg_namespace {oid} -NOTICE: checking pg_publication_rel {prpubid} => pg_publication {oid} -NOTICE: checking pg_publication_rel {prrelid} => pg_class {oid} -NOTICE: checking pg_subscription {subdbid} => pg_database {oid} -NOTICE: checking pg_subscription {subowner} => pg_authid {oid} -NOTICE: checking pg_subscription_rel {srsubid} => pg_subscription {oid} -NOTICE: checking pg_subscription_rel {srrelid} => pg_class {oid} +psql: error: connection to server on socket "/tmp/zHbaMLr5OX/.s.PGSQL.22638" failed: No such file or directory + Is the server running locally and accepting connections on that socket? 
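For reference, the oidjoins check whose NOTICE lines were lost above is driven entirely by pg_get_catalog_foreign_keys(); a minimal standalone query over that same function (a sketch using only its documented output columns, not part of the regression script itself) looks like this:

    -- List the catalog foreign-key relationships the DO block above walks,
    -- one row per referencing column set.
    SELECT fktable, fkcols, pktable, pkcols, is_array, is_opt
      FROM pg_get_catalog_foreign_keys()
     ORDER BY fktable::text, fkcols;

Each row names the referencing catalog and columns (fktable, fkcols) and the catalog and columns they must resolve to (pktable, pkcols); is_array marks array-valued references that the generated query unnests, and is_opt marks references where a zero OID is permitted, which is why the generated SQL above adds the "!= 0" filters.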
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/event_trigger.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/event_trigger.out --- /tmp/cirrus-ci-build/src/test/regress/expected/event_trigger.out 2024-09-11 00:19:52.053106672 +0000 +++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/event_trigger.out 2024-09-11 00:29:43.512156695 +0000 @@ -1,742 +1,2 @@ --- should fail, return type mismatch -create event trigger regress_event_trigger - on ddl_command_start - execute procedure pg_backend_pid(); -ERROR: function pg_backend_pid must return type event_trigger --- OK -create function test_event_trigger() returns event_trigger as $$ -BEGIN - RAISE NOTICE 'test_event_trigger: % %', tg_event, tg_tag; -END -$$ language plpgsql; --- should fail, can't call it as a plain function -SELECT test_event_trigger(); -ERROR: trigger functions can only be called as triggers -CONTEXT: compilation of PL/pgSQL function "test_event_trigger" near line 1 --- should fail, event triggers cannot have declared arguments -create function test_event_trigger_arg(name text) -returns event_trigger as $$ BEGIN RETURN 1; END $$ language plpgsql; -ERROR: event trigger functions cannot have declared arguments -CONTEXT: compilation of PL/pgSQL function "test_event_trigger_arg" near line 1 --- should fail, SQL functions cannot be event triggers -create function test_event_trigger_sql() returns event_trigger as $$ -SELECT 1 $$ language sql; -ERROR: SQL functions cannot return type event_trigger --- should fail, no elephant_bootstrap entry point -create event trigger regress_event_trigger on elephant_bootstrap - execute procedure test_event_trigger(); -ERROR: unrecognized event name "elephant_bootstrap" --- OK -create event trigger regress_event_trigger on ddl_command_start - execute procedure test_event_trigger(); --- OK -create event trigger regress_event_trigger_end on ddl_command_end - execute function test_event_trigger(); --- should fail, food is not a valid filter variable -create event trigger regress_event_trigger2 on ddl_command_start - when food in ('sandwich') - execute procedure test_event_trigger(); -ERROR: unrecognized filter variable "food" --- should fail, sandwich is not a valid command tag -create event trigger regress_event_trigger2 on ddl_command_start - when tag in ('sandwich') - execute procedure test_event_trigger(); -ERROR: filter value "sandwich" not recognized for filter variable "tag" --- should fail, create skunkcabbage is not a valid command tag -create event trigger regress_event_trigger2 on ddl_command_start - when tag in ('create table', 'create skunkcabbage') - execute procedure test_event_trigger(); -ERROR: filter value "create skunkcabbage" not recognized for filter variable "tag" --- should fail, can't have event triggers on event triggers -create event trigger regress_event_trigger2 on ddl_command_start - when tag in ('DROP EVENT TRIGGER') - execute procedure test_event_trigger(); -ERROR: event triggers are not supported for DROP EVENT TRIGGER --- should fail, can't have event triggers on global objects -create event trigger regress_event_trigger2 on ddl_command_start - when tag in ('CREATE ROLE') - execute procedure test_event_trigger(); -ERROR: event triggers are not supported for CREATE ROLE --- should fail, can't have event triggers on global objects -create event trigger regress_event_trigger2 on ddl_command_start - when tag in ('CREATE DATABASE') - execute procedure test_event_trigger(); -ERROR: event triggers are 
not supported for CREATE DATABASE --- should fail, can't have event triggers on global objects -create event trigger regress_event_trigger2 on ddl_command_start - when tag in ('CREATE TABLESPACE') - execute procedure test_event_trigger(); -ERROR: event triggers are not supported for CREATE TABLESPACE --- should fail, can't have same filter variable twice -create event trigger regress_event_trigger2 on ddl_command_start - when tag in ('create table') and tag in ('CREATE FUNCTION') - execute procedure test_event_trigger(); -ERROR: filter variable "tag" specified more than once --- should fail, can't have arguments -create event trigger regress_event_trigger2 on ddl_command_start - execute procedure test_event_trigger('argument not allowed'); -ERROR: syntax error at or near "'argument not allowed'" -LINE 2: execute procedure test_event_trigger('argument not allowe... - ^ --- OK -create event trigger regress_event_trigger2 on ddl_command_start - when tag in ('create table', 'CREATE FUNCTION') - execute procedure test_event_trigger(); --- OK -comment on event trigger regress_event_trigger is 'test comment'; --- drop as non-superuser should fail -create role regress_evt_user; -set role regress_evt_user; -create event trigger regress_event_trigger_noperms on ddl_command_start - execute procedure test_event_trigger(); -ERROR: permission denied to create event trigger "regress_event_trigger_noperms" -HINT: Must be superuser to create an event trigger. -reset role; --- test enabling and disabling -alter event trigger regress_event_trigger disable; --- fires _trigger2 and _trigger_end should fire, but not _trigger -create table event_trigger_fire1 (a int); -NOTICE: test_event_trigger: ddl_command_start CREATE TABLE -NOTICE: test_event_trigger: ddl_command_end CREATE TABLE -alter event trigger regress_event_trigger enable; -set session_replication_role = replica; --- fires nothing -create table event_trigger_fire2 (a int); -alter event trigger regress_event_trigger enable replica; --- fires only _trigger -create table event_trigger_fire3 (a int); -NOTICE: test_event_trigger: ddl_command_start CREATE TABLE -alter event trigger regress_event_trigger enable always; --- fires only _trigger -create table event_trigger_fire4 (a int); -NOTICE: test_event_trigger: ddl_command_start CREATE TABLE -reset session_replication_role; --- fires all three -create table event_trigger_fire5 (a int); -NOTICE: test_event_trigger: ddl_command_start CREATE TABLE -NOTICE: test_event_trigger: ddl_command_start CREATE TABLE -NOTICE: test_event_trigger: ddl_command_end CREATE TABLE --- non-top-level command -create function f1() returns int -language plpgsql -as $$ -begin - create table event_trigger_fire6 (a int); - return 0; -end $$; -NOTICE: test_event_trigger: ddl_command_start CREATE FUNCTION -NOTICE: test_event_trigger: ddl_command_start CREATE FUNCTION -NOTICE: test_event_trigger: ddl_command_end CREATE FUNCTION -select f1(); -NOTICE: test_event_trigger: ddl_command_start CREATE TABLE -NOTICE: test_event_trigger: ddl_command_start CREATE TABLE -NOTICE: test_event_trigger: ddl_command_end CREATE TABLE - f1 ----- - 0 -(1 row) - --- non-top-level command -create procedure p1() -language plpgsql -as $$ -begin - create table event_trigger_fire7 (a int); -end $$; -NOTICE: test_event_trigger: ddl_command_start CREATE PROCEDURE -NOTICE: test_event_trigger: ddl_command_end CREATE PROCEDURE -call p1(); -NOTICE: test_event_trigger: ddl_command_start CREATE TABLE -NOTICE: test_event_trigger: ddl_command_start CREATE TABLE -NOTICE: 
test_event_trigger: ddl_command_end CREATE TABLE --- clean up -alter event trigger regress_event_trigger disable; -drop table event_trigger_fire2, event_trigger_fire3, event_trigger_fire4, event_trigger_fire5, event_trigger_fire6, event_trigger_fire7; -NOTICE: test_event_trigger: ddl_command_end DROP TABLE -drop routine f1(), p1(); -NOTICE: test_event_trigger: ddl_command_end DROP ROUTINE --- regress_event_trigger_end should fire on these commands -grant all on table event_trigger_fire1 to public; -NOTICE: test_event_trigger: ddl_command_end GRANT -comment on table event_trigger_fire1 is 'here is a comment'; -NOTICE: test_event_trigger: ddl_command_end COMMENT -revoke all on table event_trigger_fire1 from public; -NOTICE: test_event_trigger: ddl_command_end REVOKE -drop table event_trigger_fire1; -NOTICE: test_event_trigger: ddl_command_end DROP TABLE -create foreign data wrapper useless; -NOTICE: test_event_trigger: ddl_command_end CREATE FOREIGN DATA WRAPPER -create server useless_server foreign data wrapper useless; -NOTICE: test_event_trigger: ddl_command_end CREATE SERVER -create user mapping for regress_evt_user server useless_server; -NOTICE: test_event_trigger: ddl_command_end CREATE USER MAPPING -alter default privileges for role regress_evt_user - revoke delete on tables from regress_evt_user; -NOTICE: test_event_trigger: ddl_command_end ALTER DEFAULT PRIVILEGES --- alter owner to non-superuser should fail -alter event trigger regress_event_trigger owner to regress_evt_user; -ERROR: permission denied to change owner of event trigger "regress_event_trigger" -HINT: The owner of an event trigger must be a superuser. --- alter owner to superuser should work -alter role regress_evt_user superuser; -alter event trigger regress_event_trigger owner to regress_evt_user; --- should fail, name collision -alter event trigger regress_event_trigger rename to regress_event_trigger2; -ERROR: event trigger "regress_event_trigger2" already exists --- OK -alter event trigger regress_event_trigger rename to regress_event_trigger3; --- should fail, doesn't exist any more -drop event trigger regress_event_trigger; -ERROR: event trigger "regress_event_trigger" does not exist --- should fail, regress_evt_user owns some objects -drop role regress_evt_user; -ERROR: role "regress_evt_user" cannot be dropped because some objects depend on it -DETAIL: owner of event trigger regress_event_trigger3 -owner of user mapping for regress_evt_user on server useless_server -owner of default privileges on new relations belonging to role regress_evt_user --- cleanup before next test --- these are all OK; the second one should emit a NOTICE -drop event trigger if exists regress_event_trigger2; -drop event trigger if exists regress_event_trigger2; -NOTICE: event trigger "regress_event_trigger2" does not exist, skipping -drop event trigger regress_event_trigger3; -drop event trigger regress_event_trigger_end; --- test support for dropped objects -CREATE SCHEMA schema_one authorization regress_evt_user; -CREATE SCHEMA schema_two authorization regress_evt_user; -CREATE SCHEMA audit_tbls authorization regress_evt_user; -CREATE TEMP TABLE a_temp_tbl (); -SET SESSION AUTHORIZATION regress_evt_user; -CREATE TABLE schema_one.table_one(a int); -CREATE TABLE schema_one."table two"(a int); -CREATE TABLE schema_one.table_three(a int); -CREATE TABLE audit_tbls.schema_one_table_two(the_value text); -CREATE TABLE schema_two.table_two(a int); -CREATE TABLE schema_two.table_three(a int, b text); -CREATE TABLE 
audit_tbls.schema_two_table_three(the_value text); -CREATE OR REPLACE FUNCTION schema_two.add(int, int) RETURNS int LANGUAGE plpgsql - CALLED ON NULL INPUT - AS $$ BEGIN RETURN coalesce($1,0) + coalesce($2,0); END; $$; -CREATE AGGREGATE schema_two.newton - (BASETYPE = int, SFUNC = schema_two.add, STYPE = int); -RESET SESSION AUTHORIZATION; -CREATE TABLE undroppable_objs ( - object_type text, - object_identity text -); -INSERT INTO undroppable_objs VALUES -('table', 'schema_one.table_three'), -('table', 'audit_tbls.schema_two_table_three'); -CREATE TABLE dropped_objects ( - type text, - schema text, - object text -); --- This tests errors raised within event triggers; the one in audit_tbls --- uses 2nd-level recursive invocation via test_evtrig_dropped_objects(). -CREATE OR REPLACE FUNCTION undroppable() RETURNS event_trigger -LANGUAGE plpgsql AS $$ -DECLARE - obj record; -BEGIN - PERFORM 1 FROM pg_tables WHERE tablename = 'undroppable_objs'; - IF NOT FOUND THEN - RAISE NOTICE 'table undroppable_objs not found, skipping'; - RETURN; - END IF; - FOR obj IN - SELECT * FROM pg_event_trigger_dropped_objects() JOIN - undroppable_objs USING (object_type, object_identity) - LOOP - RAISE EXCEPTION 'object % of type % cannot be dropped', - obj.object_identity, obj.object_type; - END LOOP; -END; -$$; -CREATE EVENT TRIGGER undroppable ON sql_drop - EXECUTE PROCEDURE undroppable(); -CREATE OR REPLACE FUNCTION test_evtrig_dropped_objects() RETURNS event_trigger -LANGUAGE plpgsql AS $$ -DECLARE - obj record; -BEGIN - FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() - LOOP - IF obj.object_type = 'table' THEN - EXECUTE format('DROP TABLE IF EXISTS audit_tbls.%I', - format('%s_%s', obj.schema_name, obj.object_name)); - END IF; - - INSERT INTO dropped_objects - (type, schema, object) VALUES - (obj.object_type, obj.schema_name, obj.object_identity); - END LOOP; -END -$$; -CREATE EVENT TRIGGER regress_event_trigger_drop_objects ON sql_drop - WHEN TAG IN ('drop table', 'drop function', 'drop view', - 'drop owned', 'drop schema', 'alter table') - EXECUTE PROCEDURE test_evtrig_dropped_objects(); -ALTER TABLE schema_one.table_one DROP COLUMN a; -DROP SCHEMA schema_one, schema_two CASCADE; -NOTICE: drop cascades to 7 other objects -DETAIL: drop cascades to table schema_two.table_two -drop cascades to table schema_two.table_three -drop cascades to function schema_two.add(integer,integer) -drop cascades to function schema_two.newton(integer) -drop cascades to table schema_one.table_one -drop cascades to table schema_one."table two" -drop cascades to table schema_one.table_three -NOTICE: table "schema_two_table_two" does not exist, skipping -NOTICE: table "audit_tbls_schema_two_table_three" does not exist, skipping -ERROR: object audit_tbls.schema_two_table_three of type table cannot be dropped -CONTEXT: PL/pgSQL function undroppable() line 14 at RAISE -SQL statement "DROP TABLE IF EXISTS audit_tbls.schema_two_table_three" -PL/pgSQL function test_evtrig_dropped_objects() line 8 at EXECUTE -DELETE FROM undroppable_objs WHERE object_identity = 'audit_tbls.schema_two_table_three'; -DROP SCHEMA schema_one, schema_two CASCADE; -NOTICE: drop cascades to 7 other objects -DETAIL: drop cascades to table schema_two.table_two -drop cascades to table schema_two.table_three -drop cascades to function schema_two.add(integer,integer) -drop cascades to function schema_two.newton(integer) -drop cascades to table schema_one.table_one -drop cascades to table schema_one."table two" -drop cascades to table schema_one.table_three 
-NOTICE: table "schema_two_table_two" does not exist, skipping -NOTICE: table "audit_tbls_schema_two_table_three" does not exist, skipping -NOTICE: table "schema_one_table_one" does not exist, skipping -NOTICE: table "schema_one_table two" does not exist, skipping -NOTICE: table "schema_one_table_three" does not exist, skipping -ERROR: object schema_one.table_three of type table cannot be dropped -CONTEXT: PL/pgSQL function undroppable() line 14 at RAISE -DELETE FROM undroppable_objs WHERE object_identity = 'schema_one.table_three'; -DROP SCHEMA schema_one, schema_two CASCADE; -NOTICE: drop cascades to 7 other objects -DETAIL: drop cascades to table schema_two.table_two -drop cascades to table schema_two.table_three -drop cascades to function schema_two.add(integer,integer) -drop cascades to function schema_two.newton(integer) -drop cascades to table schema_one.table_one -drop cascades to table schema_one."table two" -drop cascades to table schema_one.table_three -NOTICE: table "schema_two_table_two" does not exist, skipping -NOTICE: table "audit_tbls_schema_two_table_three" does not exist, skipping -NOTICE: table "schema_one_table_one" does not exist, skipping -NOTICE: table "schema_one_table two" does not exist, skipping -NOTICE: table "schema_one_table_three" does not exist, skipping -SELECT * FROM dropped_objects WHERE schema IS NULL OR schema <> 'pg_toast'; - type | schema | object ---------------+------------+------------------------------------- - table column | schema_one | schema_one.table_one.a - schema | | schema_two - table | schema_two | schema_two.table_two - type | schema_two | schema_two.table_two - type | schema_two | schema_two.table_two[] - table | audit_tbls | audit_tbls.schema_two_table_three - type | audit_tbls | audit_tbls.schema_two_table_three - type | audit_tbls | audit_tbls.schema_two_table_three[] - table | schema_two | schema_two.table_three - type | schema_two | schema_two.table_three - type | schema_two | schema_two.table_three[] - function | schema_two | schema_two.add(integer,integer) - aggregate | schema_two | schema_two.newton(integer) - schema | | schema_one - table | schema_one | schema_one.table_one - type | schema_one | schema_one.table_one - type | schema_one | schema_one.table_one[] - table | schema_one | schema_one."table two" - type | schema_one | schema_one."table two" - type | schema_one | schema_one."table two"[] - table | schema_one | schema_one.table_three - type | schema_one | schema_one.table_three - type | schema_one | schema_one.table_three[] -(23 rows) - -DROP OWNED BY regress_evt_user; -NOTICE: schema "audit_tbls" does not exist, skipping -SELECT * FROM dropped_objects WHERE type = 'schema'; - type | schema | object ---------+--------+------------ - schema | | schema_two - schema | | schema_one - schema | | audit_tbls -(3 rows) - -DROP ROLE regress_evt_user; -DROP EVENT TRIGGER regress_event_trigger_drop_objects; -DROP EVENT TRIGGER undroppable; --- Event triggers on relations. 
-CREATE OR REPLACE FUNCTION event_trigger_report_dropped() - RETURNS event_trigger - LANGUAGE plpgsql -AS $$ -DECLARE r record; -BEGIN - FOR r IN SELECT * from pg_event_trigger_dropped_objects() - LOOP - IF NOT r.normal AND NOT r.original THEN - CONTINUE; - END IF; - RAISE NOTICE 'NORMAL: orig=% normal=% istemp=% type=% identity=% name=% args=%', - r.original, r.normal, r.is_temporary, r.object_type, - r.object_identity, r.address_names, r.address_args; - END LOOP; -END; $$; -CREATE EVENT TRIGGER regress_event_trigger_report_dropped ON sql_drop - EXECUTE PROCEDURE event_trigger_report_dropped(); -CREATE OR REPLACE FUNCTION event_trigger_report_end() - RETURNS event_trigger - LANGUAGE plpgsql -AS $$ -DECLARE r RECORD; -BEGIN - FOR r IN SELECT * FROM pg_event_trigger_ddl_commands() - LOOP - RAISE NOTICE 'END: command_tag=% type=% identity=%', - r.command_tag, r.object_type, r.object_identity; - END LOOP; -END; $$; -CREATE EVENT TRIGGER regress_event_trigger_report_end ON ddl_command_end - EXECUTE PROCEDURE event_trigger_report_end(); -CREATE SCHEMA evttrig - CREATE TABLE one (col_a SERIAL PRIMARY KEY, col_b text DEFAULT 'forty two', col_c SERIAL) - CREATE INDEX one_idx ON one (col_b) - CREATE TABLE two (col_c INTEGER CHECK (col_c > 0) REFERENCES one DEFAULT 42) - CREATE TABLE id (col_d int NOT NULL GENERATED ALWAYS AS IDENTITY); -NOTICE: END: command_tag=CREATE SCHEMA type=schema identity=evttrig -NOTICE: END: command_tag=CREATE SEQUENCE type=sequence identity=evttrig.one_col_a_seq -NOTICE: END: command_tag=CREATE SEQUENCE type=sequence identity=evttrig.one_col_c_seq -NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.one -NOTICE: END: command_tag=CREATE INDEX type=index identity=evttrig.one_pkey -NOTICE: END: command_tag=ALTER SEQUENCE type=sequence identity=evttrig.one_col_a_seq -NOTICE: END: command_tag=ALTER SEQUENCE type=sequence identity=evttrig.one_col_c_seq -NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.two -NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.two -NOTICE: END: command_tag=CREATE SEQUENCE type=sequence identity=evttrig.id_col_d_seq -NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.id -NOTICE: END: command_tag=ALTER SEQUENCE type=sequence identity=evttrig.id_col_d_seq -NOTICE: END: command_tag=CREATE INDEX type=index identity=evttrig.one_idx --- Partitioned tables with a partitioned index -CREATE TABLE evttrig.parted ( - id int PRIMARY KEY) - PARTITION BY RANGE (id); -NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.parted -NOTICE: END: command_tag=CREATE INDEX type=index identity=evttrig.parted_pkey -CREATE TABLE evttrig.part_1_10 PARTITION OF evttrig.parted (id) - FOR VALUES FROM (1) TO (10); -NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.part_1_10 -CREATE TABLE evttrig.part_10_20 PARTITION OF evttrig.parted (id) - FOR VALUES FROM (10) TO (20) PARTITION BY RANGE (id); -NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.part_10_20 -CREATE TABLE evttrig.part_10_15 PARTITION OF evttrig.part_10_20 (id) - FOR VALUES FROM (10) TO (15); -NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.part_10_15 -CREATE TABLE evttrig.part_15_20 PARTITION OF evttrig.part_10_20 (id) - FOR VALUES FROM (15) TO (20); -NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.part_15_20 -ALTER TABLE evttrig.two DROP COLUMN col_c; -NOTICE: NORMAL: orig=t normal=f istemp=f type=table column identity=evttrig.two.col_c name={evttrig,two,col_c} args={} -NOTICE: 
NORMAL: orig=f normal=t istemp=f type=table constraint identity=two_col_c_check on evttrig.two name={evttrig,two,two_col_c_check} args={} -NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.two -ALTER TABLE evttrig.one ALTER COLUMN col_b DROP DEFAULT; -NOTICE: NORMAL: orig=t normal=f istemp=f type=default value identity=for evttrig.one.col_b name={evttrig,one,col_b} args={} -NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.one -ALTER TABLE evttrig.one DROP CONSTRAINT one_pkey; -NOTICE: NORMAL: orig=t normal=f istemp=f type=table constraint identity=one_pkey on evttrig.one name={evttrig,one,one_pkey} args={} -NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.one -ALTER TABLE evttrig.one DROP COLUMN col_c; -NOTICE: NORMAL: orig=t normal=f istemp=f type=table column identity=evttrig.one.col_c name={evttrig,one,col_c} args={} -NOTICE: NORMAL: orig=f normal=t istemp=f type=default value identity=for evttrig.one.col_c name={evttrig,one,col_c} args={} -NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.one -ALTER TABLE evttrig.id ALTER COLUMN col_d SET DATA TYPE bigint; -NOTICE: END: command_tag=ALTER SEQUENCE type=sequence identity=evttrig.id_col_d_seq -NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.id -ALTER TABLE evttrig.id ALTER COLUMN col_d DROP IDENTITY, - ALTER COLUMN col_d SET DATA TYPE int; -NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.id -DROP INDEX evttrig.one_idx; -NOTICE: NORMAL: orig=t normal=f istemp=f type=index identity=evttrig.one_idx name={evttrig,one_idx} args={} -DROP SCHEMA evttrig CASCADE; -NOTICE: drop cascades to 4 other objects -DETAIL: drop cascades to table evttrig.one -drop cascades to table evttrig.two -drop cascades to table evttrig.id -drop cascades to table evttrig.parted -NOTICE: NORMAL: orig=t normal=f istemp=f type=schema identity=evttrig name={evttrig} args={} -NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.one name={evttrig,one} args={} -NOTICE: NORMAL: orig=f normal=t istemp=f type=sequence identity=evttrig.one_col_a_seq name={evttrig,one_col_a_seq} args={} -NOTICE: NORMAL: orig=f normal=t istemp=f type=default value identity=for evttrig.one.col_a name={evttrig,one,col_a} args={} -NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.two name={evttrig,two} args={} -NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.id name={evttrig,id} args={} -NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.parted name={evttrig,parted} args={} -NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_1_10 name={evttrig,part_1_10} args={} -NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_10_20 name={evttrig,part_10_20} args={} -NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_10_15 name={evttrig,part_10_15} args={} -NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_15_20 name={evttrig,part_15_20} args={} -DROP TABLE a_temp_tbl; -NOTICE: NORMAL: orig=t normal=f istemp=t type=table identity=pg_temp.a_temp_tbl name={pg_temp,a_temp_tbl} args={} --- CREATE OPERATOR CLASS without FAMILY clause should report --- both CREATE OPERATOR FAMILY and CREATE OPERATOR CLASS -CREATE OPERATOR CLASS evttrigopclass FOR TYPE int USING btree AS STORAGE int; -NOTICE: END: command_tag=CREATE OPERATOR FAMILY type=operator family identity=public.evttrigopclass USING btree -NOTICE: END: command_tag=CREATE OPERATOR CLASS type=operator class 
identity=public.evttrigopclass USING btree -DROP EVENT TRIGGER regress_event_trigger_report_dropped; -DROP EVENT TRIGGER regress_event_trigger_report_end; --- only allowed from within an event trigger function, should fail -select pg_event_trigger_table_rewrite_oid(); -ERROR: pg_event_trigger_table_rewrite_oid() can only be called in a table_rewrite event trigger function --- test Table Rewrite Event Trigger -CREATE OR REPLACE FUNCTION test_evtrig_no_rewrite() RETURNS event_trigger -LANGUAGE plpgsql AS $$ -BEGIN - RAISE EXCEPTION 'rewrites not allowed'; -END; -$$; -create event trigger no_rewrite_allowed on table_rewrite - execute procedure test_evtrig_no_rewrite(); -create table rewriteme (id serial primary key, foo float, bar timestamptz); -insert into rewriteme - select x * 1.001 from generate_series(1, 500) as t(x); -alter table rewriteme alter column foo type numeric; -ERROR: rewrites not allowed -CONTEXT: PL/pgSQL function test_evtrig_no_rewrite() line 3 at RAISE -alter table rewriteme add column baz int default 0; --- test with more than one reason to rewrite a single table -CREATE OR REPLACE FUNCTION test_evtrig_no_rewrite() RETURNS event_trigger -LANGUAGE plpgsql AS $$ -BEGIN - RAISE NOTICE 'Table ''%'' is being rewritten (reason = %)', - pg_event_trigger_table_rewrite_oid()::regclass, - pg_event_trigger_table_rewrite_reason(); -END; -$$; -alter table rewriteme - add column onemore int default 0, - add column another int default -1, - alter column foo type numeric(10,4); -NOTICE: Table 'rewriteme' is being rewritten (reason = 4) --- matview rewrite when changing access method -CREATE MATERIALIZED VIEW heapmv USING heap AS SELECT 1 AS a; -ALTER MATERIALIZED VIEW heapmv SET ACCESS METHOD heap2; -NOTICE: Table 'heapmv' is being rewritten (reason = 8) -DROP MATERIALIZED VIEW heapmv; --- shouldn't trigger a table_rewrite event -alter table rewriteme alter column foo type numeric(12,4); -begin; -set timezone to 'UTC'; -alter table rewriteme alter column bar type timestamp; -set timezone to '0'; -alter table rewriteme alter column bar type timestamptz; -set timezone to 'Europe/London'; -alter table rewriteme alter column bar type timestamp; -- does rewrite -NOTICE: Table 'rewriteme' is being rewritten (reason = 4) -rollback; --- typed tables are rewritten when their type changes. Don't emit table --- name, because firing order is not stable. 
-CREATE OR REPLACE FUNCTION test_evtrig_no_rewrite() RETURNS event_trigger
-LANGUAGE plpgsql AS $$
-BEGIN
- RAISE NOTICE 'Table is being rewritten (reason = %)',
- pg_event_trigger_table_rewrite_reason();
-END;
-$$;
-create type rewritetype as (a int);
-create table rewritemetoo1 of rewritetype;
-create table rewritemetoo2 of rewritetype;
-alter type rewritetype alter attribute a type text cascade;
-NOTICE: Table is being rewritten (reason = 4)
-NOTICE: Table is being rewritten (reason = 4)
--- but this doesn't work
-create table rewritemetoo3 (a rewritetype);
-alter type rewritetype alter attribute a type varchar cascade;
-ERROR: cannot alter type "rewritetype" because column "rewritemetoo3.a" uses it
-drop table rewriteme;
-drop event trigger no_rewrite_allowed;
-drop function test_evtrig_no_rewrite();
--- Tests for REINDEX
-CREATE OR REPLACE FUNCTION reindex_start_command()
-RETURNS event_trigger AS $$
-BEGIN
- RAISE NOTICE 'REINDEX START: % %', tg_event, tg_tag;
-END;
-$$ LANGUAGE plpgsql;
-CREATE EVENT TRIGGER regress_reindex_start ON ddl_command_start
- WHEN TAG IN ('REINDEX')
- EXECUTE PROCEDURE reindex_start_command();
-CREATE FUNCTION reindex_end_command()
-RETURNS event_trigger AS $$
-DECLARE
- obj record;
-BEGIN
- FOR obj IN SELECT * FROM pg_event_trigger_ddl_commands()
- LOOP
- RAISE NOTICE 'REINDEX END: command_tag=% type=% identity=%',
- obj.command_tag, obj.object_type, obj.object_identity;
- END LOOP;
-END;
-$$ LANGUAGE plpgsql;
-CREATE EVENT TRIGGER regress_reindex_end ON ddl_command_end
- WHEN TAG IN ('REINDEX')
- EXECUTE PROCEDURE reindex_end_command();
--- Extra event to force the use of a snapshot.
-CREATE FUNCTION reindex_end_command_snap() RETURNS EVENT_TRIGGER
- AS $$ BEGIN PERFORM 1; END $$ LANGUAGE plpgsql;
-CREATE EVENT TRIGGER regress_reindex_end_snap ON ddl_command_end
- EXECUTE FUNCTION reindex_end_command_snap();
--- With simple relation
-CREATE TABLE concur_reindex_tab (c1 int);
-CREATE INDEX concur_reindex_ind ON concur_reindex_tab (c1);
--- Both start and end triggers enabled.
-REINDEX INDEX concur_reindex_ind;
-NOTICE: REINDEX START: ddl_command_start REINDEX
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_ind
-REINDEX TABLE concur_reindex_tab;
-NOTICE: REINDEX START: ddl_command_start REINDEX
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_ind
-REINDEX INDEX CONCURRENTLY concur_reindex_ind;
-NOTICE: REINDEX START: ddl_command_start REINDEX
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_ind
-REINDEX TABLE CONCURRENTLY concur_reindex_tab;
-NOTICE: REINDEX START: ddl_command_start REINDEX
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_ind
--- with start trigger disabled.
-ALTER EVENT TRIGGER regress_reindex_start DISABLE;
-REINDEX INDEX concur_reindex_ind;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_ind
-REINDEX INDEX CONCURRENTLY concur_reindex_ind;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_ind
--- without an index
-DROP INDEX concur_reindex_ind;
-REINDEX TABLE concur_reindex_tab;
-NOTICE: table "concur_reindex_tab" has no indexes to reindex
-REINDEX TABLE CONCURRENTLY concur_reindex_tab;
-NOTICE: table "concur_reindex_tab" has no indexes that can be reindexed concurrently
--- With a Schema
-CREATE SCHEMA concur_reindex_schema;
--- No indexes
-REINDEX SCHEMA concur_reindex_schema;
-REINDEX SCHEMA CONCURRENTLY concur_reindex_schema;
-CREATE TABLE concur_reindex_schema.tab (a int);
-CREATE INDEX ind ON concur_reindex_schema.tab (a);
--- One index reported
-REINDEX SCHEMA concur_reindex_schema;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=concur_reindex_schema.ind
-REINDEX SCHEMA CONCURRENTLY concur_reindex_schema;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=concur_reindex_schema.ind
--- One table on schema but no indexes
-DROP INDEX concur_reindex_schema.ind;
-REINDEX SCHEMA concur_reindex_schema;
-REINDEX SCHEMA CONCURRENTLY concur_reindex_schema;
-DROP SCHEMA concur_reindex_schema CASCADE;
-NOTICE: drop cascades to table concur_reindex_schema.tab
--- With a partitioned table, and nothing else.
-CREATE TABLE concur_reindex_part (id int) PARTITION BY RANGE (id);
-REINDEX TABLE concur_reindex_part;
-REINDEX TABLE CONCURRENTLY concur_reindex_part;
--- Partition that would be reindexed, still nothing.
-CREATE TABLE concur_reindex_child PARTITION OF concur_reindex_part
- FOR VALUES FROM (0) TO (10);
-REINDEX TABLE concur_reindex_part;
-REINDEX TABLE CONCURRENTLY concur_reindex_part;
--- Now add some indexes.
-CREATE INDEX concur_reindex_partidx ON concur_reindex_part (id);
-REINDEX INDEX concur_reindex_partidx;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_child_id_idx
-REINDEX INDEX CONCURRENTLY concur_reindex_partidx;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_child_id_idx
-REINDEX TABLE concur_reindex_part;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_child_id_idx
-REINDEX TABLE CONCURRENTLY concur_reindex_part;
-NOTICE: REINDEX END: command_tag=REINDEX type=index identity=public.concur_reindex_child_id_idx
-DROP TABLE concur_reindex_part;
--- Clean up
-DROP EVENT TRIGGER regress_reindex_start;
-DROP EVENT TRIGGER regress_reindex_end;
-DROP EVENT TRIGGER regress_reindex_end_snap;
-DROP FUNCTION reindex_end_command();
-DROP FUNCTION reindex_end_command_snap();
-DROP FUNCTION reindex_start_command();
-DROP TABLE concur_reindex_tab;
--- test Row Security Event Trigger
-RESET SESSION AUTHORIZATION;
-CREATE TABLE event_trigger_test (a integer, b text);
-CREATE OR REPLACE FUNCTION start_command()
-RETURNS event_trigger AS $$
-BEGIN
-RAISE NOTICE '% - ddl_command_start', tg_tag;
-END;
-$$ LANGUAGE plpgsql;
-CREATE OR REPLACE FUNCTION end_command()
-RETURNS event_trigger AS $$
-BEGIN
-RAISE NOTICE '% - ddl_command_end', tg_tag;
-END;
-$$ LANGUAGE plpgsql;
-CREATE OR REPLACE FUNCTION drop_sql_command()
-RETURNS event_trigger AS $$
-BEGIN
-RAISE NOTICE '% - sql_drop', tg_tag;
-END;
-$$ LANGUAGE plpgsql;
-CREATE EVENT TRIGGER start_rls_command ON ddl_command_start
- WHEN TAG IN ('CREATE POLICY', 'ALTER POLICY', 'DROP POLICY') EXECUTE PROCEDURE start_command();
-CREATE EVENT TRIGGER end_rls_command ON ddl_command_end
- WHEN TAG IN ('CREATE POLICY', 'ALTER POLICY', 'DROP POLICY') EXECUTE PROCEDURE end_command();
-CREATE EVENT TRIGGER sql_drop_command ON sql_drop
- WHEN TAG IN ('DROP POLICY') EXECUTE PROCEDURE drop_sql_command();
-CREATE POLICY p1 ON event_trigger_test USING (FALSE);
-NOTICE: CREATE POLICY - ddl_command_start
-NOTICE: CREATE POLICY - ddl_command_end
-ALTER POLICY p1 ON event_trigger_test USING (TRUE);
-NOTICE: ALTER POLICY - ddl_command_start
-NOTICE: ALTER POLICY - ddl_command_end
-ALTER POLICY p1 ON event_trigger_test RENAME TO p2;
-NOTICE: ALTER POLICY - ddl_command_start
-NOTICE: ALTER POLICY - ddl_command_end
-DROP POLICY p2 ON event_trigger_test;
-NOTICE: DROP POLICY - ddl_command_start
-NOTICE: DROP POLICY - sql_drop
-NOTICE: DROP POLICY - ddl_command_end
--- Check the object addresses of all the event triggers.
-SELECT - e.evtname, - pg_describe_object('pg_event_trigger'::regclass, e.oid, 0) as descr, - b.type, b.object_names, b.object_args, - pg_identify_object(a.classid, a.objid, a.objsubid) as ident - FROM pg_event_trigger as e, - LATERAL pg_identify_object_as_address('pg_event_trigger'::regclass, e.oid, 0) as b, - LATERAL pg_get_object_address(b.type, b.object_names, b.object_args) as a - ORDER BY e.evtname; - evtname | descr | type | object_names | object_args | ident --------------------+---------------------------------+---------------+---------------------+-------------+-------------------------------------------------------- - end_rls_command | event trigger end_rls_command | event trigger | {end_rls_command} | {} | ("event trigger",,end_rls_command,end_rls_command) - sql_drop_command | event trigger sql_drop_command | event trigger | {sql_drop_command} | {} | ("event trigger",,sql_drop_command,sql_drop_command) - start_rls_command | event trigger start_rls_command | event trigger | {start_rls_command} | {} | ("event trigger",,start_rls_command,start_rls_command) -(3 rows) - -DROP EVENT TRIGGER start_rls_command; -DROP EVENT TRIGGER end_rls_command; -DROP EVENT TRIGGER sql_drop_command; --- Check the GUC for disabling event triggers -CREATE FUNCTION test_event_trigger_guc() RETURNS event_trigger -LANGUAGE plpgsql AS $$ -DECLARE - obj record; -BEGIN - FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() - LOOP - RAISE NOTICE '% dropped %', tg_tag, obj.object_type; - END LOOP; -END; -$$; -CREATE EVENT TRIGGER test_event_trigger_guc - ON sql_drop - WHEN TAG IN ('DROP POLICY') EXECUTE FUNCTION test_event_trigger_guc(); -SET event_triggers = 'on'; -CREATE POLICY pguc ON event_trigger_test USING (FALSE); -DROP POLICY pguc ON event_trigger_test; -NOTICE: DROP POLICY dropped policy -CREATE POLICY pguc ON event_trigger_test USING (FALSE); -SET event_triggers = 'off'; -DROP POLICY pguc ON event_trigger_test; +psql: error: connection to server on socket "/tmp/zHbaMLr5OX/.s.PGSQL.22638" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/event_trigger_login.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/event_trigger_login.out --- /tmp/cirrus-ci-build/src/test/regress/expected/event_trigger_login.out 2024-09-11 00:19:52.053106672 +0000 +++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/event_trigger_login.out 2024-09-11 00:29:43.592156598 +0000 @@ -1,39 +1,2 @@ --- Login event triggers -CREATE TABLE user_logins(id serial, who text); -GRANT SELECT ON user_logins TO public; -CREATE FUNCTION on_login_proc() RETURNS event_trigger AS $$ -BEGIN - INSERT INTO user_logins (who) VALUES (SESSION_USER); - RAISE NOTICE 'You are welcome!'; -END; -$$ LANGUAGE plpgsql; -CREATE EVENT TRIGGER on_login_trigger ON login EXECUTE PROCEDURE on_login_proc(); -ALTER EVENT TRIGGER on_login_trigger ENABLE ALWAYS; -\c -NOTICE: You are welcome! -SELECT COUNT(*) FROM user_logins; - count -------- - 1 -(1 row) - -\c -NOTICE: You are welcome! 
-SELECT COUNT(*) FROM user_logins; - count -------- - 2 -(1 row) - --- Check dathasloginevt in system catalog -SELECT dathasloginevt FROM pg_database WHERE datname= :'DBNAME'; - dathasloginevt ----------------- - t -(1 row) - --- Cleanup -DROP TABLE user_logins; -DROP EVENT TRIGGER on_login_trigger; -DROP FUNCTION on_login_proc(); -\c +psql: error: connection to server on socket "/tmp/zHbaMLr5OX/.s.PGSQL.22638" failed: No such file or directory + Is the server running locally and accepting connections on that socket? diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/fast_default.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/fast_default.out --- /tmp/cirrus-ci-build/src/test/regress/expected/fast_default.out 2024-09-11 00:19:52.053106672 +0000 +++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/fast_default.out 2024-09-11 00:29:43.640156540 +0000 @@ -1,861 +1,2 @@ --- --- ALTER TABLE ADD COLUMN DEFAULT test --- -SET search_path = fast_default; -CREATE SCHEMA fast_default; -CREATE TABLE m(id OID); -INSERT INTO m VALUES (NULL::OID); -CREATE FUNCTION set(tabname name) RETURNS VOID -AS $$ -BEGIN - UPDATE m - SET id = (SELECT c.relfilenode - FROM pg_class AS c, pg_namespace AS s - WHERE c.relname = tabname - AND c.relnamespace = s.oid - AND s.nspname = 'fast_default'); -END; -$$ LANGUAGE 'plpgsql'; -CREATE FUNCTION comp() RETURNS TEXT -AS $$ -BEGIN - RETURN (SELECT CASE - WHEN m.id = c.relfilenode THEN 'Unchanged' - ELSE 'Rewritten' - END - FROM m, pg_class AS c, pg_namespace AS s - WHERE c.relname = 't' - AND c.relnamespace = s.oid - AND s.nspname = 'fast_default'); -END; -$$ LANGUAGE 'plpgsql'; -CREATE FUNCTION log_rewrite() RETURNS event_trigger -LANGUAGE plpgsql as -$func$ - -declare - this_schema text; -begin - select into this_schema relnamespace::regnamespace::text - from pg_class - where oid = pg_event_trigger_table_rewrite_oid(); - if this_schema = 'fast_default' - then - RAISE NOTICE 'rewriting table % for reason %', - pg_event_trigger_table_rewrite_oid()::regclass, - pg_event_trigger_table_rewrite_reason(); - end if; -end; -$func$; -CREATE TABLE has_volatile AS -SELECT * FROM generate_series(1,10) id; -CREATE EVENT TRIGGER has_volatile_rewrite - ON table_rewrite - EXECUTE PROCEDURE log_rewrite(); --- only the last of these should trigger a rewrite -ALTER TABLE has_volatile ADD col1 int; -ALTER TABLE has_volatile ADD col2 int DEFAULT 1; -ALTER TABLE has_volatile ADD col3 timestamptz DEFAULT current_timestamp; -ALTER TABLE has_volatile ADD col4 int DEFAULT (random() * 10000)::int; -NOTICE: rewriting table has_volatile for reason 2 --- Test a large sample of different datatypes -CREATE TABLE T(pk INT NOT NULL PRIMARY KEY, c_int INT DEFAULT 1); -SELECT set('t'); - set ------ - -(1 row) - -INSERT INTO T VALUES (1), (2); -ALTER TABLE T ADD COLUMN c_bpchar BPCHAR(5) DEFAULT 'hello', - ALTER COLUMN c_int SET DEFAULT 2; -INSERT INTO T VALUES (3), (4); -ALTER TABLE T ADD COLUMN c_text TEXT DEFAULT 'world', - ALTER COLUMN c_bpchar SET DEFAULT 'dog'; -INSERT INTO T VALUES (5), (6); -ALTER TABLE T ADD COLUMN c_date DATE DEFAULT '2016-06-02', - ALTER COLUMN c_text SET DEFAULT 'cat'; -INSERT INTO T VALUES (7), (8); -ALTER TABLE T ADD COLUMN c_timestamp TIMESTAMP DEFAULT '2016-09-01 12:00:00', - ADD COLUMN c_timestamp_null TIMESTAMP, - ALTER COLUMN c_date SET DEFAULT '2010-01-01'; -INSERT INTO T VALUES (9), (10); -ALTER TABLE T ADD COLUMN c_array TEXT[] - DEFAULT '{"This", "is", "the", "real", "world"}', - ALTER COLUMN 
c_timestamp SET DEFAULT '1970-12-31 11:12:13', - ALTER COLUMN c_timestamp_null SET DEFAULT '2016-09-29 12:00:00'; -INSERT INTO T VALUES (11), (12); -ALTER TABLE T ADD COLUMN c_small SMALLINT DEFAULT -5, - ADD COLUMN c_small_null SMALLINT, - ALTER COLUMN c_array - SET DEFAULT '{"This", "is", "no", "fantasy"}'; -INSERT INTO T VALUES (13), (14); -ALTER TABLE T ADD COLUMN c_big BIGINT DEFAULT 180000000000018, - ALTER COLUMN c_small SET DEFAULT 9, - ALTER COLUMN c_small_null SET DEFAULT 13; -INSERT INTO T VALUES (15), (16); -ALTER TABLE T ADD COLUMN c_num NUMERIC DEFAULT 1.00000000001, - ALTER COLUMN c_big SET DEFAULT -9999999999999999; -INSERT INTO T VALUES (17), (18); -ALTER TABLE T ADD COLUMN c_time TIME DEFAULT '12:00:00', - ALTER COLUMN c_num SET DEFAULT 2.000000000000002; -INSERT INTO T VALUES (19), (20); -ALTER TABLE T ADD COLUMN c_interval INTERVAL DEFAULT '1 day', - ALTER COLUMN c_time SET DEFAULT '23:59:59'; -INSERT INTO T VALUES (21), (22); -ALTER TABLE T ADD COLUMN c_hugetext TEXT DEFAULT repeat('abcdefg',1000), - ALTER COLUMN c_interval SET DEFAULT '3 hours'; -INSERT INTO T VALUES (23), (24); -ALTER TABLE T ALTER COLUMN c_interval DROP DEFAULT, - ALTER COLUMN c_hugetext SET DEFAULT repeat('poiuyt', 1000); -INSERT INTO T VALUES (25), (26); -ALTER TABLE T ALTER COLUMN c_bpchar DROP DEFAULT, - ALTER COLUMN c_date DROP DEFAULT, - ALTER COLUMN c_text DROP DEFAULT, - ALTER COLUMN c_timestamp DROP DEFAULT, - ALTER COLUMN c_array DROP DEFAULT, - ALTER COLUMN c_small DROP DEFAULT, - ALTER COLUMN c_big DROP DEFAULT, - ALTER COLUMN c_num DROP DEFAULT, - ALTER COLUMN c_time DROP DEFAULT, - ALTER COLUMN c_hugetext DROP DEFAULT; -INSERT INTO T VALUES (27), (28); -SELECT pk, c_int, c_bpchar, c_text, c_date, c_timestamp, - c_timestamp_null, c_array, c_small, c_small_null, - c_big, c_num, c_time, c_interval, - c_hugetext = repeat('abcdefg',1000) as c_hugetext_origdef, - c_hugetext = repeat('poiuyt', 1000) as c_hugetext_newdef -FROM T ORDER BY pk; - pk | c_int | c_bpchar | c_text | c_date | c_timestamp | c_timestamp_null | c_array | c_small | c_small_null | c_big | c_num | c_time | c_interval | c_hugetext_origdef | c_hugetext_newdef -----+-------+----------+--------+------------+--------------------------+--------------------------+--------------------------+---------+--------------+-------------------+-------------------+----------+------------+--------------------+------------------- - 1 | 1 | hello | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f - 2 | 1 | hello | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f - 3 | 2 | hello | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f - 4 | 2 | hello | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f - 5 | 2 | dog | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f - 6 | 2 | dog | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f - 7 | 2 | dog | cat | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f - 
8 | 2 | dog | cat | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f - 9 | 2 | dog | cat | 01-01-2010 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f - 10 | 2 | dog | cat | 01-01-2010 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f - 11 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f - 12 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f - 13 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f - 14 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f - 15 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f - 16 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f - 17 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 1.00000000001 | 12:00:00 | @ 1 day | t | f - 18 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 1.00000000001 | 12:00:00 | @ 1 day | t | f - 19 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 12:00:00 | @ 1 day | t | f - 20 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 12:00:00 | @ 1 day | t | f - 21 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | @ 1 day | t | f - 22 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | @ 1 day | t | f - 23 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | @ 3 hours | t | f - 24 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | @ 3 hours | t | f - 25 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | | f | t - 26 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | | f | t - 27 | 2 | | | | | Thu Sep 29 12:00:00 2016 | | | 13 | | | | | | - 28 | 2 | | | | | Thu Sep 29 12:00:00 2016 | 
| | 13 | | | | | | -(28 rows) - -SELECT comp(); - comp ------------ - Unchanged -(1 row) - -DROP TABLE T; --- Test expressions in the defaults -CREATE OR REPLACE FUNCTION foo(a INT) RETURNS TEXT AS $$ -DECLARE res TEXT := ''; - i INT; -BEGIN - i := 0; - WHILE (i < a) LOOP - res := res || chr(ascii('a') + i); - i := i + 1; - END LOOP; - RETURN res; -END; $$ LANGUAGE PLPGSQL STABLE; -CREATE TABLE T(pk INT NOT NULL PRIMARY KEY, c_int INT DEFAULT LENGTH(foo(6))); -SELECT set('t'); - set ------ - -(1 row) - -INSERT INTO T VALUES (1), (2); -ALTER TABLE T ADD COLUMN c_bpchar BPCHAR(5) DEFAULT foo(4), - ALTER COLUMN c_int SET DEFAULT LENGTH(foo(8)); -INSERT INTO T VALUES (3), (4); -ALTER TABLE T ADD COLUMN c_text TEXT DEFAULT foo(6), - ALTER COLUMN c_bpchar SET DEFAULT foo(3); -INSERT INTO T VALUES (5), (6); -ALTER TABLE T ADD COLUMN c_date DATE - DEFAULT '2016-06-02'::DATE + LENGTH(foo(10)), - ALTER COLUMN c_text SET DEFAULT foo(12); -INSERT INTO T VALUES (7), (8); -ALTER TABLE T ADD COLUMN c_timestamp TIMESTAMP - DEFAULT '2016-09-01'::DATE + LENGTH(foo(10)), - ALTER COLUMN c_date - SET DEFAULT '2010-01-01'::DATE - LENGTH(foo(4)); -INSERT INTO T VALUES (9), (10); -ALTER TABLE T ADD COLUMN c_array TEXT[] - DEFAULT ('{"This", "is", "' || foo(4) || - '","the", "real", "world"}')::TEXT[], - ALTER COLUMN c_timestamp - SET DEFAULT '1970-12-31'::DATE + LENGTH(foo(30)); -INSERT INTO T VALUES (11), (12); -ALTER TABLE T ALTER COLUMN c_int DROP DEFAULT, - ALTER COLUMN c_array - SET DEFAULT ('{"This", "is", "' || foo(1) || - '", "fantasy"}')::text[]; -INSERT INTO T VALUES (13), (14); -ALTER TABLE T ALTER COLUMN c_bpchar DROP DEFAULT, - ALTER COLUMN c_date DROP DEFAULT, - ALTER COLUMN c_text DROP DEFAULT, - ALTER COLUMN c_timestamp DROP DEFAULT, - ALTER COLUMN c_array DROP DEFAULT; -INSERT INTO T VALUES (15), (16); -SELECT * FROM T; - pk | c_int | c_bpchar | c_text | c_date | c_timestamp | c_array -----+-------+----------+--------------+------------+--------------------------+------------------------------- - 1 | 6 | abcd | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} - 2 | 6 | abcd | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} - 3 | 8 | abcd | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} - 4 | 8 | abcd | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} - 5 | 8 | abc | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} - 6 | 8 | abc | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} - 7 | 8 | abc | abcdefghijkl | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} - 8 | 8 | abc | abcdefghijkl | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} - 9 | 8 | abc | abcdefghijkl | 12-28-2009 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} - 10 | 8 | abc | abcdefghijkl | 12-28-2009 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} - 11 | 8 | abc | abcdefghijkl | 12-28-2009 | Sat Jan 30 00:00:00 1971 | {This,is,abcd,the,real,world} - 12 | 8 | abc | abcdefghijkl | 12-28-2009 | Sat Jan 30 00:00:00 1971 | {This,is,abcd,the,real,world} - 13 | | abc | abcdefghijkl | 12-28-2009 | Sat Jan 30 00:00:00 1971 | {This,is,a,fantasy} - 14 | | abc | abcdefghijkl | 12-28-2009 | Sat Jan 30 00:00:00 1971 | {This,is,a,fantasy} - 15 | | | | | | - 16 | | | | | | -(16 rows) - -SELECT comp(); - comp ------------ - Unchanged -(1 row) - -DROP TABLE T; -DROP FUNCTION foo(INT); --- Fall back to full 
rewrite for volatile expressions -CREATE TABLE T(pk INT NOT NULL PRIMARY KEY); -INSERT INTO T VALUES (1); -SELECT set('t'); - set ------ - -(1 row) - --- now() is stable, because it returns the transaction timestamp -ALTER TABLE T ADD COLUMN c1 TIMESTAMP DEFAULT now(); -SELECT comp(); - comp ------------ - Unchanged -(1 row) - --- clock_timestamp() is volatile -ALTER TABLE T ADD COLUMN c2 TIMESTAMP DEFAULT clock_timestamp(); -NOTICE: rewriting table t for reason 2 -SELECT comp(); - comp ------------ - Rewritten -(1 row) - --- check that we notice insertion of a volatile default argument -CREATE FUNCTION foolme(timestamptz DEFAULT clock_timestamp()) - RETURNS timestamptz - IMMUTABLE AS 'select $1' LANGUAGE sql; -ALTER TABLE T ADD COLUMN c3 timestamptz DEFAULT foolme(); -NOTICE: rewriting table t for reason 2 -SELECT attname, atthasmissing, attmissingval FROM pg_attribute - WHERE attrelid = 't'::regclass AND attnum > 0 - ORDER BY attnum; - attname | atthasmissing | attmissingval ----------+---------------+--------------- - pk | f | - c1 | f | - c2 | f | - c3 | f | -(4 rows) - -DROP TABLE T; -DROP FUNCTION foolme(timestamptz); --- Simple querie -CREATE TABLE T (pk INT NOT NULL PRIMARY KEY); -SELECT set('t'); - set ------ - -(1 row) - -INSERT INTO T SELECT * FROM generate_series(1, 10) a; -ALTER TABLE T ADD COLUMN c_bigint BIGINT NOT NULL DEFAULT -1; -INSERT INTO T SELECT b, b - 10 FROM generate_series(11, 20) a(b); -ALTER TABLE T ADD COLUMN c_text TEXT DEFAULT 'hello'; -INSERT INTO T SELECT b, b - 10, (b + 10)::text FROM generate_series(21, 30) a(b); --- WHERE clause -SELECT c_bigint, c_text FROM T WHERE c_bigint = -1 LIMIT 1; - c_bigint | c_text -----------+-------- - -1 | hello -(1 row) - -EXPLAIN (VERBOSE TRUE, COSTS FALSE) -SELECT c_bigint, c_text FROM T WHERE c_bigint = -1 LIMIT 1; - QUERY PLAN ----------------------------------------------- - Limit - Output: c_bigint, c_text - -> Seq Scan on fast_default.t - Output: c_bigint, c_text - Filter: (t.c_bigint = '-1'::integer) -(5 rows) - -SELECT c_bigint, c_text FROM T WHERE c_text = 'hello' LIMIT 1; - c_bigint | c_text -----------+-------- - -1 | hello -(1 row) - -EXPLAIN (VERBOSE TRUE, COSTS FALSE) SELECT c_bigint, c_text FROM T WHERE c_text = 'hello' LIMIT 1; - QUERY PLAN --------------------------------------------- - Limit - Output: c_bigint, c_text - -> Seq Scan on fast_default.t - Output: c_bigint, c_text - Filter: (t.c_text = 'hello'::text) -(5 rows) - --- COALESCE -SELECT COALESCE(c_bigint, pk), COALESCE(c_text, pk::text) -FROM T -ORDER BY pk LIMIT 10; - coalesce | coalesce -----------+---------- - -1 | hello - -1 | hello - -1 | hello - -1 | hello - -1 | hello - -1 | hello - -1 | hello - -1 | hello - -1 | hello - -1 | hello -(10 rows) - --- Aggregate function -SELECT SUM(c_bigint), MAX(c_text COLLATE "C" ), MIN(c_text COLLATE "C") FROM T; - sum | max | min ------+-------+----- - 200 | hello | 31 -(1 row) - --- ORDER BY -SELECT * FROM T ORDER BY c_bigint, c_text, pk LIMIT 10; - pk | c_bigint | c_text -----+----------+-------- - 1 | -1 | hello - 2 | -1 | hello - 3 | -1 | hello - 4 | -1 | hello - 5 | -1 | hello - 6 | -1 | hello - 7 | -1 | hello - 8 | -1 | hello - 9 | -1 | hello - 10 | -1 | hello -(10 rows) - -EXPLAIN (VERBOSE TRUE, COSTS FALSE) -SELECT * FROM T ORDER BY c_bigint, c_text, pk LIMIT 10; - QUERY PLAN ----------------------------------------------- - Limit - Output: pk, c_bigint, c_text - -> Sort - Output: pk, c_bigint, c_text - Sort Key: t.c_bigint, t.c_text, t.pk - -> Seq Scan on fast_default.t - Output: pk, c_bigint, 
c_text -(7 rows) - --- LIMIT -SELECT * FROM T WHERE c_bigint > -1 ORDER BY c_bigint, c_text, pk LIMIT 10; - pk | c_bigint | c_text -----+----------+-------- - 11 | 1 | hello - 12 | 2 | hello - 13 | 3 | hello - 14 | 4 | hello - 15 | 5 | hello - 16 | 6 | hello - 17 | 7 | hello - 18 | 8 | hello - 19 | 9 | hello - 20 | 10 | hello -(10 rows) - -EXPLAIN (VERBOSE TRUE, COSTS FALSE) -SELECT * FROM T WHERE c_bigint > -1 ORDER BY c_bigint, c_text, pk LIMIT 10; - QUERY PLAN ----------------------------------------------------- - Limit - Output: pk, c_bigint, c_text - -> Sort - Output: pk, c_bigint, c_text - Sort Key: t.c_bigint, t.c_text, t.pk - -> Seq Scan on fast_default.t - Output: pk, c_bigint, c_text - Filter: (t.c_bigint > '-1'::integer) -(8 rows) - --- DELETE with RETURNING -DELETE FROM T WHERE pk BETWEEN 10 AND 20 RETURNING *; - pk | c_bigint | c_text -----+----------+-------- - 10 | -1 | hello - 11 | 1 | hello - 12 | 2 | hello - 13 | 3 | hello - 14 | 4 | hello - 15 | 5 | hello - 16 | 6 | hello - 17 | 7 | hello - 18 | 8 | hello - 19 | 9 | hello - 20 | 10 | hello -(11 rows) - -EXPLAIN (VERBOSE TRUE, COSTS FALSE) -DELETE FROM T WHERE pk BETWEEN 10 AND 20 RETURNING *; - QUERY PLAN ------------------------------------------------------------ - Delete on fast_default.t - Output: pk, c_bigint, c_text - -> Bitmap Heap Scan on fast_default.t - Output: ctid - Recheck Cond: ((t.pk >= 10) AND (t.pk <= 20)) - -> Bitmap Index Scan on t_pkey - Index Cond: ((t.pk >= 10) AND (t.pk <= 20)) -(7 rows) - --- UPDATE -UPDATE T SET c_text = '"' || c_text || '"' WHERE pk < 10; -SELECT * FROM T WHERE c_text LIKE '"%"' ORDER BY PK; - pk | c_bigint | c_text -----+----------+--------- - 1 | -1 | "hello" - 2 | -1 | "hello" - 3 | -1 | "hello" - 4 | -1 | "hello" - 5 | -1 | "hello" - 6 | -1 | "hello" - 7 | -1 | "hello" - 8 | -1 | "hello" - 9 | -1 | "hello" -(9 rows) - -SELECT comp(); - comp ------------ - Unchanged -(1 row) - -DROP TABLE T; --- Combine with other DDL -CREATE TABLE T(pk INT NOT NULL PRIMARY KEY); -SELECT set('t'); - set ------ - -(1 row) - -INSERT INTO T VALUES (1), (2); -ALTER TABLE T ADD COLUMN c_int INT NOT NULL DEFAULT -1; -INSERT INTO T VALUES (3), (4); -ALTER TABLE T ADD COLUMN c_text TEXT DEFAULT 'Hello'; -INSERT INTO T VALUES (5), (6); -ALTER TABLE T ALTER COLUMN c_text SET DEFAULT 'world', - ALTER COLUMN c_int SET DEFAULT 1; -INSERT INTO T VALUES (7), (8); -SELECT * FROM T ORDER BY pk; - pk | c_int | c_text -----+-------+-------- - 1 | -1 | Hello - 2 | -1 | Hello - 3 | -1 | Hello - 4 | -1 | Hello - 5 | -1 | Hello - 6 | -1 | Hello - 7 | 1 | world - 8 | 1 | world -(8 rows) - --- Add an index -CREATE INDEX i ON T(c_int, c_text); -SELECT c_text FROM T WHERE c_int = -1; - c_text --------- - Hello - Hello - Hello - Hello - Hello - Hello -(6 rows) - -SELECT comp(); - comp ------------ - Unchanged -(1 row) - --- query to exercise expand_tuple function -CREATE TABLE t1 AS -SELECT 1::int AS a , 2::int AS b -FROM generate_series(1,20) q; -ALTER TABLE t1 ADD COLUMN c text; -SELECT a, - stddev(cast((SELECT sum(1) FROM generate_series(1,20) x) AS float4)) - OVER (PARTITION BY a,b,c ORDER BY b) - AS z -FROM t1; - a | z ----+--- - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 -(20 rows) - -DROP TABLE T; --- test that we account for missing columns without defaults correctly --- in expand_tuple, and that rows are correctly expanded for triggers -CREATE FUNCTION test_trigger() -RETURNS trigger -LANGUAGE 
plpgsql -AS $$ - -begin - raise notice 'old tuple: %', to_json(OLD)::text; - if TG_OP = 'DELETE' - then - return OLD; - else - return NEW; - end if; -end; - -$$; --- 2 new columns, both have defaults -CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); -INSERT INTO t (a,b,c) VALUES (1,2,3); -ALTER TABLE t ADD COLUMN x int NOT NULL DEFAULT 4; -ALTER TABLE t ADD COLUMN y int NOT NULL DEFAULT 5; -CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | 3 | 4 | 5 -(1 row) - -UPDATE t SET y = 2; -NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":3,"x":4,"y":5} -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | 3 | 4 | 2 -(1 row) - -DROP TABLE t; --- 2 new columns, first has default -CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); -INSERT INTO t (a,b,c) VALUES (1,2,3); -ALTER TABLE t ADD COLUMN x int NOT NULL DEFAULT 4; -ALTER TABLE t ADD COLUMN y int; -CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | 3 | 4 | -(1 row) - -UPDATE t SET y = 2; -NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":3,"x":4,"y":null} -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | 3 | 4 | 2 -(1 row) - -DROP TABLE t; --- 2 new columns, second has default -CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); -INSERT INTO t (a,b,c) VALUES (1,2,3); -ALTER TABLE t ADD COLUMN x int; -ALTER TABLE t ADD COLUMN y int NOT NULL DEFAULT 5; -CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | 3 | | 5 -(1 row) - -UPDATE t SET y = 2; -NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":3,"x":null,"y":5} -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | 3 | | 2 -(1 row) - -DROP TABLE t; --- 2 new columns, neither has default -CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); -INSERT INTO t (a,b,c) VALUES (1,2,3); -ALTER TABLE t ADD COLUMN x int; -ALTER TABLE t ADD COLUMN y int; -CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | 3 | | -(1 row) - -UPDATE t SET y = 2; -NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":3,"x":null,"y":null} -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | 3 | | 2 -(1 row) - -DROP TABLE t; --- same as last 4 tests but here the last original column has a NULL value --- 2 new columns, both have defaults -CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); -INSERT INTO t (a,b,c) VALUES (1,2,NULL); -ALTER TABLE t ADD COLUMN x int NOT NULL DEFAULT 4; -ALTER TABLE t ADD COLUMN y int NOT NULL DEFAULT 5; -CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | | 4 | 5 -(1 row) - -UPDATE t SET y = 2; -NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":null,"x":4,"y":5} -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | | 4 | 2 -(1 row) - -DROP TABLE t; --- 2 new columns, first has default -CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); -INSERT INTO t (a,b,c) VALUES (1,2,NULL); -ALTER TABLE t ADD COLUMN x int NOT NULL DEFAULT 4; -ALTER TABLE t ADD COLUMN y int; -CREATE 
TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | | 4 | -(1 row) - -UPDATE t SET y = 2; -NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":null,"x":4,"y":null} -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | | 4 | 2 -(1 row) - -DROP TABLE t; --- 2 new columns, second has default -CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); -INSERT INTO t (a,b,c) VALUES (1,2,NULL); -ALTER TABLE t ADD COLUMN x int; -ALTER TABLE t ADD COLUMN y int NOT NULL DEFAULT 5; -CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | | | 5 -(1 row) - -UPDATE t SET y = 2; -NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":null,"x":null,"y":5} -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | | | 2 -(1 row) - -DROP TABLE t; --- 2 new columns, neither has default -CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); -INSERT INTO t (a,b,c) VALUES (1,2,NULL); -ALTER TABLE t ADD COLUMN x int; -ALTER TABLE t ADD COLUMN y int; -CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | | | -(1 row) - -UPDATE t SET y = 2; -NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":null,"x":null,"y":null} -SELECT * FROM t; - id | a | b | c | x | y -----+---+---+---+---+--- - 1 | 1 | 2 | | | 2 -(1 row) - -DROP TABLE t; --- make sure expanded tuple has correct self pointer --- it will be required by the RI trigger doing the cascading delete -CREATE TABLE leader (a int PRIMARY KEY, b int); -CREATE TABLE follower (a int REFERENCES leader ON DELETE CASCADE, b int); -INSERT INTO leader VALUES (1, 1), (2, 2); -ALTER TABLE leader ADD c int; -ALTER TABLE leader DROP c; -DELETE FROM leader; --- check that ALTER TABLE ... 
ALTER TYPE does the right thing -CREATE TABLE vtype( a integer); -INSERT INTO vtype VALUES (1); -ALTER TABLE vtype ADD COLUMN b DOUBLE PRECISION DEFAULT 0.2; -ALTER TABLE vtype ADD COLUMN c BOOLEAN DEFAULT true; -SELECT * FROM vtype; - a | b | c ----+-----+--- - 1 | 0.2 | t -(1 row) - -ALTER TABLE vtype - ALTER b TYPE text USING b::text, - ALTER c TYPE text USING c::text; -NOTICE: rewriting table vtype for reason 4 -SELECT * FROM vtype; - a | b | c ----+-----+------ - 1 | 0.2 | true -(1 row) - --- also check the case that doesn't rewrite the table -CREATE TABLE vtype2 (a int); -INSERT INTO vtype2 VALUES (1); -ALTER TABLE vtype2 ADD COLUMN b varchar(10) DEFAULT 'xxx'; -ALTER TABLE vtype2 ALTER COLUMN b SET DEFAULT 'yyy'; -INSERT INTO vtype2 VALUES (2); -ALTER TABLE vtype2 ALTER COLUMN b TYPE varchar(20) USING b::varchar(20); -SELECT * FROM vtype2; - a | b ----+----- - 1 | xxx - 2 | yyy -(2 rows) - --- Ensure that defaults are checked when evaluating whether HOT update --- is possible, this was broken for a while: --- https://postgr.es/m/20190202133521.ylauh3ckqa7colzj%40alap3.anarazel.de -BEGIN; -CREATE TABLE t(); -INSERT INTO t DEFAULT VALUES; -ALTER TABLE t ADD COLUMN a int DEFAULT 1; -CREATE INDEX ON t(a); --- set column with a default 1 to NULL, due to a bug that wasn't --- noticed has heap_getattr buggily returned NULL for default columns -UPDATE t SET a = NULL; --- verify that index and non-index scans show the same result -SET LOCAL enable_seqscan = true; -SELECT * FROM t WHERE a IS NULL; - a ---- - -(1 row) - -SET LOCAL enable_seqscan = false; -SELECT * FROM t WHERE a IS NULL; - a ---- - -(1 row) - -ROLLBACK; --- verify that a default set on a non-plain table doesn't set a missing --- value on the attribute -CREATE FOREIGN DATA WRAPPER dummy; -CREATE SERVER s0 FOREIGN DATA WRAPPER dummy; -CREATE FOREIGN TABLE ft1 (c1 integer NOT NULL) SERVER s0; -ALTER FOREIGN TABLE ft1 ADD COLUMN c8 integer DEFAULT 0; -ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE char(10); -SELECT count(*) - FROM pg_attribute - WHERE attrelid = 'ft1'::regclass AND - (attmissingval IS NOT NULL OR atthasmissing); - count -------- - 0 -(1 row) - --- cleanup -DROP FOREIGN TABLE ft1; -DROP SERVER s0; -DROP FOREIGN DATA WRAPPER dummy; -DROP TABLE vtype; -DROP TABLE vtype2; -DROP TABLE follower; -DROP TABLE leader; -DROP FUNCTION test_trigger(); -DROP TABLE t1; -DROP FUNCTION set(name); -DROP FUNCTION comp(); -DROP TABLE m; -DROP TABLE has_volatile; -DROP EVENT TRIGGER has_volatile_rewrite; -DROP FUNCTION log_rewrite; -DROP SCHEMA fast_default; --- Leave a table with an active fast default in place, for pg_upgrade testing -set search_path = public; -create table has_fast_default(f1 int); -insert into has_fast_default values(1); -alter table has_fast_default add column f2 int default 42; -table has_fast_default; - f1 | f2 -----+---- - 1 | 42 -(1 row) - +psql: error: connection to server on socket "/tmp/zHbaMLr5OX/.s.PGSQL.22638" failed: No such file or directory + Is the server running locally and accepting connections on that socket? 
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/tablespace.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/tablespace.out --- /tmp/cirrus-ci-build/src/test/regress/expected/tablespace.out 2024-09-11 00:19:52.121322968 +0000 +++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/tablespace.out 2024-09-11 00:29:43.712156452 +0000 @@ -1,968 +1,2 @@ --- relative tablespace locations are not allowed -CREATE TABLESPACE regress_tblspace LOCATION 'relative'; -- fail -ERROR: tablespace location must be an absolute path --- empty tablespace locations are not usually allowed -CREATE TABLESPACE regress_tblspace LOCATION ''; -- fail -ERROR: tablespace location must be an absolute path --- as a special developer-only option to allow us to use tablespaces --- with streaming replication on the same server, an empty location --- can be allowed as a way to say that the tablespace should be created --- as a directory in pg_tblspc, rather than being a symlink -SET allow_in_place_tablespaces = true; --- create a tablespace using WITH clause -CREATE TABLESPACE regress_tblspacewith LOCATION '' WITH (some_nonexistent_parameter = true); -- fail -ERROR: unrecognized parameter "some_nonexistent_parameter" -CREATE TABLESPACE regress_tblspacewith LOCATION '' WITH (random_page_cost = 3.0); -- ok --- check to see the parameter was used -SELECT spcoptions FROM pg_tablespace WHERE spcname = 'regress_tblspacewith'; - spcoptions ------------------------- - {random_page_cost=3.0} -(1 row) - --- drop the tablespace so we can re-use the location -DROP TABLESPACE regress_tblspacewith; --- This returns a relative path as of an effect of allow_in_place_tablespaces, --- masking the tablespace OID used in the path name. -SELECT regexp_replace(pg_tablespace_location(oid), '(pg_tblspc)/(\d+)', '\1/NNN') - FROM pg_tablespace WHERE spcname = 'regress_tblspace'; - regexp_replace ----------------- - pg_tblspc/NNN -(1 row) - --- try setting and resetting some properties for the new tablespace -ALTER TABLESPACE regress_tblspace SET (random_page_cost = 1.0, seq_page_cost = 1.1); -ALTER TABLESPACE regress_tblspace SET (some_nonexistent_parameter = true); -- fail -ERROR: unrecognized parameter "some_nonexistent_parameter" -ALTER TABLESPACE regress_tblspace RESET (random_page_cost = 2.0); -- fail -ERROR: RESET must not include values for parameters -ALTER TABLESPACE regress_tblspace RESET (random_page_cost, effective_io_concurrency); -- ok --- REINDEX (TABLESPACE) --- catalogs and system tablespaces --- system catalog, fail -REINDEX (TABLESPACE regress_tblspace) TABLE pg_am; -ERROR: cannot move system relation "pg_am_name_index" -REINDEX (TABLESPACE regress_tblspace) TABLE CONCURRENTLY pg_am; -ERROR: cannot reindex system catalogs concurrently --- shared catalog, fail -REINDEX (TABLESPACE regress_tblspace) TABLE pg_authid; -ERROR: cannot move system relation "pg_authid_rolname_index" -REINDEX (TABLESPACE regress_tblspace) TABLE CONCURRENTLY pg_authid; -ERROR: cannot reindex system catalogs concurrently --- toast relations, fail -REINDEX (TABLESPACE regress_tblspace) INDEX pg_toast.pg_toast_1260_index; -ERROR: cannot move system relation "pg_toast_1260_index" -REINDEX (TABLESPACE regress_tblspace) INDEX CONCURRENTLY pg_toast.pg_toast_1260_index; -ERROR: cannot reindex system catalogs concurrently -REINDEX (TABLESPACE regress_tblspace) TABLE pg_toast.pg_toast_1260; -ERROR: cannot move system relation "pg_toast_1260_index" -REINDEX (TABLESPACE regress_tblspace) TABLE 
CONCURRENTLY pg_toast.pg_toast_1260; -ERROR: cannot reindex system catalogs concurrently --- system catalog, fail -REINDEX (TABLESPACE pg_global) TABLE pg_authid; -ERROR: cannot move system relation "pg_authid_rolname_index" -REINDEX (TABLESPACE pg_global) TABLE CONCURRENTLY pg_authid; -ERROR: cannot reindex system catalogs concurrently --- table with toast relation -CREATE TABLE regress_tblspace_test_tbl (num1 bigint, num2 double precision, t text); -INSERT INTO regress_tblspace_test_tbl (num1, num2, t) - SELECT round(random()*100), random(), 'text' - FROM generate_series(1, 10) s(i); -CREATE INDEX regress_tblspace_test_tbl_idx ON regress_tblspace_test_tbl (num1); --- move to global tablespace, fail -REINDEX (TABLESPACE pg_global) INDEX regress_tblspace_test_tbl_idx; -ERROR: only shared relations can be placed in pg_global tablespace -REINDEX (TABLESPACE pg_global) INDEX CONCURRENTLY regress_tblspace_test_tbl_idx; -ERROR: cannot move non-shared relation to tablespace "pg_global" --- check transactional behavior of REINDEX (TABLESPACE) -BEGIN; -REINDEX (TABLESPACE regress_tblspace) INDEX regress_tblspace_test_tbl_idx; -REINDEX (TABLESPACE regress_tblspace) TABLE regress_tblspace_test_tbl; -ROLLBACK; --- no relation moved to the new tablespace -SELECT c.relname FROM pg_class c, pg_tablespace s - WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace'; - relname ---------- -(0 rows) - --- check that all indexes are moved to a new tablespace with different --- relfilenode. --- Save first the existing relfilenode for the toast and main relations. -SELECT relfilenode as main_filenode FROM pg_class - WHERE relname = 'regress_tblspace_test_tbl_idx' \gset -SELECT relfilenode as toast_filenode FROM pg_class - WHERE oid = - (SELECT i.indexrelid - FROM pg_class c, - pg_index i - WHERE i.indrelid = c.reltoastrelid AND - c.relname = 'regress_tblspace_test_tbl') \gset -REINDEX (TABLESPACE regress_tblspace) TABLE regress_tblspace_test_tbl; -SELECT c.relname FROM pg_class c, pg_tablespace s - WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' - ORDER BY c.relname; - relname -------------------------------- - regress_tblspace_test_tbl_idx -(1 row) - -ALTER TABLE regress_tblspace_test_tbl SET TABLESPACE regress_tblspace; -ALTER TABLE regress_tblspace_test_tbl SET TABLESPACE pg_default; -SELECT c.relname FROM pg_class c, pg_tablespace s - WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' - ORDER BY c.relname; - relname -------------------------------- - regress_tblspace_test_tbl_idx -(1 row) - --- Move back to the default tablespace. 
-ALTER INDEX regress_tblspace_test_tbl_idx SET TABLESPACE pg_default; -SELECT c.relname FROM pg_class c, pg_tablespace s - WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' - ORDER BY c.relname; - relname ---------- -(0 rows) - -REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE regress_tblspace_test_tbl; -SELECT c.relname FROM pg_class c, pg_tablespace s - WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' - ORDER BY c.relname; - relname -------------------------------- - regress_tblspace_test_tbl_idx -(1 row) - -SELECT relfilenode = :main_filenode AS main_same FROM pg_class - WHERE relname = 'regress_tblspace_test_tbl_idx'; - main_same ------------ - f -(1 row) - -SELECT relfilenode = :toast_filenode as toast_same FROM pg_class - WHERE oid = - (SELECT i.indexrelid - FROM pg_class c, - pg_index i - WHERE i.indrelid = c.reltoastrelid AND - c.relname = 'regress_tblspace_test_tbl'); - toast_same ------------- - f -(1 row) - -DROP TABLE regress_tblspace_test_tbl; --- REINDEX (TABLESPACE) with partitions --- Create a partition tree and check the set of relations reindexed --- with their new tablespace. -CREATE TABLE tbspace_reindex_part (c1 int, c2 int) PARTITION BY RANGE (c1); -CREATE TABLE tbspace_reindex_part_0 PARTITION OF tbspace_reindex_part - FOR VALUES FROM (0) TO (10) PARTITION BY list (c2); -CREATE TABLE tbspace_reindex_part_0_1 PARTITION OF tbspace_reindex_part_0 - FOR VALUES IN (1); -CREATE TABLE tbspace_reindex_part_0_2 PARTITION OF tbspace_reindex_part_0 - FOR VALUES IN (2); --- This partitioned table will have no partitions. -CREATE TABLE tbspace_reindex_part_10 PARTITION OF tbspace_reindex_part - FOR VALUES FROM (10) TO (20) PARTITION BY list (c2); --- Create some partitioned indexes -CREATE INDEX tbspace_reindex_part_index ON ONLY tbspace_reindex_part (c1); -CREATE INDEX tbspace_reindex_part_index_0 ON ONLY tbspace_reindex_part_0 (c1); -ALTER INDEX tbspace_reindex_part_index ATTACH PARTITION tbspace_reindex_part_index_0; --- This partitioned index will have no partitions. -CREATE INDEX tbspace_reindex_part_index_10 ON ONLY tbspace_reindex_part_10 (c1); -ALTER INDEX tbspace_reindex_part_index ATTACH PARTITION tbspace_reindex_part_index_10; -CREATE INDEX tbspace_reindex_part_index_0_1 ON ONLY tbspace_reindex_part_0_1 (c1); -ALTER INDEX tbspace_reindex_part_index_0 ATTACH PARTITION tbspace_reindex_part_index_0_1; -CREATE INDEX tbspace_reindex_part_index_0_2 ON ONLY tbspace_reindex_part_0_2 (c1); -ALTER INDEX tbspace_reindex_part_index_0 ATTACH PARTITION tbspace_reindex_part_index_0_2; -SELECT relid, parentrelid, level FROM pg_partition_tree('tbspace_reindex_part_index') - ORDER BY relid, level; - relid | parentrelid | level ---------------------------------+------------------------------+------- - tbspace_reindex_part_index | | 0 - tbspace_reindex_part_index_0 | tbspace_reindex_part_index | 1 - tbspace_reindex_part_index_10 | tbspace_reindex_part_index | 1 - tbspace_reindex_part_index_0_1 | tbspace_reindex_part_index_0 | 2 - tbspace_reindex_part_index_0_2 | tbspace_reindex_part_index_0 | 2 -(5 rows) - --- Track the original tablespace, relfilenode and OID of each index --- in the tree. -CREATE TEMP TABLE reindex_temp_before AS - SELECT oid, relname, relfilenode, reltablespace - FROM pg_class - WHERE relname ~ 'tbspace_reindex_part_index'; -REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE tbspace_reindex_part; --- REINDEX CONCURRENTLY changes the OID of the old relation, hence a check --- based on the relation name below. 
-SELECT b.relname, - CASE WHEN a.relfilenode = b.relfilenode THEN 'relfilenode is unchanged' - ELSE 'relfilenode has changed' END AS filenode, - CASE WHEN a.reltablespace = b.reltablespace THEN 'reltablespace is unchanged' - ELSE 'reltablespace has changed' END AS tbspace - FROM reindex_temp_before b JOIN pg_class a ON b.relname = a.relname - ORDER BY 1; - relname | filenode | tbspace ---------------------------------+--------------------------+---------------------------- - tbspace_reindex_part_index | relfilenode is unchanged | reltablespace is unchanged - tbspace_reindex_part_index_0 | relfilenode is unchanged | reltablespace is unchanged - tbspace_reindex_part_index_0_1 | relfilenode has changed | reltablespace has changed - tbspace_reindex_part_index_0_2 | relfilenode has changed | reltablespace has changed - tbspace_reindex_part_index_10 | relfilenode is unchanged | reltablespace is unchanged -(5 rows) - -DROP TABLE tbspace_reindex_part; --- create a schema we can use -CREATE SCHEMA testschema; --- try a table -CREATE TABLE testschema.foo (i int) TABLESPACE regress_tblspace; -SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c - where c.reltablespace = t.oid AND c.relname = 'foo'; - relname | spcname ----------+------------------ - foo | regress_tblspace -(1 row) - -INSERT INTO testschema.foo VALUES(1); -INSERT INTO testschema.foo VALUES(2); --- tables from dynamic sources -CREATE TABLE testschema.asselect TABLESPACE regress_tblspace AS SELECT 1; -SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c - where c.reltablespace = t.oid AND c.relname = 'asselect'; - relname | spcname -----------+------------------ - asselect | regress_tblspace -(1 row) - -PREPARE selectsource(int) AS SELECT $1; -CREATE TABLE testschema.asexecute TABLESPACE regress_tblspace - AS EXECUTE selectsource(2); -SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c - where c.reltablespace = t.oid AND c.relname = 'asexecute'; - relname | spcname ------------+------------------ - asexecute | regress_tblspace -(1 row) - --- index -CREATE INDEX foo_idx on testschema.foo(i) TABLESPACE regress_tblspace; -SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c - where c.reltablespace = t.oid AND c.relname = 'foo_idx'; - relname | spcname ----------+------------------ - foo_idx | regress_tblspace -(1 row) - --- check \d output -\d testschema.foo - Table "testschema.foo" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - i | integer | | | -Indexes: - "foo_idx" btree (i), tablespace "regress_tblspace" -Tablespace: "regress_tblspace" - -\d testschema.foo_idx - Index "testschema.foo_idx" - Column | Type | Key? 
| Definition ---------+---------+------+------------ - i | integer | yes | i -btree, for table "testschema.foo" -Tablespace: "regress_tblspace" - --- --- partitioned table --- -CREATE TABLE testschema.part (a int) PARTITION BY LIST (a); -SET default_tablespace TO pg_global; -CREATE TABLE testschema.part_1 PARTITION OF testschema.part FOR VALUES IN (1); -ERROR: only shared relations can be placed in pg_global tablespace -RESET default_tablespace; -CREATE TABLE testschema.part_1 PARTITION OF testschema.part FOR VALUES IN (1); -SET default_tablespace TO regress_tblspace; -CREATE TABLE testschema.part_2 PARTITION OF testschema.part FOR VALUES IN (2); -SET default_tablespace TO pg_global; -CREATE TABLE testschema.part_3 PARTITION OF testschema.part FOR VALUES IN (3); -ERROR: only shared relations can be placed in pg_global tablespace -ALTER TABLE testschema.part SET TABLESPACE regress_tblspace; -CREATE TABLE testschema.part_3 PARTITION OF testschema.part FOR VALUES IN (3); -CREATE TABLE testschema.part_4 PARTITION OF testschema.part FOR VALUES IN (4) - TABLESPACE pg_default; -CREATE TABLE testschema.part_56 PARTITION OF testschema.part FOR VALUES IN (5, 6) - PARTITION BY LIST (a); -ALTER TABLE testschema.part SET TABLESPACE pg_default; -CREATE TABLE testschema.part_78 PARTITION OF testschema.part FOR VALUES IN (7, 8) - PARTITION BY LIST (a); -ERROR: only shared relations can be placed in pg_global tablespace -CREATE TABLE testschema.part_910 PARTITION OF testschema.part FOR VALUES IN (9, 10) - PARTITION BY LIST (a) TABLESPACE regress_tblspace; -RESET default_tablespace; -CREATE TABLE testschema.part_78 PARTITION OF testschema.part FOR VALUES IN (7, 8) - PARTITION BY LIST (a); -SELECT relname, spcname FROM pg_catalog.pg_class c - JOIN pg_catalog.pg_namespace n ON (c.relnamespace = n.oid) - LEFT JOIN pg_catalog.pg_tablespace t ON c.reltablespace = t.oid - where c.relname LIKE 'part%' AND n.nspname = 'testschema' order by relname; - relname | spcname -----------+------------------ - part | - part_1 | - part_2 | regress_tblspace - part_3 | regress_tblspace - part_4 | - part_56 | regress_tblspace - part_78 | - part_910 | regress_tblspace -(8 rows) - -RESET default_tablespace; -DROP TABLE testschema.part; --- partitioned index -CREATE TABLE testschema.part (a int) PARTITION BY LIST (a); -CREATE TABLE testschema.part1 PARTITION OF testschema.part FOR VALUES IN (1); -CREATE INDEX part_a_idx ON testschema.part (a) TABLESPACE regress_tblspace; -CREATE TABLE testschema.part2 PARTITION OF testschema.part FOR VALUES IN (2); -SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c - where c.reltablespace = t.oid AND c.relname LIKE 'part%_idx' ORDER BY relname; - relname | spcname --------------+------------------ - part1_a_idx | regress_tblspace - part2_a_idx | regress_tblspace - part_a_idx | regress_tblspace -(3 rows) - -\d testschema.part - Partitioned table "testschema.part" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | -Partition key: LIST (a) -Indexes: - "part_a_idx" btree (a), tablespace "regress_tblspace" -Number of partitions: 2 (Use \d+ to list them.) 
-
-\d+ testschema.part
- Partitioned table "testschema.part"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+---------+--------------+-------------
- a | integer | | | | plain | |
-Partition key: LIST (a)
-Indexes:
- "part_a_idx" btree (a), tablespace "regress_tblspace"
-Partitions: testschema.part1 FOR VALUES IN (1),
- testschema.part2 FOR VALUES IN (2)
-
-\d testschema.part1
- Table "testschema.part1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-Partition of: testschema.part FOR VALUES IN (1)
-Indexes:
- "part1_a_idx" btree (a), tablespace "regress_tblspace"
-
-\d+ testschema.part1
- Table "testschema.part1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+---------+--------------+-------------
- a | integer | | | | plain | |
-Partition of: testschema.part FOR VALUES IN (1)
-Partition constraint: ((a IS NOT NULL) AND (a = 1))
-Indexes:
- "part1_a_idx" btree (a), tablespace "regress_tblspace"
-
-\d testschema.part_a_idx
-Partitioned index "testschema.part_a_idx"
- Column | Type | Key? | Definition
---------+---------+------+------------
- a | integer | yes | a
-btree, for table "testschema.part"
-Number of partitions: 2 (Use \d+ to list them.)
-Tablespace: "regress_tblspace"
-
-\d+ testschema.part_a_idx
- Partitioned index "testschema.part_a_idx"
- Column | Type | Key? | Definition | Storage | Stats target
---------+---------+------+------------+---------+--------------
- a | integer | yes | a | plain |
-btree, for table "testschema.part"
-Partitions: testschema.part1_a_idx,
- testschema.part2_a_idx
-Tablespace: "regress_tblspace"
-
--- partitioned rels cannot specify the default tablespace. These fail:
-CREATE TABLE testschema.dflt (a int PRIMARY KEY) PARTITION BY LIST (a) TABLESPACE pg_default;
-ERROR: cannot specify default tablespace for partitioned relations
-CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE pg_default) PARTITION BY LIST (a);
-ERROR: cannot specify default tablespace for partitioned relations
-SET default_tablespace TO 'pg_default';
-CREATE TABLE testschema.dflt (a int PRIMARY KEY) PARTITION BY LIST (a) TABLESPACE regress_tblspace;
-ERROR: cannot specify default tablespace for partitioned relations
-CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE regress_tblspace) PARTITION BY LIST (a);
-ERROR: cannot specify default tablespace for partitioned relations
--- but these work:
-CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE regress_tblspace) PARTITION BY LIST (a) TABLESPACE regress_tblspace;
-SET default_tablespace TO '';
-CREATE TABLE testschema.dflt2 (a int PRIMARY KEY) PARTITION BY LIST (a);
-DROP TABLE testschema.dflt, testschema.dflt2;
--- check that default_tablespace doesn't affect ALTER TABLE index rebuilds
-CREATE TABLE testschema.test_default_tab(id bigint) TABLESPACE regress_tblspace;
-INSERT INTO testschema.test_default_tab VALUES (1);
-CREATE INDEX test_index1 on testschema.test_default_tab (id);
-CREATE INDEX test_index2 on testschema.test_default_tab (id) TABLESPACE regress_tblspace;
-ALTER TABLE testschema.test_default_tab ADD CONSTRAINT test_index3 PRIMARY KEY (id);
-ALTER TABLE testschema.test_default_tab ADD CONSTRAINT test_index4 UNIQUE (id) USING INDEX TABLESPACE regress_tblspace;
-\d testschema.test_index1
- Index "testschema.test_index1"
- Column | Type | Key? | Definition
---------+--------+------+------------
- id | bigint | yes | id
-btree, for table "testschema.test_default_tab"
-
-\d testschema.test_index2
- Index "testschema.test_index2"
- Column | Type | Key? | Definition
---------+--------+------+------------
- id | bigint | yes | id
-btree, for table "testschema.test_default_tab"
-Tablespace: "regress_tblspace"
-
-\d testschema.test_index3
- Index "testschema.test_index3"
- Column | Type | Key? | Definition
---------+--------+------+------------
- id | bigint | yes | id
-primary key, btree, for table "testschema.test_default_tab"
-
-\d testschema.test_index4
- Index "testschema.test_index4"
- Column | Type | Key? | Definition
---------+--------+------+------------
- id | bigint | yes | id
-unique, btree, for table "testschema.test_default_tab"
-Tablespace: "regress_tblspace"
-
--- use a custom tablespace for default_tablespace
-SET default_tablespace TO regress_tblspace;
--- tablespace should not change if no rewrite
-ALTER TABLE testschema.test_default_tab ALTER id TYPE bigint;
-\d testschema.test_index1
- Index "testschema.test_index1"
- Column | Type | Key? | Definition
---------+--------+------+------------
- id | bigint | yes | id
-btree, for table "testschema.test_default_tab"
-
-\d testschema.test_index2
- Index "testschema.test_index2"
- Column | Type | Key? | Definition
---------+--------+------+------------
- id | bigint | yes | id
-btree, for table "testschema.test_default_tab"
-Tablespace: "regress_tblspace"
-
-\d testschema.test_index3
- Index "testschema.test_index3"
- Column | Type | Key? | Definition
---------+--------+------+------------
- id | bigint | yes | id
-primary key, btree, for table "testschema.test_default_tab"
-
-\d testschema.test_index4
- Index "testschema.test_index4"
- Column | Type | Key? | Definition
---------+--------+------+------------
- id | bigint | yes | id
-unique, btree, for table "testschema.test_default_tab"
-Tablespace: "regress_tblspace"
-
-SELECT * FROM testschema.test_default_tab;
- id
----
- 1
-(1 row)
-
--- tablespace should not change even if there is an index rewrite
-ALTER TABLE testschema.test_default_tab ALTER id TYPE int;
-\d testschema.test_index1
- Index "testschema.test_index1"
- Column | Type | Key? | Definition
---------+---------+------+------------
- id | integer | yes | id
-btree, for table "testschema.test_default_tab"
-
-\d testschema.test_index2
- Index "testschema.test_index2"
- Column | Type | Key? | Definition
---------+---------+------+------------
- id | integer | yes | id
-btree, for table "testschema.test_default_tab"
-Tablespace: "regress_tblspace"
-
-\d testschema.test_index3
- Index "testschema.test_index3"
- Column | Type | Key? | Definition
---------+---------+------+------------
- id | integer | yes | id
-primary key, btree, for table "testschema.test_default_tab"
-
-\d testschema.test_index4
- Index "testschema.test_index4"
- Column | Type | Key? | Definition
---------+---------+------+------------
- id | integer | yes | id
-unique, btree, for table "testschema.test_default_tab"
-Tablespace: "regress_tblspace"
-
-SELECT * FROM testschema.test_default_tab;
- id
----
- 1
-(1 row)
-
--- now use the default tablespace for default_tablespace
-SET default_tablespace TO '';
--- tablespace should not change if no rewrite
-ALTER TABLE testschema.test_default_tab ALTER id TYPE int;
-\d testschema.test_index1
- Index "testschema.test_index1"
- Column | Type | Key? | Definition
---------+---------+------+------------
- id | integer | yes | id
-btree, for table "testschema.test_default_tab"
-
-\d testschema.test_index2
- Index "testschema.test_index2"
- Column | Type | Key? | Definition
---------+---------+------+------------
- id | integer | yes | id
-btree, for table "testschema.test_default_tab"
-Tablespace: "regress_tblspace"
-
-\d testschema.test_index3
- Index "testschema.test_index3"
- Column | Type | Key? | Definition
---------+---------+------+------------
- id | integer | yes | id
-primary key, btree, for table "testschema.test_default_tab"
-
-\d testschema.test_index4
- Index "testschema.test_index4"
- Column | Type | Key? | Definition
---------+---------+------+------------
- id | integer | yes | id
-unique, btree, for table "testschema.test_default_tab"
-Tablespace: "regress_tblspace"
-
--- tablespace should not change even if there is an index rewrite
-ALTER TABLE testschema.test_default_tab ALTER id TYPE bigint;
-\d testschema.test_index1
- Index "testschema.test_index1"
- Column | Type | Key? | Definition
---------+--------+------+------------
- id | bigint | yes | id
-btree, for table "testschema.test_default_tab"
-
-\d testschema.test_index2
- Index "testschema.test_index2"
- Column | Type | Key? | Definition
---------+--------+------+------------
- id | bigint | yes | id
-btree, for table "testschema.test_default_tab"
-Tablespace: "regress_tblspace"
-
-\d testschema.test_index3
- Index "testschema.test_index3"
- Column | Type | Key? | Definition
---------+--------+------+------------
- id | bigint | yes | id
-primary key, btree, for table "testschema.test_default_tab"
-
-\d testschema.test_index4
- Index "testschema.test_index4"
- Column | Type | Key? | Definition
---------+--------+------+------------
- id | bigint | yes | id
-unique, btree, for table "testschema.test_default_tab"
-Tablespace: "regress_tblspace"
-
-DROP TABLE testschema.test_default_tab;
--- check that default_tablespace doesn't affect ALTER TABLE index rebuilds
--- (this time with a partitioned table)
-CREATE TABLE testschema.test_default_tab_p(id bigint, val bigint)
- PARTITION BY LIST (id) TABLESPACE regress_tblspace;
-CREATE TABLE testschema.test_default_tab_p1 PARTITION OF testschema.test_default_tab_p
- FOR VALUES IN (1);
-INSERT INTO testschema.test_default_tab_p VALUES (1);
-CREATE INDEX test_index1 on testschema.test_default_tab_p (val);
-CREATE INDEX test_index2 on testschema.test_default_tab_p (val) TABLESPACE regress_tblspace;
-ALTER TABLE testschema.test_default_tab_p ADD CONSTRAINT test_index3 PRIMARY KEY (id);
-ALTER TABLE testschema.test_default_tab_p ADD CONSTRAINT test_index4 UNIQUE (id) USING INDEX TABLESPACE regress_tblspace;
-\d testschema.test_index1
-Partitioned index "testschema.test_index1"
- Column | Type | Key? | Definition
---------+--------+------+------------
- val | bigint | yes | val
-btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-
-\d testschema.test_index2
-Partitioned index "testschema.test_index2"
- Column | Type | Key? | Definition
---------+--------+------+------------
- val | bigint | yes | val
-btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-Tablespace: "regress_tblspace"
-
-\d testschema.test_index3
-Partitioned index "testschema.test_index3"
- Column | Type | Key? | Definition
---------+--------+------+------------
- id | bigint | yes | id
-primary key, btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-
-\d testschema.test_index4
-Partitioned index "testschema.test_index4"
- Column | Type | Key? | Definition
---------+--------+------+------------
- id | bigint | yes | id
-unique, btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-Tablespace: "regress_tblspace"
-
--- use a custom tablespace for default_tablespace
-SET default_tablespace TO regress_tblspace;
--- tablespace should not change if no rewrite
-ALTER TABLE testschema.test_default_tab_p ALTER val TYPE bigint;
-\d testschema.test_index1
-Partitioned index "testschema.test_index1"
- Column | Type | Key? | Definition
---------+--------+------+------------
- val | bigint | yes | val
-btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-
-\d testschema.test_index2
-Partitioned index "testschema.test_index2"
- Column | Type | Key? | Definition
---------+--------+------+------------
- val | bigint | yes | val
-btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-Tablespace: "regress_tblspace"
-
-\d testschema.test_index3
-Partitioned index "testschema.test_index3"
- Column | Type | Key? | Definition
---------+--------+------+------------
- id | bigint | yes | id
-primary key, btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-
-\d testschema.test_index4
-Partitioned index "testschema.test_index4"
- Column | Type | Key? | Definition
---------+--------+------+------------
- id | bigint | yes | id
-unique, btree, for table "testschema.test_default_tab_p"
-Number of partitions: 1 (Use \d+ to list them.)
-Tablespace: "regress_tblspace" - -SELECT * FROM testschema.test_default_tab_p; - id | val -----+----- - 1 | -(1 row) - --- tablespace should not change even if there is an index rewrite -ALTER TABLE testschema.test_default_tab_p ALTER val TYPE int; -\d testschema.test_index1 -Partitioned index "testschema.test_index1" - Column | Type | Key? | Definition ---------+---------+------+------------ - val | integer | yes | val -btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) - -\d testschema.test_index2 -Partitioned index "testschema.test_index2" - Column | Type | Key? | Definition ---------+---------+------+------------ - val | integer | yes | val -btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) -Tablespace: "regress_tblspace" - -\d testschema.test_index3 -Partitioned index "testschema.test_index3" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -primary key, btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) - -\d testschema.test_index4 -Partitioned index "testschema.test_index4" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -unique, btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) -Tablespace: "regress_tblspace" - -SELECT * FROM testschema.test_default_tab_p; - id | val -----+----- - 1 | -(1 row) - --- now use the default tablespace for default_tablespace -SET default_tablespace TO ''; --- tablespace should not change if no rewrite -ALTER TABLE testschema.test_default_tab_p ALTER val TYPE int; -\d testschema.test_index1 -Partitioned index "testschema.test_index1" - Column | Type | Key? | Definition ---------+---------+------+------------ - val | integer | yes | val -btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) - -\d testschema.test_index2 -Partitioned index "testschema.test_index2" - Column | Type | Key? | Definition ---------+---------+------+------------ - val | integer | yes | val -btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) -Tablespace: "regress_tblspace" - -\d testschema.test_index3 -Partitioned index "testschema.test_index3" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -primary key, btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) - -\d testschema.test_index4 -Partitioned index "testschema.test_index4" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -unique, btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) -Tablespace: "regress_tblspace" - --- tablespace should not change even if there is an index rewrite -ALTER TABLE testschema.test_default_tab_p ALTER val TYPE bigint; -\d testschema.test_index1 -Partitioned index "testschema.test_index1" - Column | Type | Key? | Definition ---------+--------+------+------------ - val | bigint | yes | val -btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) - -\d testschema.test_index2 -Partitioned index "testschema.test_index2" - Column | Type | Key? 
| Definition ---------+--------+------+------------ - val | bigint | yes | val -btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) -Tablespace: "regress_tblspace" - -\d testschema.test_index3 -Partitioned index "testschema.test_index3" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -primary key, btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) - -\d testschema.test_index4 -Partitioned index "testschema.test_index4" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -unique, btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) -Tablespace: "regress_tblspace" - -DROP TABLE testschema.test_default_tab_p; --- check that default_tablespace affects index additions in ALTER TABLE -CREATE TABLE testschema.test_tab(id int) TABLESPACE regress_tblspace; -INSERT INTO testschema.test_tab VALUES (1); -SET default_tablespace TO regress_tblspace; -ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_unique UNIQUE (id); -SET default_tablespace TO ''; -ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_pkey PRIMARY KEY (id); -\d testschema.test_tab_unique - Index "testschema.test_tab_unique" - Column | Type | Key? | Definition ---------+---------+------+------------ - id | integer | yes | id -unique, btree, for table "testschema.test_tab" -Tablespace: "regress_tblspace" - -\d testschema.test_tab_pkey - Index "testschema.test_tab_pkey" - Column | Type | Key? | Definition ---------+---------+------+------------ - id | integer | yes | id -primary key, btree, for table "testschema.test_tab" - -SELECT * FROM testschema.test_tab; - id ----- - 1 -(1 row) - -DROP TABLE testschema.test_tab; --- check that default_tablespace is handled correctly by multi-command --- ALTER TABLE that includes a tablespace-preserving rewrite -CREATE TABLE testschema.test_tab(a int, b int, c int); -SET default_tablespace TO regress_tblspace; -ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_unique UNIQUE (a); -CREATE INDEX test_tab_a_idx ON testschema.test_tab (a); -SET default_tablespace TO ''; -CREATE INDEX test_tab_b_idx ON testschema.test_tab (b); -\d testschema.test_tab_unique - Index "testschema.test_tab_unique" - Column | Type | Key? | Definition ---------+---------+------+------------ - a | integer | yes | a -unique, btree, for table "testschema.test_tab" -Tablespace: "regress_tblspace" - -\d testschema.test_tab_a_idx - Index "testschema.test_tab_a_idx" - Column | Type | Key? | Definition ---------+---------+------+------------ - a | integer | yes | a -btree, for table "testschema.test_tab" -Tablespace: "regress_tblspace" - -\d testschema.test_tab_b_idx - Index "testschema.test_tab_b_idx" - Column | Type | Key? | Definition ---------+---------+------+------------ - b | integer | yes | b -btree, for table "testschema.test_tab" - -ALTER TABLE testschema.test_tab ALTER b TYPE bigint, ADD UNIQUE (c); -\d testschema.test_tab_unique - Index "testschema.test_tab_unique" - Column | Type | Key? | Definition ---------+---------+------+------------ - a | integer | yes | a -unique, btree, for table "testschema.test_tab" -Tablespace: "regress_tblspace" - -\d testschema.test_tab_a_idx - Index "testschema.test_tab_a_idx" - Column | Type | Key? 
| Definition ---------+---------+------+------------ - a | integer | yes | a -btree, for table "testschema.test_tab" -Tablespace: "regress_tblspace" - -\d testschema.test_tab_b_idx - Index "testschema.test_tab_b_idx" - Column | Type | Key? | Definition ---------+--------+------+------------ - b | bigint | yes | b -btree, for table "testschema.test_tab" - -DROP TABLE testschema.test_tab; --- let's try moving a table from one place to another -CREATE TABLE testschema.atable AS VALUES (1), (2); -CREATE UNIQUE INDEX anindex ON testschema.atable(column1); -ALTER TABLE testschema.atable SET TABLESPACE regress_tblspace; -ALTER INDEX testschema.anindex SET TABLESPACE regress_tblspace; -ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_global; -ERROR: only shared relations can be placed in pg_global tablespace -ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_default; -ALTER INDEX testschema.part_a_idx SET TABLESPACE regress_tblspace; -INSERT INTO testschema.atable VALUES(3); -- ok -INSERT INTO testschema.atable VALUES(1); -- fail (checks index) -ERROR: duplicate key value violates unique constraint "anindex" -DETAIL: Key (column1)=(1) already exists. -SELECT COUNT(*) FROM testschema.atable; -- checks heap - count -------- - 3 -(1 row) - --- let's try moving a materialized view from one place to another -CREATE MATERIALIZED VIEW testschema.amv AS SELECT * FROM testschema.atable; -ALTER MATERIALIZED VIEW testschema.amv SET TABLESPACE regress_tblspace; -REFRESH MATERIALIZED VIEW testschema.amv; -SELECT COUNT(*) FROM testschema.amv; - count -------- - 3 -(1 row) - --- Will fail with bad path -CREATE TABLESPACE regress_badspace LOCATION '/no/such/location'; -ERROR: directory "/no/such/location" does not exist --- No such tablespace -CREATE TABLE bar (i int) TABLESPACE regress_nosuchspace; -ERROR: tablespace "regress_nosuchspace" does not exist --- Fail, in use for some partitioned object -DROP TABLESPACE regress_tblspace; -ERROR: tablespace "regress_tblspace" cannot be dropped because some objects depend on it -DETAIL: tablespace for index testschema.part_a_idx -ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_default; --- Fail, not empty -DROP TABLESPACE regress_tblspace; -ERROR: tablespace "regress_tblspace" is not empty -CREATE ROLE regress_tablespace_user1 login; -CREATE ROLE regress_tablespace_user2 login; -GRANT USAGE ON SCHEMA testschema TO regress_tablespace_user2; -ALTER TABLESPACE regress_tblspace OWNER TO regress_tablespace_user1; -CREATE TABLE testschema.tablespace_acl (c int); --- new owner lacks permission to create this index from scratch -CREATE INDEX k ON testschema.tablespace_acl (c) TABLESPACE regress_tblspace; -ALTER TABLE testschema.tablespace_acl OWNER TO regress_tablespace_user2; -SET SESSION ROLE regress_tablespace_user2; -CREATE TABLE tablespace_table (i int) TABLESPACE regress_tblspace; -- fail -ERROR: permission denied for tablespace regress_tblspace -ALTER TABLE testschema.tablespace_acl ALTER c TYPE bigint; -REINDEX (TABLESPACE regress_tblspace) TABLE tablespace_table; -- fail -ERROR: permission denied for tablespace regress_tblspace -REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE tablespace_table; -- fail -ERROR: permission denied for tablespace regress_tblspace -RESET ROLE; -ALTER TABLESPACE regress_tblspace RENAME TO regress_tblspace_renamed; -ALTER TABLE ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default; -ALTER INDEX ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default; -ALTER MATERIALIZED VIEW ALL IN TABLESPACE 
regress_tblspace_renamed SET TABLESPACE pg_default; --- Should show notice that nothing was done -ALTER TABLE ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default; -NOTICE: no matching relations in tablespace "regress_tblspace_renamed" found -ALTER MATERIALIZED VIEW ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default; -NOTICE: no matching relations in tablespace "regress_tblspace_renamed" found --- Should succeed -DROP TABLESPACE regress_tblspace_renamed; -DROP SCHEMA testschema CASCADE; -NOTICE: drop cascades to 7 other objects -DETAIL: drop cascades to table testschema.foo -drop cascades to table testschema.asselect -drop cascades to table testschema.asexecute -drop cascades to table testschema.part -drop cascades to table testschema.atable -drop cascades to materialized view testschema.amv -drop cascades to table testschema.tablespace_acl -DROP ROLE regress_tablespace_user1; -DROP ROLE regress_tablespace_user2; +psql: error: connection to server on socket "/tmp/zHbaMLr5OX/.s.PGSQL.22638" failed: No such file or directory + Is the server running locally and accepting connections on that socket?